index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/HivePartitionRestorePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.configuration.State;
/**
* An abstract class for Restore policy for {@link HivePartitionDataset}
*
* @author adsharma
*/
public abstract class HivePartitionRestorePolicy implements RestorePolicy<HivePartitionDataset> {
  // Job configuration for this policy. Holds a defensive copy (see constructor)
  // so later mutations of the caller's State cannot affect policy decisions.
  protected State state;

  /**
   * @param state job configuration; a defensive copy is taken via {@code new State(state)}
   */
  public HivePartitionRestorePolicy(State state) {
    this.state = new State(state);
  }
}
| 3,200 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/AdhocRestorePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionFinder;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HadoopUtils;
/**
* A restore policy to restore {@link HivePartitionDataset} from a given backup
*
* @author adsharma
*/
public class AdhocRestorePolicy extends HivePartitionRestorePolicy {
  public AdhocRestorePolicy(State state) {
    super(state);
  }

  /**
   * Resolves the backup dataset explicitly named by
   * {@code ComplianceConfigurationKeys.DATASET_TO_RESTORE} and verifies that its
   * location actually contains data before handing it back.
   *
   * @param dataset to restore
   * @return dataset to restore with
   * @throws IOException if the backup owner's filesystem cannot be accessed
   */
  public HivePartitionDataset getDatasetToRestore(HivePartitionDataset dataset)
      throws IOException {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.DATASET_TO_RESTORE),
        "Missing required property " + ComplianceConfigurationKeys.DATASET_TO_RESTORE);
    // Look up the configured backup by name.
    String backupName = this.state.getProp(ComplianceConfigurationKeys.DATASET_TO_RESTORE);
    HivePartitionDataset backupDataset = HivePartitionFinder.findDataset(backupName, this.state);
    // Refuse to restore from an empty location: check via the owner's filesystem.
    FileSystem ownerFs = ProxyUtils.getOwnerFs(new State(this.state), backupDataset.getOwner());
    Preconditions.checkArgument(HadoopUtils.hasContent(ownerFs, backupDataset.getLocation()),
        "Dataset to restore doesn't have any data");
    return backupDataset;
  }
}
| 3,201 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/RestorableHivePartitionDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate;
import org.apache.gobblin.compliance.utils.PartitionUtils;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* A Restorable {@link HivePartitionDataset}. It restores a given {@link HivePartitionDataset} with a
* {@link org.apache.gobblin.compliance.HivePartitionVersion} based on {@link RestorePolicy}
*
* @author adsharma
*/
@Slf4j
public class RestorableHivePartitionDataset extends HivePartitionDataset implements RestorableDataset {
  // Backup version selected by the restore policy; replaces this partition's data.
  private HivePartitionDataset datasetToRestore;
  // Policy instance (built reflectively in init()) that picks datasetToRestore.
  private HivePartitionRestorePolicy restorePolicy;
  // Defensive copy of the job configuration (made in init()).
  private State state;
  private Optional<String> datasetOwner = Optional.absent();
  private Optional<String> datasetToRestoreOwner = Optional.absent();
  private Optional<String> trashOwner = Optional.absent();
  // Filesystem proxied as the dataset owner; set lazily in restore().
  private FileSystem datasetOwnerFs;
  // Millisecond timestamp captured once in init(); suffixes the trash table name
  // so repeated restores of the same partition get distinct trash tables.
  private String timeStamp;

  public RestorableHivePartitionDataset(Partition dataset, State state) {
    super(dataset);
    init(state);
  }

  public RestorableHivePartitionDataset(HivePartitionDataset hivePartitionDataset, State state) {
    super(hivePartitionDataset);
    init(state);
  }

  /**
   * Validates required configuration, instantiates the restore policy reflectively
   * and asks it for the dataset to restore from.
   */
  private void init(State state) {
    this.state = new State(state);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.RESTORE_POLICY_CLASS),
        "Missing required property " + ComplianceConfigurationKeys.RESTORE_POLICY_CLASS);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_OWNER),
        "Missing required property " + ComplianceConfigurationKeys.TRASH_OWNER);
    String restorePolicyClass = this.state.getProp(ComplianceConfigurationKeys.RESTORE_POLICY_CLASS);
    this.datasetOwner = getOwner();
    this.trashOwner = Optional.fromNullable(this.state.getProp(ComplianceConfigurationKeys.TRASH_OWNER));
    setTimeStamp();
    this.restorePolicy =
        GobblinConstructorUtils.invokeConstructor(HivePartitionRestorePolicy.class, restorePolicyClass, this.state);
    try {
      this.datasetToRestore = (HivePartitionDataset) this.restorePolicy.getDatasetToRestore(this);
      log.info("Found dataset to restore with " + this.datasetToRestore.datasetURN());
    } catch (IOException e) {
      // Propagated as an unchecked exception; construction fails if no backup is found.
      Throwables.propagate(e);
    }
    this.datasetToRestoreOwner = this.datasetToRestore.getOwner();
  }

  /**
   * Restores this partition from the backup chosen by the restore policy.
   *
   * Steps, in order:
   *  1. If COMPLIANCE_JOB_SIMULATE is set, only log what would happen and return.
   *  2. Register a trash table/partition in Hive (metadata first, before any file moves).
   *  3. Rename the current partition directory into the trash location and set
   *     group rwx permissions (rwxrwx---) on its parent.
   *  4. Move the backup's files into the (now empty) partition location.
   *  5. Drop the backup partition from the Hive metastore.
   *
   * NOTE(review): if the process dies between steps 3 and 4 the partition location
   * is left empty while the data sits in trash — presumably recoverable from the
   * trash table, but confirm the operational runbook.
   *
   * @throws IOException on any filesystem or (wrapped) Hive query failure
   */
  public void restore()
      throws IOException {
    State state = new State(this.state);
    this.datasetOwnerFs = ProxyUtils.getOwnerFs(state, this.datasetOwner);
    // Query executor proxies as each of the three principals involved.
    try (HiveProxyQueryExecutor queryExecutor = ProxyUtils
        .getQueryExecutor(state, this.datasetOwner, this.datasetToRestoreOwner, this.trashOwner)) {
      if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
          ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE)) {
        log.info("Simulating restore of " + datasetURN() + " with " + this.datasetToRestore.datasetURN());
        return;
      }
      Path trashPartitionLocation = getTrashPartitionLocation();
      executeTrashTableQueries(queryExecutor);
      this.datasetOwnerFs.mkdirs(trashPartitionLocation.getParent());
      this.datasetOwnerFs.rename(getLocation(), trashPartitionLocation);
      // rwxrwx--- : owner and group full access, others none.
      FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
      HadoopUtils
          .setPermissions(trashPartitionLocation.getParent(), this.datasetOwner, this.trashOwner, this.datasetOwnerFs,
              permission);
      log.info(
          "Moved dataset " + datasetURN() + " from " + getLocation() + " to trash location " + trashPartitionLocation);
      fsMove(this.datasetToRestore.getLocation(), getLocation());
      HadoopUtils.setPermissions(getLocation().getParent(), this.datasetOwner, this.trashOwner, this.datasetOwnerFs,
          permission);
      log.info("Moved data from backup " + this.datasetToRestore.getLocation() + " to location " + getLocation());
      executeDropPartitionQueries(queryExecutor);
    }
  }

  /**
   * Creates the trash table (mirroring this partition's table definition) and adds
   * a partition to it pointing at the trash location. Runs as the trash owner.
   */
  private void executeTrashTableQueries(HiveProxyQueryExecutor queryExecutor)
      throws IOException {
    String trashTableName = getTrashTableName();
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DB),
        "Missing required property " + ComplianceConfigurationKeys.TRASH_DB);
    String trashDbName = this.state.getProp(ComplianceConfigurationKeys.TRASH_DB);
    try {
      queryExecutor.executeQuery(HivePurgerQueryTemplate.getUseDbQuery(trashDbName), this.trashOwner);
      queryExecutor.executeQuery(HivePurgerQueryTemplate
          .getCreateTableQuery(trashDbName + "." + trashTableName, getDbName(), getTableName(),
              getTrashTableLocation()), this.trashOwner);
      Optional<String> fileFormat = Optional.absent();
      if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.SPECIFY_PARTITION_FORMAT,
          ComplianceConfigurationKeys.DEFAULT_SPECIFY_PARTITION_FORMAT)) {
        fileFormat = getFileFormat();
      }
      queryExecutor.executeQuery(HivePurgerQueryTemplate
          .getAddPartitionQuery(trashTableName, PartitionUtils.getPartitionSpecString(getSpec()), fileFormat,
              Optional.fromNullable(getTrashPartitionLocation().toString())), this.trashOwner);
    } catch (SQLException e) {
      // Wrap so callers only need to handle IOException.
      throw new IOException(e);
    }
  }

  /**
   * Drops the backup partition from the Hive metastore (its data has already been
   * moved into this dataset's location by restore()). Runs as the backup's owner.
   */
  private void executeDropPartitionQueries(HiveProxyQueryExecutor queryExecutor)
      throws IOException {
    String dbName = this.datasetToRestore.getDbName();
    String tableName = this.datasetToRestore.getTableName();
    String partitionSpec = PartitionUtils.getPartitionSpecString(this.datasetToRestore.getSpec());
    try {
      queryExecutor.executeQuery(HivePurgerQueryTemplate.getUseDbQuery(dbName), this.datasetToRestoreOwner);
      queryExecutor.executeQuery(HivePurgerQueryTemplate.getDropPartitionQuery(tableName, partitionSpec),
          this.datasetToRestoreOwner);
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }

  // e.g. "db_table" + TRASH suffix + init-time timestamp.
  private String getTrashTableName() {
    return getCompleteTableName() + ComplianceConfigurationKeys.TRASH + this.timeStamp;
  }

  private void setTimeStamp() {
    this.timeStamp = Long.toString(System.currentTimeMillis());
  }

  // Database and table name joined by the configured separator.
  private String getCompleteTableName() {
    return StringUtils.join(Arrays.asList(getDbName(), getTableName()), ComplianceConfigurationKeys.DBNAME_SEPARATOR);
  }

  private String getTrashTableLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR),
        "Missing required property " + ComplianceConfigurationKeys.TRASH_DIR);
    return this.state.getProp(ComplianceConfigurationKeys.TRASH_DIR) + getCompleteTableName();
  }

  // Trash dir + this partition's scheme-less path, so the trash mirrors the layout.
  private Path getTrashPartitionLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR),
        "Missing required property " + ComplianceConfigurationKeys.TRASH_DIR);
    return new Path(StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.TRASH_DIR),
        Path.getPathWithoutSchemeAndAuthority(getLocation()).toString()), '/'));
  }

  // Moves only the top-level FILES under 'from' into 'to'; subdirectories are
  // deliberately skipped (isFile() check below).
  private void fsMove(Path from, Path to)
      throws IOException {
    for (FileStatus fileStatus : this.datasetOwnerFs.listStatus(from)) {
      if (fileStatus.isFile()) {
        this.datasetOwnerFs.rename(fileStatus.getPath(), to);
      }
    }
  }
}
| 3,202 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/ComplianceRestoreJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.ComplianceEvents;
import org.apache.gobblin.compliance.ComplianceJob;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* Job for restoring {@link org.apache.gobblin.compliance.HivePartitionDataset} from the backups
*
* @author adsharma
*/
@Slf4j
public class ComplianceRestoreJob extends ComplianceJob {
  /**
   * Builds the job: instantiates the dataset finder and cancels any leftover
   * proxy tokens from previous runs.
   *
   * @param properties job configuration; must name a dataset finder class
   */
  public ComplianceRestoreJob(Properties properties) {
    super(properties);
    initDatasetFinder(properties);
    try {
      ProxyUtils.cancelTokens(new State(properties));
    } catch (InterruptedException | TException | IOException e) {
      // Restore the interrupt flag before rethrowing so callers can still observe it.
      if (e instanceof InterruptedException) {
        Thread.currentThread().interrupt();
      }
      Throwables.propagate(e);
    }
  }

  /**
   * Reflectively instantiates the {@link DatasetsFinder} named by
   * {@link ComplianceConfigurationKeys#GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS}.
   */
  public void initDatasetFinder(Properties properties) {
    Preconditions
        .checkArgument(properties.containsKey(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS),
            // Fixed typo: was "propety"
            "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
    String finderClass = properties.getProperty(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
    this.finder = GobblinConstructorUtils.invokeConstructor(DatasetsFinder.class, finderClass, new State(properties));
  }

  /**
   * Finds all datasets and restores each {@link RestorableDataset} asynchronously.
   * Failures are recorded in {@code throwables} and reported as events; the latch
   * {@code finishCleanSignal} counts every dataset down on success or failure so
   * {@link #close()} can wait for completion.
   */
  public void run()
      throws IOException {
    Preconditions.checkNotNull(this.finder, "Dataset finder class is not set");
    List<Dataset> datasets = this.finder.findDatasets();
    this.finishCleanSignal = Optional.of(new CountDownLatch(datasets.size()));
    for (final Dataset dataset : datasets) {
      ListenableFuture<Void> future = this.service.submit(new Callable<Void>() {
        @Override
        public Void call()
            throws Exception {
          if (dataset instanceof RestorableDataset) {
            log.info("Trying to restore");
            ((RestorableDataset) dataset).restore();
          } else {
            log.warn(
                "Not an instance of " + RestorableDataset.class + " Dataset won't be restored " + dataset.datasetURN());
          }
          return null;
        }
      });
      Futures.addCallback(future, new FutureCallback<Void>() {
        @Override
        public void onSuccess(@Nullable Void result) {
          ComplianceRestoreJob.this.finishCleanSignal.get().countDown();
          log.info("Successfully restored: " + dataset.datasetURN());
        }

        @Override
        public void onFailure(Throwable t) {
          ComplianceRestoreJob.this.finishCleanSignal.get().countDown();
          log.warn("Exception caught when restoring " + dataset.datasetURN() + ".", t);
          ComplianceRestoreJob.this.throwables.add(t);
          ComplianceRestoreJob.this.eventSubmitter.submit(ComplianceEvents.Restore.FAILED_EVENT_NAME, ImmutableMap
              .of(ComplianceEvents.FAILURE_CONTEXT_METADATA_KEY, ExceptionUtils.getFullStackTrace(t),
                  ComplianceEvents.DATASET_URN_METADATA_KEY, dataset.datasetURN()));
        }
      });
    }
  }

  /**
   * Waits for all submitted restores, surfaces any collected failures, then shuts
   * down the executor and closes held resources.
   *
   * @throws IOException if interrupted before all datasets finish
   */
  @Override
  public void close()
      throws IOException {
    try {
      if (this.finishCleanSignal.isPresent()) {
        this.finishCleanSignal.get().await();
      }
      if (!this.throwables.isEmpty()) {
        for (Throwable t : this.throwables) {
          log.error("Failed restore due to ", t);
        }
        throw new RuntimeException("Restore job failed for one or more datasets");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Not all datasets finish restore job", e);
    } finally {
      ExecutorsUtils.shutdownExecutorService(this.service, Optional.of(log));
      this.closer.close();
    }
  }

  /**
   * Generates restore metrics for the instrumentation of this class.
   */
  protected void regenerateMetrics() {
    // TODO
  }
}
| 3,203 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/LKGRestorePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionVersion;
import org.apache.gobblin.compliance.HivePartitionVersionFinder;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.WriterUtils;
/**
* Last Known Good (LKG) restore policy
*
* @author adsharma
*/
public class LKGRestorePolicy extends HivePartitionRestorePolicy {
  public LKGRestorePolicy(State state) {
    super(state);
  }

  /**
   * Finds all backup versions of the given dataset, discards those that cannot be
   * used for a restore, and returns the most recent usable one.
   *
   * @param dataset to restore
   * @return most recent restorable dataset
   * @throws IOException if a version's filesystem cannot be accessed
   */
  public HivePartitionDataset getDatasetToRestore(HivePartitionDataset dataset)
      throws IOException {
    List<String> backupPatterns = new ArrayList<>();
    backupPatterns.add(getCompleteTableName(dataset) + ComplianceConfigurationKeys.BACKUP);
    HivePartitionVersionFinder versionFinder =
        new HivePartitionVersionFinder(WriterUtils.getWriterFs(new State(this.state)), this.state, backupPatterns);
    List<HivePartitionVersion> candidates = new ArrayList<>(versionFinder.findDatasetVersions(dataset));
    Preconditions.checkArgument(!candidates.isEmpty(), "No versions to restore dataset " + dataset.datasetURN());
    // Collect and then drop every version that cannot serve as a restore source.
    List<HivePartitionVersion> unusable = new ArrayList<>();
    for (HivePartitionVersion candidate : candidates) {
      if (!isRestorable(dataset, candidate)) {
        unusable.add(candidate);
      }
    }
    candidates.removeAll(unusable);
    Preconditions.checkArgument(!candidates.isEmpty(), "No versions to restore dataset " + dataset.datasetURN());
    Collections.sort(candidates);
    // After sorting, index 0 holds the most recent restorable version.
    return new HivePartitionDataset(candidates.get(0));
  }

  /**
   * A version is called restorable if it can be used to restore dataset.
   *
   * If a version is pointing to same data location as of the dataset, then it can't be used for restoring
   * If a version is pointing to an empty data location, then it can't be used for restoring
   */
  private boolean isRestorable(HivePartitionDataset dataset, HivePartitionVersion version)
      throws IOException {
    boolean sharesLocation = version.getLocation().toString().equalsIgnoreCase(dataset.getLocation().toString());
    if (sharesLocation) {
      return false;
    }
    FileSystem versionOwnerFs = ProxyUtils.getOwnerFs(new State(this.state), version.getOwner());
    return HadoopUtils.hasContent(versionOwnerFs, version.getLocation());
  }

  // "db" + separator + "table" for the given dataset.
  private String getCompleteTableName(HivePartitionDataset dataset) {
    return dataset.getDbName() + ComplianceConfigurationKeys.DBNAME_SEPARATOR + dataset.getTableName();
  }
}
| 3,204 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/RestorableHivePartitionDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionFinder;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.WriterUtils;
/**
* A finder class for finding Restorable {@link HivePartitionDataset}
*
* @author adsharma
*/
@Slf4j
public class RestorableHivePartitionDatasetFinder extends HivePartitionFinder {
  protected FileSystem fs;

  public RestorableHivePartitionDatasetFinder(State state)
      throws IOException {
    this(WriterUtils.getWriterFs(new State(state)), state);
  }

  public RestorableHivePartitionDatasetFinder(FileSystem fs, State state)
      throws IOException {
    super(state);
    this.fs = fs;
  }

  /**
   * Will return a Singleton list of HivePartitionDataset to be restored.
   */
  public List<HivePartitionDataset> findDatasets()
      throws IOException {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.RESTORE_DATASET),
        "Missing required property " + ComplianceConfigurationKeys.RESTORE_DATASET);
    // The dataset to restore is named explicitly in the job configuration.
    String datasetName = this.state.getProp(ComplianceConfigurationKeys.RESTORE_DATASET);
    HivePartitionDataset dataset = HivePartitionFinder.findDataset(datasetName, this.state);
    Preconditions.checkNotNull(dataset, "No dataset to restore");
    return Collections.singletonList(dataset);
  }
}
| 3,205 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/RestorableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
import org.apache.gobblin.dataset.Dataset;
/**
* @author adsharma
*/
public interface RestorableDataset extends Dataset {
  /**
   * Restores this dataset from a backup.
   *
   * @throws IOException if the restore cannot be completed
   */
  void restore()
      throws IOException;
}
| 3,206 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/validation/ValidatableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.validation;
import java.io.IOException;
import org.apache.gobblin.dataset.Dataset;
/**
* @author adsharma
*/
public interface ValidatableDataset extends Dataset {
  /**
   * Validates this dataset against the applicable compliance policies.
   *
   * @throws IOException if validation cannot be performed
   */
  void validate()
      throws IOException;
}
| 3,207 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/validation/ComplianceValidationJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.validation;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceEvents;
import org.apache.gobblin.compliance.ComplianceJob;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.compliance.ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS;
/**
* A compliance job for validating if datasets are compliant with policies.
*/
@Slf4j
public class ComplianceValidationJob extends ComplianceJob {
  /**
   * Builds the job: instantiates the dataset finder and cancels any leftover
   * proxy tokens from previous runs.
   *
   * @param properties job configuration; must name a dataset finder class
   */
  public ComplianceValidationJob(Properties properties) {
    super(properties);
    initDatasetFinder(properties);
    try {
      ProxyUtils.cancelTokens(new State(properties));
    } catch (IOException | TException | InterruptedException e) {
      // Restore the interrupt flag before rethrowing so callers can still observe it.
      if (e instanceof InterruptedException) {
        Thread.currentThread().interrupt();
      }
      Throwables.propagate(e);
    }
  }

  /**
   * Reflectively instantiates the {@link DatasetsFinder} named by
   * {@code GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS}.
   */
  public void initDatasetFinder(Properties properties) {
    Preconditions.checkArgument(properties.containsKey(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS),
        // Fixed typo: was "propety"
        "Missing required property " + GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
    String finderClass = properties.getProperty(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
    this.finder = GobblinConstructorUtils.invokeConstructor(DatasetsFinder.class, finderClass, new State(properties));
  }

  /**
   * Finds all datasets and validates each {@link ValidatableDataset} asynchronously.
   * Failures are recorded in {@code throwables} and reported as events; the latch
   * {@code finishCleanSignal} counts every dataset down on success or failure so
   * {@link #close()} can wait for completion.
   */
  public void run()
      throws IOException {
    Preconditions.checkNotNull(this.finder, "Dataset finder class is not set");
    List<Dataset> datasets = this.finder.findDatasets();
    this.finishCleanSignal = Optional.of(new CountDownLatch(datasets.size()));
    for (final Dataset dataset : datasets) {
      ListenableFuture<Void> future = this.service.submit(new Callable<Void>() {
        @Override
        public Void call()
            throws Exception {
          if (dataset instanceof ValidatableDataset) {
            ((ValidatableDataset) dataset).validate();
          } else {
            log.warn(
                "Not an instance of " + ValidatableDataset.class + " Dataset won't be validated " + dataset.datasetURN());
          }
          return null;
        }
      });
      Futures.addCallback(future, new FutureCallback<Void>() {
        @Override
        public void onSuccess(@Nullable Void result) {
          ComplianceValidationJob.this.finishCleanSignal.get().countDown();
          log.info("Successfully validated: " + dataset.datasetURN());
        }

        @Override
        public void onFailure(Throwable t) {
          ComplianceValidationJob.this.finishCleanSignal.get().countDown();
          log.warn("Exception caught when validating " + dataset.datasetURN() + ".", t);
          ComplianceValidationJob.this.throwables.add(t);
          ComplianceValidationJob.this.eventSubmitter.submit(ComplianceEvents.Validation.FAILED_EVENT_NAME, ImmutableMap
              .of(ComplianceEvents.FAILURE_CONTEXT_METADATA_KEY, ExceptionUtils.getFullStackTrace(t),
                  ComplianceEvents.DATASET_URN_METADATA_KEY, dataset.datasetURN()));
        }
      });
    }
  }

  /**
   * Waits for all submitted validations, surfaces any collected failures, then
   * shuts down the executor and closes held resources.
   *
   * @throws IOException if interrupted before all datasets finish
   */
  @Override
  public void close()
      throws IOException {
    try {
      if (this.finishCleanSignal.isPresent()) {
        this.finishCleanSignal.get().await();
      }
      if (!this.throwables.isEmpty()) {
        for (Throwable t : this.throwables) {
          log.error("Failed validation due to ", t);
        }
        throw new RuntimeException("Validation job failed for one or more datasets");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Not all datasets finish validation job", e);
    } finally {
      ExecutorsUtils.shutdownExecutorService(this.service, Optional.of(log));
      this.closer.close();
    }
  }

  /**
   * Generates validation metrics for the instrumentation of this class.
   */
  protected void regenerateMetrics() {
    // TODO
  }
}
| 3,208 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/KafkaCommonUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
public class KafkaCommonUtilTest {

  /**
   * Exercises {@code KafkaCommonUtil.getKafkaBrokerToSimpleNameMap}: an unset property yields
   * an empty map, a single {@code broker->name} entry yields one mapping, and comma-separated
   * entries yield one mapping per entry.
   */
  @Test
  public void testGetKafkaBrokerToSimpleNameMap() {
    String brokerUri = "kafka.some-identifier.kafka.coloc-123.com:12345";
    String simpleName = "some-identifier";
    State state = new State();

    // Property absent: the parsed map must be empty.
    Assert.assertTrue(KafkaCommonUtil.getKafkaBrokerToSimpleNameMap(state).isEmpty());

    // One broker->name entry.
    state.setProp(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY, brokerUri + "->" + simpleName);
    Assert.assertEquals(KafkaCommonUtil.getKafkaBrokerToSimpleNameMap(state),
        ImmutableMap.of(brokerUri, simpleName));

    // Two comma-separated entries.
    state.setProp(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY,
        "foobar.com:12345->foobar," + brokerUri + "->" + simpleName);
    Assert.assertEquals(KafkaCommonUtil.getKafkaBrokerToSimpleNameMap(state),
        ImmutableMap.of(brokerUri, simpleName, "foobar.com:12345", "foobar"));
  }
}
| 3,209 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/MockKeyValuePusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.collect.Queues;
/**
 * In-memory {@link org.apache.gobblin.metrics.reporter.KeyValuePusher} test double used by the
 * {@link KeyValueMetricObjectReporter} and {@link KeyValueEventObjectReporter} tests. Stores only
 * the most recently pushed key/value batch; nothing is sent anywhere.
 */
public class MockKeyValuePusher<K, V> implements KeyValuePusher<K, V> {

  // Replaced wholesale on every pushKeyValueMessages() call.
  final Queue<Pair<K, V>> messages = Queues.newLinkedBlockingQueue();

  /** Drops any previously stored batch and stores the given one. */
  @Override
  public void pushKeyValueMessages(List<Pair<K, V>> messages) {
    this.messages.clear();
    for (Pair<K, V> message : messages) {
      this.messages.add(message);
    }
  }

  /** No-op: this mock only records key/value pushes. */
  @Override
  public void pushMessages(List<V> messages) {
  }

  /** Nothing to close. */
  @Override
  public void close()
      throws IOException {
  }

  /** @return an iterator over the last stored batch */
  public Iterator<Pair<K, V>> messageIterator() {
    return this.messages.iterator();
  }
}
| 3,210 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/MockKafkaKeyValuePusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import com.google.common.collect.Queues;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.metrics.kafka.Pusher;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
/**
 * In-memory {@link org.apache.gobblin.metrics.kafka.Pusher} test double for
 * {@link org.apache.gobblin.metrics.kafka.KafkaAvroEventKeyValueReporter}: keeps only the most
 * recently pushed batch of key/value pairs instead of sending them to Kafka.
 */
public class MockKafkaKeyValuePusher<K, V> implements Pusher<Pair<K, V>> {

  // Replaced wholesale on every push.
  final Queue<Pair<K, V>> messages = Queues.newLinkedBlockingQueue();

  /** Drops any previously stored batch, then stores the given one. */
  @Override
  public void pushMessages(List<Pair<K, V>> messages) {
    this.messages.clear();
    for (Pair<K, V> message : messages) {
      this.messages.add(message);
    }
  }

  /** Nothing to close. */
  @Override
  public void close() throws IOException {
  }

  /** @return an iterator over the last stored batch */
  public Iterator<Pair<K, V>> messageIterator() {
    return this.messages.iterator();
  }
}
| 3,211 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/MockKafkaPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import com.google.common.collect.Queues;
import org.apache.gobblin.metrics.kafka.Pusher;
/**
 * In-memory {@link org.apache.gobblin.metrics.kafka.Pusher} used for testing: accumulates all
 * pushed {@code byte[]} messages instead of sending them to Kafka.
 */
public class MockKafkaPusher implements Pusher<byte[]> {

  // Accumulates every pushed message across calls (unlike the key/value mocks, nothing is cleared).
  Queue<byte[]> messages = Queues.newLinkedBlockingQueue();

  // NOTE: the redundant explicit no-arg constructor was removed; the implicit
  // default constructor is equivalent, so callers are unaffected.

  /** Appends the given batch to the stored messages. */
  @Override
  public void pushMessages(List<byte[]> messages) {
    this.messages.addAll(messages);
  }

  /** Nothing to close. */
  @Override
  public void close()
      throws IOException {
  }

  /** @return an iterator over all messages pushed so far */
  public Iterator<byte[]> messageIterator() {
    return this.messages.iterator();
  }
}
| 3,212 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.google.common.collect.Lists;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.Metric;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.kafka.KafkaReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
@Test(groups = { "gobblin.metrics" })
public class KafkaReporterTest {

  public KafkaReporterTest() throws IOException, InterruptedException {}

  /**
   * Get builder for KafkaReporter (override if testing an extension of KafkaReporter).
   * @param pusher test double that captures the bytes the reporter would send to Kafka
   * @return KafkaReporter builder
   */
  public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilder(Pusher pusher) {
    return KafkaReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher);
  }

  /**
   * Get builder for KafkaReporter used by the context-based test (override in extensions).
   * @return KafkaReporter builder
   */
  public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilderFromContext(Pusher pusher) {
    return KafkaReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher);
  }

  /** Sleeps to give the asynchronous reporter time to flush; preserves the interrupt flag. */
  private static void waitForReporter() {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }

  /** End-to-end: report twice and verify the pushed metric reports. */
  @Test
  public void testKafkaReporter() throws IOException {
    MetricContext metricContext =
        MetricContext.builder(this.getClass().getCanonicalName() + ".testKafkaReporter").build();
    Counter counter = metricContext.counter("com.linkedin.example.counter");
    Meter meter = metricContext.meter("com.linkedin.example.meter");
    Histogram histogram = metricContext.histogram("com.linkedin.example.histogram");
    MockKafkaPusher pusher = new MockKafkaPusher();
    KafkaReporter kafkaReporter = getBuilder(pusher).build("localhost:0000", "topic", new Properties());

    counter.inc();
    meter.mark(2);
    histogram.update(1);
    histogram.update(1);
    histogram.update(2);

    kafkaReporter.report(metricContext);
    waitForReporter();

    // First report: the count measurements must carry these exact values.
    Map<String, Double> expected = new HashMap<>();
    expected.put("com.linkedin.example.counter." + Measurements.COUNT, 1.0);
    expected.put("com.linkedin.example.meter." + Measurements.COUNT, 2.0);
    expected.put("com.linkedin.example.histogram." + Measurements.COUNT, 3.0);

    MetricReport nextReport = nextReport(pusher.messageIterator());
    expectMetricsWithValues(nextReport, expected);

    kafkaReporter.report(metricContext);
    waitForReporter();

    // Second report: every derived measurement must be present (values not checked).
    Set<String> expectedSet = new HashSet<>();
    expectedSet.add("com.linkedin.example.counter." + Measurements.COUNT);
    expectedSet.add("com.linkedin.example.meter." + Measurements.COUNT);
    expectedSet.add("com.linkedin.example.meter." + Measurements.MEAN_RATE);
    expectedSet.add("com.linkedin.example.meter." + Measurements.RATE_1MIN);
    expectedSet.add("com.linkedin.example.meter." + Measurements.RATE_5MIN);
    expectedSet.add("com.linkedin.example.meter." + Measurements.RATE_15MIN);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.MEAN);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.MIN);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.MAX);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.MEDIAN);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.PERCENTILE_75TH);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.PERCENTILE_95TH);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.PERCENTILE_99TH);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.PERCENTILE_999TH);
    expectedSet.add("com.linkedin.example.histogram." + Measurements.COUNT);

    nextReport = nextReport(pusher.messageIterator());
    expectMetrics(nextReport, expectedSet, true);
    kafkaReporter.close();
  }

  /** Verifies that tags passed to the builder end up in the pushed report. */
  @Test
  public void kafkaReporterTagsTest() throws IOException {
    MetricContext metricContext =
        MetricContext.builder(this.getClass().getCanonicalName() + ".kafkaReporterTagsTest").build();
    Counter counter = metricContext.counter("com.linkedin.example.counter");
    Tag<?> tag1 = new Tag<>("tag1", "value1");
    Tag<?> tag2 = new Tag<>("tag2", 2);
    MockKafkaPusher pusher = new MockKafkaPusher();
    KafkaReporter kafkaReporter =
        getBuilder(pusher).withTags(Lists.newArrayList(tag1, tag2)).build("localhost:0000", "topic", new Properties());

    counter.inc();
    kafkaReporter.report(metricContext);
    waitForReporter();

    // TestNG's Assert.assertEquals takes (actual, expected); arguments were previously reversed.
    MetricReport metricReport = nextReport(pusher.messageIterator());
    Assert.assertEquals(metricReport.getTags().size(), 4);
    Assert.assertTrue(metricReport.getTags().containsKey(tag1.getKey()));
    Assert.assertEquals(metricReport.getTags().get(tag1.getKey()), tag1.getValue().toString());
    Assert.assertTrue(metricReport.getTags().containsKey(tag2.getKey()));
    Assert.assertEquals(metricReport.getTags().get(tag2.getKey()), tag2.getValue().toString());
  }

  /** Verifies that tags set on the metric context itself end up in the pushed report. */
  @Test
  public void kafkaReporterContextTest() throws IOException {
    Tag<?> tag1 = new Tag<>("tag1", "value1");
    MetricContext context = MetricContext.builder("context").addTag(tag1).build();
    Counter counter = context.counter("com.linkedin.example.counter");
    MockKafkaPusher pusher = new MockKafkaPusher();
    KafkaReporter kafkaReporter = getBuilderFromContext(pusher).build("localhost:0000", "topic", new Properties());

    counter.inc();
    kafkaReporter.report(context);
    waitForReporter();

    MetricReport metricReport = nextReport(pusher.messageIterator());
    Assert.assertEquals(metricReport.getTags().size(), 3);
    Assert.assertTrue(metricReport.getTags().containsKey(tag1.getKey()));
    Assert.assertEquals(metricReport.getTags().get(tag1.getKey()), tag1.getValue().toString());
  }

  /**
   * Expect a list of metrics with specific values.
   * Fail if not all metrics are received, or some metric has the wrong value.
   * @param report MetricReport.
   * @param expected map of expected metric names and their values
   */
  private void expectMetricsWithValues(MetricReport report, Map<String, Double> expected) {
    List<Metric> metricIterator = report.getMetrics();
    for (Metric metric : metricIterator) {
      if (expected.containsKey(metric.getName())) {
        // (actual, expected) argument order per TestNG's Assert contract.
        Assert.assertEquals(metric.getValue(), expected.get(metric.getName()));
        expected.remove(metric.getName());
      }
    }
    Assert.assertTrue(expected.isEmpty());
  }

  /**
   * Expect a set of metric names. Will fail if not all of these metrics are received.
   * @param report MetricReport
   * @param expected set of expected metric names
   * @param strict if set to true, will fail if receiving any metric that is not expected
   */
  private void expectMetrics(MetricReport report, Set<String> expected, boolean strict) {
    List<Metric> metricIterator = report.getMetrics();
    for (Metric metric : metricIterator) {
      if (expected.contains(metric.getName())) {
        expected.remove(metric.getName());
      } else if (strict && !metric.getName().contains(MetricContext.GOBBLIN_METRICS_NOTIFICATIONS_TIMER_NAME)) {
        Assert.fail("Metric present in report not expected: " + metric.toString());
      }
    }
    Assert.assertTrue(expected.isEmpty());
  }

  /**
   * Extract the next metric report from the Kafka iterator.
   * Assumes existence of the metric has already been checked.
   * @param it Kafka message iterator
   * @return next metric report in the stream
   * @throws IOException on deserialization failure
   */
  protected MetricReport nextReport(Iterator<byte[]> it) throws IOException {
    Assert.assertTrue(it.hasNext());
    return MetricReportUtils.deserializeReportFromJson(new MetricReport(), it.next());
  }
}
| 3,213 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaAvroEventKeyValueReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.kafka.KafkaAvroEventKeyValueReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaEventReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.EventUtils;
public class KafkaAvroEventKeyValueReporterTest extends KafkaAvroEventReporterTest {

  // SHA-1 hex digest of the tracking-event schema text, computed once in setUp()
  // and shared by all tests in this class. (Removed the unused SCHEMA_ID_LENGTH_BYTES constant.)
  private String schemaId;

  /** Parses the bundled GobblinTrackingEvent schema and caches its schema id. */
  @BeforeClass
  public void setUp() throws IOException {
    Schema schema =
        new Schema.Parser().parse(getClass().getClassLoader().getResourceAsStream("GobblinTrackingEvent.avsc"));
    this.schemaId = DigestUtils.sha1Hex(schema.toString().getBytes());
  }

  /** Builds the key/value reporter under test, keyed on metadata entries k1, k2, k3. */
  @Override
  public KafkaEventReporter.Builder<? extends KafkaEventReporter.Builder> getBuilder(MetricContext context,
      Pusher pusher) {
    KafkaAvroEventKeyValueReporter.Builder<?> builder = KafkaAvroEventKeyValueReporter.Factory.forContext(context);
    return builder.withKafkaPusher(pusher).withKeys(Lists.newArrayList("k1", "k2", "k3"));
  }

  /**
   * Deserializes the next pushed message into a (key, event) pair. When the reporter was
   * configured with a schema id, the payload carries that id and must be decoded accordingly.
   */
  private Pair<String, GobblinTrackingEvent> nextKVEvent(Iterator<Pair<String, byte[]>> it, boolean isSchemaIdEnabled) throws IOException {
    Assert.assertTrue(it.hasNext());
    Pair<String, byte[]> event = it.next();
    return isSchemaIdEnabled ? Pair.of(event.getKey(), EventUtils
        .deserializeEventFromAvroSerialization(new GobblinTrackingEvent(), event.getValue(), schemaId)) : Pair.of(event.getKey(),
        EventUtils.deserializeEventFromAvroSerialization(new GobblinTrackingEvent(), event.getValue()));
  }

  /**
   * Creates a test event; when {@code isMessageKeyed}, adds the k1/k2/k3 metadata entries
   * that the reporter concatenates into the message key.
   */
  private GobblinTrackingEvent getEvent(boolean isMessageKeyed) {
    String namespace = "gobblin.metrics.test";
    String eventName = "testEvent";
    GobblinTrackingEvent event = new GobblinTrackingEvent();
    event.setName(eventName);
    event.setNamespace(namespace);
    Map<String, String> metadata = Maps.newHashMap();
    metadata.put("m1", "v1");
    metadata.put("m2", null);
    if (isMessageKeyed) {
      metadata.put("k1", "v1");
      metadata.put("k2", "v2");
      metadata.put("k3", "v3");
    }
    event.setMetadata(metadata);
    return event;
  }

  /** Without key metadata the message key is null; with it, the key is the concatenated values. */
  @Test
  public void testKafkaEventReporter() throws IOException {
    MetricContext context = MetricContext.builder("context").build();
    MockKafkaKeyValuePusher pusher = new MockKafkaKeyValuePusher();
    KafkaEventReporter kafkaReporter = getBuilder(context, pusher).build("localhost:0000", "topic");

    context.submitEvent(getEvent(false));
    kafkaReporter.report();
    Pair<String, GobblinTrackingEvent> retrievedEvent = nextKVEvent(pusher.messageIterator(), false);
    Assert.assertNull(retrievedEvent.getKey());

    context.submitEvent(getEvent(true));
    kafkaReporter.report();
    retrievedEvent = nextKVEvent(pusher.messageIterator(), false);
    Assert.assertEquals(retrievedEvent.getKey(), "v1v2v3");
  }

  /** Same flow with a (mock) schema registry and an explicit schema id. */
  @Test
  public void testKafkaEventReporterWithSchemaRegistry() throws IOException {
    MetricContext context = MetricContext.builder("context").build();
    MockKafkaKeyValuePusher pusher = new MockKafkaKeyValuePusher();
    // Reuse the schema id computed in setUp() instead of re-parsing the schema resource
    // into a shadowing local variable, as the original did.
    KafkaAvroEventKeyValueReporter.Builder<?> builder = KafkaAvroEventKeyValueReporter.Factory.forContext(context);
    KafkaAvroEventKeyValueReporter kafkaReporter =
        builder.withKafkaPusher(pusher).withKeys(Lists.newArrayList("k1", "k2", "k3"))
            .withSchemaRegistry(Mockito.mock(KafkaAvroSchemaRegistry.class)).withSchemaId(this.schemaId)
            .build("localhost:0000", "topic");
    context.submitEvent(getEvent(true));
    kafkaReporter.report();
    Pair<String, GobblinTrackingEvent> retrievedEvent = nextKVEvent(pusher.messageIterator(), true);
    Assert.assertEquals(retrievedEvent.getKey(), "v1v2v3");
  }

  @Test (enabled=false)
  public void testTagInjection() throws IOException {
    // This test is not applicable for testing KafkaAvroEventKeyValueReporter
  }
}
| 3,214 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaAvroEventReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.kafka.KafkaAvroEventReporter;
import org.apache.gobblin.metrics.kafka.KafkaEventReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.EventUtils;
@Test(groups = {"gobblin.metrics"})
public class KafkaAvroEventReporterTest extends KafkaEventReporterTest {

  /**
   * Supplies the Avro event-reporter builder so the inherited tests exercise
   * {@link KafkaAvroEventReporter} instead of the base reporter.
   */
  @Override
  public KafkaEventReporter.Builder<? extends KafkaEventReporter.Builder> getBuilder(MetricContext context,
      Pusher pusher) {
    return KafkaAvroEventReporter.forContext(context).withKafkaPusher(pusher);
  }

  /** Decodes the next pushed message as an Avro-serialized {@link GobblinTrackingEvent}. */
  @Override
  @SuppressWarnings("unchecked")
  protected GobblinTrackingEvent nextEvent(Iterator<byte[]> it)
      throws IOException {
    Assert.assertTrue(it.hasNext());
    byte[] serialized = it.next();
    return EventUtils.deserializeEventFromAvroSerialization(new GobblinTrackingEvent(), serialized);
  }
}
| 3,215 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KeyValueMetricObjectReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.Properties;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.tuple.Pair;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.util.ConfigUtils;
public class KeyValueMetricObjectReporterTest extends KeyValueMetricObjectReporter {

  private static final String TOPIC = KeyValueMetricObjectReporterTest.class.getSimpleName();

  public KeyValueMetricObjectReporterTest(Builder builder, Config config) {
    super(builder, config);
  }

  /** @return the mock pusher configured via the "pusherClass" property */
  public MockKeyValuePusher getPusher() {
    return (MockKeyValuePusher) pusher;
  }

  /** Test builder that exposes broker/topic wiring for the reporter under test. */
  public static class Builder extends KeyValueMetricObjectReporter.Builder {
    public KeyValueMetricObjectReporterTest build(String brokers, String topic, Config config)
        throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KeyValueMetricObjectReporterTest(this, config);
    }
  }

  /**
   * Get builder for KeyValueMetricObjectReporter.
   * @param props properties supplying the namespace override
   * @return KeyValueMetricObjectReporter builder
   */
  public static KeyValueMetricObjectReporterTest.Builder getBuilder(Properties props) {
    KeyValueMetricObjectReporterTest.Builder builder = new KeyValueMetricObjectReporterTest.Builder();
    builder.namespaceOverride(KafkaReporterUtils.extractOverrideNamespace(props));
    return builder;
  }

  /** End-to-end: report once and verify the pushed record's schema, namespace, and key. */
  @Test
  public static void testKafkaKeyValueMetricObjectReporter()
      throws IOException {
    MetricContext metricContext = MetricContext.builder("context").build();
    String namespace = "org.apache.gobblin.metrics:gobblin.metrics.test";
    String name = TOPIC;

    Properties properties = new Properties();
    properties.put(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE, namespace);
    properties.put("pusherClass", "org.apache.gobblin.metrics.reporter.MockKeyValuePusher");
    KeyValueMetricObjectReporterTest reporter =
        getBuilder(properties).build("localhost:0000", TOPIC, ConfigUtils.propertiesToConfig(properties));
    reporter.report(metricContext);

    // Give the reporter time to push; preserve the interrupt flag if interrupted.
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    MockKeyValuePusher pusher = reporter.getPusher();
    Pair<String, GenericRecord> retrievedEvent = nextKVReport(pusher.messageIterator());

    // The override namespace is expected to resolve to its "gobblin.metrics.test" portion.
    Assert.assertEquals(retrievedEvent.getValue().getSchema().getNamespace(), "gobblin.metrics.test");
    Assert.assertEquals(retrievedEvent.getValue().getSchema().getName(), name);

    // The test expects the message key to parse as a partition id in [0, 99].
    int partition = Integer.parseInt(retrievedEvent.getKey());
    Assert.assertTrue(0 <= partition && partition <= 99, "partition key out of range: " + partition);

    // Reference equality is intended here: the record must use the reporter's own schema
    // instance, so use assertSame (clearer failure message than assertTrue(a == b)).
    Assert.assertSame(retrievedEvent.getValue().getSchema(), reporter.schema);
    reporter.close();
  }

  /**
   * Extract the next report from the pusher iterator.
   * Assumes existence of the report has already been checked.
   * @param it message iterator
   * @return next (key, record) pair in the stream
   */
  protected static Pair<String, GenericRecord> nextKVReport(Iterator<Pair<String, GenericRecord>> it) {
    Assert.assertTrue(it.hasNext());
    return it.next();
  }
}
| 3,216 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaEventReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.kafka.KafkaEventReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.EventUtils;
@Test(groups = {"gobblin.metrics"})
public class KafkaEventReporterTest {
/**
 * Gets a builder for the {@link KafkaEventReporter} under test, wired to the given pusher.
 * Subclasses testing an extension of {@link KafkaEventReporter} override this factory method.
 *
 * @param context metric context whose submitted events the reporter will serialize
 * @param pusher test double that captures the messages the reporter would send to Kafka
 * @return a builder producing the reporter implementation under test
 */
public KafkaEventReporter.Builder<? extends KafkaEventReporter.Builder> getBuilder(MetricContext context,
    Pusher pusher) {
  return KafkaEventReporter.Factory.forContext(context).withKafkaPusher(pusher);
}
@Test
public void testKafkaEventReporter() throws IOException {
  MetricContext context = MetricContext.builder("context").build();
  MockKafkaPusher pusher = new MockKafkaPusher();
  KafkaEventReporter kafkaReporter = getBuilder(context, pusher).build("localhost:0000", "topic");

  // Submit a single event carrying two metadata entries (one of them null-valued).
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put("m1", "v1");
  metadata.put("m2", null);
  GobblinTrackingEvent event = new GobblinTrackingEvent();
  event.setName("testEvent");
  event.setNamespace("gobblin.metrics.test");
  event.setMetadata(metadata);
  context.submitEvent(event);

  try {
    Thread.sleep(100);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }
  kafkaReporter.report();
  try {
    Thread.sleep(100);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  // The reported event keeps name/namespace and ends up with four metadata entries.
  GobblinTrackingEvent reported = nextEvent(pusher.messageIterator());
  Assert.assertEquals(reported.getNamespace(), "gobblin.metrics.test");
  Assert.assertEquals(reported.getName(), "testEvent");
  Assert.assertEquals(reported.getMetadata().size(), 4);
}
@Test
public void testTagInjection() throws IOException {
  // The context carries two tags; the event's own metadata overrides the first one.
  String key1 = "tag1";
  String contextValue1 = "value1";
  String eventValue1 = "metadata1";
  String key2 = "tag2";
  String contextValue2 = "value2";

  MetricContext context = MetricContext.builder("context")
      .addTag(new Tag<String>(key1, contextValue1))
      .addTag(new Tag<String>(key2, contextValue2))
      .build();
  MockKafkaPusher pusher = new MockKafkaPusher();
  KafkaEventReporter kafkaReporter = getBuilder(context, pusher).build("localhost:0000", "topic");

  GobblinTrackingEvent event = new GobblinTrackingEvent();
  event.setName("testEvent");
  event.setNamespace("gobblin.metrics.test");
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put(key1, eventValue1);
  event.setMetadata(metadata);
  context.submitEvent(event);

  try {
    Thread.sleep(100);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }
  kafkaReporter.report();
  try {
    Thread.sleep(100);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  // Event metadata wins over the context tag with the same key; the other tag is injected as-is.
  GobblinTrackingEvent reported = nextEvent(pusher.messageIterator());
  Assert.assertEquals(reported.getNamespace(), "gobblin.metrics.test");
  Assert.assertEquals(reported.getName(), "testEvent");
  Assert.assertEquals(reported.getMetadata().size(), 4);
  Assert.assertEquals(reported.getMetadata().get(key1), eventValue1);
  Assert.assertEquals(reported.getMetadata().get(key2), contextValue2);
}
@Test
public void testEventReporterConfigs() throws IOException {
MetricContext context = MetricContext.builder("context").build();
MockKafkaPusher pusher = new MockKafkaPusher();
KafkaEventReporter kafkaReporter = getBuilder(context, pusher).build("localhost:0000", "topic");
Assert.assertEquals(kafkaReporter.getQueueCapacity(), EventReporter.DEFAULT_QUEUE_CAPACITY);
Assert.assertEquals(kafkaReporter.getQueueOfferTimeoutSecs(), EventReporter.DEFAULT_QUEUE_OFFER_TIMEOUT_SECS);
Config config = ConfigFactory.parseMap(
ImmutableMap.<String, Object>builder()
.put(EventReporter.QUEUE_CAPACITY_KEY, 200)
.put(EventReporter.QUEUE_OFFER_TIMOUT_SECS_KEY, 5)
.build());
kafkaReporter = getBuilder(context, pusher).withConfig(config).build("localhost:0000", "topic");
Assert.assertEquals(kafkaReporter.getQueueCapacity(), 200);
Assert.assertEquals(kafkaReporter.getQueueOfferTimeoutSecs(), 5);
}
/**
* Extract the next metric from the Kafka iterator
* Assumes existence of the metric has already been checked.
* @param it Kafka ConsumerIterator
* @return next metric in the stream
* @throws IOException
*/
protected GobblinTrackingEvent nextEvent(Iterator<byte[]> it) throws IOException {
Assert.assertTrue(it.hasNext());
return EventUtils.deserializeReportFromJson(new GobblinTrackingEvent(), it.next());
}
}
| 3,217 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaAvroReporterWithSchemaRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import org.apache.avro.Schema;
import org.apache.commons.codec.digest.DigestUtils;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.kafka.KafkaAvroReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
/**
* Test for KafkaAvroReporter that is configured with a {@link KafkaAvroSchemaRegistry}.
* Extends KafkaAvroReporterTest and just redefines the builder and the metrics deserializer
*
* @author Sudarshan Vasudevan
*/
@Test(groups = {"gobblin.metrics"})
public class KafkaAvroReporterWithSchemaRegistryTest extends KafkaAvroReporterTest {
private static final int SCHEMA_ID_LENGTH_BYTES = 20;
private String schemaId;
private KafkaAvroSchemaRegistry schemaRegistry;
public KafkaAvroReporterWithSchemaRegistryTest(String topic)
throws IOException, InterruptedException {
super();
this.schemaId = getSchemaId();
this.schemaRegistry = getMockSchemaRegistry();
}
public KafkaAvroReporterWithSchemaRegistryTest()
throws IOException, InterruptedException {
this("KafkaAvroReporterTestWithSchemaRegistry");
this.schemaId = getSchemaId();
}
private KafkaAvroSchemaRegistry getMockSchemaRegistry() throws IOException {
KafkaAvroSchemaRegistry registry = Mockito.mock(KafkaAvroSchemaRegistry.class);
Mockito.when(registry.getSchemaIdLengthByte()).thenAnswer(new Answer<Integer>() {
@Override
public Integer answer(InvocationOnMock invocation) {
return KafkaAvroReporterWithSchemaRegistryTest.SCHEMA_ID_LENGTH_BYTES;
}
});
Mockito.when(registry.getSchemaByKey(Mockito.anyString())).thenAnswer(new Answer<Schema>() {
@Override
public Schema answer(InvocationOnMock invocation) {
return MetricReport.SCHEMA$;
}
});
return registry;
}
private String getSchemaId() throws IOException {
Schema schema =
new Schema.Parser().parse(getClass().getClassLoader().getResourceAsStream("MetricReport.avsc"));
return DigestUtils.sha1Hex(schema.toString().getBytes());
}
@Override
public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilder(Pusher pusher) {
return KafkaAvroReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher).withSchemaRegistry(this.schemaRegistry).withSchemaId(schemaId);
}
@Override
public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilderFromContext(Pusher pusher) {
return KafkaAvroReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher).withSchemaRegistry(this.schemaRegistry).withSchemaId(schemaId);
}
@Override
@SuppressWarnings("unchecked")
protected MetricReport nextReport(Iterator<byte[]> it)
throws IOException {
Assert.assertTrue(it.hasNext());
return MetricReportUtils.deserializeReportFromAvroSerialization(new MetricReport(), it.next(), this.schemaId);
}
}
| 3,218 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KafkaAvroReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.kafka.KafkaAvroReporter;
import org.apache.gobblin.metrics.kafka.KafkaReporter;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
/**
* Test for KafkaAvroReporter
* Extends KafkaReporterTest and just redefines the builder and the metrics deserializer
*
* @author ibuenros
*/
@Test(groups = {"gobblin.metrics"})
public class KafkaAvroReporterTest extends KafkaReporterTest {

  // NOTE(review): the topic parameter is accepted but not used — super() is called
  // without it; presumably kept for signature parity with subclasses. Confirm before removing.
  public KafkaAvroReporterTest(String topic)
      throws IOException, InterruptedException {
    super();
  }

  public KafkaAvroReporterTest() throws IOException, InterruptedException {
    this("KafkaAvroReporterTest");
  }

  /** Returns a KafkaAvroReporter builder wired to the given pusher (overrides the JSON variant). */
  @Override
  public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilder(Pusher pusher) {
    return KafkaAvroReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher);
  }

  /** Same builder as {@code getBuilder}; context-based construction is not exercised here. */
  @Override
  public KafkaReporter.Builder<? extends KafkaReporter.Builder> getBuilderFromContext(Pusher pusher) {
    return KafkaAvroReporter.BuilderFactory.newBuilder().withKafkaPusher(pusher);
  }

  /**
   * Deserializes the next metric report from its Avro binary form
   * (the parent class deserializes from JSON instead).
   */
  @Override
  @SuppressWarnings("unchecked")
  protected MetricReport nextReport(Iterator<byte[]> it)
      throws IOException {
    Assert.assertTrue(it.hasNext());
    return MetricReportUtils.deserializeReportFromAvroSerialization(new MetricReport(), it.next());
  }
}
| 3,219 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/reporter/KeyValueEventObjectReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.tuple.Pair;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Tests {@link KeyValueEventObjectReporter} by extending it so the underlying
 * (mock) pusher can be inspected after reporting.
 */
public class KeyValueEventObjectReporterTest extends KeyValueEventObjectReporter {

  public KeyValueEventObjectReporterTest(Builder builder) {
    super(builder);
  }

  /** Exposes the configured pusher, which the test config guarantees is a mock. */
  public MockKeyValuePusher getPusher() {
    return (MockKeyValuePusher) pusher;
  }

  /** Builder that produces the test subclass instead of the production reporter. */
  public static class Builder extends KeyValueEventObjectReporter.Builder {
    protected Builder(MetricContext context) {
      super(context);
    }

    public KeyValueEventObjectReporterTest build(String brokers, String topic) {
      this.brokers = brokers;
      this.topic = topic;
      return new KeyValueEventObjectReporterTest(this);
    }
  }

  /**
   * Get builder for KeyValueEventObjectReporter
   * @return KeyValueEventObjectReporter builder
   */
  public static KeyValueEventObjectReporterTest.Builder getBuilder(MetricContext context, Properties props) {
    KeyValueEventObjectReporterTest.Builder builder = new KeyValueEventObjectReporterTest.Builder(context);
    builder.namespaceOverride(KafkaReporterUtils.extractOverrideNamespace(props))
        .withConfig(ConfigUtils.propertiesToConfig(props));
    return builder;
  }

  /**
   * Submits an event, reports it, and checks that the pushed key/value pair carries the
   * event's name and namespace, a key in [0, 99], and the reporter's schema.
   */
  @Test
  public static void testKafkaKeyValueEventObjectReporter()
      throws IOException {
    MetricContext metricContext = MetricContext.builder("context").build();

    String namespace = "org.apache.gobblin.metrics:gobblin.metrics.test";

    Properties props = new Properties();
    props.put(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE, namespace);
    props.put("pusherClass", "org.apache.gobblin.metrics.reporter.MockKeyValuePusher");

    KeyValueEventObjectReporterTest reporter = getBuilder(metricContext, props).build("localhost:0000", "topic");

    String eventName = "testEvent";
    GobblinTrackingEvent event = new GobblinTrackingEvent();
    event.setName(eventName);
    event.setNamespace(namespace);
    Map<String, String> metadata = Maps.newHashMap();
    event.setMetadata(metadata);

    metricContext.submitEvent(event);
    // Submission is asynchronous; give it a moment before reporting.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    reporter.report();

    MockKeyValuePusher kvPusher = reporter.getPusher();
    Pair<String, GenericRecord> kvEvent = nextKVEvent(kvPusher.messageIterator());

    GenericRecord value = kvEvent.getValue();
    Assert.assertEquals(value.get("namespace"), namespace);
    Assert.assertEquals(value.get("name"), eventName);

    int partition = Integer.parseInt(kvEvent.getKey());
    Assert.assertTrue(partition >= 0 && partition <= 99);
    // The pushed record must carry the exact Schema instance held by the reporter.
    Assert.assertSame(value.getSchema(), reporter.schema);
  }

  /** Asserts the iterator has another element and returns it as a fresh pair. */
  private static Pair<String, GenericRecord> nextKVEvent(Iterator<Pair<String, GenericRecord>> it) {
    Assert.assertTrue(it.hasNext());
    Pair<String, GenericRecord> next = it.next();
    return Pair.of(next.getKey(), next.getValue());
  }
}
| 3,220 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/kafka/PusherFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.StringNameSharedResourceKey;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
* Test {@link PusherFactory}s
*/
public class PusherFactoryTest {
@Test
private void testCreateGobblinScopedDefaultPusher()
throws NotConfiguredException {
SharedResourcesBroker<GobblinScopeTypes> instanceBroker = SharedResourcesBrokerFactory
.createDefaultTopLevelBroker(ConfigFactory.empty(), GobblinScopeTypes.GLOBAL.defaultScopeInstance());
SharedResourcesBroker<GobblinScopeTypes> jobBroker = instanceBroker.newSubscopedBuilder(
new JobScopeInstance("PusherFactoryTest", String.valueOf(System.currentTimeMillis()))).build();
StringNameSharedResourceKey key = new StringNameSharedResourceKey("test");
Pusher<Object> pusher = jobBroker.getSharedResource(new GobblinScopePusherFactory<>(), key);
Assert.assertEquals(pusher.getClass(), LoggingPusher.class);
try {
jobBroker.close();
instanceBroker.close();
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
private void testCreateGobblinScopedCustomPusher()
throws NotConfiguredException {
Map<String, String> configAsMap = new HashMap<>();
configAsMap.put("gobblin.broker.pusher.class", TestPusher.class.getName());
configAsMap.put("gobblin.broker.pusher.id", "sharedId");
configAsMap.put("gobblin.broker.pusher.testPusher.id", "testPusherId");
configAsMap.put("gobblin.broker.pusher.testPusher.name", "testPusherName");
SharedResourcesBroker<GobblinScopeTypes> instanceBroker = SharedResourcesBrokerFactory
.createDefaultTopLevelBroker(ConfigFactory.parseMap(configAsMap), GobblinScopeTypes.GLOBAL.defaultScopeInstance());
SharedResourcesBroker<GobblinScopeTypes> jobBroker = instanceBroker.newSubscopedBuilder(
new JobScopeInstance("PusherFactoryTest", String.valueOf(System.currentTimeMillis()))).build();
StringNameSharedResourceKey key = new StringNameSharedResourceKey("testPusher");
Pusher<String> pusher = jobBroker.getSharedResource(new GobblinScopePusherFactory<>(), key);
Assert.assertEquals(pusher.getClass(), TestPusher.class);
TestPusher testPusher = (TestPusher) pusher;
Assert.assertTrue(!testPusher.isClosed);
Assert.assertEquals(testPusher.id, "testPusherId");
Assert.assertEquals(testPusher.name, "testPusherName");
try {
jobBroker.close();
instanceBroker.close();
} catch (IOException e) {
e.printStackTrace();
}
Assert.assertTrue(testPusher.isClosed);
}
public static class TestPusher implements Pusher<String> {
private boolean isClosed = false;
private final String id;
private final String name;
public TestPusher(Config config) {
id = config.getString("id");
name = config.getString("name");
}
@Override
public void pushMessages(List<String> messages) {
}
@Override
public void close()
throws IOException {
isClosed = true;
}
}
}
| 3,221 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/metrics/kafka/LoggingPusherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.metrics.reporter.KeyValuePusher;
@Test
public class LoggingPusherTest {

  /**
   * Verifies that {@link LoggingPusher} logs one INFO line per pushed message,
   * formatted as "Pushing to broker:topic: ..." (keyed messages include "key - ").
   */
  @Test
  public void testKafkaReporter() {
    TestAppender testAppender = new TestAppender();
    Logger logger = LogManager.getLogger(LoggingPusher.class.getName());
    logger.setLevel(Level.INFO);
    logger.addAppender(testAppender);

    KeyValuePusher<String, String> loggingPusher =
        new LoggingPusher<String, String>("broker", "topic", Optional.absent());

    loggingPusher.pushMessages(ImmutableList.of("message1", "message2"));
    loggingPusher.pushKeyValueMessages(ImmutableList.of(org.apache.commons.lang3.tuple.Pair.of("key", "message3")));

    Assert.assertEquals(testAppender.events.size(), 3);
    Assert.assertEquals(testAppender.events.get(0).getRenderedMessage(), "Pushing to broker:topic: message1");
    Assert.assertEquals(testAppender.events.get(1).getRenderedMessage(), "Pushing to broker:topic: message2");
    Assert.assertEquals(testAppender.events.get(2).getRenderedMessage(), "Pushing to broker:topic: key - message3");

    // Detach so this appender does not capture events from other tests.
    logger.removeAppender(testAppender);
  }

  /**
   * In-memory log4j appender that records every {@link LoggingEvent} it receives.
   * Declared static so instances do not retain a hidden reference to the enclosing test.
   */
  private static class TestAppender extends AppenderSkeleton {
    List<LoggingEvent> events = new ArrayList<LoggingEvent>();

    @Override
    public void close() {
    }

    @Override
    public boolean requiresLayout() {
      return false;
    }

    @Override
    protected void append(LoggingEvent event) {
      events.add(event);
    }
  }
}
| 3,222 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/converter/EnvelopeSchemaConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistryFactory;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit test for {@link EnvelopeSchemaConverter}.
*
* @deprecated As a result of deprecating {@link EnvelopeSchemaConverter}
*/
@Test(groups = {"gobblin.converter"})
@Deprecated
public class EnvelopeSchemaConverterTest {

  public static final String SCHEMA_KEY = "testKey";

  private GenericRecord mockInputRecord = mock(GenericRecord.class);
  private GenericRecord mockOutputRecord = mock(GenericRecord.class);
  public static Schema mockSchema = mock(Schema.class);

  /**
   * Converter stub: skips real payload extraction and hands back the mock output
   * record, asserting along the way that the resolved schema is the mock schema.
   */
  class EnvelopeSchemaConverterForTest extends EnvelopeSchemaConverter {
    @Override
    public byte[] getPayload(GenericRecord inputRecord, String payloadFieldName) {
      return null;
    }

    @Override
    public GenericRecord deserializePayload(byte[] payload, Schema payloadSchema) {
      Assert.assertEquals(payloadSchema, mockSchema);
      return mockOutputRecord;
    }
  }

  /**
   * End-to-end conversion of a mocked input record: the converter should look up the
   * schema by the record's "payloadSchemaId" and yield the deserialized payload record.
   */
  @Test
  public void convertRecordTest() throws Exception {
    when(mockInputRecord.get("payloadSchemaId")).thenReturn(SCHEMA_KEY);
    when(mockOutputRecord.getSchema()).thenReturn(mockSchema);

    WorkUnitState state = new WorkUnitState();
    state.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, "testEvent");
    state.setProp("kafka.schema.registry.url", "testUrl");
    state.setProp(KafkaSchemaRegistryFactory.KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS,
        KafkaAvroSchemaRegistryForTest.Factory.class.getName());

    EnvelopeSchemaConverterForTest testConverter = new EnvelopeSchemaConverterForTest();
    testConverter.init(state);

    GenericRecord converted = testConverter.convertRecord(null, mockInputRecord, state).iterator().next();
    Assert.assertEquals(converted, mockOutputRecord);
  }
}
| 3,223 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/converter/EnvelopePayloadConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Iterables;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit test for {@link EnvelopePayloadConverter}
*/
/**
 * Unit test for {@link EnvelopePayloadConverter}
 */
public class EnvelopePayloadConverterTest {
  private static final KafkaSchemaRegistry mockRegistry = mock(KafkaSchemaRegistry.class);

  /**
   * Converts a sample envelope record and checks that the decoded payload matches the
   * envelope's embedded "nestedRecord" (which the fixture intentionally sets to the
   * deserialized payload), field by field.
   */
  @Test
  public void testConverter()
      throws IOException, DataConversionException, SchemaRegistryException {
    Schema inputSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/envelope.avsc"));
    GenericDatumReader<GenericRecord> datumReader = new GenericDatumReader<>(inputSchema);

    // Copy the classpath fixture to a temp file (DataFileReader needs a seekable file).
    // Register the temp file for cleanup and close the reader to avoid leaking resources.
    File tmp = File.createTempFile(getClass().getSimpleName(), null);
    tmp.deleteOnExit();
    FileUtils.copyInputStreamToFile(getClass().getResourceAsStream("/converter/envelope.avro"), tmp);

    GenericRecord inputRecord;
    try (DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(tmp, datumReader)) {
      inputRecord = dataFileReader.next();
    }

    Schema latestPayloadSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/record.avsc"));
    when(mockRegistry.getLatestSchemaByTopic(any())).thenReturn(latestPayloadSchema);
    when(mockRegistry.getSchemaByKey(any())).thenReturn(inputSchema.getField("nestedRecord").schema());

    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(BaseEnvelopeSchemaConverter.PAYLOAD_SCHEMA_TOPIC, "test");
    workUnitState.setProp(BaseEnvelopeSchemaConverter.PAYLOAD_SCHEMA_ID_FIELD, "metadata.payloadSchemaId");
    workUnitState
        .setProp(BaseEnvelopeSchemaConverter.KAFKA_REGISTRY_FACTORY, MockKafkaAvroSchemaRegistryFactory.class.getName());

    EnvelopePayloadConverter converter = new EnvelopePayloadConverter();
    converter.init(workUnitState);

    Schema outputSchema = converter.convertSchema(inputSchema, workUnitState);
    List<GenericRecord> outputRecords = new ArrayList<>();
    Iterables.addAll(outputRecords, converter.convertRecord(outputSchema, inputRecord, workUnitState));
    Assert.assertTrue(outputRecords.size() == 1);

    GenericRecord outputRecord = outputRecords.get(0);
    GenericRecord payload = (GenericRecord) outputRecord.get("payload");
    // While making the test envelope avro record, its nestedRecord was intentionally set to the deserialized payload
    GenericRecord expectedPayload = (GenericRecord) outputRecord.get("nestedRecord");

    Schema payloadSchema = payload.getSchema();
    Schema expectedPayloadSchema = expectedPayload.getSchema();
    // The expected payload schema has the same number of fields as payload schema but in different order
    Assert.assertTrue(expectedPayloadSchema.getName().equals(payloadSchema.getName()));
    Assert.assertTrue(expectedPayloadSchema.getNamespace().equals(payloadSchema.getNamespace()));
    Assert.assertTrue(expectedPayloadSchema.getFields().size() == payloadSchema.getFields().size());

    for (Schema.Field field : payload.getSchema().getFields()) {
      Assert.assertTrue(expectedPayload.get(field.name()).equals(payload.get(field.name())));
    }
  }

  /** Factory that hands back the shared mock registry regardless of the supplied properties. */
  static class MockKafkaAvroSchemaRegistryFactory extends KafkaAvroSchemaRegistryFactory {
    @Override
    public KafkaSchemaRegistry create(Properties props) {
      return mockRegistry;
    }
  }
}
| 3,224 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/converter/KafkaSchemaChangeInjectorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.IOException;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamTestUtils;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
public class KafkaSchemaChangeInjectorTest {
// Test Avro schemas
private static final String SCHEMA1 = "{\"namespace\": \"example.avro\",\n" +
" \"type\": \"record\",\n" +
" \"name\": \"user\",\n" +
" \"fields\": [\n" +
" {\"name\": \"name\", \"type\": \"string\"},\n" +
" {\"name\": \"DUMMY\", \"type\": [\"null\",\"string\"]}\n" +
" ]\n" +
"}";
  /**
   * Verifies when the schema-change injector emits control messages: the very first record,
   * and any record whose (cached) schema differs after the registry's latest schema has been
   * updated. Records carrying an already-seen schema must not trigger injection.
   */
  @Test
  public void testInjection() throws SchemaRegistryException, IOException {
    String datasetName = "topic1";
    // Local injector subclass that identifies records by their embedded Avro schema.
    class TestInjector extends KafkaSchemaChangeInjector<Schema> {
      @Override
      protected Schema getSchemaIdentifier(DecodeableKafkaRecord consumerRecord) {
        return ((GenericRecord) consumerRecord.getValue()).getSchema();
      }
    }
    KafkaSchemaChangeInjector schemaChangeInjector = new TestInjector();
    WorkUnitState wus = new WorkUnitState();
    // Back the injector with the in-memory mock registry so no real service is contacted.
    wus.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS, KafkaStreamTestUtils.MockSchemaRegistry.class.getName());
    schemaChangeInjector.init(wus);
    // Four distinct schemas derived from SCHEMA1 by renaming its second field.
    Schema schema1 = new Schema.Parser().parse(SCHEMA1);
    Schema schema2 = new Schema.Parser().parse(SCHEMA1.replace("DUMMY", "DUMMY2"));
    Schema schema3 = new Schema.Parser().parse(SCHEMA1.replace("DUMMY", "DUMMY3"));
    Schema schema4 = new Schema.Parser().parse(SCHEMA1.replace("DUMMY", "DUMMY4"));
    schemaChangeInjector.setInputGlobalMetadata(GlobalMetadata.<Schema>builder().schema(schema1).build(), null);
    schemaChangeInjector.getSchemaRegistry().register(schema1, datasetName);
    DecodeableKafkaRecord record1 = getMock(datasetName, getRecord(schema1, "name1"));
    DecodeableKafkaRecord record2 = getMock(datasetName, getRecord(schema2, "name1"));
    DecodeableKafkaRecord record3 = getMock(datasetName, getRecord(schema3, "name1"));
    DecodeableKafkaRecord record4 = getMock(datasetName, getRecord(schema4, "name1"));
    // first message will always trigger injection
    Assert.assertEquals(schemaChangeInjector.getSchemaCache().size(), 0);
    Assert.assertNotNull(schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record1), wus));
    // next messages should not trigger injection since there is no schema change
    Assert.assertNull(schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record1), wus));
    Assert.assertNull(schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record1), wus));
    Assert.assertEquals(schemaChangeInjector.getSchemaCache().size(), 1);
    // A new record schema alone (latest registry schema unchanged) only grows the cache.
    Assert.assertNull(schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record2), wus));
    Assert.assertEquals(schemaChangeInjector.getSchemaCache().size(), 2);
    // updating the latest schema should result in an injection
    schemaChangeInjector.getSchemaRegistry().register(schema4, datasetName);
    Iterable<ControlMessage<DecodeableKafkaRecord>> iterable =
        schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record3), wus);
    Assert.assertNotNull(iterable);
    List<ControlMessage<DecodeableKafkaRecord>> controlMessages = Lists.newArrayList(iterable);
    Assert.assertEquals(controlMessages.size(), 1);
    // Should not see any injections since no schema update after the last call
    Assert.assertNull(schemaChangeInjector.injectControlMessagesBefore(new RecordEnvelope<>(record4), wus));
  }
private DecodeableKafkaRecord getMock(String datasetName, GenericRecord record) {
DecodeableKafkaRecord mockRecord = Mockito.mock(DecodeableKafkaRecord.class);
Mockito.when(mockRecord.getValue()).thenReturn(record);
Mockito.when(mockRecord.getTopic()).thenReturn(datasetName);
Mockito.when(mockRecord.getPartition()).thenReturn(1);
return mockRecord;
}
private GenericRecord getRecord(Schema schema, String name) {
GenericRecord record = new GenericData.Record(schema);
record.put("name", name);
return record;
}
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/converter/EnvelopePayloadExtractingConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Iterables;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit test for {@link EnvelopePayloadExtractingConverter}.
*/
@Test(groups = {"gobblin.converter"})
public class EnvelopePayloadExtractingConverterTest {
  /** Shared mock registry handed to the converter via {@link MockKafkaAvroSchemaRegistryFactory}. */
  private static final KafkaSchemaRegistry mockRegistry = mock(KafkaSchemaRegistry.class);

  /**
   * Verifies that the converter unwraps the payload of an envelope record: the output schema
   * must be the latest payload schema from the registry, and the single output record must
   * equal the deserialized payload embedded in the test envelope fixture.
   */
  @Test
  public void testConverter()
      throws Exception {
    Schema inputSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/envelope.avsc"));
    GenericDatumReader<GenericRecord> datumReader = new GenericDatumReader<>(inputSchema);
    File tmp = File.createTempFile(getClass().getSimpleName(), null);
    // Remove the copied fixture on JVM exit even if an assertion fails.
    tmp.deleteOnExit();
    FileUtils.copyInputStreamToFile(getClass().getResourceAsStream("/converter/envelope.avro"), tmp);
    GenericRecord inputRecord;
    // DataFileReader is Closeable; close it to avoid leaking the file handle.
    try (DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(tmp, datumReader)) {
      inputRecord = dataFileReader.next();
    }
    Schema latestPayloadSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/record.avsc"));
    when(mockRegistry.getLatestSchemaByTopic(any())).thenReturn(latestPayloadSchema);
    when(mockRegistry.getSchemaByKey(any())).thenReturn(inputSchema.getField("nestedRecord").schema());
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(BaseEnvelopeSchemaConverter.PAYLOAD_SCHEMA_TOPIC, "test");
    workUnitState.setProp(BaseEnvelopeSchemaConverter.PAYLOAD_SCHEMA_ID_FIELD, "metadata.payloadSchemaId");
    workUnitState.setProp(BaseEnvelopeSchemaConverter.KAFKA_REGISTRY_FACTORY,
        EnvelopePayloadExtractingConverterTest.MockKafkaAvroSchemaRegistryFactory.class.getName());
    EnvelopePayloadExtractingConverter converter = new EnvelopePayloadExtractingConverter();
    converter.init(workUnitState);
    Schema outputSchema = converter.convertSchema(inputSchema, workUnitState);
    // assertEquals (rather than assertTrue(equals)) yields a useful diff on failure.
    Assert.assertEquals(outputSchema, latestPayloadSchema);
    List<GenericRecord> outputRecords = new ArrayList<>();
    Iterables.addAll(outputRecords, converter.convertRecord(outputSchema, inputRecord, workUnitState));
    Assert.assertEquals(outputRecords.size(), 1);
    GenericRecord payload = outputRecords.get(0);
    // While making the test envelope avro input record, its nestedRecord was intentionally set to the deserialized payload
    GenericRecord expectedPayload = (GenericRecord) inputRecord.get("nestedRecord");
    Schema payloadSchema = payload.getSchema();
    Schema expectedPayloadSchema = expectedPayload.getSchema();
    // The expected payload schema has the same number of fields as payload schema but in a
    // different order, so compare name/namespace/arity and then field values individually.
    Assert.assertEquals(expectedPayloadSchema.getName(), payloadSchema.getName());
    Assert.assertEquals(expectedPayloadSchema.getNamespace(), payloadSchema.getNamespace());
    Assert.assertEquals(expectedPayloadSchema.getFields().size(), payloadSchema.getFields().size());
    for (Schema.Field field : payload.getSchema().getFields()) {
      Assert.assertEquals(payload.get(field.name()), expectedPayload.get(field.name()));
    }
  }

  /** Factory used by the converter under test to obtain the shared {@link #mockRegistry}. */
  static class MockKafkaAvroSchemaRegistryFactory extends KafkaAvroSchemaRegistryFactory {
    @Override
    public KafkaSchemaRegistry create(Properties props) {
      return mockRegistry;
    }
  }
}
| 3,226 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/converter/KafkaAvroSchemaRegistryForTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistryFactory;
import java.util.Properties;
import org.apache.avro.Schema;
/**
* Override some methods of {@link KafkaAvroSchemaRegistry} for use in {@link EnvelopeSchemaConverterTest}
*
* @deprecated Checkout {@link EnvelopePayloadExtractingConverterTest} for how to mock a {@link KafkaSchemaRegistry}
*/
@Deprecated
public class KafkaAvroSchemaRegistryForTest extends KafkaAvroSchemaRegistry {
public static class Factory implements KafkaSchemaRegistryFactory {
public Factory() {}
public KafkaSchemaRegistry create(Properties props) {
return new KafkaAvroSchemaRegistryForTest(props);
}
}
  /** Delegates to {@link KafkaAvroSchemaRegistry} with the given registry configuration. */
  public KafkaAvroSchemaRegistryForTest(Properties props) {
    super(props);
  }
@Override
public Schema getSchemaByKey(String key) {
if (key.equals(EnvelopeSchemaConverterTest.SCHEMA_KEY)) {
return EnvelopeSchemaConverterTest.mockSchema;
} else {
return null;
}
}
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/ZipConfigStoreUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.zip.SimpleLocalIvyConfigStoreFactory;
import org.apache.gobblin.config.store.zip.ZipFileConfigStore;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import static org.apache.gobblin.configuration.ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED;
import static org.apache.gobblin.configuration.ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_COMMONPATH;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_FILTER;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_TAGS_BLACKLIST;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_TAGS_WHITELIST;
import static org.mockito.Mockito.anyList;
/**
* The same testing routine for ivy-based config-store (ZipConfigStore)
* Make sure everything inside {@link ConfigStoreUtils} will work for {@link ZipFileConfigStore} implementation.
*
* Note that {@link ZipFileConfigStore}, doesn't contain version folder. More specifically, under .zip file
* there would be configNodes directly, unlike {@link org.apache.gobblin.config.store.hdfs.SimpleHadoopFilesystemConfigStore}
* where there would be a version folder inside the configStore root path.
*/
public class ZipConfigStoreUtilsTest {
  // URI of the zip/ivy-backed config store created in setUp; kept as a string for convenience.
  private String configStoreUri;
  private ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
  // Mocked Kafka client whose getFilteredTopics() is stubbed per test.
  private GobblinKafkaConsumerClient mockClient;
  /**
   * Opens the zipped test config store (IvyConfigStoreTest.zip on the classpath) through
   * {@link SimpleLocalIvyConfigStoreFactory} and records its store URI for the tests below.
   */
  @BeforeClass
  public void setUp()
      throws URISyntaxException, ConfigStoreCreationException, IOException {
    Path path =
        Paths.get(ZipConfigStoreUtilsTest.class.getClassLoader().getResource("IvyConfigStoreTest.zip").getPath());
    URI zipInClassPathURI = new URI(
        "ivy-file:/?org=org&module=module&storePath=" + path
            + "&storePrefix=_CONFIG_STORE");
    ZipFileConfigStore store = new SimpleLocalIvyConfigStoreFactory().createConfigStore(zipInClassPathURI);
    configStoreUri = store.getStoreURI().toString();
    mockClient = Mockito.mock(GobblinKafkaConsumerClient.class);
  }
  /**
   * Resolves topic names by tag: whitelist tag yields Topic1/Topic2, an unrelated tag yields
   * Topic3, and the blacklist tag (used as a selector here) yields Topic1/Topic2 again.
   */
  @Test
  public void testGetListOfTopicNamesByFilteringTag() {
    Properties properties = new Properties();
    properties.setProperty(GOBBLIN_CONFIG_TAGS_WHITELIST, "/tags/whitelist");
    properties.setProperty(GOBBLIN_CONFIG_FILTER, "/data/tracking");
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, "/data/tracking");
    // Whitelist tag selects Topic1 and Topic2.
    List<String> result = ConfigStoreUtils
        .getListOfTopicNamesByFilteringTag(properties, configClient, Optional.absent(), configStoreUri,
            GOBBLIN_CONFIG_TAGS_WHITELIST);
    Assert.assertEquals(result.size(), 2);
    Assert.assertTrue(result.contains("Topic1"));
    Assert.assertTrue(result.contains("Topic2"));
    // Re-point the whitelist key at a different tag; only Topic3 carries it.
    properties.setProperty(GOBBLIN_CONFIG_TAGS_WHITELIST, "/tags/random");
    result = ConfigStoreUtils
        .getListOfTopicNamesByFilteringTag(properties, configClient, Optional.absent(), configStoreUri,
            GOBBLIN_CONFIG_TAGS_WHITELIST);
    Assert.assertEquals(result.size(), 1);
    Assert.assertTrue(result.contains("Topic3"));
    // Selecting by the blacklist tag key returns the topics tagged /tags/blacklist.
    properties.setProperty(GOBBLIN_CONFIG_TAGS_BLACKLIST, "/tags/blacklist");
    result = ConfigStoreUtils
        .getListOfTopicNamesByFilteringTag(properties, configClient, Optional.absent(), configStoreUri,
            GOBBLIN_CONFIG_TAGS_BLACKLIST);
    Assert.assertEquals(result.size(), 2);
    Assert.assertTrue(result.contains("Topic1"));
    Assert.assertTrue(result.contains("Topic2"));
  }
  /**
   * Filters the mock client's three topics through the config store: no properties keeps all,
   * a whitelist keeps only the whitelisted ones, a blacklist drops the blacklisted ones.
   */
  @Test
  public void testGetTopicsFromConfigStore()
      throws Exception {
    KafkaTopic topic1 = new KafkaTopic("Topic1", Lists.newArrayList());
    KafkaTopic topic2 = new KafkaTopic("Topic2", Lists.newArrayList());
    KafkaTopic topic3 = new KafkaTopic("Topic3", Lists.newArrayList());
    Mockito.when(mockClient.getFilteredTopics(anyList(), anyList()))
        .thenReturn(ImmutableList.of(topic1, topic2, topic3));
    Properties properties = new Properties();
    // Empty properties returns everything: topic1, 2 and 3.
    List<KafkaTopic> result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 3);
    properties.setProperty(GOBBLIN_CONFIG_TAGS_WHITELIST, "/tags/whitelist");
    properties.setProperty(GOBBLIN_CONFIG_FILTER, "/data/tracking");
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, "/data/tracking");
    // Whitelist only two topics. Should only returned whitelisted topics.
    result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 2);
    List<String> resultInString = result.stream().map(KafkaTopic::getName).collect(Collectors.toList());
    Assert.assertTrue(resultInString.contains("Topic1"));
    Assert.assertTrue(resultInString.contains("Topic2"));
    // Blacklist two topics. Should only return non-blacklisted topics.
    properties.remove(GOBBLIN_CONFIG_TAGS_WHITELIST);
    properties.setProperty(GOBBLIN_CONFIG_TAGS_BLACKLIST, "/tags/blacklist");
    result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 1);
    Assert.assertEquals(result.get(0).getName(), "Topic3");
  }
  /** Reads a single topic's config node from the store and checks a known property value. */
  @Test
  public void testGetConfigForTopic() throws Exception {
    Properties properties = new Properties();
    String commonPath = "/data/tracking";
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, commonPath);
    properties.setProperty(CONFIG_MANAGEMENT_STORE_URI, configStoreUri);
    properties.setProperty(CONFIG_MANAGEMENT_STORE_ENABLED, "true");
    properties.setProperty("topic.name", "Topic1");
    Config topic1Config = ConfigStoreUtils.getConfigForTopic(properties, "topic.name", configClient).get();
    Assert.assertEquals(topic1Config.getString("aaaaa"), "bbbb");
  }
}
| 3,228 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaExtractorUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.File;
import java.util.List;
import java.util.Random;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Files;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.metastore.FileContextBasedFsStateStoreFactory;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.runtime.StateStoreBasedWatermarkStorage;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker;
import static org.apache.gobblin.configuration.ConfigurationKeys.JOB_NAME_KEY;
import static org.apache.gobblin.configuration.ConfigurationKeys.WATERMARK_INTERVAL_VALUE_KEY;
@Slf4j
public class KafkaExtractorUtils {
private static final Integer MAX_NUM_BROKERS = 100;
private static final Integer MAX_NUM_TOPIC_PARTITIONS = 1024;
/**
* A utility method that returns a {@link WorkUnitState} which can be used to instantiate both a batch and a
* streaming Kafka extractor.
* @param topicName
* @param numPartitions
* @return
*/
public static WorkUnitState getWorkUnitState(String topicName, int numPartitions) {
Preconditions.checkArgument(numPartitions <= MAX_NUM_TOPIC_PARTITIONS, "Num partitions assigned"
+ "must be smaller than the maximum number of partitions of the topic");
WorkUnitState state = new WorkUnitState();
state.setProp(JOB_NAME_KEY, "testJob");
state.setProp(KafkaSource.TOPIC_NAME, topicName);
state.setProp(KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS, KafkaStreamTestUtils.MockKafka10ConsumerClientFactory.class.getName());
state.setProp(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
KafkaStreamTestUtils.MockSchemaRegistry.class.getName());
state.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS, KafkaStreamTestUtils.MockSchemaRegistry.class.getName());
//Need to set this property for LiKafka10ConsumerClient instantiation
state.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, "http://dummySchemaRegistry:1000");
Random random = new Random();
for (int i=0; i<numPartitions; i++) {
//Assign a random partition id.
state.setProp(KafkaSource.PARTITION_ID + "." + i, random.nextInt(MAX_NUM_TOPIC_PARTITIONS));
state.setProp(KafkaSource.LEADER_ID + "." + i, random.nextInt(MAX_NUM_BROKERS));
state.setProp(KafkaSource.LEADER_HOSTANDPORT + "." + i, "leader-" + i + ":9091");
}
state.setProp(KafkaTopicGroupingWorkUnitPacker.NUM_PARTITIONS_ASSIGNED, numPartitions);
//Configure the watermark storage. We use FileContextBasedFsStateStoreFactory, since it allows for overwriting an existing
// state store.
state.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_TYPE_KEY, FileContextBasedFsStateStoreFactory.class.getName());
File stateStoreDir = Files.createTempDir();
stateStoreDir.deleteOnExit();
state.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_CONFIG_PREFIX + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, stateStoreDir.getAbsolutePath());
//Kafka configurations
state.setProp(ConfigurationKeys.KAFKA_BROKERS, "localhost:9091");
// Constructing a dummy watermark and mock client-factory-class and registry-class
List<Long> dummyWatermark = ImmutableList.of(new Long(1));
WatermarkInterval watermarkInterval =
new WatermarkInterval(new MultiLongWatermark(dummyWatermark), new MultiLongWatermark(dummyWatermark));
state.setProp(WATERMARK_INTERVAL_VALUE_KEY, watermarkInterval.toJson());
state.setWuProperties(state.getProperties(), state.getProperties());
return state;
}
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/ConfigStoreUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import static org.apache.gobblin.configuration.ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED;
import static org.apache.gobblin.configuration.ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_COMMONPATH;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_FILTER;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_TAGS_BLACKLIST;
import static org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils.GOBBLIN_CONFIG_TAGS_WHITELIST;
import static org.mockito.Mockito.anyList;
/**
* Added this testing to protect no behavior changes on {@link ConfigStoreUtils} after refactoring.
*/
public class ConfigStoreUtilsTest {
  // Declare as string in convenience of testing.
  private String configStoreUri;
  // Mocked Kafka client whose getFilteredTopics() is stubbed per test.
  private GobblinKafkaConsumerClient mockClient;
  private ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
  /**
   * Loading a fs-based config-store for ease of unit testing.
   * Resolves the _CONFIG_STORE resource directory on the classpath and builds a
   * simple-file store URI pointing at its parent.
   * @throws Exception
   */
  @BeforeClass
  public void setup()
      throws Exception {
    URL url = this.getClass().getClassLoader().getResource("_CONFIG_STORE");
    configStoreUri = getStoreURI(new Path(url.getPath()).getParent().toString()).toString();
    mockClient = Mockito.mock(GobblinKafkaConsumerClient.class);
  }
  /** Topic config-node URIs must be the store URI's path plus commonPath plus the topic name. */
  @Test
  public void testGetUriStringForTopic() throws Exception {
    String commonPath = "/data/tracking";
    URI topic1URI = ConfigStoreUtils.getUriStringForTopic("Topic1", commonPath, configStoreUri);
    URI expectedTopic1URI = new URI("simple-file", "", new URI(configStoreUri).getPath() + "/data/tracking/Topic1", null, null);
    Assert.assertEquals(topic1URI, expectedTopic1URI);
    URI topic2URI = ConfigStoreUtils.getUriStringForTopic("Topic2", commonPath, configStoreUri);
    URI expectedTopic2URI = new URI("simple-file", "", new URI(configStoreUri).getPath() + "/data/tracking/Topic2", null, null);
    Assert.assertEquals(topic2URI, expectedTopic2URI);
  }
  /** Reads a single topic's config node from the store and checks a known property value. */
  @Test
  public void testGetConfigForTopic() throws Exception {
    Properties properties = new Properties();
    String commonPath = "/data/tracking";
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, commonPath);
    properties.setProperty(CONFIG_MANAGEMENT_STORE_URI, configStoreUri);
    properties.setProperty(CONFIG_MANAGEMENT_STORE_ENABLED, "true");
    properties.setProperty("topic.name", "Topic1");
    Config topic1Config = ConfigStoreUtils.getConfigForTopic(properties, "topic.name", configClient).get();
    Assert.assertEquals(topic1Config.getString("aaaaa"), "bbbb");
  }
  /**
   * Filters the mock client's three topics through the config store: no properties keeps all,
   * a whitelist keeps only the whitelisted ones, a blacklist drops the blacklisted ones.
   */
  @Test
  public void testGetTopicsFromConfigStore()
      throws Exception {
    KafkaTopic topic1 = new KafkaTopic("Topic1", Lists.newArrayList());
    KafkaTopic topic2 = new KafkaTopic("Topic2", Lists.newArrayList());
    KafkaTopic topic3 = new KafkaTopic("Topic3", Lists.newArrayList());
    Mockito.when(mockClient.getFilteredTopics(anyList(), anyList()))
        .thenReturn(ImmutableList.of(topic1, topic2, topic3));
    Properties properties = new Properties();
    // Empty properties returns everything: topic1, 2 and 3.
    List<KafkaTopic> result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 3);
    properties.setProperty(GOBBLIN_CONFIG_TAGS_WHITELIST, "/tags/whitelist");
    properties.setProperty(GOBBLIN_CONFIG_FILTER, "/data/tracking");
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, "/data/tracking");
    // Whitelist only two topics. Should only returned whitelisted topics.
    result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 2);
    List<String> resultInString = result.stream().map(KafkaTopic::getName).collect(Collectors.toList());
    Assert.assertTrue(resultInString.contains("Topic1"));
    Assert.assertTrue(resultInString.contains("Topic2"));
    // Blacklist two topics. Should only return non-blacklisted topics.
    properties.remove(GOBBLIN_CONFIG_TAGS_WHITELIST);
    properties.setProperty(GOBBLIN_CONFIG_TAGS_BLACKLIST, "/tags/blacklist");
    result = ConfigStoreUtils.getTopicsFromConfigStore(properties, configStoreUri, mockClient);
    Assert.assertEquals(result.size(), 1);
    Assert.assertEquals(result.get(0).getName(), "Topic3");
  }
  /** Resolving topics tagged with the whitelist tag must yield exactly Topic1 and Topic2. */
  @Test
  public void testGetListOfTopicNamesByFilteringTag()
      throws Exception {
    Properties properties = new Properties();
    properties.setProperty(GOBBLIN_CONFIG_TAGS_WHITELIST, "/tags/whitelist");
    properties.setProperty(GOBBLIN_CONFIG_FILTER, "/data/tracking");
    properties.setProperty(GOBBLIN_CONFIG_COMMONPATH, "/data/tracking");
    List<String> result = ConfigStoreUtils
        .getListOfTopicNamesByFilteringTag(properties, configClient, Optional.absent(), configStoreUri,
            GOBBLIN_CONFIG_TAGS_WHITELIST);
    Assert.assertEquals(result.size(), 2);
    Assert.assertTrue(result.contains("Topic1"));
    Assert.assertTrue(result.contains("Topic2"));
  }
  /**
   * Return localFs-based config-store uri.
   * Note that for local FS, fs.getUri will return an URI without authority. So we shouldn't add authority when
   * we construct an URI for local-file backed config-store.
   */
  private URI getStoreURI(String configDir)
      throws URISyntaxException {
    return new URI("simple-file", "", configDir, null, null);
  }
}
| 3,230 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.collections.CollectionUtils;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.extract.kafka.validator.TopicNameValidator;
import org.apache.gobblin.source.extractor.extract.kafka.validator.TopicValidators;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.KafkaConsumerRecord;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.util.DatasetFilterUtils;
import static org.apache.gobblin.source.extractor.extract.kafka.KafkaSource.*;
/**
 * Unit tests for {@link KafkaSource} covering work-unit generation, partition-level filtering,
 * topic whitelist/blacklist filtering and pluggable topic validators.
 */
public class KafkaSourceTest {
  /** Default topic set served by {@link TestKafkaClient}; never mutated by the tests. */
  private static final List<String> testTopics = Arrays.asList("topic1", "topic2", "topic3");

  @Test
  public void testGetWorkunits() {
    TestKafkaClient testKafkaClient = new TestKafkaClient();
    testKafkaClient.testTopics = testTopics;
    SourceState state = new SourceState();
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, "TestPath");
    state.setProp(KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_TYPE, KafkaWorkUnitPacker.PackerType.CUSTOM);
    state.setProp(KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE,
        "org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker");
    state.setProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS, "MockTestKafkaConsumerClientFactory");
    TestKafkaSource<?, ?> testKafkaSource = new TestKafkaSource<>(testKafkaClient);
    List<WorkUnit> workUnits = testKafkaSource.getWorkunits(state);
    // 3 topics x 16 synthetic partitions per topic = 48 partitions expected across all work units.
    validatePartitionNumWithinWorkUnits(workUnits, 48);
  }

  @Test
  public void testGetWorkunitsForFilteredPartitions() {
    TestKafkaClient testKafkaClient = new TestKafkaClient();
    List<String> allTopics = testTopics;
    // Request only a subset of partitions per topic: 2 + 3 + 4 = 9 partitions overall.
    Map<String, List<Integer>> filteredTopicPartitionMap = new HashMap<>();
    filteredTopicPartitionMap.put(allTopics.get(0), new LinkedList<>());
    filteredTopicPartitionMap.put(allTopics.get(1), new LinkedList<>());
    filteredTopicPartitionMap.put(allTopics.get(2), new LinkedList<>());
    filteredTopicPartitionMap.get(allTopics.get(0)).addAll(Arrays.asList(0, 11));
    filteredTopicPartitionMap.get(allTopics.get(1)).addAll(Arrays.asList(2, 8, 10));
    filteredTopicPartitionMap.get(allTopics.get(2)).addAll(Arrays.asList(1, 3, 5, 7));
    testKafkaClient.testTopics = allTopics;
    SourceState state = new SourceState();
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, "TestPath");
    state.setProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS, "MockTestKafkaConsumerClientFactory");
    TestKafkaSource<?, ?> testKafkaSource = new TestKafkaSource<>(testKafkaClient);
    List<WorkUnit> workUnits =
        testKafkaSource.getWorkunitsForFilteredPartitions(state, Optional.of(filteredTopicPartitionMap), Optional.of(3));
    validatePartitionNumWithinWorkUnits(workUnits, 9);
    // Repeat with the custom topic-grouping packer; the same 9 partitions must survive packing.
    state.setProp(KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_TYPE, KafkaWorkUnitPacker.PackerType.CUSTOM);
    state.setProp(KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE,
        "org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker");
    workUnits =
        testKafkaSource.getWorkunitsForFilteredPartitions(state, Optional.of(filteredTopicPartitionMap), Optional.of(1));
    validatePartitionNumWithinWorkUnits(workUnits, 9);
  }

  @Test
  public void testGetFilteredTopics() {
    TestKafkaClient testKafkaClient = new TestKafkaClient();
    List<String> allTopics = Arrays.asList(
        "Topic1", "topic-v2", "topic3", // allowed
        "topic-with.period-in_middle", ".topic-with-period-at-start", "topicWithPeriodAtEnd.", //period topics
        "not-allowed-topic");
    testKafkaClient.testTopics = allTopics;
    SourceState state = new SourceState();
    state.setProp(KafkaSource.TOPIC_WHITELIST, ".*[Tt]opic.*");
    state.setProp(KafkaSource.TOPIC_BLACKLIST, "not-allowed.*");
    // Periods are allowed by default, so the first six topics pass the whitelist/blacklist filter.
    Assert.assertEquals(new TestKafkaSource<>(testKafkaClient).getFilteredTopics(state), toKafkaTopicList(allTopics.subList(0, 6)));
    // Disallowing periods additionally drops the three topics containing '.' in the name.
    state.setProp(KafkaSource.ALLOW_PERIOD_IN_TOPIC_NAME, false);
    Assert.assertEquals(new TestKafkaSource<>(testKafkaClient).getFilteredTopics(state), toKafkaTopicList(allTopics.subList(0, 3)));
  }

  @Test
  public void testTopicValidators() {
    TestKafkaClient testKafkaClient = new TestKafkaClient();
    List<String> allTopics = Arrays.asList(
        "Topic1", "topic-v2", "topic3", // allowed
        "topic-with.period-in_middle", ".topic-with-period-at-start", "topicWithPeriodAtEnd.", //period topics
        "not-allowed-topic");
    testKafkaClient.testTopics = allTopics;
    KafkaSource<?, ?> kafkaSource = new TestKafkaSource<>(testKafkaClient);
    SourceState state = new SourceState();
    state.setProp(KafkaSource.TOPIC_WHITELIST, ".*[Tt]opic.*");
    state.setProp(KafkaSource.TOPIC_BLACKLIST, "not-allowed.*");
    List<KafkaTopic> topicsToValidate = kafkaSource.getFilteredTopics(state);
    // Test without TopicValidators in the state: all filtered topics are considered valid.
    Assert.assertTrue(CollectionUtils.isEqualCollection(kafkaSource.getValidTopics(topicsToValidate, state),
        toKafkaTopicList(allTopics.subList(0, 6))));
    // Test empty TopicValidators in the state: behaves the same as no validators.
    state.setProp(TopicValidators.VALIDATOR_CLASSES_KEY, "");
    Assert.assertTrue(CollectionUtils.isEqualCollection(kafkaSource.getValidTopics(topicsToValidate, state),
        toKafkaTopicList(allTopics.subList(0, 6))));
    // Test TopicValidators with TopicNameValidator in the state: period topics are rejected.
    state.setProp(TopicValidators.VALIDATOR_CLASSES_KEY, TopicNameValidator.class.getName());
    Assert.assertTrue(CollectionUtils.isEqualCollection(kafkaSource.getValidTopics(topicsToValidate, state),
        toKafkaTopicList(allTopics.subList(0, 3))));
  }

  /**
   * Builds {@code partitionNum} synthetic {@link KafkaPartition}s (ids 0..partitionNum-1) for the
   * given topic. The method name's "creat" typo is retained because the method is public.
   */
  public static List<KafkaPartition> creatPartitions(String topicName, int partitionNum) {
    List<KafkaPartition> partitions = new ArrayList<>(partitionNum);
    for (int i = 0; i < partitionNum; i++) {
      partitions.add(new KafkaPartition.Builder().withTopicName(topicName).withId(i).withLeaderHostAndPort("test").withLeaderId(1).build());
    }
    return partitions;
  }

  /** Recursively collects every {@link KafkaPartition} referenced by a (possibly multi-) work unit. */
  public static List<KafkaPartition> getPartitionFromWorkUnit(WorkUnit workUnit) {
    List<KafkaPartition> topicPartitions = new ArrayList<>();
    if (workUnit instanceof MultiWorkUnit) {
      for (WorkUnit wu : ((MultiWorkUnit) workUnit).getWorkUnits()) {
        topicPartitions.addAll(getPartitionFromWorkUnit(wu));
      }
    } else {
      // Partition ids are stored under indexed keys: <PARTITION_ID>.0, <PARTITION_ID>.1, ...
      int i = 0;
      String partitionIdProp = KafkaSource.PARTITION_ID + "." + i;
      while (workUnit.getProp(partitionIdProp) != null) {
        int partitionId = workUnit.getPropAsInt(partitionIdProp);
        KafkaPartition topicPartition =
            new KafkaPartition.Builder().withTopicName(workUnit.getProp(KafkaSource.TOPIC_NAME)).withId(partitionId).build();
        topicPartitions.add(topicPartition);
        i++;
        partitionIdProp = KafkaSource.PARTITION_ID + "." + i;
      }
    }
    return topicPartitions;
  }

  /** Wraps each topic name into a {@link KafkaTopic} with 16 synthetic partitions. */
  public static List<KafkaTopic> toKafkaTopicList(List<String> topicNames) {
    return topicNames.stream().map(topicName -> new KafkaTopic(topicName, creatPartitions(topicName, 16))).collect(Collectors.toList());
  }

  /** Asserts that the given work units collectively reference exactly {@code expectPartitionNum} partitions. */
  private void validatePartitionNumWithinWorkUnits(List<WorkUnit> workUnits, int expectPartitionNum) {
    List<KafkaPartition> partitionList = new ArrayList<>();
    for (WorkUnit workUnit : workUnits) {
      partitionList.addAll(getPartitionFromWorkUnit(workUnit));
    }
    Assert.assertEquals(partitionList.size(), expectPartitionNum);
  }

  /** Factory resolved by alias from job config; hands out {@link TestKafkaClient} instances. */
  @Alias("MockTestKafkaConsumerClientFactory")
  public static class MockTestKafkaConsumerClientFactory
      implements GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory {

    @Override
    public GobblinKafkaConsumerClient create(Config config) {
      return new TestKafkaClient();
    }
  }

  /** Minimal consumer-client stub that only knows how to list (filtered) topics. */
  public static class TestKafkaClient implements GobblinKafkaConsumerClient {
    List<String> testTopics = KafkaSourceTest.testTopics;

    @Override
    public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
      return toKafkaTopicList(DatasetFilterUtils.filter(testTopics, blacklist, whitelist));
    }

    @Override
    public long getEarliestOffset(KafkaPartition partition)
        throws KafkaOffsetRetrievalFailureException {
      return 0;
    }

    @Override
    public long getLatestOffset(KafkaPartition partition)
        throws KafkaOffsetRetrievalFailureException {
      return 0;
    }

    @Override
    public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
      return null;
    }

    @Override
    public void close()
        throws IOException {
    }
  }

  /**
   * {@link KafkaSource} wired directly to a supplied consumer client. Declared static so the
   * nested class does not retain a hidden reference to the enclosing test instance.
   */
  private static class TestKafkaSource<S, D> extends KafkaSource<S, D> {
    public TestKafkaSource(GobblinKafkaConsumerClient client) {
      kafkaConsumerClient.set(client);
    }

    @Override
    public Extractor getExtractor(WorkUnitState state)
        throws IOException {
      throw new RuntimeException("Not implemented");
    }
  }
}
| 3,231 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.extract.FlushingExtractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
public class KafkaStreamingExtractorTest {
private KafkaStreamingExtractor streamingExtractor;
private final int numPartitions = 3;
@BeforeClass
public void setUp() {
WorkUnitState state1 = KafkaExtractorUtils.getWorkUnitState("testTopic", numPartitions);
state1.setProp(FlushingExtractor.FLUSH_DATA_PUBLISHER_CLASS, TestDataPublisher.class.getName());
this.streamingExtractor = new KafkaStreamingExtractor(state1);
}
@Test
public void testResetExtractorStats()
throws IOException, DataRecordException {
MultiLongWatermark highWatermark1 = new MultiLongWatermark(this.streamingExtractor.highWatermark);
//Read 3 records
StreamEntity<DecodeableKafkaRecord> streamEntity = this.streamingExtractor.readStreamEntityImpl();
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(0), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(1), 0L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(2), 0L);
this.streamingExtractor.readStreamEntityImpl();
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(0), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(1), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(2), 0L);
this.streamingExtractor.readStreamEntityImpl();
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(0), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(1), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(2), 1L);
//Checkpoint watermarks
this.streamingExtractor.onFlushAck();
//Reset extractor stats
this.streamingExtractor.resetExtractorStatsAndWatermarks(false);
//Ensure post-reset invariance is satisfied i.e. low watermark and next watermark are identical.
testAfterReset(highWatermark1);
MultiLongWatermark highWatermark2 = new MultiLongWatermark(this.streamingExtractor.highWatermark);
//Read 1 more record
this.streamingExtractor.readStreamEntityImpl();
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(0), 2L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(1), 1L);
Assert.assertEquals(this.streamingExtractor.nextWatermark.get(2), 1L);
Assert.assertEquals(this.streamingExtractor.lowWatermark.get(0), 1L);
Assert.assertEquals(this.streamingExtractor.lowWatermark.get(1), 1L);
Assert.assertEquals(this.streamingExtractor.lowWatermark.get(2), 1L);
//Checkpoint watermarks
this.streamingExtractor.onFlushAck();
//Reset extractor stats
this.streamingExtractor.resetExtractorStatsAndWatermarks(false);
//Ensure post-reset invariance is satisfied.
testAfterReset(highWatermark2);
}
private void testAfterReset(MultiLongWatermark previousHighWatermark) {
//Ensure that low and next watermarks are identical after reset. Also ensure the new high watermark is greater than
// the previous high watermark.
for (int i=0; i < numPartitions; i++) {
Assert.assertEquals(this.streamingExtractor.lowWatermark.get(i), this.streamingExtractor.nextWatermark.get(i));
Assert.assertTrue(previousHighWatermark.get(i) <= this.streamingExtractor.highWatermark.get(i));
}
}
@Test
public void testGenerateAdditionalTagHelper() {
// Verifying that produce rate has been added.
Map<KafkaPartition, Map<String, String>> result = this.streamingExtractor.getAdditionalTagsHelper();
for (Map<String, String> entry: result.values()) {
Assert.assertTrue(entry.containsKey(KafkaProduceRateTracker.KAFKA_PARTITION_PRODUCE_RATE_KEY));
}
}
@Test
public void testReadRecordEnvelopeImpl()
throws IOException {
WorkUnitState state = KafkaExtractorUtils.getWorkUnitState("testTopic", numPartitions);
state.setProp(FlushingExtractor.FLUSH_DATA_PUBLISHER_CLASS, TestDataPublisher.class.getName());
//Enable config that allows underlying KafkaConsumerClient to return null-valued Kafka records.
state.setProp(KafkaStreamTestUtils.MockKafkaConsumerClient.CAN_RETURN_NULL_VALUED_RECORDS, "true");
KafkaStreamingExtractor streamingExtractorWithNulls = new KafkaStreamingExtractor(state);
//Extract 4 records. Ensure each record returned by readRecordEnvelopeImpl() is a non-null valued record.
for (int i = 0; i < 4; i++) {
RecordEnvelope<DecodeableKafkaRecord> recordEnvelope = streamingExtractorWithNulls.readRecordEnvelopeImpl();
Assert.assertNotNull(recordEnvelope.getRecord().getValue() != null);
}
}
static class TestDataPublisher extends DataPublisher {
public TestDataPublisher(WorkUnitState state) {
super(state);
}
@Override
public void initialize() {
}
@Override
public void publishData(Collection<? extends WorkUnitState> states) {
}
@Override
public void publishMetadata(Collection<? extends WorkUnitState> states) {
}
@Override
public void close() {
}
}
} | 3,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.joda.time.LocalDate;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.EvictingQueue;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.extract.FlushingExtractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.writer.LastWatermarkTracker;
import org.apache.gobblin.writer.WatermarkTracker;
public class KafkaProduceRateTrackerTest {
private static final LocalDate HOLIDAY_DATE = new LocalDate(2019, 12, 25);
private static final LocalDate NON_HOLIDAY_DATE = new LocalDate(2020, 01, 05);
private static final Long HOLIDAY_TIME = HOLIDAY_DATE.toDateTimeAtStartOfDay().toInstant().getMillis();
private static final Long NON_HOLIDAY_TIME = NON_HOLIDAY_DATE.toDateTimeAtStartOfDay().toInstant().getMillis();
private KafkaProduceRateTracker tracker;
private List<KafkaPartition> kafkaPartitions = new ArrayList<>();
private WatermarkTracker watermarkTracker;
private WorkUnitState workUnitState;
private KafkaExtractorStatsTracker extractorStatsTracker;
@BeforeClass
public void setUp() {
kafkaPartitions.add(new KafkaPartition.Builder().withTopicName("test-topic").withId(0).build());
kafkaPartitions.add(new KafkaPartition.Builder().withTopicName("test-topic").withId(1).build());
this.workUnitState = new WorkUnitState();
this.workUnitState.setProp(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY, 5L);
this.workUnitState.setProp(ConfigurationKeys.KAFKA_BROKERS, "testBroker");
this.watermarkTracker = new LastWatermarkTracker(false);
this.extractorStatsTracker = new KafkaExtractorStatsTracker(this.workUnitState, kafkaPartitions);
}
private void assertTopicPartitionOrder(KafkaProduceRateTracker tracker, List<KafkaPartition> partitions) {
Iterator<KafkaPartition> keyIterator = tracker.getPartitionsToProdRate().keySet().iterator();
for (KafkaPartition partition : partitions) {
Assert.assertEquals(partition, keyIterator.next());
}
}
private void writeProduceRateToKafkaWatermarksHelper(long readStartTime, long decodeStartTime, long currentTime) {
this.extractorStatsTracker.reset();
assertTopicPartitionOrder(tracker, kafkaPartitions);
extractorStatsTracker.onDecodeableRecord(0, readStartTime, decodeStartTime, 100, currentTime-8000, currentTime-10000);
readStartTime++;
decodeStartTime++;
extractorStatsTracker.onDecodeableRecord(1, readStartTime, decodeStartTime, 200, currentTime-7000, currentTime-9000);
extractorStatsTracker.updateStatisticsForCurrentPartition(0, readStartTime, currentTime - 8000);
extractorStatsTracker.updateStatisticsForCurrentPartition(1, readStartTime, currentTime - 7000);
MultiLongWatermark highWatermark = new MultiLongWatermark(Lists.newArrayList(20L, 30L));
Map<KafkaPartition, Long> latestOffsetMap = Maps.newHashMap();
latestOffsetMap.put(kafkaPartitions.get(0), 35L);
latestOffsetMap.put(kafkaPartitions.get(1), 47L);
Map<String, CheckpointableWatermark> lastCommittedWatermarks = Maps.newHashMap();
KafkaPartition topicPartition1 = kafkaPartitions.get(0);
KafkaPartition topicPartition2 = kafkaPartitions.get(1);
lastCommittedWatermarks.put(topicPartition1.toString(),
new KafkaStreamingExtractor.KafkaWatermark(topicPartition1, new LongWatermark(5L)));
lastCommittedWatermarks.put(topicPartition2.toString(),
new KafkaStreamingExtractor.KafkaWatermark(topicPartition2, new LongWatermark(7L)));
this.tracker.writeProduceRateToKafkaWatermarks(latestOffsetMap, lastCommittedWatermarks, highWatermark, currentTime);
}
private void assertKafkaWatermarks(long currentTime) {
Map<String, CheckpointableWatermark> unacknowledgedWatermarks = watermarkTracker.getAllUnacknowledgedWatermarks();
Assert.assertEquals(unacknowledgedWatermarks.size(), 2);
KafkaPartition topicPartition1 = kafkaPartitions.get(0);
KafkaPartition topicPartition2 = kafkaPartitions.get(1);
for (KafkaPartition topicPartition : Lists.newArrayList(topicPartition1, topicPartition2)) {
KafkaStreamingExtractor.KafkaWatermark kafkaWatermark = (KafkaStreamingExtractor.KafkaWatermark) unacknowledgedWatermarks.get(topicPartition.toString());
if (currentTime == HOLIDAY_TIME + 10) {
Assert.assertTrue(kafkaWatermark.avgProduceRates == null);
continue;
}
Assert.assertTrue(kafkaWatermark.avgProduceRates != null);
Date date = new Date(currentTime);
int hourOfDay = KafkaProduceRateTracker.getHourOfDay(date);
int dayOfWeek = KafkaProduceRateTracker.getDayOfWeek(date);
Assert.assertTrue(kafkaWatermark.avgProduceRates[dayOfWeek][hourOfDay] > 0);
for (int i = 0; i < 7; i++) {
for (int j = 0; j < 24; j++) {
if (i != dayOfWeek || j != hourOfDay) {
Assert.assertTrue(kafkaWatermark.avgProduceRates[i][j] < 0);
}
}
}
Assert.assertTrue(kafkaWatermark.getAvgConsumeRate() > 0);
}
}
@Test
public void testWriteProduceRateToKafkaWatermarksNoData() {
long currentTime = System.currentTimeMillis();
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(KafkaProduceRateTracker.KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY, false);
workUnitState.setProp(FlushingExtractor.FLUSH_INTERVAL_SECONDS_KEY, 1L);
workUnitState.setProp(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY, 5L);
workUnitState.setProp(ConfigurationKeys.KAFKA_BROKERS, "testBroker");
WatermarkTracker watermarkTracker = new LastWatermarkTracker(false);
KafkaExtractorStatsTracker extractorStatsTracker = new KafkaExtractorStatsTracker(workUnitState, kafkaPartitions);
KafkaProduceRateTracker tracker =
new KafkaProduceRateTracker(workUnitState, kafkaPartitions, watermarkTracker, extractorStatsTracker, currentTime);
Map<KafkaPartition, Long> latestOffsetMap = Maps.newHashMap();
latestOffsetMap.put(kafkaPartitions.get(0), 20L);
latestOffsetMap.put(kafkaPartitions.get(1), 30L);
Map<String, CheckpointableWatermark> lastCommittedWatermarks = Maps.newHashMap();
//No new data; High watermark same as latest offsets
MultiLongWatermark highWatermark = new MultiLongWatermark(Lists.newArrayList(20L, 30L));
extractorStatsTracker.reset();
tracker.writeProduceRateToKafkaWatermarks(latestOffsetMap, lastCommittedWatermarks, highWatermark, currentTime);
Map<String, CheckpointableWatermark> unacknowledgedWatermarks = watermarkTracker.getAllUnacknowledgedWatermarks();
for (KafkaPartition topicPartition : kafkaPartitions) {
KafkaStreamingExtractor.KafkaWatermark kafkaWatermark = (KafkaStreamingExtractor.KafkaWatermark) unacknowledgedWatermarks.get(topicPartition.toString());
Assert.assertTrue(kafkaWatermark.avgProduceRates != null);
Assert.assertTrue(kafkaWatermark.avgConsumeRate < 0);
Assert.assertTrue(kafkaWatermark.getLwm().getValue() > 0);
}
}
@Test (dependsOnMethods = "testWriteProduceRateToKafkaWatermarksNoData")
public void testWriteProduceRateToKafkaWatermarks() {
long readStartTime = System.nanoTime();
long decodeStartTime = readStartTime + 1;
long currentTime = System.currentTimeMillis();
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(KafkaProduceRateTracker.KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY, false);
workUnitState.setProp(FlushingExtractor.FLUSH_INTERVAL_SECONDS_KEY, 1L);
this.tracker = new KafkaProduceRateTracker(workUnitState, kafkaPartitions, watermarkTracker, extractorStatsTracker, currentTime);
//Bootstrap the extractorStatsTracker
writeProduceRateToKafkaWatermarksHelper(readStartTime, decodeStartTime, currentTime);
for (int i = 1; i < KafkaProduceRateTracker.SLIDING_WINDOW_SIZE + 1; i++) {
//Add more records and update watermarks/stats
writeProduceRateToKafkaWatermarksHelper(readStartTime + 1000 + i, decodeStartTime + 1000 + i,
currentTime + i);
}
//Ensure kafka watermark is non-null and is > 0 for the hour-of-day and day-of-week corresponding to currentTime
assertKafkaWatermarks(currentTime + KafkaProduceRateTracker.SLIDING_WINDOW_SIZE);
}
@Test (dependsOnMethods = "testWriteProduceRateToKafkaWatermarks")
public void testWriteProduceRateToKafkaWatermarksWithHolidays() {
long readStartTime = TimeUnit.MILLISECONDS.toNanos(HOLIDAY_TIME);
long decodeStartTime = readStartTime + 1;
Long currentTime = HOLIDAY_TIME + 10;
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(KafkaProduceRateTracker.KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY, true);
workUnitState.setProp(FlushingExtractor.FLUSH_INTERVAL_SECONDS_KEY, 1L);
this.tracker = new KafkaProduceRateTracker(workUnitState, kafkaPartitions, watermarkTracker, extractorStatsTracker, currentTime);
//Bootstrap the extractorStatsTracker; Holiday stats collection disabled.
writeProduceRateToKafkaWatermarksHelper(readStartTime, decodeStartTime, currentTime);
//Add a more records and update watermarks/stats
writeProduceRateToKafkaWatermarksHelper(readStartTime + 1000, decodeStartTime + 1000, currentTime + 1);
//Since stats collection is disabled on holidays, ensure watermarks are null.
assertKafkaWatermarks(currentTime);
readStartTime = TimeUnit.MILLISECONDS.toNanos(NON_HOLIDAY_TIME);
decodeStartTime = readStartTime + 1;
currentTime = NON_HOLIDAY_TIME + 10;
//Bootstrap the extractorStatsTracker with initial records
writeProduceRateToKafkaWatermarksHelper(readStartTime, decodeStartTime, currentTime);
for (int i = 1; i < KafkaProduceRateTracker.SLIDING_WINDOW_SIZE + 1; i++) {
//Add more records and update watermarks/stats
writeProduceRateToKafkaWatermarksHelper(readStartTime + 1000 + i, decodeStartTime + 1000 + i,
currentTime + i);
}
//Ensure kafka watermark is not null and is > 0 for the hour-of-day and day-of-week corresponding to currentTime
assertKafkaWatermarks(currentTime + KafkaProduceRateTracker.SLIDING_WINDOW_SIZE);
}
@Test
public void testIsHoliday() {
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(KafkaProduceRateTracker.KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY, true);
KafkaExtractorStatsTracker extractorStatsTracker = new KafkaExtractorStatsTracker(this.workUnitState, kafkaPartitions);
KafkaProduceRateTracker tracker = new KafkaProduceRateTracker(workUnitState, kafkaPartitions, watermarkTracker, extractorStatsTracker);
Assert.assertTrue(tracker.isHoliday(HOLIDAY_DATE));
//Ensure that the caching behavior is correct
Assert.assertTrue(tracker.isHoliday(HOLIDAY_DATE));
Assert.assertFalse(tracker.isHoliday(NON_HOLIDAY_DATE));
}
@Test
public void testGetPenultimateElement() {
EvictingQueue<Double> queue = EvictingQueue.create(3);
queue.add(1.0);
queue.add(2.0);
queue.add(3.0);
Double element = KafkaProduceRateTracker.getPenultimateElement(queue);
Assert.assertEquals(element, 2.0);
queue.add(4.0);
element = KafkaProduceRateTracker.getPenultimateElement(queue);
Assert.assertEquals(element, 3.0);
queue.add(5.0);
element = KafkaProduceRateTracker.getPenultimateElement(queue);
Assert.assertEquals(element, 4.0);
}
} | 3,233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramLogReader;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.configuration.WorkUnitState;
@Test(singleThreaded = true)
public class KafkaExtractorStatsTrackerTest {
List<KafkaPartition> kafkaPartitions = new ArrayList<>();
private KafkaExtractorStatsTracker extractorStatsTracker;
private WorkUnitState workUnitState;
final static KafkaPartition PARTITION0 = new KafkaPartition.Builder().withTopicName("test-topic").withId(0).build();
final static KafkaPartition PARTITION1 = new KafkaPartition.Builder().withTopicName("test-topic").withId(1).build();
private long epochDurationMs;
@BeforeClass
public void setUp() {
  // Two partitions of the same topic back every test in this class.
  kafkaPartitions.add(PARTITION0);
  kafkaPartitions.add(PARTITION1);
  // Configure a work-unit state with a 10-minute record SLA and observed-latency tracking on.
  WorkUnitState state = new WorkUnitState();
  state.setProp(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY, 10L);
  state.setProp(KafkaSource.OBSERVED_LATENCY_MEASUREMENT_ENABLED, true);
  state.setProp(ConfigurationKeys.KAFKA_BROKERS, "testBroker");
  this.workUnitState = state;
  this.extractorStatsTracker = new KafkaExtractorStatsTracker(state, kafkaPartitions);
}
@Test
public void testOnUndecodeableRecord() {
  //Ensure that error counters are initialized correctly for both partitions
  Assert.assertEquals(this.extractorStatsTracker.getErrorPartitionCount(), 0);
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(0).longValue(), -1);
  // Fixed copy-paste bug: the second initialization check previously re-read partition 0.
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(1).longValue(), -1);
  //Ensure that error counters are updated correctly after 1st call to KafkaExtractorStatsTracker#onUndecodeableRecord()
  this.extractorStatsTracker.onUndecodeableRecord(0);
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(0).longValue(), 1);
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(1).longValue(), -1);
  Assert.assertEquals(this.extractorStatsTracker.getErrorPartitionCount(), 1);
  //Ensure that error counters are updated correctly after 2nd call to KafkaExtractorStatsTracker#onUndecodeableRecord()
  this.extractorStatsTracker.onUndecodeableRecord(0);
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(0).longValue(), 2);
  Assert.assertEquals(this.extractorStatsTracker.getDecodingErrorCount(1).longValue(), -1);
  Assert.assertEquals(this.extractorStatsTracker.getErrorPartitionCount(), 1);
}
@Test
public void testOnNullRecord() {
  //Ensure that counters are initialized correctly for both partitions
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(0).longValue(), -1);
  // Fixed copy-paste bug: the second initialization check previously re-read partition 0.
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(1).longValue(), -1);
  //Ensure that counters are updated correctly after 1st call to KafkaExtractorStatsTracker#onNullRecord()
  this.extractorStatsTracker.onNullRecord(0);
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(0).longValue(), 1);
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(1).longValue(), -1);
  //Ensure that counters are updated correctly after 2nd call to KafkaExtractorStatsTracker#onNullRecord()
  this.extractorStatsTracker.onNullRecord(0);
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(0).longValue(), 2);
  Assert.assertEquals(this.extractorStatsTracker.getNullRecordCount(1).longValue(), -1);
}
@Test
public void testResetStartFetchEpochTime() {
  // Resetting partition 1 must stamp a start-fetch time no earlier than "now".
  long before = System.currentTimeMillis();
  this.extractorStatsTracker.resetStartFetchEpochTime(1);
  long startFetchTime =
      this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getStartFetchEpochTime();
  Assert.assertTrue(startFetchTime >= before);
}
@Test
public void testOnDecodeableRecord() throws InterruptedException {
  // Start from a clean epoch so earlier tests do not pollute partition 0's stats.
  this.extractorStatsTracker.reset();
  long readStartTime = System.nanoTime();
  // Sleep 1ms so the measured decode/read durations are strictly positive.
  Thread.sleep(1);
  long decodeStartTime = System.nanoTime();
  long currentTimeMillis = System.currentTimeMillis();
  // Log-append time 15 minutes in the past: presumably past the SLA threshold, so this record
  // should count as SLA-missed (confirmed by the slaMissedRecordCount assertions below).
  long logAppendTimestamp = currentTimeMillis - TimeUnit.MINUTES.toMillis(15);
  long recordCreationTimestamp = currentTimeMillis - TimeUnit.MINUTES.toMillis(16);
  // Partition 0 stats are pristine after reset(): nothing processed, timers at zero,
  // SLA/log-append trackers at their -1 sentinel, empty latency histogram.
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getProcessedRecordCount(), 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getPartitionTotalSize(), 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getDecodeRecordTime() == 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getReadRecordTime() == 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getSlaMissedRecordCount(), -1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMinLogAppendTime(), -1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMaxLogAppendTime(), -1);
  Assert.assertEquals(this.extractorStatsTracker.getObservedLatencyHistogram().getTotalCount(), 0);
  // First 100-byte record: counts, sizes, timers and min/max log-append times all initialize.
  this.extractorStatsTracker.onDecodeableRecord(0, readStartTime, decodeStartTime, 100, logAppendTimestamp, recordCreationTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getProcessedRecordCount(), 1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getPartitionTotalSize(), 100);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getDecodeRecordTime() > 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getReadRecordTime() > 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getSlaMissedRecordCount(), 1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMinLogAppendTime(), logAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMaxLogAppendTime(), logAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getObservedLatencyHistogram().getTotalCount(), 1);
  // Second record is recent (10ms-old log-append time), so it must NOT bump the SLA-missed
  // count; min log-append time stays at the older first record, max advances to this one.
  readStartTime = System.nanoTime();
  Thread.sleep(1);
  decodeStartTime = System.nanoTime();
  long previousLogAppendTimestamp = logAppendTimestamp;
  currentTimeMillis = System.currentTimeMillis();
  logAppendTimestamp = currentTimeMillis - 10;
  recordCreationTimestamp = currentTimeMillis - 20;
  long previousDecodeRecordTime = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getDecodeRecordTime();
  long previousReadRecordTime = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getReadRecordTime();
  this.extractorStatsTracker.onDecodeableRecord(0, readStartTime, decodeStartTime, 100, logAppendTimestamp, recordCreationTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getProcessedRecordCount(), 2);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getPartitionTotalSize(), 200);
  // Decode/read timers are cumulative across records.
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getDecodeRecordTime() > previousDecodeRecordTime);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getReadRecordTime() > previousReadRecordTime);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getSlaMissedRecordCount(), 1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMinLogAppendTime(), previousLogAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMaxLogAppendTime(), logAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getObservedLatencyHistogram().getTotalCount(), 2);
}
@Test
public void testOnFetchNextMessageBuffer() throws InterruptedException {
  // The fetch-buffer timer for partition 1 starts out at zero.
  KafkaPartition partition = kafkaPartitions.get(1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(partition).getFetchMessageBufferTime(), 0);
  long fetchStartNanos = System.nanoTime();
  // Sleep briefly so the elapsed fetch duration is strictly positive.
  Thread.sleep(1);
  this.extractorStatsTracker.onFetchNextMessageBuffer(1, fetchStartNanos);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(partition).getFetchMessageBufferTime() > 0);
}
@Test
public void testOnPartitionReadComplete() throws InterruptedException {
  // Read-time accounting for partition 1 starts at zero.
  KafkaPartition partition = kafkaPartitions.get(1);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(partition).getReadRecordTime(), 0);
  long readStartNanos = System.nanoTime();
  // Sleep briefly so a measurable duration elapses before the read completes.
  Thread.sleep(1);
  this.extractorStatsTracker.onPartitionReadComplete(1, readStartNanos);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(partition).getReadRecordTime() > 0);
}
@Test (dependsOnMethods = "testOnDecodeableRecord")
public void testUpdateStatisticsForCurrentPartition()
    throws InterruptedException {
  long readStartTime = System.nanoTime();
  // Partition 0 stats are not finalized yet: no stop time, no elapsed time,
  // and the per-record average is still at its negative sentinel.
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getStopFetchEpochTime(), 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getElapsedTime(), 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getAvgMillisPerRecord() < 0);
  // Finalize partition 0: derived timing stats become positive and the average record size
  // reflects the two 100-byte records consumed in testOnDecodeableRecord.
  this.extractorStatsTracker.updateStatisticsForCurrentPartition(0, readStartTime, 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getStopFetchEpochTime() > 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getElapsedTime() > 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getAvgMillisPerRecord() > 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getAvgRecordSize(), 100);
  // Consume one fresh (10ms-old, hence not SLA-missed) record on partition 1, then finalize it.
  readStartTime = System.nanoTime();
  long currentTimeMillis = System.currentTimeMillis();
  long logAppendTimestamp = currentTimeMillis - 10;
  long recordCreationTimestamp = currentTimeMillis - 20;
  Thread.sleep(1);
  long decodeStartTime = System.nanoTime();
  this.extractorStatsTracker.onDecodeableRecord(1, readStartTime, decodeStartTime, 100, logAppendTimestamp, recordCreationTimestamp);
  this.extractorStatsTracker.updateStatisticsForCurrentPartition(1, readStartTime, 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getElapsedTime() > 0);
  Assert.assertTrue(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getAvgMillisPerRecord() > 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getAvgRecordSize(), 100);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getSlaMissedRecordCount(), 0);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getMinLogAppendTime(), logAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getMaxLogAppendTime(), logAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getObservedLatencyHistogram().getTotalCount(), 3);
  // Aggregate stats for the epoch span the earliest start (partition 0) to the latest
  // stop (partition 1); byte/record/SLA totals sum across both partitions.
  long startFetchEpochTime = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getStartFetchEpochTime();
  long stopFetchEpochTime = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getStopFetchEpochTime();
  // Remember the epoch duration; testGetConsumptionRateMBps uses it to invert the rate.
  this.epochDurationMs = stopFetchEpochTime - startFetchEpochTime;
  long minLogAppendTimestamp = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(0)).getMinLogAppendTime();
  long maxLogAppendTimestamp = this.extractorStatsTracker.getStatsMap().get(kafkaPartitions.get(1)).getMaxLogAppendTime();
  //Ensure aggregate extractor stats have been updated correctly for the completed epoch
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getMinStartFetchEpochTime(), startFetchEpochTime);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getMaxStopFetchEpochTime(), stopFetchEpochTime);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getMinLogAppendTime(), minLogAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getMaxLogAppendTime(), maxLogAppendTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getNumBytesConsumed(), 300L);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getProcessedRecordCount(), 3L);
  Assert.assertEquals(this.extractorStatsTracker.getAggregateExtractorStats().getSlaMissedRecordCount(), 1);
}
@Test (dependsOnMethods = "testUpdateStatisticsForCurrentPartition")
public void testGetAvgRecordSize() {
  // Both partitions consumed 100-byte records earlier in the dependency chain.
  Assert.assertEquals(this.extractorStatsTracker.getAvgRecordSize(0), 100);
  Assert.assertEquals(this.extractorStatsTracker.getAvgRecordSize(1), 100);
  // reset() closes the epoch; average sizes drop back to 0.
  this.extractorStatsTracker.reset();
  Assert.assertEquals(this.extractorStatsTracker.getAvgRecordSize(0), 0);
  long readStartTime = System.nanoTime();
  long decodeStartTime = readStartTime + 1;
  long currentTimeMillis = System.currentTimeMillis();
  long logAppendTimestamp = currentTimeMillis - 10;
  long recordCreationTimestamp = currentTimeMillis - 20;
  // A single 150-byte record makes partition 1's average exactly 150.
  this.extractorStatsTracker.onDecodeableRecord(1, readStartTime, decodeStartTime, 150, logAppendTimestamp, recordCreationTimestamp);
  Assert.assertEquals(this.extractorStatsTracker.getAvgRecordSize(1), 150);
}
@Test (dependsOnMethods = "testGetAvgRecordSize")
public void testGetMaxLatency() {
  // Earlier tests in the dependency chain ingested a record whose log-append time was
  // ~15 minutes in the past, so the tracked maximum ingestion latency should be at least 15.
  long maxLatencyMinutes = this.extractorStatsTracker.getMaxIngestionLatency(TimeUnit.MINUTES);
  Assert.assertTrue(maxLatencyMinutes >= 15);
}
@Test (dependsOnMethods = "testGetMaxLatency")
public void testGetConsumptionRateMBps() {
  double consumptionRateMBps = this.extractorStatsTracker.getConsumptionRateMBps();
  // Invert the rate computation: rate (MB/s) * epoch duration (ms) * 2^20 / 1000 should
  // recover the 300 bytes consumed during the epoch; Math.ceil absorbs float truncation.
  // (Replaces the deprecated `new Double(...).longValue()` boxing with a plain cast.)
  long recoveredBytes = (long) (Math.ceil(consumptionRateMBps * epochDurationMs * 1024 * 1024) / 1000);
  Assert.assertEquals(recoveredBytes, 300L);
}
@Test (dependsOnMethods = "testGetConsumptionRateMBps")
public void testGetMaxLatencyNoRecordsInEpoch() {
  //Close the previous epoch
  this.extractorStatsTracker.reset();
  // Primitive long: the boxed Long local served no purpose and only added autoboxing.
  long readStartTime = System.nanoTime();
  //Call update on partitions 0 and 1 with no records consumed from either partition
  this.extractorStatsTracker.updateStatisticsForCurrentPartition(0, readStartTime, 0);
  this.extractorStatsTracker.updateStatisticsForCurrentPartition(1, readStartTime, 0);
  //Close the epoch
  this.extractorStatsTracker.reset();
  //Ensure the max latency is 0 when there are no records
  Assert.assertEquals(this.extractorStatsTracker.getMaxIngestionLatency(TimeUnit.MINUTES), 0L);
}
@Test
public void testGenerateTagsForPartitions() throws Exception {
  // Long literals instead of the deprecated `new Long(n)` constructor.
  MultiLongWatermark lowWatermark = new MultiLongWatermark(Arrays.asList(10L, 20L));
  MultiLongWatermark highWatermark = new MultiLongWatermark(Arrays.asList(20L, 30L));
  MultiLongWatermark nextWatermark = new MultiLongWatermark(Arrays.asList(15L, 25L));
  // Extra per-partition tags supplied only for PARTITION0.
  Map<KafkaPartition, Map<String, String>> additionalTags =
      ImmutableMap.of(PARTITION0, ImmutableMap.of("testKey", "testValue"));
  // Clear any fetch-time props left behind by other tests so this test observes only
  // what generateTagsForPartitions() writes.
  this.workUnitState.removeProp(KafkaUtils.getPartitionPropName(KafkaSource.START_FETCH_EPOCH_TIME, 0));
  this.workUnitState.removeProp(KafkaUtils.getPartitionPropName(KafkaSource.STOP_FETCH_EPOCH_TIME, 0));
  KafkaUtils.setPartitionAvgRecordMillis(this.workUnitState, PARTITION0, 0);
  KafkaExtractorStatsTracker.ExtractorStats extractorStats = this.extractorStatsTracker.getStatsMap()
      .get(kafkaPartitions.get(0));
  extractorStats.setStartFetchEpochTime(1000);
  extractorStats.setStopFetchEpochTime(10000);
  extractorStats.setAvgMillisPerRecord(10.1);
  Map<KafkaPartition, Map<String, String>> result =
      extractorStatsTracker.generateTagsForPartitions(lowWatermark, highWatermark, nextWatermark, additionalTags);
  // generateTagsForPartitions will set the following in the workUnitState
  Assert.assertEquals(this.workUnitState.getPropAsLong(
      KafkaUtils.getPartitionPropName(KafkaSource.START_FETCH_EPOCH_TIME, 0)),
      extractorStats.getStartFetchEpochTime());
  Assert.assertEquals(this.workUnitState.getPropAsLong(
      KafkaUtils.getPartitionPropName(KafkaSource.STOP_FETCH_EPOCH_TIME, 0)),
      extractorStats.getStopFetchEpochTime());
  Assert.assertEquals(KafkaUtils.getPartitionAvgRecordMillis(this.workUnitState, PARTITION0),
      extractorStats.getAvgMillisPerRecord());
  // restore values since other tests check for them
  extractorStats.setStartFetchEpochTime(0);
  extractorStats.setStopFetchEpochTime(0);
  extractorStats.setAvgMillisPerRecord(-1);
  // Additional tags must appear only on the partition they were supplied for.
  Assert.assertTrue(result.get(PARTITION0).containsKey("testKey"));
  Assert.assertEquals(result.get(PARTITION0).get("testKey"), "testValue");
  Assert.assertFalse(result.get(PARTITION1).containsKey("testKey"));
}
@Test
public void testConvertHistogramToString() {
  // Record a small set of values into a 3-significant-digit histogram covering [1, 100].
  Histogram original = new Histogram(1, 100, 3);
  for (long value : new long[]{3, 25, 25, 92}) {
    original.recordValue(value);
  }
  // Serialize to the tracker's string form, then parse it back with a HistogramLogReader.
  String encoded = KafkaExtractorStatsTracker.convertHistogramToString(original);
  HistogramLogReader logReader =
      new HistogramLogReader(new ByteArrayInputStream(encoded.getBytes(Charsets.UTF_8)));
  Histogram decoded = (Histogram) logReader.nextIntervalHistogram();
  // The round trip must preserve total count, max value, and per-value counts exactly.
  Assert.assertEquals(decoded.getTotalCount(), 4);
  Assert.assertEquals(decoded.getMaxValue(), 92);
  Assert.assertEquals(decoded.getCountAtValue(25), 2);
  Assert.assertEquals(decoded.getCountAtValue(3), 1);
  Assert.assertEquals(decoded.getCountAtValue(92), 1);
}
} | 3,234 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * Unit tests for {@link KafkaExtractor} configuration helpers.
 */
public class KafkaExtractorTest {

  /**
   * Verifies broker-URI to simple-name resolution: a missing or empty broker config is
   * rejected, an unmapped broker resolves to the empty string, and a mapped broker
   * resolves to its configured simple name.
   */
  @Test
  public void testGetKafkaBrokerSimpleName() {
    State state = new State();
    // No kafka.brokers configured at all.
    Assert.assertThrows(IllegalArgumentException.class, () -> KafkaExtractor.getKafkaBrokerSimpleName(state));
    // An empty broker list is equally invalid.
    state.setProp(ConfigurationKeys.KAFKA_BROKERS, "");
    Assert.assertThrows(IllegalArgumentException.class, () -> KafkaExtractor.getKafkaBrokerSimpleName(state));
    final String kafkaBrokerUri = "kafka.broker.uri.com:12345";
    final String kafkaBrokerSimpleName = "simple.kafka.name";
    state.setProp(ConfigurationKeys.KAFKA_BROKERS, kafkaBrokerUri);
    // No mapping configured: the simple name defaults to the empty string.
    // (TestNG's assertEquals is (actual, expected) -- the computed value goes first.)
    Assert.assertEquals(KafkaExtractor.getKafkaBrokerSimpleName(state), "");
    // A mapping that does not mention this broker URI still yields the empty string.
    // (The mapping is a plain literal; the original passed it through String.format with
    // no placeholders, silently discarding its arguments.)
    state.setProp(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY, "foobar->foobarId");
    Assert.assertEquals(KafkaExtractor.getKafkaBrokerSimpleName(state), "");
    // Once the broker URI is present in the map, its simple name is returned.
    state.setProp(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY,
        String.format("%s->%s,foobar->foobarId", kafkaBrokerUri, kafkaBrokerSimpleName));
    Assert.assertEquals(KafkaExtractor.getKafkaBrokerSimpleName(state), kafkaBrokerSimpleName);
  }

  /**
   * Sanity check that the "uri->simpleName" mapping string round-trips through a typesafe
   * Config unchanged, i.e. the key format stays backward compatible.
   */
  @Test
  public void testSimpleMapKeyIsBackwardCompatible() {
    Config cfg = ConfigFactory.empty()
        .withValue(ConfigurationKeys.KAFKA_BROKERS, ConfigValueFactory.fromAnyRef("kafkaBrokerUri"))
        .withValue(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY,
            ConfigValueFactory.fromAnyRef("kafkaBrokerUri->simpleName"));
    Assert.assertEquals(cfg.getString(ConfigurationKeys.KAFKA_BROKERS), "kafkaBrokerUri");
    Assert.assertEquals(cfg.getString(ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY), "kafkaBrokerUri->simpleName");
  }
}
| 3,235 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaIngestionHealthCheckTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker;
import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent;
import org.apache.gobblin.util.eventbus.EventBusFactory;
/**
 * Tests for {@link KafkaIngestionHealthCheck}. A health-check failure is detected by
 * subscribing this test class to the container-health-check {@link EventBus} and waiting
 * on a {@link CountDownLatch} that the subscriber decrements.
 */
@Test (singleThreaded = true)
public class KafkaIngestionHealthCheckTest {
  // Shared event bus on which the health check posts ContainerHealthCheckFailureEvents.
  private EventBus eventBus;
  // Reaches 0 once handleContainerHealthCheckFailureEvent() fires; a count that stays at 1
  // after a short await means no failure event was posted.
  private CountDownLatch countDownLatch;

  @BeforeClass
  public void setUp() throws IOException {
    // Register this instance as a subscriber on the container-health-check event bus.
    this.eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
        SharedResourcesBrokerFactory.getImplicitBroker());
    this.eventBus.register(this);
  }

  @Subscribe
  @Test(enabled = false)
  // When a class has "@Test" annotation, TestNG will run all public methods as tests.
  // This specific method is public because eventBus is calling it. To prevent running it as a test, we mark it
  // as "disabled" test.
  public void handleContainerHealthCheckFailureEvent(ContainerHealthCheckFailureEvent event) {
    this.countDownLatch.countDown();
  }

  @Test
  public void testExecuteIncreasingLatencyCheckEnabled()
      throws InterruptedException {
    this.countDownLatch = new CountDownLatch(1);
    // Latency threshold of 5 minutes; the increasing-latency check is on by default.
    Config config = ConfigFactory.empty().withValue(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY,
        ConfigValueFactory.fromAnyRef(5))
        .withValue(KafkaIngestionHealthCheck.KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES_KEY, ConfigValueFactory.fromAnyRef(5));
    // Stub a latency sequence (6, 7, 7, 5 minutes) that stays above the threshold and is
    // non-decreasing for the first three calls, then drops below it.
    KafkaExtractorStatsTracker extractorStatsTracker = Mockito.mock(KafkaExtractorStatsTracker.class);
    Mockito.when(extractorStatsTracker.getMaxIngestionLatency(TimeUnit.MINUTES))
        .thenReturn(6L)
        .thenReturn(7L)
        .thenReturn(7L)
        .thenReturn(5L);
    Mockito.when(extractorStatsTracker.getConsumptionRateMBps())
        .thenReturn(2.0)
        .thenReturn(1.5)
        .thenReturn(2.1)
        .thenReturn(2.5);
    KafkaIngestionHealthCheck check = new KafkaIngestionHealthCheck(config, extractorStatsTracker);
    //Latency increases continuously for the first 3 calls to execute().
    check.execute();
    // Latch still at 1: one high-latency observation alone must not trigger a failure.
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1L);
    check.execute();
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1L);
    check.execute();
    //Ensure that ContainerHealthCheckFailureEvent is posted to eventBus; countDownLatch should be back to 0.
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 0);
    //Set the countdown latch back to 1.
    this.countDownLatch = new CountDownLatch(1);
    //Latency decreases from 10 to 5. So check.execute() should not post any event to EventBus.
    check.execute();
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1);
    // Re-run with the increasing-latency check disabled; this tail only exercises execute()
    // on a fresh mock (no assertions follow).
    config = config.withValue(KafkaIngestionHealthCheck.KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED_KEY, ConfigValueFactory.fromAnyRef(false));
    extractorStatsTracker = Mockito.mock(KafkaExtractorStatsTracker.class);
    Mockito.when(extractorStatsTracker.getMaxIngestionLatency(TimeUnit.MINUTES))
        .thenReturn(10L)
        .thenReturn(7L)
        .thenReturn(5L);
    Mockito.when(extractorStatsTracker.getConsumptionRateMBps())
        .thenReturn(2.0)
        .thenReturn(1.5)
        .thenReturn(2.1);
    check = new KafkaIngestionHealthCheck(config, extractorStatsTracker);
    check.execute();
  }

  @Test
  public void testExecuteIncreasingLatencyCheckDisabled()
      throws InterruptedException {
    this.countDownLatch = new CountDownLatch(1);
    // With the increasing-latency check disabled, the failure condition depends only on
    // latency staying above the 5-minute threshold, not on it increasing.
    Config config = ConfigFactory.empty().withValue(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY,
        ConfigValueFactory.fromAnyRef(5))
        .withValue(KafkaIngestionHealthCheck.KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES_KEY, ConfigValueFactory.fromAnyRef(5))
        .withValue(KafkaIngestionHealthCheck.KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED_KEY, ConfigValueFactory.fromAnyRef(false));
    // Latency sequence 10, 7, 6 (all above threshold, decreasing), then 4 (below threshold).
    KafkaExtractorStatsTracker extractorStatsTracker = Mockito.mock(KafkaExtractorStatsTracker.class);
    Mockito.when(extractorStatsTracker.getMaxIngestionLatency(TimeUnit.MINUTES))
        .thenReturn(10L)
        .thenReturn(7L)
        .thenReturn(6L)
        .thenReturn(4L);
    Mockito.when(extractorStatsTracker.getConsumptionRateMBps())
        .thenReturn(2.0)
        .thenReturn(1.5)
        .thenReturn(2.1)
        .thenReturn(2.5);
    KafkaIngestionHealthCheck check = new KafkaIngestionHealthCheck(config, extractorStatsTracker);
    //Latency consistently above 5 minutes for the first 3 calls to execute().
    check.execute();
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1L);
    check.execute();
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1L);
    check.execute();
    //Ensure that ContainerHealthCheckFailureEvent is posted to eventBus; countDownLatch should be back to 0.
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 0);
    //Set the countdown latch back to 1.
    this.countDownLatch = new CountDownLatch(1);
    //Latency decreases to 4. So check.execute() should not post any event to EventBus.
    check.execute();
    this.countDownLatch.await(10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(this.countDownLatch.getCount(), 1);
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.testng.Assert;
import org.testng.annotations.Test;
public class KafkaUtilsTest {
@Test
public void testGetTopicNameFromTopicPartition() {
Assert.assertEquals(KafkaUtils.getTopicNameFromTopicPartition("topic-1"), "topic");
Assert.assertEquals(KafkaUtils.getTopicNameFromTopicPartition("topic-foo-bar-1"), "topic-foo-bar");
}
} | 3,237 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaStreamTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.UUID;
import java.util.regex.Pattern;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.mockito.Mockito;
import com.codahale.metrics.Metric;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import org.apache.gobblin.kafka.client.AbstractBaseKafkaConsumerClient;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.KafkaConsumerRecord;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.util.ConfigUtils;
import static org.mockito.Mockito.mock;
/**
* A bunch of basic mocking class that can be used for different implementation to mock kafka clients.
*/
public class KafkaStreamTestUtils {
/**
 * Factory that hands out a single shared Mockito mock of {@link AbstractBaseKafkaConsumerClient},
 * letting tests stub the client once and have every {@code create()} call observe the stubbing.
 */
public static class MockKafkaConsumerClientFactory
    implements GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory {
  // Package-visible so tests can configure the mock's behavior directly.
  static final AbstractBaseKafkaConsumerClient MOCKED_KAFKA_CLIENT = mock(AbstractBaseKafkaConsumerClient.class);

  @Override
  public GobblinKafkaConsumerClient create(Config config) {
    // Always the same mock instance, regardless of the config passed in.
    return MOCKED_KAFKA_CLIENT;
  }
}
/**
 * Factory producing a fresh {@link MockKafkaConsumerClient} per {@code create()} call,
 * configured from the supplied {@link Config}.
 */
public static class MockKafka10ConsumerClientFactory
    implements GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory {

  @Override
  public GobblinKafkaConsumerClient create(Config config) {
    return new MockKafkaConsumerClient(config);
  }
}
/**
 * A mock implementation of {@link GobblinKafkaConsumerClient} that returns a {@link MockIterator} on
 * invocation of the {@link MockKafkaConsumerClient#consume()} method. Offset queries via
 * {@link #getLatestOffsets(Collection)} return monotonically increasing values per partition.
 */
public static class MockKafkaConsumerClient implements GobblinKafkaConsumerClient {
  public static final String NUM_PARTITIONS_ASSIGNED = "gobblin.kafka.streaming.numPartitions";
  public static final String CAN_RETURN_NULL_VALUED_RECORDS = "gobblin.kafka.streaming.canReturnNulls";

  // Last offset handed out per partition; keeps getLatestOffsets() monotonic across calls.
  private final Map<KafkaPartition, Long> latestOffsets = Maps.newHashMap();
  private final Random random = new Random();
  private final String topicName;
  private final boolean canReturnNullValuedRecords;
  private final List<Integer> partitionIds;

  protected MockKafkaConsumerClient(Config baseConfig) {
    this.topicName = baseConfig.getString(KafkaSource.TOPIC_NAME);
    int numPartitionsAssigned = ConfigUtils.getInt(baseConfig, NUM_PARTITIONS_ASSIGNED, 0);
    this.canReturnNullValuedRecords = ConfigUtils.getBoolean(baseConfig, CAN_RETURN_NULL_VALUED_RECORDS, false);
    this.partitionIds = getPartitionIds(baseConfig, numPartitionsAssigned);
  }

  // Reads the assigned partition ids from config keys of the form "<PARTITION_ID>.<i>".
  private List<Integer> getPartitionIds(Config baseConfig, int numPartitionsAssigned) {
    List<Integer> partitionIds = Lists.newArrayList();
    for (int i = 0; i < numPartitionsAssigned; i++) {
      String partitionIdProp = KafkaSource.PARTITION_ID + "." + i;
      partitionIds.add(baseConfig.getInt(partitionIdProp));
    }
    return partitionIds;
  }

  /**
   * @return a {@link MockIterator} over {@link KafkaConsumerRecord}s for the configured
   *         topic and partition ids.
   */
  @Override
  public Iterator<KafkaConsumerRecord> consume() {
    return new MockIterator(this.topicName, this.partitionIds, this.canReturnNullValuedRecords);
  }

  // No-op: a mock consumer has no broker to seek against.
  @Override
  public void assignAndSeek(List<KafkaPartition> topicPartitions,
      Map<KafkaPartition, LongWatermark> topicWatermarks) {
    return;
  }

  @Override
  public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
    return null;
  }

  @Override
  public long getEarliestOffset(KafkaPartition partition) {
    return 0;
  }

  @Override
  public long getLatestOffset(KafkaPartition partition) {
    return 0;
  }

  /**
   * Returns a random offset for each {@link KafkaPartition}. The method ensures that the offsets are monotonically
   * increasing for each {@link KafkaPartition} i.e. each subsequent call to the method will return a higher offset
   * for every partition in the partition list.
   * @param partitions partitions to report offsets for
   * @return the (shared, mutable) map of latest offsets keyed by partition
   */
  @Override
  public Map<KafkaPartition, Long> getLatestOffsets(Collection<KafkaPartition> partitions) {
    for (KafkaPartition partition : partitions) {
      if (this.latestOffsets.containsKey(partition)) {
        // Known partition: advance by a fixed stride to guarantee monotonicity.
        this.latestOffsets.put(partition, this.latestOffsets.get(partition) + 100);
      } else {
        // New partition: seed with a random offset. Autoboxes via Long.valueOf instead of
        // the deprecated `new Long(...)` constructor.
        this.latestOffsets.put(partition, (long) random.nextInt(100000));
      }
    }
    return this.latestOffsets;
  }

  @Override
  public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
    return null;
  }

  @Override
  public Map<String, Metric> getMetrics() {
    return new HashMap<>();
  }

  @Override
  public void close()
      throws IOException {
  }
}
/**
 * Mock {@link KafkaSchemaRegistry} whose "latest schema" is a static field shared across
 * instances: it defaults to a plain STRING schema and is overwritten by
 * {@link #register(Schema, String)}.
 */
public static class MockSchemaRegistry extends KafkaSchemaRegistry<String, Schema> {
  // Shared mutable state across all instances; last schema registered via register(schema, name).
  static Schema latestSchema = Schema.create(Schema.Type.STRING);

  public MockSchemaRegistry(Properties props) {
    super(props);
  }

  // Lookup by key is unsupported in this mock.
  @Override
  protected Schema fetchSchemaByKey(String key) {
    return null;
  }

  // Ignores the topic; always returns the shared latest schema.
  @Override
  public Schema getLatestSchemaByTopic(String topic) {
    return latestSchema;
  }

  // The name-less register is a no-op in this mock.
  @Override
  public String register(Schema schema) {
    return null;
  }

  // Records the schema as the shared latest and uses its string form as the "id".
  @Override
  public String register(Schema schema, String name) {
    latestSchema = schema;
    return schema.toString();
  }
}
/**
 * Mock of the low-level {@code KafkaSchemaRegistry} interface: remembers only the most
 * recently registered schema (per instance) and returns it for any name.
 */
public static class LowLevelMockSchemaRegistry
    implements org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry<String, Schema> {
  // Last schema passed to register(); null until the first registration.
  private Schema latestSchema;

  public LowLevelMockSchemaRegistry(Properties props) {
  }

  // Stores the schema (the name is ignored) and returns its string form as the "id".
  @Override
  public String register(String name, Schema schema) {
    this.latestSchema = schema;
    return schema.toString();
  }

  // Lookup by id is unsupported in this mock.
  @Override
  public Schema getById(String id) {
    return null;
  }

  // Ignores the name; always returns whatever was registered last.
  @Override
  public Schema getLatestSchema(String name) {
    return this.latestSchema;
  }

  @Override
  public boolean hasInternalCache() {
    return false;
  }
}
/**
 * A mock iterator of {@link KafkaConsumerRecord}s. The caller provides a topicName, a list of partition ids and
 * optionally, the number of records the iterator must iterate over. On each call to next(), the iterator returns
 * a mock {@link KafkaConsumerRecord}, with a partition id assigned in a round-robin fashion over the input list of
 * partition ids. Per-partition "next offsets" start at 0 and are incremented for the partition each record is
 * assigned to.
 */
public static class MockIterator implements Iterator<KafkaConsumerRecord> {
  // Schema for LiKafka10ConsumerRecords. TODO: Enhance the iterator to return random records
  // according to a given schema.
  private static final String SCHEMA =
      "{\"namespace\": \"example.avro\",\n" + " \"type\": \"record\",\n" + " \"name\": \"user\",\n"
          + " \"fields\": [\n" + " {\"name\": \"name\", \"type\": \"string\"},\n"
          + " {\"name\": \"DUMMY\", \"type\": [\"null\",\"string\"]}\n" + " ]\n" + "}";
  private final Schema schema = new Schema.Parser().parse(SCHEMA);
  private final String topicName;
  private final long maxNumRecords;
  private final List<Integer> partitionIds;
  // Next offset to report, tracked per entry of partitionIds (parallel array).
  private final long[] nextOffsets;
  private final boolean canReturnNullRecords;
  private long numRecordsReturnedSoFar;
  // Index into partitionIds of the partition the NEXT record will be assigned to.
  private int partitionIdx = 0;

  public MockIterator(String topicName, List<Integer> partitionIds, boolean canReturnNullRecords) {
    this(topicName, partitionIds, canReturnNullRecords, Long.MAX_VALUE);
  }

  /**
   * @param topicName topic reported by every mocked record
   * @param partitionIds partition ids assigned round-robin to the mocked records
   * @param canReturnNullRecords when true, every other record carries a null value
   * @param numRecords total number of records this iterator yields
   */
  public MockIterator(String topicName, List<Integer> partitionIds, boolean canReturnNullRecords, long numRecords) {
    this.topicName = topicName;
    this.maxNumRecords = numRecords;
    this.partitionIds = partitionIds;
    this.canReturnNullRecords = canReturnNullRecords;
    this.nextOffsets = new long[partitionIds.size()];
  }

  /**
   * Returns {@code true} if the iteration has more elements.
   * (In other words, returns {@code true} if {@link #next} would
   * return an element rather than throwing an exception.)
   *
   * @return {@code true} if the iteration has more elements
   */
  @Override
  public boolean hasNext() {
    return this.numRecordsReturnedSoFar < this.maxNumRecords;
  }

  /**
   * Returns the next element in the iteration.
   *
   * @return the next element in the iteration
   * @throws java.util.NoSuchElementException if the iteration has no more elements
   */
  @Override
  public KafkaConsumerRecord next() {
    // Honor the documented Iterator contract instead of silently exceeding maxNumRecords.
    if (!hasNext()) {
      throw new java.util.NoSuchElementException("Iterated past " + this.maxNumRecords + " records");
    }
    this.numRecordsReturnedSoFar++;
    return getMockConsumerRecord();
  }

  // Builds a Mockito-backed record for the current round-robin partition and advances
  // that partition's next offset.
  private KafkaConsumerRecord getMockConsumerRecord() {
    DecodeableKafkaRecord mockRecord = Mockito.mock(DecodeableKafkaRecord.class);
    Mockito.when(mockRecord.getValue()).thenReturn(getRecord());
    Mockito.when(mockRecord.getTopic()).thenReturn(topicName);
    int currentIdx = this.partitionIdx;
    Mockito.when(mockRecord.getPartition()).thenReturn(this.partitionIds.get(currentIdx));
    // Advance the round-robin pointer for the next record.
    this.partitionIdx = (this.partitionIdx + 1) % this.partitionIds.size();
    // Increment the next offset of the record's OWN partition. (Previously the pointer was
    // advanced first, so the offset of the *next* partition in the rotation was bumped and
    // reported instead of the current record's.)
    this.nextOffsets[currentIdx]++;
    Mockito.when(mockRecord.getNextOffset()).thenReturn(this.nextOffsets[currentIdx]);
    return mockRecord;
  }

  // Returns a record with a random "name", or null every other call when
  // canReturnNullRecords is set.
  private GenericRecord getRecord() {
    if ((!this.canReturnNullRecords) || (this.numRecordsReturnedSoFar % 2 == 0)) {
      GenericRecord record = new GenericData.Record(schema);
      // "name" is declared as a string field, so store the UUID's string form rather
      // than the UUID object (which would fail Avro serialization).
      record.put("name", UUID.randomUUID().toString());
      return record;
    } else {
      return null;
    }
  }
}
}
| 3,238 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/validator/TopicValidatorsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.validator;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link TopicValidators}: validator chaining and the per-topic
 * validation timeout.
 */
public class TopicValidatorsTest {
  @Test
  public void testTopicValidators() {
    List<String> allTopics = Arrays.asList(
        "topic1", "topic2", // allowed
        "topic-with.period-in_middle", ".topic-with-period-at-start", "topicWithPeriodAtEnd.", // bad topics
        "topic3", "topic4"); // in deny list
    List<KafkaTopic> candidates = buildKafkaTopics(allTopics);
    State jobState = new State();

    // With no validator classes configured, every topic passes through.
    Assert.assertEquals(new TopicValidators(jobState).validate(candidates).size(), 7);

    // Chain two validators: the topic-name validator and a deny-list validator.
    jobState.setProp(TopicValidators.VALIDATOR_CLASSES_KEY,
        TopicNameValidator.class.getName() + "," + DenyListValidator.class.getName());
    List<KafkaTopic> survivors = new TopicValidators(jobState).validate(candidates);
    Assert.assertEquals(survivors.size(), 2);
    Set<String> survivorNames = survivors.stream().map(KafkaTopic::getName).collect(Collectors.toSet());
    Assert.assertTrue(survivorNames.contains("topic1"));
    Assert.assertTrue(survivorNames.contains("topic2"));
  }

  @Test
  public void testValidatorTimeout() {
    List<KafkaTopic> candidates = buildKafkaTopics(Arrays.asList("topic1", "topic2", "topic3"));
    State jobState = new State();
    jobState.setProp(TopicValidators.VALIDATOR_CLASSES_KEY, RejectEverythingValidator.class.getName());

    // Validation of topic2 exceeds the 5-second budget; a timed-out topic is treated as valid.
    List<KafkaTopic> survivors = new TopicValidators(jobState).validate(candidates, 5, TimeUnit.SECONDS);
    Assert.assertEquals(survivors.size(), 1);
    Assert.assertEquals(survivors.get(0).getName(), "topic2");
  }

  /** Wraps each name into a {@link KafkaTopic} carrying no partition info. */
  private List<KafkaTopic> buildKafkaTopics(List<String> names) {
    return names.stream()
        .map(name -> new KafkaTopic(name, Collections.emptyList()))
        .collect(Collectors.toList());
  }

  /** A TopicValidator mimicking a deny list: topic3 and topic4 are rejected. */
  public static class DenyListValidator extends TopicValidatorBase {
    Set<String> denyList = ImmutableSet.of("topic3", "topic4");

    public DenyListValidator(State state) {
      super(state);
    }

    @Override
    public boolean validate(KafkaTopic topic) {
      return !this.denyList.contains(topic.getName());
    }
  }

  /**
   * A validator that rejects every topic it manages to validate. Validating "topic2"
   * sleeps for 10 seconds to force a timeout in tests that use a shorter budget.
   */
  public static class RejectEverythingValidator extends TopicValidatorBase {
    public RejectEverythingValidator(State state) {
      super(state);
    }

    @Override
    public boolean validate(KafkaTopic topic) {
      if (topic.getName().equals("topic2")) {
        try {
          Thread.sleep(10000);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      }
      return false;
    }
  }
}
| 3,239 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/ProduceRateAndLagBasedWorkUnitSizeEstimatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.gson.Gson;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
public class ProduceRateAndLagBasedWorkUnitSizeEstimatorTest {
private static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);
private static final String TEST_TOPIC = "test";
private static final long AVG_RECORD_SIZE = 1024L;
private static final String BINPACKING_TIME_1 = "11/17/2019 23:10:00";
private static final String BINPACKING_TIME_2 = "11/19/2019 08:00:00";
private double[][] avgProduceRates = new double[7][24];
private ProduceRateAndLagBasedWorkUnitSizeEstimator estimator;
@BeforeClass
public void setUp() {
double rate = 1.0;
for (int i = 0; i < 7; i++) {
for (int j = 0; j < 24; j++) {
if (i == 2) {
avgProduceRates[i][j] = -1.0;
} else {
avgProduceRates[i][j] = rate * ProduceRateAndLagBasedWorkUnitSizeEstimator.ONE_MEGA_BYTE;
rate++;
}
}
}
}
@Test
public void testCalcEstimatedSize() throws ParseException {
SourceState sourceState = new SourceState();
sourceState.setProp(ProduceRateAndLagBasedWorkUnitSizeEstimator.CATCHUP_SLA_IN_HOURS_KEY, 3);
sourceState.setProp(ProduceRateAndLagBasedWorkUnitSizeEstimator.REPLANNING_INTERVAL_IN_HOURS_KEY, 3);
sourceState.setProp(ProduceRateAndLagBasedWorkUnitSizeEstimator.PRODUCE_RATE_SCALING_FACTOR_KEY, 1);
this.estimator = new ProduceRateAndLagBasedWorkUnitSizeEstimator(sourceState);
SimpleDateFormat format = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss");
format.setTimeZone(TimeZone.getDefault());
//WorkUnit with no KafkaWatermark
KafkaStreamingExtractor.KafkaWatermark watermark = null;
WorkUnit workUnit = WorkUnit.createEmpty();
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PARTITION_WATERMARK, GSON.toJson(watermark));
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.DEFAULT_WORKUNIT_SIZE_KEY, 1.0);
workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, Long.toString(6 * 3600 * 1024));
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PACKING_START_TIME_MILLIS, format.parse(BINPACKING_TIME_1).getTime());
Assert.assertEquals(new Double(this.estimator.calcEstimatedSize(workUnit)).longValue(), 1L);
//WorkUnit with Kafka watermark and previous avg produce rates
watermark = new KafkaStreamingExtractor.KafkaWatermark(new KafkaPartition.Builder().withTopicName(TEST_TOPIC).withId(0).build(), new LongWatermark(0L));
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.MIN_WORKUNIT_SIZE_KEY, 2.0);
watermark.setAvgRecordSize(AVG_RECORD_SIZE);
watermark.setAvgProduceRates(avgProduceRates);
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PARTITION_WATERMARK, GSON.toJson(watermark));
Assert.assertEquals(new Double(this.estimator.calcEstimatedSize(workUnit)).longValue(), 29L);
//WorkUnit with Kafka watermark but no previous avg produce rates
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PACKING_START_TIME_MILLIS, format.parse(BINPACKING_TIME_2).getTime());
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.DEFAULT_WORKUNIT_SIZE_KEY, 2.0);
Assert.assertEquals(new Double(this.estimator.calcEstimatedSize(workUnit)).longValue(), 4L);
//Create a new workunit with minimum workunit size = 5.0
workUnit = WorkUnit.createEmpty();
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PARTITION_WATERMARK, GSON.toJson(watermark));
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.DEFAULT_WORKUNIT_SIZE_KEY, 1.0);
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.MIN_WORKUNIT_SIZE_KEY, 5.0);
workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, Long.toString(6 * 3600 * 1024));
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.PACKING_START_TIME_MILLIS, format.parse(BINPACKING_TIME_2).getTime());
workUnit.setProp(KafkaTopicGroupingWorkUnitPacker.DEFAULT_WORKUNIT_SIZE_KEY, 2.0);
Assert.assertEquals(new Double(this.estimator.calcEstimatedSize(workUnit)).longValue(), 5L);
}
} | 3,240 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaTopicGroupingWorkUnitPackerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaUtils;
import org.apache.gobblin.source.extractor.extract.kafka.UniversalKafkaSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
/**
 * Unit tests for {@link KafkaTopicGroupingWorkUnitPacker}: container-capacity based
 * packing, per-topic grouping, capacity-computation strategies and multi-workunit
 * splitting.
 */
public class KafkaTopicGroupingWorkUnitPackerTest {
  // Job properties shared by the packing tests; rebuilt before each method.
  private Properties props;

  @BeforeMethod
  public void setUp() {
    props = new Properties();
    // A container holds two work units; the custom estimator sizes every WU as 1.
    props.setProperty("gobblin.kafka.streaming.containerCapacity", "2");
    props.setProperty("kafka.workunit.size.estimator.type", "CUSTOM");
    props.setProperty("kafka.workunit.size.estimator.customizedType",
        "org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.UnitKafkaWorkUnitSizeEstimator");
  }

  /**
   * Check that capacity is honored. Set numContainers to 0 so the workUnit list size is determined only by the capacity
   */
  @Test
  public void testSingleTopic() {
    KafkaSource source = new UniversalKafkaSource();
    SourceState state = new SourceState(new State(props));
    state.setProp("gobblin.kafka.streaming.enableIndexing", false);
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, Files.createTempDir().getAbsolutePath());
    Map<String, List<WorkUnit>> workUnitsByTopic = ImmutableMap.of("topic1", Lists
        .newArrayList(getWorkUnitWithTopicPartition("topic1", 1), getWorkUnitWithTopicPartition("topic1", 2),
            getWorkUnitWithTopicPartition("topic1", 3)));
    List<WorkUnit> workUnits = new KafkaTopicGroupingWorkUnitPacker(source, state, Optional.absent()).pack(workUnitsByTopic, 0);
    // Three unit-sized partitions at capacity 2 => two work units: {1, 2} and {3}.
    Assert.assertEquals(workUnits.size(), 2);
    Assert.assertEquals(workUnits.get(0).getProp(KafkaSource.TOPIC_NAME), "topic1");
    Assert.assertEquals(workUnits.get(0).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 1);
    Assert.assertEquals(workUnits.get(0).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 1)), 2);
    Assert.assertEquals(workUnits.get(0).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
    Assert.assertEquals(workUnits.get(1).getProp(KafkaSource.TOPIC_NAME), "topic1");
    Assert.assertEquals(workUnits.get(1).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 3);
    Assert.assertEquals(workUnits.get(1).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
  }

  /**
   * Check that topics are kept in separate work units.
   */
  @Test
  public void testMultiTopic() {
    KafkaSource source = new UniversalKafkaSource();
    SourceState state = new SourceState(new State(props));
    state.setProp("gobblin.kafka.streaming.enableIndexing", false);
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, Files.createTempDir().getAbsolutePath());
    Map<String, List<WorkUnit>> workUnitsByTopic = ImmutableMap.of("topic1", Lists
        .newArrayList(getWorkUnitWithTopicPartition("topic1", 1), getWorkUnitWithTopicPartition("topic1", 2),
            getWorkUnitWithTopicPartition("topic1", 3)), "topic2", Lists
        .newArrayList(getWorkUnitWithTopicPartition("topic2", 1), getWorkUnitWithTopicPartition("topic2", 2),
            getWorkUnitWithTopicPartition("topic2", 3)));
    List<WorkUnit> workUnits = new KafkaTopicGroupingWorkUnitPacker(source, state, Optional.absent()).pack(workUnitsByTopic, 0);
    // Each topic packs independently into {1, 2} and {3} — no mixing across topics.
    Assert.assertEquals(workUnits.size(), 4);
    Assert.assertEquals(workUnits.get(0).getProp(KafkaSource.TOPIC_NAME), "topic1");
    Assert.assertEquals(workUnits.get(0).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 1);
    Assert.assertEquals(workUnits.get(0).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 1)), 2);
    Assert.assertEquals(workUnits.get(0).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
    Assert.assertEquals(workUnits.get(1).getProp(KafkaSource.TOPIC_NAME), "topic1");
    Assert.assertEquals(workUnits.get(1).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 3);
    Assert.assertEquals(workUnits.get(1).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
    Assert.assertEquals(workUnits.get(2).getProp(KafkaSource.TOPIC_NAME), "topic2");
    Assert.assertEquals(workUnits.get(2).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 1);
    Assert.assertEquals(workUnits.get(2).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 1)), 2);
    Assert.assertEquals(workUnits.get(2).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
    Assert.assertEquals(workUnits.get(3).getProp(KafkaSource.TOPIC_NAME), "topic2");
    Assert.assertEquals(workUnits.get(3).getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, 0)), 3);
    Assert.assertEquals(workUnits.get(3).getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY), 2, 0.001);
  }

  // With indexing enabled, the packer honors the requested container count but never
  // produces more work units than there are partitions.
  @Test
  public void testMultiTopicWithNumContainers() {
    KafkaSource source = new UniversalKafkaSource();
    SourceState state = new SourceState(new State(props));
    state.setProp("gobblin.kafka.streaming.enableIndexing", true);
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, Files.createTempDir().getAbsolutePath());
    Map<String, List<WorkUnit>> workUnitsByTopic = ImmutableMap.of(
        "topic1", Lists.newArrayList(getWorkUnitWithTopicPartition("topic1", 1),
            getWorkUnitWithTopicPartition("topic1", 2)),
        "topic2", Lists.newArrayList(getWorkUnitWithTopicPartition("topic2", 1),
            getWorkUnitWithTopicPartition("topic2", 2),
            getWorkUnitWithTopicPartition("topic2", 3),
            getWorkUnitWithTopicPartition("topic2", 4)));
    KafkaTopicGroupingWorkUnitPacker packer = new KafkaTopicGroupingWorkUnitPacker(source, state, Optional.absent());
    List<WorkUnit> workUnits = packer.pack(workUnitsByTopic, 5);
    Assert.assertEquals(workUnits.size(), 5);
    int partitionCount = 0;
    for(WorkUnit workUnit : workUnits) {
      partitionCount += KafkaUtils.getPartitions(workUnit).size();
    }
    // All six partitions must still be covered after packing.
    Assert.assertEquals(partitionCount, 6);
    workUnitsByTopic = ImmutableMap.of(
        "topic1", Lists.newArrayList(getWorkUnitWithTopicPartition("topic1", 1),
            getWorkUnitWithTopicPartition("topic1", 2)),
        "topic2", Lists.newArrayList(getWorkUnitWithTopicPartition("topic2", 1),
            getWorkUnitWithTopicPartition("topic2", 2),
            getWorkUnitWithTopicPartition("topic2", 3),
            getWorkUnitWithTopicPartition("topic2", 4)));
    workUnits = packer.pack(workUnitsByTopic, 7);
    // Total WU size wouldn't be more than 6
    Assert.assertEquals(workUnits.size(), 6);
    partitionCount = 0;
    for(WorkUnit workUnit : workUnits) {
      partitionCount += KafkaUtils.getPartitions(workUnit).size();
    }
    Assert.assertEquals(partitionCount, 6);
  }

  /**
   * Builds a single-partition {@link WorkUnit} for the given topic/partition with all
   * watermark/fetch-time bookkeeping properties zeroed out.
   */
  public WorkUnit getWorkUnitWithTopicPartition(String topic, int partition) {
    WorkUnit workUnit = new WorkUnit(new Extract(Extract.TableType.APPEND_ONLY, "kafka", topic));
    workUnit.setProp(KafkaSource.TOPIC_NAME, topic);
    workUnit.setProp(KafkaSource.PARTITION_ID, Integer.toString(partition));
    workUnit.setProp(KafkaSource.LEADER_HOSTANDPORT, "host:1234");
    workUnit.setProp(KafkaSource.LEADER_ID, "1");
    workUnit.setProp(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY, "0");
    workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, "0");
    workUnit.setProp("previousStartFetchEpochTime", "0");
    workUnit.setProp("previousStopFetchEpochTime", "0");
    workUnit.setProp("previousLowWatermark", "0");
    workUnit.setProp("previousHighWatermark", "0");
    workUnit.setProp("previousLatestOffset", "0");
    workUnit.setProp("offsetFetchEpochTime", "0");
    workUnit.setProp("previousOffsetFetchEpochTime", "0");
    return workUnit;
  }

  // Verifies MIN / MAX / MEDIAN / MEAN capacity aggregation over per-container samples.
  @Test
  public void testGetContainerCapacityForTopic() {
    double delta = 0.000001; //Error tolerance limit for assertions involving double.
    KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy strategy =
        KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy.MIN;
    List<Double> capacities = Arrays.asList(new Double[]{1.2, 1.4, 1.3, 1.4, 1.2});
    double capacity = KafkaTopicGroupingWorkUnitPacker.getContainerCapacityForTopic(capacities, strategy);
    Assert.assertEquals(capacity, 1.2, delta);
    strategy = KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy.MAX;
    capacity = KafkaTopicGroupingWorkUnitPacker.getContainerCapacityForTopic(capacities, strategy);
    Assert.assertEquals(capacity, 1.4, delta);
    strategy = KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy.MEDIAN;
    capacity = KafkaTopicGroupingWorkUnitPacker.getContainerCapacityForTopic(capacities, strategy);
    Assert.assertEquals(capacity, 1.3, delta);
    strategy = KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy.MEAN;
    capacity = KafkaTopicGroupingWorkUnitPacker.getContainerCapacityForTopic(capacities, strategy);
    Assert.assertEquals(capacity, 1.3, delta);
    //Validate the median for an even sized list
    capacities = Arrays.asList(new Double[]{1.2, 1.4, 1.3, 1.4});
    strategy = KafkaTopicGroupingWorkUnitPacker.ContainerCapacityComputationStrategy.MEDIAN;
    capacity = KafkaTopicGroupingWorkUnitPacker.getContainerCapacityForTopic(capacities, strategy);
    Assert.assertEquals(capacity, 1.35, delta);
  }

  // Splitting 2 MWUs (3 WUs each) must yield at least minWUSize MWUs, capped at the
  // total number of contained WUs (6).
  @Test
  public void testSplitMultiWorkUnits() {
    // Create a list of 2 MWU, each contains 3 WU within
    List<MultiWorkUnit> multiWorkUnitList = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
      MultiWorkUnit multiWorkUnit = MultiWorkUnit.createEmpty();
      for (int j = 0; j < 3; j++) {
        multiWorkUnit.addWorkUnit(WorkUnit.createEmpty());
      }
      multiWorkUnitList.add(multiWorkUnit);
    }
    // minWUSize is smaller than MWU size, so the result should remain the size of list of MWU
    List<MultiWorkUnit> mwuList = KafkaTopicGroupingWorkUnitPacker.splitMultiWorkUnits(multiWorkUnitList, 1);
    Assert.assertEquals(mwuList.size(), 2);
    mwuList = KafkaTopicGroupingWorkUnitPacker.splitMultiWorkUnits(multiWorkUnitList, 3);
    Assert.assertEquals(mwuList.size(), 3);
    mwuList = KafkaTopicGroupingWorkUnitPacker.splitMultiWorkUnits(multiWorkUnitList, 6);
    Assert.assertEquals(mwuList.size(), 6);
    // minWUSize is bigger than number combining of all WU, so the result will be the sum of all WU
    mwuList = KafkaTopicGroupingWorkUnitPacker.splitMultiWorkUnits(multiWorkUnitList, 7);
    Assert.assertEquals(mwuList.size(), 6);
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaWorkUnitPackerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.List;
import java.util.Map;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.WorkUnit;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker.KAFKA_WORKUNIT_PACKER_TYPE;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker.KAFKA_WORKUNIT_SIZE_ESTIMATOR_CUSTOMIZED_TYPE;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker.KAFKA_WORKUNIT_SIZE_ESTIMATOR_TYPE;
public class KafkaWorkUnitPackerTest {
private KafkaWorkUnitPacker packer;
AbstractSource source = Mockito.mock(AbstractSource.class);
SourceState state;
@BeforeMethod
public void setUp() {
state = new SourceState();
// Using customized type and having customized as a known class.
state.setProp(KAFKA_WORKUNIT_PACKER_TYPE, "CUSTOM");
state.setProp(KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE,
"org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaSingleLevelWorkUnitPacker");
state.setProp(KAFKA_WORKUNIT_SIZE_ESTIMATOR_TYPE, "CUSTOM");
state.setProp(KAFKA_WORKUNIT_SIZE_ESTIMATOR_CUSTOMIZED_TYPE,
"org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaAvgRecordTimeBasedWorkUnitSizeEstimator");
packer = new TestKafkaWorkUnitPacker(source, state);
}
@Test
public void testGetWorkUnitSizeEstimator() {
KafkaWorkUnitSizeEstimator estimator = packer.getWorkUnitSizeEstimator();
Assert.assertTrue(estimator instanceof KafkaAvgRecordTimeBasedWorkUnitSizeEstimator);
}
@Test
public void testGetInstance() {
KafkaWorkUnitPacker anotherPacker = KafkaWorkUnitPacker.getInstance(source, state);
Assert.assertTrue(anotherPacker instanceof KafkaSingleLevelWorkUnitPacker);
}
@Test
public void testPackEmptyWorkUnit() {
SourceState sourceState = new SourceState(state);
Map<String, List<WorkUnit>> emptyWorkUnit = Maps.newHashMap();
// Test single level packer
KafkaWorkUnitPacker mypacker = KafkaWorkUnitPacker.getInstance(source, sourceState);
Assert.assertEquals(mypacker.pack(emptyWorkUnit, 1).size(), 0);
// Test bi level packer
sourceState.setProp(KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE,
"org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaBiLevelWorkUnitPacker");
mypacker = KafkaWorkUnitPacker.getInstance(source, sourceState);
Assert.assertEquals(mypacker.pack(emptyWorkUnit, 1).size(), 0);
}
public class TestKafkaWorkUnitPacker extends KafkaWorkUnitPacker {
public TestKafkaWorkUnitPacker(AbstractSource<?, ?> source, SourceState state) {
super(source, state);
}
// Dummy implementation for making abstract class instantiable only.
@Override
public List<WorkUnit> pack(Map<String, List<WorkUnit>> workUnitsByTopic, int numContainers) {
return null;
}
}
} | 3,242 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/serialize/MD5DigestTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.codec.digest.DigestUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class MD5DigestTest {
  /** fromString() must reject input that is not a valid hex MD5 string. */
  @Test
  public void testInvalidString() {
    String foobar = "clearly-bad-md5string";
    try {
      MD5Digest md5Digest = MD5Digest.fromString(foobar);
      Assert.fail("Should have thrown an exception");
    }
    catch (Exception e)
    {
      // Fixed: the format string previously had no {} placeholder, so SLF4J silently
      // dropped the e.getMessage() argument and the exception detail was never logged.
      log.info("Found expected exception: {}", e.getMessage());
    }
  }

  /** Round-trips an MD5 between its byte[] and hex-string representations. */
  @Test
  public void testValidString()
      throws NoSuchAlgorithmException, UnsupportedEncodingException {
    String message = "3432rdaesdfdsf2443223 234 324324 23423 e23e 23d";
    byte[] md5digest = MessageDigest.getInstance("MD5").digest(message.getBytes("UTF-8"));
    String md5String = DigestUtils.md5Hex(message);
    Assert.assertNotNull(md5digest);
    // bytes -> MD5Digest must agree with the independently computed hex string.
    MD5Digest md5 = MD5Digest.fromBytes(md5digest);
    Assert.assertEquals(md5.asString(), md5String);
    Assert.assertEquals(md5.asBytes(), md5digest);
    // string -> MD5Digest must reproduce the same bytes.
    MD5Digest otherMd5 = MD5Digest.fromString(md5String);
    Assert.assertEquals(otherMd5.asBytes(), md5.asBytes());
  }
}
| 3,243 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/writer/KafkaWriterHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import lombok.extern.slf4j.Slf4j;
/**
 * Tests for {@link KafkaWriterHelper} producer-property resolution.
 */
@Slf4j
public class KafkaWriterHelperTest {

  /**
   * Producer-prefixed properties must take precedence over shared Kafka
   * properties for the same key, while shared-only keys still flow through.
   */
  @Test
  public void testSharedConfig() {
    final String producerPrefix = KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX;
    final String sharedPrefix = ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX;

    Properties input = new Properties();
    input.setProperty(producerPrefix + "key1", "value1");
    input.setProperty(producerPrefix + "key2", "value2");
    input.setProperty(sharedPrefix + ".key1", "sharedValue1");
    input.setProperty(sharedPrefix + ".key3", "sharedValue3");

    Properties resolved = KafkaWriterHelper.getProducerProperties(input);

    // key1 is defined in both scopes: the producer-specific value wins.
    Assert.assertEquals(resolved.getProperty("key1"), "value1");
    // key2 exists only in the producer scope.
    Assert.assertEquals(resolved.getProperty("key2"), "value2");
    // key3 exists only in the shared scope and falls through.
    Assert.assertEquals(resolved.getProperty("key3"), "sharedValue3");
  }
}
| 3,244 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/writer/TestTypeMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import org.apache.gobblin.types.FieldMappingException;
import org.apache.gobblin.types.TypeMapper;
/**
 * A no-op {@link TypeMapper} stub used by the writer-config tests to verify that a
 * custom mapper class can be loaded by its configured class name; it never resolves
 * any field.
 */
public class TestTypeMapper implements TypeMapper {
  /** Always returns {@code null}: this stub performs no actual field resolution. */
  @Override
  public Object getField(Object record, String fieldPath)
      throws FieldMappingException {
    return null;
  }
}
| 3,245 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/writer/KafkaWriterCommonConfigTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.types.AvroGenericRecordTypeMapper;
import org.apache.gobblin.util.ConfigUtils;
public class KafkaWriterCommonConfigTest {
@Test
public void testEmptyConstructor()
throws ConfigurationException {
Properties properties = new Properties();
Config config = ConfigUtils.propertiesToConfig(properties);
KafkaWriterCommonConfig kafkaWriterCommonConfig = new KafkaWriterCommonConfig(config);
Assert.assertEquals(kafkaWriterCommonConfig.isKeyed(), false);
Assert.assertEquals(kafkaWriterCommonConfig.getKeyField(), null);
Assert.assertEquals(kafkaWriterCommonConfig.getTypeMapper().getClass().getCanonicalName(),
AvroGenericRecordTypeMapper.class.getCanonicalName());
Assert.assertEquals(kafkaWriterCommonConfig.getValueField(), "*");
}
@Test
public void testKeyedConstructor() {
Properties properties = new Properties();
properties.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
try {
Config config = ConfigUtils.propertiesToConfig(properties);
KafkaWriterCommonConfig kafkaWriterCommonConfig = new KafkaWriterCommonConfig(config);
Assert.fail("Should fail to construct with keyed writes set to true, without setting key field");
} catch (ConfigurationException ce) {
// Expected
}
properties.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, "key");
try {
Config config = ConfigUtils.propertiesToConfig(properties);
KafkaWriterCommonConfig kafkaWriterCommonConfig = new KafkaWriterCommonConfig(config);
Assert.assertEquals(kafkaWriterCommonConfig.isKeyed(), true);
Assert.assertEquals(kafkaWriterCommonConfig.getKeyField(), "key");
// Check default type mapper is AvroGenericRecord based
Assert.assertEquals(kafkaWriterCommonConfig.getTypeMapper().getClass().getCanonicalName(),
AvroGenericRecordTypeMapper.class.getCanonicalName());
Assert.assertEquals(kafkaWriterCommonConfig.getValueField(), "*");
} catch (ConfigurationException ce) {
Assert.fail("Should successfully construct with keyed writes set to true, and with setting key field", ce);
}
properties.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_TYPEMAPPERCLASS_CONFIG, TestTypeMapper.class.getCanonicalName());
try {
Config config = ConfigUtils.propertiesToConfig(properties);
KafkaWriterCommonConfig kafkaWriterCommonConfig = new KafkaWriterCommonConfig(config);
Assert.assertEquals(kafkaWriterCommonConfig.isKeyed(), true);
Assert.assertEquals(kafkaWriterCommonConfig.getKeyField(), "key");
Assert.assertEquals(kafkaWriterCommonConfig.getTypeMapper().getClass().getCanonicalName(),
TestTypeMapper.class.getCanonicalName());
Assert.assertEquals(kafkaWriterCommonConfig.getValueField(), "*");
} catch (ConfigurationException ce) {
Assert.fail("Should successfully construct", ce);
}
properties.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, "foobar");
try {
Config config = ConfigUtils.propertiesToConfig(properties);
KafkaWriterCommonConfig kafkaWriterCommonConfig = new KafkaWriterCommonConfig(config);
Assert.assertEquals(kafkaWriterCommonConfig.isKeyed(), true);
Assert.assertEquals(kafkaWriterCommonConfig.getKeyField(), "key");
Assert.assertEquals(kafkaWriterCommonConfig.getTypeMapper().getClass().getCanonicalName(),
TestTypeMapper.class.getCanonicalName());
Assert.assertEquals(kafkaWriterCommonConfig.getValueField(), "foobar");
}
catch (ConfigurationException ce) {
Assert.fail("Should successfully construct", ce);
}
}
}
| 3,246 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/schemareg/HttpClientFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import org.apache.commons.httpclient.HttpClient;
import org.testng.Assert;
import org.testng.annotations.Test;
public class HttpClientFactoryTest {
@Test
public void testCreate() {
HttpClientFactory httpClientFactory = new HttpClientFactory();
httpClientFactory.setHttpMethodRetryHandlerClass(GobblinHttpMethodRetryHandler.class.getName());
HttpClient client = httpClientFactory.create();
Assert.assertNotNull(client);
}
} | 3,247 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/schemareg/GobblinHttpMethodRetryHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import java.net.UnknownHostException;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpMethod;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
public class GobblinHttpMethodRetryHandlerTest {
@Test
public void testRetryMethod() {
GobblinHttpMethodRetryHandler gobblinHttpMethodRetryHandler = new GobblinHttpMethodRetryHandler(1, false);
HttpMethod mockHttpMethod = Mockito.mock(HttpMethod.class);
//GobblinHttpHandler.retryMethod should return true on UnknownHostException
Assert.assertTrue(gobblinHttpMethodRetryHandler.retryMethod(mockHttpMethod, new UnknownHostException("dummyException"), 0));
Assert.assertTrue(gobblinHttpMethodRetryHandler.retryMethod(mockHttpMethod, new UnknownHostException("dummyException"), 1));
//Return false when the retry count is exceeded
Assert.assertFalse(gobblinHttpMethodRetryHandler.retryMethod(mockHttpMethod, new UnknownHostException("dummyException"), 2));
//Ensure the GobblinHttpMethodRetryHandler has the same behavior as the DefaultHttpMethodRetryHandler for a normal
//IOException
DefaultHttpMethodRetryHandler defaultHttpMethodRetryHandler = new DefaultHttpMethodRetryHandler(1, false);
boolean shouldRetryWithGobblinRetryHandler = gobblinHttpMethodRetryHandler.retryMethod(mockHttpMethod, new IOException("dummyException"), 0);
boolean shouldRetryWithDefaultRetryHandler = defaultHttpMethodRetryHandler.retryMethod(mockHttpMethod, new IOException("dummyException"), 0);
Assert.assertTrue(shouldRetryWithGobblinRetryHandler);
Assert.assertEquals(shouldRetryWithDefaultRetryHandler, shouldRetryWithGobblinRetryHandler);
shouldRetryWithGobblinRetryHandler = gobblinHttpMethodRetryHandler.retryMethod(mockHttpMethod, new IOException("dummyException"), 2);
shouldRetryWithDefaultRetryHandler = defaultHttpMethodRetryHandler.retryMethod(mockHttpMethod, new IOException("dummyException"), 2);
Assert.assertFalse(shouldRetryWithGobblinRetryHandler);
Assert.assertEquals(shouldRetryWithDefaultRetryHandler, shouldRetryWithGobblinRetryHandler);
}
} | 3,248 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/test/java/org/apache/gobblin/kafka/schemareg/CachingKafkaSchemaRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import static org.mockito.Mockito.*;
import org.testng.Assert;
import org.testng.annotations.Test;
import lombok.extern.slf4j.Slf4j;
/**
 * Tests for {@link CachingKafkaSchemaRegistry}: verifies the per-name schema cap
 * and that register/getById results are served from the cache instead of hitting
 * the underlying registry repeatedly.
 */
@Slf4j
public class CachingKafkaSchemaRegistryTest {

  /**
   * With a cap of 2 schemas per name, registering a third distinct schema
   * instance under the same name must fail.
   */
  @Test
  public void testMaxReferences()
      throws IOException, SchemaRegistryException {
    KafkaSchemaRegistry<Integer, String> baseRegistry = mock(KafkaSchemaRegistry.class);
    String name = "test";
    // Deliberately distinct String instances (equal content) to exercise the
    // cache's reference accounting.
    String schema1 = new String("schema");
    String schema2 = new String("schema");
    String schema3 = new String("schema");
    Integer id1 = 1;
    Integer id2 = 2;
    Integer id3 = 3;

    CachingKafkaSchemaRegistry<Integer, String> cachingReg = new CachingKafkaSchemaRegistry<>(baseRegistry, 2);

    when(baseRegistry.register(name, schema1)).thenReturn(id1);
    Assert.assertEquals(cachingReg.register(name, schema1), id1);

    when(baseRegistry.register(name, schema2)).thenReturn(id2);
    Assert.assertEquals(cachingReg.register(name, schema2), id2);

    when(baseRegistry.register(name, schema3)).thenReturn(id3);
    try {
      cachingReg.register(name, schema3);
      Assert.fail("Should have thrown an exception");
    } catch (Exception e) {
      // Expected. Use a {} placeholder so SLF4J actually emits the message
      // (the original no-placeholder call dropped the argument).
      log.info("Found expected exception: {}", e.getMessage());
    }
  }

  /**
   * Registering the same schema twice must return the cached id and only call
   * the underlying registry once.
   */
  @Test
  public void testRegisterSchemaCaching()
      throws IOException, SchemaRegistryException {
    KafkaSchemaRegistry<Integer, String> baseRegistry = mock(KafkaSchemaRegistry.class);
    String name = "test";
    String schema1 = new String("schema");
    Integer id1 = 1;

    CachingKafkaSchemaRegistry<Integer, String> cachingReg = new CachingKafkaSchemaRegistry<>(baseRegistry, 2);
    when(baseRegistry.register(name, schema1)).thenReturn(id1);
    Assert.assertEquals(cachingReg.register(name, schema1), id1);

    // Even if the underlying registry would now hand out a different id,
    // the cached id must be returned.
    Integer id2 = 2;
    when(baseRegistry.register(name, schema1)).thenReturn(id2);
    Assert.assertEquals(cachingReg.register(name, schema1), id1);
    // Ensure that we only called baseRegistry.register once.
    verify(baseRegistry, times(1)).register(anyString(), anyString());
  }

  /**
   * getById must be served from the cache after the first lookup.
   */
  @Test
  public void testIdSchemaCaching()
      throws IOException, SchemaRegistryException {
    KafkaSchemaRegistry<Integer, String> baseRegistry = mock(KafkaSchemaRegistry.class);
    String schema1 = new String("schema");
    Integer id1 = 1;

    CachingKafkaSchemaRegistry<Integer, String> cachingReg = new CachingKafkaSchemaRegistry<>(baseRegistry, 2);
    when(baseRegistry.getById(id1)).thenReturn(schema1);
    String schemaReturned = cachingReg.getById(id1);
    Assert.assertEquals(schemaReturned, schema1, "Schema returned by id should be the same");
    verify(baseRegistry, times(1)).getById(anyInt());

    // Even if the underlying registry would now return a different schema,
    // the cached one must come back without another underlying call.
    when(baseRegistry.getById(id1)).thenReturn(new String("schema2"));
    Assert.assertEquals(cachingReg.getById(id1), schemaReturned);
    verify(baseRegistry, times(1)).getById(anyInt());
  }

  /**
   * register must also populate the id-to-schema cache so a subsequent getById
   * never touches the underlying registry.
   */
  @Test
  public void testRegisterShouldCacheIds()
      throws IOException, SchemaRegistryException {
    KafkaSchemaRegistry<Integer, String> baseRegistry = mock(KafkaSchemaRegistry.class);
    CachingKafkaSchemaRegistry<Integer, String> cachingReg = new CachingKafkaSchemaRegistry<>(baseRegistry, 2);
    String name = "test";
    String schema1 = new String("schema");
    Integer id1 = 1;

    // first register name, schema1, get back id1
    when(baseRegistry.register(name, schema1)).thenReturn(id1);
    Assert.assertEquals(cachingReg.register(name, schema1), id1);

    // getById should hit the cache and return schema1, never calling the base registry.
    when(baseRegistry.getById(id1)).thenReturn(new String("schema2"));
    Assert.assertEquals(cachingReg.getById(id1), schema1);
    verify(baseRegistry, times(0)).getById(anyInt());
  }
}
| 3,249 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/KafkaCommonUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import static org.apache.gobblin.configuration.ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY;
@Slf4j
public class KafkaCommonUtil {
  public static final long KAFKA_FLUSH_TIMEOUT_SECONDS = 15L;
  public static final String MAP_KEY_VALUE_DELIMITER_KEY = "->";
  public static final Splitter LIST_SPLITTER = Splitter.on(",").trimResults().omitEmptyStrings();

  /**
   * Runs {@code runnable} on a dedicated single-use thread, throwing
   * {@link TimeoutException} (and interrupting the task) if it does not finish
   * within the given timeout.
   *
   * @param runnable the work to execute
   * @param timeout  maximum time to wait
   * @param timeUnit unit of {@code timeout}
   * @throws TimeoutException if the task does not complete in time
   * @throws Exception        whatever the task itself threw, unwrapped
   */
  public static void runWithTimeout(final Runnable runnable, long timeout, TimeUnit timeUnit) throws Exception {
    runWithTimeout(() -> {
      runnable.run();
      return null;
    }, timeout, timeUnit);
  }

  /**
   * Runs {@code callable} on a dedicated single-thread executor and returns its
   * result, throwing {@link TimeoutException} (and cancelling/interrupting the
   * task) if it does not finish within the given timeout. Exceptions thrown by
   * the task are unwrapped from {@link ExecutionException} and rethrown as-is.
   *
   * @return the callable's result
   */
  public static <T> T runWithTimeout(Callable<T> callable, long timeout, TimeUnit timeUnit) throws Exception {
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Future<T> future = executor.submit(callable);
    // shutdown() lets the already-submitted task run but frees the thread afterwards.
    executor.shutdown();
    try {
      return future.get(timeout, timeUnit);
    } catch (TimeoutException e) {
      // stop the running thread via interrupt
      future.cancel(true);
      throw e;
    } catch (ExecutionException e) {
      // unwrap the root cause so callers see the task's own exception type
      Throwable t = e.getCause();
      if (t instanceof Error) {
        throw (Error) t;
      } else if (t instanceof Exception) {
        throw (Exception) t;
      } else {
        throw new IllegalStateException(t);
      }
    }
  }

  /**
   * Parses the {@code KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY} property — a
   * comma-separated list of {@code brokerUri->simpleName} entries — into a map.
   *
   * <p>Malformed entries (missing the {@code ->} delimiter, or an empty key) are
   * logged and skipped instead of throwing {@code ArrayIndexOutOfBoundsException},
   * and key/value parts are trimmed so padded delimiters ("a -> b") do not leak
   * whitespace into the map.</p>
   *
   * @param state job state holding the configuration
   * @return broker URI to simple name map; empty if the property is absent
   */
  public static Map<String, String> getKafkaBrokerToSimpleNameMap(State state) {
    Map<String, String> kafkaBrokerUriToSimpleName = new HashMap<>();
    if (!state.contains(KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY)) {
      log.warn("Configuration does not contain value for {}", KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY);
      return kafkaBrokerUriToSimpleName;
    }

    String mapStr = state.getProp(KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY);
    for (String entry : LIST_SPLITTER.splitToList(mapStr)) {
      // limit=2 keeps any "->" inside the value intact and guarantees at most 2 parts.
      String[] items = entry.split(MAP_KEY_VALUE_DELIMITER_KEY, 2);
      if (items.length != 2 || items[0].trim().isEmpty()) {
        log.warn("Skipping malformed entry '{}' in {}", entry, KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY);
        continue;
      }
      kafkaBrokerUriToSimpleName.put(items[0].trim(), items[1].trim());
    }
    return kafkaBrokerUriToSimpleName;
  }
}
| 3,250 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/KafkaReportingFormats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import com.codahale.metrics.ScheduledReporter;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.kafka.KafkaAvroEventKeyValueReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroEventReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaEventReporter;
import org.apache.gobblin.metrics.reporter.KeyValueEventObjectReporter;
import org.apache.gobblin.metrics.reporter.KeyValueMetricObjectReporter;
import org.apache.gobblin.metrics.kafka.KafkaReporter;
import org.apache.gobblin.metrics.kafka.PusherUtils;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Kafka reporting formats enumeration.
 *
 * <p>Each constant knows how to build both a metrics reporter and an events reporter
 * for a given broker list and topic. Shared resolution of the pusher class name and
 * the kafka config fallback chain lives in private helpers instead of being
 * duplicated per constant.</p>
 */
public enum KafkaReportingFormats {

  AVRO() {
    @Override
    public void buildMetricsReporter(String brokers, String topic, Properties properties)
        throws IOException {
      KafkaAvroReporter.Builder<?> builder = KafkaAvroReporter.BuilderFactory.newBuilder();
      if (Boolean.valueOf(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY,
          ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY))) {
        builder.withSchemaRegistry(new KafkaAvroSchemaRegistry(properties));
        String schemaId = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_METRICS_KAFKA_AVRO_SCHEMA_ID);
        if (!Strings.isNullOrEmpty(schemaId)) {
          builder.withSchemaId(schemaId);
        }
      }
      builder.build(brokers, topic, properties);
    }

    @Override
    public ScheduledReporter buildEventsReporter(String brokers, String topic, MetricContext context,
        Properties properties)
        throws IOException {
      KafkaAvroEventReporter.Builder<?> builder = KafkaAvroEventReporter.Factory.forContext(context);
      if (Boolean.valueOf(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY,
          ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY))) {
        builder.withSchemaRegistry(new KafkaAvroSchemaRegistry(properties));
        String schemaId = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_AVRO_SCHEMA_ID);
        if (!Strings.isNullOrEmpty(schemaId)) {
          builder.withSchemaId(schemaId);
        }
      }
      builder.withPusherClassName(resolveEventsPusherClassName(properties));
      Config allConfig = ConfigUtils.propertiesToConfig(properties);
      builder.withConfig(allConfig);
      builder.withKafkaConfig(resolveKafkaConfig(allConfig));
      return builder.build(brokers, topic);
    }
  },

  AVRO_KEY_VALUE() {
    @Override
    public void buildMetricsReporter(String brokers, String topic, Properties properties)
        throws IOException {
      // Key-value output only makes sense for events.
      throw new IOException("Unsupported format for Metric reporting " + this.name());
    }

    @Override
    public ScheduledReporter buildEventsReporter(String brokers, String topic, MetricContext context,
        Properties properties)
        throws IOException {
      KafkaAvroEventKeyValueReporter.Builder<?> builder = KafkaAvroEventKeyValueReporter.Factory.forContext(context);
      if (properties.containsKey(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKAPUSHERKEYS)) {
        List<String> keys = Splitter.on(",").omitEmptyStrings().trimResults()
            .splitToList(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKAPUSHERKEYS));
        builder.withKeys(keys);
      }
      if (Boolean.valueOf(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY,
          ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY))) {
        builder.withSchemaRegistry(new KafkaAvroSchemaRegistry(properties));
        String schemaId = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_AVRO_SCHEMA_ID);
        if (!Strings.isNullOrEmpty(schemaId)) {
          builder.withSchemaId(schemaId);
        }
      }
      builder.withPusherClassName(resolveEventsPusherClassName(properties));
      Config allConfig = ConfigUtils.propertiesToConfig(properties);
      builder.withConfig(allConfig);
      builder.withKafkaConfig(resolveKafkaConfig(allConfig));
      return builder.build(brokers, topic);
    }
  },

  JSON() {
    @Override
    public void buildMetricsReporter(String brokers, String topic, Properties properties)
        throws IOException {
      KafkaReporter.Builder builder = KafkaReporter.BuilderFactory.newBuilder();
      builder.build(brokers, topic, properties);
    }

    @Override
    public ScheduledReporter buildEventsReporter(String brokers, String topic, MetricContext context,
        Properties properties)
        throws IOException {
      KafkaEventReporter.Builder builder = KafkaEventReporter.Factory.forContext(context);
      builder.withPusherClassName(resolveEventsPusherClassName(properties));
      Config allConfig = ConfigUtils.propertiesToConfig(properties);
      builder.withConfig(allConfig);
      builder.withKafkaConfig(resolveKafkaConfig(allConfig));
      return builder.build(brokers, topic);
    }
  },

  PLAIN_OBJECT() {
    @Override
    public void buildMetricsReporter(String brokers, String topic, Properties properties)
        throws IOException {
      KeyValueMetricObjectReporter.Builder builder = new KeyValueMetricObjectReporter.Builder();
      builder.namespaceOverride(KafkaReporterUtils.extractOverrideNamespace(properties));
      Config allConfig = ConfigUtils.propertiesToConfig(properties);
      Config config = ConfigUtils.getConfigOrEmpty(allConfig, ConfigurationKeys.METRICS_REPORTING_CONFIGURATIONS_PREFIX)
          .withFallback(allConfig);
      builder.build(brokers, topic, config);
    }

    @Override
    public ScheduledReporter buildEventsReporter(String brokers, String topic, MetricContext context,
        Properties properties)
        throws IOException {
      KeyValueEventObjectReporter.Builder builder = new KeyValueEventObjectReporter.Builder(context);
      Config allConfig = ConfigUtils.propertiesToConfig(properties);
      Config config =
          ConfigUtils.getConfigOrEmpty(allConfig, ConfigurationKeys.METRICS_REPORTING_EVENTS_CONFIGURATIONS_PREFIX)
              .withFallback(allConfig);
      builder.withConfig(config);
      builder.namespaceOverride(KafkaReporterUtils.extractOverrideNamespace(properties));
      return builder.build(brokers, topic);
    }
  };

  /**
   * Resolves the pusher class name for event reporting: the events-specific key wins,
   * falling back to the generic key and then the default pusher class.
   */
  private static String resolveEventsPusherClassName(Properties properties) {
    return properties.containsKey(PusherUtils.KAFKA_PUSHER_CLASS_NAME_KEY_FOR_EVENTS)
        ? properties.getProperty(PusherUtils.KAFKA_PUSHER_CLASS_NAME_KEY_FOR_EVENTS)
        : properties.getProperty(PusherUtils.KAFKA_PUSHER_CLASS_NAME_KEY, PusherUtils.DEFAULT_KAFKA_PUSHER_CLASS_NAME);
  }

  /**
   * The kafka configuration is composed of the metrics-reporting-specific keys with
   * a fallback to the shared kafka config.
   */
  private static Config resolveKafkaConfig(Config allConfig) {
    return ConfigUtils.getConfigOrEmpty(allConfig, PusherUtils.METRICS_REPORTING_KAFKA_CONFIG_PREFIX)
        .withFallback(ConfigUtils.getConfigOrEmpty(allConfig, ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX));
  }

  /**
   * Method to build reporters that emit metrics. This method does not return anything but schedules/starts the reporter internally
   * @param brokers Kafka broker to connect
   * @param topic Kafka topic to publish data
   * @param properties Properties to build configurations from
   * @throws IOException
   */
  public abstract void buildMetricsReporter(String brokers, String topic, Properties properties)
      throws IOException;

  /**
   * Method to build reporters that emit events.
   * @param brokers Kafka broker to connect
   * @param topic Kafka topic to publish data
   * @param context MetricContext to report
   * @param properties Properties to build configurations from
   * @return an instance of the event reporter
   * @throws IOException
   */
  public abstract ScheduledReporter buildEventsReporter(String brokers, String topic, MetricContext context,
      Properties properties)
      throws IOException;
}
| 3,251 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter/KeyValueMetricObjectReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.StringJoiner;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.kafka.PusherUtils;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ConfigUtils;
/**
* This is a raw metric (MetricReport) key value reporter that reports metrics as GenericRecords without serialization
* Configuration for this reporter start with the prefix "metrics.reporting"
*/
@Slf4j
public class KeyValueMetricObjectReporter extends MetricReportReporter {

  private static final String PUSHER_CONFIG = "pusherConfig";
  private static final String PUSHER_CLASS = "pusherClass";
  private static final String PUSHER_KEYS = "pusherKeys";
  private static final String KEY_DELIMITER = ",";
  private static final String KEY_SIZE_KEY = "keySize";

  // Field paths inside the MetricReport used to assemble the message key; null when PUSHER_KEYS is not configured.
  private List<String> keys;
  // Fallback key: a random number generated once per reporter instance.
  protected final String randomKey;
  protected KeyValuePusher pusher;
  // MetricReport schema with name/namespace overridden for the destination topic.
  protected final Schema schema;

  /**
   * Creates the reporter, instantiating the configured {@link KeyValuePusher} (registered with the
   * closer for cleanup) and precomputing the topic-specific Avro schema and fallback key.
   *
   * @param builder builder carrying the brokers, topic and namespace overrides
   * @param config reporter configuration; {@code pusherConfig} falls back to {@code config} itself
   */
  public KeyValueMetricObjectReporter(Builder builder, Config config) {
    super(builder, config);

    Config pusherConfig = ConfigUtils.getConfigOrEmpty(config, PUSHER_CONFIG).withFallback(config);
    String pusherClassName =
        ConfigUtils.getString(config, PUSHER_CLASS, PusherUtils.DEFAULT_KEY_VALUE_PUSHER_CLASS_NAME);
    this.pusher = (KeyValuePusher) PusherUtils
        .getPusher(pusherClassName, builder.brokers, builder.topic, Optional.of(pusherConfig));
    this.closer.register(this.pusher);

    this.randomKey = String.valueOf(
        new Random().nextInt(ConfigUtils.getInt(config, KEY_SIZE_KEY, ConfigurationKeys.DEFAULT_REPORTER_KEY_SIZE)));
    if (config.hasPath(PUSHER_KEYS)) {
      // Assign the field directly (the original shadowed it with a local) and reuse KEY_DELIMITER.
      this.keys =
          Splitter.on(KEY_DELIMITER).omitEmptyStrings().trimResults().splitToList(config.getString(PUSHER_KEYS));
    } else {
      log.info(
          "Key not assigned from config. Please set it with property {} Using randomly generated number {} as key ",
          ConfigurationKeys.METRICS_REPORTING_PUSHERKEYS, randomKey);
    }

    this.schema =
        AvroUtils.overrideNameAndNamespace(MetricReport.getClassSchema(), builder.topic, builder.namespaceOverride);
  }

  /**
   * Converts the report to the topic-specific schema and pushes it as a single key/value message.
   * Reports that fail conversion are logged and dropped.
   */
  @Override
  protected void emitReport(MetricReport report) {
    GenericRecord record;
    try {
      record = AvroUtils.convertRecordSchema(report, schema);
    } catch (IOException e) {
      log.error("Unable to generate generic data record", e);
      return;
    }
    this.pusher.pushKeyValueMessages(Lists.newArrayList(Pair.of(buildKey(record), record)));
  }

  /**
   * Builds the message key by joining the configured field values with {@link #KEY_DELIMITER}.
   * Falls back to {@link #randomKey} when no keys are configured or any configured field is
   * missing from the record.
   */
  private String buildKey(GenericRecord report) {
    if (this.keys == null || this.keys.isEmpty()) {
      return randomKey;
    }
    StringJoiner joiner = new StringJoiner(KEY_DELIMITER);
    for (String keyPart : keys) {
      Optional<Object> value = AvroUtils.getFieldValue(report, keyPart);
      if (!value.isPresent()) {
        log.error("{} not found in the MetricReport. Setting key to {}", keyPart, randomKey);
        return randomKey;
      }
      joiner.add(value.get().toString());
    }
    return joiner.toString();
  }

  public static class Builder extends MetricReportReporter.Builder<Builder> {
    protected String brokers;
    protected String topic;
    protected Optional<Map<String, String>> namespaceOverride = Optional.absent();

    public Builder namespaceOverride(Optional<Map<String, String>> namespaceOverride) {
      this.namespaceOverride = namespaceOverride;
      return self();
    }

    public KeyValueMetricObjectReporter build(String brokers, String topic, Config config)
        throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KeyValueMetricObjectReporter(this, config);
    }

    @Override
    protected Builder self() {
      return this;
    }
  }
}
| 3,252 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter/KeyValueEventObjectReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Random;
import java.util.StringJoiner;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.kafka.PusherUtils;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ConfigUtils;
/**
* This is a raw event (GobblinTrackingEvent) key value reporter that reports events as GenericRecords without serialization
* Configuration for this reporter start with the prefix "metrics.reporting.events"
*/
@Slf4j
public class KeyValueEventObjectReporter extends EventReporter {

  private static final String PUSHER_CONFIG = "pusherConfig";
  private static final String PUSHER_CLASS = "pusherClass";
  private static final String PUSHER_KEYS = "pusherKeys";
  private static final String KEY_DELIMITER = ",";
  private static final String KEY_SIZE_KEY = "keySize";

  // Field paths inside the GobblinTrackingEvent used to assemble the message key; null when PUSHER_KEYS is absent.
  protected List<String> keys;
  // Fallback key: a random number generated once per reporter instance.
  protected final String randomKey;
  protected KeyValuePusher pusher;
  // GobblinTrackingEvent schema with name/namespace overridden for the destination topic.
  protected final Schema schema;

  /**
   * Creates the reporter, instantiating the configured {@link KeyValuePusher} (registered with the
   * closer for cleanup) and precomputing the topic-specific Avro schema and fallback key.
   *
   * @param builder builder carrying the config, brokers, topic and namespace overrides
   */
  public KeyValueEventObjectReporter(Builder builder) {
    super(builder);

    Config config = builder.config;
    Config pusherConfig = ConfigUtils.getConfigOrEmpty(config, PUSHER_CONFIG).withFallback(config);
    String pusherClassName =
        ConfigUtils.getString(config, PUSHER_CLASS, PusherUtils.DEFAULT_KEY_VALUE_PUSHER_CLASS_NAME);
    this.pusher = (KeyValuePusher) PusherUtils
        .getPusher(pusherClassName, builder.brokers, builder.topic, Optional.of(pusherConfig));
    this.closer.register(this.pusher);

    this.randomKey = String.valueOf(
        new Random().nextInt(ConfigUtils.getInt(config, KEY_SIZE_KEY, ConfigurationKeys.DEFAULT_REPORTER_KEY_SIZE)));
    if (config.hasPath(PUSHER_KEYS)) {
      // Assign the field directly (the original shadowed it with a local) and reuse KEY_DELIMITER.
      this.keys =
          Splitter.on(KEY_DELIMITER).omitEmptyStrings().trimResults().splitToList(config.getString(PUSHER_KEYS));
    } else {
      log.info(
          "Key not assigned from config. Please set it with property {} Using randomly generated number {} as key ",
          ConfigurationKeys.METRICS_REPORTING_EVENTS_PUSHERKEYS, randomKey);
    }

    this.schema = AvroUtils
        .overrideNameAndNamespace(GobblinTrackingEvent.getClassSchema(), builder.topic, builder.namespaceOverride);
  }

  /**
   * Drains the queue, converting each event to the topic-specific schema, and pushes the batch as
   * key/value messages. Events that fail conversion are still emitted with their original schema
   * (only an error is logged), preserving the original best-effort behavior.
   */
  @Override
  public void reportEventQueue(Queue<GobblinTrackingEvent> queue) {
    List<Pair<String, GenericRecord>> events = Lists.newArrayList();
    GobblinTrackingEvent event;
    while (null != (event = queue.poll())) {
      GenericRecord record = event;
      try {
        record = AvroUtils.convertRecordSchema(event, schema);
      } catch (IOException e) {
        log.error("Unable to generate generic data record", e);
      }
      events.add(Pair.of(buildKey(record), record));
    }
    if (!events.isEmpty()) {
      this.pusher.pushKeyValueMessages(events);
    }
  }

  /**
   * Builds the message key by joining the configured field values with {@link #KEY_DELIMITER}.
   * Falls back to {@link #randomKey} when no keys are configured or any configured field is
   * missing from the record.
   */
  private String buildKey(GenericRecord record) {
    if (this.keys == null || this.keys.isEmpty()) {
      return randomKey;
    }
    StringJoiner joiner = new StringJoiner(KEY_DELIMITER);
    for (String keyPart : keys) {
      Optional<Object> value = AvroUtils.getFieldValue(record, keyPart);
      if (!value.isPresent()) {
        log.info("{} not found in the GobblinTrackingEvent. Setting key to {}", keyPart, randomKey);
        return randomKey;
      }
      joiner.add(value.get().toString());
    }
    return joiner.toString();
  }

  public static class Builder extends EventReporter.Builder<Builder> {
    protected String brokers;
    protected String topic;
    protected Optional<Map<String, String>> namespaceOverride = Optional.absent();

    public Builder(MetricContext context) {
      super(context);
    }

    @Override
    protected Builder self() {
      return this;
    }

    public Builder namespaceOverride(Optional<Map<String, String>> namespaceOverride) {
      this.namespaceOverride = namespaceOverride;
      return self();
    }

    public KeyValueEventObjectReporter build(String brokers, String topic) {
      this.brokers = brokers;
      this.topic = topic;
      return new KeyValueEventObjectReporter(this);
    }
  }
}
| 3,253 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter/KeyValuePusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.metrics.kafka.Pusher;
/**
 * A {@link Pusher} that, in addition to plain messages, can push messages paired with a key
 * (e.g. a message key for the downstream sink).
 *
 * @param <K> type of the message key
 * @param <V> type of the message value
 */
public interface KeyValuePusher<K, V> extends Pusher<V> {

  /**
   * Pushes the given (key, value) pairs to the backing sink.
   *
   * @param messages list of key/value pairs to push
   */
  void pushKeyValueMessages(List<Pair<K, V>> messages);
}
| 3,254 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter/util/SchemaRegistryVersionWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter.util;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.util.ConfigUtils;
/**
* Implementation of {@link org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter} that uses a
* {@link org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry} to get Schema version identifier and write it to
* {@link java.io.DataOutputStream}.
*/
@Slf4j
public class SchemaRegistryVersionWriter implements SchemaVersionWriter<Schema> {
private final KafkaAvroSchemaRegistry registry;
private Map<Schema, String> registrySchemaIds;
private final String overrideName;
private final Schema schema;
private final String schemaId;
private final int schemaIdLengthBytes;
public SchemaRegistryVersionWriter(Config config)
throws SchemaRegistryException {
this(new KafkaAvroSchemaRegistry(ConfigUtils.configToProperties(config)), null, null, null);
}
public SchemaRegistryVersionWriter(KafkaAvroSchemaRegistry registry, @Nullable String overrideName) throws SchemaRegistryException {
this(registry, overrideName, null);
}
public SchemaRegistryVersionWriter(KafkaAvroSchemaRegistry registry, @Nullable String overrideName, @Nullable Schema singleSchema)
throws SchemaRegistryException {
this(registry, overrideName, singleSchema, null);
}
public SchemaRegistryVersionWriter(KafkaAvroSchemaRegistry registry, @Nullable String overrideName, @Nullable Schema singleSchema, @Nullable String schemaId)
throws SchemaRegistryException {
this.registry = registry;
this.registrySchemaIds = Maps.newConcurrentMap();
this.overrideName = overrideName;
this.schema = singleSchema;
this.schemaIdLengthBytes = registry.getSchemaIdLengthByte();
if ((this.schema != null) && (schemaId == null)) {
this.schemaId =
(!Strings.isNullOrEmpty(this.overrideName)) ? this.registry.register(this.schema, this.overrideName)
: this.registry.register(this.schema);
} else {
if (schemaId != null) {
log.info("Skipping registering schema with schema registry. Using schema with id: {}", schemaId);
}
this.schemaId = schemaId;
}
}
@Override
public void writeSchemaVersioningInformation(Schema schema, DataOutputStream outputStream)
throws IOException {
String schemaId = this.schemaId != null ? this.schemaId : this.getIdForSchema(schema);
outputStream.writeByte(KafkaAvroSchemaRegistry.MAGIC_BYTE);
try {
outputStream.write(Hex.decodeHex(schemaId.toCharArray()));
} catch (DecoderException exception) {
throw new IOException(exception);
}
}
private String getIdForSchema(Schema schema) {
if (!this.registrySchemaIds.containsKey(schema)) {
try {
String schemaId = !Strings.isNullOrEmpty(this.overrideName) ? this.registry.register(schema, this.overrideName)
: this.registry.register(schema);
this.registrySchemaIds.put(schema, schemaId);
} catch (SchemaRegistryException e) {
throw Throwables.propagate(e);
}
}
return this.registrySchemaIds.get(schema);
}
@Override
public Schema readSchemaVersioningInformation(DataInputStream inputStream)
throws IOException {
String hexKey = getSchemaHexKey(inputStream);
try {
return this.registry.getSchemaByKey(hexKey);
} catch (SchemaRegistryException sre) {
throw new IOException("Failed to retrieve schema for key " + hexKey, sre);
}
}
@Override
public void advanceInputStreamToRecord(DataInputStream inputStream) throws IOException {
getSchemaHexKey(inputStream);
}
private String getSchemaHexKey(DataInputStream inputStream) throws IOException {
if (inputStream.readByte() != KafkaAvroSchemaRegistry.MAGIC_BYTE) {
throw new IOException("MAGIC_BYTE not found in Avro message.");
}
byte[] byteKey = new byte[schemaIdLengthBytes];
int bytesRead = inputStream.read(byteKey, 0, schemaIdLengthBytes);
if (bytesRead != schemaIdLengthBytes) {
throw new IOException(String
.format("Could not read enough bytes for schema id. Expected: %d, found: %d.", schemaIdLengthBytes,
bytesRead));
}
return Hex.encodeHexString(byteKey);
}
}
| 3,255 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/reporter/util/KafkaReporterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter.util;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.Schema;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
public class KafkaReporterUtils {
  public static final String METRIC_REPORT_AVRO_SCHEMA_FILE = "MetricReport.avsc";
  public static final String GOBBLIN_TRACKING_EVENT_AVRO_SCHEMA_FILE = "GobblinTrackingEvent.avsc";

  private static final Splitter SPLIT_BY_COMMA = Splitter.on(",").omitEmptyStrings().trimResults();
  private static final Splitter SPLIT_BY_COLON = Splitter.on(":").omitEmptyStrings().trimResults();

  /***
   * This method extracts Map of namespaces to override in Kafka schema from Config.
   *
   * Example config:
   * kafka.schemaRegistry.overrideNamespace = namespace1:replacement1,namespace2:replacement2
   *
   * For the above example, this method will create a Map with values:
   * {
   * "namespace1" : "replacement1",
   * "namespace2" : "replacement2"
   * }
   *
   * @param properties Properties properties.
   * @return Map of namespace overrides, absent when the property is missing or empty.
   * @throws RuntimeException when an entry is not of the form original:replacement
   */
  public static Optional<Map<String, String>> extractOverrideNamespace(Properties properties) {
    if (properties.containsKey(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE)) {
      Map<String, String> namespaceOverridesMap = Maps.newHashMap();
      List<String> namespaceOverrides = Lists.newArrayList(SPLIT_BY_COMMA.split(properties
          .getProperty(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE)));
      for (String namespaceOverride : namespaceOverrides) {
        List<String> override = Lists.newArrayList(SPLIT_BY_COLON.split(namespaceOverride));
        if (override.size() != 2) {
          throw new RuntimeException("Namespace override should be of the format originalNamespace:replacementNamespace,"
              + " found: " + namespaceOverride);
        }
        namespaceOverridesMap.put(override.get(0), override.get(1));
      }
      // If no entry was found in the config value, report the override map as absent.
      if (!namespaceOverridesMap.isEmpty()) {
        return Optional.of(namespaceOverridesMap);
      }
    }
    return Optional.<Map<String, String>>absent();
  }

  /** Returns true when a metrics topic (metrics-specific or default) is configured. */
  public static boolean isMetricsEnabled(Properties properties) {
    return getMetricsTopic(properties).or(getDefaultTopic(properties)).isPresent();
  }

  /** Returns true when an events topic (events-specific or default) is configured. */
  public static boolean isEventsEnabled(Properties properties) {
    return getEventsTopic(properties).or(getDefaultTopic(properties)).isPresent();
  }

  /** Topic used when no metrics/events-specific topic is configured. */
  public static Optional<String> getDefaultTopic(Properties properties) {
    return Optional.fromNullable(properties.getProperty(ConfigurationKeys.METRICS_KAFKA_TOPIC));
  }

  public static Optional<String> getMetricsTopic(Properties properties) {
    return Optional.fromNullable(properties.getProperty(ConfigurationKeys.METRICS_KAFKA_TOPIC_METRICS));
  }

  public static Optional<String> getEventsTopic(Properties properties) {
    return Optional.fromNullable(properties.getProperty(ConfigurationKeys.METRICS_KAFKA_TOPIC_EVENTS));
  }

  /** Whether Kafka reporting is enabled at all (metrics and events). */
  public static boolean isKafkaReportingEnabled(Properties properties) {
    return Boolean.parseBoolean(
        properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_ENABLED));
  }

  /** Whether serialized Avro messages should carry schema-registry ids. */
  public static boolean isKafkaAvroSchemaRegistryEnabled(Properties properties) {
    return Boolean.parseBoolean(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY));
  }

  /** Parses the bundled {@value #METRIC_REPORT_AVRO_SCHEMA_FILE} resource. */
  public static Schema getMetricReportSchema() throws IOException {
    return new Schema.Parser()
        .parse(KafkaReporterUtils.class.getClassLoader().getResourceAsStream(METRIC_REPORT_AVRO_SCHEMA_FILE));
  }

  /** Parses the bundled {@value #GOBBLIN_TRACKING_EVENT_AVRO_SCHEMA_FILE} resource. */
  public static Schema getGobblinTrackingEventSchema() throws IOException {
    return new Schema.Parser()
        .parse(KafkaReporterUtils.class.getClassLoader().getResourceAsStream(GOBBLIN_TRACKING_EVENT_AVRO_SCHEMA_FILE));
  }
}
| 3,256 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaMetricReporterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.Properties;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.CustomCodahaleReporterFactory;
import org.apache.gobblin.metrics.KafkaReportingFormats;
import org.apache.gobblin.metrics.MetricReporterException;
import org.apache.gobblin.metrics.ReporterSinkType;
import org.apache.gobblin.metrics.ReporterType;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
@Slf4j
public class KafkaMetricReporterFactory implements CustomCodahaleReporterFactory {

  /**
   * Builds a Kafka metrics reporter from the given properties when Kafka reporting and a metrics
   * topic are configured. Always returns {@code null}: the reporter is created via
   * {@link KafkaReportingFormats#buildMetricsReporter} rather than handed back to the registry.
   *
   * @throws MetricReporterException when required Kafka configuration is missing or the reporter
   *         cannot be created
   */
  @Override
  public ScheduledReporter newScheduledReporter(MetricRegistry registry, Properties properties)
      throws IOException {
    // Kafka reporting must be explicitly enabled.
    if (!Boolean.parseBoolean(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY,
        ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_ENABLED))) {
      return null;
    }

    boolean metricsEnabled = KafkaReporterUtils.isMetricsEnabled(properties);
    // Reuse the flag computed above instead of re-evaluating it (the original called
    // isMetricsEnabled twice).
    if (metricsEnabled) {
      log.info("Metrics enabled --- Reporting metrics to Kafka");
    }

    Optional<String> metricsTopic = KafkaReporterUtils.getMetricsTopic(properties);
    Optional<String> defaultTopic = KafkaReporterUtils.getDefaultTopic(properties);
    try {
      Preconditions.checkArgument(properties.containsKey(ConfigurationKeys.METRICS_KAFKA_BROKERS),
          "Kafka metrics brokers missing.");
      Preconditions.checkArgument(metricsTopic.or(defaultTopic).isPresent(), "Kafka topic missing.");
    } catch (IllegalArgumentException exception) {
      throw new MetricReporterException("Missing Kafka configuration(s).", exception, ReporterType.METRIC, ReporterSinkType.KAFKA);
    }

    String brokers = properties.getProperty(ConfigurationKeys.METRICS_KAFKA_BROKERS);
    String metricsReportingFormat = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_FORMAT,
        ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_FORMAT);
    KafkaReportingFormats formatEnum;
    try {
      formatEnum = KafkaReportingFormats.valueOf(metricsReportingFormat.toUpperCase());
    } catch (IllegalArgumentException exception) {
      // Unknown format names degrade to JSON rather than failing the job.
      log.warn(
          "Kafka metrics reporting format " + metricsReportingFormat + " not recognized. Will report in json format.",
          exception);
      formatEnum = KafkaReportingFormats.JSON;
    }

    if (metricsEnabled) {
      try {
        formatEnum.buildMetricsReporter(brokers, metricsTopic.or(defaultTopic).get(), properties);
      } catch (IOException exception) {
        throw new MetricReporterException("Failed to create Kafka metrics reporter.", exception, ReporterType.METRIC, ReporterSinkType.KAFKA);
      }
    }
    return null;
  }
}
| 3,257 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/PusherFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.StringNameSharedResourceKey;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
* Basic resource factory to create shared {@link Pusher} instance
*/
@Slf4j
public abstract class PusherFactory<T, S extends ScopeType<S>> implements SharedResourceFactory<Pusher<T>, StringNameSharedResourceKey, S> {

  private static final String FACTORY_NAME = "pusher";
  private static final String PUSHER_CLASS = "class";

  // Default configuration: fall back to a LoggingPusher when no pusher class is configured.
  private static final Config FALLBACK = ConfigFactory.parseMap(
      ImmutableMap.<String, Object>builder()
          .put(PUSHER_CLASS, LoggingPusher.class.getName())
          .build());

  @Override
  public String getName() {
    return FACTORY_NAME;
  }

  /**
   * Instantiates the configured {@link Pusher} implementation reflectively, passing the scoped
   * config to its constructor. On any reflective failure a {@link LoggingPusher} is used instead
   * so resource creation never fails outright.
   */
  @SuppressWarnings("unchecked") // the configured class is only checked against Pusher<T> at runtime
  @Override
  public SharedResourceFactoryResponse<Pusher<T>> createResource(SharedResourcesBroker<S> broker,
      ScopedConfigView<S, StringNameSharedResourceKey> config)
      throws NotConfiguredException {
    Config pusherConfig = config.getConfig().withFallback(FALLBACK);
    String pusherClass = pusherConfig.getString(PUSHER_CLASS);
    Pusher<T> pusher;
    try {
      pusher = (Pusher<T>) ConstructorUtils.invokeConstructor(Class.forName(pusherClass), pusherConfig);
    } catch (ReflectiveOperationException e) {
      log.warn("Unable to construct a pusher with class {}. LoggingPusher will be used", pusherClass, e);
      pusher = new LoggingPusher<>();
    }
    return new ResourceInstance<>(pusher);
  }
}
| 3,258 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaEventReporterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.Properties;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.CustomCodahaleReporterFactory;
import org.apache.gobblin.metrics.KafkaReportingFormats;
import org.apache.gobblin.metrics.MetricReporterException;
import org.apache.gobblin.metrics.ReporterSinkType;
import org.apache.gobblin.metrics.ReporterType;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
@Slf4j
public class KafkaEventReporterFactory implements CustomCodahaleReporterFactory {

  /**
   * Builds a Kafka events reporter from the given properties when Kafka reporting and an events
   * topic are configured. The events format falls back to the metrics format when no
   * events-specific format is set. Returns the reporter, or {@code null} when reporting is
   * disabled or no events topic exists.
   *
   * @throws MetricReporterException when required Kafka configuration is missing or the reporter
   *         cannot be created
   */
  @Override
  public ScheduledReporter newScheduledReporter(MetricRegistry registry, Properties properties)
      throws IOException {
    // Kafka reporting must be explicitly enabled.
    if (!Boolean.parseBoolean(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY,
        ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_ENABLED))) {
      return null;
    }

    boolean eventsEnabled = KafkaReporterUtils.isEventsEnabled(properties);
    // Reuse the flag computed above instead of re-evaluating it (the original called
    // isEventsEnabled twice).
    if (eventsEnabled) {
      log.info("Events enabled --- Reporting events to Kafka");
    }

    Optional<String> eventsTopic = KafkaReporterUtils.getEventsTopic(properties);
    Optional<String> defaultTopic = KafkaReporterUtils.getDefaultTopic(properties);
    try {
      Preconditions.checkArgument(properties.containsKey(ConfigurationKeys.METRICS_KAFKA_BROKERS),
          "Kafka metrics brokers missing.");
      Preconditions.checkArgument(eventsTopic.or(defaultTopic).isPresent(), "Kafka topic missing.");
    } catch (IllegalArgumentException exception) {
      throw new MetricReporterException("Missing Kafka configuration(s).", exception, ReporterType.EVENT, ReporterSinkType.KAFKA);
    }

    String brokers = properties.getProperty(ConfigurationKeys.METRICS_KAFKA_BROKERS);
    String metricsReportingFormat = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_KAFKA_FORMAT,
        ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_FORMAT);
    KafkaReportingFormats formatEnum;
    try {
      formatEnum = KafkaReportingFormats.valueOf(metricsReportingFormat.toUpperCase());
    } catch (IllegalArgumentException exception) {
      // Unknown format names degrade to JSON rather than failing the job.
      log.warn(
          "Kafka metrics reporting format " + metricsReportingFormat + " not recognized. Will report in json format.",
          exception);
      formatEnum = KafkaReportingFormats.JSON;
    }

    KafkaReportingFormats eventFormatEnum;
    if (properties.containsKey(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_FORMAT)) {
      String eventsReportingFormat = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_FORMAT,
          ConfigurationKeys.DEFAULT_METRICS_REPORTING_KAFKA_FORMAT);
      try {
        eventFormatEnum = KafkaReportingFormats.valueOf(eventsReportingFormat.toUpperCase());
      } catch (IllegalArgumentException exception) {
        log.warn(
            "Kafka events reporting format " + eventsReportingFormat + " not recognized. Will report in json format.",
            exception);
        eventFormatEnum = KafkaReportingFormats.JSON;
      }
    } else {
      // No events-specific format configured; inherit the metrics format.
      eventFormatEnum = formatEnum;
    }

    if (eventsEnabled) {
      try {
        String eventTopic = eventsTopic.or(defaultTopic).get();
        return eventFormatEnum.buildEventsReporter(brokers, eventTopic, RootMetricContext.get(), properties);
      } catch (IOException exception) {
        throw new MetricReporterException("Failed to create Kafka events reporter.", exception, ReporterType.EVENT, ReporterSinkType.KAFKA);
      }
    }
    return null;
  }
}
| 3,259 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/SchemaRegistryException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
public class SchemaRegistryException extends IOException {
private static final long serialVersionUID = 1L;
public SchemaRegistryException(String message) {
super(message);
}
public SchemaRegistryException(String message, Throwable t) {
super(message, t);
}
public SchemaRegistryException(Throwable t) {
super(t);
}
}
| 3,260 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaEventKeyValueReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import java.util.Queue;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
/**
 * {@link org.apache.gobblin.metrics.reporter.EventReporter} that emits events to Kafka as serialized Avro records with a key.
 * Key for these kafka messages is obtained from values of properties provided via {@link ConfigurationKeys#METRICS_REPORTING_EVENTS_KAFKAPUSHERKEYS}.
 * If the GobblinTrackingEvent does not contain any of the required property, key is set to null. In that case, this reporter
 * will act like a {@link org.apache.gobblin.metrics.kafka.KafkaAvroEventReporter}
 */
@Slf4j
public class KafkaEventKeyValueReporter extends KafkaEventReporter {

  /** Metadata field names whose values are concatenated to form the Kafka message key. */
  private Optional<List<String>> keys = Optional.absent();

  protected KafkaEventKeyValueReporter(Builder<?> builder) throws IOException {
    super(builder);
    if (!builder.keys.isEmpty()) {
      this.keys = Optional.of(builder.keys);
    } else {
      log.warn("Cannot find keys for key-value reporter. Please set it with property {}",
          ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKAPUSHERKEYS);
    }
  }

  /**
   * Drains the queue, pairing each serialized event with its computed key, and
   * pushes the batch to Kafka. Nothing is pushed when the queue is empty.
   */
  @Override
  public void reportEventQueue(Queue<GobblinTrackingEvent> queue) {
    List<Pair<String, byte[]>> messages = Lists.newArrayList();
    for (GobblinTrackingEvent event = queue.poll(); event != null; event = queue.poll()) {
      String key = this.keys.isPresent() ? buildKey(event, this.keys.get()) : null;
      messages.add(Pair.of(key, this.serializer.serializeRecord(event)));
    }
    if (!messages.isEmpty()) {
      this.kafkaPusher.pushMessages(messages);
    }
  }

  /**
   * Concatenates the event-metadata values of {@code keyParts}, in order, into a message key.
   * Returns null as soon as any configured part is missing from the event's metadata.
   */
  private static String buildKey(GobblinTrackingEvent event, List<String> keyParts) {
    StringBuilder keyBuilder = new StringBuilder();
    for (String keyPart : keyParts) {
      if (!event.getMetadata().containsKey(keyPart)) {
        log.debug("{} not found in the GobblinTrackingEvent. Setting key to null.", keyPart);
        return null;
      }
      keyBuilder.append(event.getMetadata().get(keyPart));
    }
    return keyBuilder.toString();
  }

  private static class BuilderImpl extends Builder<BuilderImpl> {

    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static abstract class Factory {
    /**
     * Returns a new {@link Builder} for {@link KafkaEventKeyValueReporter}.
     *
     * @param context the {@link MetricContext} to report
     * @return KafkaAvroReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link KafkaEventKeyValueReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends Builder<T>> extends KafkaEventReporter.Builder<T> {

    private List<String> keys = Lists.newArrayList();

    protected Builder(MetricContext context) {
      super(context);
    }

    /** Sets the metadata field names used to build each message's key. */
    public T withKeys(List<String> keys) {
      this.keys = keys;
      return self();
    }

    /**
     * Builds and returns {@link KafkaAvroEventReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaAvroReporter
     */
    public KafkaEventKeyValueReporter build(String brokers, String topic) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KafkaEventKeyValueReporter(this);
    }
  }
}
| 3,261 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaSchemaRegistryFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.util.Properties;
/**
 * Interface for factories for {@link KafkaSchemaRegistry}.
 *
 * <p>Implementations are looked up by class name (see
 * {@link #KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS}) and must be constructible
 * without arguments so callers can instantiate them reflectively.
 */
public interface KafkaSchemaRegistryFactory {
  /** Configuration key naming the factory implementation class to instantiate. */
  String KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS = "kafka.schema.registry.factory.class";
  /**
   * Create a new KafkaSchemaRegistry
   *
   * @param props properties to pass to {@link KafkaSchemaRegistry} constructor
   * @return the created {@link KafkaSchemaRegistry}
   */
  KafkaSchemaRegistry create(Properties props);
}
| 3,262 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaAvroEventReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import org.apache.avro.Schema;
import com.google.common.base.Optional;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.metrics.reporter.util.SchemaRegistryVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
/**
 * {@link org.apache.gobblin.metrics.reporter.EventReporter} that emits events to Kafka as serialized Avro records.
 */
public class KafkaAvroEventReporter extends KafkaEventReporter {

  protected KafkaAvroEventReporter(Builder<?> builder) throws IOException {
    super(builder);
    // When a schema registry is configured, prefix each record with a registry
    // schema id instead of the default fixed version marker.
    if (builder.registry.isPresent()) {
      Schema schema = KafkaReporterUtils.getGobblinTrackingEventSchema();
      SchemaRegistryVersionWriter versionWriter;
      if (builder.schemaId.isPresent()) {
        versionWriter = new SchemaRegistryVersionWriter(builder.registry.get(), builder.topic, schema,
            builder.schemaId.get());
      } else {
        versionWriter = new SchemaRegistryVersionWriter(builder.registry.get(), builder.topic, schema);
      }
      this.serializer.setSchemaVersionWriter(versionWriter);
    }
  }

  /** Uses binary Avro encoding (rather than the parent's JSON encoding). */
  @Override
  protected AvroSerializer<GobblinTrackingEvent> createSerializer(SchemaVersionWriter schemaVersionWriter)
      throws IOException {
    return new AvroBinarySerializer<>(GobblinTrackingEvent.SCHEMA$, schemaVersionWriter);
  }

  /**
   * Returns a new {@link Builder} for {@link KafkaAvroEventReporter}.
   *
   * @param context the {@link MetricContext} to report
   * @return KafkaAvroReporter builder
   * @deprecated this method is bugged. Use {@link Factory#forContext} instead.
   */
  @Deprecated
  public static Builder<? extends Builder<?>> forContext(MetricContext context) {
    return new BuilderImpl(context);
  }

  private static class BuilderImpl extends Builder<BuilderImpl> {

    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static abstract class Factory {
    /**
     * Returns a new {@link Builder} for {@link KafkaAvroEventReporter}.
     *
     * @param context the {@link MetricContext} to report
     * @return KafkaAvroReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link KafkaAvroEventReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends Builder<T>> extends KafkaEventReporter.Builder<T> {

    private Optional<KafkaAvroSchemaRegistry> registry = Optional.absent();
    private Optional<String> schemaId = Optional.absent();

    private Builder(MetricContext context) {
      super(context);
    }

    /** Configures the schema registry used to resolve schema ids. */
    public T withSchemaRegistry(KafkaAvroSchemaRegistry registry) {
      this.registry = Optional.of(registry);
      return self();
    }

    /** Pins the reporter to an already-registered schema id. */
    public T withSchemaId(String schemaId) {
      this.schemaId = Optional.of(schemaId);
      return self();
    }

    /**
     * Builds and returns {@link KafkaAvroEventReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaAvroReporter
     */
    public KafkaAvroEventReporter build(String brokers, String topic) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KafkaAvroEventReporter(this);
    }
  }
}
| 3,263 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaAvroReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.Properties;
import org.apache.avro.Schema;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.metrics.reporter.util.SchemaRegistryVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
/**
 * Kafka reporter for codahale metrics writing metrics in Avro format.
 *
 * @author ibuenros
 */
public class KafkaAvroReporter extends KafkaReporter {

  protected KafkaAvroReporter(Builder<?> builder, Config config) throws IOException {
    super(builder, config);
    // When a schema registry is configured, prefix serialized reports with a
    // registry schema id instead of the default fixed version marker.
    if (builder.registry.isPresent()) {
      Schema schema = KafkaReporterUtils.getMetricReportSchema();
      SchemaRegistryVersionWriter versionWriter;
      if (builder.schemaId.isPresent()) {
        versionWriter = new SchemaRegistryVersionWriter(builder.registry.get(), builder.topic, schema,
            builder.schemaId.get());
      } else {
        versionWriter = new SchemaRegistryVersionWriter(builder.registry.get(), builder.topic, schema);
      }
      this.serializer.setSchemaVersionWriter(versionWriter);
    }
  }

  /** Uses binary Avro encoding for {@link MetricReport}s. */
  @Override
  protected AvroSerializer<MetricReport> createSerializer(SchemaVersionWriter schemaVersionWriter)
      throws IOException {
    return new AvroBinarySerializer<>(MetricReport.SCHEMA$, schemaVersionWriter);
  }

  /**
   * A static factory class for obtaining new {@link Builder}s
   *
   * @see Builder
   */
  public static class BuilderFactory {

    public static BuilderImpl newBuilder() {
      return new BuilderImpl();
    }
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  /**
   * Builder for {@link KafkaAvroReporter}. Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends Builder<T>> extends KafkaReporter.Builder<T> {

    private Optional<KafkaAvroSchemaRegistry> registry = Optional.absent();
    private Optional<String> schemaId = Optional.absent();

    /** Configures the schema registry used to resolve schema ids. */
    public T withSchemaRegistry(KafkaAvroSchemaRegistry registry) {
      this.registry = Optional.of(registry);
      return self();
    }

    /** Pins the reporter to an already-registered schema id. */
    public T withSchemaId(String schemaId) {
      this.schemaId = Optional.of(schemaId);
      return self();
    }

    /**
     * Builds and returns {@link KafkaAvroReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaAvroReporter
     */
    public KafkaAvroReporter build(String brokers, String topic, Properties props) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      // Create a KafkaAvroReporter seeded with metrics.* and gobblin.kafka.sharedConfig.* keys.
      return new KafkaAvroReporter(this, KafkaReporter.getKafkaAndMetricsConfigFromProperties(props));
    }
  }
}
| 3,264 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/Pusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.Closeable;
import java.util.List;
/**
 * Establish a connection to a Kafka cluster and push messages to a specified topic.
 *
 * <p>Implementations own the connection lifecycle; callers must {@code close()} the
 * pusher when done (it extends {@link Closeable}).
 *
 * @param <M> the message type accepted by this pusher
 */
public interface Pusher<M> extends Closeable {
  /**
   * Push all messages to the Kafka topic.
   * @param messages List of messages to push to Kafka.
   */
  void pushMessages(List<M> messages);
}
| 3,265 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaEventReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import java.util.Queue;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.reporter.EventReporter;
import org.apache.gobblin.metrics.reporter.util.AvroJsonSerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
/**
 * Reports {@link GobblinTrackingEvent} to a Kafka topic serialized as JSON.
 *
 * <p>Both the wire format and the transport are pluggable: subclasses override
 * {@link #createSerializer} to change the serialization, and the {@link Pusher}
 * is chosen via the builder (explicit instance, or instantiated by class name).
 */
@Slf4j
public class KafkaEventReporter extends EventReporter {

  /** Serializes {@link GobblinTrackingEvent}s before they are handed to the pusher. */
  protected final AvroSerializer<GobblinTrackingEvent> serializer;
  /** Transport used to deliver the serialized events to Kafka. */
  protected final Pusher kafkaPusher;

  public KafkaEventReporter(Builder<?> builder) throws IOException {
    super(builder);
    this.serializer = this.closer.register(
        createSerializer(new FixedSchemaVersionWriter()));
    // Prefer an explicitly supplied pusher; otherwise instantiate one by class
    // name, falling back to the default pusher class.
    if(builder.kafkaPusher.isPresent()) {
      this.kafkaPusher = builder.kafkaPusher.get();
    } else {
      String pusherClassName = builder.pusherClassName.or(PusherUtils.DEFAULT_KAFKA_PUSHER_CLASS_NAME);
      this.kafkaPusher = PusherUtils.getPusher(pusherClassName, builder.brokers, builder.topic, builder.kafkaConfig);
    }
    this.closer.register(this.kafkaPusher);
  }

  /**
   * Drains the given queue, serializing each event, then pushes the whole batch
   * to Kafka. Nothing is pushed when the queue is empty.
   */
  @Override
  public void reportEventQueue(Queue<GobblinTrackingEvent> queue) {
    GobblinTrackingEvent nextEvent;
    List<byte[]> events = Lists.newArrayList();
    while(null != (nextEvent = queue.poll())) {
      events.add(this.serializer.serializeRecord(nextEvent));
    }
    if (!events.isEmpty()) {
      log.info("Pushing events to Kafka");
      this.kafkaPusher.pushMessages(events);
    }
  }

  /**
   * Creates the serializer for events; the default is JSON-encoded Avro.
   * Subclasses override this to report in other formats.
   */
  protected AvroSerializer<GobblinTrackingEvent> createSerializer(SchemaVersionWriter schemaVersionWriter) throws IOException {
    return new AvroJsonSerializer<GobblinTrackingEvent>(GobblinTrackingEvent.SCHEMA$, schemaVersionWriter);
  }

  /**
   * Returns a new {@link Builder} for {@link KafkaEventReporter}.
   * Will automatically add all Context tags to the reporter.
   *
   * @param context the {@link MetricContext} to report
   * @return KafkaReporter builder
   * @deprecated this method is bugged. Use {@link Factory#forContext} instead.
   */
  @Deprecated
  public static Builder<? extends Builder> forContext(MetricContext context) {
    return new BuilderImpl(context);
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {

    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static class Factory {
    /**
     * Returns a new {@link Builder} for {@link KafkaEventReporter}.
     * Will automatically add all Context tags to the reporter.
     *
     * @param context the {@link MetricContext} to report
     * @return KafkaReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link KafkaEventReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends EventReporter.Builder<T>>
      extends EventReporter.Builder<T> {

    protected String brokers;
    protected String topic;
    protected Optional<Pusher> kafkaPusher;
    protected Optional<Config> kafkaConfig = Optional.absent();
    protected Optional<String> pusherClassName = Optional.absent();

    protected Builder(MetricContext context) {
      super(context);
      this.kafkaPusher = Optional.absent();
    }

    /**
     * Set {@link Pusher} to use.
     */
    public T withKafkaPusher(Pusher pusher) {
      this.kafkaPusher = Optional.of(pusher);
      return self();
    }

    /**
     * Set additional configuration passed to the pusher.
     */
    public T withKafkaConfig(Config config) {
      this.kafkaConfig = Optional.of(config);
      return self();
    }

    /**
     * Set a {@link Pusher} class name to instantiate when no pusher instance is supplied.
     */
    public T withPusherClassName(String pusherClassName) {
      this.pusherClassName = Optional.of(pusherClassName);
      return self();
    }

    /**
     * Builds and returns {@link KafkaEventReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaReporter
     */
    public KafkaEventReporter build(String brokers, String topic) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KafkaEventReporter(this);
    }
  }
}
| 3,266 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/GobblinScopePusherFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import org.apache.gobblin.broker.StringNameSharedResourceKey;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
 * A {@link PusherFactory} to create a shared {@link Pusher} instance
 * in {@link GobblinScopeTypes}.
 */
public class GobblinScopePusherFactory<T> extends PusherFactory<T, GobblinScopeTypes> {

  /**
   * Returns the scope at which the shared {@link Pusher} is created when the
   * caller does not request one explicitly.
   */
  @Override
  public GobblinScopeTypes getAutoScope(SharedResourcesBroker<GobblinScopeTypes> broker,
      ConfigView<GobblinScopeTypes, StringNameSharedResourceKey> config) {
    // By default, a job level resource
    return GobblinScopeTypes.JOB;
  }
}
| 3,267 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/NoopPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link Pusher} implementation that discards all messages.
 * Useful for disabling metrics/event emission without changing callers.
 *
 * @param <M> message type
 */
@Slf4j
public class NoopPusher<M> implements Pusher<M> {

  public NoopPusher() {}

  public NoopPusher(Config config) {}

  /**
   * Constructor like the one in KafkaProducerPusher for compatibility;
   * all arguments are ignored.
   */
  public NoopPusher(String brokers, String topic, Optional<Config> kafkaConfig) {}

  /** Intentionally drops the given messages. */
  public void pushMessages(List<M> messages) {}

  /** No resources to release. */
  @Override
  public void close() throws IOException {}
}
| 3,268 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaAvroSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.avro.Schema;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.HttpClientFactory;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.util.AvroUtils;
/**
* An implementation of {@link KafkaSchemaRegistry}.
*
* @author Ziyang Liu
*/
@Slf4j
public class KafkaAvroSchemaRegistry extends KafkaSchemaRegistry<String, Schema> {
private static final Logger LOG = LoggerFactory.getLogger(KafkaAvroSchemaRegistry.class);
private static final String GET_RESOURCE_BY_ID = "/id=";
private static final String GET_RESOURCE_BY_TYPE = "/latest_with_type=";
private static final String SCHEMA_ID_HEADER_NAME = "Location";
private static final String SCHEMA_ID_HEADER_PREFIX = "/id=";
public static final int SCHEMA_ID_LENGTH_BYTE = 16;
public static final byte MAGIC_BYTE = 0x0;
protected final GenericObjectPool<HttpClient> httpClientPool;
private final String url;
private final Optional<Map<String, String>> namespaceOverride;
  /**
   * @param props properties should contain property "kafka.schema.registry.url", and optionally
   * "kafka.schema.registry.max.cache.size" (default = 1000) and
   * "kafka.schema.registry.cache.expire.after.write.min" (default = 10).
   */
  public KafkaAvroSchemaRegistry(Properties props) {
    super(props);
    Preconditions.checkArgument(props.containsKey(KAFKA_SCHEMA_REGISTRY_URL),
        String.format("Property %s not provided.", KAFKA_SCHEMA_REGISTRY_URL));
    this.url = props.getProperty(KAFKA_SCHEMA_REGISTRY_URL);
    // Optional namespace remapping applied to schemas before registration.
    this.namespaceOverride = KafkaReporterUtils.extractOverrideNamespace(props);
    // Pool size matches the Kafka work-unit-creation thread count so concurrent
    // schema fetches do not contend for a single HTTP client.
    int objPoolSize =
        Integer.parseInt(props.getProperty(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
            "" + ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT));
    LOG.info("Create HttpClient pool with size " + objPoolSize);
    GenericObjectPoolConfig config = new GenericObjectPoolConfig();
    config.setMaxTotal(objPoolSize);
    config.setMaxIdle(objPoolSize);
    HttpClientFactory factory = new HttpClientFactory();
    // Thread each optional HTTP tuning knob through to the client factory when present.
    if (this.props.containsKey(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_SO_TIMEOUT)) {
      String soTimeout = this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_SO_TIMEOUT);
      factory.setSoTimeout(Integer.parseInt(soTimeout));
    }
    if (this.props.containsKey(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_CONN_TIMEOUT)) {
      String connTimeout = this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_CONN_TIMEOUT);
      factory.setConnTimeout(Integer.parseInt(connTimeout));
    }
    if (this.props.containsKey(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_COUNT)) {
      String retryCount = this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_COUNT);
      factory.setHttpMethodRetryCount(Integer.parseInt(retryCount));
    }
    if (this.props.containsKey(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_REQUEST_RETRY_ENABLED)) {
      String requestRetryEnabled = this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_REQUEST_RETRY_ENABLED);
      factory.setHttpRequestSentRetryEnabled(Boolean.parseBoolean(requestRetryEnabled));
    }
    if (this.props.containsKey(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_HANDLER_CLASS)) {
      String httpMethodRetryHandlerClass = this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_HANDLER_CLASS);
      factory.setHttpMethodRetryHandlerClass(httpMethodRetryHandlerClass);
    }
    this.httpClientPool = new GenericObjectPool<>(factory, config);
  }
  /**
   * Returns the length of schema ids in this schema registry in bytes.
   * Always the fixed value {@link #SCHEMA_ID_LENGTH_BYTE}.
   */
  public int getSchemaIdLengthByte() {
    return SCHEMA_ID_LENGTH_BYTE;
  }
  /**
   * Get schema from schema registry by key
   *
   * @param key Schema key
   * @return Schema with the corresponding key
   * @throws SchemaRegistryException if failed to retrieve schema.
   */
  @Override
  public Schema getSchemaByKey(String key) throws SchemaRegistryException {
    try {
      // cachedSchemasByKeys is inherited from KafkaSchemaRegistry; a lookup
      // failure (e.g. the underlying fetch threw) surfaces as ExecutionException.
      return cachedSchemasByKeys.get(key);
    } catch (ExecutionException e) {
      throw new SchemaRegistryException(String.format("Schema with key %s cannot be retrieved", key), e);
    }
  }
/**
* Get the latest schema of a topic.
*
* @param topic topic name
* @return the latest schema
* @throws SchemaRegistryException if failed to retrieve schema.
*/
@Override
public Schema getLatestSchemaByTopic(String topic) throws SchemaRegistryException {
String schemaUrl = KafkaAvroSchemaRegistry.this.url + GET_RESOURCE_BY_TYPE + topic;
LOG.debug("Fetching from URL : " + schemaUrl);
int retryInterval = Integer.parseInt(this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_RETRY_INTERVAL_IN_MILLIS, Integer.toString(5000)));
int retryTimes = Integer.parseInt(this.props.getProperty(ConfigurationKeys.KAFKA_SCHEMA_REGISTRY_RETRY_TIMES, Integer.toString(10)));
GetMethod get = new GetMethod(schemaUrl);
int statusCode = -1;
String schemaString = "";
HttpClient httpClient = this.borrowClient();
int loop = 0;
try {
while (++loop <= retryTimes) {
try {
statusCode = httpClient.executeMethod(get);
schemaString = get.getResponseBodyAsString();
break;
} catch (Exception e) {
if (loop >= retryTimes) {
throw e;
} else {
log.error("Exception when fetching schema : {}", e.toString());
Thread.sleep(retryInterval);
}
}
}
} catch (HttpException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
get.releaseConnection();
this.httpClientPool.returnObject(httpClient);
}
if (statusCode != HttpStatus.SC_OK) {
throw new SchemaRegistryException(
String.format("Latest schema for topic %s cannot be retrieved. Status code = %d", topic, statusCode));
}
Schema schema;
try {
schema = new Schema.Parser().parse(schemaString);
} catch (Throwable t) {
throw new SchemaRegistryException(String.format("Latest schema for topic %s cannot be retrieved", topic), t);
}
return schema;
}
/**
 * Borrow an {@link HttpClient} from the pool.
 *
 * @return a pooled client; callers must return it via {@code httpClientPool.returnObject}
 * @throws SchemaRegistryException if a client cannot be obtained; the underlying pool failure
 *         is attached as the cause.
 */
private HttpClient borrowClient() throws SchemaRegistryException {
  try {
    return this.httpClientPool.borrowObject();
  } catch (Exception e) {
    // Chain the original exception so the pool failure is diagnosable instead of being dropped.
    throw new SchemaRegistryException("Unable to borrow " + HttpClient.class.getSimpleName(), e);
  }
}
/**
 * Register a schema under the provided name rather than the schema's own name. The schema is
 * renamed before registration so that consumers (e.g. Gobblin's Kafka adaptor or Camus) that
 * look up the latest schema by topic name will find it, allowing any schema to be written to
 * any topic.
 *
 * @param schema {@link org.apache.avro.Schema} to register.
 * @param overrideName name to register the schema under; should match the topic that instances
 *        will be published to.
 * @return schema ID of the registered schema.
 * @throws SchemaRegistryException if registration failed
 */
@Override
public String register(Schema schema, String overrideName) throws SchemaRegistryException {
  Schema renamedSchema = AvroUtils.switchName(schema, overrideName);
  return register(renamedSchema);
}
/**
 * Register a schema with the Kafka schema registry via an HTTP POST.
 *
 * @param schema schema to register
 * @return schema ID assigned by the registry, read from the response header
 * @throws SchemaRegistryException if registration failed
 */
@Override
public synchronized String register(Schema schema) throws SchemaRegistryException {
  // Change namespace if override specified
  if (this.namespaceOverride.isPresent()) {
    schema = AvroUtils.switchNamespace(schema, this.namespaceOverride.get());
  }
  LOG.info("Registering schema " + schema.toString());
  PostMethod post = new PostMethod(url);
  post.addParameter("schema", schema.toString());
  HttpClient httpClient = this.borrowClient();
  try {
    LOG.debug("Loading: " + post.getURI());
    int statusCode = httpClient.executeMethod(post);
    if (statusCode != HttpStatus.SC_CREATED) {
      throw new SchemaRegistryException("Error occurred while trying to register schema: " + statusCode);
    }
    String response = post.getResponseBodyAsString();
    if (response != null) {
      LOG.info("Received response " + response);
    }
    // The registry communicates the assigned ID through a response header of the
    // form "<prefix><id>"; exactly one such header is expected.
    Header[] headers = post.getResponseHeaders(SCHEMA_ID_HEADER_NAME);
    if (headers.length != 1) {
      throw new SchemaRegistryException(
          "Error reading schema id returned by registerSchema call: headers.length = " + headers.length);
    }
    String headerValue = headers[0].getValue();
    if (!headerValue.startsWith(SCHEMA_ID_HEADER_PREFIX)) {
      throw new SchemaRegistryException(
          "Error parsing schema id returned by registerSchema call: header = " + headerValue);
    }
    LOG.info("Registered schema successfully");
    return headerValue.substring(SCHEMA_ID_HEADER_PREFIX.length());
  } catch (Throwable t) {
    // Matches the original contract: any failure (including the SchemaRegistryExceptions
    // thrown above) is surfaced wrapped in a SchemaRegistryException.
    throw new SchemaRegistryException(t);
  } finally {
    post.releaseConnection();
    this.httpClientPool.returnObject(httpClient);
  }
}
/**
 * Fetch a schema from the registry by its ID, bypassing the cache.
 */
@Override
protected Schema fetchSchemaByKey(String key) throws SchemaRegistryException {
  String schemaUrl = KafkaAvroSchemaRegistry.this.url + GET_RESOURCE_BY_ID + key;
  GetMethod get = new GetMethod(schemaUrl);
  HttpClient httpClient = this.borrowClient();
  int statusCode;
  String schemaString;
  try {
    statusCode = httpClient.executeMethod(get);
    schemaString = get.getResponseBodyAsString();
  } catch (IOException e) {
    throw new SchemaRegistryException(e);
  } finally {
    // Release HTTP resources regardless of outcome.
    get.releaseConnection();
    this.httpClientPool.returnObject(httpClient);
  }
  if (statusCode != HttpStatus.SC_OK) {
    throw new SchemaRegistryException(
        String.format("Schema with key %s cannot be retrieved, statusCode = %d", key, statusCode));
  }
  try {
    return new Schema.Parser().parse(schemaString);
  } catch (Throwable t) {
    throw new SchemaRegistryException(String.format("Schema with ID = %s cannot be parsed", key), t);
  }
}
}
| 3,269 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/LoggingPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.reporter.KeyValuePusher;
import org.apache.gobblin.util.ConfigUtils;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link org.apache.gobblin.metrics.reporter.KeyValuePusher} implementation that logs
 * messages instead of sending them anywhere. Useful as a no-op/diagnostic pusher.
 *
 * @param <K> key type
 * @param <V> message type
 */
@Slf4j
public class LoggingPusher<K, V> implements KeyValuePusher<K, V> {
  private final String brokers;
  private final String topic;

  private static final String KAFKA_TOPIC = "kafka.topic";
  private static final String NO_BROKERS = "NoBrokers";
  private static final String NO_TOPIC = "NoTopic";

  public LoggingPusher() {
    this(NO_BROKERS, NO_TOPIC, Optional.absent());
  }

  public LoggingPusher(Config config) {
    this.brokers = ConfigUtils.getString(config, ConfigurationKeys.KAFKA_BROKERS, NO_BROKERS);
    this.topic = ConfigUtils.getString(config, KAFKA_TOPIC, NO_TOPIC);
  }

  /**
   * Constructor like the one in KafkaProducerPusher for compatibility.
   *
   * @param kafkaConfig unused; accepted only for signature compatibility
   */
  public LoggingPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.brokers = brokers;
    this.topic = topic;
  }

  @Override
  public void close()
      throws IOException {
    // Nothing to release: this pusher only writes to the log.
  }

  @Override
  public void pushKeyValueMessages(List<Pair<K, V>> messages) {
    for (Pair<K, V> message : messages) {
      // Pass the value directly: SLF4J renders a null value as "null" instead of the
      // NullPointerException an explicit .toString() would throw.
      log.info("Pushing to {}:{}: {} - {}", this.brokers, this.topic, message.getKey(), message.getValue());
    }
  }

  @Override
  public void pushMessages(List<V> messages) {
    for (V message : messages) {
      log.info("Pushing to {}:{}: {}", this.brokers, this.topic, message);
    }
  }
}
| 3,270 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.Properties;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.MetricReport;
import org.apache.gobblin.metrics.reporter.MetricReportReporter;
import org.apache.gobblin.metrics.reporter.util.AvroJsonSerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import lombok.extern.slf4j.Slf4j;
/**
 * Kafka reporter for metrics.
 *
 * <p>Serializes {@link MetricReport}s to Avro JSON and emits them through a {@link Pusher},
 * which is either supplied explicitly via the builder or instantiated reflectively from the
 * {@code metrics.reporting.kafkaPusherClass} configuration.
 *
 * @author ibuenros
 */
@Slf4j
public class KafkaReporter extends MetricReportReporter {

  /** Config key selecting the {@link SchemaVersionWriter} implementation (class name or alias). */
  public static final String SCHEMA_VERSION_WRITER_TYPE = "metrics.kafka.schemaVersionWriterType";

  private static final String METRICS_KAFKA_PREFIX = "metrics.kafka";

  protected final AvroSerializer<MetricReport> serializer;
  protected final Pusher kafkaPusher;

  protected KafkaReporter(Builder<?> builder, Config config) throws IOException {
    super(builder, config);

    // Resolve the schema version writer: configured class/alias, or the fixed-schema default.
    SchemaVersionWriter versionWriter;
    if (config.hasPath(SCHEMA_VERSION_WRITER_TYPE)) {
      try {
        ClassAliasResolver<SchemaVersionWriter> resolver = new ClassAliasResolver<>(SchemaVersionWriter.class);
        Class<? extends SchemaVersionWriter> klazz = resolver.resolveClass(config.getString(SCHEMA_VERSION_WRITER_TYPE));
        // getDeclaredConstructor().newInstance() instead of the deprecated Class.newInstance():
        // constructor failures arrive as InvocationTargetException (a ReflectiveOperationException)
        // rather than being rethrown unchecked past the catch below.
        versionWriter = klazz.getDeclaredConstructor().newInstance();
      } catch (ReflectiveOperationException roe) {
        throw new IOException("Could not instantiate version writer.", roe);
      }
    } else {
      versionWriter = new FixedSchemaVersionWriter();
    }
    log.info("Schema version writer: " + versionWriter.getClass().getName());
    this.serializer = this.closer.register(createSerializer(versionWriter));

    if (builder.kafkaPusher.isPresent()) {
      this.kafkaPusher = builder.kafkaPusher.get();
    } else {
      // Pusher-specific config takes precedence; shared Kafka config is the fallback.
      Config kafkaConfig = ConfigUtils.getConfigOrEmpty(config, PusherUtils.METRICS_REPORTING_KAFKA_CONFIG_PREFIX)
          .withFallback(ConfigUtils.getConfigOrEmpty(config, ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX));

      String pusherClassName = ConfigUtils.getString(config, PusherUtils.KAFKA_PUSHER_CLASS_NAME_KEY,
          PusherUtils.DEFAULT_KAFKA_PUSHER_CLASS_NAME);

      this.kafkaPusher = PusherUtils.getPusher(pusherClassName, builder.brokers, builder.topic, Optional.of(kafkaConfig));
    }
    this.closer.register(this.kafkaPusher);
  }

  /**
   * Create the serializer used for emitted reports; subclasses may override to change encoding.
   */
  protected AvroSerializer<MetricReport> createSerializer(SchemaVersionWriter schemaVersionWriter) throws IOException {
    return new AvroJsonSerializer<>(MetricReport.SCHEMA$, schemaVersionWriter);
  }

  /**
   * Get config with metrics configuration and shared kafka configuration.
   */
  public static Config getKafkaAndMetricsConfigFromProperties(Properties props) {
    return ConfigUtils.propertiesToConfig(props, Optional.of(ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX))
        .withFallback(ConfigUtils.propertiesToConfig(props,
            Optional.of(ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX)));
  }

  /**
   * A static factory class for obtaining new {@link Builder}s
   *
   * @see Builder
   */
  public static class BuilderFactory {
    public static BuilderImpl newBuilder() {
      return new BuilderImpl();
    }
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {
    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  /**
   * Builder for {@link KafkaReporter}. Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends MetricReportReporter.Builder<T>>
      extends MetricReportReporter.Builder<T> {
    protected String brokers;
    protected String topic;
    protected Optional<Pusher> kafkaPusher;

    protected Builder() {
      super();
      this.name = "KafkaReporter";
      this.kafkaPusher = Optional.absent();
    }

    /**
     * Set {@link Pusher} to use.
     */
    public T withKafkaPusher(Pusher pusher) {
      this.kafkaPusher = Optional.of(pusher);
      return self();
    }

    /**
     * Builds and returns {@link KafkaReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaReporter
     */
    public KafkaReporter build(String brokers, String topic, Properties props) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      // create a KafkaReporter with metrics.* and gobblin.kafka.sharedConfig.* keys
      return new KafkaReporter(this, KafkaReporter.getKafkaAndMetricsConfigFromProperties(props));
    }
  }

  @Override
  protected void emitReport(MetricReport report) {
    this.kafkaPusher.pushMessages(Lists.newArrayList(this.serializer.serializeRecord(report)));
  }
}
| 3,271 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/PusherUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * Utility class holding configuration keys for, and a reflective factory of,
 * {@link Pusher} instances.
 */
public class PusherUtils {
  public static final String METRICS_REPORTING_KAFKA_CONFIG_PREFIX = "metrics.reporting.kafka.config";
  public static final String KAFKA_PUSHER_CLASS_NAME_KEY = "metrics.reporting.kafkaPusherClass";
  public static final String KAFKA_PUSHER_CLASS_NAME_KEY_FOR_EVENTS = "metrics.reporting.events.kafkaPusherClass";
  public static final String DEFAULT_KAFKA_PUSHER_CLASS_NAME = "org.apache.gobblin.metrics.kafka.KafkaPusher";
  public static final String DEFAULT_KEY_VALUE_PUSHER_CLASS_NAME = "org.apache.gobblin.metrics.kafka.LoggingPusher";

  private PusherUtils() {
    // Static utility class; not meant to be instantiated.
  }

  /**
   * Create a {@link Pusher}
   * @param pusherClassName the {@link Pusher} class to instantiate
   * @param brokers brokers to connect to
   * @param topic the topic to write to
   * @param config additional configuration for configuring the {@link Pusher}
   * @return a {@link Pusher}
   * @throws RuntimeException if the class cannot be found or no matching constructor exists
   */
  public static Pusher getPusher(String pusherClassName, String brokers, String topic, Optional<Config> config) {
    try {
      Class<?> pusherClass = Class.forName(pusherClassName);
      // Picks the constructor accepting the longest prefix of (brokers, topic, config).
      return (Pusher) GobblinConstructorUtils.invokeLongestConstructor(pusherClass, brokers, topic, config);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Could not instantiate kafka pusher", e);
    }
  }
}
| 3,272 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.lang.reflect.InvocationTargetException;
import java.util.Properties;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
/**
 * An abstract schema registry class for Kafka, which supports fetching schema by key, fetching the latest schema
 * of a topic, and registering a schema.
 *
 * <p>Schemas fetched by key are cached in a size-bounded, time-expiring Guava {@link LoadingCache}.
 * Failed fetches are tracked per key so repeated lookups of a bad key are rate-limited and
 * eventually stopped (see {@link KafkaSchemaCacheLoader}).
 *
 * @param <K> key type
 * @param <S> schema type
 *
 * @author Ziyang Liu
 */
@Slf4j
public abstract class KafkaSchemaRegistry<K, S> {

  /** Config key naming the concrete {@link KafkaSchemaRegistry} implementation to instantiate. */
  public static final String KAFKA_SCHEMA_REGISTRY_CLASS = "kafka.schema.registry.class";
  public static final String KAFKA_SCHEMA_REGISTRY_URL = "kafka.schema.registry.url";

  // Maximum fetch attempts per key before the key is considered permanently failed.
  // NOTE(review): "TIRES" is a typo for "TRIES"; kept as-is because this is a public constant.
  public static final int GET_SCHEMA_BY_ID_MAX_TIRES = 3;
  /** Minimum time between consecutive fetch attempts for the same failing key. */
  public static final int GET_SCHEMA_BY_ID_MIN_INTERVAL_SECONDS = 1;

  public static final String KAFKA_SCHEMA_REGISTRY_MAX_CACHE_SIZE = "kafka.schema.registry.max.cache.size";
  public static final String DEFAULT_KAFKA_SCHEMA_REGISTRY_MAX_CACHE_SIZE = "1000";
  public static final String KAFKA_SCHEMA_REGISTRY_CACHE_EXPIRE_AFTER_WRITE_MIN =
      "kafka.schema.registry.cache.expire.after.write.min";
  public static final String DEFAULT_KAFKA_SCHEMA_REGISTRY_CACHE_EXPIRE_AFTER_WRITE_MIN = "10";

  protected final Properties props;

  // Cache that stores schemas by keys. Entries expire a configurable number of minutes
  // after write, so schema updates are eventually picked up.
  protected final LoadingCache<K, S> cachedSchemasByKeys;

  protected KafkaSchemaRegistry(Properties props) {
    this.props = props;
    // Cache sizing/expiry come from configuration, with string defaults parsed here.
    int maxCacheSize = Integer.parseInt(
        props.getProperty(KAFKA_SCHEMA_REGISTRY_MAX_CACHE_SIZE, DEFAULT_KAFKA_SCHEMA_REGISTRY_MAX_CACHE_SIZE));
    int expireAfterWriteMin = Integer.parseInt(props.getProperty(KAFKA_SCHEMA_REGISTRY_CACHE_EXPIRE_AFTER_WRITE_MIN,
        DEFAULT_KAFKA_SCHEMA_REGISTRY_CACHE_EXPIRE_AFTER_WRITE_MIN));
    this.cachedSchemasByKeys = CacheBuilder.newBuilder().maximumSize(maxCacheSize)
        .expireAfterWrite(expireAfterWriteMin, TimeUnit.MINUTES).build(new KafkaSchemaCacheLoader());
  }

  /**
   * Reflectively instantiate the {@link KafkaSchemaRegistry} implementation named by
   * {@link #KAFKA_SCHEMA_REGISTRY_CLASS}, passing {@code props} to its constructor.
   *
   * @throws IllegalArgumentException if the class property is missing
   */
  @SuppressWarnings("unchecked")
  public static <K, S> KafkaSchemaRegistry<K, S> get(Properties props) {
    Preconditions.checkArgument(props.containsKey(KAFKA_SCHEMA_REGISTRY_CLASS),
        "Missing required property " + KAFKA_SCHEMA_REGISTRY_CLASS);
    Class<? extends KafkaSchemaRegistry<?, ?>> clazz;
    try {
      clazz =
          (Class<? extends KafkaSchemaRegistry<?, ?>>) Class.forName(props.getProperty(KAFKA_SCHEMA_REGISTRY_CLASS));
      return (KafkaSchemaRegistry<K, S>) ConstructorUtils.invokeConstructor(clazz, props);
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InvocationTargetException
        | InstantiationException e) {
      log.error("Failed to instantiate " + KafkaSchemaRegistry.class, e);
      throw Throwables.propagate(e);
    }
  }

  /**
   * Get schema from schema registry by key.
   * @throws SchemaRegistryException if failed to get schema by key.
   */
  public S getSchemaByKey(K key) throws SchemaRegistryException {
    try {
      return cachedSchemasByKeys.get(key);
    } catch (ExecutionException e) {
      // The cache loader wraps fetch failures; rewrap with the offending key for context.
      throw new SchemaRegistryException(String.format("Schema with key %s cannot be retrieved", key), e);
    }
  }

  /**
   * Fetch schema by key.
   *
   * This method is called in {@link #getSchemaByKey(K)} to fetch the schema if the given key does not exist
   * in the cache.
   * @throws SchemaRegistryException if failed to fetch schema by key.
   */
  protected abstract S fetchSchemaByKey(K key) throws SchemaRegistryException;

  /**
   * Get the latest schema of a topic.
   * @throws SchemaRegistryException if failed to get schema by topic.
   */
  public abstract S getLatestSchemaByTopic(String topic) throws SchemaRegistryException;

  /**
   * Register a schema to the schema registry
   * @return the key of the registered schema.
   * @throws SchemaRegistryException if registration failed.
   */
  public abstract K register(S schema) throws SchemaRegistryException;

  /**
   * Register a schema to the schema registry under the given name
   * @return the key of the registered schema.
   * @throws SchemaRegistryException if registration failed.
   */
  public abstract K register(S schema, String name) throws SchemaRegistryException;

  /**
   * {@link CacheLoader} that delegates to {@link #fetchSchemaByKey} and throttles repeated
   * fetches of keys that previously failed.
   */
  private class KafkaSchemaCacheLoader extends CacheLoader<K, S> {

    // Per-key record of failed fetch attempts. Entries are never removed, so a key that
    // exhausts GET_SCHEMA_BY_ID_MAX_TIRES attempts stays blocked for the life of this
    // registry instance (and the map grows monotonically with distinct failing keys).
    private final ConcurrentMap<K, FailedFetchHistory> failedFetchHistories;

    private KafkaSchemaCacheLoader() {
      super();
      this.failedFetchHistories = Maps.newConcurrentMap();
    }

    @Override
    public S load(K key) throws Exception {
      if (shouldFetchFromSchemaRegistry(key)) {
        try {
          return KafkaSchemaRegistry.this.fetchSchemaByKey(key);
        } catch (SchemaRegistryException e) {
          // Record the failure so subsequent loads of this key are throttled.
          addFetchToFailureHistory(key);
          throw e;
        }
      }
      // Throw exception if we've just tried to fetch this id, or if we've tried too many times for this id.
      throw new SchemaRegistryException(String.format("Schema with key %s cannot be retrieved", key));
    }

    private void addFetchToFailureHistory(K key) {
      // NOTE(review): the putIfAbsent/get/increment/set sequence is not atomic; concurrent
      // loads of the same key may interleave. Likely benign (counts only gate retries),
      // but confirm if exact attempt accounting matters.
      this.failedFetchHistories.putIfAbsent(key, new FailedFetchHistory(System.nanoTime()));
      this.failedFetchHistories.get(key).incrementNumOfAttempts();
      this.failedFetchHistories.get(key).setPreviousAttemptTime(System.nanoTime());
    }

    private boolean shouldFetchFromSchemaRegistry(K key) {
      if (!this.failedFetchHistories.containsKey(key)) {
        return true;
      }
      FailedFetchHistory failedFetchHistory = this.failedFetchHistories.get(key);
      // Retry only while under the attempt cap and after the minimum back-off interval.
      boolean maxTriesNotExceeded = failedFetchHistory.getNumOfAttempts() < GET_SCHEMA_BY_ID_MAX_TIRES;
      boolean minRetryIntervalSatisfied =
          System.nanoTime() - failedFetchHistory.getPreviousAttemptTime() >= TimeUnit.SECONDS
              .toNanos(GET_SCHEMA_BY_ID_MIN_INTERVAL_SECONDS);
      return maxTriesNotExceeded && minRetryIntervalSatisfied;
    }

    /** Mutable record of how often, and how recently, fetching a key has failed. */
    private class FailedFetchHistory {
      private final AtomicInteger numOfAttempts;
      // NOTE(review): plain (non-volatile) long written from loader threads; cross-thread
      // visibility of updates is not guaranteed — confirm or make volatile.
      private long previousAttemptTime;

      private FailedFetchHistory(long previousAttemptTime) {
        this.numOfAttempts = new AtomicInteger();
        this.previousAttemptTime = previousAttemptTime;
      }

      private int getNumOfAttempts() {
        return numOfAttempts.get();
      }

      private long getPreviousAttemptTime() {
        return previousAttemptTime;
      }

      private void setPreviousAttemptTime(long previousAttemptTime) {
        this.previousAttemptTime = previousAttemptTime;
      }

      private void incrementNumOfAttempts() {
        this.numOfAttempts.incrementAndGet();
      }
    }
  }
}
| 3,273 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaAvroEventKeyValueReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import org.apache.avro.Schema;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.metrics.reporter.util.SchemaRegistryVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
/**
 * Implement of {@link KafkaEventKeyValueReporter} for avro records.
 */
@Slf4j
public class KafkaAvroEventKeyValueReporter extends KafkaEventKeyValueReporter {

  protected KafkaAvroEventKeyValueReporter(Builder<?> builder) throws IOException {
    super(builder);
    // When a schema registry is configured, attach a registry-backed version writer so
    // serialized events carry the registered schema version.
    if (builder.registry.isPresent()) {
      KafkaAvroSchemaRegistry registry = builder.registry.get();
      Schema eventSchema = KafkaReporterUtils.getGobblinTrackingEventSchema();
      SchemaRegistryVersionWriter versionWriter;
      if (builder.schemaId.isPresent()) {
        versionWriter =
            new SchemaRegistryVersionWriter(registry, builder.topic, eventSchema, builder.schemaId.get());
      } else {
        versionWriter = new SchemaRegistryVersionWriter(registry, builder.topic, eventSchema);
      }
      this.serializer.setSchemaVersionWriter(versionWriter);
    }
  }

  @Override
  protected AvroSerializer<GobblinTrackingEvent> createSerializer(SchemaVersionWriter schemaVersionWriter)
      throws IOException {
    return new AvroBinarySerializer<>(GobblinTrackingEvent.SCHEMA$, schemaVersionWriter);
  }

  private static class BuilderImpl extends Builder<BuilderImpl> {
    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static abstract class Factory {
    /**
     * Returns a new {@link Builder} for {@link KafkaAvroEventKeyValueReporter}.
     *
     * @param context the {@link MetricContext} to report
     * @return KafkaAvroReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link KafkaAvroEventKeyValueReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds.
   */
  public static abstract class Builder<T extends Builder<T>> extends KafkaEventKeyValueReporter.Builder<T> {
    private Optional<KafkaAvroSchemaRegistry> registry = Optional.absent();
    private Optional<String> schemaId = Optional.absent();

    private Builder(MetricContext context) {
      super(context);
    }

    public T withSchemaRegistry(KafkaAvroSchemaRegistry registry) {
      this.registry = Optional.of(registry);
      return self();
    }

    public T withSchemaId(String schemaId) {
      this.schemaId = Optional.of(schemaId);
      return self();
    }

    /**
     * Builds and returns {@link KafkaAvroEventReporter}.
     *
     * @param brokers string of Kafka brokers
     * @param topic topic to send metrics to
     * @return KafkaAvroReporter
     */
    public KafkaAvroEventKeyValueReporter build(String brokers, String topic) throws IOException {
      this.brokers = brokers;
      this.topic = topic;
      return new KafkaAvroEventKeyValueReporter(this);
    }
  }
}
| 3,274 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/metrics/kafka/KafkaAvroSchemaRegistryFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.util.Properties;
/**
* Factory for {@link KafkaAvroSchemaRegistry}
*/
public class KafkaAvroSchemaRegistryFactory implements KafkaSchemaRegistryFactory {
public KafkaSchemaRegistry create(Properties props) {
return new KafkaAvroSchemaRegistry(props);
}
}
| 3,275 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/LiKafkaByteArrayMsgToAvroConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryFactory;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
import org.apache.gobblin.kafka.serialize.LiAvroDeserializerBase;
import org.apache.gobblin.kafka.serialize.SerializationException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.util.EmptyIterable;
import lombok.extern.slf4j.Slf4j;
/**
 * Converts LiKafka byte array messages into avro.
 */
@Slf4j
public class LiKafkaByteArrayMsgToAvroConverter<S> extends ToAvroConverterBase<S, byte[]> {
  KafkaSchemaRegistry schemaRegistry;
  LiAvroDeserializerBase deserializer;

  @Override
  public Converter<S, Schema, byte[], GenericRecord> init(WorkUnitState workUnit) {
    // Build the deserializer on top of a registry configured from the work unit.
    this.schemaRegistry = KafkaSchemaRegistryFactory.getSchemaRegistry(workUnit.getProperties());
    this.deserializer = new LiAvroDeserializerBase(this.schemaRegistry);
    return this;
  }

  @Override
  public Schema convertSchema(S schemaIn, WorkUnitState workUnit)
      throws SchemaConversionException {
    Preconditions.checkArgument(workUnit.contains(KafkaSource.TOPIC_NAME), "Must specify topic name.");
    String topicName = workUnit.getProp(KafkaSource.TOPIC_NAME);
    try {
      // The output schema is the topic's latest registered schema.
      return (Schema) this.schemaRegistry.getLatestSchema(topicName);
    } catch (IOException | SchemaRegistryException e) {
      throw new SchemaConversionException(e);
    }
  }

  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, byte[] inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    String topicName = workUnit.getProp(KafkaSource.TOPIC_NAME);
    try {
      GenericRecord decoded = this.deserializer.deserialize(topicName, inputRecord, outputSchema);
      return new SingleRecordIterable<>(decoded);
    } catch (SerializationException e) {
      // Skip records that cannot be decoded instead of failing the task.
      log.error("Cannot decode one record.", e);
      return new EmptyIterable<>();
    }
  }
}
| 3,276 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/EnvelopeSchemaConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutionException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import com.google.common.base.Optional;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import javax.xml.bind.DatatypeConverter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.filter.AvroProjectionConverter;
import org.apache.gobblin.converter.filter.AvroSchemaFieldRemover;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistryFactory;
import org.apache.gobblin.util.AvroUtils;
/**
* A converter for extracting schema/records from an envelope schema.
* Input schema: envelope schema - must have fields payloadSchemaId (the schema registry key of the output
* schema) and payload (byte data for output record)
* Input record: record corresponding to input schema
* Output schema: schema obtained from schema registry using key provided in input record's {@link #PAYLOAD_SCHEMA_ID_FIELD}
* Output record: record corresponding to output schema obtained from input record's {@link #PAYLOAD_FIELD} as bytes
*
* @deprecated use {@link EnvelopePayloadExtractingConverter}
*/
@Deprecated
public class EnvelopeSchemaConverter extends Converter<Schema, String, GenericRecord, GenericRecord> {

  public static final String PAYLOAD_SCHEMA_ID_FIELD = "EnvelopeSchemaConverter.schemaIdField";
  public static final String PAYLOAD_FIELD = "EnvelopeSchemaConverter.payloadField";
  public static final String DEFAULT_PAYLOAD_SCHEMA_ID_FIELD = "payloadSchemaId";
  public static final String DEFAULT_PAYLOAD_FIELD = "payload";
  public static final String DEFAULT_KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS = "org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory";

  // Optional remover used to strip configured fields from the output schema/records.
  private Optional<AvroSchemaFieldRemover> fieldRemover;
  private KafkaSchemaRegistry registry;
  private DecoderFactory decoderFactory;
  // Datum readers cached per payload schema, since reader construction is relatively expensive.
  private LoadingCache<Schema, GenericDatumReader<GenericRecord>> readers;

  /**
   * To remove certain fields from the Avro schema or records of a topic/table, set property
   * {topic/table name}.remove.fields={comma-separated, fully qualified field names} in workUnit.
   */
  @Override
  public EnvelopeSchemaConverter init(WorkUnitState workUnit) {
    // Always initialize the remover. Previously it was only assigned when
    // EXTRACT_TABLE_NAME_KEY was present, leaving it null and causing an NPE in
    // convertRecord() for work units without a table name.
    this.fieldRemover = Optional.absent();
    if (workUnit.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY)) {
      String removeFieldsPropName = workUnit.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY) + AvroProjectionConverter.REMOVE_FIELDS;
      if (workUnit.contains(removeFieldsPropName)) {
        this.fieldRemover = Optional.of(new AvroSchemaFieldRemover(workUnit.getProp(removeFieldsPropName)));
      }
    }
    String registryFactoryField = workUnit.contains(KafkaSchemaRegistryFactory.KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS) ?
        workUnit.getProp(KafkaSchemaRegistryFactory.KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS) : DEFAULT_KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS;
    try {
      KafkaSchemaRegistryFactory registryFactory =
          ((Class<? extends KafkaSchemaRegistryFactory>) Class.forName(registryFactoryField)).newInstance();
      this.registry = registryFactory.create(workUnit.getProperties());
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
      // Fail fast with context instead of returning null, which previously surfaced
      // later as an opaque NPE in the task.
      throw new RuntimeException("Unable to instantiate schema registry factory: " + registryFactoryField, e);
    }
    this.decoderFactory = DecoderFactory.get();
    this.readers = CacheBuilder.newBuilder().build(new CacheLoader<Schema, GenericDatumReader<GenericRecord>>() {
      @Override
      public GenericDatumReader<GenericRecord> load(final Schema key) throws Exception {
        return new GenericDatumReader<>(key);
      }
    });
    return this;
  }

  /**
   * Do nothing, actual schema must be obtained from records.
   */
  @Override
  public String convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return EnvelopeSchemaConverter.class.getName();
  }

  /**
   * Get actual schema from registry and deserialize payload using it.
   */
  @Override
  public Iterable<GenericRecord> convertRecord(String outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      String schemaIdField = workUnit.contains(PAYLOAD_SCHEMA_ID_FIELD) ?
          workUnit.getProp(PAYLOAD_SCHEMA_ID_FIELD) : DEFAULT_PAYLOAD_SCHEMA_ID_FIELD;
      String payloadField = workUnit.contains(PAYLOAD_FIELD) ?
          workUnit.getProp(PAYLOAD_FIELD) : DEFAULT_PAYLOAD_FIELD;
      String schemaKey = String.valueOf(inputRecord.get(schemaIdField));
      Schema payloadSchema = (Schema) this.registry.getSchemaByKey(schemaKey);
      byte[] payload = getPayload(inputRecord, payloadField);
      GenericRecord outputRecord = deserializePayload(payload, payloadSchema);
      if (this.fieldRemover.isPresent()) {
        payloadSchema = this.fieldRemover.get().removeFields(payloadSchema);
      }
      return new SingleRecordIterable<>(AvroUtils.convertRecordSchema(outputRecord, payloadSchema));
    } catch (IOException | ExecutionException e) {
      throw new DataConversionException(e);
    }
  }

  /**
   * Get payload field from GenericRecord and convert to byte array.
   *
   * <p>The payload bytes are expected to be a UTF-8 hex string which is decoded into
   * the raw Avro-encoded bytes.
   */
  public byte[] getPayload(GenericRecord inputRecord, String payloadFieldName) {
    ByteBuffer bb = (ByteBuffer) inputRecord.get(payloadFieldName);
    byte[] payloadBytes;
    if (bb.hasArray()) {
      payloadBytes = bb.array();
    } else {
      payloadBytes = new byte[bb.remaining()];
      bb.get(payloadBytes);
    }
    String hexString = new String(payloadBytes, StandardCharsets.UTF_8);
    return DatatypeConverter.parseHexBinary(hexString);
  }

  /**
   * Deserialize payload using payload schema.
   *
   * @param payload Avro binary-encoded payload bytes
   * @param payloadSchema writer schema of the payload
   */
  public GenericRecord deserializePayload(byte[] payload, Schema payloadSchema) throws IOException, ExecutionException {
    Decoder decoder = this.decoderFactory.binaryDecoder(payload, null);
    GenericDatumReader<GenericRecord> reader = this.readers.get(payloadSchema);
    return reader.read(null, decoder);
  }
}
| 3,277 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/KafkaSchemaChangeInjector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.apache.avro.Schema;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.ControlMessageInjector;
import org.apache.gobblin.stream.MetadataUpdateControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
/**
* A {@link org.apache.gobblin.stream.MetadataUpdateControlMessage} that detects changes in the latest schema and notifies downstream constructs by
* injecting a {@link org.apache.gobblin.stream.MetadataUpdateControlMessage}.
*/
public abstract class KafkaSchemaChangeInjector<S>
extends ControlMessageInjector<Schema, DecodeableKafkaRecord> {
@VisibleForTesting
@Getter(AccessLevel.PACKAGE)
private KafkaSchemaRegistry<String, Schema> schemaRegistry;
@VisibleForTesting
@Getter(AccessLevel.PACKAGE)
private Cache<S, String> schemaCache;
private Schema latestSchema;
private GlobalMetadata<Schema> globalMetadata;
// classes that extend this need to implement getSchemaIdentifier
protected abstract S getSchemaIdentifier(DecodeableKafkaRecord consumerRecord);
@Override
public ControlMessageInjector<Schema, DecodeableKafkaRecord> init(WorkUnitState workUnitState) {
this.schemaRegistry = KafkaSchemaRegistry.get(workUnitState.getProperties());
this.schemaCache = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS).build();
return this;
}
@Override
public void setInputGlobalMetadata(GlobalMetadata<Schema> inputGlobalMetadata, WorkUnitState workUnitState) {
this.globalMetadata = inputGlobalMetadata;
}
/**
* Inject a {@link org.apache.gobblin.stream.MetadataUpdateControlMessage} if the latest schema has changed. Check whether there is a new latest
* schema if the input record's schema is not present in the schema cache.
*
* @param inputRecordEnvelope input record envelope
* @param workUnitState work unit state
* @return the injected messages
*/
@Override
public Iterable<ControlMessage<DecodeableKafkaRecord>> injectControlMessagesBefore(
RecordEnvelope<DecodeableKafkaRecord> inputRecordEnvelope, WorkUnitState workUnitState) {
DecodeableKafkaRecord consumerRecord = inputRecordEnvelope.getRecord();
S schemaIdentifier = getSchemaIdentifier(consumerRecord);
String topicName = consumerRecord.getTopic();
// If a new schema is seen then check the latest schema in the registry has changed.
// Only check for the latest schema when a new schema is seen since the call to get the latest schema is not
// cacheable and is expensive.
if (this.schemaCache.getIfPresent(schemaIdentifier) == null) {
try {
Schema latestSchema = this.schemaRegistry.getLatestSchemaByTopic(topicName);
this.schemaCache.put(schemaIdentifier, "");
// latest schema changed, so inject a metadata update control message
if (!latestSchema.equals(this.latestSchema)) {
// update the metadata in this injector since the control message is only applied downstream
this.globalMetadata = GlobalMetadata.builderWithInput(this.globalMetadata, Optional.of(latestSchema)).build();
// update the latestSchema
this.latestSchema = latestSchema;
// inject a metadata update control message before the record so that the downstream constructs
// are aware of the new schema before processing the record
ControlMessage datasetLevelMetadataUpdate = new MetadataUpdateControlMessage(this.globalMetadata);
return Collections.singleton(datasetLevelMetadataUpdate);
}
} catch (SchemaRegistryException e) {
throw new RuntimeException("Exception when getting the latest schema for topic " + topicName, e);
}
}
// no schema change detected
return null;
}
@Override
public Iterable<ControlMessage<DecodeableKafkaRecord>> injectControlMessagesAfter(
RecordEnvelope<DecodeableKafkaRecord> inputRecordEnvelope, WorkUnitState workUnitState) {
return null;
}
} | 3,278 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/EnvelopePayloadConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.util.AvroUtils;
/**
* A converter decorates the envelope record with its payload deserialized into schema'ed object
*
* <p> Given an envelope schema as the input schema, the output schema will have the payload
* field, configured by key {@value PAYLOAD_FIELD}, set with its latest schema fetched from a
* {@link #registry} (see {@code createLatestPayloadField(Field)}). The converter copies the other fields
* from the input schema to the output schema
*
* <p> Given an envelope record as the input record, the output record will have the payload set
* to its deserialized object using the latest schema (see {@code convertPayload(GenericRecord)}).
* The converter copies the other fields from the input record to the output record
*
* <p> If the current payload schema is incompatible with its latest schema, {@code convertPayload(GenericRecord)}
* will throw an exception and the job fail
*/
public class EnvelopePayloadConverter extends BaseEnvelopeSchemaConverter<GenericRecord> {
  public static final String DECORATED_PAYLOAD_DOC = "Decorated payload data";

  /**
   * Builds the output schema by copying every field of the envelope schema, except the
   * payload field, which is replaced by one carrying the latest registered payload schema.
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    Schema outputSchema = Schema
        .createRecord(inputSchema.getName(), inputSchema.getDoc(), inputSchema.getNamespace(), inputSchema.isError());
    List<Field> fields = new ArrayList<>();
    for (Field inputField : inputSchema.getFields()) {
      fields.add(convertFieldSchema(inputSchema, inputField, workUnit));
    }
    outputSchema.setFields(fields);
    return outputSchema;
  }

  /**
   * Convert to the output schema of a field. Non-payload fields are copied verbatim;
   * the payload field is rebuilt with its latest schema.
   */
  protected Field convertFieldSchema(Schema inputSchema, Field field, WorkUnitState workUnit)
      throws SchemaConversionException {
    if (!field.name().equals(payloadField)) {
      // Copy the field unchanged into the output schema
      return AvroCompatibilityHelper.createSchemaField(field.name(), field.schema(), field.doc(),
          AvroUtils.getCompatibleDefaultValue(field), field.order());
    }
    return createLatestPayloadField(field);
  }

  /**
   * Create a payload field with its latest schema fetched from {@link #registry}.
   *
   * @param field the original payload field from input envelope schema
   * @return a new payload field with its latest schema
   */
  private Field createLatestPayloadField(Field field)
      throws SchemaConversionException {
    try {
      Schema latestPayloadSchema = fetchLatestPayloadSchema();
      return AvroCompatibilityHelper.createSchemaField(field.name(), latestPayloadSchema, DECORATED_PAYLOAD_DOC,
          AvroUtils.getCompatibleDefaultValue(field), field.order());
    } catch (Exception e) {
      throw new SchemaConversionException(e);
    }
  }

  /**
   * Builds the output record by copying every field of the input envelope record,
   * except the payload, which is deserialized into an object with the latest schema.
   */
  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    GenericRecord outputRecord = new GenericData.Record(outputSchema);
    for (Field inputField : inputRecord.getSchema().getFields()) {
      String fieldName = inputField.name();
      outputRecord.put(fieldName, convertFieldValue(outputSchema, inputField, inputRecord, workUnit));
    }
    return new SingleRecordIterable<>(outputRecord);
  }

  /**
   * Convert to the output value of a field. The payload field is up-converted to the
   * latest schema; every other field is passed through unchanged.
   */
  protected Object convertFieldValue(Schema outputSchema, Field field, GenericRecord inputRecord,
      WorkUnitState workUnit)
      throws DataConversionException {
    return field.name().equals(payloadField)
        ? upConvertPayload(inputRecord)
        : inputRecord.get(field.name());
  }
}
| 3,279 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/BaseEnvelopeSchemaConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistryFactory;
import org.apache.gobblin.util.AvroUtils;
import com.google.common.base.Optional;
/**
* Base class for an envelope schema converter using {@link KafkaSchemaRegistry}
*/
public abstract class BaseEnvelopeSchemaConverter<P> extends Converter<Schema, Schema, GenericRecord, GenericRecord> {
  public static final String CONF_PREFIX = "converter.envelopeSchemaConverter.";
  // Dot-separated location of the schema id inside the envelope record
  public static final String PAYLOAD_SCHEMA_ID_FIELD = CONF_PREFIX + "schemaIdField";
  // Dot-separated location of the payload bytes inside the envelope record
  public static final String PAYLOAD_FIELD = CONF_PREFIX + "payloadField";
  // Topic name under which the latest payload schema is registered (required)
  public static final String PAYLOAD_SCHEMA_TOPIC = CONF_PREFIX + "payloadSchemaTopic";
  // Fully qualified class name of the KafkaSchemaRegistryFactory to use
  public static final String KAFKA_REGISTRY_FACTORY = CONF_PREFIX + "kafkaRegistryFactory";
  public static final String DEFAULT_PAYLOAD_FIELD = "payload";
  public static final String DEFAULT_PAYLOAD_SCHEMA_ID_FIELD = "payloadSchemaId";
  public static final String DEFAULT_KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS =
      "org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory";
  protected String payloadSchemaIdField;
  protected String payloadField;
  protected String payloadSchemaTopic;
  // Reader bound to the latest payload schema; (re)created by fetchLatestPayloadSchema()
  protected GenericDatumReader<P> latestPayloadReader;
  protected KafkaSchemaRegistry registry;
  /**
   * Reads the payload/schema-id field locations and the payload schema topic from the
   * work unit, and instantiates the configured {@link KafkaSchemaRegistry} via its factory.
   *
   * @throws RuntimeException if {@value #PAYLOAD_SCHEMA_TOPIC} is missing or the
   *         registry factory class cannot be instantiated
   */
  @Override
  public BaseEnvelopeSchemaConverter init(WorkUnitState workUnit) {
    super.init(workUnit);
    payloadSchemaIdField = workUnit.getProp(PAYLOAD_SCHEMA_ID_FIELD, DEFAULT_PAYLOAD_SCHEMA_ID_FIELD);
    payloadField = workUnit.getProp(PAYLOAD_FIELD, DEFAULT_PAYLOAD_FIELD);
    // Get the schema specific topic to fetch the schema in the registry
    if (!workUnit.contains(PAYLOAD_SCHEMA_TOPIC)) {
      throw new RuntimeException("Configuration not found: " + PAYLOAD_SCHEMA_TOPIC);
    }
    payloadSchemaTopic = workUnit.getProp(PAYLOAD_SCHEMA_TOPIC);
    String registryFactoryField = workUnit.getProp(KAFKA_REGISTRY_FACTORY, DEFAULT_KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS);
    try {
      KafkaSchemaRegistryFactory registryFactory =
          ((Class<? extends KafkaSchemaRegistryFactory>) Class.forName(registryFactoryField)).newInstance();
      registry = registryFactory.create(workUnit.getProperties());
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
      throw new RuntimeException(e);
    }
    return this;
  }
  /**
   * Get the payload schema
   *
   * @param inputRecord the input record which has the payload
   * @return the current schema of the payload
   * @deprecated use {@link #getFieldSchema(GenericRecord, String)}
   */
  @Deprecated
  protected Schema getPayloadSchema(GenericRecord inputRecord)
      throws Exception {
    return getFieldSchema(inputRecord, payloadSchemaIdField);
  }
  /**
   * Get the schema of a field
   *
   * @param record the input record which has the schema id
   * @param schemaIdLocation a dot separated location string the schema id
   * @return a schema referenced by the schema id
   * @throws Exception if the schema id field is absent from the record
   */
  protected Schema getFieldSchema(GenericRecord record, String schemaIdLocation) throws Exception {
    Optional<Object> schemaIdValue = AvroUtils.getFieldValue(record, schemaIdLocation);
    if (!schemaIdValue.isPresent()) {
      throw new Exception("Schema id with key " + schemaIdLocation + " not found in the record");
    }
    String schemaKey = String.valueOf(schemaIdValue.get());
    return (Schema) registry.getSchemaByKey(schemaKey);
  }
  /**
   * Get payload field and convert to byte array
   *
   * @param inputRecord the input record which has the payload
   * @return the byte array of the payload in the input record
   * @deprecated use {@link #getFieldAsBytes(GenericRecord, String)}
   */
  @Deprecated
  protected byte[] getPayloadBytes(GenericRecord inputRecord) {
    try {
      return getFieldAsBytes(inputRecord, payloadField);
    } catch (Exception e) {
      // Deliberately swallows the failure and signals it with a null return;
      // callers of this deprecated method historically expect that behavior.
      return null;
    }
  }
  /**
   * Get field value byte array
   *
   * @param record the input record which has the field
   * @param fieldLocation a dot separated location string to the field
   * @return the byte array of field value
   * @throws Exception if the field is absent from the record
   */
  protected byte[] getFieldAsBytes(GenericRecord record, String fieldLocation) throws Exception {
    Optional<Object> bytesValue = AvroUtils.getFieldValue(record, fieldLocation);
    if (!bytesValue.isPresent()) {
      throw new Exception("Bytes value with key " + fieldLocation + " not found in the record");
    }
    ByteBuffer bb = (ByteBuffer) bytesValue.get();
    if (bb.hasArray()) {
      return bb.array();
    } else {
      // Direct or read-only buffer: copy the remaining bytes out
      byte[] payloadBytes = new byte[bb.remaining()];
      bb.get(payloadBytes);
      return payloadBytes;
    }
  }
  /**
   * Fetches the latest payload schema for {@link #payloadSchemaTopic} from the registry
   * and rebinds {@link #latestPayloadReader} to it.
   *
   * @return the latest payload schema
   */
  protected Schema fetchLatestPayloadSchema() throws Exception {
    Schema latestPayloadSchema = (Schema)registry.getLatestSchemaByTopic(payloadSchemaTopic);
    latestPayloadReader = new GenericDatumReader<>(latestPayloadSchema);
    return latestPayloadSchema;
  }
  /**
   * Convert the payload in the input record to a deserialized object with the latest schema
   *
   * <p>NOTE(review): requires {@link #fetchLatestPayloadSchema()} to have been called first
   * so that {@link #latestPayloadReader} is non-null — presumably via convertSchema in a
   * subclass; confirm against callers.
   *
   * @param inputRecord the input record
   * @return the schema'ed payload object
   */
  protected P upConvertPayload(GenericRecord inputRecord) throws DataConversionException {
    try {
      Schema payloadSchema = getPayloadSchema(inputRecord);
      // Set writer schema
      latestPayloadReader.setSchema(payloadSchema);
      byte[] payloadBytes = getPayloadBytes(inputRecord);
      Decoder decoder = DecoderFactory.get().binaryDecoder(payloadBytes, null);
      // 'latestPayloadReader.read' will convert the record from 'payloadSchema' to the latest payload schema
      return latestPayloadReader.read(null, decoder);
    } catch (Exception e) {
      throw new DataConversionException(e);
    }
  }
}
| 3,280 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/EnvelopePayloadExtractingConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
/**
* A converter for extracting schema/records from an envelope schema.
* Input schema: envelope schema - must have fields payloadSchemaId (the schema registry key of the output
* schema) and payload (byte data for output record)
* Input record: record corresponding to input schema
* Output schema: latest schema obtained from schema registry with topic {@link #PAYLOAD_SCHEMA_TOPIC}
* Output record: record corresponding to output schema obtained from input record's {@link #PAYLOAD_FIELD} as bytes
*/
public class EnvelopePayloadExtractingConverter extends BaseEnvelopeSchemaConverter<GenericRecord> {

  /**
   * The output schema is the latest payload schema registered under the configured
   * payload schema topic; the envelope's own schema is discarded.
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    try {
      return fetchLatestPayloadSchema();
    } catch (Exception e) {
      throw new SchemaConversionException(e);
    }
  }

  /**
   * Deserializes the envelope's payload bytes into a record with the latest payload schema.
   */
  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    GenericRecord extractedPayload = upConvertPayload(inputRecord);
    return new SingleRecordIterable<>(extractedPayload);
  }
}
| 3,281 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/converter/GenericRecordBasedKafkaSchemaChangeInjector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
/**
* A {@link org.apache.gobblin.stream.ControlMessageInjector} that detects changes in the latest schema and notifies downstream constructs by
* injecting a {@link org.apache.gobblin.stream.MetadataUpdateControlMessage}.
* Also supports multi-dataset schema changes.
*/
public class GenericRecordBasedKafkaSchemaChangeInjector extends KafkaSchemaChangeInjector<Schema> {
@Override
protected Schema getSchemaIdentifier(DecodeableKafkaRecord consumerRecord) {
GenericRecord genericRecord = (GenericRecord) consumerRecord.getValue();
return genericRecord.getSchema();
}
} | 3,282 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/UniversalKafkaSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import com.google.common.eventbus.EventBus;
import java.io.IOException;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.InfiniteSource;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.EventBasedExtractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.stream.WorkUnitChangeEvent;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import com.google.common.base.Preconditions;
/**
* A {@link KafkaSource} to use with arbitrary {@link EventBasedExtractor}. Specify the extractor to use with key
* {@link #EXTRACTOR_TYPE}.
*/
@Slf4j
public class UniversalKafkaSource<S, D> extends KafkaSource<S, D> implements InfiniteSource<S, D> {
  public static final String EXTRACTOR_TYPE = "gobblin.source.kafka.extractorType";

  private final EventBus eventBus = new EventBus(this.getClass().getSimpleName());

  /**
   * Instantiates the {@link EventBasedExtractor} named (possibly by class alias) under
   * {@link #EXTRACTOR_TYPE}, invoking its longest matching constructor with the state.
   *
   * @throws IllegalArgumentException if {@value #EXTRACTOR_TYPE} is not configured
   * @throws RuntimeException if the extractor class cannot be resolved or constructed
   */
  @Override
  public Extractor<S, D> getExtractor(WorkUnitState state)
      throws IOException {
    Preconditions.checkArgument(state.contains(EXTRACTOR_TYPE), "Missing key " + EXTRACTOR_TYPE);
    String extractorAlias = state.getProp(EXTRACTOR_TYPE);
    try {
      Class<? extends EventBasedExtractor> extractorClass =
          new ClassAliasResolver<>(EventBasedExtractor.class).resolveClass(extractorAlias);
      return GobblinConstructorUtils.invokeLongestConstructor(extractorClass, state);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }

  /** Broadcasts a {@link WorkUnitChangeEvent} so listeners can react to updated work units. */
  public void onWorkUnitUpdate(List<String> oldTaskIds, List<WorkUnit> newWorkUnits) {
    if (this.eventBus != null) {
      log.info("post workunit change event");
      this.eventBus.post(new WorkUnitChangeEvent(oldTaskIds, newWorkUnits));
    }
  }

  @Override
  public EventBus getEventBus() {
    return this.eventBus;
  }
}
| 3,283 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/StartOffsetOutOfRangeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
/**
 * Thrown when the requested start offset for a Kafka partition lies outside the range of
 * offsets currently available on the broker (for example, when retention has already
 * truncated the data before consumption began).
 */
public class StartOffsetOutOfRangeException extends Exception {

  // Explicit id instead of @SuppressWarnings("serial"), so serialization stays stable
  // across recompilation.
  private static final long serialVersionUID = 1L;

  /**
   * @param message a description of the offending offset and the valid offset range
   */
  public StartOffsetOutOfRangeException(String message) {
    super(message);
  }

  /**
   * @param message a description of the offending offset and the valid offset range
   * @param cause the underlying exception that revealed the out-of-range condition
   */
  public StartOffsetOutOfRangeException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 3,284 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaAvroExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.util.AvroUtils;
/**
 * An abstract {@link Extractor} implementation for Kafka where event payloads are Avro.
 *
 * Concrete subclasses must implement {@link #getRecordSchema(byte[])} and {@link #getDecoder(byte[])}.
 * When no schema registry is configured (i.e., {@link KafkaSchemaRegistry#KAFKA_SCHEMA_REGISTRY_CLASS}
 * is absent from the state), subclasses should also override {@link #getExtractorSchema()}.
 *
 * @author Ziyang Liu
 */
@Slf4j
public abstract class KafkaAvroExtractor<K> extends KafkaExtractor<Schema, GenericRecord> {

  // Fallback schema reported when no real schema could be resolved for the topic.
  protected static final Schema DEFAULT_SCHEMA = SchemaBuilder.record("DefaultSchema").fields().name("header")
      .type(SchemaBuilder.record("header").fields().name("time").type("long").withDefault(0).endRecord()).noDefault()
      .endRecord();

  protected final Optional<KafkaSchemaRegistry<K, Schema>> schemaRegistry;
  protected final Optional<Schema> schema;
  protected final Optional<GenericDatumReader<Record>> reader;

  public KafkaAvroExtractor(WorkUnitState state) {
    super(state);
    // Only instantiate a schema registry when one is configured for this job.
    if (state.contains(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS)) {
      this.schemaRegistry = Optional.of(KafkaSchemaRegistry.<K, Schema> get(state.getProperties()));
    } else {
      this.schemaRegistry = Optional.<KafkaSchemaRegistry<K, Schema>> absent();
    }
    this.schema = getExtractorSchema();
    if (!this.schema.isPresent()) {
      // Without a schema the topic cannot be decoded; readRecordImpl() will return null.
      log.error(String.format("Cannot find latest schema for topic %s. This topic will be skipped", this.topicName));
      this.reader = Optional.absent();
    } else {
      this.reader = Optional.of(new GenericDatumReader<Record>(this.schema.get()));
    }
  }

  /**
   * Get the schema to be used by this extractor. All extracted records that have different schemas
   * will be converted to this schema.
   */
  protected Optional<Schema> getExtractorSchema() {
    return Optional.fromNullable(getLatestSchemaByTopic(this.topicName));
  }

  /**
   * Looks up the latest schema for {@code topic} in the configured registry, or returns
   * {@code null} (and logs) when the lookup fails.
   */
  protected Schema getLatestSchemaByTopic(String topic) {
    Preconditions.checkState(this.schemaRegistry.isPresent());
    KafkaSchemaRegistry<K, Schema> registry = this.schemaRegistry.get();
    try {
      return registry.getLatestSchemaByTopic(topic);
    } catch (SchemaRegistryException e) {
      log.error(String.format("Cannot find latest schema for topic %s. This topic will be skipped", topic), e);
      return null;
    }
  }

  @Override
  public GenericRecord readRecordImpl(GenericRecord reuse) throws DataRecordException, IOException {
    // No schema means the topic is being skipped entirely.
    return this.schema.isPresent() ? super.readRecordImpl(reuse) : null;
  }

  @Override
  public Schema getSchema() {
    return this.schema.or(DEFAULT_SCHEMA);
  }

  @Override
  protected GenericRecord decodeRecord(ByteArrayBasedKafkaRecord messageAndOffset) throws IOException {
    byte[] messageBytes = messageAndOffset.getMessageBytes();
    Schema writerSchema = getRecordSchema(messageBytes);
    Decoder decoder = getDecoder(messageBytes);
    this.reader.get().setSchema(writerSchema);
    try {
      GenericRecord decoded = this.reader.get().read(null, decoder);
      return convertRecord(decoded);
    } catch (IOException e) {
      log.error(String.format("Error during decoding record for partition %s: ", getCurrentPartition()));
      throw e;
    }
  }

  /**
   * Convert the record to the output schema of this extractor
   * @param record the input record
   * @return the converted record
   * @throws IOException
   */
  @Override
  protected GenericRecord convertRecord(GenericRecord record) throws IOException {
    return AvroUtils.convertRecordSchema(record, this.schema.get());
  }

  /**
   * Obtain the Avro {@link Schema} of a Kafka record given the payload of the record.
   */
  protected abstract Schema getRecordSchema(byte[] payload);

  /**
   * Obtain the Avro {@link Decoder} for a Kafka record given the payload of the record.
   */
  protected abstract Decoder getDecoder(byte[] payload);
}
| 3,285 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Timer;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.extractor.extract.EventBasedSource;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker;
import org.apache.gobblin.source.extractor.extract.kafka.validator.TopicValidators;
import org.apache.gobblin.source.extractor.limiter.LimiterConfigurationKeys;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.dataset.DatasetUtils;
import static java.util.stream.Collectors.toSet;
/**
* A {@link org.apache.gobblin.source.Source} implementation for Kafka source.
*
* @author Ziyang Liu
*/
public abstract class KafkaSource<S, D> extends EventBasedSource<S, D> {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaSource.class);

  // ---- Topic selection configuration keys ----
  public static final String TOPIC_BLACKLIST = "topic.blacklist";
  public static final String TOPIC_WHITELIST = "topic.whitelist";

  // ---- Offset bootstrap / reset option values and keys ----
  public static final String LATEST_OFFSET = "latest";
  public static final String EARLIEST_OFFSET = "earliest";
  public static final String NEAREST_OFFSET = "nearest";
  public static final String OFFSET_LOOKBACK = "offset_lookback";
  public static final String BOOTSTRAP_WITH_OFFSET = "bootstrap.with.offset";
  public static final String KAFKA_OFFSET_LOOKBACK = "kafka.offset.lookback";
  public static final String DEFAULT_BOOTSTRAP_WITH_OFFSET = LATEST_OFFSET;
  public static final String TOPICS_MOVE_TO_LATEST_OFFSET = "topics.move.to.latest.offset";
  public static final String RESET_ON_OFFSET_OUT_OF_RANGE = "reset.on.offset.out.of.range";
  public static final String DEFAULT_RESET_ON_OFFSET_OUT_OF_RANGE = NEAREST_OFFSET;

  // ---- Workunit property names describing the topic partition being pulled ----
  public static final String TOPIC_NAME = "topic.name";
  public static final String PARTITION_ID = "partition.id";
  public static final String LEADER_ID = "leader.id";
  public static final String LEADER_HOSTANDPORT = "leader.hostandport";
  public static final Extract.TableType DEFAULT_TABLE_TYPE = Extract.TableType.APPEND_ONLY;
  public static final String DEFAULT_NAMESPACE_NAME = "KAFKA";
  public static final String ALL_TOPICS = "all";
  //A workunit property that contains the number of topic partitions for a given topic. Useful for
  //workunit size estimation to assign weights to a given topic partition.
  public static final String NUM_TOPIC_PARTITIONS = "numTopicPartitions";
  public static final String AVG_RECORD_MILLIS = "avg.record.millis";

  // ---- Workunit property names recording fetch/watermark bookkeeping across runs ----
  public static final String START_FETCH_EPOCH_TIME = "startFetchEpochTime";
  public static final String STOP_FETCH_EPOCH_TIME = "stopFetchEpochTime";
  public static final String PREVIOUS_START_FETCH_EPOCH_TIME = "previousStartFetchEpochTime";
  public static final String PREVIOUS_STOP_FETCH_EPOCH_TIME = "previousStopFetchEpochTime";
  public static final String PREVIOUS_LOW_WATERMARK = "previousLowWatermark";
  public static final String PREVIOUS_HIGH_WATERMARK = "previousHighWatermark";
  public static final String PREVIOUS_LATEST_OFFSET = "previousLatestOffset";
  public static final String OFFSET_FETCH_EPOCH_TIME = "offsetFetchEpochTime";
  public static final String PREVIOUS_OFFSET_FETCH_EPOCH_TIME = "previousOffsetFetchEpochTime";

  // ---- Misc. job-level configuration keys ----
  public static final String ALLOW_PERIOD_IN_TOPIC_NAME = "gobblin.kafka.allowPeriodInTopicName";
  public static final String GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS = "gobblin.kafka.consumerClient.class";
  public static final String GOBBLIN_KAFKA_EXTRACT_ALLOW_TABLE_TYPE_NAMESPACE_CUSTOMIZATION =
      "gobblin.kafka.extract.allowTableTypeAndNamspaceCustomization";
  public static final String DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS =
      "org.apache.gobblin.kafka.client.Kafka08ConsumerClient$Factory";
  public static final String GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE =
      "gobblin.kafka.shouldEnableDatasetStateStore";
  public static final boolean DEFAULT_GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE = false;
  public static final String OFFSET_FETCH_TIMER = "offsetFetchTimer";
  public static final String RECORD_LEVEL_SLA_MINUTES_KEY = "gobblin.kafka.recordLevelSlaMinutes";

  // ---- Observed-latency measurement configuration keys and defaults ----
  public static final String MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS = "gobblin.kafka.maxobservedLatencyInHours";
  public static final Integer DEFAULT_MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS = 24;
  public static final String OBSERVED_LATENCY_PRECISION = "gobblin.kafka.observedLatencyPrecision";
  public static final Integer DEFAULT_OBSERVED_LATENCY_PRECISION = 3;
  public static final String OBSERVED_LATENCY_MEASUREMENT_ENABLED = "gobblin.kafka.observedLatencyMeasurementEnabled";
  public static final Boolean DEFAULT_OBSERVED_LATENCY_MEASUREMENT_ENABLED = false;
  public static final String RECORD_CREATION_TIMESTAMP_FIELD = "gobblin.kafka.recordCreationTimestampField";
  public static final String RECORD_CREATION_TIMESTAMP_UNIT = "gobblin.kafka.recordCreationTimestampUnit";

  // ---- Per-run mutable bookkeeping; concurrent because workunit creation is multi-threaded ----
  private final Set<String> moveToLatestTopics = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
  private final Map<KafkaPartition, Long> previousOffsets = Maps.newConcurrentMap();
  private final Map<KafkaPartition, Long> previousLowWatermarks = Maps.newConcurrentMap();
  private final Map<KafkaPartition, Long> previousExpectedHighWatermarks = Maps.newConcurrentMap();
  private final Map<KafkaPartition, Long> previousOffsetFetchEpochTimes = Maps.newConcurrentMap();
  private final Map<KafkaPartition, Long> previousStartFetchEpochTimes = Maps.newConcurrentMap();
  private final Map<KafkaPartition, Long> previousStopFetchEpochTimes = Maps.newConcurrentMap();
  private final Set<KafkaPartition> partitionsToBeProcessed = Sets.newConcurrentHashSet();

  // Counters reported as job metrics.
  private final AtomicInteger failToGetOffsetCount = new AtomicInteger(0);
  private final AtomicInteger offsetTooEarlyCount = new AtomicInteger(0);
  private final AtomicInteger offsetTooLateCount = new AtomicInteger(0);

  // sharing the kafka consumer may result in contention, so support thread local consumers
  protected final ConcurrentLinkedQueue<GobblinKafkaConsumerClient> kafkaConsumerClientPool = new ConcurrentLinkedQueue();
  protected static final ThreadLocal<GobblinKafkaConsumerClient> kafkaConsumerClient =
      new ThreadLocal<GobblinKafkaConsumerClient>();
  private GobblinKafkaConsumerClient sharedKafkaConsumerClient = null;
  private final ClassAliasResolver<GobblinKafkaConsumerClientFactory> kafkaConsumerClientResolver =
      new ClassAliasResolver<>(GobblinKafkaConsumerClientFactory.class);

  private volatile boolean doneGettingAllPreviousOffsets = false;
  private Extract.TableType tableType;
  private String extractNamespace;
  private boolean isFullExtract;
  private String kafkaBrokers;
  private boolean shouldEnableDatasetStateStore;
  private AtomicBoolean isDatasetStateEnabled = new AtomicBoolean(false);
  private Set<String> topicsToProcess;
  private MetricContext metricContext;
  protected Optional<LineageInfo> lineageInfo;
private List<String> getLimiterExtractorReportKeys() {
List<String> keyNames = new ArrayList<>();
keyNames.add(KafkaSource.TOPIC_NAME);
keyNames.add(KafkaSource.PARTITION_ID);
return keyNames;
}
private void setLimiterReportKeyListToWorkUnits(List<WorkUnit> workUnits, List<String> keyNameList) {
if (keyNameList.isEmpty()) {
return;
}
String keyList = Joiner.on(',').join(keyNameList.iterator());
for (WorkUnit workUnit : workUnits) {
workUnit.setProp(LimiterConfigurationKeys.LIMITER_REPORT_KEY_LIST, keyList);
}
}
  /**
   * Creates workunits for all discovered topics; delegates to
   * {@link #getWorkunitsForFilteredPartitions} with no partition filter and no minimum container count.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    return this.getWorkunitsForFilteredPartitions(state, Optional.absent(), Optional.absent());
  }
/**
* Compute Workunits for Kafka Topics. If filteredTopicPartition present, respect this map and only compute the provided
* topics and filtered partitions. If not, use state to discover Kafka topics and all available partitions.
*
* @param filteredTopicPartition optional parameter to determine if only filtered topic-partitions are needed.
* @param minContainer give an option to specify a minimum container count. Please be advised that how it being used is
* determined by the implementation of concrete {@link KafkaWorkUnitPacker} class.
*
* TODO: Utilize the minContainer in {@link KafkaTopicGroupingWorkUnitPacker#pack(Map, int)}, as the numContainers variable
* is not used currently.
*/
public List<WorkUnit> getWorkunitsForFilteredPartitions(SourceState state,
Optional<Map<String, List<Integer>>> filteredTopicPartition, Optional<Integer> minContainer) {
this.metricContext = Instrumented.getMetricContext(state, KafkaSource.class);
this.lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
Map<String, List<Integer>> filteredTopicPartitionMap = filteredTopicPartition.or(new HashMap<>());
Map<String, List<WorkUnit>> kafkaTopicWorkunitMap = Maps.newConcurrentMap();
if (state.getPropAsBoolean(KafkaSource.GOBBLIN_KAFKA_EXTRACT_ALLOW_TABLE_TYPE_NAMESPACE_CUSTOMIZATION)) {
String tableTypeStr =
state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY, KafkaSource.DEFAULT_TABLE_TYPE.toString());
tableType = Extract.TableType.valueOf(tableTypeStr);
extractNamespace =
state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY, KafkaSource.DEFAULT_NAMESPACE_NAME);
} else {
// To be compatible, reject table type and namespace configuration keys as previous implementation
tableType = KafkaSource.DEFAULT_TABLE_TYPE;
extractNamespace = KafkaSource.DEFAULT_NAMESPACE_NAME;
}
isFullExtract = state.getPropAsBoolean(ConfigurationKeys.EXTRACT_IS_FULL_KEY);
kafkaBrokers = state.getProp(ConfigurationKeys.KAFKA_BROKERS, "");
this.shouldEnableDatasetStateStore = state.getPropAsBoolean(GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE,
DEFAULT_GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE);
try {
Config config = ConfigUtils.propertiesToConfig(state.getProperties());
GobblinKafkaConsumerClientFactory kafkaConsumerClientFactory = kafkaConsumerClientResolver
.resolveClass(
state.getProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS)).newInstance();
this.kafkaConsumerClient.set(kafkaConsumerClientFactory.create(config));
Collection<KafkaTopic> topics;
if(filteredTopicPartition.isPresent()) {
// If filteredTopicPartition present, use it to construct the whitelist pattern while leave blacklist empty
topics = this.kafkaConsumerClient.get().getFilteredTopics(Collections.emptyList(),
filteredTopicPartitionMap.keySet().stream().map(Pattern::compile).collect(Collectors.toList()));
} else {
topics = getValidTopics(getFilteredTopics(state), state);
}
this.topicsToProcess = topics.stream().map(KafkaTopic::getName).collect(toSet());
Map<String, State> topicSpecificStateMap =
DatasetUtils.getDatasetSpecificProps(Iterables.transform(topics, new Function<KafkaTopic, String>() {
@Override
public String apply(KafkaTopic topic) {
return topic.getName();
}
}), state);
int numOfThreads = state.getPropAsInt(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT);
ExecutorService threadPool =
Executors.newFixedThreadPool(numOfThreads, ExecutorsUtils.newThreadFactory(Optional.of(LOG)));
if (state.getPropAsBoolean(ConfigurationKeys.KAFKA_SOURCE_SHARE_CONSUMER_CLIENT,
ConfigurationKeys.DEFAULT_KAFKA_SOURCE_SHARE_CONSUMER_CLIENT)) {
this.sharedKafkaConsumerClient = this.kafkaConsumerClient.get();
} else {
// preallocate one client per thread
populateClientPool(numOfThreads, kafkaConsumerClientFactory, config);
}
Stopwatch createWorkUnitStopwatch = Stopwatch.createStarted();
for (KafkaTopic topic : topics) {
LOG.info("Discovered topic " + topic);
if (topic.getTopicSpecificState().isPresent()) {
topicSpecificStateMap.computeIfAbsent(topic.getName(), k -> new State())
.addAllIfNotExist(topic.getTopicSpecificState().get());
}
Optional<Set<Integer>> partitionIDSet = Optional.absent();
if(filteredTopicPartition.isPresent()) {
List<Integer> list = java.util.Optional.ofNullable(filteredTopicPartitionMap.get(topic.getName()))
.orElse(new ArrayList<>());
partitionIDSet = Optional.of(new HashSet<>(list));
LOG.info("Compute the workunit for topic {} with num of filtered partitions: {}",
topic.getName(), list.size());
}
threadPool.submit(
new WorkUnitCreator(topic, state, Optional.fromNullable(topicSpecificStateMap.get(topic.getName())),
kafkaTopicWorkunitMap, partitionIDSet));
}
ExecutorsUtils.shutdownExecutorService(threadPool, Optional.of(LOG), 1L, TimeUnit.HOURS);
LOG.info(String.format("Created workunits for %d topics in %d seconds", kafkaTopicWorkunitMap.size(),
createWorkUnitStopwatch.elapsed(TimeUnit.SECONDS)));
// Create empty WorkUnits for skipped partitions (i.e., partitions that have previous offsets,
// but aren't processed). When filteredTopicPartition present, only filtered topic-partitions are needed so skip this call
if(!filteredTopicPartition.isPresent()) {
createEmptyWorkUnitsForSkippedPartitions(kafkaTopicWorkunitMap, topicSpecificStateMap, state);
}
KafkaWorkUnitPacker kafkaWorkUnitPacker = KafkaWorkUnitPacker.getInstance(this, state, Optional.of(this.metricContext));
int numOfMultiWorkunits = minContainer.or(1);
if(state.contains(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY)) {
numOfMultiWorkunits = Math.max(numOfMultiWorkunits,
calculateNumMappersForPacker(state, kafkaWorkUnitPacker, kafkaTopicWorkunitMap));
}
addTopicSpecificPropsToWorkUnits(kafkaTopicWorkunitMap, topicSpecificStateMap);
List<WorkUnit> workUnitList = kafkaWorkUnitPacker.pack(kafkaTopicWorkunitMap, numOfMultiWorkunits);
setLimiterReportKeyListToWorkUnits(workUnitList, getLimiterExtractorReportKeys());
return workUnitList;
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new RuntimeException("Checked exception caught", e);
} catch (Throwable t) {
throw new RuntimeException("Unexpected throwable caught, ", t);
} finally {
try {
GobblinKafkaConsumerClient consumerClient = this.kafkaConsumerClient.get();
if (consumerClient != null) {
consumerClient.close();
}
// cleanup clients from pool
for (GobblinKafkaConsumerClient client: kafkaConsumerClientPool) {
client.close();
}
} catch (Throwable t) {
//Swallow any exceptions in the finally{..} block to allow potential exceptions from the main try{..} block to be
//propagated
LOG.error("Exception {} encountered closing GobblinKafkaConsumerClient ", t);
}
}
}
protected void populateClientPool(int count,
GobblinKafkaConsumerClientFactory kafkaConsumerClientFactory,
Config config) {
for (int i = 0; i < count; i++) {
kafkaConsumerClientPool.offer(kafkaConsumerClientFactory.create(config));
}
}
private void addTopicSpecificPropsToWorkUnits(Map<String, List<WorkUnit>> workUnits, Map<String, State> topicSpecificStateMap) {
for (List<WorkUnit> workUnitList : workUnits.values()) {
for (WorkUnit workUnit : workUnitList) {
addTopicSpecificPropsToWorkUnit(workUnit, topicSpecificStateMap);
}
}
}
private void addTopicSpecificPropsToWorkUnit(WorkUnit workUnit, Map<String, State> topicSpecificStateMap) {
if (workUnit instanceof MultiWorkUnit) {
for (WorkUnit wu : ((MultiWorkUnit) workUnit).getWorkUnits()) {
addTopicSpecificPropsToWorkUnit(wu, topicSpecificStateMap);
}
} else if (!workUnit.contains(TOPIC_NAME)) {
return;
} else {
addDatasetUrnOptionally(workUnit);
if (topicSpecificStateMap == null) {
return;
} else if (!topicSpecificStateMap.containsKey(workUnit.getProp(TOPIC_NAME))) {
return;
} else {
workUnit.addAll(topicSpecificStateMap.get(workUnit.getProp(TOPIC_NAME)));
}
}
}
private void addDatasetUrnOptionally(WorkUnit workUnit) {
if (!this.shouldEnableDatasetStateStore) {
return;
}
workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, workUnit.getProp(TOPIC_NAME));
}
private void createEmptyWorkUnitsForSkippedPartitions(Map<String, List<WorkUnit>> workUnits,
Map<String, State> topicSpecificStateMap, SourceState state) {
// in case the previous offset not been set
getAllPreviousOffsetState(state);
// For each partition that has a previous offset, create an empty WorkUnit for it if
// it is not in this.partitionsToBeProcessed.
for (Map.Entry<KafkaPartition, Long> entry : this.previousOffsets.entrySet()) {
KafkaPartition partition = entry.getKey();
if (!this.partitionsToBeProcessed.contains(partition)) {
String topicName = partition.getTopicName();
if (!this.isDatasetStateEnabled.get() || this.topicsToProcess.contains(topicName)) {
long previousOffset = entry.getValue();
WorkUnit emptyWorkUnit = createEmptyWorkUnit(partition, previousOffset,
this.previousOffsetFetchEpochTimes.get(partition),
Optional.fromNullable(topicSpecificStateMap.get(partition.getTopicName())));
if (workUnits.containsKey(topicName)) {
workUnits.get(topicName).add(emptyWorkUnit);
} else {
workUnits.put(topicName, Lists.newArrayList(emptyWorkUnit));
}
}
}
}
}
//determine the number of mappers/containers for workunit packer
private int calculateNumMappersForPacker(SourceState state,
KafkaWorkUnitPacker kafkaWorkUnitPacker, Map<String, List<WorkUnit>> workUnits) {
int maxMapperNum =
state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY, ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);
int numContainers = maxMapperNum;
if(state.contains(ConfigurationKeys.MR_TARGET_MAPPER_SIZE)) {
double totalEstDataSize = kafkaWorkUnitPacker.setWorkUnitEstSizes(workUnits);
LOG.info(String.format("The total estimated data size is %.2f", totalEstDataSize));
double targetMapperSize = state.getPropAsDouble(ConfigurationKeys.MR_TARGET_MAPPER_SIZE);
numContainers = (int) (totalEstDataSize / targetMapperSize) + 1;
numContainers = Math.min(numContainers, maxMapperNum);
}
return numContainers;
}
/*
* This function need to be thread safe since it is called in the Runnable
*/
private List<WorkUnit> getWorkUnitsForTopic(KafkaTopic topic, SourceState state,
Optional<State> topicSpecificState, Optional<Set<Integer>> filteredPartitions) {
Timer.Context context = this.metricContext.timer("isTopicQualifiedTimer").time();
boolean topicQualified = isTopicQualified(topic);
context.close();
List<WorkUnit> workUnits = Lists.newArrayList();
List<KafkaPartition> topicPartitions = topic.getPartitions();
for (KafkaPartition partition : topicPartitions) {
if(filteredPartitions.isPresent() && !filteredPartitions.get().contains(partition.getId())) {
continue;
}
WorkUnit workUnit = getWorkUnitForTopicPartition(partition, state, topicSpecificState);
if (workUnit != null) {
// For disqualified topics, for each of its workunits set the high watermark to be the same
// as the low watermark, so that it will be skipped.
if (!topicQualified) {
skipWorkUnit(workUnit);
}
workUnit.setProp(NUM_TOPIC_PARTITIONS, topicPartitions.size());
workUnits.add(workUnit);
}
}
this.partitionsToBeProcessed.addAll(topic.getPartitions());
return workUnits;
}
  /**
   * Whether a {@link KafkaTopic} is qualified to be pulled.
   *
   * This method can be overridden by subclasses for verifying topic eligibility, e.g., one may want to
   * skip a topic if its schema cannot be found in the schema registry.
   *
   * The default implementation accepts every topic.
   */
  protected boolean isTopicQualified(KafkaTopic topic) {
    return true;
  }
  /**
   * Marks the workunit as a no-op by setting its high watermark equal to its low watermark,
   * so the extractor pulls nothing for it.
   */
  @SuppressWarnings("deprecation")
  private static void skipWorkUnit(WorkUnit workUnit) {
    workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, workUnit.getLowWaterMark());
  }
private WorkUnit getWorkUnitForTopicPartition(KafkaPartition partition, SourceState state,
Optional<State> topicSpecificState) {
Offsets offsets = new Offsets();
boolean failedToGetKafkaOffsets = false;
try (Timer.Context context = this.metricContext.timer(OFFSET_FETCH_TIMER).time()) {
offsets.setOffsetFetchEpochTime(System.currentTimeMillis());
offsets.setEarliestOffset(this.kafkaConsumerClient.get().getEarliestOffset(partition));
offsets.setLatestOffset(this.kafkaConsumerClient.get().getLatestOffset(partition));
} catch (Throwable t) {
failedToGetKafkaOffsets = true;
LOG.error("Caught error in creating work unit for {}", partition, t);
}
long previousOffset = 0;
long previousOffsetFetchEpochTime = 0;
boolean previousOffsetNotFound = false;
try {
previousOffset = getPreviousOffsetForPartition(partition, state);
offsets.setPreviousEndOffset(previousOffset);
offsets.setPreviousStartOffset(getPreviousLowWatermark(partition, state));
offsets.setPreviousStartFetchEpochTime(getPreviousStartFetchEpochTimeForPartition(partition, state));
offsets.setPreviousStopFetchEpochTime(getPreviousStopFetchEpochTimeForPartition(partition, state));
offsets.setPreviousLatestOffset(getPreviousExpectedHighWatermark(partition, state));
previousOffsetFetchEpochTime = getPreviousOffsetFetchEpochTimeForPartition(partition, state);
offsets.setPreviousOffsetFetchEpochTime(previousOffsetFetchEpochTime);
} catch (PreviousOffsetNotFoundException e) {
previousOffsetNotFound = true;
}
if (failedToGetKafkaOffsets) {
// Increment counts, which will be reported as job metrics
this.failToGetOffsetCount.incrementAndGet();
// When unable to get earliest/latest offsets from Kafka, skip the partition and create an empty workunit,
// so that previousOffset is persisted.
LOG.warn(String
.format("Failed to retrieve earliest and/or latest offset for partition %s. This partition will be skipped.",
partition));
return previousOffsetNotFound ? null : createEmptyWorkUnit(partition, previousOffset, previousOffsetFetchEpochTime,
topicSpecificState);
}
if (shouldMoveToLatestOffset(partition, state)) {
offsets.startAtLatestOffset();
} else if (previousOffsetNotFound) {
/**
* When previous offset cannot be found, either start at earliest offset, latest offset, go back with (latest - lookback)
* (long value to be deducted from latest offset in order to avoid data loss) or skip the partition
* (no need to create an empty workunit in this case since there's no offset to persist).
* In case of no previous state OFFSET_LOOKBACK will make sure to avoid consuming huge amount of data (earlist) and data loss (latest offset)
* lookback can be set to any long value where (latest-lookback) is nearest offset for each partition. If computed offset is out of range then
* partition will be consumed from latest offset
**/
String offsetNotFoundMsg = String.format("Previous offset for partition %s does not exist. ", partition);
String offsetOption = state.getProp(BOOTSTRAP_WITH_OFFSET, DEFAULT_BOOTSTRAP_WITH_OFFSET).toLowerCase();
if (offsetOption.equals(LATEST_OFFSET)) {
LOG.warn(offsetNotFoundMsg + "This partition will start from the latest offset: " + offsets.getLatestOffset());
offsets.startAtLatestOffset();
} else if (offsetOption.equals(EARLIEST_OFFSET)) {
LOG.warn(
offsetNotFoundMsg + "This partition will start from the earliest offset: " + offsets.getEarliestOffset());
offsets.startAtEarliestOffset();
} else if (offsetOption.equals(OFFSET_LOOKBACK)) {
long lookbackOffsetRange = state.getPropAsLong(KAFKA_OFFSET_LOOKBACK , 0L);
long latestOffset = offsets.getLatestOffset();
long offset = latestOffset - lookbackOffsetRange;
LOG.warn(offsetNotFoundMsg + "This partition will start from latest-lookback [ " + latestOffset + " - " + lookbackOffsetRange + " ] start offset: " + offset);
try {
offsets.startAt(offset);
} catch (StartOffsetOutOfRangeException e) {
// Increment counts, which will be reported as job metrics
if (offsets.getStartOffset() <= offsets.getLatestOffset()) {
this.offsetTooEarlyCount.incrementAndGet();
} else {
this.offsetTooLateCount.incrementAndGet();
}
// When above computed offset (latest-lookback) is out of range, either start at earliest, latest or nearest offset, or skip the
// partition. If skipping, need to create an empty workunit so that previousOffset is persisted.
String offsetOutOfRangeMsg = String.format(
"Start offset for partition %s is out of range. Start offset = %d, earliest offset = %d, latest offset = %d.",
partition, offsets.getStartOffset(), offsets.getEarliestOffset(), offsets.getLatestOffset());
offsetOption =
state.getProp(RESET_ON_OFFSET_OUT_OF_RANGE, DEFAULT_RESET_ON_OFFSET_OUT_OF_RANGE).toLowerCase();
if (offsetOption.equals(LATEST_OFFSET) || (offsetOption.equals(NEAREST_OFFSET)
&& offsets.getStartOffset() >= offsets.getLatestOffset())) {
LOG.warn(
offsetOutOfRangeMsg + "This partition will start from the latest offset: " + offsets.getLatestOffset());
offsets.startAtLatestOffset();
} else if (offsetOption.equals(EARLIEST_OFFSET) || offsetOption.equals(NEAREST_OFFSET)) {
LOG.warn(offsetOutOfRangeMsg + "This partition will start from the earliest offset: " + offsets
.getEarliestOffset());
offsets.startAtEarliestOffset();
} else {
LOG.warn(offsetOutOfRangeMsg + "This partition will be skipped.");
return createEmptyWorkUnit(partition, previousOffset, previousOffsetFetchEpochTime, topicSpecificState);
}
}
}
else {
LOG.warn(offsetNotFoundMsg + "This partition will be skipped.");
return null;
}
} else {
try {
offsets.startAt(previousOffset);
} catch (StartOffsetOutOfRangeException e) {
// Increment counts, which will be reported as job metrics
if (offsets.getStartOffset() <= offsets.getLatestOffset()) {
this.offsetTooEarlyCount.incrementAndGet();
} else {
this.offsetTooLateCount.incrementAndGet();
}
// When previous offset is out of range, either start at earliest, latest or nearest offset, or skip the
// partition. If skipping, need to create an empty workunit so that previousOffset is persisted.
String offsetOutOfRangeMsg = String.format(
"Start offset for partition %s is out of range. Start offset = %d, earliest offset = %d, latest offset = %d.",
partition, offsets.getStartOffset(), offsets.getEarliestOffset(), offsets.getLatestOffset());
String offsetOption =
state.getProp(RESET_ON_OFFSET_OUT_OF_RANGE, DEFAULT_RESET_ON_OFFSET_OUT_OF_RANGE).toLowerCase();
if (offsetOption.equals(LATEST_OFFSET) || (offsetOption.equals(NEAREST_OFFSET)
&& offsets.getStartOffset() >= offsets.getLatestOffset())) {
LOG.warn(
offsetOutOfRangeMsg + "This partition will start from the latest offset: " + offsets.getLatestOffset());
offsets.startAtLatestOffset();
} else if (offsetOption.equals(EARLIEST_OFFSET) || offsetOption.equals(NEAREST_OFFSET)) {
LOG.warn(offsetOutOfRangeMsg + "This partition will start from the earliest offset: " + offsets
.getEarliestOffset());
offsets.startAtEarliestOffset();
} else {
LOG.warn(offsetOutOfRangeMsg + "This partition will be skipped.");
return createEmptyWorkUnit(partition, previousOffset, previousOffsetFetchEpochTime, topicSpecificState);
}
}
}
WorkUnit workUnit = getWorkUnitForTopicPartition(partition, offsets, topicSpecificState);
addSourceStatePropsToWorkUnit(workUnit, state);
return workUnit;
}
/**
 * Copies selected job-level properties from the {@link SourceState} into the given
 * {@link WorkUnit} so they are visible to the task at runtime: the record-level SLA,
 * the observed-latency measurement settings (when enabled), and the job name/id.
 *
 * @param workUnit the work unit to receive the properties
 * @param state the source state the properties are read from
 */
private void addSourceStatePropsToWorkUnit(WorkUnit workUnit, SourceState state) {
  // Propagate the record-level SLA setting, if one was configured on the job.
  if (state.contains(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY)) {
    workUnit.setProp(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY, state.getProp(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY));
  }
  boolean observedLatencyEnabled = state.getPropAsBoolean(KafkaSource.OBSERVED_LATENCY_MEASUREMENT_ENABLED, DEFAULT_OBSERVED_LATENCY_MEASUREMENT_ENABLED);
  if (observedLatencyEnabled) {
    // Latency measurement needs to know which record field carries the creation timestamp.
    Preconditions.checkArgument(state.contains(KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD), "Missing config key: " + KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD);
    workUnit.setProp(KafkaSource.OBSERVED_LATENCY_MEASUREMENT_ENABLED, observedLatencyEnabled);
    workUnit.setProp(KafkaSource.MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS,
        state.getPropAsInt(KafkaSource.MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS, DEFAULT_MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS));
    workUnit.setProp(KafkaSource.OBSERVED_LATENCY_PRECISION,
        state.getPropAsInt(KafkaSource.OBSERVED_LATENCY_PRECISION, KafkaSource.DEFAULT_OBSERVED_LATENCY_PRECISION));
    workUnit.setProp(KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD, state.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD));
    // Timestamp unit defaults to milliseconds when unspecified.
    workUnit.setProp(KafkaSource.RECORD_CREATION_TIMESTAMP_UNIT, state.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_UNIT, TimeUnit.MILLISECONDS.name()));
  }
  // Copy the job identity keys verbatim when present.
  for (String jobKey : new String[] {ConfigurationKeys.JOB_NAME_KEY, ConfigurationKeys.JOB_ID_KEY}) {
    if (state.contains(jobKey)) {
      workUnit.setProp(jobKey, state.getProp(jobKey));
    }
  }
}
/**
 * Returns the fetch start epoch time recorded for this partition in the previous run,
 * or 0 if the partition had no recorded value.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @return the previous start-fetch epoch time, or 0 when absent
 */
private long getPreviousStartFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Lazily (and idempotently) populate the previous-run state maps.
  getAllPreviousOffsetState(state);
  // getOrDefault replaces the original non-atomic containsKey()/get() pair with a single lookup.
  return this.previousStartFetchEpochTimes.getOrDefault(partition, 0L);
}
/**
 * Returns the fetch stop epoch time recorded for this partition in the previous run,
 * or 0 if the partition had no recorded value.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @return the previous stop-fetch epoch time, or 0 when absent
 */
private long getPreviousStopFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Lazily (and idempotently) populate the previous-run state maps.
  getAllPreviousOffsetState(state);
  // getOrDefault replaces the original non-atomic containsKey()/get() pair with a single lookup.
  return this.previousStopFetchEpochTimes.getOrDefault(partition, 0L);
}
/**
 * Returns the epoch time at which this partition's offset was fetched in the previous run.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @throws PreviousOffsetNotFoundException if no fetch epoch time was recorded for the partition
 */
private long getPreviousOffsetFetchEpochTimeForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  // Guard clause: fail fast when the previous run recorded nothing for this partition.
  if (!this.previousOffsetFetchEpochTimes.containsKey(partition)) {
    throw new PreviousOffsetNotFoundException(String
        .format("Previous offset fetch epoch time for topic %s, partition %s not found.", partition.getTopicName(),
            partition.getId()));
  }
  return this.previousOffsetFetchEpochTimes.get(partition);
}
/**
 * Returns this partition's offset (actual high watermark) from the previous run.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @throws PreviousOffsetNotFoundException if no offset was recorded for the partition
 */
private long getPreviousOffsetForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  // Guard clause: fail fast when the previous run recorded nothing for this partition.
  if (!this.previousOffsets.containsKey(partition)) {
    throw new PreviousOffsetNotFoundException(String
        .format("Previous offset for topic %s, partition %s not found.", partition.getTopicName(), partition.getId()));
  }
  return this.previousOffsets.get(partition);
}
/**
 * Returns this partition's expected high watermark from the previous run.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @throws PreviousOffsetNotFoundException if no expected high watermark was recorded
 */
private long getPreviousExpectedHighWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  // Guard clause: fail fast when the previous run recorded nothing for this partition.
  if (!this.previousExpectedHighWatermarks.containsKey(partition)) {
    throw new PreviousOffsetNotFoundException(String
        .format("Previous expected high watermark for topic %s, partition %s not found.", partition.getTopicName(),
            partition.getId()));
  }
  return this.previousExpectedHighWatermarks.get(partition);
}
/**
 * Returns this partition's low watermark from the previous run.
 *
 * @param partition the partition to look up
 * @param state the source state, used to lazily load all previous-run offset state
 * @throws PreviousOffsetNotFoundException if no low watermark was recorded
 */
private long getPreviousLowWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  // Guard clause: fail fast when the previous run recorded nothing for this partition.
  if (!this.previousLowWatermarks.containsKey(partition)) {
    throw new PreviousOffsetNotFoundException(String
        .format("Previous low watermark for topic %s, partition %s not found.", partition.getTopicName(),
            partition.getId()));
  }
  return this.previousLowWatermarks.get(partition);
}
// need to be synchronized as this.previousOffsets, this.previousExpectedHighWatermarks, and
// this.previousOffsetFetchEpochTimes need to be initialized once
/**
 * One-shot, lazily-invoked loader of all per-partition state from the previous run:
 * offsets (actual high watermarks), low watermarks, expected high watermarks, and the
 * offset/start/stop fetch epoch times. After the first successful call all subsequent
 * calls return immediately via the {@code doneGettingAllPreviousOffsets} flag.
 */
private synchronized void getAllPreviousOffsetState(SourceState state) {
  // Already loaded once for this source instance; nothing to do.
  if (this.doneGettingAllPreviousOffsets) {
    return;
  }
  // Reset every map before repopulating, so a partial earlier attempt cannot leave stale entries.
  this.previousOffsets.clear();
  this.previousLowWatermarks.clear();
  this.previousExpectedHighWatermarks.clear();
  this.previousOffsetFetchEpochTimes.clear();
  this.previousStartFetchEpochTimes.clear();
  this.previousStopFetchEpochTimes.clear();
  Map<String, Iterable<WorkUnitState>> workUnitStatesByDatasetUrns = state.getPreviousWorkUnitStatesByDatasetUrns();
  // Dataset-state mode is considered enabled when previous states are keyed by at least one
  // non-empty dataset URN (a single empty-string key means no dataset URNs were used).
  if (!workUnitStatesByDatasetUrns.isEmpty() &&
      !(workUnitStatesByDatasetUrns.size() == 1 && workUnitStatesByDatasetUrns.keySet().iterator().next()
          .equals(""))) {
    this.isDatasetStateEnabled.set(true);
  }
  for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
    // A single previous work unit may cover multiple partitions (multi-workunit); the i-th
    // entry of each MultiLongWatermark corresponds to the i-th partition of this list.
    List<KafkaPartition> partitions = KafkaUtils.getPartitions(workUnitState);
    WorkUnit workUnit = workUnitState.getWorkunit();
    MultiLongWatermark watermark = workUnitState.getActualHighWatermark(MultiLongWatermark.class);
    MultiLongWatermark previousLowWatermark = workUnit.getLowWatermark(MultiLongWatermark.class);
    MultiLongWatermark previousExpectedHighWatermark = workUnit.getExpectedHighWatermark(MultiLongWatermark.class);
    Preconditions.checkArgument(partitions.size() == watermark.size(), String
        .format("Num of partitions doesn't match number of watermarks: partitions=%s, watermarks=%s", partitions,
            watermark));
    for (int i = 0; i < partitions.size(); i++) {
      KafkaPartition partition = partitions.get(i);
      // Skip sentinel default watermark values; only record real offsets/watermarks.
      if (watermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
        this.previousOffsets.put(partition, watermark.get(i));
      }
      if (previousLowWatermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
        this.previousLowWatermarks.put(partition, previousLowWatermark.get(i));
      }
      if (previousExpectedHighWatermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
        this.previousExpectedHighWatermarks.put(partition, previousExpectedHighWatermark.get(i));
      }
      // Fetch epoch times are always recorded (no sentinel filtering), read from either the
      // single work unit or the i-th sub-workunit of a multi-workunit state.
      this.previousOffsetFetchEpochTimes.put(partition,
          KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState, OFFSET_FETCH_EPOCH_TIME, i));
      this.previousStartFetchEpochTimes.put(partition,
          KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState, START_FETCH_EPOCH_TIME, i));
      this.previousStopFetchEpochTimes.put(partition,
          KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState, STOP_FETCH_EPOCH_TIME, i));
    }
  }
  // Mark done only after a full, successful pass.
  this.doneGettingAllPreviousOffsets = true;
}
/**
 * Decides whether the given partition's topic is configured, via
 * {@link #TOPICS_MOVE_TO_LATEST_OFFSET}, to jump straight to the latest offset.
 * The configured topic list (or the {@code ALL_TOPICS} wildcard) is parsed once and cached.
 *
 * Synchronized because the cached list is lazily built and this method may be invoked from
 * multiple work-unit-creator threads.
 */
private synchronized boolean shouldMoveToLatestOffset(KafkaPartition partition, SourceState state) {
  // No configuration at all means no topic moves to latest.
  if (!state.contains(TOPICS_MOVE_TO_LATEST_OFFSET)) {
    return false;
  }
  // Parse and cache the comma-separated topic list on first use.
  if (this.moveToLatestTopics.isEmpty()) {
    this.moveToLatestTopics.addAll(
        Splitter.on(',').trimResults().omitEmptyStrings().splitToList(state.getProp(TOPICS_MOVE_TO_LATEST_OFFSET)));
  }
  boolean allTopicsConfigured = this.moveToLatestTopics.contains(ALL_TOPICS);
  return allTopicsConfigured || this.moveToLatestTopics.contains(partition.getTopicName());
}
// thread safe
/**
 * Creates a zero-length work unit whose low and high watermarks both equal the previous
 * offset, so the previous watermark is carried forward even though no data will be pulled.
 * Thread safe: operates only on local state.
 *
 * @param partition the partition the empty work unit is for
 * @param previousOffset the watermark to persist as both start and latest offset
 * @param previousFetchEpochTime the previous run's offset-fetch epoch time to carry forward
 * @param topicSpecificState optional topic-level configuration overrides
 */
private WorkUnit createEmptyWorkUnit(KafkaPartition partition, long previousOffset, long previousFetchEpochTime,
    Optional<State> topicSpecificState) {
  Offsets emptyOffsets = new Offsets();
  emptyOffsets.setEarliestOffset(previousOffset);
  emptyOffsets.setLatestOffset(previousOffset);
  // Start at the (identical) earliest offset so start == latest and the unit pulls nothing.
  emptyOffsets.startAtEarliestOffset();
  emptyOffsets.setOffsetFetchEpochTime(previousFetchEpochTime);
  return getWorkUnitForTopicPartition(partition, emptyOffsets, topicSpecificState);
}
/**
 * Builds the {@link WorkUnit} for a single topic partition: resolves extract settings
 * (job-level defaults, optionally overridden per topic), stamps all offset/watermark and
 * fetch-epoch-time properties from {@code offsets}, and attaches Kafka lineage metadata.
 *
 * @param partition the partition the work unit will pull
 * @param offsets resolved start/latest offsets plus previous-run bookkeeping values
 * @param topicSpecificState optional per-topic overrides for extract table type/namespace/name
 * @return the fully configured work unit
 */
private WorkUnit getWorkUnitForTopicPartition(KafkaPartition partition, Offsets offsets,
    Optional<State> topicSpecificState) {
  // Default to job level configurations
  Extract.TableType currentTableType = tableType;
  String currentExtractNamespace = extractNamespace;
  String currentExtractTableName = partition.getTopicName();
  boolean isCurrentFullExtract = isFullExtract;
  // Update to topic specific configurations if any
  if (topicSpecificState.isPresent()) {
    State topicState = topicSpecificState.get();
    if (topicState.contains(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY)) {
      currentTableType = Extract.TableType.valueOf(topicState.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY));
    }
    currentExtractNamespace = topicState.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY, extractNamespace);
    currentExtractTableName =
        topicState.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, partition.getTopicName());
    isCurrentFullExtract = topicState.getPropAsBoolean(ConfigurationKeys.EXTRACT_IS_FULL_KEY, isFullExtract);
  }
  Extract extract = this.createExtract(currentTableType, currentExtractNamespace, currentExtractTableName);
  if (isCurrentFullExtract) {
    extract.setProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY, true);
  }
  WorkUnit workUnit = WorkUnit.create(extract);
  workUnit.setProp(TOPIC_NAME, partition.getTopicName());
  addDatasetUrnOptionally(workUnit);
  // Partition identity and leader location, used by the extractor to fetch data.
  workUnit.setProp(PARTITION_ID, partition.getId());
  workUnit.setProp(LEADER_ID, partition.getLeader().getId());
  workUnit.setProp(LEADER_HOSTANDPORT, partition.getLeader().getHostAndPort().toString());
  // Current-run pull range: [startOffset, latestOffset).
  workUnit.setProp(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY, offsets.getStartOffset());
  workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, offsets.getLatestOffset());
  // Previous-run bookkeeping, persisted so the next run (and metrics) can compare progress.
  workUnit.setProp(PREVIOUS_START_FETCH_EPOCH_TIME, offsets.getPreviousStartFetchEpochTime());
  workUnit.setProp(PREVIOUS_STOP_FETCH_EPOCH_TIME, offsets.getPreviousStopFetchEpochTime());
  workUnit.setProp(PREVIOUS_LOW_WATERMARK, offsets.getPreviousStartOffset());
  workUnit.setProp(PREVIOUS_HIGH_WATERMARK, offsets.getPreviousEndOffset());
  workUnit.setProp(PREVIOUS_OFFSET_FETCH_EPOCH_TIME, offsets.getPreviousOffsetFetchEpochTime());
  workUnit.setProp(OFFSET_FETCH_EPOCH_TIME, offsets.getOffsetFetchEpochTime());
  workUnit.setProp(PREVIOUS_LATEST_OFFSET, offsets.getPreviousLatestOffset());
  // Add lineage info
  DatasetDescriptor source = new DatasetDescriptor(DatasetConstants.PLATFORM_KAFKA, partition.getTopicName());
  source.addMetadata(DatasetConstants.BROKERS, kafkaBrokers);
  if (this.lineageInfo.isPresent()) {
    this.lineageInfo.get().setSource(source, workUnit);
  }
  LOG.info(String.format("Created workunit for partition %s: lowWatermark=%d, highWatermark=%d, range=%d", partition,
      offsets.getStartOffset(), offsets.getLatestOffset(), offsets.getLatestOffset() - offsets.getStartOffset()));
  return workUnit;
}
/**
 * Returns the topics visible to the consumer client after applying the job-level
 * whitelist and blacklist regex patterns.
 *
 * @param state the source state carrying the whitelist/blacklist configuration
 * @return topics that pass the filters
 */
protected List<KafkaTopic> getFilteredTopics(SourceState state) {
  List<Pattern> blacklistPatterns = DatasetFilterUtils.getPatternList(state, TOPIC_BLACKLIST);
  List<Pattern> whitelistPatterns = DatasetFilterUtils.getPatternList(state, TOPIC_WHITELIST);
  // TODO: replace this with TopicNameValidator in the config once TopicValidators is rolled out.
  // When periods in topic names are disallowed, blacklist every name containing one.
  if (!state.getPropAsBoolean(KafkaSource.ALLOW_PERIOD_IN_TOPIC_NAME, true)) {
    blacklistPatterns.add(Pattern.compile(".*\\..*"));
  }
  return kafkaConsumerClient.get().getFilteredTopics(blacklistPatterns, whitelistPatterns);
}
/**
 * Persists the offset-related counters accumulated during work unit creation into the
 * {@link SourceState} at shutdown, so they can be reported as job-level metrics.
 *
 * @param state the source state the counter values are written into
 */
@Override
public void shutdown(SourceState state) {
  // The counters are atomic counters (see incrementAndGet call sites above); setProp stores
  // their current values.
  state.setProp(ConfigurationKeys.OFFSET_TOO_EARLY_COUNT, this.offsetTooEarlyCount);
  state.setProp(ConfigurationKeys.OFFSET_TOO_LATE_COUNT, this.offsetTooLateCount);
  state.setProp(ConfigurationKeys.FAIL_TO_GET_OFFSET_COUNT, this.failToGetOffsetCount);
}
/**
 * Return topics that pass all the topic validators.
 *
 * @param topics candidate topics to validate
 * @param state the source state used to construct the configured {@link TopicValidators}
 * @return the subset of {@code topics} accepted by every validator
 */
protected List<KafkaTopic> getValidTopics(List<KafkaTopic> topics, SourceState state) {
  return new TopicValidators(state).validate(topics);
}
/**
 * This class contains startOffset, earliestOffset and latestOffset for a Kafka partition,
 * plus the previous run's bookkeeping values carried into the new work unit.
 * Mutable value holder; not thread safe — intended to be confined to one thread.
 */
private static class Offsets {
  // Offset the work unit will start pulling from; set only via the startAt* methods.
  @Getter
  private long startOffset = 0;
  // Earliest offset currently available in the partition.
  @Getter
  @Setter
  private long earliestOffset = 0;
  // Latest offset currently available in the partition (the pull target / high watermark).
  @Getter
  @Setter
  private long latestOffset = 0;
  // Epoch time at which the current offsets were fetched from Kafka.
  @Getter
  @Setter
  private long offsetFetchEpochTime = 0;
  // Epoch time at which offsets were fetched in the previous run.
  @Getter
  @Setter
  private long previousOffsetFetchEpochTime = 0;
  // The latest offset observed by the previous run.
  @Getter
  @Setter
  private long previousLatestOffset = 0;
  // previous low watermark
  @Getter
  @Setter
  private long previousStartOffset = 0;
  // previous actual high watermark
  @Getter
  @Setter
  private long previousEndOffset = 0;
  // Epoch time at which the previous run started fetching data.
  @Getter
  @Setter
  private long previousStartFetchEpochTime = 0;
  // Epoch time at which the previous run stopped fetching data.
  @Getter
  @Setter
  private long previousStopFetchEpochTime = 0;
  /**
   * Sets the start offset to the given value, which must lie within
   * [earliestOffset, latestOffset].
   *
   * @throws StartOffsetOutOfRangeException if the offset is outside the available range
   */
  private void startAt(long offset)
      throws StartOffsetOutOfRangeException {
    if (offset < this.earliestOffset || offset > this.latestOffset) {
      throw new StartOffsetOutOfRangeException(String
          .format("start offset = %d, earliest offset = %d, latest offset = %d", offset, this.earliestOffset,
              this.latestOffset));
    }
    this.startOffset = offset;
  }
  // Start from the earliest available offset (always in range by definition).
  private void startAtEarliestOffset() {
    this.startOffset = this.earliestOffset;
  }
  // Start from the latest available offset, i.e. pull nothing until new data arrives.
  private void startAtLatestOffset() {
    this.startOffset = this.latestOffset;
  }
}
/**
 * A {@link Runnable} that creates the work units for one topic and records them in a shared
 * map, keyed by topic name. Instances run in parallel on an executor; each run is timed via
 * the {@code workUnitsForTopicTimer} metric. A consumer client is bound to the executing
 * thread for the duration of the run: either the shared client (when configured) or one
 * borrowed from the preallocated client pool and returned in the finally block.
 */
private class WorkUnitCreator implements Runnable {
  // Metric name for timing work unit creation per topic.
  public static final String WORK_UNITS_FOR_TOPIC_TIMER = "workUnitsForTopicTimer";
  private final KafkaTopic topic;
  private final SourceState state;
  private final Optional<State> topicSpecificState;
  // Shared output map: topic name -> created work units. Written by multiple creator threads.
  private final Map<String, List<WorkUnit>> allTopicWorkUnits;
  // When present, restricts work unit creation to these partition ids of the topic.
  private final Optional<Set<Integer>> filteredPartitionsId;
  WorkUnitCreator(KafkaTopic topic, SourceState state, Optional<State> topicSpecificState,
      Map<String, List<WorkUnit>> workUnits) {
    // No partition filter: create work units for all partitions of the topic.
    this(topic, state, topicSpecificState, workUnits, Optional.absent());
  }
  WorkUnitCreator(KafkaTopic topic, SourceState state, Optional<State> topicSpecificState,
      Map<String, List<WorkUnit>> workUnits, Optional<Set<Integer>> filteredPartitionsId) {
    this.topic = topic;
    this.state = state;
    this.topicSpecificState = topicSpecificState;
    this.allTopicWorkUnits = workUnits;
    this.filteredPartitionsId = filteredPartitionsId;
  }
  @Override
  public void run() {
    try (Timer.Context context = metricContext.timer(WORK_UNITS_FOR_TOPIC_TIMER).time()) {
      // use shared client if configure, otherwise set a thread local one from the pool
      if (KafkaSource.this.sharedKafkaConsumerClient != null) {
        KafkaSource.this.kafkaConsumerClient.set(KafkaSource.this.sharedKafkaConsumerClient);
      } else {
        GobblinKafkaConsumerClient client = KafkaSource.this.kafkaConsumerClientPool.poll();
        Preconditions.checkNotNull(client, "Unexpectedly ran out of preallocated consumer clients");
        KafkaSource.this.kafkaConsumerClient.set(client);
      }
      this.allTopicWorkUnits.put(this.topic.getName(),
          KafkaSource.this.getWorkUnitsForTopic(this.topic, this.state, this.topicSpecificState, this.filteredPartitionsId));
    } catch (Throwable t) {
      // Rethrow so the executor surfaces the failure, but log with topic context first.
      LOG.error("Caught error in creating work unit for " + this.topic.getName(), t);
      throw new RuntimeException(t);
    } finally {
      // return the client to the pool
      if (KafkaSource.this.sharedKafkaConsumerClient == null) {
        KafkaSource.this.kafkaConsumerClientPool.offer(KafkaSource.this.kafkaConsumerClient.get());
        KafkaSource.this.kafkaConsumerClient.remove();
      }
    }
  }
}
} | 3,286 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.Collections;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import org.apache.gobblin.configuration.State;
/**
 * A kafka topic is composed of a topic name, and a list of partitions.
 * Instances are immutable: the partition list is deep-copied on construction and exposed
 * only as an unmodifiable view.
 *
 * @author Ziyang Liu
 *
 */
@EqualsAndHashCode
@ToString
public final class KafkaTopic {
  private final String name;
  private final List<KafkaPartition> partitions;
  // Fix: field was mutable although nothing ever reassigns it; final guarantees safe
  // publication and matches the class's immutable design.
  private final Optional<State> topicSpecificState;

  public KafkaTopic(String name, List<KafkaPartition> partitions) {
    this(name, partitions, Optional.<State>absent());
  }

  /**
   * @param name the topic name
   * @param partitions partitions of this topic; deep-copied so later mutation of the
   *        caller's objects (e.g. leader changes) does not affect this topic
   * @param topicSpecificState optional per-topic configuration overrides
   */
  public KafkaTopic(String name, List<KafkaPartition> partitions, Optional<State> topicSpecificState) {
    this.name = name;
    this.partitions = Lists.newArrayList();
    for (KafkaPartition partition : partitions) {
      this.partitions.add(new KafkaPartition(partition));
    }
    this.topicSpecificState = topicSpecificState;
  }

  public String getName() {
    return this.name;
  }

  /** @return an unmodifiable view of this topic's partitions. */
  public List<KafkaPartition> getPartitions() {
    return Collections.unmodifiableList(this.partitions);
  }

  public Optional<State> getTopicSpecificState() {
    return this.topicSpecificState;
  }
}
| 3,287 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import lombok.Getter;
/**
 * An immutable value holder for one Kafka message: its offset, key and payload.
 * Records are ordered by offset; equality and hashing consider offset, key and payload.
 */
@Getter
public class KafkaRecord implements Comparable<KafkaRecord> {

  private final Long offset;
  private final String key;
  private final String payload;

  public KafkaRecord(long offset, String key, String payload) {
    super();
    this.offset = offset;
    this.key = key;
    this.payload = payload;
  }

  @Override
  public String toString() {
    return "KafkaRecord [offset=" + this.offset + ", key=" + this.key + ", payload=" + this.payload + "]";
  }

  @Override
  public int hashCode() {
    // Standard 31-multiplier hash over (key, offset bits, payload); null fields hash as 0.
    final int prime = 31;
    int hash = 1;
    hash = prime * hash + (this.key == null ? 0 : this.key.hashCode());
    hash = prime * hash + (int) (this.offset ^ (this.offset >>> 32));
    hash = prime * hash + (this.payload == null ? 0 : this.payload.hashCode());
    return hash;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    KafkaRecord other = (KafkaRecord) obj;
    if (this.key == null ? other.key != null : !this.key.equals(other.key)) {
      return false;
    }
    if (!this.offset.equals(other.offset)) {
      return false;
    }
    return this.payload == null ? other.payload == null : this.payload.equals(other.payload);
  }

  /** Orders records by offset only. Note: not consistent with {@link #equals(Object)}. */
  @Override
  public int compareTo(KafkaRecord o) {
    return this.offset.compareTo(o.offset);
  }
}
| 3,288 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.math.RoundingMode;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.math.LongMath;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import org.apache.gobblin.source.extractor.Watermark;
/**
 * A {@link org.apache.gobblin.source.extractor.Watermark} backed by a list of long values,
 * one per partition handled by a work unit.
 *
 * @author Ziyang Liu
 */
public class MultiLongWatermark implements Watermark {

  private static final Gson GSON = new Gson();

  private final List<Long> values;

  /**
   * Copy constructor.
   */
  public MultiLongWatermark(MultiLongWatermark other) {
    this.values = Lists.newArrayList(other.values);
  }

  public MultiLongWatermark(List<Long> values) {
    // Copy so later mutation of the caller's list cannot affect this watermark.
    this.values = Lists.newArrayList(values);
  }

  /**
   * Increments the idx'th value by one. idx must be between 0 and size()-1, and the current
   * value must be below {@link Long#MAX_VALUE}.
   */
  public void increment(int idx) {
    Preconditions.checkElementIndex(idx, this.values.size());
    Preconditions.checkArgument(this.values.get(idx) < Long.MAX_VALUE);
    long incremented = this.values.get(idx) + 1;
    this.values.set(idx, incremented);
  }

  /**
   * Serializes this watermark into a JsonElement.
   */
  @Override
  public JsonElement toJson() {
    return GSON.toJsonTree(this);
  }

  /**
   * Given a low watermark (starting point) and a high watermark (target), returns the
   * percentage of events pulled so far.
   *
   * @return a percentage value between 0 and 100.
   */
  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    Preconditions.checkArgument(
        lowWatermark instanceof MultiLongWatermark && highWatermark instanceof MultiLongWatermark,
        String.format("Arguments of %s.%s must be of type %s", MultiLongWatermark.class.getSimpleName(),
            Thread.currentThread().getStackTrace()[1].getMethodName(), MultiLongWatermark.class.getSimpleName()));
    MultiLongWatermark low = (MultiLongWatermark) lowWatermark;
    long pulled = low.getGap(this);
    long all = low.getGap((MultiLongWatermark) highWatermark);
    Preconditions.checkState(all > 0);
    // Cap at 100 in case the actual watermark overshoots the expected one.
    long percent = Math.min(100, LongMath.divide(pulled * 100, all, RoundingMode.HALF_UP));
    return (short) percent;
  }

  /**
   * Returns the total number of records between this watermark and the given high watermark,
   * summed over all positions.
   */
  public long getGap(MultiLongWatermark highWatermark) {
    Preconditions.checkNotNull(highWatermark);
    Preconditions.checkArgument(this.values.size() == highWatermark.values.size());
    long gap = 0;
    int numValues = this.values.size();
    for (int idx = 0; idx < numValues; idx++) {
      long current = this.values.get(idx);
      long target = highWatermark.values.get(idx);
      // Each of our values must not exceed the corresponding high-watermark value.
      Preconditions.checkArgument(current <= target);
      gap += target - current;
    }
    return gap;
  }

  /**
   * @return the number of long values this watermark holds.
   */
  public int size() {
    return this.values.size();
  }

  /**
   * Returns the idx'th value. idx must be between 0 and size()-1.
   */
  public long get(int idx) {
    Preconditions.checkElementIndex(idx, this.values.size());
    return this.values.get(idx);
  }

  /**
   * Sets the idx'th value and returns the previous value. idx must be between 0 and size()-1.
   */
  public long set(int idx, long value) {
    Preconditions.checkElementIndex(idx, this.values.size());
    return this.values.set(idx, value);
  }

  @Override
  public String toString() {
    return this.values.toString();
  }
}
| 3,289 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import com.google.common.net.HostAndPort;
/**
 * A kafka topic partition.
 * Two partitions are considered equivalent if they have the same topic name and partition id. They may have different leaders.
 *
 * @author Ziyang Liu
 *
 */
public final class KafkaPartition {
  // Partition id within the topic; part of equals/hashCode.
  private final int id;
  // Name of the owning topic; part of equals/hashCode.
  private final String topicName;
  // Current leader broker. Mutable (see setLeader) and deliberately excluded from
  // equals/hashCode, per the class contract above.
  private KafkaLeader leader;
  // Lazily computed hash cache. Non-volatile is acceptable: the hash derives only from the
  // final fields id and topicName, so concurrent callers may each compute it but always get
  // the same value. A computed hash of 0 is never cached and would be recomputed each call.
  private int hashCode;
  /** Builder for {@link KafkaPartition}. Unset fields default to 0 / empty string / null. */
  public static class Builder {
    private int id = 0;
    private String topicName = "";
    private int leaderId = 0;
    private HostAndPort leaderHostAndPort;
    public Builder withId(int id) {
      this.id = id;
      return this;
    }
    public Builder withTopicName(String topicName) {
      this.topicName = topicName;
      return this;
    }
    public Builder withLeaderId(int leaderId) {
      this.leaderId = leaderId;
      return this;
    }
    // Accepts "host:port" strings.
    public Builder withLeaderHostAndPort(String hostPortString) {
      this.leaderHostAndPort = HostAndPort.fromString(hostPortString);
      return this;
    }
    public Builder withLeaderHostAndPort(String host, int port) {
      this.leaderHostAndPort = HostAndPort.fromParts(host, port);
      return this;
    }
    public KafkaPartition build() {
      return new KafkaPartition(this);
    }
  }
  /** Copy constructor; also copies the leader (the hash cache is recomputed lazily). */
  public KafkaPartition(KafkaPartition other) {
    this.topicName = other.topicName;
    this.id = other.id;
    this.leader = new KafkaLeader(other.leader.id, other.leader.hostAndPort);
  }
  private KafkaPartition(Builder builder) {
    this.id = builder.id;
    this.topicName = builder.topicName;
    this.leader = new KafkaLeader(builder.leaderId, builder.leaderHostAndPort);
  }
  public KafkaLeader getLeader() {
    return this.leader;
  }
  public String getTopicName() {
    return this.topicName;
  }
  public int getId() {
    return this.id;
  }
  // Leader may change between runs; replacing it does not affect equality or hashing.
  public void setLeader(int leaderId, String leaderHost, int leaderPort) {
    this.leader = new KafkaLeader(leaderId, HostAndPort.fromParts(leaderHost, leaderPort));
  }
  @Override
  public String toString() {
    return this.getTopicName() + "-" + this.getId();
  }
  @Override
  public int hashCode() {
    int result = hashCode;
    if (result == 0) {
      // Compute once from the immutable identity fields and cache.
      final int prime = 31;
      result = 1;
      result = prime * result + this.id;
      result = prime * result + ((this.topicName == null) ? 0 : this.topicName.hashCode());
      hashCode = result;
    }
    return result;
  }
  // Equality is defined on (topicName, id) only; the leader is intentionally ignored.
  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof KafkaPartition)) {
      return false;
    }
    KafkaPartition other = (KafkaPartition) obj;
    if (this.id != other.id) {
      return false;
    }
    if (this.topicName == null) {
      if (other.topicName != null) {
        return false;
      }
    } else if (!this.topicName.equals(other.topicName)) {
      return false;
    }
    return true;
  }
  /** Immutable descriptor of a partition's leader broker: its id and network address. */
  public final static class KafkaLeader {
    private final int id;
    private final HostAndPort hostAndPort;
    public int getId() {
      return this.id;
    }
    public HostAndPort getHostAndPort() {
      return this.hostAndPort;
    }
    public KafkaLeader(int id, HostAndPort hostAndPort) {
      this.id = id;
      this.hostAndPort = hostAndPort;
    }
  }
}
| 3,290 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.Properties;
import org.apache.avro.Schema;
import com.google.common.annotations.VisibleForTesting;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
/**
* Extension of {@link KafkaSchemaRegistry} that wraps Confluent's {@link SchemaRegistryClient}.
*
* <p>
* While Confluent's Schema Registry Client API provides more functionality that Gobblin's {@link KafkaSchemaRegistry},
* most of the methods are not necessary for Gobblin's Kafka Adaptor. Thus only a subset of the
* {@link SchemaRegistryClient} methods are used.
* </p>
*
* <p>
* Like the {@link KafkaSchemaRegistry} this class allows fetching a {@link Schema} by a unique {@link Integer} id
* that uniquely identifies the {@link Schema}. It is also capable of fetching the latest {@link Schema} for a topic.
* </p>
*/
@Slf4j
public class ConfluentKafkaSchemaRegistry extends KafkaSchemaRegistry<Integer, Schema> {
  /** Config key bounding how many schema versions the Confluent client caches per subject. */
  public static final String CONFLUENT_MAX_SCHEMAS_PER_SUBJECT =
      "kafka.schema_registry.confluent.max_schemas_per_subject";
  /** Config key overriding the subject-name suffix appended to topic names. */
  public static final String CONFLUENT_SCHEMA_NAME_SUFFIX = "kafka.schema_registry.confluent.schema_name_suffix";
  // Default suffix of the topic name to register / retrieve from the registry
  private static final String DEFAULT_CONFLUENT_SCHEMA_NAME_SUFFIX = "-value";

  @Getter
  private final SchemaRegistryClient schemaRegistryClient;
  private final String schemaNameSuffix;

  /** Builds a registry backed by a {@link CachedSchemaRegistryClient} pointed at the configured URL. */
  public ConfluentKafkaSchemaRegistry(Properties props) {
    this(props, new CachedSchemaRegistryClient(props.getProperty(KAFKA_SCHEMA_REGISTRY_URL),
        Integer.parseInt(props.getProperty(CONFLUENT_MAX_SCHEMAS_PER_SUBJECT, String.valueOf(Integer.MAX_VALUE)))));
  }

  @VisibleForTesting
  ConfluentKafkaSchemaRegistry(Properties props, SchemaRegistryClient schemaRegistryClient) {
    super(props);
    this.schemaRegistryClient = schemaRegistryClient;
    this.schemaNameSuffix = props.getProperty(CONFLUENT_SCHEMA_NAME_SUFFIX, DEFAULT_CONFLUENT_SCHEMA_NAME_SUFFIX);
  }

  /** Looks up a {@link Schema} by its registry-assigned numeric id. */
  @Override
  protected Schema fetchSchemaByKey(Integer key) throws SchemaRegistryException {
    try {
      return this.schemaRegistryClient.getByID(key);
    } catch (IOException | RestClientException e) {
      throw new SchemaRegistryException(e);
    }
  }

  /** Fetches the newest registered schema for {@code topic}, using the configured subject suffix. */
  @Override
  public Schema getLatestSchemaByTopic(String topic) throws SchemaRegistryException {
    String schemaName = topic + this.schemaNameSuffix;
    try {
      String latestSchema = this.schemaRegistryClient.getLatestSchemaMetadata(schemaName).getSchema();
      return new Schema.Parser().parse(latestSchema);
    } catch (IOException | RestClientException e) {
      log.error("Failed to get schema for topic " + topic + "; subject " + schemaName);
      throw new SchemaRegistryException(e);
    }
  }

  /** Registers {@code schema} under its own name (plus the configured subject suffix). */
  @Override
  public Integer register(Schema schema) throws SchemaRegistryException {
    return register(schema, schema.getName());
  }

  /** Registers {@code schema} under {@code name} (plus the configured subject suffix). */
  @Override
  public Integer register(Schema schema, String name) throws SchemaRegistryException {
    String schemaName = name + this.schemaNameSuffix;
    try {
      return this.schemaRegistryClient.register(schemaName, schema);
    } catch (IOException | RestClientException e) {
      throw new SchemaRegistryException(e);
    }
  }
}
| 3,291 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaIngestionHealthCheck.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.EvictingQueue;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent;
import org.apache.gobblin.util.eventbus.EventBusFactory;
@Slf4j
@Alias(value = "KafkaIngestionHealthCheck")
public class KafkaIngestionHealthCheck implements CommitStep {
  public static final String KAFKA_INGESTION_HEALTH_CHECK_PREFIX = "gobblin.kafka.healthCheck.";
  public static final String KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE_KEY = KAFKA_INGESTION_HEALTH_CHECK_PREFIX + "slidingWindow.size";
  public static final String KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES_KEY = KAFKA_INGESTION_HEALTH_CHECK_PREFIX + "ingestionLatency.minutes";
  public static final String KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION_KEY = KAFKA_INGESTION_HEALTH_CHECK_PREFIX + "consumptionRate.dropOffFraction";
  public static final String KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED_KEY = KAFKA_INGESTION_HEALTH_CHECK_PREFIX + "increasingLatencyCheckEnabled";
  public static final int DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE = 3;
  public static final long DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES = 15;
  public static final double DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION = 0.7;
  private static final boolean DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED = true;

  private final Config config;
  // Global event bus for container health-check failures; may be null if lookup failed.
  private final EventBus eventBus;
  private final KafkaExtractorStatsTracker statsTracker;
  // Target consumption rate in MBps, taken from the workunit packer's container capacity.
  private final double expectedConsumptionRate;
  private final double consumptionRateDropOffFraction;
  private final long ingestionLatencyThresholdMinutes;
  private final int slidingWindowSize;
  // Bounded windows of the most recent per-epoch observations.
  private final EvictingQueue<Long> ingestionLatencies;
  private final EvictingQueue<Double> consumptionRateMBps;
  private final boolean increasingLatencyCheckEnabled;

  /**
   * Creates a health check that observes ingestion latency and consumption rate over a sliding
   * window of {@link #slidingWindowSize} epochs, posting a {@link ContainerHealthCheckFailureEvent}
   * when the container appears unhealthy.
   *
   * @param config job configuration providing thresholds and window size
   * @param statsTracker tracker supplying per-epoch latency and consumption-rate statistics
   */
  public KafkaIngestionHealthCheck(Config config, KafkaExtractorStatsTracker statsTracker) {
    this.config = config;
    this.slidingWindowSize = ConfigUtils.getInt(config, KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE);
    this.ingestionLatencyThresholdMinutes = ConfigUtils.getLong(config, KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES);
    this.consumptionRateDropOffFraction = ConfigUtils.getDouble(config, KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION);
    this.expectedConsumptionRate = ConfigUtils.getDouble(config, KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY, KafkaTopicGroupingWorkUnitPacker.DEFAULT_CONTAINER_CAPACITY);
    this.increasingLatencyCheckEnabled = ConfigUtils.getBoolean(config, KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED);
    this.ingestionLatencies = EvictingQueue.create(this.slidingWindowSize);
    this.consumptionRateMBps = EvictingQueue.create(this.slidingWindowSize);
    EventBus eventBus;
    try {
      eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
          SharedResourcesBrokerFactory.getImplicitBroker());
    } catch (IOException e) {
      log.error("Could not find EventBus instance for container health check", e);
      eventBus = null;
    }
    this.eventBus = eventBus;
    this.statsTracker = statsTracker;
  }

  /**
   *
   * @return true if (i) ingestionLatency in the each of the recent epochs exceeds the threshold latency , AND (ii)
   * if {@link KafkaIngestionHealthCheck#increasingLatencyCheckEnabled} is true, the latency
   * is increasing over these epochs.
   */
  private boolean checkIngestionLatency() {
    // Use a primitive to avoid boxed-Long comparison; -1 is below any real latency so the
    // monotonicity check trivially passes for the first observation.
    long previousLatency = -1L;
    for (long ingestionLatency : ingestionLatencies) {
      if (ingestionLatency < this.ingestionLatencyThresholdMinutes) {
        return false;
      } else {
        if (this.increasingLatencyCheckEnabled) {
          if (previousLatency > ingestionLatency) {
            // Latency decreased between epochs; not a monotonically increasing trend.
            return false;
          }
          previousLatency = ingestionLatency;
        }
      }
    }
    return true;
  }

  /**
   * Determine whether the commit step has been completed.
   */
  @Override
  public boolean isCompleted()
      throws IOException {
    // The health check runs on every flush and is never "done".
    return false;
  }

  /**
   * @return Return a serialized string representation of health check report.
   */
  private String getHealthCheckReport() {
    return String.format("Ingestion Latencies = %s, Ingestion Latency Threshold = %s minutes, "
            + "Consumption Rates = %s, Target Consumption Rate = %s MBps", this.ingestionLatencies.toString(),
        this.ingestionLatencyThresholdMinutes, this.consumptionRateMBps.toString(), this.expectedConsumptionRate);
  }

  /**
   * Execute the commit step. The execute method gets the maximum ingestion latency and the consumption rate and emits
   * a {@link ContainerHealthCheckFailureEvent} if the following conditions are satisfied:
   * <li>
   *   <ul>The ingestion latency increases monotonically over the {@link KafkaIngestionHealthCheck#slidingWindowSize} intervals, AND </ul>
   *   <ul>The maximum consumption rate over the {@link KafkaIngestionHealthCheck#slidingWindowSize} intervals is smaller than
   *   {@link KafkaIngestionHealthCheck#consumptionRateDropOffFraction} * {@link KafkaIngestionHealthCheck#expectedConsumptionRate}</ul>.
   * </li>
   *
   * The {@link ContainerHealthCheckFailureEvent} is posted to a global event bus. The handlers of this event type
   * can perform suitable actions based on the execution environment.
   */
  @Override
  public void execute() {
    this.ingestionLatencies.add(this.statsTracker.getMaxIngestionLatency(TimeUnit.MINUTES));
    this.consumptionRateMBps.add(this.statsTracker.getConsumptionRateMBps());
    // getMaxConsumptionRate() returns the MAX over the sliding window; the variable and log
    // messages previously said "Avg.", which misreported what was being compared.
    double maxConsumptionRate = getMaxConsumptionRate();
    log.info("Max. Consumption Rate = {} MBps, Target Consumption rate = {} MBps", maxConsumptionRate, this.expectedConsumptionRate);
    if (ingestionLatencies.size() < this.slidingWindowSize) {
      // Not enough observations yet to make a judgement.
      log.info("SUCCESS: Num observations: {} smaller than {}", ingestionLatencies.size(), this.slidingWindowSize);
      return;
    }
    if (!checkIngestionLatency()) {
      log.info("SUCCESS: Ingestion Latencies = {}, Ingestion Latency Threshold: {}", this.ingestionLatencies.toString(), this.ingestionLatencyThresholdMinutes);
      return;
    }
    if (maxConsumptionRate > this.consumptionRateDropOffFraction * this.expectedConsumptionRate) {
      log.info("SUCCESS: Max. Consumption Rate = {} MBps, Target Consumption rate = {} MBps", maxConsumptionRate, this.expectedConsumptionRate);
      return;
    }
    log.error("FAILED: {}", getHealthCheckReport());
    if (this.eventBus != null) {
      log.info("Posting {} message to EventBus", ContainerHealthCheckFailureEvent.class.getSimpleName());
      ContainerHealthCheckFailureEvent event = new ContainerHealthCheckFailureEvent(this.config, getClass().getName());
      event.addMetadata("ingestionLatencies", this.ingestionLatencies.toString());
      event.addMetadata("consumptionRates", this.consumptionRateMBps.toString());
      event.addMetadata("ingestionLatencyThreshold", Long.toString(this.ingestionLatencyThresholdMinutes));
      event.addMetadata("targetConsumptionRate", Double.toString(this.expectedConsumptionRate));
      this.eventBus.post(event);
    }
  }

  /** @return the largest non-negative consumption rate observed in the window, or 0.0 if none. */
  private double getMaxConsumptionRate() {
    return consumptionRateMBps.stream().mapToDouble(consumptionRate -> consumptionRate)
        .filter(consumptionRate -> consumptionRate >= 0.0).max().orElse(0.0);
  }
}
| 3,292 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/PreviousOffsetNotFoundException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
/**
 * Thrown when the previously committed offset for a Kafka topic partition cannot be found,
 * e.g. when no prior watermark state exists for the partition.
 */
@SuppressWarnings("serial")
public class PreviousOffsetNotFoundException extends Exception {

  public PreviousOffsetNotFoundException(String message) {
    super(message);
  }

  /**
   * Backward-compatible addition: allows chaining the underlying cause so callers do not have to
   * discard the original exception's stack trace.
   *
   * @param message description of the missing-offset condition
   * @param cause the underlying failure, preserved for diagnostics
   */
  public PreviousOffsetNotFoundException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 3,293 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaStreamingExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.avro.Schema;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.AtomicDouble;
import com.google.gson.JsonElement;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.KafkaConsumerRecord;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.extractor.WatermarkSerializerHelper;
import org.apache.gobblin.source.extractor.extract.FlushingExtractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.stream.FlushRecordEnvelope;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.source.extractor.extract.kafka.KafkaProduceRateTracker.KAFKA_PARTITION_PRODUCE_RATE_KEY;
import static org.apache.gobblin.source.extractor.extract.kafka.KafkaSource.DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS;
import static org.apache.gobblin.source.extractor.extract.kafka.KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaTopicGroupingWorkUnitPacker.NUM_PARTITIONS_ASSIGNED;
/**
* An implementation of {@link org.apache.gobblin.source.extractor.Extractor} which reads from Kafka and returns records .
* Type of record depends on deserializer set.
*/
@Slf4j
public class KafkaStreamingExtractor<S> extends FlushingExtractor<S, DecodeableKafkaRecord> {
public static final String DATASET_KEY = "dataset";
public static final String DATASET_PARTITION_KEY = "datasetPartition";
private static final Long MAX_LOG_ERRORS = 100L;
private static final String KAFKA_EXTRACTOR_STATS_REPORTING_INTERVAL_MINUTES_KEY =
"gobblin.kafka.extractor.statsReportingIntervalMinutes";
private static final Long DEFAULT_KAFKA_EXTRACTOR_STATS_REPORTING_INTERVAL_MINUTES = 1L;
private final ClassAliasResolver<GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory>
kafkaConsumerClientResolver;
private final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1);
private final Map<String, AtomicDouble> consumerMetricsGauges = new ConcurrentHashMap<>();
private final KafkaExtractorStatsTracker statsTracker;
private final KafkaProduceRateTracker produceRateTracker;
private final List<KafkaPartition> partitions;
private final long extractorStatsReportingTimeIntervalMillis;
//Mapping from Kafka Partition Id to partition index
@Getter
private final Map<Integer, Integer> partitionIdToIndexMap;
private final String recordCreationTimestampFieldName;
private final TimeUnit recordCreationTimestampUnit;
private Iterator<KafkaConsumerRecord> messageIterator = null;
private long readStartTime;
private long lastExtractorStatsReportingTime;
private Map<KafkaPartition, Long> latestOffsetMap = Maps.newHashMap();
protected MultiLongWatermark lowWatermark;
protected MultiLongWatermark highWatermark;
protected MultiLongWatermark nextWatermark;
protected Map<Integer, DecodeableKafkaRecord> perPartitionLastSuccessfulRecord;
private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);
@Override
public void shutdown() {
this.scheduledExecutorService.shutdownNow();
try {
boolean shutdown = this.scheduledExecutorService.awaitTermination(5, TimeUnit.SECONDS);
if (!shutdown) {
log.error("Could not shutdown metrics collection threads in 5 seconds.");
}
} catch (InterruptedException e) {
log.error("Interrupted when attempting to shutdown metrics collection threads.");
}
this.shutdownRequested.set(true);
super.shutdown();
}
@ToString
public static class KafkaWatermark implements CheckpointableWatermark {
@Getter
KafkaPartition topicPartition;
LongWatermark _lwm;
//Average TopicPartition Produce Rate by hour-of-day and day-of-week in records/sec.
@Getter
@Setter
double[][] avgProduceRates;
//Average consume rate for the topic when backlogged.
@Getter
@Setter
double avgConsumeRate = -1.0;
@Getter
@Setter
long avgRecordSize;
@VisibleForTesting
public KafkaWatermark(KafkaPartition topicPartition, LongWatermark lwm) {
this.topicPartition = topicPartition;
_lwm = lwm;
}
@Override
public String getSource() {
return topicPartition.toString();
}
@Override
public ComparableWatermark getWatermark() {
return _lwm;
}
@Override
public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
return 0;
}
@Override
public JsonElement toJson() {
return WatermarkSerializerHelper.convertWatermarkToJson(this);
}
@Override
public int compareTo(CheckpointableWatermark o) {
Preconditions.checkArgument(o instanceof KafkaWatermark);
KafkaWatermark ko = (KafkaWatermark) o;
Preconditions.checkArgument(topicPartition.equals(ko.topicPartition));
return _lwm.compareTo(ko._lwm);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (!(obj instanceof KafkaWatermark)) {
return false;
}
return this.compareTo((CheckpointableWatermark) obj) == 0;
}
@Override
public int hashCode() {
final int prime = 31;
return topicPartition.hashCode() * prime + _lwm.hashCode();
}
public LongWatermark getLwm() {
return _lwm;
}
}
AtomicLong _rowCount = new AtomicLong(0);
protected final Optional<KafkaSchemaRegistry<String, S>> _schemaRegistry;
protected final GobblinKafkaConsumerClient kafkaConsumerClient;
private final List<KafkaPartition> topicPartitions; // list of topic partitions assigned to this extractor
public KafkaStreamingExtractor(WorkUnitState state) {
super(state);
this.kafkaConsumerClientResolver =
new ClassAliasResolver<>(GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory.class);
try {
this.kafkaConsumerClient = this.closer.register(
this.kafkaConsumerClientResolver.resolveClass(state.getProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS))
.newInstance()
.create(ConfigUtils.propertiesToConfig(state.getProperties())));
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new RuntimeException(e);
}
this._schemaRegistry = state.contains(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS) ? Optional.of(
KafkaSchemaRegistry.<String, S>get(state.getProperties())) : Optional.<KafkaSchemaRegistry<String, S>>absent();
this.topicPartitions = getTopicPartitionsFromWorkUnit(state);
this.kafkaConsumerClient.assignAndSeek(topicPartitions, getTopicPartitionWatermarks(this.topicPartitions));
this.messageIterator = this.kafkaConsumerClient.consume();
this.partitions = KafkaUtils.getPartitions(state);
this.partitionIdToIndexMap = Maps.newHashMapWithExpectedSize(this.partitions.size());
try {
this.latestOffsetMap = this.kafkaConsumerClient.getLatestOffsets(this.partitions);
} catch (KafkaOffsetRetrievalFailureException e) {
e.printStackTrace();
}
this.statsTracker = new KafkaExtractorStatsTracker(state, partitions);
this.produceRateTracker = new KafkaProduceRateTracker(state, partitions, getWatermarkTracker(), statsTracker);
this.extractorStatsReportingTimeIntervalMillis =
state.getPropAsLong(KAFKA_EXTRACTOR_STATS_REPORTING_INTERVAL_MINUTES_KEY,
DEFAULT_KAFKA_EXTRACTOR_STATS_REPORTING_INTERVAL_MINUTES) * 60 * 1000;
resetExtractorStatsAndWatermarks(true);
//Even though we haven't start ingesting yet, emit event to indicate the container transition.
submitEventToIndicateContainerTransition();
//Schedule a thread for reporting Kafka consumer metrics
this.scheduledExecutorService.scheduleAtFixedRate(() -> {
Map<String, Metric> codahaleMetricMap = kafkaConsumerClient.getMetrics();
for (Map.Entry<String, Metric> metricEntry : codahaleMetricMap.entrySet()) {
if (log.isDebugEnabled()) {
log.debug("Metric name: {}, Value: {}", metricEntry.getKey(),
((Gauge<Double>) metricEntry.getValue()).getValue());
}
consumerMetricsGauges.computeIfAbsent(metricEntry.getKey(), k -> {
AtomicDouble d = new AtomicDouble();
ContextAwareGauge<Double> consumerMetricGauge =
getMetricContext().newContextAwareGauge(metricEntry.getKey(), () -> d.get());
getMetricContext().register(metricEntry.getKey(), consumerMetricGauge);
return d;
}).set(((Gauge<Double>) metricEntry.getValue()).getValue());
}
}, 0, 60, TimeUnit.SECONDS);
this.recordCreationTimestampFieldName =
this.workUnitState.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD, null);
this.recordCreationTimestampUnit = TimeUnit.valueOf(
this.workUnitState.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_UNIT, TimeUnit.MILLISECONDS.name()));
}
private void submitEventToIndicateContainerTransition() {
if (this.isInstrumentationEnabled()) {
this.statsTracker.submitEventToIndicateContainerTransition(getMetricContext());
}
}
private Map<KafkaPartition, LongWatermark> getTopicPartitionWatermarks(List<KafkaPartition> topicPartitions) {
List<String> topicPartitionStrings =
topicPartitions.stream().map(topicPartition -> topicPartition.toString()).collect(Collectors.toList());
// read watermarks from storage
Map<String, CheckpointableWatermark> kafkaWatermarkMap =
super.getCommittedWatermarks(KafkaWatermark.class, topicPartitionStrings);
Map<KafkaPartition, LongWatermark> longWatermarkMap = new HashMap<>();
for (KafkaPartition topicPartition : topicPartitions) {
String topicPartitionString = topicPartition.toString();
if (kafkaWatermarkMap.containsKey(topicPartitionString)) {
LongWatermark longWatermark = ((KafkaWatermark) kafkaWatermarkMap.get(topicPartitionString)).getLwm();
longWatermarkMap.put(topicPartition, longWatermark);
} else {
longWatermarkMap.put(topicPartition, new LongWatermark(0L));
}
}
for (Map.Entry<KafkaPartition, LongWatermark> entry : longWatermarkMap.entrySet()) {
log.info("Retrieved watermark {} for partition {}", entry.getValue().toString(), entry.getKey().toString());
}
return longWatermarkMap;
}
public static List<KafkaPartition> getTopicPartitionsFromWorkUnit(WorkUnitState state) {
// what topic partitions are we responsible for?
List<KafkaPartition> topicPartitions = new ArrayList<>();
WorkUnit workUnit = state.getWorkunit();
String topicNameProp = KafkaSource.TOPIC_NAME;
int numOfPartitions =
workUnit.contains(NUM_PARTITIONS_ASSIGNED) ? Integer.parseInt(workUnit.getProp(NUM_PARTITIONS_ASSIGNED)) : 0;
for (int i = 0; i < numOfPartitions; ++i) {
if (workUnit.getProp(topicNameProp, null) == null) {
log.warn("There's no topic.name property being set in workunit which could be an illegal state");
break;
}
String topicName = workUnit.getProp(topicNameProp);
String partitionIdProp = KafkaSource.PARTITION_ID + "." + i;
int partitionId = workUnit.getPropAsInt(partitionIdProp);
KafkaPartition topicPartition = new KafkaPartition.Builder().withTopicName(topicName).withId(partitionId).build();
topicPartitions.add(topicPartition);
}
return topicPartitions;
}
/**
* Get the schema (metadata) of the extracted data records.
*
* @return the schema of Kafka topic being extracted
*/
@Override
public S getSchema() {
try {
if(this._schemaRegistry.isPresent()) {
return (S)(Schema) this._schemaRegistry.get().getLatestSchemaByTopic(this.topicPartitions.get(0).getTopicName());
}
return (S) this.topicPartitions.iterator().next().getTopicName();
} catch (SchemaRegistryException e) {
e.printStackTrace();
}
return null;
}
@Override
public List<Tag<?>> generateTags(State state) {
List<Tag<?>> tags = super.generateTags(state);
String clusterIdentifier = ClustersNames.getInstance().getClusterName();
tags.add(new Tag<>("clusterIdentifier", clusterIdentifier));
return tags;
}
  /**
   * Return the next record. Return null if we're shutdown.
   *
   * Loops until a non-null-valued record is available: when the current message iterator is
   * exhausted it re-polls the consumer client, returning a {@link FlushRecordEnvelope} if the
   * flush interval has elapsed in the meantime. Null-valued records are counted and skipped.
   * Any decoding failure is counted and rethrown wrapped in an {@link IOException}.
   */
  @SuppressWarnings("unchecked")
  @Override
  public RecordEnvelope<DecodeableKafkaRecord> readRecordEnvelopeImpl() throws IOException {
    if (this.shutdownRequested.get()) {
      return null;
    }
    this.readStartTime = System.nanoTime();
    long fetchStartTime = System.nanoTime();
    try {
      DecodeableKafkaRecord kafkaConsumerRecord;
      while(true) {
        // Refill the iterator from the consumer client until at least one message is available.
        while (this.messageIterator == null || !this.messageIterator.hasNext()) {
          Long currentTime = System.currentTimeMillis();
          //it's time to flush, so break the while loop and directly return null
          if ((currentTime - timeOfLastFlush) > this.flushIntervalMillis) {
            return new FlushRecordEnvelope();
          }
          try {
            fetchStartTime = System.nanoTime();
            this.messageIterator = this.kafkaConsumerClient.consume();
          } catch (Exception e) {
            // Best-effort: log and retry on the next loop iteration rather than failing the task.
            log.error("Failed to consume from Kafka", e);
          }
        }
        kafkaConsumerRecord = (DecodeableKafkaRecord) this.messageIterator.next();
        if (kafkaConsumerRecord.getValue() != null) {
          break;
        } else {
          //Filter the null-valued records early, so that they do not break the pipeline downstream.
          if (shouldLogError()) {
            log.error("Encountered a null-valued record at offset: {}, partition: {}", kafkaConsumerRecord.getOffset(),
                kafkaConsumerRecord.getPartition());
          }
          this.statsTracker.onNullRecord(this.partitionIdToIndexMap.get(kafkaConsumerRecord.getPartition()));
        }
      }
      int partitionIndex = this.partitionIdToIndexMap.get(kafkaConsumerRecord.getPartition());
      this.statsTracker.onFetchNextMessageBuffer(partitionIndex, fetchStartTime);
      // track time for converting KafkaConsumerRecord to a RecordEnvelope
      long decodeStartTime = System.nanoTime();
      KafkaPartition topicPartition =
          new KafkaPartition.Builder().withTopicName(kafkaConsumerRecord.getTopic()).withId(kafkaConsumerRecord.getPartition()).build();
      // The watermark carries the record's own offset; nextWatermark below advances to offset + 1.
      RecordEnvelope<DecodeableKafkaRecord> recordEnvelope = new RecordEnvelope(kafkaConsumerRecord,
          new KafkaWatermark(topicPartition, new LongWatermark(kafkaConsumerRecord.getOffset())));
      recordEnvelope.setRecordMetadata("topicPartition", topicPartition);
      recordEnvelope.setRecordMetadata(DATASET_KEY, topicPartition.getTopicName());
      recordEnvelope.setRecordMetadata(DATASET_PARTITION_KEY, "" + topicPartition.getId());
      this.statsTracker.onDecodeableRecord(partitionIndex, readStartTime, decodeStartTime,
          kafkaConsumerRecord.getValueSizeInBytes(),
          kafkaConsumerRecord.isTimestampLogAppend() ? kafkaConsumerRecord.getTimestamp() : 0L,
          (this.recordCreationTimestampFieldName != null) ? kafkaConsumerRecord.getRecordCreationTimestamp(
              this.recordCreationTimestampFieldName, this.recordCreationTimestampUnit) : 0L);
      this.perPartitionLastSuccessfulRecord.put(partitionIndex, kafkaConsumerRecord);
      this.nextWatermark.set(partitionIndex, kafkaConsumerRecord.getNextOffset());
      return recordEnvelope;
    } catch (Throwable t) {
      // NOTE(review): partition index is hard-coded to 0 here, so undecodeable-record stats may be
      // attributed to the wrong partition when multiple partitions are assigned — confirm intent.
      this.statsTracker.onUndecodeableRecord(0);
      if (shouldLogError()) {
        log.error("Error when decoding a Kafka consumer record");
      }
      throw new IOException("Error in extraction", t);
    }
  }
private boolean shouldLogError() {
return (this.statsTracker.getUndecodableMessageCount() + this.statsTracker.getNullRecordCount()) <= MAX_LOG_ERRORS;
}
@Override
protected void onFlushAck() throws IOException {
try {
//Refresh the latest offsets of TopicPartitions processed by the KafkaExtractor.
this.latestOffsetMap = this.kafkaConsumerClient.getLatestOffsets(this.partitions);
} catch (KafkaOffsetRetrievalFailureException e) {
log.error("Unable to retrieve latest offsets due to {}", e);
}
long currentTime = System.currentTimeMillis();
//Update the watermarks to include the current topic partition produce rates
this.produceRateTracker.writeProduceRateToKafkaWatermarks(this.latestOffsetMap, getLastCommittedWatermarks(),
this.highWatermark, currentTime);
// Assemble additional tags to be part of GTE, for now only Partition-ProduceRate.
Map<KafkaPartition, Map<String, String>> additionalTags = getAdditionalTagsHelper();
//Commit offsets to the watermark storage.
super.onFlushAck();
//Emit GobblinTrackingEvent with current extractor stats and reset them before the next epoch starts.
if (this.isInstrumentationEnabled()) {
if (currentTime - this.lastExtractorStatsReportingTime > this.extractorStatsReportingTimeIntervalMillis) {
for (int partitionIndex = 0; partitionIndex < this.partitions.size(); partitionIndex++) {
this.statsTracker.updateStatisticsForCurrentPartition(partitionIndex, readStartTime,
getLastSuccessfulRecordHeaderTimestamp(partitionIndex));
}
Map<KafkaPartition, Map<String, String>> tagsForPartitions =
this.statsTracker.generateTagsForPartitions(lowWatermark, highWatermark, nextWatermark, additionalTags);
this.statsTracker.emitTrackingEvents(getMetricContext(), tagsForPartitions);
this.resetExtractorStatsAndWatermarks(false);
this.lastExtractorStatsReportingTime = currentTime;
}
}
}
@Override
public CommitStep initCommitStep(String commitStepAlias, boolean isPrecommit) throws IOException {
try {
log.info("Instantiating {}", commitStepAlias);
return (CommitStep) GobblinConstructorUtils.invokeLongestConstructor(
new ClassAliasResolver(CommitStep.class).resolveClass(commitStepAlias), config, statsTracker);
} catch (ReflectiveOperationException e) {
throw new IOException(e);
}
}
/**
* A helper function to transform a Map<KafkaPartition, Double> to Map<KafkaPartition, Map<String, String>>.
* If hard to read: Using Collectors.toMap method to construct inline-initialized Map.
*/
Map<KafkaPartition, Map<String, String>> getAdditionalTagsHelper() {
return produceRateTracker.getPartitionsToProdRate()
.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey,
value -> Stream.of(new AbstractMap.SimpleEntry<>(KAFKA_PARTITION_PRODUCE_RATE_KEY, value.toString()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
  /**
   * Resets per-partition extractor statistics and watermark bookkeeping at the start of a new
   * stats-reporting epoch. When {@code isInit} is true, also performs one-time initialization
   * (partition-id index map, zeroed low/high watermarks, last-successful-record map).
   *
   * Side effects: rewrites the PREVIOUS_* per-partition properties on {@code workUnitState},
   * recomputes {@code lowWatermark} from the last committed watermarks, refreshes
   * {@code highWatermark} from {@code latestOffsetMap}, replaces {@code nextWatermark}, updates
   * error counters on the work unit state, and resets {@code statsTracker}.
   *
   * @param isInit true when called for first-time initialization rather than an epoch rollover
   */
  @VisibleForTesting
  public void resetExtractorStatsAndWatermarks(boolean isInit) {
    if (isInit) {
      //Initialize nextwatermark, highwatermark and lowwatermarks for Extractor stats reporting.
      this.perPartitionLastSuccessfulRecord = Maps.newHashMapWithExpectedSize(this.partitions.size());
      this.lastExtractorStatsReportingTime = System.currentTimeMillis();
      this.lowWatermark =
          new MultiLongWatermark(this.partitions.stream().map(partition -> 0L).collect(Collectors.toList()));
      this.highWatermark =
          new MultiLongWatermark(this.partitions.stream().map(partition -> 0L).collect(Collectors.toList()));
    }
    // Clear stale aggregate PREVIOUS_* props; per-partition variants are re-set in the loop below.
    this.workUnitState.removeProp(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME);
    this.workUnitState.removeProp(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME);
    this.workUnitState.removeProp(KafkaSource.PREVIOUS_LOW_WATERMARK);
    this.workUnitState.removeProp(KafkaSource.PREVIOUS_HIGH_WATERMARK);
    this.workUnitState.removeProp(KafkaSource.PREVIOUS_LATEST_OFFSET);
    int partitionIndex = 0;
    for (KafkaPartition partition : partitions) {
      if (isInit) {
        this.partitionIdToIndexMap.put(partition.getId(), partitionIndex);
      }
      // Persist the just-finished epoch's watermarks/fetch times as PREVIOUS_* for the next epoch.
      this.workUnitState.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_HIGH_WATERMARK, partitionIndex),
          this.highWatermark.get(partitionIndex));
      this.workUnitState.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_LOW_WATERMARK, partitionIndex),
          this.lowWatermark.get(partitionIndex));
      this.workUnitState.setProp(
          KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME, partitionIndex),
          this.statsTracker.getStatsMap().get(partitions.get(partitionIndex)).getStartFetchEpochTime());
      this.workUnitState.setProp(
          KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME, partitionIndex),
          this.statsTracker.getStatsMap().get(partitions.get(partitionIndex)).getStopFetchEpochTime());
      this.workUnitState.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_LATEST_OFFSET, partitionIndex),
          this.highWatermark.get(partitionIndex));
      // Resume from one past the last committed offset; 0 if nothing has been committed yet.
      KafkaWatermark kafkaWatermark = (KafkaWatermark) this.lastCommittedWatermarks.get(partition.toString());
      long lowWatermarkValue = 0L;
      if (kafkaWatermark != null) {
        lowWatermarkValue = kafkaWatermark.getLwm().getValue() + 1;
      }
      this.lowWatermark.set(partitionIndex, lowWatermarkValue);
      if (latestOffsetMap.containsKey(partition)) {
        this.highWatermark.set(partitionIndex, latestOffsetMap.get(partition));
      }
      partitionIndex++;
    }
    this.nextWatermark = new MultiLongWatermark(this.lowWatermark);
    // Add error partition count and error message count to workUnitState
    this.workUnitState.setProp(ConfigurationKeys.ERROR_PARTITION_COUNT, this.statsTracker.getErrorPartitionCount());
    this.workUnitState.setProp(ConfigurationKeys.ERROR_MESSAGE_UNDECODABLE_COUNT,
        this.statsTracker.getUndecodableMessageCount());
    this.workUnitState.setActualHighWatermark(this.nextWatermark);
    //Reset stats tracker
    this.statsTracker.reset();
  }
  /**
   * Hook for subclasses to report the header timestamp of the last successfully processed record
   * for the given partition. This base implementation always returns 0.
   */
  protected long getLastSuccessfulRecordHeaderTimestamp(int partitionId) {
    return 0;
  }
  /**
   * Callback that asks the extractor to remove work from its plate.
   * Not yet implemented: always returns {@code false} (nothing was removed).
   * @param workUnitState the work unit whose partitions should be dropped
   * @return false, since removal is currently unsupported
   */
  public boolean onWorkUnitRemove(WorkUnitState workUnitState) {
    // TODO: check if these topic partitions actually were part of the assignment
    // add to queue of flush control messages
    // set up ack on them
    return false;
  }
public boolean onWorkUnitAdd(WorkUnitState workUnitState) {
List<KafkaPartition> newTopicPartitions = getTopicPartitionsFromWorkUnit(workUnitState);
// get watermarks for these topic partitions
Map<KafkaPartition, LongWatermark> topicWatermarksMap = getTopicPartitionWatermarks(newTopicPartitions);
this.topicPartitions.addAll(newTopicPartitions);
this.kafkaConsumerClient.assignAndSeek(topicPartitions, topicWatermarksMap);
return true;
}
  /**
   * @return the current value of {@code _rowCount} — NOTE(review): this reflects a running row
   * count maintained elsewhere in this class rather than a precomputed expectation; confirm that
   * is the intended semantics for "expected" record count.
   */
  @Override
  public long getExpectedRecordCount() {
    return _rowCount.get();
  }
  /**
   * Closes all resources registered with this extractor's {@code closer}.
   */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
  /**
   * @deprecated watermarks are tracked per partition via {@code MultiLongWatermark}; this legacy
   * single-value accessor always returns 0.
   */
  @Deprecated
  @Override
  public long getHighWatermark() {
    return 0;
  }
  /**
   * @return a string listing the topic partitions currently assigned to this extractor.
   */
  @Override
  public String toString() {
    return topicPartitions.toString();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.Getter;
import org.apache.gobblin.KafkaCommonUtil;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory;
import org.apache.gobblin.kafka.client.KafkaConsumerRecord;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobShutdownException;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.EventBasedExtractor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.configuration.ConfigurationKeys.KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY;
/**
* An implementation of {@link Extractor} for Apache Kafka. Each {@link KafkaExtractor} processes
* one or more partitions of the same topic.
*
* @author Ziyang Liu
*/
public abstract class KafkaExtractor<S, D> extends EventBasedExtractor<S, D> {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaExtractor.class);

  private final ClassAliasResolver<GobblinKafkaConsumerClientFactory> kafkaConsumerClientResolver;
  // Set by shutdown(); once true, readRecordImpl() returns null so the task can wind down.
  private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);
  // Optional name of the record field carrying its creation timestamp; null when not configured.
  private final String recordCreationTimestampFieldName;
  private final TimeUnit recordCreationTimestampUnit;
  // Buffer of messages fetched for the current partition; null forces a fresh fetch.
  private Iterator<KafkaConsumerRecord> messageIterator = null;
  @Getter
  private int currentPartitionIdx = INITIAL_PARTITION_IDX;
  @Getter
  private long readStartTime;

  protected static final int INITIAL_PARTITION_IDX = -1;
  // Cap on the number of decoding errors logged per partition, to avoid flooding the log.
  protected static final Long MAX_LOG_DECODING_ERRORS = 5L;

  protected final WorkUnitState workUnitState;
  protected final String topicName;
  protected final List<KafkaPartition> partitions;
  protected final MultiLongWatermark lowWatermark;
  protected final MultiLongWatermark highWatermark;
  // Next offset to consume per partition; reported as the actual high watermark on close().
  protected final MultiLongWatermark nextWatermark;
  protected final KafkaExtractorStatsTracker statsTracker;
  protected final GobblinKafkaConsumerClient kafkaConsumerClient;
  protected D currentPartitionLastSuccessfulRecord = null;

  /**
   * Builds the consumer client from the configured factory class, initializes the watermarks from
   * the work unit, and sets up stats tracking.
   *
   * @param state the work unit state carrying topic, partitions, watermarks and consumer config
   * @throws RuntimeException if the consumer client factory cannot be instantiated
   */
  public KafkaExtractor(WorkUnitState state) {
    super(state);
    this.workUnitState = state;
    this.topicName = KafkaUtils.getTopicName(state);
    this.partitions = KafkaUtils.getPartitions(state);
    this.lowWatermark = state.getWorkunit().getLowWatermark(MultiLongWatermark.class);
    this.highWatermark = state.getWorkunit().getExpectedHighWatermark(MultiLongWatermark.class);
    this.nextWatermark = new MultiLongWatermark(this.lowWatermark);
    this.kafkaConsumerClientResolver = new ClassAliasResolver<>(GobblinKafkaConsumerClientFactory.class);
    try {
      this.kafkaConsumerClient =
          this.closer.register(this.kafkaConsumerClientResolver
              .resolveClass(
                  state.getProp(KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
                      KafkaSource.DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS)).newInstance()
              .create(ConfigUtils.propertiesToConfig(state.getProperties())));
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
    this.statsTracker = new KafkaExtractorStatsTracker(state, partitions);
    // The actual high watermark starts with the low watermark
    this.workUnitState.setActualHighWatermark(this.lowWatermark);
    this.recordCreationTimestampFieldName = this.workUnitState.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_FIELD, null);
    this.recordCreationTimestampUnit = TimeUnit.valueOf(this.workUnitState.getProp(KafkaSource.RECORD_CREATION_TIMESTAMP_UNIT, TimeUnit.MILLISECONDS.name()));
  }

  /**
   * Adds the Kafka topic name to the tags inherited from {@link EventBasedExtractor}.
   */
  @Override
  public List<Tag<?>> generateTags(State state) {
    List<Tag<?>> tags = super.generateTags(state);
    tags.add(new Tag<>("kafkaTopic", KafkaUtils.getTopicName(state)));
    return tags;
  }

  /**
   * @return the partition currently being extracted.
   * @throws IndexOutOfBoundsException if all partitions have already been processed
   */
  protected KafkaPartition getCurrentPartition() {
    Preconditions.checkElementIndex(this.currentPartitionIdx, this.partitions.size(),
        "KafkaExtractor has finished extracting all partitions. There's no current partition.");
    return this.partitions.get(this.currentPartitionIdx);
  }

  /**
   * Return the next decodable record from the current partition. If the current partition has no more
   * decodable record, move on to the next partition. If all partitions have been processed, return null.
   */
  @SuppressWarnings("unchecked")
  @Override
  public D readRecordImpl(D reuse) throws DataRecordException, IOException {
    if (this.shutdownRequested.get()) {
      return null;
    }
    this.readStartTime = System.nanoTime();
    while (!allPartitionsFinished()) {
      if (currentPartitionFinished()) {
        moveToNextPartition();
        continue;
      }
      if (this.messageIterator == null || !this.messageIterator.hasNext()) {
        try {
          long fetchStartTime = System.nanoTime();
          this.messageIterator = fetchNextMessageBuffer();
          this.statsTracker.onFetchNextMessageBuffer(this.currentPartitionIdx, fetchStartTime);
        } catch (Exception e) {
          // Fetch failures skip the partition rather than fail the task; the watermark stays put.
          LOG.error(String.format("Failed to fetch next message buffer for partition %s. Will skip this partition.",
              getCurrentPartition()), e);
          moveToNextPartition();
          continue;
        }
        if (this.messageIterator == null || !this.messageIterator.hasNext()) {
          moveToNextPartition();
          continue;
        }
      }
      while (!currentPartitionFinished()) {
        if (!this.messageIterator.hasNext()) {
          break;
        }
        KafkaConsumerRecord nextValidMessage = this.messageIterator.next();
        // Even though we ask Kafka to give us a message buffer starting from offset x, it may
        // return a buffer that starts from offset smaller than x, so we need to skip messages
        // until we get to x.
        if (nextValidMessage.getOffset() < this.nextWatermark.get(this.currentPartitionIdx)) {
          continue;
        }
        this.nextWatermark.set(this.currentPartitionIdx, nextValidMessage.getNextOffset());
        try {
          // track time for decode/convert depending on the record type
          long decodeStartTime = System.nanoTime();
          D record = decodeKafkaMessage(nextValidMessage);
          this.statsTracker.onDecodeableRecord(this.currentPartitionIdx, readStartTime, decodeStartTime,
              nextValidMessage.getValueSizeInBytes(), nextValidMessage.isTimestampLogAppend() ? nextValidMessage.getTimestamp() : 0L,
              (this.recordCreationTimestampFieldName != null) ? nextValidMessage
                  .getRecordCreationTimestamp(this.recordCreationTimestampFieldName, this.recordCreationTimestampUnit) : 0L);
          this.currentPartitionLastSuccessfulRecord = record;
          return record;
        } catch (Throwable t) {
          // Undecodeable records are counted and (up to a cap) logged, then skipped.
          statsTracker.onUndecodeableRecord(this.currentPartitionIdx);
          if (shouldLogError()) {
            LOG.error(String.format("A record from partition %s cannot be decoded.", getCurrentPartition()), t);
          }
        }
      }
    }
    LOG.info("Finished pulling topic " + this.topicName);
    return null;
  }

  /**
   * Decodes/converts a consumer record into the output record type, dispatching on the concrete
   * {@link KafkaConsumerRecord} subtype.
   *
   * @throws DataRecordException if a {@link DecodeableKafkaRecord} carries a null value
   * @throws IllegalStateException if the record is of an unsupported subtype
   */
  protected D decodeKafkaMessage(KafkaConsumerRecord message) throws DataRecordException, IOException {
    D record = null;
    if (message instanceof ByteArrayBasedKafkaRecord) {
      record = decodeRecord((ByteArrayBasedKafkaRecord) message);
    } else if (message instanceof DecodeableKafkaRecord) {
      // if value is null then this is a bad record that is returned for further error handling, so raise an error
      if (((DecodeableKafkaRecord) message).getValue() == null) {
        throw new DataRecordException("Could not decode Kafka record");
      }
      // get value from decodeable record and convert to the output schema if necessary
      record = convertRecord(((DecodeableKafkaRecord<?, D>) message).getValue());
    } else {
      throw new IllegalStateException(
          "Unsupported KafkaConsumerRecord type. The returned record can either be ByteArrayBasedKafkaRecord"
              + " or DecodeableKafkaRecord");
    }
    return record;
  }

  /**
   * Requests a graceful shutdown; the next call to {@link #readRecordImpl} returns null.
   */
  @Override
  public void shutdown()
      throws JobShutdownException {
    this.shutdownRequested.set(true);
  }

  /** @return true once the partition index has advanced past the last assigned partition. */
  private boolean allPartitionsFinished() {
    return this.currentPartitionIdx != INITIAL_PARTITION_IDX && this.currentPartitionIdx >= this.highWatermark.size();
  }

  /** @return true if the current partition's next offset has reached its expected high watermark. */
  private boolean currentPartitionFinished() {
    if (this.currentPartitionIdx == INITIAL_PARTITION_IDX) {
      return true;
    } else if (this.nextWatermark.get(this.currentPartitionIdx) >= this.highWatermark.get(this.currentPartitionIdx)) {
      LOG.info("Finished pulling partition " + getCurrentPartition());
      return true;
    } else {
      return false;
    }
  }

  /**
   * Record the avg time per record for the current partition, then increment this.currentPartitionIdx,
   * and switch metric context to the new partition.
   */
  private void moveToNextPartition() {
    if (this.currentPartitionIdx == INITIAL_PARTITION_IDX) {
      LOG.info("Pulling topic " + this.topicName);
      this.currentPartitionIdx = 0;
    } else {
      this.statsTracker.updateStatisticsForCurrentPartition(currentPartitionIdx, readStartTime, getLastSuccessfulRecordHeaderTimestamp());
      this.currentPartitionIdx++;
      this.currentPartitionLastSuccessfulRecord = null;
    }
    this.messageIterator = null;
    if (this.currentPartitionIdx < this.partitions.size()) {
      LOG.info(String.format("Pulling partition %s from offset %d to %d, range=%d", getCurrentPartition(),
          this.nextWatermark.get(this.currentPartitionIdx), this.highWatermark.get(this.currentPartitionIdx),
          this.highWatermark.get(this.currentPartitionIdx) - this.nextWatermark.get(this.currentPartitionIdx)));
      switchMetricContextToCurrentPartition();
    }
    if (!allPartitionsFinished()) {
      this.statsTracker.resetStartFetchEpochTime(currentPartitionIdx);
    }
  }

  /**
   * Hook for subclasses to report the header timestamp of the last successfully processed record;
   * this base implementation always returns 0.
   */
  protected long getLastSuccessfulRecordHeaderTimestamp() {
    return 0;
  }

  /** Switches the metric context so subsequent metrics are tagged with the current partition id. */
  private void switchMetricContextToCurrentPartition() {
    if (this.currentPartitionIdx >= this.partitions.size()) {
      return;
    }
    int currentPartitionId = getCurrentPartition().getId();
    switchMetricContext(Lists.<Tag<?>> newArrayList(new Tag<>("kafka_partition", currentPartitionId)));
  }

  /** Fetches the next batch of messages for the current partition from the consumer client. */
  private Iterator<KafkaConsumerRecord> fetchNextMessageBuffer() {
    return this.kafkaConsumerClient.consume(this.partitions.get(this.currentPartitionIdx),
        this.nextWatermark.get(this.currentPartitionIdx), this.highWatermark.get(this.currentPartitionIdx));
  }

  /** @return true while the per-partition decoding-error count is within the logging cap. */
  private boolean shouldLogError() {
    return this.statsTracker.getDecodingErrorCount(this.currentPartitionIdx) <= MAX_LOG_DECODING_ERRORS;
  }

  /**
   * Decodes a byte-array based Kafka record into the output record type.
   */
  protected abstract D decodeRecord(ByteArrayBasedKafkaRecord kafkaConsumerRecord) throws IOException;

  /**
   * Convert a record to the output format
   * @param record the input record
   * @return the converted record
   * @throws IOException
   */
  protected D convertRecord(D record) throws IOException {
    // default implementation does no conversion
    return record;
  }

  /**
   * @return the total number of records expected, i.e. the gap between low and high watermarks.
   */
  @Override
  public long getExpectedRecordCount() {
    return this.lowWatermark.getGap(this.highWatermark);
  }

  /**
   * Finalizes statistics for the in-flight partition, records the actual high watermark and error
   * counters on the work unit state, and emits tracking events when instrumentation is enabled.
   */
  @Override
  public void close() throws IOException {
    if (!allPartitionsFinished() && currentPartitionIdx != INITIAL_PARTITION_IDX) {
      this.statsTracker.updateStatisticsForCurrentPartition(currentPartitionIdx, readStartTime, getLastSuccessfulRecordHeaderTimestamp());
    }
    // Add error partition count and error message count to workUnitState
    this.workUnitState.setProp(ConfigurationKeys.ERROR_PARTITION_COUNT, this.statsTracker.getErrorPartitionCount());
    this.workUnitState.setProp(ConfigurationKeys.ERROR_MESSAGE_UNDECODABLE_COUNT, this.statsTracker.getUndecodableMessageCount());
    this.workUnitState.setActualHighWatermark(this.nextWatermark);
    // Need to call this even when not emitting metrics because some state, such as the average pull time,
    // is updated when the tags are generated
    Map<KafkaPartition, Map<String, String>> tagsForPartitionsMap = this.statsTracker.generateTagsForPartitions(
        this.lowWatermark, this.highWatermark, this.nextWatermark, Maps.newHashMap());
    if (isInstrumentationEnabled()) {
      this.statsTracker.emitTrackingEvents(getMetricContext(), tagsForPartitionsMap);
    }
  }

  /**
   * @deprecated watermarks are tracked per partition via {@code MultiLongWatermark}; always 0.
   */
  @Deprecated
  @Override
  public long getHighWatermark() {
    return 0;
  }

  /**
   * Resolves the single configured Kafka broker URI to its simple (human-readable) cluster name
   * using the broker-to-simple-name map from the configuration.
   *
   * @param state configuration state; must define exactly one broker under KAFKA_BROKERS
   * @return the simple name, or the empty string if the broker URI is not in the map
   * @throws IllegalArgumentException if KAFKA_BROKERS is missing or lists more than one broker
   */
  public static String getKafkaBrokerSimpleName(State state) {
    Preconditions.checkArgument(state.contains(ConfigurationKeys.KAFKA_BROKERS), String.format("%s is not defined in"
        + " the configuration.", ConfigurationKeys.KAFKA_BROKERS));
    List<String> kafkaBrokerUriList = state.getPropAsList(ConfigurationKeys.KAFKA_BROKERS);
    Preconditions.checkArgument(kafkaBrokerUriList.size() == 1,
        String.format("The %s only supports having exactly one kafka broker defined for %s. "
            + "This is partially because the watermark implementation (e.g. %s class) does not have a schema that supports writing watermarks that contains offsets "
            + "from multiple brokers in a single job", KafkaExtractor.class.getSimpleName(),
            ConfigurationKeys.KAFKA_BROKERS, KafkaStreamingExtractor.KafkaWatermark.class.getName()));
    String brokerUri = kafkaBrokerUriList.get(0);
    Map<String, String> brokerToSimpleName = KafkaCommonUtil.getKafkaBrokerToSimpleNameMap(state);
    if (!brokerToSimpleName.containsKey(brokerUri)) {
      // Fixed a broken SLF4J message: "configMapValue=%{}" had a stray '%' before the placeholder.
      LOG.warn("Unable to find simple name for the kafka cluster broker uri in the config. Please check the map value of {}. brokerUri={}, configMapValue={}",
          KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY, brokerUri, brokerToSimpleName);
      return "";
    }
    return brokerToSimpleName.get(brokerUri);
  }
}
| 3,295 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramLogWriter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import lombok.AccessLevel;
import lombok.Data;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator;
import org.apache.gobblin.util.TaskEventMetadataUtils;
/**
* A class that tracks KafkaExtractor statistics such as record decode time, #processed records, #undecodeable records etc.
*
*/
@Slf4j
public class KafkaExtractorStatsTracker {
  // Constants for event submission (metadata keys attached to GobblinTrackingEvents).
  public static final String TOPIC = "topic";
  public static final String BROKER_NAME = "brokerName";
  public static final String PARTITION = "partition";
  private static final String EMPTY_STRING = "";
  private static final String GOBBLIN_KAFKA_NAMESPACE = "gobblin.kafka";
  private static final String KAFKA_EXTRACTOR_CONTAINER_TRANSITION_EVENT_NAME = "KafkaExtractorContainerTransitionEvent";
  private static final String KAFKA_EXTRACTOR_TOPIC_METADATA_EVENT_NAME = "KafkaExtractorTopicMetadata";
  private static final String LOW_WATERMARK = "lowWatermark";
  private static final String ACTUAL_HIGH_WATERMARK = "actualHighWatermark";
  private static final String EXPECTED_HIGH_WATERMARK = "expectedHighWatermark";
  private static final String ELAPSED_TIME = "elapsedTime";
  private static final String PROCESSED_RECORD_COUNT = "processedRecordCount";
  private static final String SLA_MISSED_RECORD_COUNT = "slaMissedRecordCount";
  private static final String MIN_LOG_APPEND_TIMESTAMP = "minLogAppendTimestamp";
  private static final String MAX_LOG_APPEND_TIMESTAMP = "maxLogAppendTimestamp";
  private static final String UNDECODABLE_MESSAGE_COUNT = "undecodableMessageCount";
  private static final String NULL_RECORD_COUNT = "nullRecordCount";
  private static final String PARTITION_TOTAL_SIZE = "partitionTotalSize";
  private static final String AVG_RECORD_PULL_TIME = "avgRecordPullTime";
  private static final String AVG_RECORD_SIZE = "avgRecordSize";
  private static final String READ_RECORD_TIME = "readRecordTime";
  private static final String DECODE_RECORD_TIME = "decodeRecordTime";
  private static final String FETCH_MESSAGE_BUFFER_TIME = "fetchMessageBufferTime";
  private static final String LAST_RECORD_HEADER_TIMESTAMP = "lastRecordHeaderTimestamp";
  private static final String OBSERVED_LATENCY_HISTOGRAM = "observedLatencyHistogram";

  // Per-partition statistics, keyed by the partitions assigned to this task.
  @Getter
  private final Map<KafkaPartition, ExtractorStats> statsMap;
  // Indices of partitions that produced at least one undecodeable record.
  private final Set<Integer> errorPartitions;
  private final WorkUnitState workUnitState;
  private final TaskEventMetadataGenerator taskEventMetadataGenerator;
  // Non-null only when observed-latency measurement is enabled; see the constructor.
  @Getter
  private final Histogram observedLatencyHistogram;
  // True when RECORD_LEVEL_SLA_MINUTES_KEY is configured on the work unit.
  private boolean isSlaConfigured;
  private long recordLevelSlaMillis;
  private String brokerName;
  //Minimum partition index processed by this task. Statistics that are aggregated across all partitions (e.g. observed latency histogram)
  // processed by the task are reported against this partition index.
  private int minPartitionIdx = Integer.MAX_VALUE;
  //A global count of number of undecodeable messages encountered by the KafkaExtractor across all Kafka
  //TopicPartitions.
  @Getter
  private int undecodableMessageCount = 0;
  @Getter
  private int nullRecordCount = 0;
  private List<KafkaPartition> partitions;
  // Upper bound on latencies recorded in the histogram; larger observations are discarded.
  private long maxPossibleLatency;
  //Extractor stats aggregated across all partitions processed by the extractor.
  @Getter (AccessLevel.PACKAGE)
  @VisibleForTesting
  private AggregateExtractorStats aggregateExtractorStats = new AggregateExtractorStats();
  //Aggregate stats for the extractor derived from the most recently completed epoch
  private AggregateExtractorStats lastAggregateExtractorStats;
  /**
   * Creates a stats tracker for the given partitions: initializes one {@link ExtractorStats} per
   * partition, records the smallest partition id (used for reporting task-level aggregates),
   * configures the optional record-level SLA, and builds the observed-latency histogram when
   * latency measurement is enabled.
   *
   * @param state the work unit state carrying tracker configuration
   * @param partitions the Kafka partitions assigned to this task
   */
  public KafkaExtractorStatsTracker(WorkUnitState state, List<KafkaPartition> partitions) {
    this.workUnitState = state;
    this.brokerName = KafkaExtractor.getKafkaBrokerSimpleName(workUnitState);
    this.partitions = partitions;
    this.statsMap = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.partitions.forEach(partition -> {
      this.statsMap.put(partition, new ExtractorStats());
      // Track the smallest partition id; task-wide aggregates are reported against it.
      if (partition.getId() < minPartitionIdx) {
        minPartitionIdx = partition.getId();
      }
    });
    this.errorPartitions = Sets.newHashSet();
    if (this.workUnitState.contains(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY)) {
      this.isSlaConfigured = true;
      this.recordLevelSlaMillis = TimeUnit.MINUTES.toMillis(this.workUnitState.getPropAsLong(KafkaSource.RECORD_LEVEL_SLA_MINUTES_KEY));
    }
    this.taskEventMetadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(workUnitState);
    if (state.getPropAsBoolean(KafkaSource.OBSERVED_LATENCY_MEASUREMENT_ENABLED, KafkaSource.DEFAULT_OBSERVED_LATENCY_MEASUREMENT_ENABLED)) {
      this.observedLatencyHistogram = buildobservedLatencyHistogram(state);
    } else {
      this.observedLatencyHistogram = null;
    }
  }
/**
* A method that constructs a {@link Histogram} object based on a minimum value, a maximum value and precision in terms
* of number of significant digits. The returned {@link Histogram} is not an auto-resizing histogram and any outliers
* above the maximum possible value are discarded in favor of bounding the worst-case performance.
*
* @param state
* @return a non auto-resizing {@link Histogram} with a bounded range and precision.
*/
private Histogram buildobservedLatencyHistogram(WorkUnitState state) {
this.maxPossibleLatency = TimeUnit.HOURS.toMillis(state.getPropAsInt(KafkaSource.MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS,
KafkaSource.DEFAULT_MAX_POSSIBLE_OBSERVED_LATENCY_IN_HOURS));
int numSignificantDigits = state.getPropAsInt(KafkaSource.OBSERVED_LATENCY_PRECISION, KafkaSource.DEFAULT_OBSERVED_LATENCY_PRECISION);
if (numSignificantDigits > 5) {
log.warn("Max precision must be <= 5; Setting precision for observed latency to 5.");
numSignificantDigits = 5;
} else if (numSignificantDigits < 1) {
log.warn("Max precision must be >= 1; Setting precision to the default value of 3.");
numSignificantDigits = 3;
}
return new Histogram(1, maxPossibleLatency, numSignificantDigits);
}
  /**
   * @return the number of partitions that produced at least one undecodeable record.
   */
  public int getErrorPartitionCount() {
    return this.errorPartitions.size();
  }
  /**
   * A Java POJO that encapsulates per-partition extractor stats.
   * Counters initialized to -1 mean "not yet observed"; see incrementErrorCount(),
   * incrementNullRecordCount() and onDecodeableRecord() for the lazy-initialization convention.
   */
  @Data
  public static class ExtractorStats {
    private long decodingErrorCount = -1L;
    private long nullRecordCount = -1L;
    // Average wall-clock milliseconds per processed record; computed on epoch completion.
    private double avgMillisPerRecord = -1;
    private long avgRecordSize;
    private long elapsedTime;
    private long processedRecordCount;
    private long slaMissedRecordCount = -1L;
    // Total bytes consumed from this partition.
    private long partitionTotalSize;
    // Cumulative nanosecond timers.
    private long decodeRecordTime;
    private long fetchMessageBufferTime;
    private long readRecordTime;
    // Epoch-millisecond boundaries of the fetch window.
    private long startFetchEpochTime;
    private long stopFetchEpochTime;
    private long lastSuccessfulRecordHeaderTimestamp;
    private long minLogAppendTime = -1L;
    private long maxLogAppendTime = -1L;
  }
  /**
   * A Java POJO to track the aggregate extractor stats across all partitions processed by the extractor.
   * Min/max fields are seeded so the first per-partition update always wins the comparison.
   */
  @Data
  public static class AggregateExtractorStats {
    private long maxIngestionLatency;
    private long numBytesConsumed;
    private long minStartFetchEpochTime = Long.MAX_VALUE;
    private long maxStopFetchEpochTime;
    private long minLogAppendTime = Long.MAX_VALUE;
    private long maxLogAppendTime;
    private long slaMissedRecordCount;
    private long processedRecordCount;
  }
  /**
   * @param partitionIdx index of Kafka topic partition.
   * @return the number of undecodeable records for a given partition id (-1 if none observed yet).
   */
  public Long getDecodingErrorCount(int partitionIdx) {
    return this.statsMap.get(this.partitions.get(partitionIdx)).getDecodingErrorCount();
  }
  /**
   * @param partitionIdx index of Kafka topic partition.
   * @return the number of null valued records for a given partition id (-1 if none observed yet).
   */
  public Long getNullRecordCount(int partitionIdx) {
    return this.statsMap.get(this.partitions.get(partitionIdx)).getNullRecordCount();
  }
  /**
   * Called when the KafkaExtractor encounters an undecodeable record: marks the partition as an
   * error partition, bumps the global undecodeable counter, and the per-partition error count.
   */
  public void onUndecodeableRecord(int partitionIdx) {
    this.errorPartitions.add(partitionIdx);
    this.undecodableMessageCount++;
    incrementErrorCount(partitionIdx);
  }
  /**
   * Called when a null-valued record is encountered: bumps both the global and the per-partition
   * null-record counters.
   */
  public void onNullRecord(int partitionIdx) {
    this.nullRecordCount++;
    incrementNullRecordCount(partitionIdx);
  }
private void incrementNullRecordCount(int partitionIdx) {
this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
if (v.nullRecordCount < 0) {
v.nullRecordCount = 1;
} else {
v.nullRecordCount++;
}
return v;
});
}
private void incrementErrorCount(int partitionIdx) {
this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
if (v.decodingErrorCount < 0) {
v.decodingErrorCount = 1;
} else {
v.decodingErrorCount++;
}
return v;
});
}
  /**
   * Resets the start-of-fetch wall-clock time for the given partition to "now"; called when the
   * extractor begins (or re-begins) pulling that partition.
   */
  public void resetStartFetchEpochTime(int partitionIdx) {
    this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
      v.startFetchEpochTime = System.currentTimeMillis();
      return v;
    });
  }
  /**
   * A method that is called when a Kafka record is successfully decoded. Updates record/byte
   * counters, decode/read timers, the optional observed-latency histogram, and (when a
   * record-level SLA is configured) the SLA-miss count and log-append time bounds.
   *
   * @param partitionIdx the index of the Kafka partition.
   * @param readStartTime the start time when readRecord() is invoked.
   * @param decodeStartTime the time instant immediately before a record decoding begins.
   * @param recordSizeInBytes the size of the decoded record in bytes.
   * @param logAppendTimestamp the log append time of the {@link org.apache.gobblin.kafka.client.KafkaConsumerRecord}.
   * @param recordCreationTimestamp the time of the {@link org.apache.gobblin.kafka.client.KafkaConsumerRecord}.
   */
  public void onDecodeableRecord(int partitionIdx, long readStartTime, long decodeStartTime, long recordSizeInBytes, long logAppendTimestamp, long recordCreationTimestamp) {
    this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
      long currentTime = System.nanoTime();
      v.processedRecordCount++;
      v.partitionTotalSize += recordSizeInBytes;
      v.decodeRecordTime += currentTime - decodeStartTime;
      v.readRecordTime += currentTime - readStartTime;
      // Observed latency = now - record creation time; only tracked when the histogram is enabled
      // and the record actually carries a creation timestamp.
      if (this.observedLatencyHistogram != null && recordCreationTimestamp > 0) {
        long observedLatency = System.currentTimeMillis() - recordCreationTimestamp;
        // Discard outliers larger than maxPossibleLatency to avoid additional overhead that may otherwise be incurred due to dynamic
        // re-sizing of Histogram when observedLatency exceeds the maximum assumed latency. Essentially, we trade-off accuracy for
        // performance in a pessimistic scenario.
        if (observedLatency < this.maxPossibleLatency) {
          this.observedLatencyHistogram.recordValue(observedLatency);
        }
      }
      if (this.isSlaConfigured) {
        // First decodeable record for this partition: initialize SLA bookkeeping.
        if (v.slaMissedRecordCount < 0) {
          v.slaMissedRecordCount = 0;
          v.minLogAppendTime = logAppendTimestamp;
          v.maxLogAppendTime = logAppendTimestamp;
        } else {
          if (logAppendTimestamp < v.minLogAppendTime) {
            v.minLogAppendTime = logAppendTimestamp;
          }
          if (logAppendTimestamp > v.maxLogAppendTime) {
            v.maxLogAppendTime = logAppendTimestamp;
          }
        }
        // A record misses the SLA when its age (now - log append time) exceeds the configured budget.
        if (logAppendTimestamp > 0 && (System.currentTimeMillis() - logAppendTimestamp > recordLevelSlaMillis)) {
          v.slaMissedRecordCount++;
        }
      }
      return v;
    });
  }
/**
* A method that is called after a batch of records has been fetched from Kafka e.g. via a consumer.poll().
* @param partitionIdx the index of Kafka partition
* @param fetchStartTime the time instant immediately before fetching records from Kafka.
*/
public void onFetchNextMessageBuffer(int partitionIdx, long fetchStartTime) {
this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
v.fetchMessageBufferTime += System.nanoTime() - fetchStartTime;
return v;
});
}
  /**
   * A method called when a partition has been processed; folds the remaining read time into the
   * partition's cumulative readRecordTime.
   * @param partitionIdx the index of the Kafka partition
   * @param readStartTime the start time (nanoTime) of the readRecord call.
   */
  void onPartitionReadComplete(int partitionIdx, long readStartTime) {
    this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
      v.readRecordTime += System.nanoTime() - readStartTime;
      return v;
    });
  }
/**
* A method that is invoked to update the statistics for current partition. In the batch mode of execution, this is
* invoked when a partition has been processed and before the next partition can be processed. In the streaming mode of
* execution, this method is invoked on every flush.
* @param partitionIdx the index of Kafka partition
* @param readStartTime the start time when readRecord() is invoked.
*/
public void updateStatisticsForCurrentPartition(int partitionIdx, long readStartTime, long lastSuccessfulRecordHeaderTimestamp) {
long stopFetchEpochTime = System.currentTimeMillis();
this.statsMap.computeIfPresent(this.partitions.get(partitionIdx), (k, v) -> {
v.stopFetchEpochTime = stopFetchEpochTime;
if (v.processedRecordCount != 0) {
v.elapsedTime = stopFetchEpochTime - this.statsMap.get(this.partitions.get(partitionIdx)).getStartFetchEpochTime();
//Compute average stats
v.avgMillisPerRecord = (double) v.elapsedTime / (double) v.processedRecordCount;
v.avgRecordSize = this.statsMap.get(this.partitions.get(partitionIdx)).getPartitionTotalSize() / v.processedRecordCount;
v.lastSuccessfulRecordHeaderTimestamp = lastSuccessfulRecordHeaderTimestamp;
}
return v;
});
onPartitionReadComplete(partitionIdx, readStartTime);
updateAggregateExtractorStats(partitionIdx);
}
  /**
   * Folds one partition's statistics into the run-level aggregate statistics: widens the
   * aggregate fetch window, tracks max ingestion latency and min/max log-append times, and
   * sums processed record counts, byte counts and SLA-missed counts.
   * @param partitionIdx the index of the Kafka partition whose stats are merged
   */
  private void updateAggregateExtractorStats(int partitionIdx) {
    ExtractorStats partitionStats = this.statsMap.get(this.partitions.get(partitionIdx));
    // Widen the aggregate fetch window to cover this partition's fetch epoch times.
    if (partitionStats.getStartFetchEpochTime() < aggregateExtractorStats.getMinStartFetchEpochTime()) {
      aggregateExtractorStats.setMinStartFetchEpochTime(partitionStats.getStartFetchEpochTime());
    }
    if (partitionStats.getStopFetchEpochTime() > aggregateExtractorStats.getMaxStopFetchEpochTime()) {
      aggregateExtractorStats.setMaxStopFetchEpochTime(partitionStats.getStopFetchEpochTime());
    }
    long partitionLatency = 0L;
    //Check if there are any records consumed from this KafkaPartition.
    if (partitionStats.getMinLogAppendTime() > 0) {
      partitionLatency = partitionStats.getStopFetchEpochTime() - partitionStats.getMinLogAppendTime();
    }
    if (aggregateExtractorStats.getMaxIngestionLatency() < partitionLatency) {
      aggregateExtractorStats.setMaxIngestionLatency(partitionLatency);
    }
    // NOTE(review): unlike the latency computation above, the min/max log-append updates below do
    // not guard against partitions with no consumed records (minLogAppendTime <= 0) — confirm the
    // sentinel values in ExtractorStats/AggregateExtractorStats make these comparisons no-ops then.
    if (aggregateExtractorStats.getMinLogAppendTime() > partitionStats.getMinLogAppendTime()) {
      aggregateExtractorStats.setMinLogAppendTime(partitionStats.getMinLogAppendTime());
    }
    if (aggregateExtractorStats.getMaxLogAppendTime() < partitionStats.getMaxLogAppendTime()) {
      aggregateExtractorStats.setMaxLogAppendTime(partitionStats.getMaxLogAppendTime());
    }
    // Accumulate volume counters across partitions.
    aggregateExtractorStats.setProcessedRecordCount(aggregateExtractorStats.getProcessedRecordCount() + partitionStats.getProcessedRecordCount());
    aggregateExtractorStats.setNumBytesConsumed(aggregateExtractorStats.getNumBytesConsumed() + partitionStats.getPartitionTotalSize());
    if (partitionStats.getSlaMissedRecordCount() > 0) {
      aggregateExtractorStats.setSlaMissedRecordCount(aggregateExtractorStats.getSlaMissedRecordCount() + partitionStats.getSlaMissedRecordCount());
    }
  }
  /**
   * Builds the tag map emitted with a partition's tracking event, combining this run's watermark
   * info with the partition's extractor statistics. As a side effect, persists the start/stop
   * fetch epoch times into the work unit state so the next run can compute fetch-time deltas.
   *
   * @param partitionId index of the partition within {@code this.partitions}
   * @param lowWatermark low watermarks for all partitions of this work unit
   * @param highWatermark expected high watermarks for all partitions
   * @param nextWatermark actual high watermarks reached by this run
   * @return tag-name to value map for the partition's tracking event
   */
  private Map<String, String> createTagsForPartition(int partitionId, MultiLongWatermark lowWatermark, MultiLongWatermark highWatermark, MultiLongWatermark nextWatermark) {
    Map<String, String> tagsForPartition = Maps.newHashMap();
    KafkaPartition partition = this.partitions.get(partitionId);
    tagsForPartition.put(TOPIC, partition.getTopicName());
    tagsForPartition.put(PARTITION, Integer.toString(partition.getId()));
    tagsForPartition.put(LOW_WATERMARK, Long.toString(lowWatermark.get(partitionId)));
    tagsForPartition.put(ACTUAL_HIGH_WATERMARK, Long.toString(nextWatermark.get(partitionId)));
    // These are used to compute the load factor,
    // gobblin consumption rate relative to the kafka production rate.
    // The gobblin rate is computed as (processed record count/elapsed time)
    // The kafka rate is computed as (expected high watermark - previous latest offset) /
    // (current offset fetch epoch time - previous offset fetch epoch time).
    tagsForPartition.put(EXPECTED_HIGH_WATERMARK, Long.toString(highWatermark.get(partitionId)));
    // Previous-run values are read back from the (possibly multi-workunit) state.
    tagsForPartition.put(KafkaSource.PREVIOUS_OFFSET_FETCH_EPOCH_TIME,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_OFFSET_FETCH_EPOCH_TIME, partitionId)));
    tagsForPartition.put(KafkaSource.OFFSET_FETCH_EPOCH_TIME,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.OFFSET_FETCH_EPOCH_TIME, partitionId)));
    tagsForPartition.put(KafkaSource.PREVIOUS_LATEST_OFFSET,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_LATEST_OFFSET, partitionId)));
    tagsForPartition.put(KafkaSource.PREVIOUS_LOW_WATERMARK,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_LOW_WATERMARK, partitionId)));
    tagsForPartition.put(KafkaSource.PREVIOUS_HIGH_WATERMARK,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_HIGH_WATERMARK, partitionId)));
    tagsForPartition.put(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME, partitionId)));
    tagsForPartition.put(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME,
        Long.toString(KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(this.workUnitState,
            KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME, partitionId)));
    // Fall back to an empty stats object if this partition never had a stats entry.
    ExtractorStats stats = this.statsMap.getOrDefault(partition, new ExtractorStats());
    tagsForPartition.put(KafkaSource.START_FETCH_EPOCH_TIME, Long.toString(stats.getStartFetchEpochTime()));
    tagsForPartition.put(KafkaSource.STOP_FETCH_EPOCH_TIME, Long.toString(stats.getStopFetchEpochTime()));
    // Persist this run's fetch epoch times so they become the "previous" values next run.
    this.workUnitState.setProp(KafkaUtils.getPartitionPropName(KafkaSource.START_FETCH_EPOCH_TIME, partitionId),
        Long.toString(stats.getStartFetchEpochTime()));
    this.workUnitState.setProp(KafkaUtils.getPartitionPropName(KafkaSource.STOP_FETCH_EPOCH_TIME, partitionId),
        Long.toString(stats.getStopFetchEpochTime()));
    tagsForPartition.put(PROCESSED_RECORD_COUNT, Long.toString(stats.getProcessedRecordCount()));
    tagsForPartition.put(SLA_MISSED_RECORD_COUNT, Long.toString(stats.getSlaMissedRecordCount()));
    tagsForPartition.put(MIN_LOG_APPEND_TIMESTAMP, Long.toString(stats.getMinLogAppendTime()));
    tagsForPartition.put(MAX_LOG_APPEND_TIMESTAMP, Long.toString(stats.getMaxLogAppendTime()));
    tagsForPartition.put(PARTITION_TOTAL_SIZE, Long.toString(stats.getPartitionTotalSize()));
    tagsForPartition.put(AVG_RECORD_SIZE, Long.toString(stats.getAvgRecordSize()));
    tagsForPartition.put(ELAPSED_TIME, Long.toString(stats.getElapsedTime()));
    // Timing counters are tracked in nanos internally; reported in millis.
    tagsForPartition.put(DECODE_RECORD_TIME, Long.toString(TimeUnit.NANOSECONDS.toMillis(stats.getDecodeRecordTime())));
    tagsForPartition.put(FETCH_MESSAGE_BUFFER_TIME,
        Long.toString(TimeUnit.NANOSECONDS.toMillis(stats.getFetchMessageBufferTime())));
    tagsForPartition.put(READ_RECORD_TIME, Long.toString(TimeUnit.NANOSECONDS.toMillis(stats.getReadRecordTime())));
    tagsForPartition.put(UNDECODABLE_MESSAGE_COUNT, Long.toString(stats.getDecodingErrorCount()));
    tagsForPartition.put(NULL_RECORD_COUNT, Long.toString(stats.getNullRecordCount()));
    tagsForPartition.put(LAST_RECORD_HEADER_TIMESTAMP, Long.toString(stats.getLastSuccessfulRecordHeaderTimestamp()));
    // Commit avg time to pull a record for each partition
    double avgMillis = stats.getAvgMillisPerRecord();
    if (avgMillis >= 0) {
      log.info(String.format("Avg time to pull a record for partition %s = %f milliseconds", partition, avgMillis));
      KafkaUtils.setPartitionAvgRecordMillis(this.workUnitState, partition, avgMillis);
      tagsForPartition.put(AVG_RECORD_PULL_TIME, Double.toString(avgMillis));
    } else {
      log.info(String.format("Avg time to pull a record for partition %s not recorded", partition));
      tagsForPartition.put(AVG_RECORD_PULL_TIME, Double.toString(-1));
    }
    // The (single, extractor-wide) latency histogram is attached to only one partition's event
    // to avoid duplicating it across all partitions.
    //Report observed latency histogram as part
    if ((partitionId == minPartitionIdx) && (this.observedLatencyHistogram != null)) {
      tagsForPartition.put(OBSERVED_LATENCY_HISTOGRAM, convertHistogramToString(this.observedLatencyHistogram));
    }
    return tagsForPartition;
  }
/**
* A helper method to serialize a {@link Histogram} to its string representation. This method uses the
* compressed logging format provided by the {@link org.HdrHistogram.HistogramLogWriter}
* to represent the Histogram as a string. The readers can use the {@link org.HdrHistogram.HistogramLogReader} to
* deserialize the string back to a {@link Histogram} object.
* @param observedLatencyHistogram
* @return
*/
@VisibleForTesting
public static String convertHistogramToString(Histogram observedLatencyHistogram) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (PrintStream stream = new PrintStream(baos, true, Charsets.UTF_8.name())) {
HistogramLogWriter histogramLogWriter = new HistogramLogWriter(stream);
histogramLogWriter.outputIntervalHistogram(observedLatencyHistogram);
return new String(baos.toByteArray(), Charsets.UTF_8);
} catch (UnsupportedEncodingException e) {
log.error("Exception {} encountered when creating PrintStream; returning empty string", e);
return EMPTY_STRING;
}
}
/**
* Emit Tracking events reporting the various statistics to be consumed by a monitoring application.
* @param context the current {@link MetricContext}
* @param tagsForPartitionsMap tags for each partition
*/
public void emitTrackingEvents(MetricContext context, Map<KafkaPartition, Map<String, String>> tagsForPartitionsMap) {
for (Map.Entry<KafkaPartition, Map<String, String>> eventTags : tagsForPartitionsMap.entrySet()) {
EventSubmitter.Builder eventSubmitterBuilder = new EventSubmitter.Builder(context, GOBBLIN_KAFKA_NAMESPACE);
eventSubmitterBuilder.addMetadata(this.taskEventMetadataGenerator.getMetadata(workUnitState, KAFKA_EXTRACTOR_TOPIC_METADATA_EVENT_NAME));
eventSubmitterBuilder.addMetadata(BROKER_NAME, this.brokerName);
eventSubmitterBuilder.build().submit(KAFKA_EXTRACTOR_TOPIC_METADATA_EVENT_NAME, eventTags.getValue());
}
}
/**
* Emit Tracking events reporting the topic partition information this extractor handled to be consumed by a monitoring application.
* @param context the current {@link MetricContext}
*/
public void submitEventToIndicateContainerTransition(MetricContext context) {
for (int i = 0; i < this.partitions.size(); i++) {
KafkaPartition partitionKey = this.partitions.get(i);
GobblinEventBuilder gobblinEventBuilder = new GobblinEventBuilder(KAFKA_EXTRACTOR_CONTAINER_TRANSITION_EVENT_NAME, GOBBLIN_KAFKA_NAMESPACE);
gobblinEventBuilder.addMetadata(TOPIC, partitionKey.getTopicName());
gobblinEventBuilder.addMetadata(BROKER_NAME, this.brokerName);
gobblinEventBuilder.addMetadata(PARTITION, Integer.toString(partitionKey.getId()));
gobblinEventBuilder.addAdditionalMetadata(this.taskEventMetadataGenerator.getMetadata(workUnitState, KAFKA_EXTRACTOR_CONTAINER_TRANSITION_EVENT_NAME));
EventSubmitter.submit(context, gobblinEventBuilder);
}
}
/**
* A helper function to merge tags for KafkaPartition. Separate into a package-private method for ease of testing.
*/
public Map<KafkaPartition, Map<String, String>> generateTagsForPartitions(MultiLongWatermark lowWatermark, MultiLongWatermark highWatermark,
MultiLongWatermark nextWatermark, Map<KafkaPartition, Map<String, String>> additionalTags) {
Map<KafkaPartition, Map<String, String>> tagsForPartitionsMap = Maps.newHashMap();
for (int i = 0; i < this.partitions.size(); i++) {
KafkaPartition partitionKey = this.partitions.get(i);
log.info(String.format("Actual high watermark for partition %s=%d, expected=%d", this.partitions.get(i),
nextWatermark.get(i), highWatermark.get(i)));
tagsForPartitionsMap
.put(this.partitions.get(i), createTagsForPartition(i, lowWatermark, highWatermark, nextWatermark));
// Merge with additionalTags from argument-provided map if exists.
if (additionalTags.containsKey(partitionKey)) {
tagsForPartitionsMap.get(partitionKey).putAll(additionalTags.get(partitionKey));
}
}
return tagsForPartitionsMap;
}
/**
*
* @param partitionIdx the index of Kafka partition
* @return the average record size of records for a given {@link KafkaPartition}
*/
public long getAvgRecordSize(int partitionIdx) {
ExtractorStats stats = this.statsMap.getOrDefault(this.partitions.get(partitionIdx), null);
if (stats != null) {
if (stats.getAvgRecordSize() != 0) {
//Average record size already computed.
return stats.getAvgRecordSize();
} else {
//Compute average record size
if (stats.getProcessedRecordCount() != 0) {
return stats.getPartitionTotalSize() / stats.getProcessedRecordCount();
}
}
}
return 0;
}
/**
* @param timeUnit the time unit for the ingestion latency.
* @return the maximum ingestion latency across all partitions processed by the extractor from the last
* completed epoch.
*/
public long getMaxIngestionLatency(TimeUnit timeUnit) {
return timeUnit.convert(this.lastAggregateExtractorStats.getMaxIngestionLatency(), TimeUnit.MILLISECONDS);
}
/**
*
* @return the consumption rate in MB/s across all partitions processed by the extractor from the last
* completed epoch.
*/
public double getConsumptionRateMBps() {
double consumptionDurationSecs = ((double) (this.lastAggregateExtractorStats.getMaxStopFetchEpochTime() - this.lastAggregateExtractorStats
.getMinStartFetchEpochTime())) / 1000;
return this.lastAggregateExtractorStats.getNumBytesConsumed() / (consumptionDurationSecs * (1024 * 1024L));
}
/**
* Reset all KafkaExtractor stats.
*/
public void reset() {
this.lastAggregateExtractorStats = this.aggregateExtractorStats;
this.aggregateExtractorStats = new AggregateExtractorStats();
this.partitions.forEach(partition -> this.statsMap.put(partition, new ExtractorStats()));
for (int partitionIdx = 0; partitionIdx < this.partitions.size(); partitionIdx++) {
resetStartFetchEpochTime(partitionIdx);
}
if (this.observedLatencyHistogram != null) {
this.observedLatencyHistogram.reset();
}
}
}
| 3,296 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaOffsetRetrievalFailureException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
@SuppressWarnings("serial")
public class KafkaOffsetRetrievalFailureException extends Exception {

  /**
   * Creates an exception indicating that Kafka offsets could not be retrieved.
   * @param message description of the offset-retrieval failure
   */
  public KafkaOffsetRetrievalFailureException(String message) {
    super(message);
  }

  /**
   * Creates an exception indicating that Kafka offsets could not be retrieved, preserving
   * the underlying cause for diagnostics. (Added so callers wrapping client/network errors
   * do not lose the original stack trace.)
   * @param message description of the offset-retrieval failure
   * @param cause the underlying failure
   */
  public KafkaOffsetRetrievalFailureException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 3,297 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaProduceRateTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.net.URL;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDate;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.EvictingQueue;
import com.google.common.collect.Maps;
import de.jollyday.HolidayManager;
import de.jollyday.parameter.UrlManagerParameter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.extract.FlushingExtractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.writer.WatermarkTracker;
/**
* A helper class that tracks the produce rate for each TopicPartition currently consumed by the {@link KafkaStreamingExtractor}.
* The produce rates are stored in the {@link org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor.KafkaWatermark}
* and checkpointed to the {@link org.apache.gobblin.writer.WatermarkStorage} along with the watermarks.
*
* The produce rates are maintained based on hour-of-day and day-of-week, and are computed in bytes/sec. The new produce
* rates estimates are obtained as Exponentially Weighted Moving Average (EWMA) as:
* new_produce_rate = a * avg_rate_in_current_window + (1 - a) * historic_produce_rate, where:
* "a" is the exponential decay factor. Small values of "a" result in estimates updated more slowly, while large values of
* "a" will result in giving more weight to recent values.
*/
public class KafkaProduceRateTracker {
  // Config key for the EWMA decay factor used when updating produce-rate estimates.
  private static final String KAFKA_PRODUCE_RATE_EXPONENTIAL_DECAY_FACTOR_KEY =
      "gobblin.kafka.produceRateTracker.exponentialDecayFactor";
  private static final Double DEFAULT_KAFKA_PRODUCE_RATE_EXPONENTIAL_DECAY_FACTOR = 0.375;
  // When true, produce-rate histories are not updated on holidays (traffic is atypical).
  static final String KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY = "gobblin.kafka.produceRateTracker.disableStatsOnHolidays";
  private static final Boolean DEFAULT_KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS = false;
  private static final String KAFKA_PRODUCE_RATE_HOLIDAY_LOCALE_KEY = "gobblin.kafka.produceRateTracker.holidayLocale";
  private static final String DEFAULT_KAFKA_PRODUCE_RATE_HOLIDAY_LOCALE = "ca";
  // Optional holiday calendar resource on the classpath; jollyday defaults are used if absent.
  private static final String HOLIDAY_FILE = "Holidays.xml";
  private static final DateTimeZone DEFAULT_TIME_ZONE = DateTimeZone.getDefault();
  // Number of recent flush intervals examined when deciding whether the consumer is backlogged.
  static final int SLIDING_WINDOW_SIZE = 3;
  static final String KAFKA_PARTITION_PRODUCE_RATE_KEY = "produceRate";
  /**
   * The element-insertion order has to be maintained since:
   * 1. API provided by {@link KafkaExtractorStatsTracker} accepts index of partition, instead of partition
   * object like {@link #partitionsToProdRate}.
   * 2. When traversing {@link #partitionsToProdRate} to update new produce-rate value, we need find the previous value
   * of watermark to help calculate produce-rate, which is also indexed by partition-index.
   * 3. To make sure the entry mapping between entry in the {@link #partitionsToProdRate} and entry in {@link KafkaExtractorStatsTracker}
   * and watermark, the order of entries in this map which is defined by input list of {@link KafkaPartition} needs
   * to be preserved, as this input list also serves source of constructing entry-order in {@link KafkaExtractorStatsTracker}.
   */
  @Getter
  private final LinkedHashMap<KafkaPartition, Double> partitionsToProdRate;
  private final WatermarkTracker watermarkTracker;
  private final KafkaExtractorStatsTracker statsTracker;
  // EWMA weight given to the newest observation; (1 - factor) is given to the history.
  private final Double exponentialDecayFactor;
  private final Boolean disableStatsOnHolidays;
  private final HolidayManager holidayManager;
  private final long flushIntervalSecs;
  private final String holidayLocale;
  // Epoch millis of the previous report; lower bound of the current measurement window.
  private Long lastReportTimeMillis;
  // Memoizes per-date holiday lookups to avoid repeated HolidayManager queries.
  private final Map<LocalDate, Boolean> holidayMap = Maps.newHashMap();
  // Sliding windows of the most recent ingestion latencies and consumption rates, one entry per flush.
  private final EvictingQueue<Long> ingestionLatencies = EvictingQueue.create(SLIDING_WINDOW_SIZE);
  private final EvictingQueue<Double> consumptionRateMBps = EvictingQueue.create(SLIDING_WINDOW_SIZE);
  public KafkaProduceRateTracker(WorkUnitState state, List<KafkaPartition> partitions, WatermarkTracker watermarkTracker,
      KafkaExtractorStatsTracker statsTracker) {
    this(state, partitions, watermarkTracker, statsTracker, System.currentTimeMillis());
  }
  // Visible-for-testing constructor allowing the initial report time to be injected.
  @VisibleForTesting
  KafkaProduceRateTracker(WorkUnitState state, List<KafkaPartition> partitions, WatermarkTracker watermarkTracker,
      KafkaExtractorStatsTracker statsTracker, Long lastReportTimeMillis) {
    // Seed every partition with a produce rate of -1 ("unknown"), preserving list order.
    // NOTE(review): new Double(-1) is deprecated boxing; -1.0 autoboxing would suffice.
    this.partitionsToProdRate = (LinkedHashMap<KafkaPartition, Double>) partitions.stream()
        .collect(Collectors.toMap(Function.identity(), x -> new Double(-1), (e1, e2) -> e1, LinkedHashMap::new));
    this.watermarkTracker = watermarkTracker;
    this.statsTracker = statsTracker;
    this.lastReportTimeMillis = lastReportTimeMillis;
    this.exponentialDecayFactor = state.getPropAsDouble(KAFKA_PRODUCE_RATE_EXPONENTIAL_DECAY_FACTOR_KEY, DEFAULT_KAFKA_PRODUCE_RATE_EXPONENTIAL_DECAY_FACTOR);
    // Prefer a bundled Holidays.xml calendar if present on the classpath.
    URL calendarFileUrl = getClass().getClassLoader().getResource(HOLIDAY_FILE);
    this.holidayManager =
        calendarFileUrl != null ? HolidayManager.getInstance(new UrlManagerParameter(calendarFileUrl, new Properties()))
            : HolidayManager.getInstance();
    this.disableStatsOnHolidays = state.getPropAsBoolean(KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY,
        DEFAULT_KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS);
    this.holidayLocale = state.getProp(KAFKA_PRODUCE_RATE_HOLIDAY_LOCALE_KEY, DEFAULT_KAFKA_PRODUCE_RATE_HOLIDAY_LOCALE);
    this.flushIntervalSecs = state.getPropAsLong(FlushingExtractor.FLUSH_INTERVAL_SECONDS_KEY, FlushingExtractor.DEFAULT_FLUSH_INTERVAL_SECONDS);
  }
  /** Returns the hour-of-day (0-23) of {@code date} in the JVM default time zone. */
  public static int getHourOfDay(Date date) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(date);
    //HOUR_OF_DAY ranges from 0-23.
    return calendar.get(Calendar.HOUR_OF_DAY);
  }
  /** Returns the day-of-week of {@code date}, shifted to a 0-6 range for array indexing. */
  public static int getDayOfWeek(Date date) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(date);
    //DAY_OF_WEEK ranges from 1-7.
    return calendar.get(Calendar.DAY_OF_WEEK) - 1;
  }
  /** Snapshot of per-partition produce/consume statistics for one measurement window. */
  @Getter
  @AllArgsConstructor
  public static class TopicPartitionStats {
    // EWMA produce-rate estimates indexed by [dayOfWeek 0-6][hourOfDay 0-23]; -1 means "no data".
    private double[][] avgProduceRates;
    private double avgConsumeRate;
    private long avgRecordSize;
    // Cache the recent caught produce-rate of a partition.
    private double currentProduceRate;
  }
  /**
   * Computes the updated stats for one partition for the current window: refreshes the EWMA
   * produce-rate bucket for the current (dayOfWeek, hourOfDay), the average record size, and —
   * when the consumer is backlogged — the peak consume rate.
   * NOTE(review): previousMaxOffset == 0 is treated as "no previous offset information",
   * in which case the previously committed stats are carried forward unchanged.
   */
  private TopicPartitionStats getNewTopicPartitionStats(Long previousMaxOffset, Long maxOffset, KafkaStreamingExtractor.KafkaWatermark lastCommittedWatermark,
      long currentTimeMillis, Long avgRecordSize) {
    if (previousMaxOffset == 0) {
      TopicPartitionStats stats = new TopicPartitionStats(lastCommittedWatermark.getAvgProduceRates(), lastCommittedWatermark.getAvgConsumeRate(),
          lastCommittedWatermark.getAvgRecordSize(), 0);
      return stats;
    }
    long numRecordsProduced = maxOffset - previousMaxOffset;
    long newAvgRecordSize;
    if (numRecordsProduced > 0) {
      if (lastCommittedWatermark.getAvgRecordSize() > 0) {
        // Blend the window's average record size into the committed average via EWMA.
        newAvgRecordSize = updateMovingAverage(avgRecordSize, lastCommittedWatermark.getAvgRecordSize()).longValue();
      } else {
        //No previously recorded average record size.
        newAvgRecordSize = avgRecordSize;
      }
    } else {
      //No records see in the current window. No need to update the average record size.
      newAvgRecordSize = lastCommittedWatermark.getAvgRecordSize();
    }
    Date date = new Date(currentTimeMillis);
    int hourOfDay = getHourOfDay(date);
    int dayOfWeek = getDayOfWeek(date);
    // Bytes/sec over the window; the +1 ms guards against division by zero.
    double currentProduceRate =
        (numRecordsProduced * avgRecordSize) * 1000 / (double) (currentTimeMillis - lastReportTimeMillis + 1);
    double[][] historicProduceRates = lastCommittedWatermark.getAvgProduceRates();
    // Skip history updates on holidays (when so configured) since traffic is atypical.
    if (!isHoliday(new LocalDate(currentTimeMillis, DEFAULT_TIME_ZONE))) {
      if (historicProduceRates != null) {
        if (historicProduceRates[dayOfWeek][hourOfDay] >= 0) {
          historicProduceRates[dayOfWeek][hourOfDay] =
              updateMovingAverage(currentProduceRate, historicProduceRates[dayOfWeek][hourOfDay]);
        } else {
          historicProduceRates[dayOfWeek][hourOfDay] = currentProduceRate;
        }
      } else {
        //No previous values found. Bootstrap with the average rate computed in the current window.
        historicProduceRates = new double[7][24];
        for (double[] row : historicProduceRates) {
          Arrays.fill(row, -1.0);
        }
        historicProduceRates[dayOfWeek][hourOfDay] = currentProduceRate;
      }
    }
    double consumeRate = lastCommittedWatermark.getAvgConsumeRate();
    ingestionLatencies.add(this.statsTracker.getMaxIngestionLatency(TimeUnit.SECONDS));
    consumptionRateMBps.add(this.statsTracker.getConsumptionRateMBps());
    if (isConsumerBacklogged()) {
      //If ingestion latency is high, it means the consumer is backlogged. Hence, its current consumption rate
      //must equal the peak consumption rate.
      consumeRate = consumeRate >= 0 ? updateMovingAverage(getPenultimateElement(consumptionRateMBps), consumeRate)
          : this.statsTracker.getConsumptionRateMBps();
    }
    return new TopicPartitionStats(historicProduceRates, consumeRate, newAvgRecordSize, currentProduceRate);
  }
  /**
   * Returns true only when the last {@link #SLIDING_WINDOW_SIZE} observed ingestion latencies
   * all exceed twice the flush interval, i.e. the consumer is consistently behind.
   */
  private boolean isConsumerBacklogged() {
    if (this.ingestionLatencies.size() < SLIDING_WINDOW_SIZE) {
      return false;
    }
    for (long latency: this.ingestionLatencies) {
      if (latency < (2 * this.flushIntervalSecs)) {
        return false;
      }
    }
    return true;
  }
  /**
   * Returns the element before the final element. It does this by removing the oldest element and peeking at the
   * next element.
   * NOTE: mutates {@code queue} (the oldest element is discarded as a side effect).
   * @param queue queue with at least two elements
   * @return the second-newest element
   */
  static Double getPenultimateElement(EvictingQueue<Double> queue) {
    Preconditions.checkArgument(queue.size() > 1);
    queue.remove();
    return queue.peek();
  }
  /**
   * A method that computes a new moving average from previous average estimate and current value using an
   * Exponentially weighted moving average (EWMA) algorithm.
   * @param currentValue the latest observation
   * @param previousAverage the prior EWMA estimate
   * @return updated moving average computed as an EWMA.
   */
  private Double updateMovingAverage(double currentValue, double previousAverage) {
    return exponentialDecayFactor * currentValue + (1 - exponentialDecayFactor) * previousAverage;
  }
  /**
   * Several side effects in this method:
   * 1. Write ProduceRate of each KafkaPartition into its watermark as the method name indicates.
   * 2. Update {@link #partitionsToProdRate} for each KafkaPartitions with their newest ProduceRate, this would be
   * part of GTE to be emitted as each flush happens.
   * Also advances {@link #lastReportTimeMillis} to {@code currentTimeMillis} once all partitions are processed.
   */
  public void writeProduceRateToKafkaWatermarks(Map<KafkaPartition, Long> latestOffsetMap, Map<String, CheckpointableWatermark> lastCommittedWatermarks,
      MultiLongWatermark highWatermark, long currentTimeMillis) {
    int partitionIndex = 0;
    Map<String, CheckpointableWatermark> unacknowledgedWatermarks = watermarkTracker.getAllUnacknowledgedWatermarks();
    for (KafkaPartition partition : this.partitionsToProdRate.keySet()) {
      // -1 signals that the latest offset for this partition could not be fetched.
      long maxOffset = latestOffsetMap.getOrDefault(partition, -1L);
      KafkaStreamingExtractor.KafkaWatermark kafkaWatermark =
          (KafkaStreamingExtractor.KafkaWatermark) lastCommittedWatermarks.get(partition.toString());
      KafkaStreamingExtractor.KafkaWatermark unacknowledgedWatermark =
          (KafkaStreamingExtractor.KafkaWatermark) unacknowledgedWatermarks.get(partition.toString());
      if (kafkaWatermark == null) {
        //If there is no previously committed watermark for the topic partition, create a dummy watermark for computing stats
        kafkaWatermark = new KafkaStreamingExtractor.KafkaWatermark(partition, new LongWatermark(maxOffset >= 0? maxOffset : 0L));
      }
      long avgRecordSize = this.statsTracker.getAvgRecordSize(partitionIndex);
      long previousMaxOffset = highWatermark.get(partitionIndex++);
      //If maxOffset < 0, it means that we could not get max offsets from Kafka due to metadata fetch failure.
      // In this case, carry previous state forward and set produce-rate to negative value, indicating it's not available.
      TopicPartitionStats stats =
          maxOffset >= 0 ? getNewTopicPartitionStats(previousMaxOffset, maxOffset, kafkaWatermark, currentTimeMillis,
              avgRecordSize)
              : new TopicPartitionStats(kafkaWatermark.getAvgProduceRates(), kafkaWatermark.getAvgConsumeRate(), kafkaWatermark.getAvgRecordSize(), -1);
      if (unacknowledgedWatermark == null) {
        //If no record seen for this topicPartition in the current time window; carry forward the previously committed
        // watermark with the updated statistics
        unacknowledgedWatermark = kafkaWatermark;
        watermarkTracker.unacknowledgedWatermark(unacknowledgedWatermark);
      }
      // Stamp the updated stats onto the watermark that will be checkpointed next.
      unacknowledgedWatermark.setAvgProduceRates(stats.getAvgProduceRates());
      unacknowledgedWatermark.setAvgConsumeRate(stats.getAvgConsumeRate());
      unacknowledgedWatermark.setAvgRecordSize(stats.getAvgRecordSize());
      partitionsToProdRate.put(partition, stats.getCurrentProduceRate());
    }
    this.lastReportTimeMillis = currentTimeMillis;
  }
  /**
   * @param date the local date to test
   * @return true only when holiday-based stats suppression is enabled (i.e.
   * {@link #KAFKA_PRODUCE_RATE_DISABLE_STATS_ON_HOLIDAYS_KEY} is set to true) AND {@code date}
   * is a holiday for the configured locale; false otherwise. Results are memoized per date.
   */
  boolean isHoliday(LocalDate date) {
    if (!this.disableStatsOnHolidays) {
      return false;
    }
    if (holidayMap.containsKey(date)) {
      return holidayMap.get(date);
    } else {
      boolean isHolidayToday = this.holidayManager.isHoliday(date, this.holidayLocale);
      holidayMap.put(date, isHolidayToday);
      return isHolidayToday;
    }
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSimpleExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
/**
* An implementation of {@link KafkaExtractor} from which reads and returns records as an array of bytes.
*
* @author akshay@nerdwallet.com
*
* @deprecated use {@link KafkaDeserializerExtractor} and {@link KafkaDeserializerExtractor.Deserializers#BYTE_ARRAY} instead
*/
public class KafkaSimpleExtractor extends KafkaExtractor<String, byte[]> {

  /** Registry used to look up the latest schema for the topic being extracted. */
  private final KafkaSchemaRegistry<String, String> kafkaSchemaRegistry;

  public KafkaSimpleExtractor(WorkUnitState state) {
    super(state);
    this.kafkaSchemaRegistry = new SimpleKafkaSchemaRegistry(state.getProperties());
  }

  /** Returns the raw Kafka message payload without any decoding. */
  @Override
  protected byte[] decodeRecord(ByteArrayBasedKafkaRecord record) throws IOException {
    return record.getMessageBytes();
  }

  /**
   * Gets the schema (metadata) of the extracted data records.
   *
   * @return the latest schema registered for the Kafka topic being extracted
   * @throws RuntimeException wrapping a {@link SchemaRegistryException} if the registry lookup fails
   */
  @Override
  public String getSchema() throws IOException {
    try {
      String latestSchema = this.kafkaSchemaRegistry.getLatestSchemaByTopic(this.topicName);
      return latestSchema;
    } catch (SchemaRegistryException e) {
      throw new RuntimeException(e);
    }
  }
}
| 3,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.