_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q172400 | JmsSourceUpgrader.upgradeV5ToV6 | test | private static void upgradeV5ToV6(List<Config> configs, Context context) {
List<Config> dataFormatConfigs = configs.stream()
.filter(c -> c.getName().startsWith("dataFormat"))
.collect(Collectors.toList());
// Remove those configs
configs.removeAll(dataFormatConfigs);
// There is an interesting history with compression - at some point (version 2), we explicitly added it, then
// we have hidden it. So this config might or might not exists, depending on the version in which the pipeline
// was created. However the service is expecting it and thus we need to ensure that it's there.
if(dataFormatConfigs.stream().noneMatch(c -> "dataFormatConfig.compression".equals(c.getName()))) {
dataFormatConfigs.add(new Config("dataFormatConfig.compression", "NONE"));
}
// And finally register new service
context.registerService(DataFormatParserService.class, dataFormatConfigs);
} | java | {
"resource": ""
} |
q172401 | DataStore.getInputStream | test | public InputStream getInputStream() throws IOException {
// Opens the backing file for reading under the store's lock. The lock is held
// until the returned stream is closed; if stream creation fails the lock is
// released here before rethrowing.
acquireLock();
try {
isClosed = false;
forWrite = false;
LOG.trace("Starts read '{}'", file);
// Repair any interrupted previous write before exposing the data.
verifyAndRecover();
InputStream is = new ProxyInputStream(new FileInputStream(file.toFile())) {
@Override
public void close() throws IOException {
// Make close() idempotent.
if (isClosed) {
return;
}
try {
super.close();
} finally {
// Always drop the lock and clear the state, even if close() failed.
release();
isClosed = true;
stream = null;
}
LOG.trace("Finishes read '{}'", file);
}
};
stream = is;
return is;
} catch (Exception ex) {
// Failed before handing out the stream: release the lock for other callers.
release();
throw ex;
}
} | java | {
"resource": ""
} |
q172402 | DataStore.getOutputStream | test | public OutputStream getOutputStream() throws IOException {
// Opens a temp file for writing under the store's lock. A pre-existing data
// file is first moved aside to fileOld so it can be recovered if the write is
// interrupted.
acquireLock();
try {
isClosed = false;
forWrite = true;
LOG.trace("Starts write '{}'", file);
// Repair any interrupted previous write before starting a new one.
verifyAndRecover();
if (Files.exists(file)) {
Files.move(file, fileOld);
LOG.trace("Starting write, move '{}' to '{}'", file, fileOld);
}
OutputStream os = new ProxyOutputStream(new FileOutputStream(fileTmp.toFile())) {
@Override
public void close() throws IOException {
// Make close() idempotent.
if (isClosed) {
return;
}
try {
super.close();
} finally {
// NOTE(review): unlike getInputStream(), close() here does NOT call
// release(); callers are expected to commit() and release() explicitly
// (see SchAdmin.updateTokenFile in this file) - confirm all write paths do.
isClosed = true;
stream = null;
}
LOG.trace("Finishes write '{}'", file);
}
};
stream = os;
return os;
} catch (Exception ex) {
// Failed before handing out the stream: release the lock for other callers.
release();
throw ex;
}
} | java | {
"resource": ""
} |
q172403 | DataStore.exists | test | public boolean exists() throws IOException {
  // A store "exists" only when its data file is present AND non-empty; the
  // check runs under the lock, after recovering any interrupted write.
  acquireLock();
  try {
    verifyAndRecover();
    if (!Files.exists(file)) {
      return false;
    }
    return Files.size(file) > 0;
  } finally {
    release();
  }
} | java | {
"resource": ""
} |
q172404 | HdfsUtils.parseFsPermission | test | public static FsPermission parseFsPermission(String permissions) throws IllegalArgumentException {
// Parses a permission string in either octal/symbolic form (e.g. "755",
// "a+rwx") or unix "ls -l" style (e.g. "rwxr-xr-x" / "-rwxr-xr-x").
// Throws IllegalArgumentException when neither format parses.
try {
// Octal or symbolic representation
return new FsPermission(permissions);
} catch (IllegalArgumentException e) {
// FsPermission.valueOf will work with unix style permissions which is 10 characters
// where the first character says the type of file
if (permissions.length() == 9) {
// This means it is a posix standard without the first character for file type
// We will simply set it to '-' suggesting regular file
permissions = "-" + permissions;
}
// Try to parse unix style format.
return FsPermission.valueOf(permissions);
}
} | java | {
"resource": ""
} |
q172405 | ClassLoaderStageLibraryTask.validateAllServicesAvailable | test | private void validateAllServicesAvailable() {
  // Pass 1: every service a stage depends on must be present in serviceMap.
  List<String> missingServices = new LinkedList<>();
  for (StageDefinition stageDefinition : stageList) {
    for (ServiceDependencyDefinition dependency : stageDefinition.getServices()) {
      if (serviceMap.containsKey(dependency.getService())) {
        continue;
      }
      missingServices.add(Utils.format("Stage {} is missing service {}", stageDefinition.getName(), dependency.getService().getName()));
    }
  }
  if (!missingServices.isEmpty()) {
    throw new RuntimeException("Missing services: " + StringUtils.join(missingServices, ", "));
  }
  // Pass 2: every loaded service must be supported by this runtime engine.
  List<String> unsupportedServices = new LinkedList<>();
  for (ServiceDefinition serviceDefinition : serviceList) {
    if (!ServiceRuntime.supports(serviceDefinition.getProvides())) {
      unsupportedServices.add(serviceDefinition.getProvides().toString());
    }
  }
  if (!unsupportedServices.isEmpty()) {
    throw new RuntimeException("Unsupported services: " + StringUtils.join(unsupportedServices, ", "));
  }
} | java | {
"resource": ""
} |
q172406 | Aggregators.createSimple | test | @SuppressWarnings("unchecked")
// Creates, registers and wires a simple aggregator of the given class with the
// shared data provider. Must be called before start(). Reflection failures are
// rethrown as RuntimeException.
public <A extends SimpleAggregator> A createSimple(String name, Class<? extends Aggregator> klass) {
Utils.checkState(!started, "Already started");
try {
// CONSTRUCTORS presumably caches a (String name) constructor per class - confirm.
A aggregator = (A) CONSTRUCTORS.get(klass).newInstance(name);
dataProvider.addAggregator(aggregator);
aggregator.setDataProvider(dataProvider);
return aggregator;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | java | {
"resource": ""
} |
q172407 | Aggregators.getAggregatorUnit | test | <A extends SimpleAggregator, T> Class<? extends Number> getAggregatorUnit(Class<A> klass) {
try {
A aggregator = (A) CONSTRUCTORS.get(klass).newInstance("forAggregatorTypeDiscoveryOnly");
return aggregator.getValueType();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | java | {
"resource": ""
} |
q172408 | Aggregators.createAggregatorData | test | <A extends SimpleAggregator, T> AggregatorData<A, T> createAggregatorData(
Class<A> klass,
String name,
long timeWindowMillis) {
try {
A aggregator = (A) CONSTRUCTORS.get(klass).newInstance(name);
return aggregator.createAggregatorData(timeWindowMillis);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | java | {
"resource": ""
} |
q172409 | Aggregators.createGroupBy | test | @SuppressWarnings("unchecked")
public <A extends SimpleAggregator, N extends Number> GroupByAggregator<A, N> createGroupBy(
    String name, Class<? extends Aggregator> aKlass
) {
  // Creates, registers and wires a group-by aggregator with the shared data
  // provider. Must be called before start().
  Utils.checkState(!started, "Already started");
  // Use the diamond instead of the previous raw GroupByAggregator so the only
  // unchecked step left is the one the annotation already covers.
  GroupByAggregator<A, N> aggregator = new GroupByAggregator<>(name, aKlass, this);
  dataProvider.addAggregator(aggregator);
  aggregator.setDataProvider(dataProvider);
  return aggregator;
} | java | {
"resource": ""
} |
q172410 | Aggregators.start | test | public void start(long newDataWindowEndTimeMillis) {
// Starts the underlying data provider with the end time of the first data
// window. May only be called once; a stopped instance cannot be restarted.
Utils.checkState(!started, "Already started");
Utils.checkState(!stopped, "Already stopped");
dataProvider.start(newDataWindowEndTimeMillis);
started = true;
} | java | {
"resource": ""
} |
q172411 | Aggregators.stop | test | public Map<Aggregator, AggregatorData> stop() {
  // Stops the data provider and returns its final per-aggregator data.
  // Fixed the first precondition message: it guards against NOT being started,
  // but previously read "Already started" (copy/paste from start(); compare
  // with roll(), which correctly says "Not started").
  Utils.checkState(started, "Not started");
  Utils.checkState(!stopped, "Already stopped");
  Map<Aggregator, AggregatorData> aggregatorDataMap = dataProvider.stop();
  stopped = true;
  return aggregatorDataMap;
} | java | {
"resource": ""
} |
q172412 | Aggregators.roll | test | public Map<Aggregator, AggregatorData> roll(long newDataWindowEndTimeMillis) {
// Rolls to a new data window, returning the closed window's data per
// aggregator. Only valid while started and not yet stopped.
Utils.checkState(started, "Not started");
Utils.checkState(!stopped, "Already stopped");
return dataProvider.roll(newDataWindowEndTimeMillis);
} | java | {
"resource": ""
} |
q172413 | LoginManager.acquireLoginManager | test | public static final LoginManager acquireLoginManager(LoginType loginType, Map<String, ?> configs) throws IOException, LoginException {
// Returns the cached LoginManager for the login type (creating it on first
// use) and bumps its reference count via acquire(). Guarded by the class-wide
// lock so creation and caching are atomic with respect to release().
synchronized (LoginManager.class) {
LoginManager loginManager = CACHED_INSTANCES.get(loginType);
if (loginManager == null) {
loginManager = new LoginManager(loginType, configs);
CACHED_INSTANCES.put(loginType, loginManager);
}
return loginManager.acquire();
}
} | java | {
"resource": ""
} |
q172414 | LoginManager.release | test | public void release() {
// Drops one reference; when the last reference is released the instance is
// evicted from the cache and its underlying login is shut down. Uses the same
// class-wide lock as acquireLoginManager().
synchronized (LoginManager.class) {
if (refCount == 0)
throw new IllegalStateException("release called on LoginManager with refCount == 0")
;
else if (refCount == 1) {
CACHED_INSTANCES.remove(loginType);
login.shutdown();
}
--refCount;
}
} | java | {
"resource": ""
} |
q172415 | FileFilter.globToRegex | test | private static String globToRegex(String glob) {
  // Converts a simple file glob (*, ?) into a regex fragment. Rejects globs
  // that could escape the directory or reference hidden/home paths.
  // Also reject the empty string, which previously escaped as an unhelpful
  // StringIndexOutOfBoundsException from charAt(0).
  if (glob.isEmpty() || glob.charAt(0) == '.' || glob.contains("/") || glob.contains("~")) {
    throw new IllegalArgumentException("Invalid character in file glob");
  }
  // treat dot as a literal.
  glob = glob.replace(".", "\\.");
  // NOTE(review): '*' maps to ".+" (one or MORE chars) and '?' to ".{1}+", so
  // '*' does not match the empty string here - confirm that is intended.
  glob = glob.replace("*", ".+");
  glob = glob.replace("?", ".{1}+");
  return glob;
} | java | {
"resource": ""
} |
q172416 | AmazonS3Util.listObjectsLexicographically | test | static List<S3ObjectSummary> listObjectsLexicographically(
AmazonS3 s3Client,
S3ConfigBean s3ConfigBean,
AntPathMatcher pathMatcher,
S3Offset s3Offset,
int fetchSize
) {
// Incrementally scan objects after the marker (s3Offset).
// Pages through the bucket listing (BATCH_SIZE keys per request) and collects
// up to fetchSize objects under the common prefix whose remaining key matches
// the configured Ant-style pattern. Returns fewer than fetchSize only when
// the listing is exhausted.
List<S3ObjectSummary> list = new ArrayList<>(fetchSize);
ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
listObjectsRequest.setBucketName(s3ConfigBean.s3Config.bucket);
listObjectsRequest.setPrefix(s3ConfigBean.s3Config.commonPrefix);
listObjectsRequest.setMaxKeys(BATCH_SIZE);
if (s3Offset.getKey() != null) {
listObjectsRequest.setMarker(s3Offset.getKey());
}
ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);
while (true) {
for (S3ObjectSummary s : objectListing.getObjectSummaries()) {
String fullPrefix = s.getKey();
String remainingPrefix = fullPrefix.substring(s3ConfigBean.s3Config.commonPrefix.length(), fullPrefix.length());
// Skip the common-prefix placeholder itself (empty remainder).
if (!remainingPrefix.isEmpty()) {
if (pathMatcher.match(s3ConfigBean.s3FileConfig.prefixPattern, remainingPrefix)) {
list.add(s);
}
// We've got enough objects.
if (list.size() == fetchSize) {
return list;
}
}
}
// Listing is complete. No more objects to be listed.
if (!objectListing.isTruncated()) {
break;
}
objectListing = s3Client.listNextBatchOfObjects(objectListing);
}
return list;
} | java | {
"resource": ""
} |
q172417 | ProductionPipelineRunner.stop | test | public void stop() throws PipelineException {
// Signals the pipeline to stop; the run loop is expected to observe the flag.
this.stop = true;
// If a snapshot capture is in flight, cancel it and remove its persisted data.
if(batchesToCapture > 0) {
cancelSnapshot(this.snapshotName);
snapshotStore.deleteSnapshot(pipelineName, revision, snapshotName);
}
} | java | {
} | java | {
"resource": ""
} |
q172418 | ProductionPipelineRunner.produceEmptyBatchesForIdleRunners | test | public int produceEmptyBatchesForIdleRunners(long idleTime) throws PipelineException, StageException {
  LOG.debug("Checking if any active runner is idle");
  // The empty batch is supposed to be fast - almost zero time. It could however happen that for some reason it
  // takes a long time (possibly more than idleTime). To avoid infinite loops, this method only processes up to
  // the total number of runners before returning.
  int counter = 0;
  // Acquire the lock BEFORE the try: previously lock() sat inside the try, so
  // if lock() itself threw, the finally would call unlock() on a lock we never
  // held and raise IllegalMonitorStateException, masking the real failure.
  destroyLock.lock();
  try {
    while (running && counter < pipes.size()) {
      counter++;
      PipeRunner runner = null;
      try {
        runner = runnerPool.getIdleRunner(idleTime);
        // No more idle runners, simply stop the idle execution now
        if (runner == null) {
          return counter;
        }
        LOG.debug("Generating empty batch for runner: {}", runner.getRunnerId());
        pipeContext.getRuntimeStats().incIdleBatchCount();
        // Pipe batch to keep the batch info
        FullPipeBatch pipeBatch = new FullPipeBatch(null, null, 0, false);
        pipeBatch.setIdleBatch(true);
        // We're explicitly skipping the origin because this is a framework generated, empty batch
        pipeBatch.skipStage(originPipe);
        executeRunner(
            runner,
            System.currentTimeMillis(),
            pipeBatch,
            null,
            null,
            new HashMap<>(),
            new HashMap<>()
        );
      } finally {
        // Always hand the runner back to the pool.
        if (runner != null) {
          runnerPool.returnRunner(runner);
        }
      }
    }
  } finally {
    destroyLock.unlock();
  }
  return counter;
} | java | {
"resource": ""
} |
q172419 | ProductionPipelineRunner.createFailureBatch | test | private void createFailureBatch(FullPipeBatch pipeBatch) {
// Persists a snapshot of the failing batch - at most one failure snapshot per
// pipeline - when the pipeline config enables it. Serialization errors are
// logged, never propagated.
if(!pipelineConfigBean.shouldCreateFailureSnapshot) {
return;
}
try {
for(SnapshotInfo info : snapshotStore.getSummaryForPipeline(pipelineName, revision)) {
// Allow only one failure snapshot to be present on a pipeline
if(info.isFailureSnapshot()) {
LOG.trace("Skipping creation of failure snapshot as {} already exists.", info.getId());
return;
}
}
// Note: this local shadows the instance field of the same name used by
// stop()/snapshot capture; it applies to this failure snapshot only.
String snapshotName = "Failure_" + UUID.randomUUID().toString();
String snapshotLabel = "Failure at " + LocalDateTime.now().toString();
snapshotStore.create("", pipelineName, revision, snapshotName, snapshotLabel, true);
snapshotStore.save(pipelineName, revision, snapshotName, -1, ImmutableList.of(pipeBatch.createFailureSnapshot()));
} catch (PipelineException ex) {
LOG.error("Can't serialize failure snapshot", ex);
}
} | java | {
"resource": ""
} |
q172420 | CassandraTarget.recordToBoundStatement | test | @SuppressWarnings("unchecked")
// Converts a record into a BoundStatement for the prepared INSERT covering the
// columns actually present on the record. Missing or null fields are dropped
// from the column set; SDC List/Map fields are unpacked into raw Java
// collections. Returns null when binding fails (the record is routed to the
// error handler instead).
private BoundStatement recordToBoundStatement(Record record) throws StageException {
ImmutableList.Builder<Object> values = new ImmutableList.Builder<>();
SortedSet<String> columnsPresent = Sets.newTreeSet(columnMappings.keySet());
// NOTE(review): values are appended in columnMappings iteration order while
// columnsPresent is sorted; this only lines up if columnMappings iterates in
// sorted key order (e.g. a TreeMap) - confirm.
for (Map.Entry<String, String> mapping : columnMappings.entrySet()) {
String columnName = mapping.getKey();
String fieldPath = mapping.getValue();
// If we're missing fields, skip them.
// If a field is present, but null, also remove it from columnsPresent since we can't write nulls.
if (!record.has(fieldPath) || record.get(fieldPath).getValue() == null) {
columnsPresent.remove(columnName);
continue;
}
final Object value = record.get(fieldPath).getValue();
// Special cases for handling SDC Lists and Maps,
// basically unpacking them into raw types.
if (value instanceof List) {
List<Object> unpackedList = new ArrayList<>();
for (Field item : (List<Field>) value) {
unpackedList.add(item.getValue());
}
values.add(unpackedList);
} else if (value instanceof Map) {
Map<Object, Object> unpackedMap = new HashMap<>();
for (Map.Entry<String, Field> entry : ((Map<String, Field>) value).entrySet()) {
unpackedMap.put(entry.getKey(), entry.getValue().getValue());
}
values.add(unpackedMap);
} else {
values.add(value);
}
}
// The prepared statement is cached per distinct set of present columns.
PreparedStatement stmt = statementCache.getUnchecked(columnsPresent);
// .toArray required to pass in a list to a varargs method.
Object[] valuesArray = values.build().toArray();
BoundStatement boundStmt = null;
try {
boundStmt = stmt.bind(valuesArray);
} catch (CodecNotFoundException | InvalidTypeException | NullPointerException e) {
// NPE can occur if one of the values is a collection type with a null value inside it. Thus, it's a record
// error. Note that this runs the risk of mistakenly treating a bug as a record error.
// CodecNotFound is caused when there is no type conversion definition available from the provided type
// to the target type.
errorRecordHandler.onError(
new OnRecordErrorException(
record,
Errors.CASSANDRA_06,
record.getHeader().getSourceId(),
e.toString(),
e
)
);
}
return boundStmt;
} | java | {
"resource": ""
} |
q172421 | Configuration.set | test | public void set(Map<String, String> newConfiguration) {
  // Applies a batch of settings: a null value means "remove this key",
  // anything else overwrites the existing value.
  newConfiguration.forEach((key, value) -> {
    if (value == null) {
      unset(key);
    } else {
      set(key, value);
    }
  });
} | java | {
"resource": ""
} |
q172422 | KuduLookupProcessor.generateLookupKey | test | private KuduLookupKey generateLookupKey(final Record record, final String tableName) throws OnRecordErrorException{
Map<String, Field> keyList = new HashMap<>();
for (Map.Entry<String, String> key : columnToField.entrySet()){
String fieldName = key.getValue();
if (!record.has(fieldName)) {
throw new OnRecordErrorException(record, Errors.KUDU_32, fieldName);
}
keyList.put(key.getKey(), record.get(fieldName));
}
return new KuduLookupKey(tableName, keyList);
} | java | {
"resource": ""
} |
q172423 | MultithreadedTableProvider.handlePartitioningTurnedOffOrOn | test | private void handlePartitioningTurnedOffOrOn(
SortedSetMultimap<TableContext, TableRuntimeContext> reconstructedPartitions
) {
// Reconciles restored partitions with each table's CURRENT partitioning
// config. When partitioning was turned off, appends one final partition with
// no max offsets; when turned on, replaces the last non-partitioned context
// with a properly bounded first partition. Tables whose mode is unchanged
// are left untouched.
for (TableContext tableContext : reconstructedPartitions.keySet()) {
final SortedSet<TableRuntimeContext> partitions = reconstructedPartitions.get(tableContext);
// Only the newest (last) partition matters for detecting a mode switch.
final TableRuntimeContext lastPartition = partitions.last();
final TableContext sourceTableContext = lastPartition.getSourceTableContext();
Utils.checkState(
sourceTableContext.equals(tableContext),
String.format(
"Source table context for %s should match TableContext map key of %s",
lastPartition.getDescription(),
tableContext.getQualifiedName()
)
);
final boolean partitioningTurnedOff = lastPartition.isPartitioned()
&& sourceTableContext.getPartitioningMode() == PartitioningMode.DISABLED;
final boolean partitioningTurnedOn = !lastPartition.isPartitioned()
&& sourceTableContext.isPartitionable()
&& sourceTableContext.getPartitioningMode() != PartitioningMode.DISABLED;
if (!partitioningTurnedOff && !partitioningTurnedOn) {
continue;
}
final Map<String, String> nextStartingOffsets = new HashMap<>();
final Map<String, String> nextMaxOffsets = new HashMap<>();
// Sequence numbers start at 1; non-positive means the last context was unsequenced.
final int newPartitionSequence = lastPartition.getPartitionSequence() > 0 ? lastPartition.getPartitionSequence() + 1 : 1;
if (partitioningTurnedOff) {
LOG.info(
"Table {} has switched from partitioned to non-partitioned; partition sequence {} will be the last (with" +
" no max offsets)",
sourceTableContext.getQualifiedName(),
newPartitionSequence
);
// New start = the offset just past the last partition's starting offsets.
lastPartition.getStartingPartitionOffsets().forEach(
(col, off) -> {
String basedOnStartOffset = lastPartition.generateNextPartitionOffset(col, off);
nextStartingOffsets.put(col, basedOnStartOffset);
}
);
} else if (partitioningTurnedOn) {
// Start the first real partition from the last STORED offsets, and bound
// it with generated max offsets.
lastPartition.getStartingPartitionOffsets().forEach(
(col, off) -> {
String basedOnStoredOffset = lastPartition.getInitialStoredOffsets().get(col);
nextStartingOffsets.put(col, basedOnStoredOffset);
}
);
nextStartingOffsets.forEach(
(col, off) -> nextMaxOffsets.put(col, lastPartition.generateNextPartitionOffset(col, off))
);
// The stale non-partitioned context is superseded, so drop it.
if (!reconstructedPartitions.remove(sourceTableContext, lastPartition)) {
throw new IllegalStateException(String.format(
"Failed to remove partition %s for table %s in switching partitioning from off to on",
lastPartition.getDescription(),
sourceTableContext.getQualifiedName()
));
}
LOG.info(
"Table {} has switched from non-partitioned to partitioned; using last stored offsets as the starting" +
" offsets for the new partition {}",
sourceTableContext.getQualifiedName(),
newPartitionSequence
);
}
final TableRuntimeContext nextPartition = new TableRuntimeContext(
sourceTableContext,
lastPartition.isUsingNonIncrementalLoad(),
(lastPartition.isPartitioned() && !partitioningTurnedOff) || partitioningTurnedOn,
newPartitionSequence,
nextStartingOffsets,
nextMaxOffsets
);
reconstructedPartitions.put(sourceTableContext, nextPartition);
}
} | java | {
"resource": ""
} |
q172424 | LogCharDataParser.readLine | test | int readLine(StringBuilder sb) throws IOException {
// Reads one line from 'reader' into sb (up to the EOL, which
// checkEolAndAdjust presumably consumes/normalizes - confirm) and returns the
// number of characters consumed, or -1 when EOF is hit on the first read.
int c = reader.read();
int count = (c == -1) ? -1 : 0;
while (c > -1 && !isOverMaxObjectLen(count) && !checkEolAndAdjust(c)) {
count++;
sb.append((char) c);
c = reader.read();
}
if (isOverMaxObjectLen(count)) {
// Line exceeded the max length: drop the last buffered char and skip the
// rest of the oversized line up to its EOL, still counting consumed chars.
sb.setLength(sb.length() - 1);
while (c > -1 && c != '\n' && c != '\r') {
count++;
c = reader.read();
}
checkEolAndAdjust(c);
}
return count;
} | java | {
"resource": ""
} |
q172425 | UsageTimer.roll | test | public UsageTimer roll() {
// Atomically snapshots the current multiplier and zeroes it on this timer
// (stopping all accounting), then returns a fresh same-named timer carrying
// the snapshotted multiplier.
int multiplier;
synchronized (this) {
multiplier = getMultiplier();
changeMultiplier(-multiplier); //stopAll;
}
return new UsageTimer().setName(getName()).changeMultiplier(multiplier);
} | java | {
"resource": ""
} |
q172426 | SchAdmin.enableDPM | test | public static void enableDPM(DPMInfoJson dpmInfo, Context context) throws IOException {
// Enables Control Hub (DPM) for this data collector: logs in, mints a new
// application token (unless a token already exists for the same base URL),
// persists the token file, and finally updates dpm.properties. The user auth
// token is always logged out, even on failure.
Utils.checkNotNull(dpmInfo, "DPMInfo");
String dpmBaseURL = normalizeDpmBaseURL(dpmInfo.getBaseURL());
// Since we support enabling/Disabling DPM, first check if token already exists for the given DPM URL.
// If token exists skip first 3 steps
String currentDPMBaseURL = context.configuration.get(RemoteSSOService.DPM_BASE_URL_CONFIG, "");
String currentAppAuthToken = context.configuration.get(RemoteSSOService.SECURITY_SERVICE_APP_AUTH_TOKEN_CONFIG, "").trim();
if (!currentDPMBaseURL.equals(dpmBaseURL) || currentAppAuthToken.length() == 0) {
// 1. Login to DPM to get user auth token
String userAuthToken = retrieveUserToken(dpmBaseURL, dpmInfo.getUserID(), dpmInfo.getUserPassword());
String appAuthToken = null;
// 2. Create Data Collector application token
Response response = null;
try {
Map<String, Object> newComponentJson = new HashMap<>();
newComponentJson.put("organization", dpmInfo.getOrganization());
newComponentJson.put("componentType", "dc");
newComponentJson.put("numberOfComponents", 1);
newComponentJson.put("active", true);
response = ClientBuilder.newClient()
.target(dpmBaseURL + "/security/rest/v1/organization/" + dpmInfo.getOrganization() + "/components")
.register(new CsrfProtectionFilter("CSRF"))
.request()
.header(SSOConstants.X_USER_AUTH_TOKEN, userAuthToken)
.put(Entity.json(newComponentJson));
if (response.getStatus() != Response.Status.CREATED.getStatusCode()) {
throw new RuntimeException(Utils.format("DPM Create Application Token failed, status code '{}': {}",
response.getStatus(),
response.readEntity(String.class)
));
}
List<Map<String, Object>> newComponent = response.readEntity(new GenericType<List<Map<String,Object>>>() {});
if (newComponent.size() > 0) {
appAuthToken = (String) newComponent.get(0).get("fullAuthToken");
} else {
throw new RuntimeException("DPM Create Application Token failed: No token data from DPM Server.");
}
} finally {
if (response != null) {
response.close();
}
// Logout from DPM
logout(dpmBaseURL, userAuthToken);
}
// 3. Update App Token file
updateTokenFile(context, appAuthToken);
}
// 4. Update dpm.properties file
updateDpmProperties(context, dpmBaseURL, dpmInfo.getLabels(), true);
} | java | {
"resource": ""
} |
q172427 | SchAdmin.disableDPM | test | public static void disableDPM(String username, String password, String organizationId, Context context) throws IOException {
  // Authenticate first, then disable; the user token is always logged out,
  // even when disabling fails.
  final String baseUrl = normalizeDpmBaseURL(context.configuration.get(RemoteSSOService.DPM_BASE_URL_CONFIG, ""));
  final String userToken = retrieveUserToken(baseUrl, username, password);
  try {
    disableDPM(userToken, organizationId, context);
  } finally {
    logout(baseUrl, userToken);
  }
} | java | {
"resource": ""
} |
q172428 | SchAdmin.normalizeDpmBaseURL | test | private static String normalizeDpmBaseURL(String url) {
  // Strip a single trailing slash so path segments can be appended uniformly.
  return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
} | java | {
"resource": ""
} |
q172429 | SchAdmin.retrieveUserToken | test | private static String retrieveUserToken(String url, String username, String password) {
  // Logs the user into the DPM security service and returns the user auth
  // token header. Non-200 responses become RuntimeException; the response is
  // always closed.
  Response response = null;
  try {
    Map<String, String> loginJson = new HashMap<>();
    loginJson.put("userName", username);
    loginJson.put("password", password);
    response = ClientBuilder.newClient()
        .target(url + "/security/public-rest/v1/authentication/login")
        .register(new CsrfProtectionFilter("CSRF"))
        .request()
        .post(Entity.json(loginJson));
    if (response.getStatus() != Response.Status.OK.getStatusCode()) {
      throw new RuntimeException(Utils.format("DPM Login failed, status code '{}': {}",
          response.getStatus(),
          response.readEntity(String.class)
      ));
    }
    // Read the auth token header while the response is still open; the
    // previous code read it AFTER close(), which relies on unspecified
    // JAX-RS client behavior.
    return response.getHeaderString(SSOConstants.X_USER_AUTH_TOKEN);
  } finally {
    if (response != null) {
      response.close();
    }
  }
} | java | {
"resource": ""
} |
q172430 | SchAdmin.logout | test | private static void logout(String dpmBaseURL, String userAuthToken) {
// Best-effort logout of the given user token from the DPM security service;
// the response status is deliberately ignored, the response is only closed.
Response response = null;
try {
response = ClientBuilder.newClient()
.target(dpmBaseURL + "/security/_logout")
.register(new CsrfProtectionFilter("CSRF"))
.request()
.header(SSOConstants.X_USER_AUTH_TOKEN, userAuthToken)
.cookie(SSOConstants.AUTHENTICATION_COOKIE_PREFIX + "LOGIN", userAuthToken)
.get();
} finally {
if (response != null) {
response.close();
}
}
} | java | {
"resource": ""
} |
q172431 | SchAdmin.updateTokenFile | test | private static void updateTokenFile(Context context, String appAuthToken) throws IOException {
// Writes the application auth token atomically via DataStore. The default
// location is APP_TOKEN_FILE under the config dir unless the context supplies
// an explicit path.
File tokenFile = context.tokenFilePath == null ? new File(context.runtimeInfo.getConfigDir(), APP_TOKEN_FILE) : new File(context.tokenFilePath);
DataStore dataStore = new DataStore(tokenFile);
try (OutputStream os = dataStore.getOutputStream()) {
IOUtils.write(appAuthToken, os);
dataStore.commit(os);
} finally {
// DataStore's write stream does not release the lock on close (see
// DataStore.getOutputStream), so release explicitly here.
dataStore.release();
}
} | java | {
"resource": ""
} |
q172432 | SchAdmin.updateDpmProperties | test | private static void updateDpmProperties(Context context, String dpmBaseURL, List<String> labels, boolean enableSch) {
  // Persists the SCH/DPM settings (enabled flag, base URL, token-file
  // reference and job labels) into <config-dir>/dpm.properties. No-op when the
  // context asks to skip the file update.
  if (context.skipUpdatingDpmProperties) {
    return;
  }
  try {
    FileBasedConfigurationBuilder<PropertiesConfiguration> builder =
        new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class)
            .configure(new Parameters().properties()
                .setFileName(context.runtimeInfo.getConfigDir() + "/dpm.properties")
                .setThrowExceptionOnMissing(true)
                .setListDelimiterHandler(new DefaultListDelimiterHandler(';'))
                .setIncludesAllowed(false));
    // Dropped the redundant "config = null" initialization; getConfiguration()
    // either returns a value or throws.
    PropertiesConfiguration config = builder.getConfiguration();
    config.setProperty(RemoteSSOService.DPM_ENABLED, Boolean.toString(enableSch));
    config.setProperty(RemoteSSOService.DPM_BASE_URL_CONFIG, dpmBaseURL);
    config.setProperty(RemoteSSOService.SECURITY_SERVICE_APP_AUTH_TOKEN_CONFIG, APP_TOKEN_FILE_PROP_VAL);
    // Labels are optional; a null or empty list clears the property.
    if (labels != null && !labels.isEmpty()) {
      config.setProperty(RemoteEventHandlerTask.REMOTE_JOB_LABELS, StringUtils.join(labels, ','));
    } else {
      config.setProperty(RemoteEventHandlerTask.REMOTE_JOB_LABELS, "");
    }
    builder.save();
  } catch (ConfigurationException e) {
    throw new RuntimeException(Utils.format("Updating dpm.properties file failed: {}", e.getMessage()), e);
  }
} | java | {
"resource": ""
} |
q172433 | AvroSchemaGenerator.schemaFieldForType | test | private Schema.Field schemaFieldForType(
String fieldPath,
Record record,
String fieldName,
Field field
) throws OnRecordErrorException {
// Builds an Avro record field for the given SDC field. When "nullable fields"
// is enabled the schema is wrapped in union(null, schema); the default value
// is always derived from the UNWRAPPED schema.
Schema simpleSchema = simpleSchemaForType(fieldPath, record, field);
Schema finalSchema = simpleSchema;
// If Nullable check box was selected, wrap the whole schema in union with null
if(getConfig().avroNullableFields) {
finalSchema = Schema.createUnion(ImmutableList.of(
Schema.create(Schema.Type.NULL),
simpleSchema
));
}
return new Schema.Field(
fieldName,
finalSchema,
null,
getDefaultValue(simpleSchema)
);
} | java | {
"resource": ""
} |
q172434 | AvroSchemaGenerator.complexSchemaForType | test | private Schema complexSchemaForType(
String fieldPath,
Record record,
Field field
) throws OnRecordErrorException {
// Like schemaFieldForType, but returns a bare Schema (for nested/complex
// positions) instead of a Schema.Field; the default is attached as a
// "defaultValue" schema property rather than a field default.
Schema simpleSchema = simpleSchemaForType(fieldPath, record, field);
Schema finalSchema = simpleSchema;
// When nullable fields are enabled, wrap in union(null, schema).
if(getConfig().avroNullableFields) {
finalSchema = Schema.createUnion(ImmutableList.of(
Schema.create(Schema.Type.NULL),
simpleSchema
));
}
JsonNode defaultValue = getDefaultValue(simpleSchema);
if(defaultValue != null) {
finalSchema.addProp("defaultValue", defaultValue);
}
return finalSchema;
} | java | {
"resource": ""
} |
q172435 | AvroSchemaGenerator.getDecimalScaleOrPrecision | test | private int getDecimalScaleOrPrecision(
    Record record,
    Field field,
    String attributeName,
    int defaultValue,
    int minAllowed
) throws OnRecordErrorException {
  // Resolves a decimal scale/precision for the field. Resolution order:
  // 1) the field attribute, 2) the stage default; if neither yields a value
  // >= minAllowed the record is sent to error (SCHEMA_GEN_0004).
  int finalValue = -1; // sentinel: "not resolved yet"
  // Firstly try the field attribute.
  String stringValue = field.getAttribute(attributeName);
  if (!StringUtils.isEmpty(stringValue)) {
    // parseInt avoids the pointless boxing of Integer.valueOf; behavior
    // (including NumberFormatException on garbage) is unchanged.
    // NOTE(review): a non-numeric attribute still throws NumberFormatException
    // here - confirm upstream guarantees numeric attribute values.
    finalValue = Integer.parseInt(stringValue);
  }
  // If it's invalid, fall back to the default value.
  if (finalValue < minAllowed) {
    finalValue = defaultValue;
  }
  // If even the default value is invalid, send the record to error.
  if (finalValue < minAllowed) {
    throw new OnRecordErrorException(record, Errors.SCHEMA_GEN_0004, finalValue, field);
  }
  return finalValue;
} | java | {
"resource": ""
} |
q172436 | AvroSchemaGenerator.getDefaultValue | test | private JsonNode getDefaultValue(Schema schema) {
  // Default for a generated field: JSON null when nullable defaults are on,
  // the per-type canned default when fields are non-nullable, otherwise none.
  if (getConfig().avroNullableFields) {
    return getConfig().avroDefaultNullable ? NullNode.getInstance() : null;
  }
  return defaultValuesForTypes.getOrDefault(schema.getType(), null);
} | java | {
"resource": ""
} |
q172437 | FileRefUtil.initMetricsIfNeeded | test | @SuppressWarnings("unchecked")
// Idempotently creates the per-context file-transfer gauge (with zeroed
// statistics) and the throughput meter; synchronized so concurrent callers
// cannot double-create them.
public static synchronized void initMetricsIfNeeded(ProtoConfigurableEntity.Context context) {
Gauge<Map<String, Object>> gauge = context.getGauge(fileStatisticGaugeName(context));
if(gauge == null) {
// Key display order is fixed by GAUGE_MAP_ORDERING.
gauge = context.createGauge(fileStatisticGaugeName(context), Comparator.comparing(GAUGE_MAP_ORDERING::get));
Map<String, Object> gaugeStatistics = gauge.getValue();
//File name is populated at the MetricEnabledWrapperStream.
gaugeStatistics.put(FileRefUtil.FILE, "");
gaugeStatistics.put(FileRefUtil.TRANSFER_THROUGHPUT, 0L);
gaugeStatistics.put(FileRefUtil.SENT_BYTES, String.format(FileRefUtil.BRACKETED_TEMPLATE, 0, 0));
gaugeStatistics.put(FileRefUtil.REMAINING_BYTES, 0L);
gaugeStatistics.put(FileRefUtil.COMPLETED_FILE_COUNT, 0L);
}
Meter dataTransferMeter = context.getMeter(FileRefUtil.TRANSFER_THROUGHPUT_METER);
if (dataTransferMeter == null) {
context.createMeter(FileRefUtil.TRANSFER_THROUGHPUT_METER);
}
} | java | {
"resource": ""
} |
q172438 | LocalFileSystem.compare | test | public int compare(WrappedFile path1, WrappedFile path2, boolean useLastModified) {
  // Orders two files, treating a vanished path2 as "newer wins" (returns 1) so
  // a file consumed/archived mid-comparison does not break ordering.
  // why not just check if the file exists? Well, there is a possibility file gets moved/archived/deleted right after
  // that check. In that case we will still fail. So fail, and recover.
  try {
    if (useLastModified && !exists(path2)) {
      return 1;
    }
    return getComparator(useLastModified).compare(path1, path2);
  } catch (RuntimeException ex) {
    Throwable cause = ex.getCause();
    // Happens only in timestamp ordering.
    // Very unlikely this will happen, new file has to be added to the queue at the exact time when
    // the currentFile was consumed and archived while a new file has not yet been picked up for processing.
    // Ignore - we just add the new file, since this means this file is indeed newer
    // (else this would have been consumed and archived first)
    // instanceof is already null-safe, so the previous "cause != null &&" was redundant.
    if (cause instanceof NoSuchFileException) {
      LOG.debug("Starting file may have already been archived.", cause);
      return 1;
    }
    LOG.warn("Error while comparing files", ex);
    throw ex;
  }
} | java | {
"resource": ""
} |
q172439 | Pattern.indexOf | test | public int indexOf(String groupName, int index) {
  // Returns the regex group index of the index-th occurrence of the named
  // group, or -1 when the name is unknown.
  if (!groupInfo.containsKey(groupName)) {
    return -1;
  }
  return groupInfo.get(groupName).get(index).groupIndex();
} | java | {
"resource": ""
} |
q172440 | Pattern.groupNames | test | public List<String> groupNames() {
// Lazily caches the distinct group names, in first-seen order (groupInfo is
// built as a LinkedHashMap by extractGroupInfo).
// NOTE(review): the lazy init is not thread-safe; benign if instances are
// thread-confined - confirm.
if (groupNames == null) {
groupNames = new ArrayList<String>(groupInfo.keySet());
}
return groupNames;
} | java | {
"resource": ""
} |
q172441 | Pattern.isEscapedChar | test | static private boolean isEscapedChar(String s, int pos) {
// True when the char at 'pos' is escaped, either by a preceding backslash run
// (odd count) or by quoting (isQuoteEscapedChar - presumably \Q...\E; that
// helper is not shown here).
return isSlashEscapedChar(s, pos) || isQuoteEscapedChar(s, pos);
} | java | {
"resource": ""
} |
q172442 | Pattern.isSlashEscapedChar | test | static private boolean isSlashEscapedChar(String s, int pos) {
  // A character is slash-escaped iff an ODD number of consecutive backslashes
  // immediately precedes it (an even run means the slashes escape each other
  // and are just literals).
  int i = pos - 1;
  while (i >= 0 && s.charAt(i) == '\\') {
    i--;
  }
  int precedingSlashes = (pos - 1) - i;
  return precedingSlashes % 2 != 0;
} | java | {
"resource": ""
} |
q172443 | Pattern.isInsideCharClass | test | static private boolean isInsideCharClass(String s, int pos) {
// Returns true when position 'pos' falls inside an unclosed character class:
// there is a non-escaped '[' before it with no non-escaped ']' in between.
boolean openBracketFound = false;
boolean closeBracketFound = false;
// find last non-escaped open-bracket
String s2 = s.substring(0, pos);
int posOpen = pos;
while ((posOpen = s2.lastIndexOf('[', posOpen - 1)) != -1) {
if (!isEscapedChar(s2, posOpen)) {
openBracketFound = true;
break;
}
}
if (openBracketFound) {
// search remainder of string (after open-bracket) for a close-bracket
// NOTE(review): the escape checks below run against the substring s3, so a
// backslash run starting before posOpen is invisible to them - confirm this
// cannot misclassify an escaped ']' right at the substring boundary.
String s3 = s.substring(posOpen, pos);
int posClose = -1;
while ((posClose = s3.indexOf(']', posClose + 1)) != -1) {
if (!isEscapedChar(s3, posClose)) {
closeBracketFound = true;
break;
}
}
}
return openBracketFound && !closeBracketFound;
} | java | {
"resource": ""
} |
q172444 | Pattern.countOpenParens | test | static private int countOpenParens(String s, int pos) {
// Counts capturing open-parens strictly before 'pos', i.e. the group number a
// group opened at 'pos' would receive. Skips parens that are character-class
// literals, escaped, or open non-capturing groups.
java.util.regex.Pattern p = java.util.regex.Pattern.compile("\\(");
java.util.regex.Matcher m = p.matcher(s.subSequence(0, pos));
int numParens = 0;
while (m.find()) {
// ignore parentheses inside character classes: [0-9()a-f]
// which are just literals
if (isInsideCharClass(s, m.start())) {
continue;
}
// ignore escaped parens
if (isEscapedChar(s, m.start())) continue;
if (!isNoncapturingParen(s, m.start())) {
numParens++;
}
}
return numParens;
} | java | {
"resource": ""
} |
q172445 | Pattern.extractGroupInfo | test | static public Map<String,List<GroupInfo> > extractGroupInfo(String namedPattern) {
// Scans a pattern for named groups and maps each group name to the list of
// (capturing-group index, character position) pairs where it appears.
// Insertion order of names is preserved via LinkedHashMap.
Map<String,List<GroupInfo> > groupInfo = new LinkedHashMap<String,List<GroupInfo> >();
java.util.regex.Matcher matcher = NAMED_GROUP_PATTERN.matcher(namedPattern);
while(matcher.find()) {
int pos = matcher.start();
// ignore escaped paren
if (isEscapedChar(namedPattern, pos)) continue;
String name = matcher.group(INDEX_GROUP_NAME);
// groupIndex is the count of capturing parens before this one
int groupIndex = countOpenParens(namedPattern, pos);
List<GroupInfo> list;
if (groupInfo.containsKey(name)) {
list = groupInfo.get(name);
} else {
list = new ArrayList<GroupInfo>();
}
list.add(new GroupInfo(groupIndex, pos));
groupInfo.put(name, list);
}
return groupInfo;
} | java | {
"resource": ""
} |
q172446 | Pattern.replace | test | static private StringBuilder replace(StringBuilder input, java.util.regex.Pattern pattern, String replacement) {
// Replaces every non-escaped occurrence of pattern in input (in place) with
// replacement, returning the same StringBuilder.
// NOTE(review): if replacement itself can match pattern, the reset-and-rescan
// loop below would never terminate — callers presumably guarantee it cannot;
// confirm at call sites.
java.util.regex.Matcher m = pattern.matcher(input);
while (m.find()) {
if (isEscapedChar(input.toString(), m.start())) {
continue;
}
// since we're replacing the original string being matched,
// we have to reset the matcher so that it searches the new
// string
input.replace(m.start(), m.end(), replacement);
m.reset(input);
}
return input;
} | java | {
"resource": ""
} |
q172447 | JMXJsonServlet.doGet | test | @Override
// Serves JMX MBean data as JSON (or JSONP when a "callback" parameter is
// present). Supports "get=<objectname>::<attribute>" for a single attribute
// and "qry=<objectname-pattern>" (default "*:*") for a bean listing.
public void doGet(HttpServletRequest request, HttpServletResponse response) {
try {
JsonGenerator jg = null;
String jsonpcb = null;
PrintWriter writer = null;
try {
writer = response.getWriter();
// "callback" parameter implies JSONP output
jsonpcb = request.getParameter(CALLBACK_PARAM);
if (jsonpcb != null) {
// NOTE(review): the IANA charset name is "utf-8", not "utf8" — confirm
// clients tolerate the non-standard spelling before changing it.
response.setContentType("application/javascript; charset=utf8");
writer.write(jsonpcb + "(");
} else {
response.setContentType("application/json; charset=utf8");
}
jg = jsonFactory.createGenerator(writer);
// keep the writer open: the finally block still writes the JSONP suffix
jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
jg.useDefaultPrettyPrinter();
jg.writeStartObject();
// query per mbean attribute
String getmethod = request.getParameter("get");
if (getmethod != null) {
String[] splitStrings = getmethod.split("\\:\\:");
if (splitStrings.length != 2) {
jg.writeStringField("result", "ERROR");
jg.writeStringField("message", "query format is not as expected.");
jg.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
return;
}
listBeans(jg, new ObjectName(splitStrings[0]), splitStrings[1],
response);
return;
}
// query per mbean
String qry = request.getParameter("qry");
if (qry == null) {
qry = "*:*";
}
listBeans(jg, new ObjectName(qry), null, response);
} finally {
// jg.close() flushes the generator; the JSONP suffix and writer close
// must happen afterwards, on every exit path (including early returns).
if (jg != null) {
jg.close();
}
if (jsonpcb != null) {
writer.write(");");
}
if (writer != null) {
writer.close();
}
}
} catch (IOException e) {
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
} catch (MalformedObjectNameException e) {
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
} | java | {
"resource": ""
} |
q172448 | OracleCDCSource.refreshSchema | test | private boolean refreshSchema(BigDecimal scnDecimal, SchemaAndTable schemaAndTable) throws SQLException {
// Refreshes the cached schema for a table when the given SCN is newer than
// the SCN at which the schema was last captured (or no schema is cached yet).
// Returns true when a refresh happened. Always restores session settings via
// alterSession(), even on the early-return and exception paths.
try {
if (!tableSchemaLastUpdate.containsKey(schemaAndTable) || scnDecimal.compareTo(tableSchemaLastUpdate.get(schemaAndTable)) > 0) {
if (containerized) {
// In a multitenant (CDB) database, switch to the configured PDB before
// reading the table metadata.
try (Statement switchToPdb = connection.createStatement()) {
switchToPdb.execute("ALTER SESSION SET CONTAINER = " + configBean.pdb);
}
}
tableSchemas.put(schemaAndTable, getTableSchema(schemaAndTable));
tableSchemaLastUpdate.put(schemaAndTable, scnDecimal);
return true;
}
return false;
} finally {
alterSession();
}
} | java | {
"resource": ""
} |
q172449 | OracleCDCSource.expired | test | private boolean expired(Map.Entry<TransactionIdKey, HashQueue<RecordSequence>> entry, LocalDateTime startTime) {
// A transaction is expired when it started before the configured transaction
// window (relative to startTime) and its first record (seq == 1) is still at
// the head of the queue.
// NOTE(review): peek() would return null on an empty queue and NPE here —
// callers presumably never pass an entry with an empty queue; confirm.
return startTime != null && // Can be null if starting from SCN and first batch is not complete yet.
entry.getKey().txnStartTime.isBefore(startTime.minusSeconds(configBean.txnWindow)) &&
entry.getValue().peek().seq == 1;
} | java | {
"resource": ""
} |
q172450 | SingleLineLiveFileReader.getOffset | test | @Override
public long getOffset() {
// Fail fast when the reader has been closed. (Fixes the typo in the error
// message: "LiveFileReder" -> "LiveFileReader".)
Utils.checkState(open, Utils.formatL("LiveFileReader for '{}' is not open", currentFile));
// While in truncate mode the offset is reported as a negative number so
// callers can distinguish it from a normal read position.
return (truncateMode) ? -offset : offset;
} | java | {
"resource": ""
} |
q172451 | SingleLineLiveFileReader.fastForward | test | private boolean fastForward() throws IOException {
// While truncating an over-long line, reads ahead looking for the first EOL.
// Returns true when still truncating (no EOL found yet), false once the EOL
// has been consumed and normal reading can resume. Closes the channel on I/O
// errors before rethrowing.
try {
boolean stillTruncate;
buffer.clear();
if (channel.read(buffer) > -1 || isEof()) {
//set the buffer into read from mode
buffer.flip();
//we have data, lets look for the first EOL in it.
int firstEolIdx = findEndOfFirstLine(buffer);
if (firstEolIdx > -1) {
// set position to position after first EOL
buffer.position(firstEolIdx + 1);
// set the buffer back into write into mode keeping data after first EOL
buffer.compact();
stillTruncate = false;
// offset points at the byte right after the EOL: channel position minus
// whatever post-EOL bytes were kept in the buffer
offset = channel.position() - buffer.position();
} else {
// no EOL yet
// whatever was read will be discarded on next next() call
stillTruncate = true;
offset = channel.position();
}
} else {
// no data read
// whatever was read will be discarded on next next() call
stillTruncate = true;
offset = channel.position();
}
return stillTruncate;
} catch (IOException ex) {
closeChannel();
throw ex;
}
} | java | {
"resource": ""
} |
q172452 | PipelineStoreResource.createDetachedStageEnvelope | test | @Path("/detachedstage")
@GET
@ApiOperation(value = "Returns empty envelope for detached stage.",
response = DetachedStageConfigurationJson.class,
authorizations = @Authorization(value = "basic")
)
@Produces(MediaType.APPLICATION_JSON)
@RolesAllowed({
AuthzRole.CREATOR, AuthzRole.ADMIN, AuthzRole.CREATOR_REMOTE, AuthzRole.ADMIN_REMOTE
})
// REST endpoint returning a fresh, empty detached-stage envelope that clients
// can fill in; restricted to creator/admin roles.
public Response createDetachedStageEnvelope() throws PipelineException {
DetachedStageConfigurationJson detachedStage = new DetachedStageConfigurationJson(new DetachedStageConfiguration());
return Response.ok().entity(detachedStage).build();
} | java | {
"resource": ""
} |
q172453 | MathEL.convertStringToAppropriateNumber | test | private static Object convertStringToAppropriateNumber(String value) {
// Parses a numeric string as a Long when it is a plain integer, otherwise as
// a Double. Generalized to also route exponent notation (e.g. "1e5", "2E-3")
// to Double — previously such strings reached Long.valueOf and threw
// NumberFormatException. Plain integers are parsed exactly as before.
if (value.contains(".") || value.contains("e") || value.contains("E")) {
  return Double.valueOf(value);
}
return Long.valueOf(value);
} | java | {
"resource": ""
} |
q172454 | CollectdParser.parsePart | test | private int parsePart(int startOffset, ByteBuf buf, Map<String, Field> fields) throws OnRecordErrorException {
// Parses one part of a collectd binary-protocol packet starting at
// startOffset and returns the offset of the next part. Each part is a
// 2-byte type, 2-byte total length (header included), then a payload.
int offset = startOffset;
int type = buf.getUnsignedShort(offset); // 0-1
offset += 2;
final int length = buf.getUnsignedShort(offset); // 2-3
offset += 2;
switch (type) {
case HOST:
case PLUGIN:
case PLUGIN_INSTANCE:
case TYPE:
case TYPE_INSTANCE:
case MESSAGE:
// string parts: length covers the 4-byte header, hence "length - 4"
pruneFields(type);
fields.put(PART_TYPES.get(type), Field.create(parseString(offset, length, buf)));
offset += length - 4;
break;
case TIME_HIRES:
case INTERVAL_HIRES:
if (type != INTERVAL_HIRES || !excludeInterval) {
long value = parseNumeric(offset, buf);
if (convertTime) {
// hi-res values are in 2^-30 second units; convert to milliseconds
// and relabel the field as the non-hi-res variant
value *= (Math.pow(2, -30) * 1000);
type = type == TIME_HIRES ? TIME : INTERVAL;
}
fields.put(PART_TYPES.get(type), Field.create(value));
}
offset += 8;
break;
case TIME:
case INTERVAL:
case SEVERITY:
if (type != INTERVAL || !excludeInterval) {
fields.put(PART_TYPES.get(type), Field.create(parseNumeric(offset, buf)));
}
offset += 8;
break;
case VALUES:
// a VALUES part completes a metric; flush the accumulated fields
offset = parseValues(offset, buf);
startNewRecord();
break;
case SIGNATURE:
if (!verifySignature(offset, length, buf)) {
throw new OnRecordErrorException(Errors.COLLECTD_02);
}
offset += length - 4;
break;
case ENCRYPTION:
// payload: 2-byte user length + user, 16-byte IV, encrypted data
String user = parseUser(offset, buf);
offset += (2 + user.length());
byte[] iv = parseIv(offset, buf);
offset += 16;
decrypt(offset, length, buf, user, iv);
// Skip the checksum and continue processing.
offset += 20;
break;
default:
// Don't recognize this part type, so skip it
LOG.warn("Unrecognized part type: {}", type);
offset += length - 4;
break;
}
return offset;
} | java | {
"resource": ""
} |
q172455 | CollectdParser.parseValues | test | private int parseValues(int startOffset, ByteBuf buf) throws OnRecordErrorException {
// Parses the payload of a VALUES part: a 2-byte value count, then one type
// byte per value, then one 8-byte datum per value. Returns the offset past
// the last datum.
int offset = startOffset;
// N Values
// For each Value:
// 1 byte data type code
int numValues = buf.getUnsignedShort(offset); // 4-5
offset += 2;
List<Byte> types = new ArrayList<>(numValues);
while (numValues-- > 0) {
types.add(buf.getByte(offset));
offset += 1;
}
for (int i = 0; i < types.size(); i++) {
Byte type = types.get(i);
String label = getValueLabel(i, type);
switch (type) {
case COUNTER:
// NOTE(review): getUnsignedInt reads only 4 bytes while the wire datum
// is 8 bytes (offset advances by 8) — confirm against the collectd
// binary-protocol spec whether the upper half is intentionally dropped.
fields.put(label, Field.create(buf.getUnsignedInt(offset)));
offset += 8;
break;
case GAUGE:
// gauges are doubles transmitted little-endian, unlike the other types
fields.put(
label,
Field.create(buf.order(ByteOrder.LITTLE_ENDIAN).getDouble(offset))
);
offset += 8;
break;
case DERIVE:
fields.put(label, Field.create(buf.getLong(offset)));
offset += 8;
break;
case ABSOLUTE:
// NOTE(review): same 4-byte read vs 8-byte advance as COUNTER above.
fields.put(label, Field.create(buf.getUnsignedInt(offset)));
offset += 8;
break;
default:
// error
throw new OnRecordErrorException(Errors.COLLECTD_01, type);
}
}
return offset;
} | java | {
"resource": ""
} |
q172456 | JdbcBaseRecordWriter.lookupPrimaryKeys | test | void lookupPrimaryKeys() throws StageException {
// Loads the primary-key column names for the target table into
// primaryKeyColumns, using a short-lived connection from the pool.
// SQL failures are wrapped in StageException JDBC_17; a failure while
// closing the connection is deliberately only logged.
Connection connection = null;
try {
connection = dataSource.getConnection();
primaryKeyColumns = jdbcUtil.getPrimaryKeys(connection, schema, tableName);
} catch (SQLException e) {
String formattedError = jdbcUtil.formatSqlException(e);
LOG.error(formattedError, e);
throw new StageException(JdbcErrors.JDBC_17, tableName, formattedError);
} finally {
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
String formattedError = jdbcUtil.formatSqlException(e);
LOG.error(formattedError, e);
}
}
}
} | java | {
"resource": ""
} |
q172457 | JdbcBaseRecordWriter.createCustomFieldMappings | test | private void createCustomFieldMappings() {
// Applies user-supplied field->column mappings, overriding the defaults for
// columns that already exist in columnsToFields. Unknown columns are
// silently skipped. (Removes the duplicate debug statement that logged the
// same field/column pair twice per applied mapping.)
for (JdbcFieldColumnParamMapping mapping : customMappings) {
  LOG.debug("Custom mapping field {} to column {}", mapping.field, mapping.columnName);
  if (columnsToFields.containsKey(mapping.columnName)) {
    columnsToFields.put(mapping.columnName, mapping.field);
    columnsToParameters.put(mapping.columnName, mapping.paramValue);
  }
}
} | java | {
"resource": ""
} |
q172458 | JdbcBaseRecordWriter.getSQLTypeName | test | static String getSQLTypeName(Field.Type type) throws OnRecordErrorException {
// Maps an SDC field type to its SQL type name.
// Throws OnRecordErrorException (JDBC_05) for MAP/LIST_MAP and any
// unrecognized type. Fix: the error for LIST_MAP previously hard-coded
// "MAP"; it now reports the actual offending type name.
switch (type) {
case BOOLEAN:
return "BOOLEAN";
case CHAR:
return "CHAR";
case BYTE:
return "BINARY";
case SHORT:
return "SMALLINT";
case INTEGER:
return "INTEGER";
case LONG:
return "BIGINT";
case FLOAT:
return "FLOAT";
case DOUBLE:
return "DOUBLE";
case DATE:
return "DATE";
case TIME:
return "TIME";
case DATETIME:
return "TIMESTAMP";
case DECIMAL:
return "DECIMAL";
case STRING:
return "VARCHAR";
case BYTE_ARRAY:
return "VARBINARY";
case LIST_MAP:
case MAP:
throw new OnRecordErrorException(JdbcErrors.JDBC_05, "Unsupported list or map type: " + type.name());
case LIST:
return "ARRAY";
default:
throw new OnRecordErrorException(JdbcErrors.JDBC_05, "Unsupported type: " + type.name());
}
} | java | {
"resource": ""
} |
q172459 | JdbcBaseRecordWriter.getTableName | test | protected String getTableName() {
// Builds the (optionally schema-qualified) table name. When case-sensitive
// mode is on, each identifier is wrapped in double quotes.
String table = caseSensitive ? "\"" + tableName + "\"" : tableName;
if (Strings.isNullOrEmpty(schema)) {
  return table;
}
String qualifier = caseSensitive ? "\"" + schema + "\"" : schema;
return qualifier + "." + table;
} | java | {
"resource": ""
} |
q172460 | JdbcBaseRecordWriter.setPrimaryKeys | test | int setPrimaryKeys(int index, final Record record, PreparedStatement statement, int opCode)
throws OnRecordErrorException {
// Binds the record's primary-key values to the prepared statement starting
// at parameter position 'index'; returns the next free position.
// A missing key field or a JDBC bind failure sends the record to error
// handling via OnRecordErrorException (JDBC_19).
for (String key : getPrimaryKeyColumns()) {
Field field = record.get(recordReader.getFieldPath(key, getColumnsToFields(), opCode));
if(field == null){
LOG.error("Primary key {} is missing in record", key);
throw new OnRecordErrorException(record, JdbcErrors.JDBC_19, key);
}
Object value = field.getValue();
try {
statement.setObject(index, value, getColumnType(key));
} catch (SQLException ex){
LOG.error("SQLException thrown: {}", ex.getMessage());
throw new OnRecordErrorException(record, JdbcErrors.JDBC_19, key, ex);
}
++index;
}
return index;
} | java | {
"resource": ""
} |
q172461 | JdbcBaseRecordWriter.handleSqlException | test | void handleSqlException(SQLException e) throws StageException {
// Logs the formatted SQLException (state, error code, chained exceptions)
// and rethrows it as a StageException (JDBC_14), preserving the cause.
String formattedError = jdbcUtil.formatSqlException(e);
LOG.error(formattedError, e);
throw new StageException(JdbcErrors.JDBC_14, e.getSQLState(), e.getErrorCode(), e.getMessage(), formattedError, e);
} | java | {
"resource": ""
} |
q172462 | SourcePipe.process | test | public void process(
Map<String, String> offsets,
int batchSize,
ReportErrorDelegate reportErrorDelegate
) throws StageException, PipelineRuntimeException {
// Runs the origin stage for one batch. The stage name is pushed into the
// logging MDC for the duration of the call so log lines can be attributed.
this.reportErrorDelegate = reportErrorDelegate;
getStage().setReportErrorDelegate(this);
try {
MDC.put(LogConstants.STAGE, getStage().getInfo().getInstanceName());
getStage().execute(offsets, batchSize);
} finally {
// reset (rather than remove) the MDC stage entry when the batch finishes
MDC.put(LogConstants.STAGE, "");
}
} | java | {
"resource": ""
} |
q172463 | SourcePipe.prepareBatchContext | test | public void prepareBatchContext(BatchContextImpl batchContext) {
// Prepares a push-origin batch context: starts this stage in the pipe batch,
// stores the resulting batch maker on the context, and records the origin
// stage's name/label for downstream reporting.
PipeBatch pipeBatch = batchContext.getPipeBatch();
// Start stage in the pipe batch and persist reference to batch maker in the batch context
BatchMakerImpl batchMaker = pipeBatch.startStage(this);
batchContext.setBatchMaker(batchMaker);
batchContext.setOriginStageName(
getStage().getInfo().getInstanceName(),
getStage().getInfo().getLabel()
);
} | java | {
"resource": ""
} |
q172464 | SourcePipe.finishBatchContext | test | public Map<String, Object> finishBatchContext(BatchContextImpl batchContext) throws StageException {
// Completes a push-origin batch: finalizes the batch started in
// prepareBatchContext and computes the stage metrics, returning them.
return finishBatchAndCalculateMetrics(
batchContext.getStartTime(),
batchContext.getPipeBatch(),
(BatchMakerImpl) batchContext.getBatchMaker(),
batchContext.getPipeBatch().getBatch(this),
batchContext.getPipeBatch().getErrorSink(),
batchContext.getPipeBatch().getEventSink(),
null
);
} | java | {
"resource": ""
} |
q172465 | FieldFlattenerProcessor.flattenEntireRecord | test | private Map<String, Field> flattenEntireRecord(Field rootField) {
Map<String, Field> ret = new LinkedHashMap<>();
switch (rootField.getType()) {
case MAP:
case LIST_MAP:
flattenMap("", rootField.getValueAsMap(), ret);
break;
case LIST:
flattenList("", rootField.getValueAsList(), ret);
break;
default:
break;
}
return ret;
} | java | {
"resource": ""
} |
q172466 | DetachedStageRuntime.create | test | public static<T> DetachedStageRuntime<? extends T> create(
StageBean bean,
Stage.Info info,
Stage.Context context,
Class<T> klass
) {
// Factory for detached stage runtimes: processors get a DetachedProcessor,
// targets and executors share DetachedTarget. Any other stage type (e.g. a
// source) is unsupported and fails fast.
switch (bean.getDefinition().getType()) {
case PROCESSOR:
return new DetachedStageRuntime.DetachedProcessor(bean, info, context);
case TARGET:
case EXECUTOR:
return new DetachedStageRuntime.DetachedTarget(bean, info, context);
default:
throw new RuntimeException("Unsupported stage type: " + bean.getDefinition().getType());
}
} | java | {
"resource": ""
} |
q172467 | SampleExecutor.execute | test | private void execute(Record record) throws OnRecordErrorException {
// Sample/template executor body: validates the record and would perform the
// configured action. Records lacking /someField are routed to the error
// pipeline.
// This is a contrived example, normally you may be performing an operation that could throw
// an exception or produce an error condition. In that case you can throw an OnRecordErrorException
// to send this record to the error pipeline with some details.
if (!record.has("/someField")) {
throw new OnRecordErrorException(Errors.SAMPLE_01, record, "exception detail message.");
}
// TODO: execute action
} | java | {
"resource": ""
} |
q172468 | JdbcMySqlBinLogRecordReader.getColumnsToParameters | test | @Override
public SortedMap<String, String> getColumnsToParameters(
final Record record, int op,
Map<String, String> parameters,
Map<String, String> columnsToFields)
{
// Returns column -> SQL parameter for every mapped column whose field is
// present in the record. For DELETE operations the MySQL binlog record
// carries the row under OldData instead of Data, so the field path is
// rewritten accordingly before the lookup.
SortedMap<String, String> columnsToParameters = new TreeMap<>();
for (Map.Entry<String, String> entry : columnsToFields.entrySet()) {
String columnName = entry.getKey();
String fieldPath = entry.getValue();
if(op == OperationType.DELETE_CODE){
fieldPath = fieldPath.replace(DATA_FIELD, OLD_DATA_FIELD);
}
if (record.has(fieldPath)) {
columnsToParameters.put(columnName, parameters.get(columnName));
}
}
return columnsToParameters;
} | java | {
"resource": ""
} |
q172469 | OffsetQueryUtil.getConditionForPartitionColumn | test | private static String getConditionForPartitionColumn(
String partitionColumn,
OffsetComparison comparison,
List<String> preconditions,
String quoteChar
) {
// Builds the WHERE-clause fragment for one offset column: the comparison's
// template is filled with the quoted column name and a positional '?'
// placeholder, then AND-joined after any preconditions.
String conditionTemplate = comparison.getQueryCondition();
List<String> finalConditions = new ArrayList<>(preconditions);
finalConditions.add(
String.format(
conditionTemplate,
String.format(QUOTED_NAME, quoteChar, partitionColumn, quoteChar),
PREPARED_STATEMENT_POSITIONAL_PARAMETER
)
);
return AND_JOINER.join(finalConditions);
} | java | {
"resource": ""
} |
q172470 | OffsetQueryUtil.validateStoredAndSpecifiedOffset | test | public static Map<String, String> validateStoredAndSpecifiedOffset(TableContext tableContext, String offset) throws StageException {
// Parses the stored offset string and verifies that its column set matches
// exactly the table's configured offset columns; throws JDBC_71 when the two
// sets differ in either direction. An empty stored offset skips validation.
// (Idiom fix: size() == 0 replaced with isEmpty().)
Set<String> expectedColumns = Sets.newHashSet(tableContext.getOffsetColumns());
final Map<String, String> actualOffsets = getColumnsToOffsetMapFromOffsetFormat(offset);
// only perform the actual validation below if there ARE stored offsets
if (actualOffsets.isEmpty()) {
return actualOffsets;
}
Set<String> actualColumns = actualOffsets.keySet();
// columns expected but absent from the stored offset, and vice versa
Set<String> expectedSetDifference = Sets.difference(expectedColumns, actualColumns);
Set<String> actualSetDifference = Sets.difference(actualColumns, expectedColumns);
if (expectedSetDifference.size() > 0 || actualSetDifference.size() > 0) {
throw new StageException(
JdbcErrors.JDBC_71,
tableContext.getQualifiedName(),
COMMA_SPACE_JOINER.join(actualColumns),
COMMA_SPACE_JOINER.join(expectedColumns)
);
}
return actualOffsets;
} | java | {
"resource": ""
} |
q172471 | SupportBundleManager.getRequestedDefinitions | test | private List<BundleContentGeneratorDefinition> getRequestedDefinitions(List<String> generators) {
// Selects which bundle content generators to run: the explicitly requested
// ids when given, otherwise all default-enabled generators. The result is
// ordered by each definition's declared order.
Stream<BundleContentGeneratorDefinition> stream = definitions.stream();
if(generators == null || generators.isEmpty()) {
// Filter out default generators
stream = stream.filter(BundleContentGeneratorDefinition::isEnabledByDefault);
} else {
stream = stream.filter(def -> generators.contains(def.getId()));
}
return stream
.sorted(Comparator.comparingInt(BundleContentGeneratorDefinition::getOrder))
.collect(Collectors.toList());
} | java | {
"resource": ""
} |
q172472 | YarnAppLauncher.getNonEmptyArgs | test | private String[] getNonEmptyArgs(List<String> appArgs) {
// Returns the application arguments with null/empty entries dropped,
// preserving the original order.
return appArgs.stream()
    .filter(arg -> !StringUtils.isEmpty(arg))
    .toArray(String[]::new);
} | java | {
"resource": ""
} |
q172473 | KuduTarget.getOperation | test | protected Operation getOperation(KuduTable table, int op) throws UnsupportedOperationException {
// Translates an SDC CRUD operation code into the corresponding Kudu
// Operation on the given table; unknown codes fail fast.
Operation operation = null;
switch (op) {
case OperationType.INSERT_CODE:
operation = table.newInsert();
break;
case OperationType.UPSERT_CODE:
operation = table.newUpsert();
break;
case OperationType.UPDATE_CODE:
operation = table.newUpdate();
break;
case OperationType.DELETE_CODE:
operation = table.newDelete();
break;
default:
LOG.error("Operation {} not supported", op);
throw new UnsupportedOperationException(String.format("Unsupported Operation: %s", op));
}
return operation;
} | java | {
"resource": ""
} |
q172474 | MetricEnabledWrapperStream.convertBytesToDisplayFormat | test | static String convertBytesToDisplayFormat(double bytes) {
// Formats a byte count into a human-readable string, dividing by 1024 and
// stepping through the UNITS array until the value drops below 1 KiB (or the
// largest unit is reached).
// NOTE(review): for negative inputs the loop condition is immediately false,
// so the raw value is formatted with UNITS[0] — presumably callers only pass
// non-negative counts; confirm.
int unitIdx = 0;
double unitChangedBytes = bytes;
while (unitIdx < UNITS.length - 1 && Math.floor(unitChangedBytes / 1024) > 0) {
unitChangedBytes = unitChangedBytes / 1024;
unitIdx++;
}
return df.format(unitChangedBytes) + " " + UNITS[unitIdx];
} | java | {
"resource": ""
} |
q172475 | LiveDirectoryScanner.scan | test | public LiveFile scan(LiveFile current) throws IOException {
// Scans the directory for the file to process after 'current', retrying once
// on NoSuchFileException (caused by a concurrent rotation/deletion).
try {
return scanInternal(current);
} catch (NoSuchFileException ex) {
// this could happen because there has been a file rotation/deletion after the search/filter/sort and before
// the creation of the new current. Let's sleep for 50ms and try again; if it fails again give up.
ThreadUtil.sleep(50);
return scanInternal(current);
}
} | java | {
"resource": ""
} |
q172476 | LiveDirectoryScanner.getPendingFiles | test | public long getPendingFiles(LiveFile current) throws IOException{
// Returns the number of files still waiting to be processed after 'current'.
//Current will not be acceptable for roll files (if active file is without a counter/date pattern)
//and will be later renamed to a file with counter/date suffix, if that is the case we should
//return 0 as number of pending files
if (current == null || rollMode.isCurrentAcceptable(current.getPath().getFileName().toString())) {
return findToBeProcessedMatchingFiles(current!=null? current.refresh() : null).size();
}
return 0;
} | java | {
"resource": ""
} |
q172477 | CollisionWhitelist.isWhitelisted | test | public static boolean isWhitelisted(
String name,
Properties specificWhitelist,
Map<String, List<Dependency>> dependencies
) {
// Decides whether a dependency-version collision for 'name' is acceptable.
// A user-supplied whitelist entry (exact version-set match) takes precedence
// over the built-in WHITELIST_RULES.
if(specificWhitelist != null && specificWhitelist.containsKey(name)) {
return versionsMatch(specificWhitelist.getProperty(name), dependencies.keySet());
}
// Otherwise try hardcoded rules:
WhitelistRule rule = WHITELIST_RULES.get(name);
return rule != null && rule.isWhitelisted(dependencies);
} | java | {
"resource": ""
} |
q172478 | CollisionWhitelist.versionsMatch | test | private static boolean versionsMatch(String expectedVersions, Set<String> versions) {
// The comma-separated expected versions must form exactly the same set as
// the observed versions (set equality is equivalent to an empty symmetric
// difference).
return Sets.newHashSet(expectedVersions.split(",")).equals(versions);
} | java | {
"resource": ""
} |
q172479 | BootstrapClusterStreaming.main | test | public static void main(String[] args) throws Exception {
// Entry point for the Spark Streaming cluster bootstrap: builds the
// streaming binding, starts the context, and blocks until termination.
// Any failure is logged, printed to stderr (local mode can lose the log),
// and rethrown; the binding is always closed in the finally block.
SparkStreamingBinding binding = null;
try {
binding = SparkStreamingBindingFactory.build(BootstrapCluster.getProperties());
binding.init();
BootstrapCluster.createTransformers(binding.getStreamingContext().sparkContext(), binding.getSparkSession());
binding.startContext();
binding.awaitTermination();
} catch (Throwable error) {
String msg = "Error trying to invoke BootstrapClusterStreaming.main: " + error;
System.err.println(new Date()+ ": " + msg);
error.printStackTrace(System.err); // required as in local mode the following seems to be lost
LOG.error(msg, error);
throw new IllegalStateException(msg, error);
} finally {
try {
if (binding != null) {
binding.close();
}
} catch (Exception ex) {
LOG.warn("Error on binding close: " + ex, ex);
}
}
} | java | {
"resource": ""
} |
q172480 | PipelineConfigurationValidator.propagateRuntimeConfiguration | test | private void propagateRuntimeConfiguration() {
// For each stage service, replaces any RUNTIME-injected configs with the
// constant values declared by the stage's service dependency definition.
// If pipeline wasn't loaded or there are no stages, there is nothing to propagate
if(pipelineBean == null || pipelineBean.getPipelineStageBeans() == null) {
return;
}
for(StageBean stageBean : pipelineBean.getPipelineStageBeans().getStages()) {
for(ServiceDependencyDefinition serviceDependency: stageBean.getDefinition().getServices()) {
ServiceBean stageService = stageBean.getService(serviceDependency.getService());
if (stageService == null){
continue;
}
ServiceConfiguration serviceConfiguration = stageService.getConf();
List<Config> configs = serviceConfiguration.getConfiguration();
// Simply remove all RUNTIME configs
configs.removeAll(
serviceDependency.getConfiguration().keySet().stream()
.map(serviceConfiguration::getConfig)
.collect(Collectors.toList())
);
// And insert them with the stage-instance-constant values
serviceDependency.getConfiguration().forEach((key, value) -> configs.add(new Config(key, value)));
// And overwrite the new state
serviceConfiguration.setConfig(configs);
}
}
} | java | {
"resource": ""
} |
q172481 | OmnitureSource.validateReportDescription | test | private void validateReportDescription(List<ConfigIssue> issues){
// Adds a config issue (OMNITURE_03) when the configured report description
// is not valid JSON.
if(!jsonMapper.isValidJson(this.reportDescription)) {
issues.add(
getContext().createConfigIssue(
Groups.REPORT.name(),
"reportDescription",
Errors.OMNITURE_03
));
}
} | java | {
"resource": ""
} |
q172482 | JdbcRecordReaderWriterFactory.createJdbcRecordWriter | test | public static JdbcRecordWriter createJdbcRecordWriter(
String connectionString,
HikariDataSource dataSource,
String schema,
String tableName,
List<JdbcFieldColumnParamMapping> customMappings,
boolean rollbackOnError,
boolean useMultiRowOp,
int maxPrepStmtParameters,
int defaultOpCode,
UnsupportedOperationAction unsupportedAction,
DuplicateKeyAction duplicateKeyAction,
JdbcRecordReader recordReader,
boolean caseSensitive,
List<String> customDataSqlStateCodes
) throws StageException {
// Dispatching factory: LOAD operations get a dedicated JdbcLoadRecordWriter
// (which honors duplicateKeyAction); every other default operation is
// delegated to the generic/multi-row overload below (which honors
// rollbackOnError, useMultiRowOp, maxPrepStmtParameters and
// unsupportedAction instead).
if (defaultOpCode == OperationType.LOAD_CODE) {
return new JdbcLoadRecordWriter(
connectionString,
dataSource,
schema,
tableName,
customMappings,
duplicateKeyAction,
recordReader,
caseSensitive,
customDataSqlStateCodes
);
} else {
// no generated-column mappings on this path (null)
return createJdbcRecordWriter(
connectionString,
dataSource,
schema,
tableName,
customMappings,
null,
rollbackOnError,
useMultiRowOp,
maxPrepStmtParameters,
defaultOpCode,
unsupportedAction,
recordReader,
caseSensitive,
customDataSqlStateCodes
);
}
} | java | {
"resource": ""
} |
q172483 | JdbcRecordReaderWriterFactory.createJdbcRecordWriter | test | public static JdbcRecordWriter createJdbcRecordWriter(
String connectionString,
HikariDataSource dataSource,
String schema,
String tableName,
List<JdbcFieldColumnParamMapping> customMappings,
List<JdbcFieldColumnMapping> generatedColumnMappings,
boolean rollbackOnError,
boolean useMultiRowOp,
int maxPrepStmtParameters,
int defaultOpCode,
UnsupportedOperationAction unsupportedAction,
JdbcRecordReader recordReader,
boolean caseSensitive,
List<String> customDataSqlStateCodes
) throws StageException {
// Creates either a multi-row writer (batches many rows per prepared
// statement, bounded by maxPrepStmtParameters) or a generic one-row-per-
// statement writer, depending on useMultiRowOp. All other parameters are
// passed through unchanged.
JdbcRecordWriter recordWriter;
if (useMultiRowOp) {
recordWriter = new JdbcMultiRowRecordWriter(
connectionString,
dataSource,
schema,
tableName,
rollbackOnError,
customMappings,
maxPrepStmtParameters,
defaultOpCode,
unsupportedAction,
generatedColumnMappings,
recordReader,
caseSensitive,
customDataSqlStateCodes
);
} else {
recordWriter = new JdbcGenericRecordWriter(
connectionString,
dataSource,
schema,
tableName,
rollbackOnError,
customMappings,
defaultOpCode,
unsupportedAction,
generatedColumnMappings,
recordReader,
caseSensitive,
customDataSqlStateCodes
);
}
return recordWriter;
} | java | {
"resource": ""
} |
q172484 | BigQueryTargetUpgrader.upgradeV1toV2 | test | private void upgradeV1toV2(List<Config> configs) {
// V1 -> V2: drop both implicit-field-mapping configs and introduce the
// max-cache-size config with its "unlimited" default of -1.
configs.removeIf(config -> {
  String name = config.getName();
  return name.equals(IMPLICIT_FIELD_MAPPING_CONFIG)
      || name.equals(BIG_QUERY_IMPLICIT_FIELD_MAPPING_CONFIG);
});
configs.add(new Config(MAX_CACHE_SIZE, -1));
} | java | {
"resource": ""
} |
q172485 | Grok.extractNamedGroups | test | public Map<String, String> extractNamedGroups(final CharSequence rawData) {
// Matches rawData against the compiled grok pattern and returns the named
// capture groups of the first match, or null when there is no match or no
// named groups are available.
Matcher matcher = compiledPattern.matcher(rawData);
if (matcher.find()) {
MatchResult r = matcher.toMatchResult();
// defensive null checks; whether toMatchResult can actually return null
// depends on the (named-regexp) Matcher implementation in use — confirm
if (r != null && r.namedGroups() != null) {
return r.namedGroups();
}
}
return null;
} | java | {
"resource": ""
} |
q172486 | ControlChannel.consumerCommit | test | public void consumerCommit(String offset) {
// Queues a CONSUMER_COMMIT message carrying the given offset for the
// producer; a null offset is represented by a NullOffset sentinel.
// On interruption the interrupt flag is restored, per convention.
Object offsetValue = offset;
if (offsetValue == null) {
  offsetValue = new NullOffset();
}
LOG.trace("Commit Offset: '{}'", offsetValue);
try {
  producerQueue.put(new Message(MessageType.CONSUMER_COMMIT, offsetValue));
} catch (InterruptedException e) {
  // Fix: the format string previously had one '{}' for two arguments, so
  // the offset value was silently dropped from the log line.
  LOG.info("Interrupted while queuing '{}' with offset '{}'", MessageType.CONSUMER_COMMIT.name(), offsetValue);
  Thread.currentThread().interrupt();
}
} | java | {
"resource": ""
} |
q172487 | EmbeddedSDCPool.create | test | @SuppressWarnings("unchecked")
// Creates a new embedded SDC instance: starts the pipeline via the bootstrap
// class loader, extracts its "source" field reflectively, waits (up to 60s,
// polling every 100ms) for a DSource wrapper to expose the real source, and
// wires source + spark processors into the EmbeddedSDC.
protected EmbeddedSDC create() throws Exception {
Utils.checkState(open, "Not open");
final EmbeddedSDC embeddedSDC = new EmbeddedSDC();
Object source;
// post-batch runnable
Object pipelineStartResult = BootstrapCluster.startPipeline(() -> LOG.debug("Batch completed"));
// reflective access because the result type lives in a different classloader
source = pipelineStartResult.getClass().getDeclaredField("source").get(pipelineStartResult);
if (source instanceof DSource) {
long startTime = System.currentTimeMillis();
long endTime = startTime;
long diff = 0;
Source actualSource = ((DSource) source).getSource();
while (actualSource == null && diff < 60000) {
Thread.sleep(100);
actualSource = ((DSource) source).getSource();
endTime = System.currentTimeMillis();
diff = endTime - startTime;
}
if (actualSource == null) {
throw new IllegalStateException("Actual source is null, pipeline may not have been initialized");
}
source = actualSource;
}
if (!(source instanceof ClusterSource)) {
throw new IllegalArgumentException("Source is not of type ClusterSource: " + source.getClass().getName());
}
embeddedSDC.setSource((ClusterSource) source);
embeddedSDC.setSparkProcessors(
(List<Object>)pipelineStartResult.getClass().getDeclaredField("sparkProcessors").get(pipelineStartResult));
return embeddedSDC;
} | java | {
"resource": ""
} |
q172488 | MongoDBProcessor.setFieldsInRecord | test | private void setFieldsInRecord(Record record, Map<String, Field> fields) {
// Stores a copy of the looked-up fields on the record at the configured
// result-field path, as a LIST_MAP (copying guards against later mutation
// of the caller's map).
record.set(configBean.resultField, Field.createListMap(new LinkedHashMap<>(fields)));
} | java | {
"resource": ""
} |
q172489 | RemoteDataCollector.getRemotePipelinesWithChanges | test | @Override
public List<PipelineAndValidationStatus> getRemotePipelinesWithChanges() throws PipelineException {
// Builds a status report for every pipeline whose state changed since the
// last call, from the state-event listener's queued (state, offset) pairs.
// Pipelines deleted locally are still reported, with a null title.
List<PipelineAndValidationStatus> pipelineAndValidationStatuses = new ArrayList<>();
for (Pair<PipelineState, Map<String, String>> pipelineStateAndOffset: stateEventListener.getPipelineStateEvents()) {
  PipelineState pipelineState = pipelineStateAndOffset.getLeft();
  Map<String, String> offset = pipelineStateAndOffset.getRight();
  String name = pipelineState.getPipelineId();
  String rev = pipelineState.getRev();
  // Anything other than STANDALONE counts as cluster mode.
  // (Fix: replaces the redundant "(cond) ? true : false" form.)
  boolean isClusterMode = pipelineState.getExecutionMode() != ExecutionMode.STANDALONE;
  List<WorkerInfo> workerInfos = new ArrayList<>();
  String title;
  int runnerCount = 0;
  if (pipelineStore.hasPipeline(name)) {
    title = pipelineStore.getInfo(name).getTitle();
    Runner runner = manager.getRunner(name, rev);
    if (isClusterMode) {
      // cluster pipelines also report their worker (slave) metrics callbacks
      workerInfos = getWorkers(runner.getSlaveCallbackList(CallbackObjectType.METRICS));
    }
    runnerCount = runner.getRunnerCount();
  } else {
    // pipeline no longer exists locally
    title = null;
  }
  pipelineAndValidationStatuses.add(new PipelineAndValidationStatus(
      getSchGeneratedPipelineName(name, rev),
      title,
      rev,
      pipelineState.getTimeStamp(),
      true,
      pipelineState.getStatus(),
      pipelineState.getMessage(),
      workerInfos,
      isClusterMode,
      getSourceOffset(name, offset),
      null,
      runnerCount
  ));
}
return pipelineAndValidationStatuses;
} | java | {
"resource": ""
} |
q172490 | RemoteDataCollectorResult.futureAck | test | public static RemoteDataCollectorResult futureAck(Future<AckEvent> futureResult) {
// Factory for an asynchronous result: only the ack future is set; the
// immediate value, error flag and error message are all absent.
return new RemoteDataCollectorResult(futureResult, null, false, null);
} | java | {
"resource": ""
} |
q172491 | MetricsConfigurator.createGauge | test | public static Gauge<Map<String, Object>> createGauge(MetricRegistry metrics, String name, Gauge gauge, final String pipelineName, final String pipelineRev) {
// Registers the given gauge under "<name>" plus the standard gauge suffix,
// scoped to the given pipeline name/revision; delegates to the generic
// create() helper.
return create(
metrics,
gauge,
metricName(name, GAUGE_SUFFIX),
pipelineName,
pipelineRev
);
} | java | {
"resource": ""
} |
q172492 | JdbcMongoDBOplogRecordReader.getColumnsToParameters | test | @Override
public SortedMap<String, String> getColumnsToParameters(
final Record record, int op,
Map<String, String> parameters,
Map<String, String> columnsToFields)
{
// Returns column -> SQL parameter for every mapped column whose field exists
// in the record; the operation-specific field path is resolved by
// getFieldPath (MongoDB oplog records nest data differently per op).
// Missing fields are traced and skipped rather than treated as errors.
SortedMap<String, String> columnsToParameters = new TreeMap<>();
for (Map.Entry<String, String> entry : columnsToFields.entrySet()) {
String columnName = entry.getKey();
String fieldPath = getFieldPath(columnName, columnsToFields, op);
if (record.has(fieldPath)) {
columnsToParameters.put(columnName, parameters.get(columnName));
} else {
LOG.trace("Record is missing a field for column {} for the operation code {}", columnName, op);
}
}
return columnsToParameters;
} | java | {
"resource": ""
} |
q172493 | WholeFileTransformerProcessor.generateHeaderAttrs | test | private Map<String, Object> generateHeaderAttrs(Path file) throws StageException {
try {
Map<String, Object> recordHeaderAttr = new HashMap<>();
recordHeaderAttr.put(HeaderAttributeConstants.FILE, file.toAbsolutePath());
recordHeaderAttr.put(HeaderAttributeConstants.FILE_NAME, file.getFileName());
recordHeaderAttr.put(HeaderAttributeConstants.SIZE, Files.size(file));
recordHeaderAttr.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, Files.getLastModifiedTime(file));
return recordHeaderAttr;
} catch (IOException e) {
throw new TransformerStageCheckedException(Errors.CONVERT_09, e.toString(), e);
}
} | java | {
"resource": ""
} |
q172494 | WholeFileTransformerProcessor.validateRecord | test | private void validateRecord(Record record) throws StageException {
try {
FileRefUtil.validateWholeFileRecord(record);
} catch (IllegalArgumentException e) {
throw new TransformerStageCheckedException(Errors.CONVERT_01, e.toString(), e);
}
} | java | {
"resource": ""
} |
q172495 | WholeFileTransformerProcessor.handleOldTempFiles | test | private void handleOldTempFiles(Path tempParquetFile) throws IOException {
if (tempParquetFile == null) {
LOG.warn("temporary parquet file is empty");
return;
}
Files.deleteIfExists(tempParquetFile);
} | java | {
"resource": ""
} |
q172496 | WholeFileTransformerProcessor.getAvroInputStream | test | private InputStream getAvroInputStream(Record record) throws StageException {
try {
FileRef fileRef = record.get(FileRefUtil.FILE_REF_FIELD_PATH).getValueAsFileRef();
// get avro reader
final boolean includeChecksumInTheEvents = false;
InputStream is = FileRefUtil.getReadableStream(
getContext(),
fileRef,
InputStream.class,
includeChecksumInTheEvents,
null,
null
);
return is;
} catch (IOException ex) {
throw new TransformerStageCheckedException(Errors.CONVERT_07, ex.toString(), ex);
}
} | java | {
"resource": ""
} |
q172497 | WholeFileTransformerProcessor.getFileReader | test | private DataFileStream<GenericRecord> getFileReader(InputStream is, String sourceFileName) throws StageException {
try {
DatumReader<GenericRecord> reader = new GenericDatumReader<>();
DataFileStream<GenericRecord> fileReader = new DataFileStream<>(is, reader);
return fileReader;
} catch (IOException ex) {
throw new TransformerStageCheckedException(Errors.CONVERT_11, sourceFileName, ex);
}
} | java | {
"resource": ""
} |
q172498 | WholeFileTransformerProcessor.writeParquet | test | private void writeParquet(String sourceFileName, DataFileStream<GenericRecord> fileReader, Path tempParquetFile) throws StageException {
long recordCount = 0;
GenericRecord avroRecord;
Schema schema = fileReader.getSchema();
LOG.debug("Start reading input file : {}", sourceFileName);
try {
// initialize parquet writer
Configuration jobConfiguration = new Configuration();
String compressionCodecName = compressionElEval.eval(variables, jobConfig.avroParquetConfig.compressionCodec, String.class);
jobConfiguration.set(AvroParquetConstants.COMPRESSION_CODEC_NAME, compressionCodecName);
jobConfiguration.setInt(AvroParquetConstants.ROW_GROUP_SIZE, jobConfig.avroParquetConfig.rowGroupSize);
jobConfiguration.setInt(AvroParquetConstants.PAGE_SIZE, jobConfig.avroParquetConfig.pageSize);
jobConfiguration.setInt(AvroParquetConstants.DICTIONARY_PAGE_SIZE, jobConfig.avroParquetConfig.dictionaryPageSize);
jobConfiguration.setInt(AvroParquetConstants.MAX_PADDING_SIZE, jobConfig.avroParquetConfig.maxPaddingSize);
// Parquet writer
ParquetWriter.Builder builder = AvroToParquetConverterUtil.initializeWriter(
new org.apache.hadoop.fs.Path(tempParquetFile.toString()),
schema,
jobConfiguration
);
parquetWriter = builder.build();
while (fileReader.hasNext()) {
avroRecord = fileReader.next();
parquetWriter.write(avroRecord);
recordCount++;
}
parquetWriter.close();
} catch (IOException ex) {
throw new TransformerStageCheckedException(
Errors.CONVERT_08,
sourceFileName,
recordCount,
ex
);
}
LOG.debug("Finished writing {} records to {}", recordCount, tempParquetFile.getFileName());
} | java | {
"resource": ""
} |
q172499 | FieldEncrypter.checkInputEncrypt | test | public Optional<Field> checkInputEncrypt(Record record, Field field) {
if (UNSUPPORTED_TYPES.contains(field.getType())) {
getContext().toError(record, CRYPTO_03, field.getType());
return Optional.empty();
}
return Optional.of(field);
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.