| focal_method (string, 13 – 60.9k chars) | test_case (string, 25 – 109k chars) |
|---|---|
/**
 * Validates the given table config against the (possibly absent) schema.
 * Convenience overload that delegates to the 3-argument {@code validate},
 * passing {@code null} for the third argument.
 */
public static void validate(TableConfig tableConfig, @Nullable Schema schema) {
    validate(tableConfig, schema, null);
}
|
/**
 * Exercises time-column validation for both REALTIME and OFFLINE tables.
 * REALTIME tables require a non-null schema and a valid time column;
 * OFFLINE tables tolerate a missing schema/time column, but when both are
 * present the time column must exist in the schema as a valid time spec.
 */
@Test
public void validateTimeColumnValidationConfig() {
    // REALTIME table
    // null timeColumnName and schema
    TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).build();
    try {
        TableConfigUtils.validate(tableConfig, null);
        Assert.fail("Should fail for null timeColumnName and null schema in REALTIME table");
    } catch (IllegalStateException e) {
        // expected
    }
    // null schema only
    tableConfig =
        new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    try {
        TableConfigUtils.validate(tableConfig, null);
        Assert.fail("Should fail for null schema in REALTIME table");
    } catch (IllegalStateException e) {
        // expected
    }
    // null timeColumnName only
    Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).build();
    tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).build();
    try {
        TableConfigUtils.validate(tableConfig, schema);
        Assert.fail("Should fail for null timeColumnName in REALTIME table");
    } catch (IllegalStateException e) {
        // expected
    }
    // timeColumnName not present in schema
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).build();
    tableConfig =
        new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    try {
        TableConfigUtils.validate(tableConfig, schema);
        Assert.fail("Should fail for timeColumnName not present in schema for REALTIME table");
    } catch (IllegalStateException e) {
        // expected
    }
    // timeColumnName present, but only as a plain dimension, not a valid time spec
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
        .addSingleValueDimension(TIME_COLUMN, FieldSpec.DataType.LONG).build();
    tableConfig =
        new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    try {
        TableConfigUtils.validate(tableConfig, schema);
        Assert.fail("Should fail for invalid fieldSpec for timeColumnName in schema for REALTIME table");
    } catch (IllegalStateException e) {
        // expected
    }
    // valid: time column declared as a DateTime field
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
        .addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
    tableConfig =
        new TableConfigBuilder(TableType.REALTIME).setStreamConfigs(getStreamConfigs()).setTableName(TABLE_NAME)
            .setTimeColumnName(TIME_COLUMN).build();
    TableConfigUtils.validate(tableConfig, schema);
    // OFFLINE table
    // null timeColumnName and schema - allowed in OFFLINE
    tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
    TableConfigUtils.validate(tableConfig, null);
    // null schema only - allowed in OFFLINE
    tableConfig =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    TableConfigUtils.validate(tableConfig, null);
    // null timeColumnName only - allowed in OFFLINE
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).build();
    tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
    TableConfigUtils.validate(tableConfig, schema);
    // non-null schema and timeColumnName, but timeColumnName not present in schema
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).build();
    tableConfig =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    try {
        TableConfigUtils.validate(tableConfig, schema);
        Assert.fail("Should fail for timeColumnName not present in schema for OFFLINE table");
    } catch (IllegalStateException e) {
        // expected
    }
    // non-null schema and timeColumnName, but timeColumnName not present as a time spec in schema
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
        .addSingleValueDimension(TIME_COLUMN, FieldSpec.DataType.STRING).build();
    tableConfig =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    try {
        TableConfigUtils.validate(tableConfig, schema);
        Assert.fail("Should fail for timeColumnName not present in schema for OFFLINE table");
    } catch (IllegalStateException e) {
        // expected
    }
    // empty timeColumnName - valid
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).build();
    tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName("").build();
    TableConfigUtils.validate(tableConfig, schema);
    // valid
    schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
        .addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
    tableConfig =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN).build();
    TableConfigUtils.validate(tableConfig, schema);
}
|
/**
 * Validates the (tenant, dataId, group) triple used to address a configuration.
 *
 * @param tenant tenant to validate (delegated to {@code checkTenant})
 * @param dataId data id; must be non-blank and pass {@code ParamUtils.isValid}
 * @param group  group name; must be non-blank and pass {@code ParamUtils.isValid}
 * @throws NacosException with {@code CLIENT_INVALID_PARAM} when any part is invalid
 */
public static void checkTdg(String tenant, String dataId, String group) throws NacosException {
    checkTenant(tenant);
    requireValidParam(dataId, DATAID_INVALID_MSG);
    requireValidParam(group, GROUP_INVALID_MSG);
}

/** Rejects a blank or syntactically invalid parameter with the given message. */
private static void requireValidParam(String value, String invalidMessage) throws NacosException {
    if (StringUtils.isBlank(value) || !ParamUtils.isValid(value)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, invalidMessage);
    }
}
|
/**
 * A blank group must make {@code checkTdg} throw a NacosException whose
 * message mentions the invalid group.
 */
@Test
void testCheckTdgFail2() throws NacosException {
    NacosException thrown = assertThrows(NacosException.class,
            () -> ParamUtils.checkTdg("a", "b", ""));
    assertTrue(thrown.getMessage().contains("group invalid"));
}
|
/**
 * Starts a new workflow instance for the given workflow id and version.
 *
 * <p>Loads the workflow definition, validates the run request against it,
 * builds run properties from the definition's property snapshot, creates the
 * instance (applying overrides and parameter evaluation), and hands it to the
 * run-strategy DAO to be started.
 *
 * @param workflowId id of the workflow to start
 * @param version    workflow version to start
 * @param runRequest caller-supplied run request (initiator, policy, overrides)
 * @return the run response derived from the created instance and the DAO result
 */
public RunResponse start(
    @NotNull String workflowId, @NotNull String version, @NotNull RunRequest runRequest) {
  WorkflowDefinition workflowDefinition = workflowDao.getWorkflowDefinition(workflowId, version);
  validateRequest(version, workflowDefinition, runRequest);
  RunProperties properties =
      RunProperties.from(
          Checks.notNull(
              workflowDefinition.getPropertiesSnapshot(),
              "property snapshot cannot be null for workflow: " + workflowId));
  // Create and initiate a new instance with overrides and param evaluation applied.
  WorkflowInstance newInstance =
      workflowHelper.createWorkflowInstance(
          workflowDefinition.getWorkflow(),
          workflowDefinition.getInternalId(),
          workflowDefinition.getMetadata().getWorkflowVersionId(),
          properties,
          runRequest);
  RunStrategy strategy = workflowDefinition.getRunStrategyOrDefault();
  int startResult = runStrategyDao.startWithRunStrategy(newInstance, strategy);
  RunResponse runResponse = RunResponse.from(newInstance, startResult);
  LOG.info("Created a workflow instance with response {}", runResponse);
  return runResponse;
}
|
/**
 * When the run-strategy DAO reports 0 (no new run started), the response
 * should carry DUPLICATED status while still echoing the instance identity
 * (workflow id, version, request UUID) with zeroed instance/run ids.
 */
@Test
public void testStartDuplicated() {
    // 0 from startWithRunStrategy means a duplicate request - nothing was started.
    when(runStrategyDao.startWithRunStrategy(any(), any())).thenReturn(0);
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .requestId(UUID.fromString("41f0281e-41a2-468d-b830-56141b2f768b"))
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .build();
    RunResponse response = actionHandler.start("sample-minimal-wf", "active", request);
    verify(workflowDao, times(1)).getWorkflowDefinition("sample-minimal-wf", "active");
    verify(runStrategyDao, times(1)).startWithRunStrategy(any(), any());
    assertEquals(instance.getWorkflowId(), response.getWorkflowId());
    assertEquals(1L, response.getWorkflowVersionId());
    assertEquals(0L, response.getWorkflowInstanceId());
    assertEquals(0L, response.getWorkflowRunId());
    assertEquals("41f0281e-41a2-468d-b830-56141b2f768b", response.getWorkflowUuid());
    assertEquals(RunResponse.Status.DUPLICATED, response.getStatus());
}
|
/**
 * Resets this partition to an empty state: all replica slots cleared,
 * version zeroed, and migration flag reset, keeping only the given local replica.
 *
 * @param localReplica the replica representing the local member; must not be null
 */
void reset(PartitionReplica localReplica) {
    assert localReplica != null;
    // Fresh array: every replica slot becomes null until reassigned.
    this.replicas = new PartitionReplica[MAX_REPLICA_COUNT];
    this.localReplica = localReplica;
    version = 0;
    resetMigrating();
}
|
/**
 * After populating all replica slots, reset() must clear every replica
 * address, drop the migrating flag, and zero the partition version.
 */
@Test
public void testReset() {
    for (int i = 0; i < MAX_REPLICA_COUNT; i++) {
        replicaOwners[i] = new PartitionReplica(newAddress(5000 + i), UuidUtil.newUnsecureUUID());
    }
    partition.setReplicas(replicaOwners);
    partition.reset(localReplica);
    // Every replica slot should be cleared by the reset.
    for (int i = 0; i < MAX_REPLICA_COUNT; i++) {
        assertNull(partition.getReplicaAddress(i));
    }
    assertFalse(partition.isMigrating());
    assertEquals(0, partition.version());
}
|
/**
 * Drops a SQL function within a single transaction and refreshes the
 * functions cache afterwards.
 *
 * <p>When no parameter types are given, every function with the name is
 * soft-deleted; when parameter types are given, exactly one matching
 * function is expected and its latest version is marked deleted.
 *
 * @param functionName   qualified function name (catalog is validated first)
 * @param parameterTypes optional parameter-type signature to pin one overload
 * @param exists         caller's existence expectation, enforced via checkExists
 */
@Override
public void dropFunction(QualifiedObjectName functionName, Optional<List<TypeSignature>> parameterTypes, boolean exists)
{
    checkCatalog(functionName);
    jdbi.useTransaction(handle -> {
        FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
        List<SqlInvokedFunction> functions = getSqlFunctions(transactionDao, functionName, parameterTypes);
        checkExists(functions, functionName, parameterTypes);
        if (!parameterTypes.isPresent()) {
            // No signature specified: soft-delete all overloads by name.
            transactionDao.setDeleted(functionName.getCatalogName(), functionName.getSchemaName(), functionName.getObjectName());
        }
        else {
            // Exactly one overload must match the given signature.
            SqlInvokedFunction latest = getOnlyElement(functions);
            checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
            transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), true);
        }
    });
    // Cache refresh happens outside the transaction, after commit.
    refreshFunctionsCache(functionName);
}
|
/**
 * Covers dropping functions by bare name (all overloads) and by
 * name + parameter types (single overload), including re-creation and
 * consecutive drops of the same name.
 */
@Test
public void testDropFunction()
{
    // Create functions
    createFunction(FUNCTION_POWER_TOWER_DOUBLE, false);
    createFunction(FUNCTION_POWER_TOWER_INT, false);
    createFunction(FUNCTION_TANGENT, false);
    // Drop function by name
    dropFunction(TANGENT, Optional.empty(), false);
    assertGetFunctions(TANGENT);
    // Recreate functions
    createFunction(FUNCTION_TANGENT, false);
    // Drop a specific function by name and parameter types
    dropFunction(POWER_TOWER, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))), true);
    assertGetFunctions(POWER_TOWER, FUNCTION_POWER_TOWER_INT.withVersion("1"));
    dropFunction(TANGENT, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))), true);
    assertGetFunctions(POWER_TOWER, FUNCTION_POWER_TOWER_INT.withVersion("1"));
    assertGetFunctions(TANGENT);
    // Recreate functions, power_double(double) is created with a different definition
    createFunction(FUNCTION_POWER_TOWER_DOUBLE_UPDATED, false);
    createFunction(FUNCTION_TANGENT, false);
    // Drop functions consecutively
    dropFunction(POWER_TOWER, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))), false);
    dropFunction(POWER_TOWER, Optional.empty(), false);
}
|
/**
 * Handles the ETH_GET_BLOCK_TRANSACTION_COUNT_BY_NUMBER operation: queries the
 * node for the transaction count of the block resolved from the AT_BLOCK
 * header (falling back to the configured default), and sets the count as the
 * message body unless the node reported an error.
 */
@InvokeOnHeader(Web3jConstants.ETH_GET_BLOCK_TRANSACTION_COUNT_BY_NUMBER)
void ethGetBlockTransactionCountByNumber(Message message) throws IOException {
    String atBlockHeader = message.getHeader(Web3jConstants.AT_BLOCK, configuration::getAtBlock, String.class);
    DefaultBlockParameter blockParameter = toDefaultBlockParameter(atBlockHeader);
    Request<?, EthGetBlockTransactionCountByNumber> rpcRequest =
            web3j.ethGetBlockTransactionCountByNumber(blockParameter);
    setRequestId(message, rpcRequest);
    EthGetBlockTransactionCountByNumber rpcResponse = rpcRequest.send();
    // Only populate the body when the node did not report an error.
    if (!checkForError(message, rpcResponse)) {
        message.setBody(rpcResponse.getTransactionCount());
    }
}
|
/**
 * Stubs the web3j client to return a transaction count of ONE and verifies
 * the handler copies that count into the exchange body.
 */
@Test
public void ethGetBlockTransactionCountByNumberTest() throws Exception {
    EthGetBlockTransactionCountByNumber response = Mockito.mock(EthGetBlockTransactionCountByNumber.class);
    Mockito.when(mockWeb3j.ethGetBlockTransactionCountByNumber(any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getTransactionCount()).thenReturn(BigInteger.ONE);
    Exchange exchange
        = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_BLOCK_TRANSACTION_COUNT_BY_NUMBER);
    template.send(exchange);
    BigInteger body = exchange.getIn().getBody(BigInteger.class);
    assertEquals(BigInteger.ONE, body);
}
|
/**
 * Loads the logging configuration described by the given properties and makes
 * sure the resulting logger context has a reload listener registered.
 *
 * @param loggingProperties source of the configuration file location
 */
@Override
public void loadConfiguration(NacosLoggingProperties loggingProperties) {
    String configLocation = loggingProperties.getLocation();
    // The configurator must know the properties before the context is (re)loaded.
    configurator.setLoggingProperties(loggingProperties);
    LoggerContext context = loadConfigurationOnStart(configLocation);
    // Register a listener once; skip when one is already attached.
    if (hasNoListener(context)) {
        addListener(context, configLocation);
    }
}
|
/**
 * Loads the Nacos logback configuration, starts the context, and verifies
 * that the registered listener sees the start event and that the
 * ASYNC-NAMING appender is attached to the naming logger.
 */
@Test
void testLoadConfigurationStart() {
    LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
    loggerContext.putObject(CoreConstants.RECONFIGURE_ON_CHANGE_TASK, new ReconfigureOnChangeTask());
    logbackNacosLoggingAdapter.loadConfiguration(loggingProperties);
    loggerContext.start();
    verify(loggerContextListener).onStart(loggerContext);
    // Only the nacos naming logger is expected to carry the async appender.
    for (Logger each : loggerContext.getLoggerList()) {
        if (!"com.alibaba.nacos.client.naming".equals(each.getName())) {
            continue;
        }
        assertNotNull(each.getAppender("ASYNC-NAMING"));
    }
}
|
/**
 * Looks up a parameter value produced by a step inside a foreach step,
 * executing the lookup on the executor with a bounded wait.
 *
 * @param foreachStepId id of the enclosing foreach step
 * @param stepId        id of the step inside the foreach iteration
 * @param paramName     name of the parameter to fetch
 * @return the parameter value resolved by {@code fromForeach}
 * @throws MaestroInternalError wrapping any failure (submission, timeout,
 *         or an exception thrown by the lookup itself)
 */
Object getFromForeach(String foreachStepId, String stepId, String paramName) {
    try {
        // submit() stays inside the try so rejection is wrapped the same way
        // as timeouts and execution failures.
        return executor
            .submit(() -> fromForeach(foreachStepId, stepId, paramName))
            .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        throw new MaestroInternalError(
            e,
            "getFromForeach throws an exception for foreachStepId=[%s], stepId=[%s], paramName=[%s]",
            foreachStepId,
            stepId,
            paramName);
    }
}
|
/**
 * Covers the failure modes of getFromForeach: unknown step id, a step that is
 * not a foreach, an uninitialized foreach (missing FOREACH artifact), and a
 * non-primitive param type that cannot be fetched from foreach iterations.
 */
@Test
public void testInvalidGetFromForeach() throws Exception {
    // Unknown referenced step id.
    AssertHelper.assertThrows(
        "Cannot find the referenced step id",
        MaestroInternalError.class,
        "getFromForeach throws an exception",
        () -> paramExtension.getFromForeach("non-existing-job", "job1", "sleep_seconds"));
    // Step exists but is a NOOP, not a foreach.
    StepRuntimeSummary summary = StepRuntimeSummary.builder().type(StepType.NOOP).build();
    when(allStepOutputData.get("foreach-job"))
        .thenReturn(Collections.singletonMap("maestro_step_runtime_summary", summary));
    AssertHelper.assertThrows(
        "Only foreach step can call getFromForeach",
        MaestroInternalError.class,
        "getFromForeach throws an exception",
        () -> paramExtension.getFromForeach("foreach-job", "job1", "sleep_seconds"));
    // Foreach step without its FOREACH artifact is considered uninitialized.
    summary = loadObject(TEST_STEP_RUNTIME_SUMMARY, StepRuntimeSummary.class);
    summary.getArtifacts().remove(Artifact.Type.FOREACH.key());
    when(allStepOutputData.get("foreach-job"))
        .thenReturn(Collections.singletonMap("maestro_step_runtime_summary", summary));
    AssertHelper.assertThrows(
        "Cannot load param from uninitialized foreach",
        MaestroInternalError.class,
        "getFromForeach throws an exception",
        () -> paramExtension.getFromForeach("foreach-job", "job1", "sleep_seconds"));
    // Non-primitive (array) param type cannot be fetched from foreach.
    summary = loadObject(TEST_STEP_RUNTIME_SUMMARY, StepRuntimeSummary.class);
    when(allStepOutputData.get("foreach-job"))
        .thenReturn(Collections.singletonMap("maestro_step_runtime_summary", summary));
    when(stepInstanceDao.getForeachParamType(any(), any(), any())).thenReturn(ParamType.LONG_ARRAY);
    when(stepInstanceDao.getEvaluatedResultsFromForeach(any(), any(), any()))
        .thenReturn(Collections.singletonMap(1L, "12"));
    AssertHelper.assertThrows(
        "cannot get non-primitive type param from foreach",
        MaestroInternalError.class,
        "getFromForeach throws an exception",
        () -> paramExtension.getFromForeach("foreach-job", "job1", "sleep_seconds"));
}
|
/**
 * CLI entry point: reads every flow YAML under {@code directory}, expands
 * includes, joins them into a single multi-document YAML body, and POSTs it
 * to the server's namespace flow-update endpoint.
 *
 * @return 0 on success, 1 when the server rejects the request or the flows
 *         fail constraint validation
 * @throws Exception propagated from {@code super.call()} or file walking
 */
@SuppressWarnings("deprecation")
@Override
public Integer call() throws Exception {
    super.call();
    try (var files = Files.walk(directory)) {
        List<String> flows = files
            .filter(Files::isRegularFile)
            .filter(YamlFlowParser::isValidExtension)
            .map(path -> {
                try {
                    // Resolve include directives relative to the flow file's folder.
                    return IncludeHelperExpander.expand(Files.readString(path, Charset.defaultCharset()), path.getParent());
                } catch (IOException e) {
                    // Streams cannot throw checked exceptions; rethrow unchecked.
                    throw new RuntimeException(e);
                }
            })
            .toList();
        String body = "";
        if (flows.isEmpty()) {
            stdOut("No flow found on '{}'", directory.toFile().getAbsolutePath());
        } else {
            // Multi-document YAML: one document per flow.
            body = String.join("\n---\n", flows);
        }
        try (DefaultHttpClient client = client()) {
            MutableHttpRequest<String> request = HttpRequest
                .POST(apiUri("/flows/") + namespace + "?delete=" + delete, body).contentType(MediaType.APPLICATION_YAML);
            List<UpdateResult> updated = client.toBlocking().retrieve(
                this.requestOptions(request),
                Argument.listOf(UpdateResult.class)
            );
            stdOut(updated.size() + " flow(s) for namespace '" + namespace + "' successfully updated !");
            updated.forEach(flow -> stdOut("- " + flow.getNamespace() + "." + flow.getId()));
        } catch (HttpClientResponseException e) {
            FlowValidateCommand.handleHttpException(e, "flow");
            return 1;
        }
    } catch (ConstraintViolationException e) {
        FlowValidateCommand.handleException(e, "flow");
        return 1;
    }
    return 0;
}
|
/**
 * Runs the namespace-update command against an embedded server three times:
 * a full directory, a sub-directory without the delete flag, and the same
 * sub-directory with --no-delete, asserting the reported flow counts.
 */
@Test
void runNoDelete() {
    URL directory = FlowNamespaceUpdateCommandTest.class.getClassLoader().getResource("flows");
    URL subDirectory = FlowNamespaceUpdateCommandTest.class.getClassLoader().getResource("flows/flowsSubFolder");
    // Capture stdout so the command's summary line can be asserted.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        String[] args = {
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "io.kestra.cli",
            directory.getPath(),
        };
        PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        assertThat(out.toString(), containsString("3 flow(s)"));
        out.reset();
        // no "delete" arg should behave as no-delete
        args = new String[]{
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "io.kestra.cli",
            subDirectory.getPath()
        };
        PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        assertThat(out.toString(), containsString("1 flow(s)"));
        out.reset();
        // explicit --no-delete must behave the same as omitting the flag
        args = new String[]{
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "--no-delete",
            "io.kestra.cli",
            subDirectory.getPath()
        };
        PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        assertThat(out.toString(), containsString("1 flow(s)"));
    }
}
|
/**
 * Returns whether the request's type parameter identifies a job,
 * delegating to the String-based {@code isJob} overload.
 */
protected boolean isJob( HttpServletRequest request ) {
    return isJob( request.getParameter( PARAMETER_TYPE ) );
}
|
/**
 * Verifies isJob(HttpServletRequest) for both a "job" request (true)
 * and a "trans" request (false).
 */
@Test
public void testIsJobHttpServletRequest() {
    // CASE: job
    HttpServletRequest requestJob = Mockito.mock( HttpServletRequest.class );
    when( requestJob.getParameter( RegisterPackageServlet.PARAMETER_TYPE ) ).thenReturn( "job" );
    assertTrue( servlet.isJob( requestJob ) );
    // CASE: transformation
    HttpServletRequest requestTrans = Mockito.mock( HttpServletRequest.class );
    // BUG FIX: the stub was previously applied to requestJob, so requestTrans was
    // never stubbed - its getParameter() returned null and the assertion passed
    // for the wrong reason. Stub the mock that is actually exercised.
    when( requestTrans.getParameter( RegisterPackageServlet.PARAMETER_TYPE ) ).thenReturn( "trans" );
    assertFalse( servlet.isJob( requestTrans ) );
}
|
/**
 * Parses the given command-line arguments into a CommandLineImpl,
 * delegating to its static factory.
 *
 * @param originalArgs raw command-line arguments
 * @param logger       logger used during parsing
 * @return the parsed command line
 */
@Override
public CommandLineImpl parse(final List<String> originalArgs, final Logger logger) {
    return CommandLineImpl.of(originalArgs, logger);
}
|
/**
 * Parsing "-h" should select Command.NONE, print the full usage text
 * (commands plus common options) to stdout, and leave stderr empty.
 * The expected string must match the generated help byte-for-byte.
 */
@Test
public void testHelp() throws Exception {
    final CommandLineParserImpl parser = new CommandLineParserImpl();
    final CommandLineImpl commandLine = parse(parser, "-h");
    assertEquals(Command.NONE, commandLine.getCommand());
    assertEquals(
        "Usage: embulk [common options] <command> [command options]" + NEWLINE
            + NEWLINE
            + "Commands:" + NEWLINE
            + "   run          Run a bulk load transaction." + NEWLINE
            + "   cleanup      Cleanup resume state." + NEWLINE
            + "   preview      Dry-run a bulk load transaction, and preview it." + NEWLINE
            + "   guess        Guess missing parameters to complete configuration." + NEWLINE
            + "   install      Installs a Maven artifact, typically an Embulk plugin." + NEWLINE
            + "   example      Create example files for a quick trial of Embulk." + NEWLINE
            + "   license      Print out the license notice." + NEWLINE
            + "   selfupdate   Upgrade Embulk to the specified version." + NEWLINE
            + "   gem          Run \"gem\" to install a RubyGem plugin." + NEWLINE
            + "   mkbundle     Create a new plugin bundle environment." + NEWLINE
            + "   bundle       Update a plugin bundle environment." + NEWLINE
            + NEWLINE
            + "Common options:" + NEWLINE
            + "   -h, --help                       Print help" + NEWLINE
            + "   -version, --version              Show Embulk version" + NEWLINE
            + "   -l, --log-level LEVEL            Set log level (error, warn, info, debug, trace)" + NEWLINE
            + "       --log-path PATH              Output log messages to a file (default: -)" + NEWLINE
            + "   -X KEY=VALUE                     Set Embulk system properties" + NEWLINE
            + "   -R OPTION                        Command-line option for JRuby. (Only '--dev')" + NEWLINE
            + NEWLINE,
        commandLine.getStdOut());
    assertEquals("", commandLine.getStdErr());
}
|
/**
 * Asserts that {@code expression} is true, failing with the string form of
 * {@code errorMessage} otherwise.
 *
 * @param expression   the condition expected to hold
 * @param errorMessage message object used when the check fails; must not be null
 * @throws IllegalArgumentException if {@code errorMessage} is null, or if
 *         {@code expression} is false
 */
public static void checkArgument(boolean expression, Object errorMessage) {
    // A null message is rejected first, regardless of the expression's value.
    if (errorMessage == null) {
        throw new IllegalArgumentException("errorMessage cannot be null.");
    }
    if (!expression) {
        throw new IllegalArgumentException(String.valueOf(errorMessage));
    }
}
|
/**
 * A false expression must raise IllegalArgumentException even when extra
 * message arguments are supplied (3-argument overload).
 */
@Test
void testCheckArgument3Args1false() {
    assertThrows(IllegalArgumentException.class, () -> {
        Preconditions.checkArgument(false, ERRORMSG, ARG);
    });
}
|
/**
 * Always reports this buffer as not writable; this is the read-only
 * (unmodifiable) view, so writes must be rejected.
 */
@Override
public boolean isWritable() {
    return false;
}
|
/** An unmodifiable wrapper over a writable buffer must report isWritable() == false. */
@Test
public void shouldIndicateNotWritable() {
    assertFalse(unmodifiableBuffer(buffer(1)).isWritable());
}
|
/**
 * Returns the first package definition whose id equals the given id,
 * or null when no package matches.
 *
 * @param id the package id to search for
 */
public PackageDefinition find(final String id) {
    return stream().filter(packageDefinition -> packageDefinition.getId().equals(id)).findFirst().orElse(null);
}
|
/** find(id) should return the package whose id matches, here the second of two. */
@Test
public void shouldFindPackageGivenThePkgId() throws Exception {
    PackageRepository repository = PackageRepositoryMother.create("repo-id2", "repo2", "plugin-id", "1.0", null);
    PackageDefinition p1 = PackageDefinitionMother.create("id1", "pkg1", null, repository);
    PackageDefinition p2 = PackageDefinitionMother.create("id2", "pkg2", null, repository);
    Packages packages = new Packages(p1, p2);
    assertThat(packages.find("id2"), is(p2));
}
|
/**
 * Builds a sparse random projection from an n-dimensional input space to a
 * p-dimensional feature space.
 *
 * @param n       dimension of the input space; must be at least 2
 * @param p       dimension of the feature space; must be in [1, n]
 * @param columns optional column names forwarded to the projection
 * @return the constructed RandomProjection
 * @throws IllegalArgumentException when n or p is out of range
 */
public static RandomProjection sparse(int n, int p, String... columns) {
    if (n < 2) {
        throw new IllegalArgumentException("Invalid dimension of input space: " + n);
    }
    if (p < 1 || p > n) {
        throw new IllegalArgumentException("Invalid dimension of feature space: " + p);
    }
    Matrix projection = new Matrix(p, n);
    double scale = Math.sqrt(3);
    for (int i = 0; i < p; i++) {
        for (int j = 0; j < n; j++) {
            // NOTE(review): `prob` is a field not visible here; MathEx.random(prob)
            // presumably samples an index in {0,1,2}, so each entry becomes
            // -sqrt(3), 0, or +sqrt(3) - confirm against the class's prob definition.
            projection.set(i, j, scale * (MathEx.random(prob) - 1));
        }
    }
    return new RandomProjection(projection, columns);
}
|
/**
 * Smoke test: constructing a sparse 128-to-40 projection must succeed;
 * the matrix is printed for manual inspection (no assertions).
 */
@Test
public void testSparseRandomProjection() {
    System.out.println("sparse random projection");
    RandomProjection instance = RandomProjection.sparse(128, 40);
    Matrix p = instance.projection;
    System.out.println(p.toString(true));
}
|
/**
 * Maps a SeaTunnel column definition back to a DB2 type definition.
 *
 * <p>Numeric, temporal, and boolean types map directly; DECIMAL precision and
 * scale are clamped into DB2's supported ranges (with warnings); BYTES and
 * STRING choose among fixed, variable, and LOB types based on the column
 * length; unsupported SQL types raise a connector conversion error.
 *
 * @param column the SeaTunnel column to convert
 * @return the DB2 type definition for the column
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(DB2_BOOLEAN);
            builder.dataType(DB2_BOOLEAN);
            break;
        case TINYINT:
        case SMALLINT:
            // DB2 has no TINYINT; both map to SMALLINT.
            builder.columnType(DB2_SMALLINT);
            builder.dataType(DB2_SMALLINT);
            break;
        case INT:
            builder.columnType(DB2_INT);
            builder.dataType(DB2_INT);
            break;
        case BIGINT:
            builder.columnType(DB2_BIGINT);
            builder.dataType(DB2_BIGINT);
            break;
        case FLOAT:
            builder.columnType(DB2_REAL);
            builder.dataType(DB2_REAL);
            break;
        case DOUBLE:
            builder.columnType(DB2_DOUBLE);
            builder.dataType(DB2_DOUBLE);
            break;
        case DECIMAL:
            // Clamp precision/scale into DB2's valid ranges, warning on each adjustment.
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // Shrinking precision also shrinks the usable scale.
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", DB2_DECIMAL, precision, scale));
            builder.dataType(DB2_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                // Unknown length: use the widest VARBINARY.
                // NOTE(review): builder.length() receives the original (null/<=0)
                // length here rather than MAX_VARBINARY_LENGTH - confirm intended.
                builder.columnType(
                        String.format("%s(%s)", DB2_VARBINARY, MAX_VARBINARY_LENGTH));
                builder.dataType(DB2_VARBINARY);
                builder.length(column.getColumnLength());
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", DB2_BINARY, column.getColumnLength()));
                builder.dataType(DB2_BINARY);
                builder.length(column.getColumnLength());
            } else if (column.getColumnLength() <= MAX_VARBINARY_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", DB2_VARBINARY, column.getColumnLength()));
                builder.dataType(DB2_VARBINARY);
                builder.length(column.getColumnLength());
            } else {
                // Too long for VARBINARY: fall back to BLOB, capped at MAX_BLOB_LENGTH.
                long length = column.getColumnLength();
                if (length > MAX_BLOB_LENGTH) {
                    length = MAX_BLOB_LENGTH;
                    log.warn(
                            "The length of blob type {} is out of range, "
                                    + "it will be converted to {}({})",
                            column.getName(),
                            DB2_BLOB,
                            length);
                }
                builder.columnType(String.format("%s(%s)", DB2_BLOB, length));
                builder.dataType(DB2_BLOB);
                builder.length(length);
            }
            break;
        case STRING:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                // Unknown length: use the widest VARCHAR.
                // NOTE(review): builder.length() receives the original (null/<=0)
                // length here rather than MAX_VARCHAR_LENGTH - confirm intended.
                builder.columnType(String.format("%s(%s)", DB2_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(DB2_VARCHAR);
                builder.length(column.getColumnLength());
            } else if (column.getColumnLength() <= MAX_CHAR_LENGTH) {
                builder.columnType(String.format("%s(%s)", DB2_CHAR, column.getColumnLength()));
                builder.dataType(DB2_CHAR);
                builder.length(column.getColumnLength());
            } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", DB2_VARCHAR, column.getColumnLength()));
                builder.dataType(DB2_VARCHAR);
                builder.length(column.getColumnLength());
            } else {
                // Too long for VARCHAR: fall back to CLOB, capped at MAX_CLOB_LENGTH.
                long length = column.getColumnLength();
                if (length > MAX_CLOB_LENGTH) {
                    length = MAX_CLOB_LENGTH;
                    log.warn(
                            "The length of clob type {} is out of range, "
                                    + "it will be converted to {}({})",
                            column.getName(),
                            DB2_CLOB,
                            length);
                }
                builder.columnType(String.format("%s(%s)", DB2_CLOB, length));
                builder.dataType(DB2_CLOB);
                builder.length(length);
            }
            break;
        case DATE:
            builder.columnType(DB2_DATE);
            builder.dataType(DB2_DATE);
            break;
        case TIME:
            builder.columnType(DB2_TIME);
            builder.dataType(DB2_TIME);
            break;
        case TIMESTAMP:
            if (column.getScale() != null && column.getScale() > 0) {
                // Cap the fractional-seconds scale at DB2's maximum.
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(String.format("%s(%s)", DB2_TIMESTAMP, timestampScale));
                builder.scale(timestampScale);
            } else {
                builder.columnType(DB2_TIMESTAMP);
            }
            builder.dataType(DB2_TIMESTAMP);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.DB_2,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
|
/**
 * Verifies the STRING branch of reconvert(): null length maps to the widest
 * VARCHAR, lengths up to MAX_CHAR_LENGTH map to CHAR, lengths up to
 * MAX_VARCHAR_LENGTH map to VARCHAR, and anything longer maps to CLOB.
 */
@Test
public void testReconvertString() {
    // null length -> VARCHAR at maximum length
    Column column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(BasicType.STRING_TYPE)
                    .columnLength(null)
                    .build();
    BasicTypeDefine typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals("VARCHAR(32672)", typeDefine.getColumnType());
    Assertions.assertEquals(DB2TypeConverter.DB2_VARCHAR, typeDefine.getDataType());
    // length == MAX_CHAR_LENGTH -> CHAR
    column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(BasicType.STRING_TYPE)
                    .columnLength(DB2TypeConverter.MAX_CHAR_LENGTH)
                    .build();
    typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(
            String.format("%s(%s)", DB2TypeConverter.DB2_CHAR, column.getColumnLength()),
            typeDefine.getColumnType());
    Assertions.assertEquals(DB2TypeConverter.DB2_CHAR, typeDefine.getDataType());
    // length == MAX_VARCHAR_LENGTH -> VARCHAR
    column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(BasicType.STRING_TYPE)
                    .columnLength(DB2TypeConverter.MAX_VARCHAR_LENGTH)
                    .build();
    typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(
            String.format("%s(%s)", DB2TypeConverter.DB2_VARCHAR, column.getColumnLength()),
            typeDefine.getColumnType());
    Assertions.assertEquals(DB2TypeConverter.DB2_VARCHAR, typeDefine.getDataType());
    // length just over MAX_VARCHAR_LENGTH -> CLOB
    column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(BasicType.STRING_TYPE)
                    .columnLength(DB2TypeConverter.MAX_VARCHAR_LENGTH + 1)
                    .build();
    typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(
            String.format("%s(%s)", DB2TypeConverter.DB2_CLOB, column.getColumnLength()),
            typeDefine.getColumnType());
    Assertions.assertEquals(DB2TypeConverter.DB2_CLOB, typeDefine.getDataType());
}
|
/**
 * Subscribes to URI updates for the given cluster. Symlink cluster names are
 * watched via a symlink node watcher; regular clusters register (at most once,
 * via computeIfAbsent) a D2URIMap resource watcher with the xDS client.
 *
 * @param clusterName the cluster (or symlink) to watch URIs for
 */
public void listenToUris(String clusterName)
{
    // if cluster name is a symlink, watch for D2SymlinkNode instead
    String resourceName = D2_URI_NODE_PREFIX + clusterName;
    if (SymlinkUtil.isSymlinkNodeOrPath(clusterName))
    {
        listenToSymlink(clusterName, resourceName);
    }
    else
    {
        // computeIfAbsent guarantees one watcher per cluster even under
        // concurrent calls.
        _watchedUriResources.computeIfAbsent(clusterName, k ->
        {
            XdsClient.D2URIMapResourceWatcher watcher = getUriResourceWatcher(clusterName);
            _xdsClient.watchXdsResource(resourceName, watcher);
            return watcher;
        });
    }
}
|
/**
 * End-to-end symlink flow: listening to a symlink cluster must watch the
 * symlink node, follow it to the primary cluster's URI resource, publish URI
 * data under both names, re-watch when the symlink moves to a new primary,
 * and keep publishing the old primary's updates under its own name only.
 */
@Test
public void testListenToUriSymlink() throws PropertySerializationException
{
    XdsToD2PropertiesAdaptorFixture fixture = new XdsToD2PropertiesAdaptorFixture();
    fixture.getSpiedAdaptor().listenToUris(SYMLINK_NAME);
    // verify symlink is watched
    verify(fixture._xdsClient).watchXdsResource(eq(URI_SYMLINK_RESOURCE_NAME), anyNodeWatcher());
    // update symlink data
    NodeResourceWatcher symlinkNodeWatcher = fixture._nodeWatcher;
    symlinkNodeWatcher.onChanged(getSymlinkNodeUpdate(PRIMARY_URI_RESOURCE_NAME));
    // verify actual cluster of the uris is watched
    verify(fixture._xdsClient).watchXdsResource(eq(PRIMARY_URI_RESOURCE_NAME), anyMapWatcher());
    // update uri data
    D2URIMapResourceWatcher watcher = fixture._uriMapWatcher;
    watcher.onChanged(new XdsClient.D2URIMapUpdate(Collections.emptyMap()));
    // verify uri data is merged and published under symlink name and the actual cluster name
    verify(fixture._uriEventBus).publishInitialize(SYMLINK_NAME, getDefaultUriProperties(SYMLINK_NAME));
    verify(fixture._uriEventBus).publishInitialize(PRIMARY_CLUSTER_NAME, getDefaultUriProperties(PRIMARY_CLUSTER_NAME));
    // test update symlink to a new primary cluster
    String primaryUriResourceName2 = URI_NODE_PREFIX + PRIMARY_CLUSTER_NAME_2;
    symlinkNodeWatcher.onChanged(getSymlinkNodeUpdate(primaryUriResourceName2));
    verify(fixture._xdsClient).watchXdsResource(eq(primaryUriResourceName2), anyMapWatcher());
    verifyUriUpdate(fixture, PRIMARY_CLUSTER_NAME_2, SYMLINK_NAME);
    // if the old primary cluster gets an update, it will be published under its original cluster name
    // since the symlink points to the new primary cluster now.
    XdsD2.D2URI protoUri = getD2URI(PRIMARY_CLUSTER_NAME, LOCAL_HOST_URI.toString(), VERSION);
    UriProperties uriProps = new UriPropertiesJsonSerializer().fromProto(protoUri);
    watcher.onChanged(new XdsClient.D2URIMapUpdate(Collections.singletonMap(URI_NAME, protoUri)));
    verify(fixture._uriEventBus).publishInitialize(PRIMARY_CLUSTER_NAME, uriProps);
    // no status update receipt event emitted when data was empty before the update
    verify(fixture._eventEmitter, never()).emitSDStatusUpdateReceiptEvent(
        eq(PRIMARY_CLUSTER_NAME),
        eq(LOCAL_HOST),
        eq(PORT),
        eq(ServiceDiscoveryEventEmitter.StatusUpdateActionType.MARK_READY),
        eq(true),
        eq(XDS_SERVER),
        eq(URI_NODE_PREFIX + PRIMARY_CLUSTER_NAME + "/" + URI_NAME),
        eq(protoUri.toString()),
        eq((int) VERSION),
        eq(TRACING_ID),
        anyLong()
    );
}
|
/**
 * Builds the bundle descriptor for the given plugin/bundle jar.
 * <p>
 * Parses gocd-bundle.xml (for bundles) or plugin.xml (for plugins) from the jar.
 * When neither file is present, or parsing fails, returns a default descriptor
 * marked invalid with the failure reason attached.
 *
 * @param bundleOrPluginJarFile the jar to inspect; must exist on disk
 * @return a parsed descriptor, or an invalid fallback descriptor
 * @throws RuntimeException if the jar file does not exist
 */
public GoPluginBundleDescriptor build(BundleOrPluginFileDetails bundleOrPluginJarFile) {
    if (!bundleOrPluginJarFile.exists()) {
        throw new RuntimeException(format("Plugin or bundle jar does not exist: %s", bundleOrPluginJarFile.file()));
    }

    final String defaultId = bundleOrPluginJarFile.file().getName();

    // Fallback descriptor used whenever a real descriptor cannot be parsed out of the jar.
    final GoPluginBundleDescriptor fallbackDescriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder()
            .version("1")
            .id(defaultId)
            .bundleLocation(bundleOrPluginJarFile.extractionLocation())
            .pluginJarFileLocation(bundleOrPluginJarFile.file().getAbsolutePath())
            .isBundledPlugin(bundleOrPluginJarFile.isBundledPlugin())
            .build());

    try {
        if (bundleOrPluginJarFile.isBundleJar()) {
            return GoPluginBundleDescriptorParser.parseXML(bundleOrPluginJarFile.getBundleXml(), bundleOrPluginJarFile);
        }
        if (bundleOrPluginJarFile.isPluginJar()) {
            return GoPluginDescriptorParser.parseXML(bundleOrPluginJarFile.getPluginXml(), bundleOrPluginJarFile);
        }
        // The jar carries neither descriptor file.
        fallbackDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid. The plugin does not seem to contain plugin.xml or gocd-bundle.xml", defaultId)), new RuntimeException("The plugin does not seem to contain plugin.xml or gocd-bundle.xml"));
    } catch (Exception e) {
        log.warn("Unable to load the jar file {}", bundleOrPluginJarFile.file(), e);
        final String message = requireNonNullElse(e.getMessage(), e.getClass().getCanonicalName());
        // Include the underlying cause in the user-visible message when one exists.
        final String cause = e.getCause() != null ? format("%s. Cause: %s", message, e.getCause().getMessage()) : message;
        fallbackDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid: %s", defaultId, cause)), e);
    }
    return fallbackDescriptor;
}
|
@Test
void shouldCreateThePluginDescriptorFromGivenPluginJarWithPluginXML() throws Exception {
    // Stage a plugin jar (flagged as bundled) and build its descriptor.
    final String jarName = "descriptor-aware-test-plugin.jar";
    copyPluginToThePluginDirectory(pluginDirectory, jarName);
    final File jarFile = new File(pluginDirectory, jarName);

    final BundleOrPluginFileDetails details = new BundleOrPluginFileDetails(jarFile, true, pluginDirectory);
    final GoPluginBundleDescriptor bundle = goPluginBundleDescriptorBuilder.build(details);
    final List<GoPluginDescriptor> descriptors = bundle.descriptors();

    // Exactly one valid, bundled descriptor matching the expected metadata.
    final GoPluginDescriptor expected = buildExpectedDescriptor(jarName, jarFile.getAbsolutePath());
    assertThat(descriptors.size()).isEqualTo(1);
    assertThat(descriptors.get(0)).isEqualTo(expected);
    assertThat(descriptors.get(0).isInvalid()).isFalse();
    assertThat(descriptors.get(0).isBundledPlugin()).isTrue();
}
|
/**
 * Returns the context labels as an unmodifiable view.
 * Callers cannot mutate the returned map; it reflects the internal label map.
 */
@Override
public Map<String, String> contextLabels() {
    return Collections.unmodifiableMap(contextLabels);
}
|
@Test
public void testCreationWithValidNamespaceAndNullLabelValues() {
    // A label whose value is null must be kept (not dropped) by the context.
    labels.put(LABEL_A_KEY, null);
    context = new KafkaMetricsContext(namespace, labels);

    final Map<String, String> contextLabels = context.contextLabels();
    assertEquals(2, contextLabels.size());
    assertEquals(namespace, contextLabels.get(MetricsContext.NAMESPACE));
    assertNull(contextLabels.get(LABEL_A_KEY));
}
|
/**
 * Reports restriction progress as (work completed, work remaining) in seconds.
 */
@Override
public Progress getProgress() {
    // Timestamp.MAX_VALUE as the range end means "unbounded" (a streaming job),
    // so use now() as the effective end instead.
    final BigDecimal end = range.getTo().compareTo(Timestamp.MAX_VALUE) == 0
        ? BigDecimal.valueOf(timeSupplier.get().getSeconds())
        : BigDecimal.valueOf(range.getTo().getSeconds());

    // Before any position is claimed, progress starts at the range beginning.
    final BigDecimal current = lastClaimedPosition == null
        ? BigDecimal.valueOf(range.getFrom().getSeconds())
        : BigDecimal.valueOf(lastClaimedPosition.getSeconds());

    // Keep the remaining work strictly positive; otherwise the watermark
    // would stop advancing.
    final BigDecimal workRemaining = end.subtract(current).max(BigDecimal.ONE);
    LOG.debug(
        "Reported progress current: {}, end: {}, workRemaining: {}",
        current.doubleValue(),
        end.doubleValue(),
        workRemaining.doubleValue());
    return Progress.from(current.doubleValue(), workRemaining.doubleValue());
}
|
@Test
public void testGetProgressReturnsWorkRemainingAsWholeRangeWhenNoClaimWasAttempted() {
    final Timestamp from = Timestamp.ofTimeSecondsAndNanos(0, 0);
    final Timestamp to = Timestamp.now();
    final TimestampRangeTracker tracker = new TimestampRangeTracker(TimestampRange.of(from, to));

    final Progress progress = tracker.getProgress();

    // Nothing claimed yet: zero completed, the whole [from, to) span still remaining.
    assertEquals(0D, progress.getWorkCompleted(), DELTA);
    assertEquals(to.getSeconds(), progress.getWorkRemaining(), DELTA);
}
|
/**
 * Returns the registered Apollo admin-service instances from the discovery service.
 */
@RequestMapping("/admin")
public List<ServiceDTO> getAdminService() {
    final List<ServiceDTO> instances = discoveryService.getServiceInstances(ServiceNameConsts.APOLLO_ADMINSERVICE);
    return instances;
}
|
@Test
public void testGetAdminService() {
    // The controller must pass the discovery result through untouched.
    when(discoveryService.getServiceInstances(ServiceNameConsts.APOLLO_ADMINSERVICE))
        .thenReturn(someServices);
    assertEquals(someServices, serviceController.getAdminService());
}
|
/**
 * Fetches queue user ACL info from all active sub-clusters concurrently and
 * merges the per-cluster responses into a single response.
 *
 * @param request the ACL request; must not be null
 * @return the merged queue user ACL response
 * @throws YarnException if the request is missing or a sub-cluster call fails
 * @throws IOException on transport failures
 */
@Override
public GetQueueUserAclsInfoResponse getQueueUserAcls(
    GetQueueUserAclsInfoRequest request) throws YarnException, IOException {
  if (request == null) {
    routerMetrics.incrQueueUserAclsFailedRetrieved();
    String msg = "Missing getQueueUserAcls request.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_QUEUE_USER_ACLS, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg);
  }

  long startTime = clock.getTime();
  ClientMethod method = new ClientMethod("getQueueUserAcls",
      new Class[] {GetQueueUserAclsInfoRequest.class}, new Object[] {request});

  Collection<GetQueueUserAclsInfoResponse> subClusterResponses = null;
  try {
    subClusterResponses = invokeConcurrent(method, GetQueueUserAclsInfoResponse.class);
  } catch (Exception ex) {
    routerMetrics.incrQueueUserAclsFailedRetrieved();
    String msg = "Unable to get queue user Acls due to exception.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_QUEUE_USER_ACLS, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    // Always rethrows, so the merge below only runs on success.
    RouterServerUtil.logAndThrowException(msg, ex);
  }

  long stopTime = clock.getTime();
  routerMetrics.succeededGetQueueUserAclsRetrieved(stopTime - startTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(), GET_QUEUE_USER_ACLS,
      TARGET_CLIENT_RM_SERVICE);
  // Merge the QueueUserAclsInfoResponse
  return RouterYarnClientUtils.mergeQueueUserAcls(subClusterResponses);
}
|
@Test
public void testGetQueueUserAcls() throws Exception {
    LOG.info("Test FederationClientInterceptor : Get QueueUserAcls request.");

    // A null request must be rejected up front.
    LambdaTestUtils.intercept(YarnException.class, "Missing getQueueUserAcls request.",
        () -> interceptor.getQueueUserAcls(null));

    // A normal request yields a merged, non-null response.
    GetQueueUserAclsInfoResponse response = interceptor.getQueueUserAcls(
        GetQueueUserAclsInfoRequest.newInstance());
    Assert.assertNotNull(response);

    // The root queue is expected to carry both submit and administer ACLs.
    List<QueueACL> submitAndAdministerAcl = new ArrayList<>();
    submitAndAdministerAcl.add(QueueACL.SUBMIT_APPLICATIONS);
    submitAndAdministerAcl.add(QueueACL.ADMINISTER_QUEUE);
    QueueUserACLInfo expectedRootAclInfo = QueueUserACLInfo.newInstance("root",
        submitAndAdministerAcl);

    QueueUserACLInfo actualRootAclInfo = response.getUserAclsInfoList().stream().
        filter(acl -> acl.getQueueName().equals("root")).
        collect(Collectors.toList()).get(0);
    Assert.assertEquals(expectedRootAclInfo, actualRootAclInfo);
}
|
/**
 * Validates that a @SelfValidation-annotated method has the required shape:
 * void return, public visibility, and exactly one ViolationCollector parameter.
 * Each violation is logged and makes the method be ignored.
 */
boolean isMethodCorrect(ResolvedMethod m) {
    // Must return void.
    if (m.getReturnType() != null) {
        log.error("The method {} is annotated with @SelfValidation but does not return void. It is ignored", m.getRawMember());
        return false;
    }
    // Must take exactly one ViolationCollector argument.
    if (m.getArgumentCount() != 1 || !m.getArgumentType(0).getErasedType().equals(ViolationCollector.class)) {
        log.error("The method {} is annotated with @SelfValidation but does not have a single parameter of type {}",
            m.getRawMember(), ViolationCollector.class);
        return false;
    }
    // Must be public.
    if (!m.isPublic()) {
        log.error("The method {} is annotated with @SelfValidation but is not public", m.getRawMember());
        return false;
    }
    return true;
}
|
@Test
@SuppressWarnings("Slf4jFormatShouldBeConst")
void privateIsNotAccepted() throws NoSuchMethodException {
    // A private @SelfValidation method must be rejected...
    assertThat(selfValidatingValidator.isMethodCorrect(
        getMethod("validateFailPrivate", ViolationCollector.class)))
        .isFalse();
    // ...and the rejection must be logged with the offending method as the argument.
    verify(log).error("The method {} is annotated with @SelfValidation but is not public",
        InvalidExample.class.getDeclaredMethod("validateFailPrivate", ViolationCollector.class));
}
|
/**
 * Returns whether the given text is a valid identifier on its own,
 * i.e. it parses as an identifier and the parse consumes the entire string.
 */
public static boolean isValid(final String identifier) {
    final SqlBaseLexer lexer = new SqlBaseLexer(
        new CaseInsensitiveStream(CharStreams.fromString(identifier)));
    final SqlBaseParser parser = new SqlBaseParser(new CommonTokenStream(lexer));

    // Parse errors are expected here, so suppress all error output.
    lexer.removeErrorListeners();
    parser.removeErrorListeners();

    parser.identifier();

    // Valid only when parsing succeeded AND reached the end of the input;
    // a partial parse means the identifier would need quoting.
    return parser.getNumberOfSyntaxErrors() == 0
        && parser.getCurrentToken().getCharPositionInLine() == identifier.length();
}
|
@Test
public void shouldBeValid() {
    // Given: identifiers that must not require quoting.
    final String[] identifiers = new String[]{
        "FOO", // nothing special
        "foo", // lower-case
    };
    // Then: each one is accepted by the identifier parser.
    for (final String identifier : identifiers) {
        assertThat(
            "Expected " + identifier + " to be valid.",
            IdentifierUtil.isValid(identifier)
        );
    }
}
|
/**
 * Maps a protobuf NamenodeRoleProto onto the internal NamenodeRole enum.
 * Unrecognized proto values yield null.
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
        case NAMENODE:
            return NamenodeRole.NAMENODE;
        case BACKUP:
            return NamenodeRole.BACKUP;
        case CHECKPOINT:
            return NamenodeRole.CHECKPOINT;
        default:
            return null;
    }
}
|
@Test
public void testConvertBlockWithLocations() {
    // Round-trip both variants (flag false and true) through the proto form.
    for (final boolean flag : new boolean[] {false, true}) {
        final BlockWithLocations locs = getBlockWithLocations(1, flag);
        final BlockWithLocations roundTripped = PBHelper.convert(PBHelper.convert(locs));
        compare(locs, roundTripped);
    }
}
|
/**
 * Parses the group/attribute XML structure under the given node into a map of
 * group name -> (attribute key -> attribute value). A null node yields an empty map.
 */
public static Map<String, Map<String, String>> loadAttributes( Node attributesNode ) {
  Map<String, Map<String, String>> result = new HashMap<>();
  if ( attributesNode == null ) {
    return result;
  }
  for ( Node groupNode : XMLHandler.getNodes( attributesNode, XML_TAG_GROUP ) ) {
    Map<String, String> groupAttributes = new HashMap<>();
    result.put( XMLHandler.getTagValue( groupNode, "name" ), groupAttributes );
    for ( Node attributeNode : XMLHandler.getNodes( groupNode, XML_TAG_ATTRIBUTE ) ) {
      String key = XMLHandler.getTagValue( attributeNode, "key" );
      String value = XMLHandler.getTagValue( attributeNode, "value" );
      // Entries with a missing key or value are silently skipped.
      if ( key != null && value != null ) {
        groupAttributes.put( key, value );
      }
    }
  }
  return result;
}
|
@Test
public void testLoadAttributes_NullParameter() {
  // Only loadAttributes itself runs for real; a null node must yield an empty map.
  try ( MockedStatic<AttributesUtil> staticMock = mockStatic( AttributesUtil.class ) ) {
    staticMock.when( () -> AttributesUtil.loadAttributes( any( Node.class ) ) ).thenCallRealMethod();

    Map<String, Map<String, String>> result = AttributesUtil.loadAttributes( null );

    assertNotNull( result );
    assertTrue( result.isEmpty() );
  }
}
|
/**
 * Adds the given node labels to every active sub-cluster concurrently and
 * aggregates the per-cluster outcome into a single "SubCluster-N:STATUS,..."
 * message in the HTTP response body.
 *
 * @param newNodeLabels labels to add; must be non-null and non-empty
 * @param hsr the originating HTTP request (cloned per sub-cluster call)
 * @return 200 OK whose entity is the aggregated per-sub-cluster status string
 * @throws IllegalArgumentException if the input is null or empty
 * @throws Exception (as IOException) if sub-cluster resolution or the remote call fails
 */
@Override
public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels,
    HttpServletRequest hsr) throws Exception {
  if (newNodeLabels == null) {
    routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN,
        TARGET_WEB_SERVICE, "Parameter error, the newNodeLabels is null.");
    throw new IllegalArgumentException("Parameter error, the newNodeLabels is null.");
  }
  List<NodeLabelInfo> nodeLabelInfos = newNodeLabels.getNodeLabelsInfo();
  if (CollectionUtils.isEmpty(nodeLabelInfos)) {
    routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN,
        TARGET_WEB_SERVICE, "Parameter error, the nodeLabelsInfo is null or empty.");
    throw new IllegalArgumentException("Parameter error, the nodeLabelsInfo is null or empty.");
  }
  try {
    long startTime = clock.getTime();
    Collection<SubClusterInfo> subClustersActives = federationFacade.getActiveSubClusters();
    // Clone the request so each concurrent sub-cluster call gets its own copy.
    final HttpServletRequest hsrCopy = clone(hsr);
    Class[] argsClasses = new Class[]{NodeLabelsInfo.class, HttpServletRequest.class};
    Object[] args = new Object[]{newNodeLabels, hsrCopy};
    ClientMethod remoteMethod = new ClientMethod("addToClusterNodeLabels", argsClasses, args);
    Map<SubClusterInfo, Response> responseInfoMap =
        invokeConcurrent(subClustersActives, remoteMethod, Response.class);
    StringBuilder buffer = new StringBuilder();
    // SubCluster-0:SUCCESS,SubCluster-1:SUCCESS
    responseInfoMap.forEach((subClusterInfo, response) ->
        buildAppendMsg(subClusterInfo, buffer, response));
    long stopTime = clock.getTime();
    RouterAuditLogger.logSuccess(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS,
        TARGET_WEB_SERVICE);
    routerMetrics.succeededAddToClusterNodeLabelsRetrieved((stopTime - startTime));
    return Response.status(Status.OK).entity(buffer.toString()).build();
  } catch (NotFoundException e) {
    routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e);
  } catch (YarnException e) {
    routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowIOException("addToClusterNodeLabels with yarn error.", e);
  }
  // Both logAndThrowIOException calls above rethrow, so this is only reached
  // if those helpers ever change; fail loudly rather than return null.
  routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
  RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN,
      TARGET_WEB_SERVICE, "addToClusterNodeLabels Failed.");
  throw new RuntimeException("addToClusterNodeLabels Failed.");
}
|
@Test
public void testAddToClusterNodeLabels2() throws Exception {
    // In this test, we try to add A0 label,
    // subCluster0 will return success, and other subClusters will return null
    NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo();
    nodeLabelsInfo.getNodeLabelsInfo().add(new NodeLabelInfo("A0", true));

    Response response = interceptor.addToClusterNodeLabels(nodeLabelsInfo, null);
    Assert.assertNotNull(response);

    Object entityObj = response.getEntity();
    Assert.assertNotNull(entityObj);

    // Only the successful sub-cluster shows up in the aggregated message.
    Assert.assertTrue(String.valueOf(entityObj).contains("SubCluster-0:SUCCESS,"));
}
|
/**
 * Registers this collector with the default Prometheus registry.
 *
 * @return this collector, for chaining
 */
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
|
@Test
public void testDelayedStartReady() throws Exception {
    // TODO register calls the collector, which is in start delay seconds, need to understand
    // how to handle
    // Configure a 1-second start delay, then sleep past it so registration
    // happens after the delay window and real sample values are reported.
    JmxCollector jc = new JmxCollector("---\nstartDelaySeconds: 1");
    Thread.sleep(2000);
    jc.register(prometheusRegistry);
    assertEquals(
            1.0, getSampleValue("boolean_Test_True", new String[] {}, new String[] {}), .001);
}
|
/**
 * Returns the sampler for the given PCollection, creating and caching one on
 * first use so repeated calls for the same id share a single sampler.
 */
public <T> OutputSampler<T> sampleOutput(String pcollectionId, Coder<T> coder) {
    OutputSampler<?> sampler =
        outputSamplers.computeIfAbsent(
            pcollectionId,
            unused ->
                new OutputSampler<>(
                    coder, this.maxSamples, this.sampleEveryN, this.onlySampleExceptions));
    // Safe for homogeneous use: the cached sampler was built with this id's coder.
    return (OutputSampler<T>) sampler;
}
|
@Test
public void testMultipleSamePCollections() throws Exception {
    // Two samples for the same PCollection id must land in the same sampler.
    DataSampler dataSampler = new DataSampler();
    VarIntCoder intCoder = VarIntCoder.of();
    dataSampler.sampleOutput("pcollection-id", intCoder).sample(globalWindowedValue(1));
    dataSampler.sampleOutput("pcollection-id", intCoder).sample(globalWindowedValue(2));

    BeamFnApi.InstructionResponse allSamples = getAllSamples(dataSampler);
    assertHasSamples(allSamples, "pcollection-id", ImmutableList.of(encodeInt(1), encodeInt(2)));
}
|
/**
 * Computes a combined hash of everything that can invalidate a client's TLS/auth
 * configuration: the trusted TLS certificates plus any authentication secrets
 * (SCRAM/PLAIN password, TLS client cert+key, or OAuth tokens/secrets/certs).
 * A change in the returned hash signals that the client configuration changed.
 *
 * <p>Fix: the SCRAM and PLAIN branches were byte-for-byte duplicates and the
 * OAuth branch repeated the same cast four times; they are merged/extracted
 * here with identical behavior.
 *
 * @param secretOperations  secret operator for async secret lookups
 * @param namespace         namespace holding the referenced secrets
 * @param auth              client authentication config (may be null)
 * @param certSecretSources trusted certificate sources (may be null or empty)
 * @return future completing with the combined hash (0 TLS contribution when no certs)
 */
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
    final Future<Integer> tlsFuture;
    if (certSecretSources == null || certSecretSources.isEmpty()) {
        tlsFuture = Future.succeededFuture(0);
    } else {
        // get all TLS trusted certs, compute hash from each of them, sum hashes
        tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
                getCertificateAsync(secretOperations, namespace, certSecretSource)
                        .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
                .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
    }

    if (auth == null) {
        return tlsFuture;
    }

    if (auth instanceof KafkaClientAuthenticationScram || auth instanceof KafkaClientAuthenticationPlain) {
        // For SCRAM and PLAIN only the password secret can change.
        return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
    } else if (auth instanceof KafkaClientAuthenticationTls) {
        // A custom certificate and key can be used (and changed).
        return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
                tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
                        .compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
    } else if (auth instanceof KafkaClientAuthenticationOAuth) {
        return oauthHash(secretOperations, namespace, (KafkaClientAuthenticationOAuth) auth, tlsFuture);
    } else {
        // Unknown auth type: only the TLS certs contribute to the hash.
        return tlsFuture;
    }
}

/**
 * Sums the hashes of all OAuth-related inputs: the OAuth-specific trusted
 * certificates, the access/refresh tokens and client secret, plus the
 * already-computed TLS hash.
 */
private static Future<Integer> oauthHash(SecretOperator secretOperations, String namespace, KafkaClientAuthenticationOAuth oauth, Future<Integer> tlsFuture) {
    List<Future<Integer>> futureList = oauth.getTlsTrustedCertificates() == null ?
            new ArrayList<>() : oauth.getTlsTrustedCertificates().stream().map(certSecretSource ->
            getCertificateAsync(secretOperations, namespace, certSecretSource)
                    .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
    futureList.add(tlsFuture);
    futureList.add(addSecretHash(secretOperations, namespace, oauth.getAccessToken()));
    futureList.add(addSecretHash(secretOperations, namespace, oauth.getClientSecret()));
    futureList.add(addSecretHash(secretOperations, namespace, oauth.getRefreshToken()));
    return Future.join(futureList)
            .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
}
|
@Test
void getHashFailure() {
    // OAuth auth referencing three secrets; the client-secret lookup returns null,
    // which must fail the combined hash computation.
    String namespace = "ns";

    GenericSecretSource at = new GenericSecretSourceBuilder()
        .withSecretName("top-secret-at")
        .withKey("key")
        .build();

    GenericSecretSource cs = new GenericSecretSourceBuilder()
        .withSecretName("top-secret-cs")
        .withKey("key")
        .build();

    GenericSecretSource rt = new GenericSecretSourceBuilder()
        .withSecretName("top-secret-rt")
        .withKey("key")
        .build();
    KafkaClientAuthentication kcu = new KafkaClientAuthenticationOAuthBuilder()
        .withAccessToken(at)
        .withRefreshToken(rt)
        .withClientSecret(cs)
        .build();

    CertSecretSource css = new CertSecretSourceBuilder()
        .withCertificate("key")
        .withSecretName("css-secret")
        .build();

    Secret secret = new SecretBuilder()
        .withData(Map.of("key", "value"))
        .build();

    // All lookups succeed except the client secret, which is missing (null).
    SecretOperator secretOps = mock(SecretOperator.class);
    when(secretOps.getAsync(eq(namespace), eq("top-secret-at"))).thenReturn(Future.succeededFuture(secret));
    when(secretOps.getAsync(eq(namespace), eq("top-secret-rt"))).thenReturn(Future.succeededFuture(secret));
    when(secretOps.getAsync(eq(namespace), eq("top-secret-cs"))).thenReturn(Future.succeededFuture(null));
    when(secretOps.getAsync(eq(namespace), eq("css-secret"))).thenReturn(Future.succeededFuture(secret));

    Future<Integer> res = VertxUtil.authTlsHash(secretOps, "ns", kcu, singletonList(css));
    // NOTE(review): these assertions run inside an async onComplete callback; if the
    // future is not yet complete when the test method returns, they may never execute.
    // Consider a blocking await / VertxTestContext checkpoint — confirm against the
    // surrounding test setup.
    res.onComplete(v -> {
        assertThat(v.succeeded(), is(false));
        assertThat(v.cause().getMessage(), is("Secret top-secret-cs not found"));
    });
}
|
/**
 * Stores the (instant, value) pair for the given key and returns the pair it
 * replaced, or empty when the key had no previous entry.
 */
public Optional<InstantAndValue<T>> getAndSet(MetricKey metricKey, Instant now, T value) {
    final InstantAndValue<T> previous =
        counters.put(metricKey, new InstantAndValue<>(now, value));
    // Map.put returns null when there was no prior mapping.
    return Optional.ofNullable(previous);
}
|
@Test
public void testGetAndSetLong() {
    final LastValueTracker<Long> tracker = new LastValueTracker<>();

    // First write for a key: there is no previous value to hand back.
    final Optional<InstantAndValue<Long>> previous = tracker.getAndSet(METRIC_NAME, instant1, 1L);

    assertFalse(previous.isPresent());
}
|
/**
 * Sets the file length.
 *
 * @param length the new length value
 * @return this inode, for call chaining
 */
public MutableInodeFile setLength(long length) {
    mLength = length;
    return getThis();
}
|
@Test
public void setLength() {
    // The setter must store exactly the value it is given.
    MutableInodeFile file = createInodeFile(1);
    file.setLength(LENGTH);
    assertEquals(LENGTH, file.getLength());
}
|
/**
 * Checks whether the given ksqlDB server version is supported by this tool.
 *
 * @param ksqlServerVersion the version string reported by the server
 * @return true if the server version is at least 6.0
 * @throws MigrationException if the version string cannot be parsed
 */
public static boolean isSupportedVersion(final String ksqlServerVersion) {
    final KsqlVersion version;
    try {
        version = new KsqlVersion(ksqlServerVersion);
    } catch (IllegalArgumentException e) {
        // NOTE(review): the parse failure `e` is not chained as a cause here;
        // consider attaching it if MigrationException has a cause constructor.
        throw new MigrationException("Could not parse ksqlDB server version to "
            + "verify compatibility. Version: " + ksqlServerVersion);
    }
    return version.isAtLeast(new KsqlVersion("6.0."));
}
|
@Test
public void shouldReturnUnsupportedVersion() {
    // Every pre-6.0 version — with or without a leading "v", and with suffixes —
    // must be reported as unsupported.
    final String[] unsupportedVersions = {
        "v5.5.5", "v5.4.0", "v4.0.1", "v0.9.5", "v0.8.0",
        "v0.6.0", "v0.6.0-rc123", "v0.6.0-ksqldb",
        "5.5.5", "5.4.0", "4.0.1", "0.9.5", "0.8.0",
        "0.6.0", "0.6.0-rc123", "0.6.0-ksqldb",
    };
    for (final String version : unsupportedVersions) {
        assertThat(isSupportedVersion(version), is(false));
    }
}
|
/**
 * @return the fully-qualified name in "catalog.database" form
 */
public String getCatalogDbName() {
    return String.format("%s.%s", getCatalog(), getDatabase());
}
|
@Test
public void testGetCatalogDbName() {
    // The qualified name must be "<catalog>.<database>".
    UserProperty property = new UserProperty();
    property.setCatalog("catalog");
    property.setDatabase("db");
    Assert.assertEquals("catalog.db", property.getCatalogDbName());
}
|
/**
 * Looks up the localized string for the given key in the given table,
 * memoizing results so each bundle lookup happens at most once per key.
 *
 * @param key   the localization key
 * @param table the strings table (e.g. "Localizable")
 * @return the localized string from the cache
 */
@Override
public String localize(final String key, final String table) {
    // Cache entries are keyed by "<table>.<key>".
    final String identifier = String.format("%s.%s", table, key);
    if(!cache.contains(identifier)) {
        // First lookup for this key: resolve via the bundle and memoize.
        cache.put(identifier, bundle.localizedString(key, table));
    }
    return cache.get(identifier);
}
|
@Test
public void testGet() {
    // The expected value equals the lookup key — presumably the bundle returns
    // the key itself when no translation entry exists (confirm against the
    // Localizable table contents).
    assertEquals("Il y a eu un problème lors de la recherche de mises à jour",
        new BundleLocale().localize("Il y a eu un problème lors de la recherche de mises à jour", "Localizable"));
}
|
/**
 * Masks sensitive values in a KSQL statement.
 * Parses the statement and masks via the visitor; if parsing fails for any
 * reason — including a parser StackOverflowError on deeply nested input —
 * falls back to regex-based masking so the statement is still masked.
 *
 * @param query the raw statement text
 * @return the masked statement
 */
public static String getMaskedStatement(final String query) {
    try {
        final ParseTree tree = DefaultKsqlParser.getParseTree(query);
        return new Visitor().visit(tree);
    } catch (final Exception | StackOverflowError e) {
        // Errors are expected for invalid statements; mask less precisely via regex.
        return fallbackMasking(query);
    }
}
|
@Test
public void shouldMaskInvalidCreateStreamWithQuotes() {
    // Given: an unparseable statement (typo: "WITH" written as "WIT").
    final String statement = "CREATE STREAM `stream` (id varchar) WIT ('format' = 'avro', \"kafka_topic\" = 'test_topic', partitions=3);";

    // When:
    final String masked = QueryMask.getMaskedStatement(statement);

    // Then: regex fallback masks every property value, even keys that would
    // normally be in ALLOWED_KEYS, because the statement could not be parsed.
    final String expected = "CREATE STREAM `stream` (id varchar) WIT ('format'='[string]', \"kafka_topic\"='[string]', partitions=3);";
    assertThat(masked, is(expected));
}
|
/**
 * Checks whether the user is granted access of the given type:
 * the user must be on the allow ACL and must not be on the blacklist (if any).
 */
public boolean hasAccess(Type type, UserGroupInformation ugi) {
    final AccessControlList acl = acls.get(type);
    boolean access = acl.isUserAllowed(ugi);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Checking user [{}] for: {} {} ", ugi.getShortUserName(),
            type.toString(), acl.getAclString());
    }
    if (access) {
        // Allowed by the ACL — now make sure the user is not blacklisted.
        AccessControlList blacklist = blacklistedAcls.get(type);
        boolean blacklisted = blacklist != null && blacklist.isUserInList(ugi);
        access = !blacklisted;
        if (LOG.isDebugEnabled()) {
            if (blacklist == null) {
                LOG.debug("No blacklist for {}", type.toString());
            } else if (access) {
                LOG.debug("user is not in {}", blacklist.getAclString());
            } else {
                LOG.debug("user is in {}", blacklist.getAclString());
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("User: [{}], Type: {} Result: {}", ugi.getShortUserName(),
            type.toString(), access);
    }
    return access;
}
|
@Test
public void testCustom() {
    // Configure each ACL so only a user named after the ACL type is allowed.
    final Configuration conf = new Configuration(false);
    for (KMSACLs.Type type : KMSACLs.Type.values()) {
        conf.set(type.getAclConfigKey(), type.toString() + " ");
    }

    final KMSACLs acls = new KMSACLs(conf);
    for (KMSACLs.Type type : KMSACLs.Type.values()) {
        // The matching user is allowed; anyone else ("foo") is denied.
        Assert.assertTrue(acls.hasAccess(type,
            UserGroupInformation.createRemoteUser(type.toString())));
        Assert.assertFalse(acls.hasAccess(type,
            UserGroupInformation.createRemoteUser("foo")));
    }
}
|
/**
 * Returns the human-readable name of a binlog event type code,
 * mirroring the names MySQL uses for each log event.
 *
 * @param type the numeric event type code
 * @return the event name, or "Unknown type:<code>" for unrecognized codes
 */
public static String getTypeName(final int type) {
    switch (type) {
        case START_EVENT_V3:
            return "Start_v3";
        case STOP_EVENT:
            return "Stop";
        case QUERY_EVENT:
            return "Query";
        case ROTATE_EVENT:
            return "Rotate";
        case INTVAR_EVENT:
            return "Intvar";
        case LOAD_EVENT:
            return "Load";
        case NEW_LOAD_EVENT:
            return "New_load";
        case SLAVE_EVENT:
            return "Slave";
        case CREATE_FILE_EVENT:
            return "Create_file";
        case APPEND_BLOCK_EVENT:
            return "Append_block";
        case DELETE_FILE_EVENT:
            return "Delete_file";
        case EXEC_LOAD_EVENT:
            return "Exec_load";
        case RAND_EVENT:
            return "RAND";
        case XID_EVENT:
            return "Xid";
        case USER_VAR_EVENT:
            return "User var";
        case FORMAT_DESCRIPTION_EVENT:
            return "Format_desc";
        case TABLE_MAP_EVENT:
            return "Table_map";
        case PRE_GA_WRITE_ROWS_EVENT:
            return "Write_rows_event_old";
        case PRE_GA_UPDATE_ROWS_EVENT:
            return "Update_rows_event_old";
        case PRE_GA_DELETE_ROWS_EVENT:
            return "Delete_rows_event_old";
        case WRITE_ROWS_EVENT_V1:
            return "Write_rows_v1";
        case UPDATE_ROWS_EVENT_V1:
            return "Update_rows_v1";
        case DELETE_ROWS_EVENT_V1:
            return "Delete_rows_v1";
        case BEGIN_LOAD_QUERY_EVENT:
            return "Begin_load_query";
        case EXECUTE_LOAD_QUERY_EVENT:
            return "Execute_load_query";
        case INCIDENT_EVENT:
            return "Incident";
        // Both heartbeat variants share one display name.
        case HEARTBEAT_LOG_EVENT:
        case HEARTBEAT_LOG_EVENT_V2:
            return "Heartbeat";
        case IGNORABLE_LOG_EVENT:
            return "Ignorable";
        case ROWS_QUERY_LOG_EVENT:
            return "Rows_query";
        case WRITE_ROWS_EVENT:
            return "Write_rows";
        case UPDATE_ROWS_EVENT:
            return "Update_rows";
        case DELETE_ROWS_EVENT:
            return "Delete_rows";
        case GTID_LOG_EVENT:
            return "Gtid";
        case ANONYMOUS_GTID_LOG_EVENT:
            return "Anonymous_Gtid";
        case PREVIOUS_GTIDS_LOG_EVENT:
            return "Previous_gtids";
        case PARTIAL_UPDATE_ROWS_EVENT:
            return "Update_rows_partial";
        case TRANSACTION_CONTEXT_EVENT :
            return "Transaction_context";
        case VIEW_CHANGE_EVENT :
            return "view_change";
        case XA_PREPARE_LOG_EVENT :
            return "Xa_prepare";
        case TRANSACTION_PAYLOAD_EVENT :
            return "transaction_payload";
        default:
            return "Unknown type:" + type;
    }
}
|
@Test
public void getTypeNameInputPositiveOutputNotNull2() {
    // Type code 12 resolves to the "New_load" event name.
    final String actual = LogEvent.getTypeName(12);
    Assert.assertEquals("New_load", actual);
}
|
/**
 * Expands each source tile into the set of tiles within mBorder of it,
 * wrapping coordinates around the map edges. Tiles already in the output are
 * skipped; tiles already in the source are skipped unless mIncludeAll is set.
 */
@Override
public MapTileList computeFromSource(final MapTileList pSource, final MapTileList pReuse) {
    final MapTileList out = pReuse != null ? pReuse : new MapTileList();
    for (int i = 0; i < pSource.getSize(); i++) {
        final long sourceIndex = pSource.get(i);
        final int zoom = MapTileIndex.getZoom(sourceIndex);
        final int sourceX = MapTileIndex.getX(sourceIndex);
        final int sourceY = MapTileIndex.getY(sourceIndex);
        final int mapSize = 1 << zoom; // number of tiles per axis at this zoom
        for (int dX = -mBorder; dX <= mBorder; dX++) {
            for (int dY = -mBorder; dY <= mBorder; dY++) {
                final int destX = wrap(sourceX + dX, mapSize);
                final int destY = wrap(sourceY + dY, mapSize);
                final long index = MapTileIndex.getTileIndex(zoom, destX, destY);
                if (out.contains(index)) {
                    continue;
                }
                if (pSource.contains(index) && !mIncludeAll) {
                    continue;
                }
                out.put(index);
            }
        }
    }
    return out;
}

/**
 * Normalizes a tile coordinate into [0, modulus) by repeated shifting,
 * implementing the map's wrap-around at the edges.
 */
private static int wrap(int value, final int modulus) {
    while (value < 0) {
        value += modulus;
    }
    while (value >= modulus) {
        value -= modulus;
    }
    return value;
}
|
@Test
public void testOnePointModulo() {
    // A point near the map edge: the border expansion must wrap around.
    final int border = 2;
    final int zoom = 5;
    final int sourceX = 1;
    final int sourceY = 31;

    final MapTileList source = new MapTileList();
    source.put(MapTileIndex.getTileIndex(zoom, sourceX, sourceY));

    // Expected: the full border neighborhood, minus the source tile itself.
    final Set<Long> expected = new HashSet<>();
    add(expected, zoom, sourceX, sourceY, border);
    expected.remove(MapTileIndex.getTileIndex(zoom, sourceX, sourceY));

    final MapTileList dest = new MapTileList();
    new MapTileListBorderComputer(border, false).computeFromSource(source, dest);
    check(dest, expected, zoom);
}
|
/**
 * Builds a Materialization for the given query by converting each transform
 * description in the materialization info into an executable transform.
 */
public Materialization create(
    final StreamsMaterialization delegate,
    final MaterializationInfo info,
    final QueryId queryId,
    final QueryContext.Stacker contextStacker
) {
    final TransformVisitor visitor = new TransformVisitor(queryId, contextStacker);

    // Convert each transform description via the visitor, preserving order.
    final List<Transform> transforms = info.getTransforms().stream()
        .map(transformInfo -> transformInfo.visit(visitor))
        .collect(Collectors.toList());

    return materializationFactory.create(delegate, info.getSchema(), transforms);
}
|
@Test
public void shouldBuildMaterializationWithCorrectParams() {
    // When:
    factory.create(materialization, info, queryId, contextStacker);
    // Then: the delegate factory receives the same materialization and the
    // schema from the info; the transform list is opaque here, so any() is used.
    verify(materializationFactory).create(
        eq(materialization),
        eq(TABLE_SCHEMA),
        any()
    );
}
|
/**
 * Rewrites a record against a new schema: each field of the new schema is
 * copied from the old record or set to its default. Metadata fields are
 * skipped for specific (generated) records, which manage them themselves.
 */
public static GenericRecord rewriteRecord(GenericRecord oldRecord, Schema newSchema) {
    final GenericRecord newRecord = new GenericData.Record(newSchema);
    final boolean isSpecificRecord = oldRecord instanceof SpecificRecordBase;
    for (Schema.Field field : newSchema.getFields()) {
        if (isSpecificRecord && isMetadataField(field.name())) {
            continue;
        }
        copyOldValueOrSetDefault(oldRecord, newRecord, field);
    }
    return newRecord;
}
|
@Test
public void testDefaultValueWithSchemaEvolution() {
    // Build a record against the original schema...
    GenericRecord original = new GenericData.Record(new Schema.Parser().parse(EXAMPLE_SCHEMA));
    original.put("_row_key", "key1");
    original.put("non_pii_col", "val1");
    original.put("pii_col", "val2");
    original.put("timestamp", 3.5);

    // ...then rewrite it against the evolved schema.
    GenericRecord rewritten = HoodieAvroUtils.rewriteRecord(original, new Schema.Parser().parse(EVOLVED_SCHEMA));

    // New fields pick up their schema default (or null when nullable without one).
    assertEquals("dummy_val", rewritten.get("new_col_not_nullable_default_dummy_val"));
    assertNull(rewritten.get("new_col_nullable_wo_default"));
}
|
/**
 * Resolves a SpEL expression against a method invocation.
 * <p>
 * Resolution order: empty expressions pass through; "${...}" placeholders go
 * through the environment's string value resolver; method-style and
 * bean-reference expressions are evaluated as SpEL (the latter with bean
 * lookup enabled); anything else is returned as a literal.
 *
 * <p>Fix: the method- and bean-expression branches were duplicates except for
 * the bean resolver; the shared evaluation is extracted to one helper.
 *
 * @param method         the intercepted method
 * @param arguments      the invocation arguments
 * @param spelExpression the expression to resolve (may be empty)
 * @return the resolved string, or the input unchanged when not a SpEL form
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        // Property placeholders are resolved by the environment, not SpEL.
        return stringValueResolver.resolveStringValue(spelExpression);
    }
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluateSpel(method, arguments, spelExpression, false);
    }
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        // Bean references additionally need the bean factory for "@bean" lookup.
        return evaluateSpel(method, arguments, spelExpression, true);
    }
    return spelExpression;
}

/**
 * Parses and evaluates the expression against a method-based SpEL context.
 *
 * @param withBeanResolver whether to expose the bean factory for "@bean" references
 */
private String evaluateSpel(Method method, Object[] arguments, String spelExpression, boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
|
@Test
public void placeholderSpelTest2() throws Exception {
    // A "${property:default}" placeholder must be resolved via the environment.
    final String expression = "${property:default}";
    final DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    final Method method = target.getClass().getMethod("testMethod", String.class);

    final String resolved = sut.resolve(method, new Object[]{}, expression);

    assertThat(resolved).isEqualTo("backend");
}
|
/**
 * Marks the start of the state lock's exclusive-only phase. Invoked when all masters have
 * started; repeat invocations are no-ops because the deadline has already been set.
 */
public void mastersStartedCallback() {
  if (mExclusiveOnlyDeadlineMs != -1) {
    return; // Deadline already initialized.
  }
  final long durationMs =
      Configuration.getMs(PropertyKey.MASTER_BACKUP_STATE_LOCK_EXCLUSIVE_DURATION);
  mExclusiveOnlyDeadlineMs = System.currentTimeMillis() + durationMs;
  if (durationMs > 0) {
    LOG.info("State-lock will remain in exclusive-only mode for {}ms until {}",
        durationMs, new Date(mExclusiveOnlyDeadlineMs).toString());
  }
}
|
// During the exclusive-only window, shared state-lock acquisitions must fail while an
// exclusive acquisition succeeds.
// NOTE(review): mExpected.expect(...) inside the loop registers the same expectation
// repeatedly; confirm the rule implementation tolerates this as intended.
@Test
public void testExclusiveOnlyMode() throws Throwable {
  // Configure exclusive-only duration to cover the entire test execution.
  final long exclusiveOnlyDurationMs = 30 * 1000;
  Configuration.set(PropertyKey.MASTER_BACKUP_STATE_LOCK_EXCLUSIVE_DURATION,
      exclusiveOnlyDurationMs);
  // The state-lock instance.
  StateLockManager stateLockManager = new StateLockManager();
  // Simulate masters-started event to initiate the exclusive-only phase.
  stateLockManager.mastersStartedCallback();
  for (int i = 0; i < 10; i++) {
    StateLockingThread sharedHolderThread = new StateLockingThread(stateLockManager, false);
    sharedHolderThread.start();
    // Shared lockers are expected to fail.
    mExpected.expect(IllegalStateException.class);
    sharedHolderThread.waitUntilStateLockAcquired();
  }
  // Exclusive locking should be allowed.
  StateLockingThread exclusiveHolderThread = new StateLockingThread(stateLockManager, true);
  exclusiveHolderThread.start();
  // State lock should be acquired.
  exclusiveHolderThread.waitUntilStateLockAcquired();
  // Signal exit and wait for the exclusive locker.
  exclusiveHolderThread.unlockExit();
  exclusiveHolderThread.join();
}
|
/**
 * Creates a {@link Write} transform for the given ClickHouse JDBC url and table, using
 * library defaults for block size, retries and backoff. Insert deduplication and
 * synchronous distributed inserts are enabled by default.
 */
public static <T> Write<T> write(String jdbcUrl, String table) {
  Write<T> base =
      new AutoValue_ClickHouseIO_Write.Builder<T>()
          .jdbcUrl(jdbcUrl)
          .table(table)
          .properties(new Properties())
          .maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
          .initialBackoff(DEFAULT_INITIAL_BACKOFF)
          .maxRetries(DEFAULT_MAX_RETRIES)
          .maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
          .build();
  return base.withInsertDeduplicate(true).withInsertDistributedSync(true);
}
|
// Round-trips a deeply nested Beam Row (Tuple within Tuple) through the ClickHouse write
// transform and verifies both the full tuple rendering and dotted-path access to nested
// tuple members.
@Test
public void testComplexTupleType() throws Exception {
  Schema sizeSchema =
      Schema.of(
          Schema.Field.of("width", FieldType.INT64.withNullable(true)),
          Schema.Field.of("height", FieldType.INT64.withNullable(true)));
  Schema browserSchema =
      Schema.of(
          Schema.Field.of("name", FieldType.STRING.withNullable(true)),
          Schema.Field.of("size", FieldType.row(sizeSchema)),
          Schema.Field.of("version", FieldType.STRING.withNullable(true)));
  Schema propSchema =
      Schema.of(
          Schema.Field.of("browser", FieldType.row(browserSchema)),
          Schema.Field.of("deviceCategory", FieldType.STRING.withNullable(true)));
  Schema schema = Schema.of(Schema.Field.of("prop", FieldType.row(propSchema)));
  Row sizeRow = Row.withSchema(sizeSchema).addValue(10L).addValue(20L).build();
  Row browserRow =
      Row.withSchema(browserSchema).addValue("test").addValue(sizeRow).addValue("1.0.0").build();
  Row propRow = Row.withSchema(propSchema).addValue(browserRow).addValue("mobile").build();
  Row row1 = Row.withSchema(schema).addValue(propRow).build();
  // The ClickHouse table mirrors the nested Row structure with named Tuple columns.
  executeSql(
      "CREATE TABLE test_named_complex_tuples ("
          + "`prop` Tuple(`browser` Tuple(`name` Nullable(String),`size` Tuple(`width` Nullable(Int64), `height` Nullable(Int64)),`version` Nullable(String)),`deviceCategory` Nullable(String))"
          + ") ENGINE=Log");
  pipeline.apply(Create.of(row1).withRowSchema(schema)).apply(write("test_named_complex_tuples"));
  pipeline.run().waitUntilFinish();
  // Whole-tuple read back as its string rendering.
  try (ResultSet rs = executeQuery("SELECT * FROM test_named_complex_tuples")) {
    rs.next();
    assertEquals("[[test, [10, 20], 1.0.0], mobile]", rs.getString("prop"));
  }
  // Dotted-path projection into nested tuple members.
  try (ResultSet rs =
      executeQuery(
          "SELECT prop.browser.name as name, prop.browser.size as size FROM test_named_complex_tuples")) {
    rs.next();
    assertEquals("test", rs.getString("name"));
    assertEquals("[10, 20]", rs.getString("size"));
  }
}
|
/**
 * Reads up to {@code length} bytes into {@code buffer} starting at the given file
 * position, returning the number of bytes read.
 *
 * NOTE(review): implemented as seek-then-read, so this advances the stream's current
 * position as a side effect — unlike positioned-read contracts that leave the position
 * untouched. Confirm callers do not depend on position preservation.
 */
@Override
public int positionedRead(long position, byte[] buffer, int offset, int length)
    throws IOException {
  seek(position);
  return read(buffer, offset, length);
}
|
// Reading the second half of a CHUNK_SIZE file via positionedRead must return exactly
// CHUNK_SIZE/2 bytes whose contents match the expected increasing-byte pattern.
@Test
public void positionedRead() throws IOException, AlluxioException {
  AlluxioURI ufsPath = getUfsPath();
  createFile(ufsPath, CHUNK_SIZE);
  try (FileInStream inStream = getStream(ufsPath)) {
    byte[] res = new byte[CHUNK_SIZE / 2];
    assertEquals(CHUNK_SIZE / 2,
        inStream.positionedRead(CHUNK_SIZE / 2, res, 0, CHUNK_SIZE / 2));
    assertTrue(BufferUtils.equalIncreasingByteArray(CHUNK_SIZE / 2, CHUNK_SIZE / 2, res));
  }
}
|
/**
 * Converts an array of strings to an array of {@link URI}s.
 *
 * @param str the strings to convert; may be {@code null}
 * @return the corresponding URIs, or {@code null} when {@code str} is {@code null}
 * @throws IllegalArgumentException if any element is not a syntactically valid URI
 */
public static URI[] stringToURI(String[] str){
  if (str == null) {
    return null;
  }
  final URI[] result = new URI[str.length];
  for (int idx = 0; idx < result.length; idx++) {
    final String candidate = str[idx];
    try {
      result[idx] = new URI(candidate);
    } catch (URISyntaxException cause) {
      throw new IllegalArgumentException(
          "Failed to create uri for " + candidate, cause);
    }
  }
  return result;
}
|
// A malformed URI string must surface as IllegalArgumentException wrapping the
// URISyntaxException, with the documented message prefix.
@Test (timeout = 30000)
public void testStringToURI() {
  String[] str = new String[] { "file://" };
  try {
    StringUtils.stringToURI(str);
    fail("Ignoring URISyntaxException while creating URI from string file://");
  } catch (IllegalArgumentException iae) {
    assertEquals("Failed to create uri for file://", iae.getMessage());
  }
}
|
/**
 * Fetches a broker's configuration as {@link Properties} via a synchronous remoting call.
 *
 * @param addr broker address to query
 * @param timeoutMillis maximum time to wait for the remote response
 * @throws MQBrokerException if the broker responds with a non-success code
 */
public Properties getBrokerConfig(final String addr, final long timeoutMillis)
    throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException,
    MQBrokerException, UnsupportedEncodingException {
  final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_CONFIG, null);
  final RemotingCommand response = this.remotingClient.invokeSync(addr, request, timeoutMillis);
  assert response != null;
  if (response.getCode() == ResponseCode.SUCCESS) {
    // The broker serializes its config as a properties-style string in the body.
    return MixAll.string2Properties(new String(response.getBody(), MixAll.DEFAULT_CHARSET));
  }
  throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
|
// A successful remote response whose body parses to one property must yield a
// single-entry Properties object.
@Test
public void assertGetBrokerConfig() throws RemotingException, InterruptedException, MQBrokerException, UnsupportedEncodingException {
  mockInvokeSync();
  setResponseBody("{\"key\":\"value\"}");
  Properties actual = mqClientAPI.getBrokerConfig(defaultBrokerAddr, defaultTimeout);
  assertNotNull(actual);
  assertEquals(1, actual.size());
}
|
/**
 * Annotates the generated field with a JSR-303 {@code @Size} constraint when the schema
 * declares {@code minItems}/{@code maxItems}, constraint generation is enabled, and the
 * field's type supports size validation. Chooses the Jakarta or javax variant of the
 * annotation based on configuration.
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
  // Guard clauses preserve the original short-circuit evaluation order.
  if (!ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
      || !(node.has("minItems") || node.has("maxItems"))
      || !isApplicableType(field)) {
    return field;
  }
  final Class<? extends Annotation> sizeClass =
      ruleFactory.getGenerationConfig().isUseJakartaValidation()
          ? Size.class
          : javax.validation.constraints.Size.class;
  final JAnnotationUse sizeAnnotation = field.annotate(sizeClass);
  if (node.has("minItems")) {
    sizeAnnotation.param("min", node.get("minItems").asInt());
  }
  if (node.has("maxItems")) {
    sizeAnnotation.param("max", node.get("maxItems").asInt());
  }
  return field;
}
|
// With both minItems and maxItems present, the rule must annotate @Size with matching
// min/max params — but only when the field type is applicable (parameterized test flag).
@Test
public void testMaxAndMinLength() {
  when(config.isIncludeJsr303Annotations()).thenReturn(true);
  final int minValue = new Random().nextInt();
  final int maxValue = new Random().nextInt();
  JsonNode maxSubNode = Mockito.mock(JsonNode.class);
  when(subNode.asInt()).thenReturn(minValue);
  when(maxSubNode.asInt()).thenReturn(maxValue);
  when(node.get("minItems")).thenReturn(subNode);
  when(node.get("maxItems")).thenReturn(maxSubNode);
  when(fieldVar.annotate(sizeClass)).thenReturn(annotation);
  when(node.has("minItems")).thenReturn(true);
  when(node.has("maxItems")).thenReturn(true);
  when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());
  JFieldVar result = rule.apply("node", node, null, fieldVar, null);
  // The field instance itself is always returned unchanged.
  assertSame(fieldVar, result);
  verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(sizeClass);
  verify(annotation, times(isApplicable ? 1 : 0)).param("min", minValue);
  verify(annotation, times(isApplicable ? 1 : 0)).param("max", maxValue);
}
|
/**
 * Creates an Avro-Parquet reader over the given stream. Splitting is rejected up front
 * because this format version reads a file only as a whole.
 */
@Override
public Reader<E> createReader(
    Configuration config, FSDataInputStream stream, long fileLen, long splitEnd)
    throws IOException {
  // current version does not support splitting.
  checkNotSplit(fileLen, splitEnd);
  return new AvroParquetRecordReader<E>(
      AvroParquetReader.<E>builder(new ParquetInputFile(stream, fileLen))
          .withDataModel(getDataModel())
          .withConf(HadoopUtils.getHadoopConfiguration(config))
          .build());
}
|
// Requesting a reader with a split (splitEnd != fileLen semantics in createReader's
// checkNotSplit) must be rejected with IllegalArgumentException.
@Test
void testCreateGenericReaderWithSplitting() {
  assertThatThrownBy(
          () ->
              createReader(
                  AvroParquetReaders.forGenericRecord(schema),
                  new Configuration(),
                  userPath,
                  5,
                  5))
      .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Generates a time-based one-time password (TOTP, RFC 6238) by converting elapsed
 * seconds into a count of {@code period}-sized time steps and delegating to HOTP.
 *
 * @param secret shared secret key
 * @param algo HMAC algorithm name (e.g. "HmacSHA1")
 * @param digits number of digits in the generated OTP
 * @param period time-step size in seconds (typically 30)
 * @param seconds seconds elapsed since the epoch reference (T0)
 */
public static OTP generateOTP(byte[] secret, String algo, int digits, long period, long seconds)
    throws InvalidKeyException, NoSuchAlgorithmException {
  // Math.floorDiv keeps the computation in exact long arithmetic (the previous
  // Math.floor((double) seconds / period) loses precision for |seconds| > 2^53)
  // while preserving floor semantics for negative inputs.
  long counter = Math.floorDiv(seconds, period);
  return HOTP.generateOTP(secret, algo, digits, counter);
}
|
// Validates TOTP generation against the published test vectors: for each vector's
// algorithm and timestamp, an 8-digit OTP with a 30-second period must match exactly.
@Test
public void vectorsMatch() throws NoSuchAlgorithmException, InvalidKeyException {
  for (Vector vector : VECTORS) {
    byte[] seed = getSeed(vector.Algo);
    OTP otp = TOTP.generateOTP(seed, vector.Algo, 8, 30, vector.Time);
    assertEquals(vector.OTP, otp.toString());
  }
}
|
/**
 * Resolves the tenant bound to the current context and passes it to the given handler.
 * Skipped entirely when the multi-tenant feature is disabled.
 */
@Override
public void handleTenantInfo(TenantInfoHandler handler) {
  // Do nothing when the tenant feature is disabled.
  if (isTenantDisable()) {
    return;
  }
  // Look up the current tenant and hand it to the callback.
  handler.handle(getTenant(TenantContextHolder.getRequiredTenantId()));
}
|
// With the tenant feature enabled and a tenant bound to the context, handleTenantInfo
// must invoke the handler with that exact tenant.
@Test
public void testHandleTenantInfo_success() {
  // Prepare the handler argument.
  TenantInfoHandler handler = mock(TenantInfoHandler.class);
  // Mock: tenant feature is enabled.
  when(tenantProperties.getEnable()).thenReturn(true);
  // Mock: persist a tenant and bind its id to the current context.
  TenantDO dbTenant = randomPojo(TenantDO.class);
  tenantMapper.insert(dbTenant);// @Sql: insert an existing row first
  TenantContextHolder.setTenantId(dbTenant.getId());
  // Invoke.
  tenantService.handleTenantInfo(handler);
  // Assert: the handler received a tenant equal to the persisted one.
  verify(handler).handle(argThat(argument -> {
    assertPojoEquals(dbTenant, argument);
    return true;
  }));
}
|
/**
 * Creates {@link CreateSourceProperties} from the supplied property literals, translating
 * Kafka-config style error wording ("configuration") into KSQL's "property" wording.
 *
 * @throws KsqlException if any property fails validation
 */
public static CreateSourceProperties from(final Map<String, Literal> literals) {
  try {
    return new CreateSourceProperties(literals, DurationParser::parse, false);
  } catch (final ConfigException e) {
    // Re-word the underlying config error before surfacing it to the user.
    throw new KsqlException(e.getMessage().replace("configuration", "property"), e);
  }
}
|
// An unrecognized WINDOW_TYPE value must produce a KsqlException whose message uses the
// re-worded "property" terminology and lists the allowed window types.
@Test
public void shouldFailIfInvalidWindowConfig() {
  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> CreateSourceProperties.from(
          ImmutableMap.<String, Literal>builder()
              .putAll(MINIMUM_VALID_PROPS)
              .put(WINDOW_TYPE_PROPERTY, new StringLiteral("bar"))
              .build()
      )
  );
  // Then:
  assertThat(e.getMessage(), containsString("Invalid value bar for property WINDOW_TYPE: String must be one of: SESSION, HOPPING, TUMBLING"));
}
|
/**
 * Returns the URL path to the analytics JS file, if an analytics plugin is installed and
 * its configured path is a safe relative path (no leading '/', no '..', no URL scheme).
 * The returned path is normalized to start with '/'.
 */
@Override
public Optional<String> getUrlPathToJs() {
  if (analytics == null) {
    return Optional.empty();
  }
  String path = analytics.getUrlPathToJs();
  boolean unsafe = path == null
      || path.startsWith("/")
      || path.contains("..")
      || path.contains("://");
  return unsafe ? Optional.empty() : Optional.of("/" + path);
}
|
// Without an analytics plugin (null or empty extension array), no JS path is exposed.
@Test
public void return_empty_if_no_analytics_plugin() {
  assertThat(new WebAnalyticsLoaderImpl(null).getUrlPathToJs()).isEmpty();
  assertThat(new WebAnalyticsLoaderImpl(new WebAnalytics[0]).getUrlPathToJs()).isEmpty();
}
|
/**
 * Convenience overload: creates a stream execution environment from the pipeline options,
 * defaulting files-to-stage to an empty list when unset and using the configured Flink
 * conf directory.
 */
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
  return createStreamExecutionEnvironment(
      options,
      MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
      options.getFlinkConfDir());
}
|
// When a remote Flink master and a savepoint path are configured, the created environment
// must be a RemoteStreamEnvironment carrying that savepoint restore path.
@Test
public void shouldSetSavepointRestoreForRemoteStreaming() {
  String path = "fakePath";
  FlinkPipelineOptions options = getDefaultPipelineOptions();
  options.setRunner(TestFlinkRunner.class);
  options.setFlinkMaster("host:80");
  options.setSavepointPath(path);
  StreamExecutionEnvironment sev =
      FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
  // subject to change with https://issues.apache.org/jira/browse/FLINK-11048
  assertThat(sev, instanceOf(RemoteStreamEnvironment.class));
  assertThat(getSavepointPath(sev), is(path));
}
|
/**
 * Builds an OperationNode wrapping a ReadOperation for a read instruction: resolves the
 * flattened source spec and its coder, instantiates the native reader via the factory,
 * and wires the node's downstream output receivers.
 */
OperationNode createReadOperation(
    Network<Node, Edge> network,
    ParallelInstructionNode node,
    PipelineOptions options,
    ReaderFactory readerFactory,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  ParallelInstruction instruction = node.getParallelInstruction();
  ReadInstruction read = instruction.getRead();
  // Flatten base specs so the reader factory sees a fully-resolved source description.
  Source cloudSource = CloudSourceUtils.flattenBaseSpecs(read.getSource());
  CloudObject sourceSpec = CloudObject.fromSpec(cloudSource.getSpec());
  Coder<?> coder =
      CloudObjects.coderFromCloudObject(CloudObject.fromSpec(cloudSource.getCodec()));
  NativeReader<?> reader =
      readerFactory.create(sourceSpec, coder, options, executionContext, operationContext);
  // Receivers connect this read to its successors in the instruction network.
  OutputReceiver[] receivers = getOutputReceivers(network, node);
  return OperationNode.create(ReadOperation.create(reader, receivers, operationContext));
}
|
// End-to-end check of read-instruction translation: the factory must yield an
// OperationNode holding an unstarted ReadOperation with one receiver, the expected
// reader type, and the expected output/byte counters.
@Test
public void testCreateReadOperation() throws Exception {
  ParallelInstructionNode instructionNode =
      ParallelInstructionNode.create(createReadInstruction("Read"), ExecutionLocation.UNKNOWN);
  // Wire a single downstream output node for the instruction in the mocked network.
  when(network.successors(instructionNode))
      .thenReturn(
          ImmutableSet.<Node>of(
              IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                  .apply(
                      InstructionOutputNode.create(
                          instructionNode.getParallelInstruction().getOutputs().get(0),
                          PCOLLECTION_ID))));
  when(network.outDegree(instructionNode)).thenReturn(1);
  Node operationNode =
      mapTaskExecutorFactory
          .createOperationTransformForParallelInstructionNodes(
              STAGE,
              network,
              PipelineOptionsFactory.create(),
              readerRegistry,
              sinkRegistry,
              BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
          .apply(instructionNode);
  assertThat(operationNode, instanceOf(OperationNode.class));
  assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ReadOperation.class));
  ReadOperation readOperation = (ReadOperation) ((OperationNode) operationNode).getOperation();
  assertEquals(1, readOperation.receivers.length);
  assertEquals(0, readOperation.receivers[0].getReceiverCount());
  assertEquals(Operation.InitializationState.UNSTARTED, readOperation.initializationState);
  assertThat(readOperation.reader, instanceOf(ReaderFactoryTest.TestReader.class));
  // Verify the counters emitted for the read output.
  counterSet.extractUpdates(false, updateExtractor);
  verifyOutputCounters(updateExtractor, "read_output_name");
  verify(updateExtractor).longSum(eq(named("Read-ByteCount")), anyBoolean(), anyLong());
  verifyNoMoreInteractions(updateExtractor);
}
|
/**
 * Records a tracing lifecycle event for a step status transition: CREATED starts the
 * span, terminal statuses finish it (attaching the throwable, if any), and every other
 * status is added as an annotation. No-op when context or status is missing.
 */
public void handleStepStatus(
    MaestroTracingContext tracingContext, StepInstance.Status status, Throwable throwable) {
  if (tracingContext == null || status == null) {
    return; // Nothing to trace without both a context and a status.
  }
  String event = status.name();
  if (StepInstance.Status.CREATED.equals(status)) {
    start(tracingContext, event);
    return;
  }
  if (status.isTerminal()) {
    finish(tracingContext, event, throwable);
    return;
  }
  annotate(tracingContext, event);
}
|
// Drives handleStepStatus through every status value and verifies the span interactions:
// each status annotates once, terminal statuses additionally finish the span, and only
// CREATED starts it.
@Test
public void testHandleStepStatus() {
  MaestroTracingManager tm = new TestTracingManager(mockTracer);
  int finishCount = 0;
  for (StepInstance.Status s : StepInstance.Status.values()) {
    System.out.println("Testing " + s.name());
    tm.handleStepStatus(defaultContext, s);
    verify(mockSpan, times(1)).annotate(s.name());
    if (s.isTerminal()) {
      // finishCount accumulates across the loop since verify counts total invocations.
      verify(mockSpan, times(++finishCount)).finish();
    }
    if (s.equals(StepInstance.Status.CREATED)) {
      verify(mockSpan, times(1)).start();
    }
  }
}
|
/**
 * Stores the entry with the given time-to-live, blocking on the async variant and
 * returning the previous value mapped to the key (per the async put contract).
 */
@Override
public V put(K key, V value, Duration ttl) {
  return get(putAsync(key, value, ttl));
}
|
// keySet pattern matching must include entries regardless of whether they carry a TTL:
// "?0" matches all three keys, a non-matching pattern yields nothing, and an exact key
// pattern matches only itself.
@Test
public void testKeySetByPatternTTL() {
  RMapCacheNative<String, String> map = redisson.getMapCacheNative("simple", StringCodec.INSTANCE);
  map.put("10", "100");
  map.put("20", "200", Duration.ofMinutes(1));
  map.put("30", "300");
  assertThat(map.keySet("?0")).containsExactlyInAnyOrder("10", "20", "30");
  assertThat(map.keySet("1")).isEmpty();
  assertThat(map.keySet("10")).containsExactlyInAnyOrder("10");
  map.destroy();
}
|
/**
 * Returns the sub-list of {@code in} between 1-based, inclusive indices {@code from} and
 * {@code to}. A null {@code from} defaults to the start of the list and a null {@code to}
 * to the end. Returns null for a null input or for out-of-range indices.
 */
@Udf
public <T> List<T> slice(
    @UdfParameter(description = "the input array") final List<T> in,
    @UdfParameter(description = "start index") final Integer from,
    @UdfParameter(description = "end index") final Integer to) {
  if (in == null) {
    return null;
  }
  // Translate SQL's 1-based inclusive range into Java's 0-based, end-exclusive range.
  final int begin = (from == null) ? 0 : from - 1;
  final int finish = (to == null) ? in.size() : to;
  try {
    return in.subList(begin, finish);
  } catch (final IndexOutOfBoundsException e) {
    // Out-of-range slices yield null rather than an error.
    return null;
  }
}
|
// A null input list must short-circuit to a null result regardless of the indices.
@Test
public void shouldReturnNullOnNullInput() {
  // Given:
  final List<String> list = null;
  // When:
  final List<String> slice = new Slice().slice(list, 1, 2);
  // Then:
  assertThat(slice, nullValue());
}
|
/**
 * Decides whether the given address is trusted: a null address is never trusted, an empty
 * trusted-interface set trusts everything, otherwise the address host must match one of
 * the configured interfaces.
 */
@Override
public boolean isTrusted(Address address) {
  if (address == null) {
    return false;
  }
  // An empty whitelist means every interface is trusted.
  if (trustedInterfaces.isEmpty()) {
    return true;
  }
  final String host = address.getHost();
  if (!matchAnyInterface(host, trustedInterfaces)) {
    if (logger.isFineEnabled()) {
      logger.fine(
          "Address %s doesn't match any trusted interface", host);
    }
    return false;
  }
  return true;
}
|
// A host just outside the configured interface range (…101 vs …1-100) must be rejected.
@Test
public void givenInterfaceRangeIsConfigured_whenMessageWithNonMatchingHost_thenDoNotTrust() throws UnknownHostException {
  AddressCheckerImpl joinMessageTrustChecker = new AddressCheckerImpl(singleton("127.0.0.1-100"), logger);
  Address address = createAddress("127.0.0.101");
  assertFalse(joinMessageTrustChecker.isTrusted(address));
}
|
/**
 * Returns all commands currently in the command topic for restoration, polling with the
 * given duration. When the backup has detected command-topic corruption, an empty list is
 * returned so restoration falls back to the backup instead.
 */
public List<QueuedCommand> getRestoreCommands(final Duration duration) {
  if (!commandTopicBackup.commandTopicCorruption()) {
    return getAllCommandsInCommandTopic(
        commandConsumer, commandTopicPartition, Optional.of(commandTopicBackup), duration);
  }
  log.warn("Corruption detected. "
      + "Use backup to restore command topic.");
  return Collections.emptyList();
}
|
// Records with a null command value (tombstones) must be dropped during restore; only the
// two real commands appear in the returned list, with their original offsets.
@Test
public void shouldFilterNullCommandsWhileRestoringCommands() {
  // Given:
  when(commandConsumer.poll(any(Duration.class)))
      .thenReturn(someConsumerRecords(
          record1,
          record2,
          new ConsumerRecord<>("topic", 0, 2, commandId2, null)
      ));
  when(commandConsumer.endOffsets(any()))
      .thenReturn(Collections.singletonMap(TOPIC_PARTITION, 2L));
  when(commandConsumer.position(TOPIC_PARTITION)).thenReturn(0L, 2L);
  // When:
  final List<QueuedCommand> recordList = commandTopic
      .getRestoreCommands(Duration.ofMillis(1));
  // Then:
  assertThat(recordList, equalTo(ImmutableList.of(
      new QueuedCommand(commandId1, command1, Optional.empty(), 0L),
      new QueuedCommand(commandId2, command2, Optional.empty(),1L))));
}
|
/**
 * Executes a change stream query for the given partition over the restriction's timestamp
 * range, dispatching each returned record to its type-specific action (data change,
 * heartbeat, or child partitions). If any action returns a continuation, the watermark
 * callback is registered and that continuation is returned immediately; otherwise the
 * end timestamp is claimed, the partition is marked finished, and processing stops.
 *
 * NOTE(review): Spanner "timestamp out of range" errors are deliberately swallowed (see
 * catch block) so residual restrictions can finish a partition after a split.
 */
@SuppressWarnings("nullness")
@VisibleForTesting
public ProcessContinuation run(
    PartitionMetadata partition,
    RestrictionTracker<TimestampRange, Timestamp> tracker,
    OutputReceiver<DataChangeRecord> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator,
    BundleFinalizer bundleFinalizer) {
  final String token = partition.getPartitionToken();
  final Timestamp startTimestamp = tracker.currentRestriction().getFrom();
  final Timestamp endTimestamp = partition.getEndTimestamp();
  // TODO: Potentially we can avoid this fetch, by enriching the runningAt timestamp when the
  // ReadChangeStreamPartitionDoFn#processElement is called
  final PartitionMetadata updatedPartition =
      Optional.ofNullable(partitionMetadataDao.getPartition(token))
          .map(partitionMetadataMapper::from)
          .orElseThrow(
              () ->
                  new IllegalStateException(
                      "Partition " + token + " not found in metadata table"));
  try (ChangeStreamResultSet resultSet =
      changeStreamDao.changeStreamQuery(
          token, startTimestamp, endTimestamp, partition.getHeartbeatMillis())) {
    metrics.incQueryCounter();
    while (resultSet.next()) {
      final List<ChangeStreamRecord> records =
          changeStreamRecordMapper.toChangeStreamRecords(
              updatedPartition, resultSet, resultSet.getMetadata());
      Optional<ProcessContinuation> maybeContinuation;
      // Dispatch each record to the action matching its concrete type.
      for (final ChangeStreamRecord record : records) {
        if (record instanceof DataChangeRecord) {
          maybeContinuation =
              dataChangeRecordAction.run(
                  updatedPartition,
                  (DataChangeRecord) record,
                  tracker,
                  receiver,
                  watermarkEstimator);
        } else if (record instanceof HeartbeatRecord) {
          maybeContinuation =
              heartbeatRecordAction.run(
                  updatedPartition, (HeartbeatRecord) record, tracker, watermarkEstimator);
        } else if (record instanceof ChildPartitionsRecord) {
          maybeContinuation =
              childPartitionsRecordAction.run(
                  updatedPartition, (ChildPartitionsRecord) record, tracker, watermarkEstimator);
        } else {
          LOG.error("[{}] Unknown record type {}", token, record.getClass());
          throw new IllegalArgumentException("Unknown record type " + record.getClass());
        }
        // An action producing a continuation means processing should pause here; the
        // watermark is still advanced when the bundle commits.
        if (maybeContinuation.isPresent()) {
          LOG.debug("[{}] Continuation present, returning {}", token, maybeContinuation);
          bundleFinalizer.afterBundleCommit(
              Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT),
              updateWatermarkCallback(token, watermarkEstimator));
          return maybeContinuation.get();
        }
      }
    }
    // Stream exhausted without interruption: schedule a watermark update on commit.
    bundleFinalizer.afterBundleCommit(
        Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT),
        updateWatermarkCallback(token, watermarkEstimator));
  } catch (SpannerException e) {
    /*
    If there is a split when a partition is supposed to be finished, the residual will try
    to perform a change stream query for an out of range interval. We ignore this error
    here, and the residual should be able to claim the end of the timestamp range, finishing
    the partition.
    */
    if (isTimestampOutOfRange(e)) {
      LOG.info(
          "[{}] query change stream is out of range for {} to {}, finishing stream.",
          token,
          startTimestamp,
          endTimestamp,
          e);
    } else {
      throw e;
    }
  } catch (Exception e) {
    LOG.error(
        "[{}] query change stream had exception processing range {} to {}.",
        token,
        startTimestamp,
        endTimestamp,
        e);
    throw e;
  }
  LOG.debug("[{}] change stream completed successfully", token);
  // Claiming the end timestamp finalizes this partition's processing.
  if (tracker.tryClaim(endTimestamp)) {
    LOG.debug("[{}] Finishing partition", token);
    partitionMetadataDao.updateToFinished(token);
    metrics.decActivePartitionReadCounter();
    LOG.info("[{}] After attempting to finish the partition", token);
  }
  return ProcessContinuation.stop();
}
|
// When the restriction's "from" is after the partition's start, the query must use the
// restriction timestamp, process both child-partition records, stop on the second
// record's continuation, and update the watermark — without claiming the end timestamp.
@Test
public void testQueryChangeStreamWithRestrictionFromAfterPartitionStart() {
  final Struct rowAsStruct = mock(Struct.class);
  final ChangeStreamResultSetMetadata resultSetMetadata =
      mock(ChangeStreamResultSetMetadata.class);
  final ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
  final ChildPartitionsRecord record1 = mock(ChildPartitionsRecord.class);
  final ChildPartitionsRecord record2 = mock(ChildPartitionsRecord.class);
  // From is after Partition start at
  when(restriction.getFrom()).thenReturn(Timestamp.ofTimeMicroseconds(15L));
  // Both records should be included
  when(record1.getRecordTimestamp()).thenReturn(Timestamp.ofTimeMicroseconds(15L));
  when(record2.getRecordTimestamp()).thenReturn(Timestamp.ofTimeMicroseconds(25L));
  when(changeStreamDao.changeStreamQuery(
          PARTITION_TOKEN,
          Timestamp.ofTimeMicroseconds(15L),
          PARTITION_END_TIMESTAMP,
          PARTITION_HEARTBEAT_MILLIS))
      .thenReturn(resultSet);
  when(resultSet.next()).thenReturn(true);
  when(resultSet.getCurrentRowAsStruct()).thenReturn(rowAsStruct);
  when(resultSet.getMetadata()).thenReturn(resultSetMetadata);
  when(changeStreamRecordMapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata))
      .thenReturn(Arrays.asList(record1, record2));
  // The second record's action signals a stop continuation, ending the run early.
  when(childPartitionsRecordAction.run(
          partition, record2, restrictionTracker, watermarkEstimator))
      .thenReturn(Optional.of(ProcessContinuation.stop()));
  when(watermarkEstimator.currentWatermark()).thenReturn(WATERMARK);
  final ProcessContinuation result =
      action.run(
          partition, restrictionTracker, outputReceiver, watermarkEstimator, bundleFinalizer);
  assertEquals(ProcessContinuation.stop(), result);
  verify(childPartitionsRecordAction)
      .run(partition, record1, restrictionTracker, watermarkEstimator);
  verify(childPartitionsRecordAction)
      .run(partition, record2, restrictionTracker, watermarkEstimator);
  verify(partitionMetadataDao).updateWatermark(PARTITION_TOKEN, WATERMARK_TIMESTAMP);
  // Other record-type actions must not run, and the range must not be claimed.
  verify(dataChangeRecordAction, never()).run(any(), any(), any(), any(), any());
  verify(heartbeatRecordAction, never()).run(any(), any(), any(), any());
  verify(restrictionTracker, never()).tryClaim(any());
}
|
/**
 * Returns the integer set stored under {@code property}, or null when the property is
 * absent or explicitly set to JSON null — distinguishing "missing"/"null" from an empty
 * set.
 */
public static Set<Integer> getIntegerSetOrNull(String property, JsonNode node) {
  JsonNode value = node.get(property);
  if (value == null || value.isNull()) {
    return null;
  }
  return getIntegerSet(property, node);
}
|
// Missing property and explicit JSON null both yield null; a non-integer element raises
// IllegalArgumentException; a valid integer array parses to the expected set.
@Test
public void getIntegerSetOrNull() throws JsonProcessingException {
  assertThat(JsonUtil.getIntegerSetOrNull("items", JsonUtil.mapper().readTree("{}"))).isNull();
  assertThat(
          JsonUtil.getIntegerSetOrNull("items", JsonUtil.mapper().readTree("{\"items\": null}")))
      .isNull();
  assertThatThrownBy(
          () ->
              JsonUtil.getIntegerSetOrNull(
                  "items", JsonUtil.mapper().readTree("{\"items\": [13, \"23\"]}")))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Cannot parse integer from non-int value in items: \"23\"");
  assertThat(
          JsonUtil.getIntegerSetOrNull(
              "items", JsonUtil.mapper().readTree("{\"items\": [23, 45]}")))
      .containsExactlyElementsOf(Arrays.asList(23, 45));
}
|
/**
 * Builds a paginated LIKE query over config_info restricted to the default namespace,
 * appending parameterized LIKE filters for whichever of dataId / group / content are
 * supplied.
 *
 * Bug fix: the group branch previously appended {@code "AND group_id LIKE "} without a
 * {@code ?} placeholder while still adding the value to the parameter list, producing
 * invalid SQL and misaligned parameters whenever a group filter was present.
 */
@Override
public MapperResult findConfigInfoBaseLikeFetchRows(MapperContext context) {
  final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID);
  final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID);
  final String content = (String) context.getWhereParameter(FieldConstant.CONTENT);
  final String sqlFetchRows = "SELECT id,data_id,group_id,tenant_id,content FROM config_info WHERE ";
  String where = " 1=1 AND tenant_id='" + NamespaceUtil.getNamespaceDefaultId() + "' ";
  List<Object> paramList = new ArrayList<>();
  if (!StringUtils.isBlank(dataId)) {
    where += " AND data_id LIKE ? ";
    paramList.add(dataId);
  }
  if (!StringUtils.isBlank(group)) {
    // '?' placeholder keeps the SQL valid and aligned with paramList.
    where += " AND group_id LIKE ? ";
    paramList.add(group);
  }
  if (!StringUtils.isBlank(content)) {
    where += " AND content LIKE ? ";
    paramList.add(content);
  }
  return new MapperResult(sqlFetchRows + where + " LIMIT " + context.getStartRow() + "," + context.getPageSize(),
      paramList);
}
|
// With no filter parameters supplied, the generated SQL must contain only the namespace
// predicate plus paging, and the parameter list must be empty.
@Test
void testFindConfigInfoBaseLikeFetchRows() {
  MapperResult mapperResult = configInfoMapperByMySql.findConfigInfoBaseLikeFetchRows(context);
  assertEquals(mapperResult.getSql(),
      "SELECT id,data_id,group_id,tenant_id,content FROM config_info WHERE 1=1 AND tenant_id='' LIMIT " + startRow + ","
          + pageSize);
  assertArrayEquals(mapperResult.getParamList().toArray(), emptyObjs);
}
|
/**
 * Creates a builder for records of this Avro-backed schema.
 */
@Override
public GenericRecordBuilder newRecordBuilder() {
  return new AvroRecordBuilderImpl(this);
}
|
// Decoding data written with an incompatible writer schema must fail with a
// SchemaSerializationException when multi-version schema support is unavailable.
@Test(expectedExceptions = org.apache.pulsar.client.api.SchemaSerializationException.class)
public void testFailDecodeWithoutMultiVersioningSupport() {
  GenericRecord dataForWriter = writerSchema.newRecordBuilder()
      .set("field1", SchemaTestUtils.TEST_MULTI_VERSION_SCHEMA_STRING)
      .set("field3", 0)
      .build();
  readerSchema.decode(writerSchema.encode(dataForWriter));
}
|
/**
 * REST endpoint: partially updates a user. Only attributes explicitly present in the
 * request body are applied (the *Changed flags track presence, so an explicit null
 * clears the attribute).
 */
@ApiOperation(value = "Update a user", tags = { "Users" }, notes = "All request values are optional. "
    + "For example, you can only include the firstName attribute in the request body JSON-object, only updating the firstName of the user, leaving all other fields unaffected. "
    + "When an attribute is explicitly included and is set to null, the user-value will be updated to null. "
    + "Example: {\"firstName\" : null} will clear the firstName of the user).")
@ApiResponses(value = {
    @ApiResponse(code = 200, message = "Indicates the user was updated."),
    @ApiResponse(code = 404, message = "Indicates the requested user was not found."),
    @ApiResponse(code = 409, message = "Indicates the requested user was updated simultaneously.")
})
@PutMapping(value = "/identity/users/{userId}", produces = "application/json")
public UserResponse updateUser(@ApiParam(name = "userId") @PathVariable String userId, @RequestBody UserRequest userRequest) {
  User user = getUserFromRequest(userId);
  if (userRequest.isEmailChanged()) {
    user.setEmail(userRequest.getEmail());
  }
  if (userRequest.isFirstNameChanged()) {
    user.setFirstName(userRequest.getFirstName());
  }
  if (userRequest.isDisplayNameChanged()) {
    user.setDisplayName(userRequest.getDisplayName());
  }
  if (userRequest.isLastNameChanged()) {
    user.setLastName(userRequest.getLastName());
  }
  // Password changes use the dedicated identity-service call, which also persists the
  // other field updates; otherwise a plain save is performed.
  if (userRequest.isPasswordChanged()) {
    user.setPassword(userRequest.getPassword());
    identityService.updateUserPassword(user);
  } else {
    identityService.saveUser(user);
  }
  return restResponseFactory.createUserResponse(user, false);
}
|
// Full update via the REST endpoint: every supplied attribute (including the password)
// must be reflected both in the JSON response and in the persisted user.
@Test
public void testUpdateUser() throws Exception {
  User savedUser = null;
  try {
    User newUser = identityService.newUser("testuser");
    newUser.setFirstName("Fred");
    newUser.setLastName("McDonald");
    newUser.setEmail("no-reply@flowable.org");
    identityService.saveUser(newUser);
    savedUser = newUser;
    ObjectNode taskUpdateRequest = objectMapper.createObjectNode();
    taskUpdateRequest.put("firstName", "Tijs");
    taskUpdateRequest.put("lastName", "Barrez");
    taskUpdateRequest.put("displayName", "Tijs Barrez");
    taskUpdateRequest.put("email", "no-reply@flowable.org");
    taskUpdateRequest.put("password", "updatedpassword");
    HttpPut httpPut = new HttpPut(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER, newUser.getId()));
    httpPut.setEntity(new StringEntity(taskUpdateRequest.toString()));
    CloseableHttpResponse response = executeRequest(httpPut, HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThat(responseNode).isNotNull();
    assertThatJson(responseNode)
        .when(Option.IGNORING_EXTRA_FIELDS)
        .isEqualTo("{"
            + "id: 'testuser',"
            + "firstName: 'Tijs',"
            + "lastName: 'Barrez',"
            + "displayName: 'Tijs Barrez',"
            + "email: 'no-reply@flowable.org',"
            + "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER, newUser.getId()) + "'"
            + "}");
    // Check user is updated in Flowable
    newUser = identityService.createUserQuery().userId(newUser.getId()).singleResult();
    assertThat(newUser.getLastName()).isEqualTo("Barrez");
    assertThat(newUser.getFirstName()).isEqualTo("Tijs");
    assertThat(newUser.getDisplayName()).isEqualTo("Tijs Barrez");
    assertThat(newUser.getEmail()).isEqualTo("no-reply@flowable.org");
    assertThat(newUser.getPassword()).isEqualTo("updatedpassword");
  } finally {
    // Delete user after test fails
    if (savedUser != null) {
      identityService.deleteUser(savedUser.getId());
    }
  }
}
|
/**
 * Enables or disables size-based weighting of schedulable entities (test hook).
 */
@VisibleForTesting
public void setSizeBasedWeight(boolean sizeBasedWeight) {
  this.sizeBasedWeight = sizeBasedWeight;
}
|
// With size-based weight enabled and otherwise-equal entities, the comparator must fall
// back to start time: equal starts compare as 0, earlier-started entities order first.
@Test
public void testOrderingUsingAppSubmitTime() {
  FairOrderingPolicy<MockSchedulableEntity> policy =
      new FairOrderingPolicy<>();
  policy.setSizeBasedWeight(true);
  MockSchedulableEntity r1 = new MockSchedulableEntity();
  MockSchedulableEntity r2 = new MockSchedulableEntity();
  // R1, R2 has been started at same time
  assertEquals(r1.getStartTime(), r2.getStartTime());
  // No changes, equal
  assertEquals("Comparator Output", 0,
      policy.getComparator().compare(r1, r2));
  // R2 has been started after R1
  r1.setStartTime(5);
  r2.setStartTime(10);
  Assert.assertTrue(policy.getComparator().compare(r1, r2) < 0);
  // R1 has been started after R2
  r1.setStartTime(10);
  r2.setStartTime(5);
  Assert.assertTrue(policy.getComparator().compare(r1, r2) > 0);
}
|
/**
 * Static factory: creates an empty {@code Inner} transform to which fields can be added.
 */
public static <T> Inner<T> create() {
  return new Inner<>();
}
|
// Adding fields to row elements nested inside array and iterable fields must rewrite the
// element schema (new fields nullable, defaulting to null) and preserve existing values.
@Test
@Category(NeedsRunner.class)
public void addNestedCollectionField() {
  Schema nested = Schema.builder().addStringField("field1").build();
  Schema schema =
      Schema.builder()
          .addArrayField("array", Schema.FieldType.row(nested))
          .addIterableField("iter", Schema.FieldType.row(nested))
          .build();
  Row subRow = Row.withSchema(nested).addValue("value").build();
  Row row =
      Row.withSchema(schema)
          .addArray(subRow, subRow)
          .addIterable(ImmutableList.of(subRow, subRow))
          .build();
  // Dotted field names address the element schema of the collection fields.
  PCollection<Row> added =
      pipeline
          .apply(Create.of(row).withRowSchema(schema))
          .apply(
              AddFields.<Row>create()
                  .field("array.field2", Schema.FieldType.INT32)
                  .field("array.field3", Schema.FieldType.array(Schema.FieldType.STRING))
                  .field("iter.field2", Schema.FieldType.INT32)
                  .field("iter.field3", Schema.FieldType.array(Schema.FieldType.STRING)));
  // Added fields become nullable in the resulting element schema.
  Schema expectedNestedSchema =
      Schema.builder()
          .addStringField("field1")
          .addNullableField("field2", Schema.FieldType.INT32)
          .addNullableField("field3", Schema.FieldType.array(Schema.FieldType.STRING))
          .build();
  Schema expectedSchema =
      Schema.builder()
          .addArrayField("array", Schema.FieldType.row(expectedNestedSchema))
          .addIterableField("iter", Schema.FieldType.row(expectedNestedSchema))
          .build();
  assertEquals(expectedSchema, added.getSchema());
  Row expectedNested =
      Row.withSchema(expectedNestedSchema).addValues("value", null, null).build();
  Row expected =
      Row.withSchema(expectedSchema)
          .addArray(expectedNested, expectedNested)
          .addIterable(ImmutableList.of(expectedNested, expectedNested))
          .build();
  PAssert.that(added).containsInAnyOrder(expected);
  pipeline.run();
}
|
/**
 * Returns a serializable function translating a Beam {@code Row} into a Spanner
 * {@code Mutation} of the given operation type against the given table.
 *
 * @param operation the mutation operation to build for each row
 * @param table the target table name
 * @return a row-to-mutation function; it throws {@code IllegalArgumentException}
 *     for an operation it does not recognize
 */
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
    Mutation.Op operation, String table) {
  return row -> {
    // Dispatch on the operation type; DELETE is keyed, the rest carry full row values.
    if (operation == Mutation.Op.INSERT) {
      return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
    }
    if (operation == Mutation.Op.DELETE) {
      return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
    }
    if (operation == Mutation.Op.UPDATE) {
      return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
    }
    if (operation == Mutation.Op.REPLACE) {
      return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
    }
    if (operation == Mutation.Op.INSERT_OR_UPDATE) {
      return MutationUtils.createMutationFromBeamRows(
          Mutation.newInsertOrUpdateBuilder(table), row);
    }
    throw new IllegalArgumentException(
        String.format("Unknown mutation operation type: %s", operation));
  };
}
|
@Test
public void testCreateInsertOrUpdateMutationFromRow() {
  // Applying the generated function to WRITE_ROW must yield the reference mutation.
  Mutation expected = createMutation(Mutation.Op.INSERT_OR_UPDATE);
  Mutation actual = beamRowToMutationFn(Mutation.Op.INSERT_OR_UPDATE, TABLE).apply(WRITE_ROW);
  assertEquals(expected, actual);
}
|
/**
 * Returns staged predictions for every sample in {@code data}: entry
 * {@code prediction[i][j]} is the model output for sample {@code j} using only
 * the first {@code i + 1} trees (intercept {@code b} plus shrunken tree outputs
 * accumulated tree by tree).
 */
public double[][] test(DataFrame data) {
    DataFrame x = formula.x(data);
    int n = x.nrow();
    int ntrees = trees.length;
    double[][] prediction = new double[ntrees][n];
    for (int j = 0; j < n; j++) {
        Tuple xj = x.get(j);
        // Start from the intercept and add each tree's shrunken contribution in order,
        // recording the running total after every tree.
        double base = b;
        for (int i = 0; i < ntrees; i++) {
            base += shrinkage * trees[i].predict(xj);
            prediction[i][j] = base;
        }
    }
    return prediction;
}
|
@Test
public void testAbaloneLAD() {
    // Regression benchmark: LAD loss on the Abalone dataset with expected error 2.2958.
    test(Loss.lad(), "abalone", Abalone.formula, Abalone.train, 2.2958);
}
|
@Override
@CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT,
        allEntries = true) // allEntries evicts the whole cache: the clientId field may change, so a targeted eviction is impractical
public void updateOAuth2Client(OAuth2ClientSaveReqVO updateReqVO) {
    // Validate that the client record exists
    validateOAuth2ClientExists(updateReqVO.getId());
    // Validate that the clientId is not already taken by another client
    validateClientIdExists(updateReqVO.getId(), updateReqVO.getClientId());
    // Perform the update
    OAuth2ClientDO updateObj = BeanUtils.toBean(updateReqVO, OAuth2ClientDO.class);
    oauth2ClientMapper.updateById(updateObj);
}
|
@Test
public void testUpdateOAuth2Client_success() {
    // Mock data
    OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class);
    oauth2ClientMapper.insert(dbOAuth2Client);// @Sql: insert an existing row first
    // Prepare arguments
    OAuth2ClientSaveReqVO reqVO = randomPojo(OAuth2ClientSaveReqVO.class, o -> {
        o.setId(dbOAuth2Client.getId()); // set the ID of the row being updated
        o.setLogo(randomString());
    });
    // Invoke
    oauth2ClientService.updateOAuth2Client(reqVO);
    // Verify the update was applied correctly
    OAuth2ClientDO oAuth2Client = oauth2ClientMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, oAuth2Client);
}
|
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 * Literal MAP / STRING_MAP values are merged recursively (entry by entry);
 * every other parameter kind is overwritten by the incoming definition.
 * A null {@code paramsToMerge} is a no-op.
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
  if (paramsToMerge == null) {
    return;
  }
  // Visit the union of key sets so params present only on either side are handled.
  Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
      .forEach(
          name -> {
            ParamDefinition paramToMerge = paramsToMerge.get(name);
            if (paramToMerge == null) {
              return;
            }
            if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
              // Literal map params: recurse into the nested definitions, propagating the
              // mode of the existing definition (falling back to the incoming one).
              Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
              Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
              mergeParams(
                  baseMap,
                  toMergeMap,
                  MergeContext.copyWithParentMode(
                      context, params.getOrDefault(name, paramToMerge).getMode()));
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else if (paramToMerge.getType() == ParamType.STRING_MAP
                && paramToMerge.isLiteral()) {
              // Literal string maps: incoming entries overwrite base entries key by key.
              Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
              Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
              baseMap.putAll(toMergeMap);
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else {
              // All other types: the incoming value wins (subject to merge validation).
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
            }
          });
}
|
@Test
public void testMergeSubworkflowRestartWithMutableOnStart() throws IOException {
  // On a subworkflow restart, params with MUTABLE_ON_START mode must reject mutation.
  DefaultParamManager defaultParamManager =
      new DefaultParamManager(JsonHelper.objectMapperWithYaml());
  defaultParamManager.init();
  Map<String, ParamDefinition> allParams =
      defaultParamManager.getDefaultParamsForType(StepType.SUBWORKFLOW).get();
  // Attempt to overwrite subworkflow_version with a MUTABLE definition.
  Map<String, ParamDefinition> paramsToMerge =
      parseParamDefMap(
          "{'subworkflow_version': {'value': 'active', 'type': 'STRING', 'mode': 'MUTABLE'}}");
  AssertHelper.assertThrows(
      "throws exception when a subworkflow source restarts and tries to mutate params with MUTABLE_ON_START mode",
      MaestroValidationException.class,
      "Cannot modify param with mode [MUTABLE_ON_START] for parameter [subworkflow_version]",
      () -> ParamsMergeHelper.mergeParams(allParams, paramsToMerge, upstreamRestartMergeContext));
}
|
/**
 * Sweeps the producer group table and removes channels whose last heartbeat is
 * older than {@code CHANNEL_EXPIRED_TIMEOUT}. For each expired channel the
 * client-id index entry is also cleared, listeners are notified, and the channel
 * is closed; a group that becomes empty is removed and unregistered.
 */
public void scanNotActiveChannel() {
    Iterator<Map.Entry<String, ConcurrentHashMap<Channel, ClientChannelInfo>>> iterator = this.groupChannelTable.entrySet().iterator();
    while (iterator.hasNext()) {
        Map.Entry<String, ConcurrentHashMap<Channel, ClientChannelInfo>> entry = iterator.next();
        final String group = entry.getKey();
        final ConcurrentHashMap<Channel, ClientChannelInfo> chlMap = entry.getValue();
        Iterator<Entry<Channel, ClientChannelInfo>> it = chlMap.entrySet().iterator();
        while (it.hasNext()) {
            Entry<Channel, ClientChannelInfo> item = it.next();
            final ClientChannelInfo info = item.getValue();
            long diff = System.currentTimeMillis() - info.getLastUpdateTimestamp();
            if (diff > CHANNEL_EXPIRED_TIMEOUT) {
                // Remove via the iterator to avoid ConcurrentModificationException.
                it.remove();
                // Only drop the clientId index entry if it still points at this channel;
                // the client may have reconnected on a different channel meanwhile.
                Channel channelInClientTable = clientChannelTable.get(info.getClientId());
                if (channelInClientTable != null && channelInClientTable.equals(info.getChannel())) {
                    clientChannelTable.remove(info.getClientId());
                }
                log.warn(
                    "ProducerManager#scanNotActiveChannel: remove expired channel[{}] from ProducerManager groupChannelTable, producer group name: {}",
                    RemotingHelper.parseChannelRemoteAddr(info.getChannel()), group);
                // Notify listeners before the channel is actually closed.
                callProducerChangeListener(ProducerGroupEvent.CLIENT_UNREGISTER, group, info);
                RemotingHelper.closeChannel(info.getChannel());
            }
        }
        if (chlMap.isEmpty()) {
            // Last channel of the group expired: drop the group and notify listeners.
            log.warn("SCAN: remove expired channel from ProducerManager groupChannelTable, all clear, group={}", group);
            iterator.remove();
            callProducerChangeListener(ProducerGroupEvent.GROUP_UNREGISTER, group, null);
        }
    }
}
|
@Test
public void scanNotActiveChannel() throws Exception {
    // Registers one producer, then backdates its heartbeat past the expiry timeout
    // and verifies the scan removes the channel, fires both listener events, and
    // clears the clientId index.
    producerManager.registerProducer(group, clientInfo);
    AtomicReference<String> groupRef = new AtomicReference<>();
    AtomicReference<ClientChannelInfo> clientChannelInfoRef = new AtomicReference<>();
    producerManager.appendProducerChangeListener((event, group, clientChannelInfo) -> {
        switch (event) {
            case GROUP_UNREGISTER:
                groupRef.set(group);
                break;
            case CLIENT_UNREGISTER:
                clientChannelInfoRef.set(clientChannelInfo);
                break;
            default:
                break;
        }
    });
    assertThat(producerManager.getGroupChannelTable().get(group).get(channel)).isNotNull();
    assertThat(producerManager.findChannel("clientId")).isNotNull();
    // Read the private expiry constant reflectively so the test tracks its value.
    Field field = ProducerManager.class.getDeclaredField("CHANNEL_EXPIRED_TIMEOUT");
    field.setAccessible(true);
    long channelExpiredTimeout = field.getLong(producerManager);
    clientInfo.setLastUpdateTimestamp(System.currentTimeMillis() - channelExpiredTimeout - 10);
    when(channel.close()).thenReturn(mock(ChannelFuture.class));
    producerManager.scanNotActiveChannel();
    assertThat(producerManager.getGroupChannelTable().get(group)).isNull();
    assertThat(groupRef.get()).isEqualTo(group);
    assertThat(clientChannelInfoRef.get()).isSameAs(clientInfo);
    assertThat(producerManager.findChannel("clientId")).isNull();
}
|
/**
 * Parses a primitive type name (case-insensitive) into its {@code PrimitiveType}.
 * Supports the fixed-name primitives plus the parameterized forms
 * {@code fixed[N]} and {@code decimal(P, S)}.
 *
 * @throws IllegalArgumentException if the string matches no known primitive form
 */
public static PrimitiveType fromPrimitiveString(String typeString) {
  final String normalized = typeString.toLowerCase(Locale.ROOT);
  // Exact-name primitives first.
  if (TYPES.containsKey(normalized)) {
    return TYPES.get(normalized);
  }
  // Parameterized forms: fixed[N], then decimal(P, S).
  final Matcher fixedMatcher = FIXED.matcher(normalized);
  if (fixedMatcher.matches()) {
    return FixedType.ofLength(Integer.parseInt(fixedMatcher.group(1)));
  }
  final Matcher decimalMatcher = DECIMAL.matcher(normalized);
  if (decimalMatcher.matches()) {
    return DecimalType.of(
        Integer.parseInt(decimalMatcher.group(1)), Integer.parseInt(decimalMatcher.group(2)));
  }
  throw new IllegalArgumentException("Cannot parse type string to primitive: " + typeString);
}
|
@Test
public void fromPrimitiveString() {
  // Named primitives resolve to singletons, case-insensitively.
  assertThat(Types.fromPrimitiveString("boolean")).isSameAs(Types.BooleanType.get());
  assertThat(Types.fromPrimitiveString("BooLean")).isSameAs(Types.BooleanType.get());
  assertThat(Types.fromPrimitiveString("timestamp")).isSameAs(Types.TimestampType.withoutZone());
  // Parameterized forms tolerate interior whitespace.
  assertThat(Types.fromPrimitiveString("Fixed[ 3 ]")).isEqualTo(Types.FixedType.ofLength(3));
  assertThat(Types.fromPrimitiveString("Decimal( 2 , 3 )")).isEqualTo(Types.DecimalType.of(2, 3));
  assertThat(Types.fromPrimitiveString("Decimal(2,3)")).isEqualTo(Types.DecimalType.of(2, 3));
  // Unknown names fail with the offending input in the message.
  assertThatExceptionOfType(IllegalArgumentException.class)
      .isThrownBy(() -> Types.fromPrimitiveString("Unknown"))
      .withMessageContaining("Unknown");
}
|
/** Convenience overload delegating to the two-argument variant with its flag enabled. */
MethodSpec buildFunction(AbiDefinition functionDefinition) throws ClassNotFoundException {
    return this.buildFunction(functionDefinition, true);
}
|
@Test
public void testBuildFunctionConstantSingleValueReturn() throws Exception {
    // A constant (view) function with a single return value must generate a
    // RemoteFunctionCall wrapper using executeRemoteCallSingleValueReturn.
    AbiDefinition functionDefinition =
            new AbiDefinition(
                    true,
                    Arrays.asList(new NamedType("param", "uint8")),
                    "functionName",
                    Arrays.asList(new NamedType("result", "int8")),
                    "type",
                    false);
    MethodSpec methodSpec = solidityFunctionWrapper.buildFunction(functionDefinition);
    // Expected generated source, compared verbatim.
    String expected =
            "public org.web3j.protocol.core.RemoteFunctionCall<java.math.BigInteger> functionName(\n"
                    + "    java.math.BigInteger param) {\n"
                    + "  final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
                    + "      java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
                    + "      java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.generated.Int8>() {}));\n"
                    + "  return executeRemoteCallSingleValueReturn(function, java.math.BigInteger.class);\n"
                    + "}\n";
    assertEquals((expected), methodSpec.toString());
}
|
/**
 * Exports this service unless it is already exported. Uses a check /
 * synchronize / re-check pattern so concurrent callers export at most once.
 * NOTE(review): the first {@code exported} read happens outside the lock —
 * presumably the flag is volatile or a benign race; confirm in the field declaration.
 */
@Override
public void export(RegisterTypeEnum registerType) {
    if (this.exported) {
        return;
    }
    if (getScopeModel().isLifeCycleManagedExternally()) {
        // prepare model for reference
        getScopeModel().getDeployer().prepare();
    } else {
        // ensure start module, compatible with old api usage
        getScopeModel().getDeployer().start();
    }
    synchronized (this) {
        // Re-check under the lock: another thread may have finished exporting.
        if (this.exported) {
            return;
        }
        if (!this.isRefreshed()) {
            this.refresh();
        }
        if (this.shouldExport()) {
            this.init();
            if (shouldDelay()) {
                // should register if delay export
                doDelayExport();
            } else if (Integer.valueOf(-1).equals(getDelay())
                    && Boolean.parseBoolean(ConfigurationUtils.getProperty(
                            getScopeModel(), CommonConstants.DUBBO_MANUAL_REGISTER_KEY, "false"))) {
                // delay == -1 with manual-register enabled: export but defer registration
                doExport(RegisterTypeEnum.MANUAL_REGISTER);
            } else {
                doExport(registerType);
            }
        }
    }
}
|
@Test
void testMethodConfigWithUnmatchedArgument() {
Assertions.assertThrows(IllegalArgumentException.class, () -> {
ServiceConfig<DemoServiceImpl> service = new ServiceConfig<>();
service.setInterface(DemoService.class);
service.setRef(new DemoServiceImpl());
service.setProtocol(new ProtocolConfig() {
{
setName("dubbo");
}
});
MethodConfig methodConfig = new MethodConfig();
methodConfig.setName("sayName");
// invalid argument index.
methodConfig.setArguments(Lists.newArrayList(new ArgumentConfig() {
{
setType(Integer.class.getName());
setIndex(0);
}
}));
service.setMethods(Lists.newArrayList(methodConfig));
service.export();
});
}
|
/**
 * Looks up the attributes of {@code file} by listing its parent and filtering.
 * The root and container paths have no queryable attributes and return
 * {@code PathAttributes.EMPTY}.
 *
 * @throws NotfoundException if no matching entry is found in the parent listing
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(new DefaultPathContainerService().isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    // Placeholders are stored without their extension; strip it for the lookup query.
    final Path query;
    if(file.isPlaceholder()) {
        query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
    }
    else {
        query = file;
    }
    // Shared (team) drives need the dedicated listing service.
    final AttributedList<Path> list;
    if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
        list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
    }
    else {
        list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
    }
    // Match against the original file (not the query) honoring server case sensitivity.
    final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
    if(null == found) {
        throw new NotfoundException(file.getAbsolute());
    }
    return found.attributes();
}
|
@Test
public void testFind() throws Exception {
    // Touch a fresh file, then verify its attributes can be found (size 0,
    // file id present, no version id) before cleaning up.
    final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    new DriveTouchFeature(session, fileid).touch(test, new TransferStatus());
    final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, fileid);
    final PathAttributes attributes = f.find(test);
    assertEquals(0L, attributes.getSize());
    assertNotNull(attributes.getFileId());
    assertNull(attributes.getVersionId());
    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/** Returns all bookmark groups without any filtering applied. */
public Map<String, List<Host>> groups() {
    return groups(HostFilter.NONE);
}
|
@Test
public void testGroups() {
    // Grouping by label is case-sensitive ("a" and "A" are distinct groups) and
    // hosts within a group are ordered; group insertion order is preserved.
    final AbstractHostCollection c = new AbstractHostCollection() {
    };
    final Host bookmarkGroupa1 = new Host(new TestProtocol(), "h", new Credentials("u"));
    bookmarkGroupa1.setLabels(Collections.singleton("a"));
    final Host bookmarkGroupA1 = new Host(new TestProtocol(), "h", new Credentials("u"));
    bookmarkGroupA1.setLabels(Collections.singleton("A"));
    bookmarkGroupA1.setNickname("a");
    final Host bookmarkGroupA2 = new Host(new TestProtocol(), "h", new Credentials("u"));
    bookmarkGroupA2.setLabels(Collections.singleton("A"));
    bookmarkGroupA2.setNickname("b");
    final Host bookmarkGroupB = new Host(new TestProtocol(), "h", new Credentials("u"));
    bookmarkGroupB.setLabels(Collections.singleton("B"));
    // Deliberately added out of order to exercise group/host ordering.
    c.add(bookmarkGroupa1);
    c.add(bookmarkGroupB);
    c.add(bookmarkGroupA2);
    c.add(bookmarkGroupA1);
    final Map<String, List<Host>> groups = c.groups();
    assertEquals("a", groups.keySet().toArray()[0]);
    assertEquals("A", groups.keySet().toArray()[1]);
    assertEquals("a", groups.get("A").toArray(new Host[0])[0].getNickname());
    assertEquals("b", groups.get("A").toArray(new Host[0])[1].getNickname());
    assertEquals("B", groups.keySet().toArray()[2]);
}
|
/**
 * Polls fragments up to {@code limitPosition}, dispatching each to {@code handler}
 * and honoring the handler's control actions:
 * ABORT rolls back the current fragment and stops; BREAK stops after the fragment;
 * COMMIT eagerly publishes the subscriber position at the fragment boundary.
 * The subscriber position is always advanced in the finally block to cover any
 * progress not already committed.
 *
 * @return the number of fragments delivered (0 if closed or already at the limit)
 */
public int boundedControlledPoll(
    final ControlledFragmentHandler handler, final long limitPosition, final int fragmentLimit)
{
    if (isClosed)
    {
        return 0;
    }
    long initialPosition = subscriberPosition.get();
    if (initialPosition >= limitPosition)
    {
        return 0;
    }
    int fragmentsRead = 0;
    // Map the stream position into a term-buffer offset.
    int initialOffset = (int)initialPosition & termLengthMask;
    int offset = initialOffset;
    final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition);
    // Clamp the scan to both the term capacity and the caller's position bound.
    final int limitOffset = (int)Math.min(termBuffer.capacity(), (limitPosition - initialPosition) + offset);
    final Header header = this.header;
    header.buffer(termBuffer);
    try
    {
        while (fragmentsRead < fragmentLimit && offset < limitOffset)
        {
            final int length = frameLengthVolatile(termBuffer, offset);
            if (length <= 0)
            {
                // No complete frame available yet.
                break;
            }
            final int frameOffset = offset;
            final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT);
            offset += alignedLength;
            // Padding frames advance the offset but are not delivered.
            if (isPaddingFrame(termBuffer, frameOffset))
            {
                continue;
            }
            ++fragmentsRead;
            header.offset(frameOffset);
            final Action action = handler.onFragment(
                termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header);
            if (ABORT == action)
            {
                // Undo this fragment so it is redelivered on the next poll.
                --fragmentsRead;
                offset -= alignedLength;
                break;
            }
            if (BREAK == action)
            {
                break;
            }
            if (COMMIT == action)
            {
                // Publish progress up to and including this fragment.
                initialPosition += (offset - initialOffset);
                initialOffset = offset;
                subscriberPosition.setOrdered(initialPosition);
            }
        }
    }
    catch (final Exception ex)
    {
        errorHandler.onError(ex);
    }
    finally
    {
        // Publish any progress not already committed inside the loop.
        final long resultingPosition = initialPosition + (offset - initialOffset);
        if (resultingPosition > initialPosition)
        {
            subscriberPosition.setOrdered(resultingPosition);
        }
    }
    return fragmentsRead;
}
|
@Test
void shouldPollFragmentsToBoundedControlledFragmentHandlerWithMaxPositionAfterEndOfTerm()
{
    // Lay out one data frame followed by a padding frame at the end of the term;
    // with the bound past the term end only the data frame is delivered and the
    // position advances over the padding to the term boundary.
    final int initialOffset = TERM_BUFFER_LENGTH - (ALIGNED_FRAME_LENGTH * 2);
    final long initialPosition = computePosition(
        INITIAL_TERM_ID, initialOffset, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID);
    final long maxPosition = initialPosition + TERM_BUFFER_LENGTH;
    position.setOrdered(initialPosition);
    final Image image = createImage();
    insertDataFrame(INITIAL_TERM_ID, initialOffset);
    insertPaddingFrame(INITIAL_TERM_ID, initialOffset + ALIGNED_FRAME_LENGTH);
    when(mockControlledFragmentHandler.onFragment(any(DirectBuffer.class), anyInt(), anyInt(), any(Header.class)))
        .thenReturn(Action.CONTINUE);
    final int fragmentsRead = image.boundedControlledPoll(
        mockControlledFragmentHandler, maxPosition, Integer.MAX_VALUE);
    assertThat(fragmentsRead, is(1));
    final InOrder inOrder = Mockito.inOrder(position, mockControlledFragmentHandler);
    inOrder.verify(mockControlledFragmentHandler).onFragment(
        any(UnsafeBuffer.class), eq(initialOffset + HEADER_LENGTH), eq(DATA.length), any(Header.class));
    inOrder.verify(position).setOrdered(TERM_BUFFER_LENGTH);
}
|
/**
 * Walks the cause chain of {@code source} (starting at {@code source} itself) and
 * returns the first throwable assignable to any of the given exception classes,
 * or {@code null} if no link in the chain matches (or {@code source} is null).
 *
 * @param source the throwable whose cause chain is searched; may be null
 * @param clazzes candidate exception classes, tested in order for each link
 * @return the first matching throwable, cast to Exception, or null
 */
@SafeVarargs // fix: the method only reads the varargs array, never stores into it;
             // without the annotation every call site emits an unchecked generic-array warning
public static Exception lookupExceptionInCause(Throwable source, Class<? extends Exception>... clazzes) {
    while (source != null) {
        for (Class<? extends Exception> clazz : clazzes) {
            if (clazz.isAssignableFrom(source.getClass())) {
                return (Exception) source;
            }
        }
        source = source.getCause();
    }
    return null;
}
|
@Test
void givenNoCauseAndExceptionIsWantedCauseClass_whenLookupExceptionInCause_thenReturnSelf() {
    // The chain walk starts at the source itself, so a direct match returns it unchanged.
    assertThat(ExceptionUtil.lookupExceptionInCause(cause, RuntimeException.class)).isSameAs(cause);
}
|
/**
 * Wraps HTTP requests in a {@code ServletRequestWrapper} before delegating down
 * the chain; any failure is converted to a 500 response unless the response was
 * already committed (client aborted), in which case nothing more can be sent.
 * Non-HTTP requests pass through unwrapped.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
    if (request instanceof HttpServletRequest httpRequest) {
        HttpServletResponse httpResponse = (HttpServletResponse) response;
        try {
            chain.doFilter(new ServletRequestWrapper(httpRequest), httpResponse);
        } catch (Throwable e) {
            if (httpResponse.isCommitted()) {
                // Request has been aborted by the client, nothing can been done as Tomcat has committed the response
                LOGGER.debug(format("Processing of request %s failed", toUrl(httpRequest)), e);
                return;
            }
            LOGGER.error(format("Processing of request %s failed", toUrl(httpRequest)), e);
            httpResponse.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    } else {
        // Not an HTTP request, not profiled
        chain.doFilter(request, response);
    }
}
|
@Test
public void request_used_in_chain_do_filter_is_a_servlet_wrapper_when_service_call() {
    // The filter must pass a ServletRequestWrapper (not the raw request) down the chain.
    underTest.doFilter(request("POST", "/context/service/call", "param=value"), mock(HttpServletResponse.class), chain);
    ArgumentCaptor<ServletRequest> requestArgumentCaptor = ArgumentCaptor.forClass(ServletRequest.class);
    verify(chain).doFilter(requestArgumentCaptor.capture(), any(HttpServletResponse.class));
    assertThat(requestArgumentCaptor.getValue()).isInstanceOf(RootFilter.ServletRequestWrapper.class);
}
|
/** Delegates unit-support checks to the wrapped {@code OffsetTime}. */
@Override
public boolean isSupported(final TemporalUnit unit) {
    return this.offsetTime.isSupported(unit);
}
|
@Test
void isSupportedTemporalField() {
    // The zoned wrapper must report identical field support to its underlying OffsetTime.
    for (final ChronoField chronoField : ChronoField.values()) {
        assertEquals(offsetTime.isSupported(chronoField), zoneTime.isSupported(chronoField));
    }
}
|
/** Formats the event's timestamp using the cached date formatter. */
public String convert(ILoggingEvent le) {
    final long eventTimestamp = le.getTimeStamp();
    return cachingDateFormatter.format(eventTimestamp);
}
|
@Test
public void convertsDateWithSpecifiedLocaleLangAndCountry() {
    // A "lang,COUNTRY" locale option must produce locale-specific date output (zh_CN, UTC).
    assertThat(convert(_timestamp, DATETIME_PATTERN, "UTC", "zh,CN"), matchesPattern(CHINESE_TIME_UTC));
}
|
/**
 * Returns the boxed default value for a primitive class ({@code 0} for
 * {@code int.class}, {@code false} for {@code boolean.class}, etc.), or
 * {@code null} for anything else ({@code void.class}, wrapper types,
 * reference types).
 *
 * @param clazz the class to look up (fix: typed as {@code Class<?>} instead of
 *     the raw {@code Class}; source-compatible for all existing callers)
 * @return the primitive's default value boxed, or null for non-primitives
 */
public static Object getDefaultPrimitiveValue(Class<?> clazz) {
    // Identity comparison is correct: primitive class objects are singletons.
    if (clazz == int.class) {
        return 0;
    } else if (clazz == boolean.class) {
        return false;
    } else if (clazz == long.class) {
        return 0L;
    } else if (clazz == byte.class) {
        return (byte) 0;
    } else if (clazz == double.class) {
        return 0d;
    } else if (clazz == short.class) {
        return (short) 0;
    } else if (clazz == float.class) {
        return 0f;
    } else if (clazz == char.class) {
        return (char) 0;
    } else {
        return null;
    }
}
|
@Test
public void getDefaultPrimitiveValue() throws Exception {
    // Each primitive class maps to its boxed default; everything else maps to null.
    Assert.assertEquals((short) 0, ClassUtils.getDefaultPrimitiveValue(short.class));
    Assert.assertEquals(0, ClassUtils.getDefaultPrimitiveValue(int.class));
    Assert.assertEquals(0l, ClassUtils.getDefaultPrimitiveValue(long.class));
    Assert.assertEquals(0d, ClassUtils.getDefaultPrimitiveValue(double.class));
    Assert.assertEquals(0f, ClassUtils.getDefaultPrimitiveValue(float.class));
    Assert.assertEquals((byte) 0, ClassUtils.getDefaultPrimitiveValue(byte.class));
    Assert.assertEquals((char) 0, ClassUtils.getDefaultPrimitiveValue(char.class));
    Assert.assertEquals(false, ClassUtils.getDefaultPrimitiveValue(boolean.class));
    // Non-primitive classes (including void and wrappers) yield null.
    Assert.assertEquals(null, ClassUtils.getDefaultPrimitiveValue(Void.class));
    Assert.assertEquals(null, ClassUtils.getDefaultPrimitiveValue(void.class));
    Assert.assertEquals(null, ClassUtils.getDefaultPrimitiveValue(String.class));
}
|
/**
 * Lazily initializes and returns the singleton ability control manager.
 * Synchronization on the class guarantees at-most-once initialization.
 */
public static synchronized AbstractAbilityControlManager getInstance() {
    // First caller triggers initialization; subsequent callers reuse the instance.
    if (null == abstractAbilityControlManager) {
        initAbilityControlManager();
    }
    return abstractAbilityControlManager;
}
|
@Test
void testGetInstance() {
    // The holder must always hand back a non-null (lazily created) singleton.
    assertNotNull(NacosAbilityManagerHolder.getInstance());
}
|
/**
 * Starts an SDK harness worker on a daemon thread and acknowledges the caller
 * immediately. If a provision endpoint is supplied, provision info may override
 * the control/logging endpoints and supplies the runner capabilities.
 */
@Override
public void startWorker(
    StartWorkerRequest request, StreamObserver<StartWorkerResponse> responseObserver) {
  LOG.info(
      "Starting worker {} pointing at {}.",
      request.getWorkerId(),
      request.getControlEndpoint().getUrl());
  LOG.debug("Worker request {}.", request);
  Endpoints.ApiServiceDescriptor loggingEndpoint = request.getLoggingEndpoint();
  Endpoints.ApiServiceDescriptor controlEndpoint = request.getControlEndpoint();
  Set<String> runnerCapabilites = Collections.emptySet();
  if (request.hasProvisionEndpoint()) {
    // Query the provision service (tagging requests with the worker id) for
    // capabilities and possible endpoint overrides.
    ManagedChannelFactory channelFactory =
        ManagedChannelFactory.createDefault()
            .withInterceptors(
                ImmutableList.of(AddHarnessIdInterceptor.create(request.getWorkerId())));
    ProvisionServiceGrpc.ProvisionServiceBlockingStub provisionStub =
        ProvisionServiceGrpc.newBlockingStub(
            channelFactory.forDescriptor(request.getProvisionEndpoint()));
    ProvisionApi.ProvisionInfo provisionInfo =
        provisionStub
            .getProvisionInfo(ProvisionApi.GetProvisionInfoRequest.newBuilder().build())
            .getInfo();
    runnerCapabilites = Sets.newHashSet(provisionInfo.getRunnerCapabilitiesList());
    // Provision info takes precedence over the endpoints in the request.
    if (provisionInfo.hasControlEndpoint()) {
      controlEndpoint = provisionInfo.getControlEndpoint();
    }
    if (provisionInfo.hasLoggingEndpoint()) {
      loggingEndpoint = provisionInfo.getLoggingEndpoint();
    }
  }
  // Lambda closured variables must be final.
  final Endpoints.ApiServiceDescriptor logEndpoint = loggingEndpoint;
  final Endpoints.ApiServiceDescriptor ctrlEndpoint = controlEndpoint;
  final Set<String> capabilities = runnerCapabilites;
  // Run the harness on its own daemon thread so this RPC can return immediately;
  // startup failures are only logged, not reported to the caller.
  Thread th =
      new Thread(
          () -> {
            try {
              FnHarness.main(
                  request.getWorkerId(), options, capabilities, logEndpoint, ctrlEndpoint, null);
              LOG.info("Successfully started worker {}.", request.getWorkerId());
            } catch (Exception exn) {
              LOG.error(String.format("Failed to start worker %s.", request.getWorkerId()), exn);
            }
          });
  th.setName("SDK-worker-" + request.getWorkerId());
  th.setDaemon(true);
  th.start();
  responseObserver.onNext(StartWorkerResponse.newBuilder().build());
  responseObserver.onCompleted();
}
|
@Test
public void startWorker() {
  // startWorker must always acknowledge with one response and complete the stream.
  PipelineOptions options = PipelineOptionsFactory.create();
  StartWorkerRequest request = StartWorkerRequest.getDefaultInstance();
  StreamObserver<StartWorkerResponse> responseObserver = mock(StreamObserver.class);
  ExternalWorkerService service = new ExternalWorkerService(options);
  service.startWorker(request, responseObserver);
  verify(responseObserver).onNext(any(StartWorkerResponse.class));
  verify(responseObserver).onCompleted();
}
|
/**
 * Wraps the given varargs array into a sequential {@code Stream}.
 * An empty array yields an empty stream; a null array is rejected.
 *
 * @throws IllegalArgumentException if {@code array} is null (via Assert.notNull)
 */
@SafeVarargs
public static <T> Stream<T> of(T... array) {
    Assert.notNull(array, "Array must be not null!");
    return Stream.of(array);
}
|
@SuppressWarnings({"RedundantOperationOnEmptyContainer", "RedundantCollectionOperation"})
@Test
public void streamTestEmptyListToIterator() {
    // An iterator over an empty list must produce an empty stream.
    assertStreamIsEmpty(StreamUtil.of(new ArrayList<>().iterator()));
}
|
/**
 * Parses a schema definition from its textual form.
 *
 * @param schema the schema DSL text
 * @return the parsed schema
 * @throws IOException if tokenizing or parsing fails
 */
public static HollowSchema parseSchema(String schema) throws IOException {
    // Tokenize the text, then hand off to the tokenizer-based parser.
    StreamTokenizer tok = new StreamTokenizer(new StringReader(schema));
    configureTokenizer(tok);
    return parseSchema(tok);
}
|
@Test
public void parsesMapSchemaWithPrimaryKey() throws IOException {
    // A Map schema with @HashKey must expose key/value types and the hash key,
    // and round-trip through toString()/parseSchema().
    String listSchema = "MapOfStringToTypeA Map<String, TypeA> @HashKey(value);\n";
    HollowMapSchema schema = (HollowMapSchema) HollowSchemaParser.parseSchema(listSchema);
    Assert.assertEquals("MapOfStringToTypeA", schema.getName());
    Assert.assertEquals("String", schema.getKeyType());
    Assert.assertEquals("TypeA", schema.getValueType());
    Assert.assertEquals(new PrimaryKey("String", "value"), schema.getHashKey());
    Assert.assertEquals(schema, HollowSchemaParser.parseSchema(schema.toString()));
}
|
/**
 * Invokes {@code retryable} with exponential backoff until it succeeds, the
 * deadline ({@code retryBackoffMaxMs} from now) passes, or an unretryable error
 * occurs. The first error encountered is the one thrown.
 * NOTE(review): each retryable failure is logged twice at warn level (once on
 * catch and again with the sleep message) — consider consolidating.
 */
public R execute(Retryable<R> retryable) throws ExecutionException {
    long endMs = time.milliseconds() + retryBackoffMaxMs;
    int currAttempt = 0;
    ExecutionException error = null;
    while (time.milliseconds() <= endMs) {
        currAttempt++;
        try {
            return retryable.call();
        } catch (UnretryableException e) {
            // We've deemed this error to not be worth retrying, so collect the error and
            // fail immediately.
            if (error == null)
                error = new ExecutionException(e);
            break;
        } catch (ExecutionException e) {
            log.warn("Error during retry attempt {}", currAttempt, e);
            // Only the first failure is preserved for the caller.
            if (error == null)
                error = e;
            // Exponential backoff: wait doubles each attempt, clamped to the deadline.
            long waitMs = retryBackoffMs * (long) Math.pow(2, currAttempt - 1);
            long diff = endMs - time.milliseconds();
            waitMs = Math.min(waitMs, diff);
            if (waitMs <= 0)
                break;
            String message = String.format("Attempt %d to make call resulted in an error; sleeping %d ms before retrying",
                    currAttempt, waitMs);
            log.warn(message, e);
            time.sleep(waitMs);
        }
    }
    if (error == null)
        // Really shouldn't ever get to here, but...
        error = new ExecutionException(new IllegalStateException("Exhausted all retry attempts but no attempt returned value or encountered exception"));
    throw error;
}
|
@Test
public void testRuntimeExceptionFailureOnLastAttempt() {
    // Two retryable IOExceptions then an unexpected NPE: the NPE must propagate
    // as-is, after exactly two backoff sleeps (1s then 2s) on the mock clock.
    Exception[] attempts = new Exception[] {
        new IOException("pretend connect error"),
        new IOException("pretend timeout error"),
        new NullPointerException("pretend JSON node /userId in response is null")
    };
    long retryWaitMs = 1000;
    long maxWaitMs = 10000;
    Retryable<String> call = createRetryable(attempts);
    Time time = new MockTime(0, 0, 0);
    assertEquals(0L, time.milliseconds());
    Retry<String> r = new Retry<>(time, retryWaitMs, maxWaitMs);
    assertThrows(RuntimeException.class, () -> r.execute(call));
    // Mock time advances only via sleep, so elapsed time equals total backoff.
    long secondWait = retryWaitMs * 2;
    long totalWait = retryWaitMs + secondWait;
    assertEquals(totalWait, time.milliseconds());
}
|
/**
 * Requires restart actions for every cluster whose node resources change, between
 * the previous and next model, in a way that needs a restart. Clusters that are
 * new or removed (missing resources on either side) are skipped.
 */
@Override
public void validate(ChangeContext context) {
    for (ClusterSpec.Id clusterId : context.previousModel().allClusters()) {
        Optional<NodeResources> current = resourcesOf(clusterId, context.previousModel());
        Optional<NodeResources> next = resourcesOf(clusterId, context.model());
        // New or removed cluster: nothing to compare.
        if (current.isEmpty() || next.isEmpty()) continue;
        if (changeRequiresRestart(current.get(), next.get())) {
            createRestartActionsFor(clusterId, context.previousModel()).forEach(context::require);
        }
    }
}
|
@Test
void test_restart_action_count() {
    // Each of the four resource dimensions changing contributes a fixed number of
    // restart actions; combinations add up.
    assertEquals(0, validate(model(1, 1, 1, 1), model(1, 1, 1, 1)).size());
    assertEquals(1, validate(model(1, 1, 1, 1), model(2, 1, 1, 1)).size());
    assertEquals(2, validate(model(1, 1, 1, 1), model(1, 2, 1, 1)).size());
    assertEquals(3, validate(model(1, 1, 1, 1), model(1, 1, 2, 1)).size());
    assertEquals(4, validate(model(1, 1, 1, 1), model(1, 1, 1, 2)).size());
    assertEquals(5, validate(model(1, 1, 1, 1), model(2, 1, 1, 2)).size());
    assertEquals(6, validate(model(1, 1, 1, 1), model(1, 2, 1, 2)).size());
    assertEquals(7, validate(model(1, 1, 1, 1), model(1, 1, 2, 2)).size());
    assertEquals(8, validate(model(1, 1, 1, 1), model(2, 1, 2, 2)).size());
    assertEquals(9, validate(model(1, 1, 1, 1), model(1, 2, 2, 2)).size());
    assertEquals(10, validate(model(1, 1, 1, 1), model(2, 2, 2, 2)).size());
}
|
/** Returns the transform decoding raw Kafka records into schema'd {@code Row}s. */
protected abstract PTransform<PCollection<KafkaRecord<byte[], byte[]>>, PCollection<Row>>
    getPTransformForInput();
|
@Test
public void testRecorderDecoder() throws Exception {
  // Encoded payloads fed through the table's input transform must decode back
  // into the corresponding rows.
  BeamKafkaTable kafkaTable = getBeamKafkaTable();
  PCollection<Row> result =
      pipeline
          .apply(Create.of(generateEncodedPayload(1), generateEncodedPayload(2)))
          .apply(MapElements.via(new BytesToRecord()))
          .setCoder(KafkaRecordCoder.of(ByteArrayCoder.of(), ByteArrayCoder.of()))
          .apply(kafkaTable.getPTransformForInput());
  PAssert.that(result).containsInAnyOrder(generateRow(1), generateRow(2));
  pipeline.run();
}
|
/**
 * Returns the primary date token converter from the converter chain,
 * or {@code null} if the pattern contains none.
 */
public DateTokenConverter<Object> getPrimaryDateTokenConverter() {
    // Walk the linked converter chain, returning the first primary date converter.
    for (Converter<Object> c = headTokenConverter; c != null; c = c.getNext()) {
        if (c instanceof DateTokenConverter) {
            DateTokenConverter<Object> candidate = (DateTokenConverter<Object>) c;
            // only primary converters should be returned as
            if (candidate.isPrimary()) {
                return candidate;
            }
        }
    }
    return null;
}
|
@Test
public void nullTimeZoneByDefault() {
    // A %d token without an explicit zone option must leave the zone id unset.
    FileNamePattern fnp = new FileNamePattern("%d{hh}", context);
    assertNull(fnp.getPrimaryDateTokenConverter().getZoneId());
}
|
/** Returns the shared singleton coder instance. */
public static MetadataCoderV2 of() {
  return INSTANCE;
}
|
@Test
public void testEncodeDecodeWithCustomLastModifiedMills() throws Exception {
  // Metadata with an explicit lastModifiedMillis must survive an encode/decode
  // round trip through MetadataCoderV2 unchanged.
  Path filePath = tmpFolder.newFile("somefile").toPath();
  Metadata metadata =
      Metadata.builder()
          .setResourceId(
              FileSystems.matchNewResource(filePath.toString(), false /* isDirectory */))
          .setIsReadSeekEfficient(true)
          .setSizeBytes(1024)
          .setLastModifiedMillis(1541097000L)
          .build();
  CoderProperties.coderDecodeEncodeEqual(MetadataCoderV2.of(), metadata);
}
|
/**
 * Returns the log directories belonging to workers that are not alive at
 * {@code nowSecs}; an empty input yields an empty (mutable) sorted set.
 */
@VisibleForTesting
SortedSet<Path> getDeadWorkerDirs(int nowSecs, Set<Path> logDirs) throws Exception {
    // Guard clause: nothing to scan.
    if (logDirs.isEmpty()) {
        return new TreeSet<>();
    }
    // A directory is dead when its worker id is not among the currently alive ids.
    final Set<String> aliveIds = workerLogs.getAliveIds(nowSecs);
    return workerLogs.getLogDirs(logDirs, workerId -> !aliveIds.contains(workerId));
}
|
@Test
public void testGetDeadWorkerDirs() throws Exception {
    // Worker "42" has a recent heartbeat (alive); workers "007" and "" do not,
    // so only their directories must be reported as dead.
    Map<String, Object> stormConf = Utils.readStormConfig();
    stormConf.put(SUPERVISOR_WORKER_TIMEOUT_SECS, 5);
    LSWorkerHeartbeat hb = new LSWorkerHeartbeat();
    hb.set_time_secs(1);
    Map<String, LSWorkerHeartbeat> idToHb = Collections.singletonMap("42", hb);
    int nowSecs = 2;
    try (TmpPath testDir = new TmpPath()) {
        Path unexpectedDir1 = createDir(testDir.getFile().toPath(), "dir1");
        Path expectedDir2 = createDir(testDir.getFile().toPath(), "dir2");
        Path expectedDir3 = createDir(testDir.getFile().toPath(), "dir3");
        Set<Path> logDirs = Sets.newSet(unexpectedDir1, expectedDir2, expectedDir3);
        SupervisorUtils mockedSupervisorUtils = mock(SupervisorUtils.class);
        SupervisorUtils.setInstance(mockedSupervisorUtils);
        Map<String, Object> conf = Utils.readStormConfig();
        StormMetricsRegistry metricRegistry = new StormMetricsRegistry();
        // Stub getLogDirs so each probe worker id maps to a known directory,
        // letting the test observe which ids the predicate accepts.
        WorkerLogs stubbedWorkerLogs = new WorkerLogs(conf, Paths.get(""), metricRegistry) {
            @Override
            public SortedSet<Path> getLogDirs(Set<Path> logDirs, Predicate<String> predicate) {
                TreeSet<Path> ret = new TreeSet<>();
                if (predicate.test("42")) {
                    ret.add(unexpectedDir1);
                }
                if (predicate.test("007")) {
                    ret.add(expectedDir2);
                }
                if (predicate.test("")) {
                    ret.add(expectedDir3);
                }
                return ret;
            }
        };
        LogCleaner logCleaner = new LogCleaner(conf, stubbedWorkerLogs, new DirectoryCleaner(metricRegistry), null, metricRegistry);
        when(mockedSupervisorUtils.readWorkerHeartbeatsImpl(anyMap())).thenReturn(idToHb);
        assertEquals(Sets.newSet(expectedDir2, expectedDir3), logCleaner.getDeadWorkerDirs(nowSecs, logDirs));
    } finally {
        // Always restore the global SupervisorUtils instance.
        SupervisorUtils.resetInstance();
    }
}
|
/**
 * Resolves the Assertion Consumer Service URL for the request. Resolution order:
 * explicit URL in the AuthnRequest; then the AuthnRequest's ACS index against the
 * metadata endpoints; then the sole endpoint or the metadata-default endpoint.
 *
 * @throws SamlValidationException if metadata has no ACS endpoints, the index is
 *     out of bounds, or no default endpoint exists when one is required
 */
public void resolveAssertionConsumerService(AuthenticationRequest authenticationRequest) throws SamlValidationException {
    // set URL if set in authnRequest
    final String authnAcsURL = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceURL();
    if (authnAcsURL != null) {
        authenticationRequest.setAssertionConsumerURL(authnAcsURL);
        return;
    }
    // search url from metadata endpoints
    final Integer authnAcsIdx = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceIndex();
    List<Endpoint> endpoints = authenticationRequest.getConnectionEntity().getRoleDescriptors().get(0).getEndpoints(AssertionConsumerService.DEFAULT_ELEMENT_NAME);
    if (endpoints.isEmpty()) {
        throw new SamlValidationException("Authentication: Assertion Consumer Service not found in metadata");
    }
    // Fix: also reject negative indices. Previously a negative index fell through to
    // endpoints.get(authnAcsIdx) and surfaced as IndexOutOfBoundsException instead of
    // the intended validation error.
    if (authnAcsIdx != null && (authnAcsIdx < 0 || endpoints.size() <= authnAcsIdx)) {
        throw new SamlValidationException("Authentication: Assertion Consumer Index is out of bounds");
    }
    // TODO: check if this statement is correct
    if (endpoints.size() == 1) {
        authenticationRequest.setAssertionConsumerURL(endpoints.get(0).getLocation());
        return;
    }
    if (authnAcsIdx == null) {
        // No index supplied: fall back to the endpoint flagged as default in metadata.
        AssertionConsumerService defaultAcs = endpoints.stream()
                .filter(e -> e instanceof AssertionConsumerService)
                .map(acs -> (AssertionConsumerService) acs)
                .filter(IndexedEndpoint::isDefault)
                .findAny()
                .orElse(null);
        if (defaultAcs == null) {
            throw new SamlValidationException("Authentication: There is no default AssertionConsumerService");
        }
        authenticationRequest.setAssertionConsumerURL(defaultAcs.getLocation());
        return;
    }
    authenticationRequest.setAssertionConsumerURL(endpoints.get(authnAcsIdx).getLocation());
}
|
@Test
void resolveAcsUrlWithIndex2InMultiAcsMetadata() throws SamlValidationException {
    // Build an AuthnRequest that carries no explicit ACS URL but requests ACS index 2,
    // forcing resolution through the metadata endpoint list.
    AuthnRequest authnRequest = OpenSAMLUtils.buildSAMLObject(AuthnRequest.class);
    authnRequest.setAssertionConsumerServiceIndex(2);
    AuthenticationRequest authenticationRequest = new AuthenticationRequest();
    authenticationRequest.setAuthnRequest(authnRequest);
    // Metadata stub with multiple ACS endpoints; index 2 is expected to exist.
    authenticationRequest.setConnectionEntity(MetadataParser.readMetadata(stubsMultiAcsMetadataFile, CONNECTION_ENTITY_ID));
    assertionConsumerServiceUrlService.resolveAssertionConsumerService(authenticationRequest);
    // The all-'S' URL is presumably the placeholder location of endpoint index 2 in the
    // stub metadata file — verify against stubsMultiAcsMetadataFile if this changes.
    assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", authenticationRequest.getAssertionConsumerURL());
}
|
/**
 * Registers a state store under its fully-qualified store name.
 *
 * @param store the store to register; its {@code fqsn()} is used as the key
 * @throws IllegalArgumentException if a store with the same name is already registered
 */
@Override
public void registerStore(StateStore store) {
    final String storeName = store.fqsn();
    // Use checkArgument's template overload so the failure message is only
    // formatted when the precondition actually fails, instead of eagerly
    // building it with String.format on every call.
    checkArgument(!stores.containsKey(storeName),
        "Store %s has already been registered.", storeName);
    stores.put(storeName, store);
}
|
/**
 * Registering the same store name twice must be rejected with an
 * IllegalArgumentException on the second attempt.
 */
@Test
public void testRegisterStoreTwice() {
    final String fqsn = "t/ns/store";
    StateStore duplicateStore = mock(StateStore.class);
    when(duplicateStore.fqsn()).thenReturn(fqsn);
    // First registration succeeds.
    this.stateManager.registerStore(duplicateStore);
    try {
        // Second registration with the same fully-qualified name must fail.
        this.stateManager.registerStore(duplicateStore);
        fail("Should fail to register a store twice");
    } catch (IllegalArgumentException expected) {
        // expected: duplicate registration is rejected
    }
}
|
/**
 * Copies the fields of a tuple into a plain Object array.
 *
 * When no field order was configured ({@code order == null}) the whole tuple is
 * copied in positional order; otherwise only the configured fields are copied,
 * in the configured order.
 *
 * @param in the tuple whose fields are extracted
 * @return a freshly allocated array of the selected field values
 */
@Override
public Object[] extract(Tuple in) {
    if (order == null) {
        // No explicit order: copy every field in tuple position order.
        Object[] fields = new Object[in.getArity()];
        for (int pos = 0; pos < fields.length; pos++) {
            fields[pos] = in.getField(pos);
        }
        return fields;
    }
    // Copy only the user-selected fields, in the user-specified order.
    Object[] fields = new Object[order.length];
    for (int pos = 0; pos < order.length; pos++) {
        fields[pos] = in.getField(order[pos]);
    }
    return fields;
}
|
/**
 * Verifies that ArrayFromTuple copies every field of tuples of all arities
 * (1..MAX_ARITY) into an Object array, preserving field order.
 */
@Test
void testConvertFromTupleToArray() throws Exception {
    for (int i = 0; i < Tuple.MAX_ARITY; i++) {
        // Class.newInstance() is deprecated since Java 9 (it swallows checked
        // constructor exceptions); use the no-arg constructor reflectively instead.
        Tuple currentTuple = (Tuple) CLASSES[i].getDeclaredConstructor().newInstance();
        String[] currentArray = new String[i + 1];
        for (int j = 0; j <= i; j++) {
            currentTuple.setField(testStrings[j], j);
            currentArray[j] = testStrings[j];
        }
        arrayEqualityCheck(currentArray, new ArrayFromTuple().extract(currentTuple));
    }
}
|
/**
 * Reports the web server's health: GREEN only when the platform is UP, the
 * database migration status is one of the accepted values, and no restart is
 * in progress; otherwise RED with a generic "not up" cause.
 */
@Override
public Health check() {
    Platform.Status platformStatus = platform.status();
    // Short-circuit order matters for the collaborators: migration and restart
    // state are only consulted when the platform itself is UP.
    boolean webServerOperational = platformStatus == Platform.Status.UP
        && VALID_DATABASEMIGRATION_STATUSES.contains(migrationState.getStatus())
        && !restartFlagHolder.isRestarting();
    if (!webServerOperational) {
        return Health.builder()
            .setStatus(Health.Status.RED)
            .addCause("SonarQube webserver is not up")
            .build();
    }
    return Health.GREEN;
}
|
@Test
public void returns_RED_status_with_cause_if_platform_status_is_not_UP() {
    // Pick any platform status other than UP, at random, so the test covers the
    // whole non-UP space over repeated runs rather than a single hard-coded value.
    Platform.Status[] statusesButUp = Arrays.stream(Platform.Status.values())
        .filter(s -> s != Platform.Status.UP)
        .toArray(Platform.Status[]::new);
    Platform.Status randomStatusButUp = statusesButUp[random.nextInt(statusesButUp.length)];
    when(platform.status()).thenReturn(randomStatusButUp);
    Health health = underTest.check();
    // Any non-UP platform status must yield RED with the standard cause message.
    verifyRedHealthWithCause(health);
}
|
// REST endpoint: deletes an edge by id. Authorization is restricted to tenant
// admins; a non-existing id results in an error from checkEdgeId.
@ApiOperation(value = "Delete edge (deleteEdge)",
        notes = "Deletes the edge. Referencing non-existing edge Id will cause an error." + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@DeleteMapping(value = "/edge/{edgeId}")
public void deleteEdge(@Parameter(description = EDGE_ID_PARAM_DESCRIPTION, required = true)
                       @PathVariable(EDGE_ID) String strEdgeId) throws ThingsboardException {
    checkParameter(EDGE_ID, strEdgeId);
    EdgeId edgeId = new EdgeId(toUUID(strEdgeId));
    // Validates existence and the current user's DELETE permission before deleting.
    Edge edge = checkEdgeId(edgeId, Operation.DELETE);
    tbEdgeService.delete(edge, getCurrentUser());
}
|
@Test
public void testDeleteEdge() throws Exception {
    // Create an edge first so there is something to delete.
    Edge edge = constructEdge("My edge", "default");
    Edge savedEdge = doPost("/api/edge", edge, Edge.class);
    // Reset mocks so only the DELETE call's notifications are verified below.
    Mockito.reset(tbClusterService, auditLogService);
    doDelete("/api/edge/" + savedEdge.getId().getId().toString())
        .andExpect(status().isOk());
    // Verify the DELETED entity-state-change notification was broadcast exactly once
    // and nothing was pushed to the edge service itself.
    testNotifyEntityBroadcastEntityStateChangeEventManyTimeMsgToEdgeServiceNever(savedEdge, savedEdge.getId(), savedEdge.getId(),
        tenantId, tenantAdminUser.getCustomerId(), tenantAdminUser.getId(), tenantAdminUser.getEmail(),
        ActionType.DELETED, 1, savedEdge.getId().getId().toString());
    // Fetching the deleted edge must now report 404 with the standard not-found reason.
    doGet("/api/edge/" + savedEdge.getId().getId().toString())
        .andExpect(status().isNotFound())
        .andExpect(statusReason(containsString(msgErrorNoFound("Edge", savedEdge.getId().getId().toString()))));
}
|
/**
 * Encodes every Unicode code point of the given text via the single-code-point
 * {@code encode(int)} overload and concatenates the resulting byte sequences.
 *
 * Iterates by code point (not char) so surrogate pairs are handled as one unit.
 *
 * @param text the string to encode
 * @return the concatenated encoded bytes
 * @throws IOException if writing to the in-memory buffer fails
 */
public final byte[] encode(String text) throws IOException
{
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    int index = 0;
    while (index < text.length())
    {
        int codePoint = text.codePointAt(index);
        // multi-byte encoding with 1 to 4 bytes per code point
        buffer.write(encode(codePoint));
        // advance by 1 or 2 chars depending on whether this was a surrogate pair
        index += Character.charCount(codePoint);
    }
    return buffer.toByteArray();
}
|
@Test
void testPDFox4318() throws IOException
{
    PDType1Font helveticaBold = new PDType1Font(FontName.HELVETICA_BOLD);
    // U+0080 is not encodable with this font and must be rejected.
    assertThrows(IllegalArgumentException.class,
        () -> helveticaBold.encode("\u0080"),
        "should have thrown IllegalArgumentException");
    // Encoding a valid character (€) presumably mutates internal encoding caches;
    // the point of this regression test is that doing so must NOT make the
    // previously-invalid U+0080 suddenly encodable.
    helveticaBold.encode("€");
    assertThrows(IllegalArgumentException.class,
        () -> helveticaBold.encode("\u0080"),
        "should have thrown IllegalArgumentException");
}
|
/**
 * Returns descriptive info (name, level, filter, ...) for a configured logger,
 * or {@code null} when no logger with the given name is configured.
 *
 * @param loggerName name of the logger to look up
 * @return logger info map, or null for an unknown logger
 */
@Nullable
public static Map<String, String> getLoggerInfo(String loggerName) {
    // Unknown loggers yield null rather than an exception.
    if (!getAllConfiguredLoggers().contains(loggerName)) {
        return null;
    }
    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration config = context.getConfiguration();
    return getLoggerResponse(getLoggerConfig(config, loggerName));
}
|
@Test
public void testGetLoggerInfo() {
    // Root logger: configured at ERROR with no filter in the test configuration.
    Map<String, String> rootLoggerInfo = LoggerUtils.getLoggerInfo(ROOT);
    assertNotNull(rootLoggerInfo);
    assertEquals(rootLoggerInfo.get("name"), ROOT);
    assertEquals(rootLoggerInfo.get("level"), "ERROR");
    assertNull(rootLoggerInfo.get("filter"));
    // Pinot logger: configured at WARN with no filter.
    Map<String, String> pinotLoggerInfo = LoggerUtils.getLoggerInfo(PINOT);
    assertNotNull(pinotLoggerInfo);
    assertEquals(pinotLoggerInfo.get("name"), PINOT);
    assertEquals(pinotLoggerInfo.get("level"), "WARN");
    assertNull(pinotLoggerInfo.get("filter"));
    // Unknown logger names must yield null, not an exception.
    assertNull(LoggerUtils.getLoggerInfo("notExistLogger"));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.