| focal_method | test_case |
|---|---|
@Override
protected ObjectPermissions getPermissions() {
return mPermissions.get();
}
|
@Test
public void getPermissionsDefault() {
Mockito.when(mClient.getS3AccountOwner()).thenThrow(AmazonClientException.class);
ObjectUnderFileSystem.ObjectPermissions permissions = mS3UnderFileSystem.getPermissions();
Assert.assertEquals(DEFAULT_OWNER, permissions.getGroup());
Assert.assertEquals(DEFAULT_OWNER, permissions.getOwner());
Assert.assertEquals(DEFAULT_MODE, permissions.getMode());
}
|
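Note: the focal getter above delegates to mPermissions.get(), a lazily computed, cached value; the test's thenThrow stub exercises the fallback to DEFAULT_OWNER/DEFAULT_MODE. A minimal sketch of that caching pattern, assuming Guava's Suppliers.memoize (the field and fallback names here are stand-ins, not the library's actual code):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

class CachedPermissionsExample {
    private static final String DEFAULT_OWNER = "";

    // Computed once on first get(), then reused by every later call.
    private final Supplier<String> mPermissions = Suppliers.memoize(this::computeOwner);

    String getOwner() {
        return mPermissions.get();
    }

    private String computeOwner() {
        // A real implementation would query the S3 account owner here and
        // fall back to DEFAULT_OWNER when the client call throws.
        return DEFAULT_OWNER;
    }
}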
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void logicalNegation() {
String inputExpression = "not ( true )";
BaseNode neg = parse( inputExpression );
assertThat( neg).isInstanceOf(FunctionInvocationNode.class);
assertThat( neg.getResultType()).isEqualTo(BuiltInType.UNKNOWN);
assertThat( neg.getText()).isEqualTo( "not ( true )");
FunctionInvocationNode not = (FunctionInvocationNode) neg;
assertThat( not.getParams().getElements().get( 0 )).isInstanceOf(BooleanNode.class);
assertThat( not.getParams().getElements().get( 0 ).getResultType()).isEqualTo(BuiltInType.BOOLEAN);
assertThat( not.getParams().getElements().get( 0 ).getText()).isEqualTo("true");
}
|
@Override
public void applyFlowRules(FlowRule... flowRules) {
checkPermission(FLOWRULE_WRITE);
apply(buildFlowRuleOperations(true, null, flowRules));
}
|
@Test
public void flowMetrics() {
FlowRule f1 = flowRule(1, 1);
FlowRule f2 = flowRule(2, 2);
FlowRule f3 = flowRule(3, 3);
mgr.applyFlowRules(f1, f2, f3);
FlowEntry fe1 = new DefaultFlowEntry(f1);
FlowEntry fe2 = new DefaultFlowEntry(f2);
//FlowRule updatedF1 = flowRule(f1, FlowRuleState.ADDED);
//FlowRule updatedF2 = flowRule(f2, FlowRuleState.ADDED);
providerService.pushFlowMetrics(DID, Lists.newArrayList(fe1, fe2));
assertTrue("Entries should be added.",
validateState(ImmutableMap.of(
f1, FlowEntryState.ADDED,
f2, FlowEntryState.ADDED,
f3, FlowEntryState.PENDING_ADD)));
validateEvents(RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADD_REQUESTED,
RULE_ADDED, RULE_ADDED, RULE_ADD_REQUESTED);
}
|
public static boolean isSystemGroup(String group) {
if (StringUtils.isBlank(group)) {
return false;
}
String groupInLowerCase = group.toLowerCase();
for (String prefix : SYSTEM_GROUP_PREFIX_LIST) {
if (groupInLowerCase.startsWith(prefix)) {
return true;
}
}
return false;
}
|
@Test
public void testIsSystemGroup_NullGroup_ReturnsFalse() {
String group = null;
boolean result = BrokerMetricsManager.isSystemGroup(group);
assertThat(result).isFalse();
}
|
public void validate(ExternalIssueReport report, Path reportPath) {
if (report.rules != null && report.issues != null) {
Set<String> ruleIds = validateRules(report.rules, reportPath);
validateIssuesCctFormat(report.issues, ruleIds, reportPath);
} else if (report.rules == null && report.issues != null) {
String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " +
"Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
validateIssuesDeprecatedFormat(report.issues, reportPath);
} else {
throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
}
}
|
@Test
public void validate_whenDeprecatedReportMissingSeverity_shouldThrowException() throws IOException {
ExternalIssueReport report = read(DEPRECATED_REPORTS_LOCATION);
report.issues[0].severity = null;
assertThatThrownBy(() -> validator.validate(report, reportPath))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Failed to parse report 'report-path': missing mandatory field 'severity'.");
assertWarningLog();
}
|
protected static String encrypt(String... args) throws Exception {
int iterations = args.length == 2 ? Integer.parseInt(args[1]) : DEFAULT_ITERATIONS;
EncryptionReplacer replacer = new EncryptionReplacer();
String xmlPath = System.getProperty("hazelcast.config");
Properties properties = xmlPath == null ? System.getProperties()
: loadPropertiesFromConfig(new FileInputStream(xmlPath));
replacer.init(properties);
String encrypted = replacer.encrypt(args[0], iterations);
String variable = "$" + replacer.getPrefix() + "{" + encrypted + "}";
return variable;
}
|
@Test
public void testGenerateEncryptedLegacy() throws Exception {
assumeAlgorithmsSupported("PBKDF2WithHmacSHA1", "DES");
String xml = "<hazelcast xmlns=\"http://www.hazelcast.com/schema/config\">\n" + XML_LEGACY_CONFIG + "</hazelcast>";
File configFile = createFileWithString(xml);
hazelcastConfigProperty.setOrClearProperty(configFile.getAbsolutePath());
String encrypted = encrypt("test");
assertThat(encrypted)
.startsWith("$ENC{")
.endsWith("}");
}
|
protected static boolean isMatchingMetricTags(Set<Tag> meterTags, Set<Tag> expectedTags) {
if (!meterTags.containsAll(expectedTags)) {
return false;
}
return expectedTags.stream().allMatch(tag -> isMatchingTag(meterTags, tag));
}
|
@Test
void matchingMetricTagsReturnsTrue() {
meterTags.add(Tag.of("key", "value"));
Set<Tag> expectedTags = new HashSet<>();
expectedTags.add(Tag.of("key", "value"));
assertTrue(MetricsUtils.isMatchingMetricTags(meterTags, expectedTags));
}
|
@Override
public JobManagerRunner get(JobID jobId) {
assertJobRegistered(jobId);
return this.jobManagerRunners.get(jobId);
}
|
@Test
void testGet() {
final JobID jobId = new JobID();
final JobManagerRunner jobManagerRunner =
TestingJobManagerRunner.newBuilder().setJobId(jobId).build();
testInstance.register(jobManagerRunner);
assertThat(testInstance.get(jobId)).isEqualTo(jobManagerRunner);
}
|
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
ctx.tellNext(msg, checkMatches(msg) ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
}
|
@Test
void givenTypePolygonAndConfigWithoutPerimeterKeyName_whenOnMsg_thenExceptionMissingPerimeterDefinitionOldVersion() throws TbNodeException {
// GIVEN
var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration();
config.setPerimeterKeyName(null);
node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config)));
DeviceId deviceId = new DeviceId(UUID.randomUUID());
TbMsg msg = getTbMsg(deviceId, TbMsgMetaData.EMPTY,
GeoUtilTest.POINT_INSIDE_SIMPLE_RECT_CENTER.getLatitude(), GeoUtilTest.POINT_INSIDE_SIMPLE_RECT_CENTER.getLongitude());
// WHEN
var exception = assertThrows(TbNodeException.class, () -> node.onMsg(ctx, msg));
// THEN
assertThat(exception.getMessage()).isEqualTo("Missing perimeter definition!");
}
|
@Description("logarithm to base 10")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double log10(@SqlType(StandardTypes.DOUBLE) double num)
{
return Math.log10(num);
}
|
@Test
public void testLog10()
{
for (double doubleValue : DOUBLE_VALUES) {
assertFunction("log10(" + doubleValue + ")", DOUBLE, Math.log10(doubleValue));
}
assertFunction("log10(NULL)", DOUBLE, null);
}
|
public static Boolean judge(final ConditionData conditionData, final String realData) {
if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
return false;
}
PredicateJudge predicateJudge = newInstance(conditionData.getOperator());
if (!(predicateJudge instanceof BlankPredicateJudge) && StringUtils.isBlank(realData)) {
return false;
}
return predicateJudge.judge(conditionData, realData);
}
|
@Test
public void testRegexJudge() {
conditionData.setOperator(OperatorEnum.REGEX.getAlias());
conditionData.setParamValue("[/a-zA-Z0-9]+");
assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/test"));
assertFalse(PredicateJudgeFactory.judge(conditionData, "/http?/test"));
}
|
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testUpdateReservationNoDefinition() {
ReservationUpdateRequest request = new ReservationUpdateRequestPBImpl();
request.setReservationId(ReservationSystemTestUtil.getNewReservationId());
Plan plan = null;
try {
plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message.startsWith(
"Missing reservation definition. Please try again by specifying a reservation definition."));
LOG.info(message);
}
}
|
static DbTypeParser getDbTypeParser() {
if (dbTypeParser == null) {
synchronized (JdbcUtils.class) {
if (dbTypeParser == null) {
dbTypeParser = EnhancedServiceLoader.load(DbTypeParser.class, SqlParserType.SQL_PARSER_TYPE_DRUID);
}
}
}
return dbTypeParser;
}
|
@Test
public void testDbTypeParserLoading() {
DbTypeParser dbTypeParser = JdbcUtils.getDbTypeParser();
Assertions.assertNotNull(dbTypeParser);
}
|
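Note: the double-checked locking in getDbTypeParser is only safe if the dbTypeParser field is declared volatile; without it, another thread may observe a half-initialized reference. A self-contained sketch of the safe shape (DbTypeParser here is a stand-in interface, not Seata's):

interface DbTypeParser { }

class DbTypeParserHolder {
    // volatile is what makes double-checked locking correct under the Java memory model
    private static volatile DbTypeParser dbTypeParser;

    static DbTypeParser getDbTypeParser() {
        DbTypeParser local = dbTypeParser; // one volatile read on the fast path
        if (local == null) {
            synchronized (DbTypeParserHolder.class) {
                local = dbTypeParser;
                if (local == null) {
                    dbTypeParser = local = new DbTypeParser() { };
                }
            }
        }
        return local;
    }
}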
public static boolean isAnonymousUser(final String customerId) {
return customerId != null && customerId.toLowerCase(Locale.ROOT).equals(
CONFLUENT_SUPPORT_CUSTOMER_ID_DEFAULT);
}
|
@Test
public void testInvalidAnonymousUser() {
String[] invalidIds = Stream.concat(
CustomerIdExamples.INVALID_ANONYMOUS_IDS.stream(),
CustomerIdExamples.VALID_CUSTOMER_IDS.stream()).
toArray(String[]::new);
for (String invalidId : invalidIds) {
assertFalse(invalidId + " is a valid anonymous user identifier",
BaseSupportConfig.isAnonymousUser(invalidId));
}
}
|
@Override
public List<VFSFile> getFiles( VFSFile file, String filters, VariableSpace space ) throws FileException {
ConnectionFileName fileName = getConnectionFileName( file );
if ( fileName.isConnectionRoot() ) {
VFSConnectionDetails details = getExistingDetails( fileName );
if ( usesBuckets( details ) ) {
return getBuckets( file, fileName, details );
}
}
FileObject fileObject;
try {
fileObject = getFileObject( file, space );
} catch ( FileException e ) {
throw new FileNotFoundException( file.getPath(), TYPE );
}
return populateChildren( file, fileObject, filters );
}
|
@Test( expected = FileException.class )
public void testGetFilesOfConnectionUsingBucketsThrowsIfEmptyBuckets() throws Exception {
GetFilesOfConnectionUsingBucketsScenario scenario = new GetFilesOfConnectionUsingBucketsScenario();
mockDetailsProviderLocations( scenario.details1, scenario.provider );
vfsFileProvider.getFiles( scenario.connectionRootFile, null, mock( VariableSpace.class ) );
}
|
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
if (operator != OPERATOR_NULL) {
onInvalidDataReceived(device, data);
return;
}
switch (opCode) {
case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
// Field size is defined per service
int numberOfRecords;
switch (data.size() - 2) {
case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
default -> {
// Other field sizes are not supported
onInvalidDataReceived(device, data);
return;
}
}
onNumberOfRecordsReceived(device, numberOfRecords);
}
case OP_CODE_RESPONSE_CODE -> {
if (data.size() != 4) {
onInvalidDataReceived(device, data);
return;
}
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
if (responseCode == RACP_RESPONSE_SUCCESS) {
onRecordAccessOperationCompleted(device, requestCode);
} else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
} else {
onRecordAccessOperationError(device, requestCode, responseCode);
}
}
}
}
|
@Test
public void onRecordAccessOperationError_abortUnsuccessful() {
final Data data = new Data(new byte[] { 6, 0, 3, 7 });
callback.onDataReceived(null, data);
assertEquals(7, error);
}
|
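Note: the response parsed above is laid out as [op code, operator, payload...]; the test's bytes {6, 0, 3, 7} are op code 6 (response code), null operator, request code 3, error 7. A sketch of the other branch, assuming the standard RACP op code 5 for a number-of-stored-records response and the same callback fixture as the test above:

@Test
public void onNumberOfRecordsReceived_singleByteCount() {
    // op code 5, operator 0 (null), one-byte count: data.size() - 2 == 1
    // selects the FORMAT_UINT8 branch in the focal method
    final Data data = new Data(new byte[] { 5, 0, 10 });
    callback.onDataReceived(null, data);
    // expected: onNumberOfRecordsReceived(null, 10)
}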
public void encryptColumns(
String inputFile, String outputFile, List<String> paths, FileEncryptionProperties fileEncryptionProperties)
throws IOException {
Path inPath = new Path(inputFile);
Path outPath = new Path(outputFile);
RewriteOptions options = new RewriteOptions.Builder(conf, inPath, outPath)
.encrypt(paths)
.encryptionProperties(fileEncryptionProperties)
.build();
ParquetRewriter rewriter = new ParquetRewriter(options);
rewriter.processBlocks();
rewriter.close();
}
|
@Test
public void testNoEncryption() throws IOException {
String[] encryptColumns = {};
testSetup("GZIP");
columnEncryptor.encryptColumns(
inputFile.getFileName(),
outputFile,
Arrays.asList(encryptColumns),
EncDecProperties.getFileEncryptionProperties(encryptColumns, ParquetCipher.AES_GCM_CTR_V1, false));
verifyResultDecryptionWithValidKey();
}
|
@Override
public Optional<IdentifierValue> getAlias() {
return Optional.empty();
}
|
@Test
void assertGetAliasWhenAbsent() {
assertFalse(new ShorthandProjection(new IdentifierValue("owner"), Collections.emptyList()).getAlias().isPresent());
}
|
void handleStatement(final QueuedCommand queuedCommand) {
throwIfNotConfigured();
handleStatementWithTerminatedQueries(
queuedCommand.getAndDeserializeCommand(commandDeserializer),
queuedCommand.getAndDeserializeCommandId(),
queuedCommand.getStatus(),
Mode.EXECUTE,
queuedCommand.getOffset(),
false
);
}
|
@Test
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_INFERRED")
public void shouldExecutePlannedCommandWithMergedConfig() {
// Given:
final Map<String, String> savedConfigs = ImmutableMap.of("biz", "baz");
plannedCommand = new Command(
CREATE_STREAM_FOO_STATEMENT,
emptyMap(),
savedConfigs,
Optional.of(plan)
);
final KsqlConfig mockConfig = mock(KsqlConfig.class);
when(mockConfig.getKsqlStreamConfigProps()).thenReturn(
ImmutableMap.of(StreamsConfig.APPLICATION_SERVER_CONFIG, "appid"));
final KsqlConfig mergedConfig = mock(KsqlConfig.class);
when(mockConfig.overrideBreakingConfigsWithOriginalValues(any())).thenReturn(mergedConfig);
when(mockEngine.getKsqlConfig()).thenReturn(mockConfig);
givenMockPlannedQuery();
// When:
handleStatement(statementExecutorWithMocks, plannedCommand, COMMAND_ID, Optional.empty(), 0L);
// Then:
verify(mockConfig).overrideBreakingConfigsWithOriginalValues(savedConfigs);
verify(mockEngine).execute(
any(),
eq(ConfiguredKsqlPlan.of(plan, SessionConfig.of(mergedConfig, emptyMap()))),
eq(false)
);
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldNotMatchVarargDifferentStructs() {
// Given:
givenFunctions(
function(OTHER, 0, ArrayType.of(STRUCT1))
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> udfIndex.getFunction(ImmutableList.of(SqlArgument.of(STRUCT1_ARG), SqlArgument.of(STRUCT2_ARG)))
);
// Then:
assertThat(e.getMessage(), containsString("Function 'name' does not accept parameters "
+ "(STRUCT<a STRING>, STRUCT<b INTEGER>)"));
}
|
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
boolean result = true;
boolean containsNull = false;
// Spec. definition: return false if any item is false, else true if all items are true, else null
for ( final Object element : list ) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
} else {
if (element != null) {
result &= (Boolean) element;
} else if (!containsNull) {
containsNull = true;
}
}
}
if (containsNull && result) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( result );
}
}
|
@Test
void invokeArrayParamEmptyArray() {
FunctionTestUtil.assertResult(allFunction.invoke(new Object[]{}), true);
}
|
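Note: per the spec comment in invoke(), null elements only matter when no element is false: all([true, null]) is null, while all([false, null]) is false. A sketch in the style of the surrounding test, assuming FunctionTestUtil.assertResult accepts a null expected value:

@Test
void invokeListWithNullElement() {
    // no false element, but a null one: spec says the result is null
    FunctionTestUtil.assertResult(allFunction.invoke(Arrays.asList(true, null)), null);
    // a false element wins regardless of nulls
    FunctionTestUtil.assertResult(allFunction.invoke(Arrays.asList(false, null)), false);
}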
@Override
public String getSchema() {
return dialectDatabaseMetaData.getSchema(connection);
}
|
@Test
void assertGetSchema() throws SQLException {
when(connection.getSchema()).thenReturn(TEST_SCHEMA);
MetaDataLoaderConnection connection = new MetaDataLoaderConnection(databaseType, this.connection);
assertThat(connection.getSchema(), is(TEST_SCHEMA));
}
|
public static Regression<double[]> fit(double[][] x, double[] y, double eps, double C, double tol) {
smile.base.svm.SVR<double[]> svr = new smile.base.svm.SVR<>(new LinearKernel(), eps, C, tol);
KernelMachine<double[]> svm = svr.fit(x, y);
return new Regression<>() {
final LinearKernelMachine model = LinearKernelMachine.of(svm);
@Override
public double predict(double[] x) {
return model.f(x);
}
};
}
|
@Test
public void testDiabetes() {
System.out.println("Diabetes");
MathEx.setSeed(19650218); // to get repeatable results.
GaussianKernel kernel = new GaussianKernel(5.0);
RegressionValidations<Regression<double[]>> result = CrossValidation.regression(10, Diabetes.x, Diabetes.y,
(x, y) -> SVM.fit(x, y, kernel, 50, 1000, 1E-3));
System.out.println(result);
assertEquals(61.5148, result.avg.rmse, 1E-4);
}
|
public Method getMethod() {
return method;
}
|
@Test
public void testGetMethod() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
MethodDesc methodDesc = getMethodDesc();
assertThat(methodDesc.getMethod()).isEqualTo(method);
}
|
@Override
protected TableRecords getUndoRows() {
return sqlUndoLog.getBeforeImage();
}
|
@Test
public void getUndoRows() {
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage());
}
|
public static String unescape(String escaped) {
boolean escaping = false;
StringBuilder newString = new StringBuilder();
for (char c : escaped.toCharArray()) {
if (!escaping) {
if (c == ESCAPE_CHAR) {
escaping = true;
} else {
newString.append(c);
}
} else {
if (c == 'n') {
newString.append('\n');
} else if (c == 'r') {
newString.append('\r');
} else {
newString.append(c);
}
escaping = false;
}
}
return newString.toString();
}
|
@Test
public void testWithEscape() {
assertEquals("Hello\\World!", StringUtil.unescape("Hello\\\\World!"));
assertEquals("Hello \\\\World!", StringUtil.unescape("Hello \\\\\\\\World!"));
}
|
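Note: besides the escaped backslash covered above, unescape maps \n and \r escape pairs to their control characters and passes any other escaped character through unchanged. A few illustrative assertions in the same style:

assertEquals("line1\nline2", StringUtil.unescape("line1\\nline2")); // \n pair becomes a newline
assertEquals("cr\rend", StringUtil.unescape("cr\\rend"));           // \r pair becomes a carriage return
assertEquals("plain", StringUtil.unescape("plain"));                // no escape character: returned as-is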
public boolean contains(short version) {
return version >= min && version <= max;
}
|
@Test
public void testContains() {
assertTrue(v(1, 1).contains((short) 1));
assertFalse(v(1, 1).contains((short) 2));
assertTrue(v(1, 2).contains((short) 1));
assertFalse(v(4, 10).contains((short) 3));
assertTrue(v(2, 12).contains((short) 11));
}
|
public AvailableServiceResponse getAvailableServices() {
AWSPolicy awsPolicy = buildAwsSetupPolicy();
ArrayList<AvailableService> services = new ArrayList<>();
String policy;
try {
policy = objectMapper.writeValueAsString(awsPolicy);
} catch (JsonProcessingException e) {
LOG.error(POLICY_ENCODING_ERROR, e);
throw new InternalServerErrorException(POLICY_ENCODING_ERROR, e);
}
AvailableService cloudWatchService =
AvailableService.create("CloudWatch",
"Retrieve CloudWatch logs via Kinesis. Kinesis allows streaming of the logs " +
"in real time. AWS CloudWatch is a monitoring and management service built " +
"for developers, system operators, site reliability engineers (SRE), " +
"and IT managers.",
policy,
"Requires Kinesis",
"https://aws.amazon.com/cloudwatch/");
services.add(cloudWatchService);
return AvailableServiceResponse.create(services, services.size());
}
|
@Test
public void testAvailableServices() {
AvailableServiceResponse services = awsService.getAvailableServices();
// There should be one service.
assertEquals(1, services.total());
assertEquals(1, services.services().size());
// CloudWatch should be in the list of available services.
assertTrue(services.services().stream().anyMatch(s -> s.name().equals("CloudWatch")));
// Verify that some of the needed actions are present.
String policy = services.services().get(0).policy();
assertTrue(policy.contains("cloudwatch"));
assertTrue(policy.contains("dynamodb"));
assertTrue(policy.contains("ec2"));
assertTrue(policy.contains("elasticloadbalancing"));
assertTrue(policy.contains("kinesis"));
}
|
public StepInstanceRestartResponse toStepRestartResponse() {
return StepInstanceRestartResponse.builder()
.workflowId(this.workflowId)
.workflowVersionId(this.workflowVersionId)
.workflowInstanceId(this.workflowInstanceId)
.workflowRunId(this.workflowRunId)
.stepId(this.stepId)
.stepAttemptId(this.stepAttemptId)
.status(this.status.runStatus)
.timelineEvent(this.timelineEvent)
.build();
}
|
@Test
public void testToStepRestartResponse() {
RunResponse res = RunResponse.from(stepInstance, TimelineLogEvent.info("bar"));
StepInstanceRestartResponse response = res.toStepRestartResponse();
Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus());
res = RunResponse.from(instance, "foo");
response = res.toStepRestartResponse();
Assert.assertEquals(InstanceRunStatus.INTERNAL_ERROR, response.getStatus());
res = RunResponse.from(instance, 0);
response = res.toStepRestartResponse();
Assert.assertEquals(InstanceRunStatus.DUPLICATED, response.getStatus());
res = RunResponse.from(instance, -1);
response = res.toStepRestartResponse();
Assert.assertEquals(InstanceRunStatus.STOPPED, response.getStatus());
res = RunResponse.from(instance, 1);
response = res.toStepRestartResponse();
Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus());
Assert.assertEquals(12L, response.getWorkflowVersionId());
}
|
public LoggerContext configure() {
LoggerContext ctx = helper.getRootContext();
ctx.reset();
helper.enableJulChangePropagation(ctx);
configureConsole(ctx);
configureWithLogbackWritingToFile(ctx);
helper.apply(
LogLevelConfig.newBuilder(helper.getRootLoggerName())
.rootLevelFor(ProcessId.APP)
.immutableLevel("com.hazelcast",
Level.toLevel("WARN"))
.build(),
appSettings.getProps());
return ctx;
}
|
@Test
public void gobbler_logger_writes_to_console_without_formatting_when_running_from_command_line() {
emulateRunFromCommandLine(false);
LoggerContext ctx = underTest.configure();
Logger gobblerLogger = ctx.getLogger(LOGGER_GOBBLER);
verifyGobblerConsoleAppender(gobblerLogger);
assertThat(gobblerLogger.iteratorForAppenders()).toIterable().hasSize(1);
}
|
public static <InputT> ByBuilder<InputT> of(PCollection<InputT> input) {
return named(null).of(input);
}
|
@Test
public void testBuild_implicitName() {
final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings());
final PCollection<String> filtered = Filter.of(dataset).by(s -> !s.equals("")).output();
final Filter filter = (Filter) TestUtils.getProducer(filtered);
assertFalse(filter.getName().isPresent());
}
|
@Udf
public <T extends Comparable<? super T>> T arrayMin(@UdfParameter(
description = "Array of values from which to find the minimum") final List<T> input) {
if (input == null) {
return null;
}
T candidate = null;
for (T thisVal : input) {
if (thisVal != null) {
if (candidate == null) {
candidate = thisVal;
} else if (thisVal.compareTo(candidate) < 0) {
candidate = thisVal;
}
}
}
return candidate;
}
|
@Test
public void shouldReturnValueForMixedInput() {
final List<String> input = Arrays.asList(null, "foo", null, "bar", null);
assertThat(udf.arrayMin(input), is("bar"));
}
|
@Override
public ExecuteContext after(ExecuteContext context) {
ThreadLocalUtils.removeRequestTag();
return context;
}
|
@Test
public void testAfter() {
ThreadLocalUtils.addRequestTag(Collections.singletonMap("bar", Collections.singletonList("foo")));
Assert.assertNotNull(ThreadLocalUtils.getRequestTag());
// Test the after method to verify if thread variables are released
interceptor.after(context);
Assert.assertNull(ThreadLocalUtils.getRequestTag());
}
|
public void run(OutputReceiver<PartitionRecord> receiver, Instant startTime) {
List<ByteStringRange> streamPartitions =
changeStreamDao.generateInitialChangeStreamPartitions();
for (ByteStringRange partition : streamPartitions) {
metrics.incListPartitionsCount();
String uid = UniqueIdGenerator.getNextId();
PartitionRecord partitionRecord =
new PartitionRecord(
partition, startTime, uid, startTime, Collections.emptyList(), endTime);
// We are outputting elements with timestamp of 0 to prevent reliance on event time. This
// limits the ability to window on commit time of any data changes. It is still possible to
// window on processing time.
receiver.outputWithTimestamp(partitionRecord, Instant.EPOCH);
}
}
|
@Test
public void testGenerateInitialPartitionsFromStartTime() {
Range.ByteStringRange partition1 = Range.ByteStringRange.create("", "b");
Range.ByteStringRange partition2 = Range.ByteStringRange.create("b", "");
List<Range.ByteStringRange> partitionRecordList = Arrays.asList(partition1, partition2);
when(changeStreamDao.generateInitialChangeStreamPartitions()).thenReturn(partitionRecordList);
GenerateInitialPartitionsAction generateInitialPartitionsAction =
new GenerateInitialPartitionsAction(metrics, changeStreamDao, endTime);
generateInitialPartitionsAction.run(receiver, startTime);
verify(receiver, times(2))
.outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH));
List<PartitionRecord> actualPartitions = partitionRecordArgumentCaptor.getAllValues();
assertEquals(partition1, actualPartitions.get(0).getPartition());
assertEquals(startTime, actualPartitions.get(0).getStartTime());
assertEquals(partition2, actualPartitions.get(1).getPartition());
assertEquals(startTime, actualPartitions.get(1).getStartTime());
}
|
@Override
public void setLoadedCoreExtensions(Set<CoreExtension> coreExtensions) {
checkState(this.coreExtensions == null, "Repository has already been initialized");
this.coreExtensions = ImmutableSet.copyOf(coreExtensions);
this.installedCoreExtensions = new HashSet<>(coreExtensions.size());
}
|
@Test
public void setLoadedCoreExtensions_fails_with_NPE_if_argument_is_null() {
assertThatThrownBy(() -> underTest.setLoadedCoreExtensions(null))
.isInstanceOf(NullPointerException.class);
}
|
@Override
public int run(String[] args) throws Exception {
try {
webServiceClient = WebServiceClient.getWebServiceClient().createClient();
return runCommand(args);
} finally {
if (yarnClient != null) {
yarnClient.close();
}
if (webServiceClient != null) {
webServiceClient.destroy();
}
}
}
|
@Test (timeout = 5000)
public void testWithFileInputForOptionOut() throws Exception {
String localDir = "target/SaveLogs";
Path localPath = new Path(localDir);
FileSystem fs = FileSystem.get(conf);
ApplicationId appId1 = ApplicationId.newInstance(0, 1);
LogsCLI cli = createCli();
// Specify a file name to the option -out
try {
fs.mkdirs(localPath);
Path tmpFilePath = new Path(localPath, "tmpFile");
if (!fs.exists(tmpFilePath)) {
fs.createNewFile(tmpFilePath);
}
int exitCode = cli.run(new String[] {"-applicationId",
appId1.toString(),
"-out" , tmpFilePath.toString()});
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Invalid value for -out option. Please provide a directory."));
} finally {
fs.delete(localPath, true);
}
}
|
@Override
public Output load(String streamOutputId) throws NotFoundException {
final Output output = coll.findOneById(streamOutputId);
if (output == null) {
throw new NotFoundException("Couldn't find output with id " + streamOutputId);
}
return output;
}
|
@Test
@MongoDBFixtures("OutputServiceImplTest.json")
public void loadReturnsExistingOutput() throws NotFoundException {
final Output output = outputService.load("54e3deadbeefdeadbeef0001");
assertThat(output.getId()).isEqualTo("54e3deadbeefdeadbeef0001");
}
|
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
if(file.isVolume()) {
log.warn(String.format("Skip setting timestamp for %s", file));
return;
}
try {
if(null != status.getModified()) {
final String fileid = this.fileid.getFileId(file);
final File properties = new File();
properties.setModifiedTime(new DateTime(status.getModified()));
final File latest = session.getClient().files().update(fileid, properties).setFields(DriveAttributesFinderFeature.DEFAULT_FIELDS).
setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
status.setResponse(new DriveAttributesFinderFeature(session, this.fileid).toAttributes(latest));
}
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, file);
}
}
|
@Test
public void testSetTimestampDirectory() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path home = DriveHomeFinderService.MYDRIVE_FOLDER;
final Path test = new DriveDirectoryFeature(session, fileid).mkdir(
new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final long modified = System.currentTimeMillis();
new DriveTimestampFeature(session, fileid).setTimestamp(test, modified);
assertEquals(modified, new DefaultAttributesFinderFeature(session).find(test).getModificationDate());
assertEquals(modified, new DriveAttributesFinderFeature(session, fileid).find(test).getModificationDate());
new DriveDeleteFeature(session, fileid).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public boolean needToLoad(FilterInvoker invoker) {
AbstractInterfaceConfig<?, ?> config = invoker.getConfig();
String enabled = config.getParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED);
if (StringUtils.isNotBlank(enabled)) {
return Boolean.parseBoolean(enabled);
}
return RpcConfigs.getOrDefaultValue(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, true);
}
|
@Test
public void testNeedToLoadProviderAndConsumer() {
SentinelSofaRpcProviderFilter providerFilter = new SentinelSofaRpcProviderFilter();
ProviderConfig providerConfig = new ProviderConfig();
providerConfig.setInterfaceId(Serializer.class.getName());
providerConfig.setId("AAA");
FilterInvoker providerInvoker = new FilterInvoker(null, null, providerConfig);
assertTrue(providerFilter.needToLoad(providerInvoker));
SentinelSofaRpcConsumerFilter consumerFilter = new SentinelSofaRpcConsumerFilter();
ConsumerConfig consumerConfig = new ConsumerConfig();
consumerConfig.setInterfaceId(Serializer.class.getName());
consumerConfig.setId("BBB");
FilterInvoker consumerInvoker = new FilterInvoker(null, null, consumerConfig);
assertTrue(consumerFilter.needToLoad(consumerInvoker));
providerConfig.setParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, "false");
assertFalse(providerFilter.needToLoad(providerInvoker));
assertTrue(consumerFilter.needToLoad(consumerInvoker));
providerConfig.setParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, "");
assertTrue(providerFilter.needToLoad(providerInvoker));
RpcConfigs.putValue(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, "false");
assertFalse(providerFilter.needToLoad(providerInvoker));
assertFalse(consumerFilter.needToLoad(consumerInvoker));
}
|
@Override
public void publish(ScannerReportWriter writer) {
for (final DefaultInputFile inputFile : componentCache.allChangedFilesToPublish()) {
File iofile = writer.getSourceFile(inputFile.scannerId());
try (OutputStream output = new BufferedOutputStream(new FileOutputStream(iofile));
InputStream in = inputFile.inputStream();
BufferedReader reader = new BufferedReader(new InputStreamReader(in, inputFile.charset()))) {
writeSource(reader, output, inputFile.lines());
} catch (IOException e) {
throw new IllegalStateException("Unable to store file source in the report", e);
}
}
}
|
@Test
public void publishSourceWithLastEmptyLine() throws Exception {
FileUtils.write(sourceFile, "1\n2\n3\n4\n", StandardCharsets.ISO_8859_1);
publisher.publish(writer);
File out = writer.getSourceFile(inputFile.scannerId());
assertThat(FileUtils.readFileToString(out, StandardCharsets.UTF_8)).isEqualTo("1\n2\n3\n4\n");
}
|
public static List<PMMLModel> getPMMLModels(PMMLRuntimeContext pmmlContext) {
logger.debug("getPMMLModels {}", pmmlContext);
Collection<GeneratedExecutableResource> finalResources =
getAllGeneratedExecutableResources(pmmlContext.getGeneratedResourcesMap().get(PMML_STRING));
logger.debug("finalResources {}", finalResources);
return finalResources.stream()
.map(finalResource -> loadKiePMMLModelFactory(finalResource, pmmlContext))
.flatMap(factory -> factory.getKiePMMLModels().stream())
.collect(Collectors.toList());
}
|
@Test
void getPMMLModels() {
List<PMMLModel> retrieved = PMMLRuntimeHelper.getPMMLModels(getPMMLContext(FILE_NAME, MODEL_NAME,
memoryCompilerClassLoader));
assertThat(retrieved).isNotNull().hasSize(1); // defined in IndexFile.pmml_json
assertThat(retrieved.get(0)).isInstanceOf(KiePMMLTestingModel.class);
}
|
public static String[] getAddresses(String netAddress, String partialAddress) {
String[] parAddStrArray = StringUtils.split(partialAddress.substring(1, partialAddress.length() - 1), ",");
String address = netAddress.substring(0, netAddress.indexOf("{"));
String[] addressStrArray = new String[parAddStrArray.length];
for (int i = 0; i < parAddStrArray.length; i++) {
addressStrArray[i] = address + parAddStrArray[i];
}
return addressStrArray;
}
|
@Test
public void testGetAddresses() {
String address = "1.1.1.{1,2,3,4}";
String[] addressArray = AclUtils.getAddresses(address, "{1,2,3,4}");
List<String> newAddressList = new ArrayList<>(Arrays.asList(addressArray));
List<String> addressList = new ArrayList<>();
addressList.add("1.1.1.1");
addressList.add("1.1.1.2");
addressList.add("1.1.1.3");
addressList.add("1.1.1.4");
Assert.assertEquals(addressList, newAddressList);
// IPv6 test
String ipv6Address = "1:ac41:9987::bb22:666:{1,2,3,4}";
String[] ipv6AddressArray = AclUtils.getAddresses(ipv6Address, "{1,2,3,4}");
List<String> newIPv6AddressList = new ArrayList<>();
Collections.addAll(newIPv6AddressList, ipv6AddressArray);
List<String> ipv6AddressList = new ArrayList<>();
ipv6AddressList.add("1:ac41:9987::bb22:666:1");
ipv6AddressList.add("1:ac41:9987::bb22:666:2");
ipv6AddressList.add("1:ac41:9987::bb22:666:3");
ipv6AddressList.add("1:ac41:9987::bb22:666:4");
Assert.assertEquals(ipv6AddressList, newIPv6AddressList);
}
|
public static String getCertFingerPrint(Certificate cert) {
byte [] digest = null;
try {
byte[] encCertInfo = cert.getEncoded();
MessageDigest md = MessageDigest.getInstance("SHA-1");
digest = md.digest(encCertInfo);
} catch (Exception e) {
logger.error("Exception:", e);
}
if (digest != null) {
return bytesToHex(digest).toLowerCase();
}
return null;
}
|
@Test
public void testGetCertFingerPrintCarol() throws Exception {
X509Certificate cert = null;
try (InputStream is = Config.getInstance().getInputStreamFromFile("carol.crt")){
CertificateFactory cf = CertificateFactory.getInstance("X.509");
cert = (X509Certificate) cf.generateCertificate(is);
} catch (Exception e) {
e.printStackTrace();
}
String fp = FingerPrintUtil.getCertFingerPrint(cert);
Assert.assertEquals("f9d76aae4799610a3c904df073dc79f430b408b1", fp);
}
|
public static OffsetBasedPagination forStartRowNumber(int startRowNumber, int pageSize) {
checkArgument(startRowNumber >= 1, "startRowNumber must be >= 1");
checkArgument(pageSize >= 1, "page size must be >= 1");
return new OffsetBasedPagination(startRowNumber - 1, pageSize);
}
|
@Test
void equals_whenDifferentClasses_shouldBeFalse() {
Assertions.assertThat(OffsetBasedPagination.forStartRowNumber(15, 20)).isNotEqualTo("not an OffsetBasedPagination object");
}
|
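Note: forStartRowNumber converts a 1-based row number to a 0-based offset, so forStartRowNumber(15, 20) carries offset 14 and page size 20. A hedged sketch, assuming conventional getOffset()/getPageSize() accessors:

OffsetBasedPagination pagination = OffsetBasedPagination.forStartRowNumber(15, 20);
// row numbers are 1-based, offsets 0-based
assertThat(pagination.getOffset()).isEqualTo(14);
assertThat(pagination.getPageSize()).isEqualTo(20);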
public static long findAndVerifyWindowGrace(final GraphNode graphNode) {
return findAndVerifyWindowGrace(graphNode, "");
}
|
@Test
public void shouldExtractGraceFromSessionAncestorThroughStatelessParent() {
final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));
final StatefulProcessorNode<String, Long> graceGrandparent = new StatefulProcessorNode<>(
"asdf",
new ProcessorParameters<>(
new KStreamSessionWindowAggregate<String, Long, Integer>(
windows,
"asdf",
EmitStrategy.onWindowUpdate(),
null,
null,
null
),
"asdf"
),
(StoreFactory) null
);
final ProcessorGraphNode<String, Long> statelessParent = new ProcessorGraphNode<>("stateless", null);
graceGrandparent.addChild(statelessParent);
final ProcessorGraphNode<String, Long> node = new ProcessorGraphNode<>("stateless", null);
statelessParent.addChild(node);
final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node);
assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap()));
}
|
@Override
public int hashCode() {
return Objects.hash(taskId, topicPartitions);
}
|
@Test
public void shouldBeEqualsIfSameObject() {
final TaskMetadataImpl same = new TaskMetadataImpl(
TASK_ID,
TOPIC_PARTITIONS,
COMMITTED_OFFSETS,
END_OFFSETS,
TIME_CURRENT_IDLING_STARTED);
assertThat(taskMetadata, equalTo(same));
assertThat(taskMetadata.hashCode(), equalTo(same.hashCode()));
}
|
public void connect(ServerRoutingInstance serverRoutingInstance)
throws InterruptedException, TimeoutException {
_serverToChannelMap.computeIfAbsent(serverRoutingInstance, ServerChannel::new).connect();
}
|
@Test(dataProvider = "parameters")
public void testConnect(boolean nativeTransportEnabled)
throws Exception {
BrokerMetrics brokerMetrics = mock(BrokerMetrics.class);
NettyConfig nettyConfig = new NettyConfig();
nettyConfig.setNativeTransportsEnabled(nativeTransportEnabled);
QueryRouter queryRouter = mock(QueryRouter.class);
ServerRoutingInstance serverRoutingInstance =
new ServerRoutingInstance("localhost", _dummyServer.getAddress().getPort(), TableType.REALTIME);
ServerChannels serverChannels = new ServerChannels(queryRouter, brokerMetrics, nettyConfig, null);
serverChannels.connect(serverRoutingInstance);
final long requestId = System.currentTimeMillis();
AsyncQueryResponse asyncQueryResponse = mock(AsyncQueryResponse.class);
BrokerRequest brokerRequest = new BrokerRequest();
InstanceRequest instanceRequest = new InstanceRequest();
instanceRequest.setRequestId(requestId);
instanceRequest.setQuery(brokerRequest);
serverChannels.sendRequest("dummy_table_name", asyncQueryResponse, serverRoutingInstance, instanceRequest, 1000);
serverChannels.shutDown();
}
|
@Override
public boolean isInputConsumable(
SchedulingExecutionVertex executionVertex,
Set<ExecutionVertexID> verticesToDeploy,
Map<ConsumedPartitionGroup, Boolean> consumableStatusCache) {
for (ConsumedPartitionGroup consumedPartitionGroup :
executionVertex.getConsumedPartitionGroups()) {
if (!consumableStatusCache.computeIfAbsent(
consumedPartitionGroup, this::isConsumableBasedOnFinishedProducers)) {
return false;
}
}
return true;
}
|
@Test
void testAllFinishedHybridInput() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producers =
topology.addExecutionVertices().withParallelism(2).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(2).finish();
topology.connectAllToAll(producers, consumer)
.withResultPartitionState(ResultPartitionState.ALL_DATA_PRODUCED)
.withResultPartitionType(ResultPartitionType.HYBRID_FULL)
.finish();
AllFinishedInputConsumableDecider inputConsumableDecider =
createAllFinishedInputConsumableDecider();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(0), Collections.emptySet(), new HashMap<>()))
.isTrue();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(1), Collections.emptySet(), new HashMap<>()))
.isTrue();
}
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
try {
final int response = session.getClient().stat(directory.getAbsolute());
if(FTPReply.isPositiveCompletion(response)) {
return reader.read(directory, this.parse(response, session.getClient().getReplyStrings()));
}
else {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
}
|
@Test
public void testList() throws Exception {
final ListService service = new FTPStatListService(session,
new CompositeFileEntryParser(Collections.singletonList(new UnixFTPEntryParser())));
final Path directory = new FTPWorkdirService(session).find();
final Path file = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new FTPTouchFeature(session).touch(file, new TransferStatus());
final AttributedList<Path> list = service.list(directory, new DisabledListProgressListener());
assertTrue(list.contains(file));
new FTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public ConsumerBuilder<T> topics(List<String> topicNames) {
checkArgument(topicNames != null && !topicNames.isEmpty(),
"Passed in topicNames list should not be null or empty.");
topicNames.stream().forEach(topicName ->
checkArgument(StringUtils.isNotBlank(topicName), "topicNames cannot have blank topic"));
conf.getTopicNames().addAll(topicNames.stream().map(StringUtils::trim).collect(Collectors.toList()));
return this;
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenTopicNamesHasBlankTopic() {
List<String> topicNames = Arrays.asList("my-topic", " ");
consumerBuilderImpl.topics(topicNames);
}
|
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
final Credentials credentials = authorizationService.validate();
try {
final StringBuilder url = new StringBuilder();
url.append(host.getProtocol().getScheme().toString()).append("://");
url.append(host.getProtocol().getDefaultHostname());
if(host.getProtocol().getScheme().getPort() != host.getPort()) {
url.append(":").append(host.getPort());
}
final String context = PathNormalizer.normalize(host.getProtocol().getContext());
// Custom authentication context
url.append(context);
// Determine RestFS URL from service discovery
final HttpGet request = new HttpGet(url.toString());
final CloseableHttpResponse response = client.execute(request);
switch(response.getStatusLine().getStatusCode()) {
case HttpStatus.SC_OK:
final JsonElement element = JsonParser.parseReader(new InputStreamReader(response.getEntity().getContent()));
if(element.isJsonObject()) {
final JsonObject json = element.getAsJsonObject();
final URI uri = URI.create(json.getAsJsonObject("serviceTarget").getAsJsonPrimitive("uri").getAsString());
if(log.isInfoEnabled()) {
log.info(String.format("Set base path to %s", url));
}
this.setBasePath(uri.toString());
}
break;
default:
throw new DefaultHttpResponseExceptionMappingService().map(new HttpResponseException(
response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()));
}
credentials.setUsername(new UserInfoApi(new EueApiClient(this))
.userinfoGet(null, null).getAccount().getOsServiceId());
if(StringUtils.isNotBlank(host.getProperty("pacs.url"))) {
try {
client.execute(new HttpPost(host.getProperty("pacs.url")));
}
catch(IOException e) {
log.warn(String.format("Ignore failure %s running Personal Agent Context Service (PACS) request", e));
}
}
if(StringUtils.isNotBlank(new HostPreferences(host).getProperty("cryptomator.vault.name.default"))) {
final Path vault = new Path(new HostPreferences(host).getProperty("cryptomator.vault.name.default"), EnumSet.of(Path.Type.directory));
try {
vaultResourceId = new EueAttributesFinderFeature(this, resourceid).find(vault).getFileId();
host.setProperty("cryptomator.enable", String.valueOf(true));
}
catch(NotfoundException e) {
log.warn(String.format("Disable vault features with no existing vault found at %s", vault));
// Disable vault features
host.setProperty("cryptomator.enable", String.valueOf(false));
}
}
userShares.set(this.userShares());
}
catch(ApiException e) {
throw new EueExceptionMappingService().map(e);
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
}
|
@Test
public void testLogin() throws Exception {
assertNotEquals(StringUtils.EMPTY, session.getHost().getCredentials().getUsername());
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
}
|
@Test
public void testMultipleRecursion() throws ScanException {
List<Token> tl = new TokenStream("%-1(%d %45(%class %file))").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.FORMAT_MODIFIER, "-1"));
witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "d"));
witness.add(new Token(Token.LITERAL, " "));
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.FORMAT_MODIFIER, "45"));
witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "class"));
witness.add(new Token(Token.LITERAL, " "));
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "file"));
witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
assertEquals(witness, tl);
}
|
public static String extractCharset(String line, String defaultValue) {
if (line == null) {
return defaultValue;
}
final String[] parts = line.split(" ");
String charsetInfo = "";
for (var part : parts) {
if (part.startsWith("charset")) {
charsetInfo = part;
break;
}
}
final String charset = charsetInfo.replace("charset=", "").replace(";", "");
if (charset.isBlank()) {
return defaultValue;
}
return charset;
}
|
@DisplayName("with the charset information in the middle")
@Test
void testExtractCharsetInTheMiddle() {
assertEquals("UTF-8", TelegramAsyncHandler.extractCharset("Content-Type: text/plain; name=\"some-name\"; charset=UTF-8",
StandardCharsets.US_ASCII.name()));
}
|
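Note: when no charset parameter appears in the line, or the line is null, extractCharset returns the supplied default. In the style of the test above:

@Test
void testExtractCharsetFallsBackToDefault() {
    // no "charset=..." token in the header line: the default wins
    assertEquals(StandardCharsets.US_ASCII.name(),
        TelegramAsyncHandler.extractCharset("Content-Type: text/plain", StandardCharsets.US_ASCII.name()));
    // null line: the default as well
    assertEquals(StandardCharsets.US_ASCII.name(),
        TelegramAsyncHandler.extractCharset(null, StandardCharsets.US_ASCII.name()));
}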
public static TypeDescription convert(Schema schema) {
final TypeDescription root = TypeDescription.createStruct();
final Types.StructType schemaRoot = schema.asStruct();
for (Types.NestedField field : schemaRoot.asStructType().fields()) {
TypeDescription orcColumnType = convert(field.fieldId(), field.type(), field.isRequired());
root.addField(field.name(), orcColumnType);
}
return root;
}
|
@Test
public void testRoundtripConversionNested() {
Types.StructType leafStructType =
Types.StructType.of(
optional(6, "leafLongCol", Types.LongType.get()),
optional(7, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType nestedStructType =
Types.StructType.of(
optional(4, "longCol", Types.LongType.get()),
optional(5, "leafStructCol", leafStructType));
Types.StructType structPrimTypeForList =
Types.StructType.of(
optional(506, "leafLongCol", Types.LongType.get()),
optional(507, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType leafStructTypeForList =
Types.StructType.of(
optional(516, "leafLongCol", Types.LongType.get()),
optional(517, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType nestedStructTypeForList =
Types.StructType.of(
optional(504, "longCol", Types.LongType.get()),
optional(505, "leafStructCol", leafStructTypeForList));
Types.StructType structPrimTypeForMap =
Types.StructType.of(
optional(606, "leafLongCol", Types.LongType.get()),
optional(607, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType leafStructTypeForMap =
Types.StructType.of(
optional(616, "leafLongCol", Types.LongType.get()),
optional(617, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType nestedStructTypeForMap =
Types.StructType.of(
optional(604, "longCol", Types.LongType.get()),
optional(605, "leafStructCol", leafStructTypeForMap));
Types.StructType leafStructTypeForStruct =
Types.StructType.of(
optional(716, "leafLongCol", Types.LongType.get()),
optional(717, "leafBinaryCol", Types.BinaryType.get()));
Types.StructType nestedStructTypeForStruct =
Types.StructType.of(
optional(704, "longCol", Types.LongType.get()),
optional(705, "leafStructCol", leafStructTypeForStruct));
// all fields in expected iceberg schema will be optional since we don't have a column mapping
Schema expectedSchema =
new Schema(
optional(1, "intCol", Types.IntegerType.get()),
optional(2, "longCol", Types.LongType.get()),
optional(3, "nestedStructCol", nestedStructType),
optional(8, "intCol3", Types.IntegerType.get()),
optional(9, "doubleCol", Types.DoubleType.get()),
required(10, "uuidCol", Types.UUIDType.get()),
optional(20, "booleanCol", Types.BooleanType.get()),
optional(21, "fixedCol", Types.FixedType.ofLength(4096)),
required(22, "binaryCol", Types.BinaryType.get()),
required(23, "stringCol", Types.StringType.get()),
required(24, "decimalCol", Types.DecimalType.of(15, 3)),
required(25, "floatCol", Types.FloatType.get()),
optional(30, "dateCol", Types.DateType.get()),
required(32, "timeCol", Types.TimeType.get()),
required(34, "timestampCol", Types.TimestampType.withZone()),
required(35, "listPrimCol", Types.ListType.ofRequired(135, Types.LongType.get())),
required(36, "listPrimNestCol", Types.ListType.ofRequired(136, structPrimTypeForList)),
required(37, "listNestedCol", Types.ListType.ofRequired(137, nestedStructTypeForList)),
optional(
38,
"mapPrimCol",
Types.MapType.ofRequired(
138, 238, Types.StringType.get(), Types.FixedType.ofLength(4096))),
required(
39,
"mapPrimNestCol",
Types.MapType.ofRequired(139, 239, Types.StringType.get(), structPrimTypeForMap)),
required(
40,
"mapNestedCol",
Types.MapType.ofRequired(140, 240, Types.StringType.get(), nestedStructTypeForMap)),
required(
41,
"structListNestCol",
Types.ListType.ofRequired(
241,
Types.StructType.of(
optional(816, "leafLongCol", Types.LongType.get()),
optional(817, "leafBinaryCol", Types.BinaryType.get())))),
required(
42,
"structMapNestCol",
Types.MapType.ofRequired(
242,
342,
Types.StringType.get(),
Types.StructType.of(
optional(916, "leafLongCol", Types.LongType.get()),
optional(917, "leafBinaryCol", Types.BinaryType.get())))),
required(
43,
"structStructNestCol",
Types.StructType.of(
required(
243,
"innerStructNest",
Types.StructType.of(
optional(1016, "leafLongCol", Types.LongType.get()),
optional(1017, "leafBinaryCol", Types.BinaryType.get()))))),
required(
44,
"structStructComplexNestCol",
Types.StructType.of(
required(
244,
"innerStructNest",
Types.StructType.of(
optional(1116, "leafLongCol", Types.LongType.get()),
optional(
1117,
"leftMapOfListStructCol",
Types.MapType.ofRequired(
1150,
1151,
Types.StringType.get(),
Types.ListType.ofRequired(
1250, nestedStructTypeForStruct))))))));
TypeDescription orcSchema = ORCSchemaUtil.convert(expectedSchema);
assertThat(ORCSchemaUtil.convert(orcSchema).asStruct()).isEqualTo(expectedSchema.asStruct());
}
|
@Nonnull @Override
public ResultIterator<SqlRow> iterator() {
if (iterator == null) {
iterator = new RowToSqlRowIterator(rootResultConsumer.iterator());
return iterator;
} else {
throw new IllegalStateException("Iterator can be requested only once.");
}
}
|
@Test
public void when_hasNextInterrupted_then_interrupted() {
// this query is a continuous one, but never returns any rows (all are filtered out)
SqlResult sqlResult = instance().getSql().execute("select * from table(generate_stream(1)) where v < 0");
AtomicBoolean interruptedOk = new AtomicBoolean();
Thread t = new Thread(() -> {
try {
sqlResult.iterator().hasNext();
} catch (Throwable e) {
if (e.getCause() instanceof RuntimeException
&& e.getCause().getCause() instanceof InterruptedException) {
interruptedOk.set(true);
} else {
logger.severe("Unexpected exception caught", e);
}
}
});
t.start();
t.interrupt();
assertTrueEventually(() -> assertTrue(interruptedOk.get()));
}
|
@Override
public List<String> getAllProjectPermissions() {
return projectPermissions;
}
|
@Test
public void projectPermissions_must_be_ordered() {
assertThat(underTest.getAllProjectPermissions())
.containsExactly("admin", "codeviewer", "issueadmin", "securityhotspotadmin", "scan", "user");
}
|
public Object valueFrom(Struct struct) {
return valueFrom(struct, true);
}
|
@Test void shouldReturnNullValueWhenFieldNotFoundInMap() {
Map<String, Object> foo = new HashMap<>();
foo.put("bar", 42);
foo.put("baz", null);
Map<String, Object> map = new HashMap<>();
map.put("foo", foo);
assertNull(pathV2("un.known").valueFrom(map));
assertNull(pathV2("foo.unknown").valueFrom(map));
assertNull(pathV2("unknown").valueFrom(map));
assertNull(pathV2("foo.baz.inner").valueFrom(map));
}
|
@Override
public Integer call() throws Exception {
super.call();
EnvironmentEndpoint endpoint = applicationContext.getBean(EnvironmentEndpoint.class);
stdOut(JacksonMapper.ofYaml().writeValueAsString(endpoint.getEnvironmentInfo()));
return 0;
}
|
@Test
void run() {
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
PicocliRunner.call(ConfigPropertiesCommand.class, ctx);
assertThat(out.toString(), containsString("activeEnvironments:"));
assertThat(out.toString(), containsString("- test"));
}
}
|
static double evaluateAugmentedNormalizedTermFrequency(int calculatedLevenshteinDistance, List<String> texts) {
Map<String, Long> wordFrequencies =
texts.stream().collect(Collectors.groupingBy(Function.identity(), counting()));
int maxFrequency = wordFrequencies.values().stream()
.max(Comparator.comparingLong(f -> f))
.map(Long::intValue)
.orElseThrow(() -> new KiePMMLException("Failed to find most frequent word!"));
int binaryEvaluation = evaluateBinary(calculatedLevenshteinDistance);
return 0.5 * (binaryEvaluation + (calculatedLevenshteinDistance / (double) maxFrequency)); // cast to double to avoid integer division (java:S2184)
}
|
@Test
void evaluateAugmentedNormalizedTermFrequency() {
Map<Integer, String> source = new HashMap<>();
int maxFrequency = 23;
source.put(maxFrequency, "aword");
source.put(19, "anotherword");
source.put(5, "adifferentword");
source.put(3, "lastword");
List<String> texts = new ArrayList<>();
source.forEach((integer, s) -> IntStream.range(0, integer).forEach(i -> texts.add(s)));
Collections.shuffle(texts);
int calculatedLevenshteinDistance = 4;
int binaryEvaluation = 1;
double expected = 0.5 * (binaryEvaluation + (calculatedLevenshteinDistance / (double) maxFrequency)); // cast to double for java:S2184
assertThat(KiePMMLTextIndex.evaluateAugmentedNormalizedTermFrequency(calculatedLevenshteinDistance, texts)).isCloseTo(expected, Offset.offset(0.0));
}
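
The formula above is easier to see with the test's own numbers plugged in. A quick worked instance, assuming evaluateBinary returns 1 for a positive distance (as this test expects):

double score = 0.5 * (1 + (4 / 23.0)); // binary(4) = 1, maxFrequency = 23 -> approx. 0.58696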
|
@Override public boolean replace(long key, long oldValue, long newValue) {
assert oldValue != nullValue : "replace() called with null-sentinel oldValue " + nullValue;
assert newValue != nullValue : "replace() called with null-sentinel newValue " + nullValue;
final long valueAddr = hsa.get(key);
if (valueAddr == NULL_ADDRESS) {
return false;
}
final long actualValue = mem.getLong(valueAddr);
if (actualValue != oldValue) {
return false;
}
mem.putLong(valueAddr, newValue);
return true;
}
|
@Test
public void testReplace() throws Exception {
long key = newKey();
long value = newValue();
assertEqualsKV(MISSING_VALUE, map.replace(key, value), key, value);
map.put(key, value);
long newValue = newValue();
assertEqualsKV(value, map.replace(key, newValue), key, value);
}
|
public void createBackupLog(UUID callerUuid, UUID txnId) {
createBackupLog(callerUuid, txnId, false);
}
|
@Test
public void createBackupLog_whenNotCreated() {
UUID callerUuid = UuidUtil.newUnsecureUUID();
txService.createBackupLog(callerUuid, TXN);
assertTxLogState(TXN, ACTIVE);
}
|
@Override
public String toString() {
return String.format("NormalKey(id:%d createTime:%d)", id, createTime);
}
|
@Test
public void testToString() {
String expected = String.format("NormalKey(id:%d createTime:%d)", normalKey.getId(), normalKey.getCreateTime());
assertEquals(expected, normalKey.toString());
}
|
public static int max(int a, int b, int c) {
return Math.max(Math.max(a, b), c);
}
|
@Test
public void testMax_3args() {
System.out.println("max");
int a = -1;
int b = 0;
int c = 1;
int expResult = 1;
int result = MathEx.max(a, b, c);
assertEquals(expResult, result);
}
|
static void validateCsvFormat(CSVFormat format) {
String[] header =
checkArgumentNotNull(format.getHeader(), "Illegal %s: header is required", CSVFormat.class);
checkArgument(header.length > 0, "Illegal %s: header cannot be empty", CSVFormat.class);
checkArgument(
!format.getAllowMissingColumnNames(),
"Illegal %s: cannot allow missing column names",
CSVFormat.class);
checkArgument(
!format.getIgnoreHeaderCase(), "Illegal %s: cannot ignore header case", CSVFormat.class);
checkArgument(
!format.getAllowDuplicateHeaderNames(),
"Illegal %s: cannot allow duplicate header names",
CSVFormat.class);
for (String columnName : header) {
checkArgument(
!Strings.isNullOrEmpty(columnName),
"Illegal %s: column name is required",
CSVFormat.class);
}
checkArgument(
!format.getSkipHeaderRecord(),
"Illegal %s: cannot skip header record because the header is already accounted for",
CSVFormat.class);
}
|
@Test
public void givenCSVFormatWithNullHeader_throwsException() {
CSVFormat format = csvFormat();
String gotMessage =
assertThrows(
IllegalArgumentException.class, () -> CsvIOParseHelpers.validateCsvFormat(format))
.getMessage();
assertEquals("Illegal class org.apache.commons.csv.CSVFormat: header is required", gotMessage);
}
|
@Override
public Set<IndexSet> getAll() {
return ImmutableSet.copyOf(findAllMongoIndexSets());
}
|
@Test
public void getAllShouldBeCachedForNonEmptyList() {
final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig);
final MongoIndexSet indexSet = mock(MongoIndexSet.class);
when(mongoIndexSetFactory.create(indexSetConfig)).thenReturn(indexSet);
when(indexSetService.findAll()).thenReturn(indexSetConfigs);
assertThat(this.indexSetRegistry.getAll())
.isNotNull()
.isNotEmpty()
.hasSize(1)
.containsExactly(indexSet);
assertThat(this.indexSetRegistry.getAll())
.isNotNull()
.isNotEmpty()
.hasSize(1)
.containsExactly(indexSet);
verify(indexSetService, times(1)).findAll();
}
|
static String headerLine(CSVFormat csvFormat) {
return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader());
}
|
@Test
public void givenCustomRecordSeparator_isNoop() {
CSVFormat csvFormat = csvFormat().withRecordSeparator("😆");
PCollection<String> input =
pipeline.apply(Create.of(headerLine(csvFormat), "a,1,1.1😆b,2,2.2😆c,3,3.3"));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Collections.singletonList(
Arrays.asList("a", "1", "1.1😆b", "2", "2.2😆c", "3", "3.3")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
}
|
public static <RestrictionT, PositionT> RestrictionTracker<RestrictionT, PositionT> observe(
RestrictionTracker<RestrictionT, PositionT> restrictionTracker,
ClaimObserver<PositionT> claimObserver) {
if (restrictionTracker instanceof RestrictionTracker.HasProgress) {
return new RestrictionTrackerObserverWithProgress<>(restrictionTracker, claimObserver);
} else {
return new RestrictionTrackerObserver<>(restrictionTracker, claimObserver);
}
}
|
@Test
public void testObservingClaims() {
RestrictionTracker<String, String> observedTracker =
new RestrictionTracker() {
@Override
public boolean tryClaim(Object position) {
return "goodClaim".equals(position);
}
@Override
public Object currentRestriction() {
throw new UnsupportedOperationException();
}
@Override
public SplitResult<Object> trySplit(double fractionOfRemainder) {
throw new UnsupportedOperationException();
}
@Override
public void checkDone() throws IllegalStateException {
throw new UnsupportedOperationException();
}
@Override
public IsBounded isBounded() {
return IsBounded.BOUNDED;
}
};
List<String> positionsObserved = new ArrayList<>();
ClaimObserver<String> observer =
new ClaimObserver<String>() {
@Override
public void onClaimed(String position) {
positionsObserved.add(position);
assertEquals("goodClaim", position);
}
@Override
public void onClaimFailed(String position) {
positionsObserved.add(position);
}
};
RestrictionTracker<String, String> observingTracker =
RestrictionTrackers.observe(observedTracker, observer);
observingTracker.tryClaim("goodClaim");
observingTracker.tryClaim("badClaim");
assertThat(positionsObserved, contains("goodClaim", "badClaim"));
}
|
public static int digitCount(final int value)
{
return (int)((value + INT_DIGITS[31 - Integer.numberOfLeadingZeros(value | 1)]) >> 32);
}
|
@Test
void digitCountLongValue()
{
for (int i = 0; i < LONG_MAX_DIGITS; i++)
{
final long min = 0 == i ? 0 : LONG_POW_10[i];
final long max = LONG_MAX_DIGITS - 1 == i ? Long.MAX_VALUE : LONG_POW_10[i + 1] - 1;
final int expectedDigitCount = i + 1;
assertEquals(expectedDigitCount, digitCount(min));
assertEquals(expectedDigitCount, digitCount(min + 1));
assertEquals(expectedDigitCount, digitCount(min + ((max - min) >>> 1))); // midpoint of the range
assertEquals(expectedDigitCount, digitCount(max - 1));
assertEquals(expectedDigitCount, digitCount(max));
}
}
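
digitCount above depends on a precomputed INT_DIGITS table that is not shown in this row. One plausible construction, consistent with the add-and-shift trick (the high 32 bits carry the digit count plus one, and subtracting the next power of ten makes values below the decimal boundary borrow one from the high word), is sketched here as an assumption, not the library's actual table:

static final long[] INT_DIGITS = new long[32];
static {
    for (int b = 0; b < 32; b++) {
        int d = Long.toString(1L << b).length(); // digits of the smallest value with bit length b + 1
        long powerOfTen = 1;
        for (int j = 0; j < d; j++) {
            powerOfTen *= 10; // 10^d, the next decimal boundary above 2^b
        }
        INT_DIGITS[b] = ((long) (d + 1) << 32) - powerOfTen;
    }
}

With such a table, digitCount(999) borrows across the 10^3 boundary and yields 3, while digitCount(1000) yields 4.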
|
@Override
public void monitor(RedisServer master) {
connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
master.getPort().intValue(), master.getQuorum().intValue());
}
|
@Test
public void testMonitor() {
Collection<RedisServer> masters = connection.masters();
RedisServer master = masters.iterator().next();
master.setName(master.getName() + ":");
connection.monitor(master);
}
|
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain) throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
DBSessions dbSessions = platform.getContainer().getComponentByType(DBSessions.class);
ThreadLocalSettings settings = platform.getContainer().getComponentByType(ThreadLocalSettings.class);
UserSessionInitializer userSessionInitializer = platform.getContainer().getOptionalComponentByType(UserSessionInitializer.class).orElse(null);
LOG.trace("{} serves {}", Thread.currentThread(), request.getRequestURI());
dbSessions.enableCaching();
try {
settings.load();
try {
doFilter(request, response, chain, userSessionInitializer);
} finally {
settings.unload();
}
} finally {
dbSessions.disableCaching();
}
}
|
@Test
public void doFilter_unloads_Settings_even_if_UserSessionInitializer_removeUserSession_fails() throws Exception {
RuntimeException thrown = mockUserSessionInitializerRemoveUserSessionFailing();
try {
underTest.doFilter(request, response, chain);
fail("A RuntimeException should have been thrown");
} catch (RuntimeException e) {
assertThat(e).isSameAs(thrown);
verify(settings).unload();
}
}
|
@Override
public long nextBackOffMillis() {
// Make sure we have not gone over the maximum elapsed time.
if (getElapsedTimeMillis() > maxElapsedTimeMillis) {
return maxElapsedTimeMillis;
}
int randomizedInterval =
getRandomValueFromInterval(randomizationFactor, Math.random(), currentIntervalMillis);
incrementCurrentInterval();
return randomizedInterval;
}
|
@Test
public void testBackOff() {
int testInitialInterval = 500;
double testRandomizationFactor = 0.1;
double testMultiplier = 2.0;
int testMaxInterval = 5000;
int testMaxElapsedTime = 900000;
ExponentialBackOff backOffPolicy =
new ExponentialBackOff.Builder()
.setInitialIntervalMillis(testInitialInterval)
.setRandomizationFactor(testRandomizationFactor)
.setMultiplier(testMultiplier)
.setMaxIntervalMillis(testMaxInterval)
.setMaxElapsedTimeMillis(testMaxElapsedTime)
.build();
int[] expectedResults = {500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000};
for (int expected : expectedResults) {
assertEquals(expected, backOffPolicy.getCurrentIntervalMillis());
// Assert that the next back off falls in the expected range.
int minInterval = (int) (expected - (testRandomizationFactor * expected));
int maxInterval = (int) (expected + (testRandomizationFactor * expected));
long actualInterval = backOffPolicy.nextBackOffMillis();
assertTrue(minInterval <= actualInterval && actualInterval <= maxInterval);
}
}
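
nextBackOffMillis delegates the jitter to getRandomValueFromInterval, which is not part of this row. A minimal sketch consistent with the test's [expected * (1 - f), expected * (1 + f)] bounds, offered as an assumption about the helper rather than its actual source:

static int getRandomValueFromInterval(double randomizationFactor, double random, int currentIntervalMillis) {
    double delta = randomizationFactor * currentIntervalMillis;
    double minInterval = currentIntervalMillis - delta;
    double maxInterval = currentIntervalMillis + delta;
    // random is in [0, 1), so the result falls uniformly in [minInterval, maxInterval].
    return (int) (minInterval + (random * (maxInterval - minInterval + 1)));
}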
|
@Override
@Transactional(rollbackFor = Exception.class)
@LogRecord(type = SYSTEM_USER_TYPE, subType = SYSTEM_USER_CREATE_SUB_TYPE, bizNo = "{{#user.id}}",
success = SYSTEM_USER_CREATE_SUCCESS)
public Long createUser(UserSaveReqVO createReqVO) {
// 1.1 Validate the tenant's account quota
tenantService.handleTenantInfo(tenant -> {
long count = userMapper.selectCount();
if (count >= tenant.getAccountCount()) {
throw exception(USER_COUNT_MAX, tenant.getAccountCount());
}
});
// 1.2 Validate correctness
validateUserForCreateOrUpdate(null, createReqVO.getUsername(),
createReqVO.getMobile(), createReqVO.getEmail(), createReqVO.getDeptId(), createReqVO.getPostIds());
// 2.1 Insert the user
AdminUserDO user = BeanUtils.toBean(createReqVO, AdminUserDO.class);
user.setStatus(CommonStatusEnum.ENABLE.getStatus()); // enabled by default
user.setPassword(encodePassword(createReqVO.getPassword())); // encrypt the password
userMapper.insert(user);
// 2.2 Insert the associated posts
if (CollectionUtil.isNotEmpty(user.getPostIds())) {
userPostMapper.insertBatch(convertList(user.getPostIds(),
postId -> new UserPostDO().setUserId(user.getId()).setPostId(postId)));
}
// 3. Record the operation log context
LogRecordContext.putVariable("user", user);
return user.getId();
}
|
@Test
public void testCreateUser_max() {
// prepare parameters
UserSaveReqVO reqVO = randomPojo(UserSaveReqVO.class);
// mock an insufficient account quota
TenantDO tenant = randomPojo(TenantDO.class, o -> o.setAccountCount(-1));
doNothing().when(tenantService).handleTenantInfo(argThat(handler -> {
handler.handle(tenant);
return true;
}));
// invoke and assert the exception
assertServiceException(() -> userService.createUser(reqVO), USER_COUNT_MAX, -1);
}
|
@Override
public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId)
throws IOException {
try {
long startTime = clock.getTime();
Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
final HttpServletRequest hsrCopy = clone(hsr);
Class[] argsClasses = new Class[]{HttpServletRequest.class, String.class};
Object[] args = new Object[]{hsrCopy, nodeId};
ClientMethod remoteMethod = new ClientMethod("getLabelsOnNode", argsClasses, args);
Map<SubClusterInfo, NodeLabelsInfo> nodeToLabelsInfoMap =
invokeConcurrent(subClustersActive, remoteMethod, NodeLabelsInfo.class);
Set<NodeLabel> hashSets = Sets.newHashSet();
nodeToLabelsInfoMap.values().forEach(item -> hashSets.addAll(item.getNodeLabels()));
NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo(hashSets);
if (nodeLabelsInfo != null) {
long stopTime = clock.getTime();
routerMetrics.succeededGetLabelsToNodesRetrieved(stopTime - startTime);
RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_LABELS_ON_NODE,
TARGET_WEB_SERVICE);
return nodeLabelsInfo;
}
} catch (NotFoundException e) {
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELS_ON_NODE,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e);
} catch (YarnException e) {
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELS_ON_NODE,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowIOException(
e, "getLabelsOnNode nodeId = %s with yarn error.", nodeId);
}
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELS_ON_NODE,
UNKNOWN, TARGET_WEB_SERVICE, "getLabelsOnNode by nodeId = " + nodeId + " Failed.");
throw RouterServerUtil.logAndReturnRunTimeException(
"getLabelsOnNode by nodeId = %s Failed.", nodeId);
}
|
@Test
public void testGetLabelsOnNode() throws Exception {
NodeLabelsInfo nodeLabelsInfo = interceptor.getLabelsOnNode(null, "node1");
Assert.assertNotNull(nodeLabelsInfo);
Assert.assertEquals(2, nodeLabelsInfo.getNodeLabelsName().size());
List<String> nodeLabelsName = nodeLabelsInfo.getNodeLabelsName();
Assert.assertNotNull(nodeLabelsName);
Assert.assertTrue(nodeLabelsName.contains("x"));
Assert.assertTrue(nodeLabelsName.contains("y"));
// query another node with partial results disabled
interceptor.setAllowPartialResult(false);
NodeLabelsInfo nodeLabelsInfo2 = interceptor.getLabelsOnNode(null, "node2");
Assert.assertNotNull(nodeLabelsInfo2);
Assert.assertEquals(0, nodeLabelsInfo2.getNodeLabelsName().size());
}
|
@Override
public Consumer<Packet> get() {
return responseHandler;
}
|
@Test
public void get_whenResponseThreads() {
supplier = newSupplier(1);
assertInstanceOf(AsyncSingleThreadedResponseHandler.class, supplier.get());
}
|
@Override
protected Set<StepField> getUsedFields( final JsonInputMeta meta ) {
Set<StepField> usedFields = new HashSet<>();
if ( meta.isAcceptingFilenames() && StringUtils.isNotEmpty( meta.getAcceptingField() ) ) {
final Set<String> inputStepNames = getInputStepNames( meta, meta.getAcceptingField() );
for ( final String inputStepName : inputStepNames ) {
final StepField stepField = new StepField( inputStepName, meta.getAcceptingField() );
usedFields.add( stepField );
}
}
return usedFields;
}
|
@Test
public void testGetUsedFields_isNotAcceptingFilenames() throws Exception {
when( meta.isAcceptingFilenames() ).thenReturn( false );
Set<StepField> usedFields = analyzer.getUsedFields( meta );
assertNotNull( usedFields );
assertEquals( 0, usedFields.size() );
}
|
public static String[] splitOnSpace(String s)
{
return PATTERN_SPACE.split(s);
}
|
@Test
void testSplitOnSpace_happyPath()
{
String[] result = StringUtil.splitOnSpace("a b c");
assertArrayEquals(new String[] {"a", "b", "c"}, result);
}
|
public String generatePushDownFilter(List<String> writtenPartitions, List<FieldSchema> partitionFields, HiveSyncConfig config) {
PartitionValueExtractor partitionValueExtractor = ReflectionUtils
.loadClass(config.getStringOrDefault(META_SYNC_PARTITION_EXTRACTOR_CLASS));
List<Partition> partitions = writtenPartitions.stream().map(s -> {
List<String> values = partitionValueExtractor.extractPartitionValuesInPath(s);
if (values.size() != partitionFields.size()) {
throw new HoodieHiveSyncException("Partition fields and values should be same length"
+ ", but got partitionFields: " + partitionFields + " with values: " + values);
}
return new Partition(values, null);
}).collect(Collectors.toList());
Expression filter;
int estimateSize = partitionFields.size() * partitions.size();
if (estimateSize > config.getIntOrDefault(HIVE_SYNC_FILTER_PUSHDOWN_MAX_SIZE)) {
filter = buildMinMaxPartitionExpression(partitions, partitionFields);
} else {
filter = buildPartitionExpression(partitions, partitionFields);
}
if (filter != null) {
return generateFilterString(filter);
}
return "";
}
|
@Test
public void testPushDownFilters() {
Properties props = new Properties();
HiveSyncConfig config = new HiveSyncConfig(props);
List<FieldSchema> partitionFieldSchemas = new ArrayList<>(4);
partitionFieldSchemas.add(new FieldSchema("date", "date"));
partitionFieldSchemas.add(new FieldSchema("year", "string"));
partitionFieldSchemas.add(new FieldSchema("month", "int"));
partitionFieldSchemas.add(new FieldSchema("day", "bigint"));
List<String> writtenPartitions = new ArrayList<>();
writtenPartitions.add("2022-09-01/2022/9/1");
assertEquals("(((date = 2022-09-01 AND year = \"2022\") AND month = 9) AND day = 1)",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("2022-09-02/2022/9/2");
assertEquals(
"((((date = 2022-09-01 AND year = \"2022\") AND month = 9) AND day = 1) OR (((date = 2022-09-02 AND year = \"2022\") AND month = 9) AND day = 2))",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
// If there are incompatible types to convert as filters inside partition
partitionFieldSchemas.clear();
writtenPartitions.clear();
partitionFieldSchemas.add(new FieldSchema("date", "date"));
partitionFieldSchemas.add(new FieldSchema("finished", "boolean"));
writtenPartitions.add("2022-09-01/true");
assertEquals("date = 2022-09-01",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("2022-09-02/true");
assertEquals("(date = 2022-09-01 OR date = 2022-09-02)",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
// If no compatible types matched to convert as filters
partitionFieldSchemas.clear();
writtenPartitions.clear();
partitionFieldSchemas.add(new FieldSchema("finished", "boolean"));
writtenPartitions.add("true");
assertEquals("",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("false");
assertEquals("",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
}
|
@Udf
public <T extends Comparable<? super T>> T arrayMin(@UdfParameter(
description = "Array of values from which to find the minimum") final List<T> input) {
if (input == null) {
return null;
}
T candidate = null;
for (T thisVal : input) {
if (thisVal != null) {
if (candidate == null) {
candidate = thisVal;
} else if (thisVal.compareTo(candidate) < 0) {
candidate = thisVal;
}
}
}
return candidate;
}
|
@Test
public void shouldFindBigIntMin() {
final List<Long> input = Arrays.asList(1L, 3L, -2L);
assertThat(udf.arrayMin(input), is(Long.valueOf(-2)));
}
|
@ShellMethod(key = "show rollback", value = "Show details of a rollback instant")
public String showRollback(
@ShellOption(value = {"--instant"}, help = "Rollback instant") String rollbackInstant,
@ShellOption(value = {"--limit"}, help = "Limit #rows to be displayed", defaultValue = "10") Integer limit,
@ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
@ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
@ShellOption(value = {"--headeronly"}, help = "Print Header Only",
defaultValue = "false") final boolean headerOnly)
throws IOException {
HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
final List<Comparable[]> rows = new ArrayList<>();
HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
activeTimeline.getInstantDetails(new HoodieInstant(State.COMPLETED, ROLLBACK_ACTION, rollbackInstant)).get(),
HoodieRollbackMetadata.class);
metadata.getPartitionMetadata().forEach((key, value) -> Stream
.concat(value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)),
value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false)))
.forEach(fileWithDeleteStatus -> {
Comparable[] row = new Comparable[5];
row[0] = metadata.getStartRollbackTime();
row[1] = metadata.getCommitsRollback().toString();
row[2] = key;
row[3] = fileWithDeleteStatus.getLeft();
row[4] = fileWithDeleteStatus.getRight();
rows.add(row);
}));
TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_DELETED_FILE)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_SUCCEEDED);
return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows);
}
|
@Test
public void testShowRollback() throws IOException {
// get instant
HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
Stream<HoodieInstant> rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants().getInstantsAsStream();
HoodieInstant instant = rollback.findFirst().orElse(null);
assertNotNull(instant, "The instant can not be null.");
Object result = shell.evaluate(() -> "show rollback --instant " + instant.getTimestamp());
assertTrue(ShellEvaluationResultUtil.isSuccess(result));
List<Comparable[]> rows = new ArrayList<>();
// get metadata of instant
HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
// generate expected result
metadata.getPartitionMetadata().forEach((key, value) -> Stream
.concat(value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)),
value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false)))
.forEach(fileWithDeleteStatus -> {
Comparable[] row = new Comparable[5];
row[0] = metadata.getStartRollbackTime();
row[1] = metadata.getCommitsRollback().toString();
row[2] = key;
row[3] = fileWithDeleteStatus.getLeft();
row[4] = fileWithDeleteStatus.getRight();
rows.add(row);
}));
TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_DELETED_FILE)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_SUCCEEDED);
String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
expected = removeNonWordAndStripSpace(expected);
String got = removeNonWordAndStripSpace(result.toString());
assertEquals(expected, got);
}
|
@Override
public List<String> listTableNames(String dbName) {
try {
return new ArrayList<>(tableNameCache.get(dbName));
} catch (ExecutionException e) {
LOG.error("listTableNames error", e);
return Collections.emptyList();
}
}
|
@Test
public void testListTableNames() {
List<String> project = odpsMetadata.listTableNames("project");
Assert.assertEquals(Collections.singletonList("tableName"), project);
}
|
public static void downloadFromHttpUrl(String destPkgUrl, File targetFile) throws IOException {
final URL url = new URL(destPkgUrl);
final URLConnection connection = url.openConnection();
if (StringUtils.isNotEmpty(url.getUserInfo())) {
final AuthenticationDataBasic authBasic = new AuthenticationDataBasic(url.getUserInfo());
for (Map.Entry<String, String> header : authBasic.getHttpHeaders()) {
connection.setRequestProperty(header.getKey(), header.getValue());
}
}
try (InputStream in = connection.getInputStream()) {
log.info("Downloading function package from {} to {} ...", destPkgUrl, targetFile.getAbsoluteFile());
Files.copy(in, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
}
log.info("Downloading function package from {} to {} completed!", destPkgUrl, targetFile.getAbsoluteFile());
}
|
@Test
public void testDownloadFile() throws Exception {
final String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar";
final File file = Files.newTemporaryFile();
file.deleteOnExit();
assertThat(file.length()).isZero();
FunctionCommon.downloadFromHttpUrl(jarHttpUrl, file);
assertThat(file.length()).isGreaterThan(0);
}
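
Since the method reads credentials from the URL's user-info and turns them into basic-auth headers, a hypothetical invocation (example host and credentials, not taken from the source) would look like:

File target = File.createTempFile("pkg", ".jar");
FunctionCommon.downloadFromHttpUrl("https://user:secret@example.com/pkg.jar", target);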
|
@VisibleForTesting
@SuppressWarnings("nullness") // ok to have nullable elements on stream
static String renderName(String prefix, MetricResult<?> metricResult) {
MetricKey key = metricResult.getKey();
MetricName name = key.metricName();
String step = key.stepName();
return Streams.concat(
Stream.of(prefix),
Stream.of(stripSuffix(normalizePart(step))),
Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart))
.filter(not(Strings::isNullOrEmpty))
.collect(Collectors.joining("."));
}
|
@Test
public void testRenderName() {
MetricResult<Object> metricResult =
MetricResult.create(
MetricKey.create(
"myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
123,
456);
String renderedName = SparkBeamMetric.renderName("", metricResult);
assertThat(
"Metric name was not rendered correctly",
renderedName,
equalTo("myStep_one_two_three.myNameSpace__.myName__"));
}
|
public Schema mergeTables(
Map<FeatureOption, MergingStrategy> mergingStrategies,
Schema sourceSchema,
List<SqlNode> derivedColumns,
List<SqlWatermark> derivedWatermarkSpecs,
SqlTableConstraint derivedPrimaryKey) {
SchemaBuilder schemaBuilder =
new SchemaBuilder(
mergingStrategies,
sourceSchema,
(FlinkTypeFactory) validator.getTypeFactory(),
dataTypeFactory,
validator,
escapeExpression);
schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
return schemaBuilder.build();
}
|
@Test
void mergeWithIncludeFailsOnDuplicateRegularColumnAndComputeColumn() {
Schema sourceSchema = Schema.newBuilder().column("one", DataTypes.INT()).build();
List<SqlNode> derivedColumns =
Arrays.asList(
regularColumn("two", DataTypes.INT()),
computedColumn("three", plus("two", "3")),
regularColumn("three", DataTypes.INT()),
regularColumn("four", DataTypes.STRING()));
assertThatThrownBy(
() ->
util.mergeTables(
getDefaultMergingStrategies(),
sourceSchema,
derivedColumns,
Collections.emptyList(),
null))
.isInstanceOf(ValidationException.class)
.hasMessage(
"A column named 'three' already exists in the table. Duplicate columns "
+ "exist in the compute column and regular column. ");
}
|
public void migrate(Connection connection) throws SQLException {
try {
log.info("Upgrading database, this might take a while depending on the size of the database.");
List<String> messages = List.of(
repeat("*", "", 72),
"WARNING: Shutting down your server at this point will lead to a database corruption. Please wait until the database upgrade completes.",
repeat("*", "", 72)
);
for (String message : messages) {
System.err.println(message);
log.info(message);
}
disableLiquibaseConsoleLogging();
Database database = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(new JdbcConnection(connection));
newLiquibaseFor(database).update();
System.err.println("INFO: Database upgrade completed successfully.");
log.info("Database upgrade completed successfully.");
DataMigrationRunner.run(connection);
} catch (LockException e) {
String message = "Unable to migrate the database, as it is currently locked. A previous GoCD start-up may have been interrupted during migration, and you may need to " +
"1) validate no GoCD instances are running, " +
"2) check the DB health looks OK, " +
"3) unlock by connecting directly to the database and running the command noted at https://docs.liquibase.com/concepts/tracking-tables/databasechangeloglock-table.html, " +
"4) restarting GoCD.";
log.error("{} The problem was: [{}] cause: [{}]", message, ExceptionUtils.getMessage(e), ExceptionUtils.getRootCauseMessage(e), e);
throw new SQLException(message, e);
} catch (LiquibaseException e) {
String message = "Unable to migrate the database.";
log.error("{} The problem was: [{}] cause: [{}]", message, ExceptionUtils.getMessage(e), ExceptionUtils.getRootCauseMessage(e), e);
throw new SQLException(message, e);
}
}
|
@Test
public void shouldRunMigrationOnRequestedConnection() throws Exception {
try (MockedStatic<DataMigrationRunner> migration = mockStatic(DataMigrationRunner.class); Connection connection = dummyH2Connection()) {
migrator.migrate(connection);
verify(liquibase).update();
migration.verify(() -> DataMigrationRunner.run(connection));
}
}
|
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
try {
final InputStream in;
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
}
else {
in = local.getInputStream();
}
final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
.directS3Upload(true)
.timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
.timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
.size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
.parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
.name(file.getName());
final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
.createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
if(log.isDebugEnabled()) {
log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
}
final Map<Integer, TransferStatus> etags = new HashMap<>();
final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
final List<Future<TransferStatus>> parts = new ArrayList<>();
try {
final String random = new UUIDRandomStringService().random();
// Full size of file
final long size = status.getLength() + status.getOffset();
long offset = 0;
long remaining = status.getLength();
for(int partNumber = 1; remaining >= 0; partNumber++) {
final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
if(log.isDebugEnabled()) {
log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
}
final FileBuffer buffer = new FileBuffer(temporary);
new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
.transfer(in, new BufferOutputStream(buffer));
parts.add(this.submit(pool, file, temporary, buffer, throttle, listener, status,
presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
}
else {
parts.add(this.submit(pool, file, local, Buffer.noop, throttle, listener, status,
presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
}
remaining -= length;
offset += length;
if(0L == remaining) {
break;
}
}
}
finally {
in.close();
}
Interruptibles.awaitAll(parts)
.forEach(part -> etags.put(part.getPart(), part));
final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
.keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep"))
.resolutionStrategy(CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE);
if(status.getFilekey() != null) {
final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
final FileKey fileKey = reader.readValue(status.getFilekey().array());
final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
TripleCryptConverter.toCryptoPlainFileKey(fileKey),
TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer())
);
completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
}
etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
if(log.isDebugEnabled()) {
log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
}
new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
// Polling
return new SDSUploadService(session, nodeid).await(file, status, createFileUploadResponse.getUploadId()).getNode();
}
catch(CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
finally {
temp.shutdown();
// Cancel future tasks
pool.shutdown(false);
}
}
|
@Test
public void testUploadMultipleParts() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final SDSDirectS3UploadFeature feature = new SDSDirectS3UploadFeature(session, nodeid, new SDSDelegatingWriteFeature(session, nodeid, new SDSDirectS3WriteFeature(session, nodeid)));
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
final byte[] random = RandomUtils.nextBytes(21 * 1024 * 1024);
final OutputStream out = local.getOutputStream(false);
IOUtils.write(random, out);
out.close();
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
final Node node = feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
new DisabledStreamListener(), status, new DisabledLoginCallback());
assertTrue(status.isComplete());
assertNotSame(PathAttributes.EMPTY, status.getResponse());
assertTrue(new SDSFindFeature(session, nodeid).find(test));
final PathAttributes attributes = new SDSAttributesFinderFeature(session, nodeid).find(test);
assertEquals(random.length, attributes.getSize());
assertEquals(new SDSAttributesAdapter(session).toAttributes(node), attributes);
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
}
|
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
final Statement statement,
final KsqlExecutionContext ksqlEngine,
final KsqlConfig ksqlConfig,
final Map<String, Object> overrides
) {
if (!isPushV2Enabled(ksqlConfig, overrides)) {
return false;
}
if (! (statement instanceof Query)) {
return false;
}
final Query query = (Query) statement;
final SourceFinder sourceFinder = new SourceFinder();
sourceFinder.process(query.getFrom(), null);
// It will be present if it's not a join, which we don't handle
if (!sourceFinder.getSourceName().isPresent()) {
return false;
}
// Find all of the writers to this particular source.
final SourceName sourceName = sourceFinder.getSourceName().get();
final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
// See if the config or override have set the stream to be "latest"
final boolean isLatest = isLatest(ksqlConfig, overrides);
// Cannot be a pull query, i.e. must be a push
return !query.isPullQuery()
// Group by is not supported
&& !query.getGroupBy().isPresent()
// Windowing is not supported
&& !query.getWindow().isPresent()
// Having clause is not supported
&& !query.getHaving().isPresent()
// Partition by is not supported
&& !query.getPartitionBy().isPresent()
// There must be an EMIT CHANGES clause
&& (query.getRefinement().isPresent()
&& query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
// Must be reading from "latest"
&& isLatest
// We only handle a single sink source at the moment from a CTAS/CSAS
&& upstreamQueries.size() == 1
// ROWPARTITION and ROWOFFSET are not currently supported in SPQs
&& !containsDisallowedColumns(query);
}
|
@Test
public void isScalablePushQuery_true_configLatest() {
try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
// When:
expectIsSPQ(ColumnName.of("foo"), columnExtractor);
when(ksqlConfig.getKsqlStreamConfigProp(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG))
.thenReturn(Optional.of("latest"));
// Then:
assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
ImmutableMap.of()),
equalTo(true));
}
}
|
@Override
public final Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
ShardingSpherePreconditions.checkNotContains(INVALID_MEMORY_TYPES, type, () -> new SQLFeatureNotSupportedException(String.format("Get value from `%s`", type.getName())));
Object result = currentResultSetRow.getCell(columnIndex);
wasNull = null == result;
return result;
}
|
@Test
void assertGetValue() throws SQLException {
when(memoryResultSetRow.getCell(1)).thenReturn("1");
assertThat(memoryMergedResult.getValue(1, Object.class).toString(), is("1"));
}
|
@Override
public Query normalizeQuery(final Query query, final ParameterProvider parameterProvider) {
return query.toBuilder()
.query(ElasticsearchQueryString.of(this.queryStringDecorators.decorate(query.query().queryString(), parameterProvider, query)))
.filter(normalizeFilter(query.filter(), query, parameterProvider))
.searchTypes(query.searchTypes().stream().map(searchType -> normalizeSearchType(searchType, query, parameterProvider)).collect(Collectors.toSet()))
.build();
}
|
@Test
void decoratesSearchTypes() {
final Query query = Query.builder()
.searchTypes(
Collections.singleton(MessageList.builder()
.query(ElasticsearchQueryString.of("action:index"))
.build())
)
.build();
final Query normalizedQuery = decorateQueryStringsNormalizer.normalizeQuery(query, name -> Optional.empty());
assertThat(normalizedQuery.searchTypes())
.hasSize(1)
.first()
.extracting(searchType -> searchType.query().orElseThrow(IllegalStateException::new))
.matches(q -> q instanceof BackendQuery && ((BackendQuery) q).queryString().equals("Hey there!"));
}
|
@Udf(description = "Returns first substring of the input that matches the given regex pattern")
public String regexpExtract(
@UdfParameter(description = "The regex pattern") final String pattern,
@UdfParameter(description = "The input string to apply regex on") final String input
) {
return regexpExtract(pattern, input, 0);
}
|
@Test
public void shouldReturnNullOnNullValue() {
assertNull(udf.regexpExtract(null, null));
assertNull(udf.regexpExtract(null, null, null));
assertNull(udf.regexpExtract(null, "", 1));
assertNull(udf.regexpExtract("some string", null, 1));
assertNull(udf.regexpExtract("some string", "", null));
}
|
@Override
public synchronized boolean tryReturnRecordAt(
boolean isAtSplitPoint, @Nullable ShufflePosition groupStart) {
if (lastGroupStart == null && !isAtSplitPoint) {
throw new IllegalStateException(
String.format("The first group [at %s] must be at a split point", groupStart.toString()));
}
if (this.startPosition != null && groupStart.compareTo(this.startPosition) < 0) {
throw new IllegalStateException(
String.format(
"Trying to return record at %s which is before the starting position at %s",
groupStart, this.startPosition));
}
int comparedToLast = (lastGroupStart == null) ? 1 : groupStart.compareTo(this.lastGroupStart);
if (comparedToLast < 0) {
throw new IllegalStateException(
String.format(
"Trying to return group at %s which is before the last-returned group at %s",
groupStart, this.lastGroupStart));
}
if (isAtSplitPoint) {
splitPointsSeen++;
if (comparedToLast == 0) {
throw new IllegalStateException(
String.format(
"Trying to return a group at a split point with same position as the "
+ "previous group: both at %s, last group was %s",
groupStart,
lastGroupWasAtSplitPoint ? "at a split point." : "not at a split point."));
}
if (stopPosition != null && groupStart.compareTo(stopPosition) >= 0) {
return false;
}
} else {
checkState(
comparedToLast == 0,
// This case is not a violation of general RangeTracker semantics, but it is
// contrary to how GroupingShuffleReader in particular works. Hitting it would
// mean it's behaving unexpectedly.
"Trying to return a group not at a split point, but with a different position "
+ "than the previous group: last group was %s at %s, current at %s",
lastGroupWasAtSplitPoint ? "a split point" : "a non-split point",
lastGroupStart,
groupStart);
}
this.lastGroupStart = groupStart;
this.lastGroupWasAtSplitPoint = isAtSplitPoint;
return true;
}
|
@Test
public void testNonSplitPointRecordWithDifferentPosition() throws Exception {
GroupingShuffleRangeTracker tracker =
new GroupingShuffleRangeTracker(ofBytes(3, 0, 0), ofBytes(5, 0, 0));
tracker.tryReturnRecordAt(true, ofBytes(3, 4, 5));
expected.expect(IllegalStateException.class);
tracker.tryReturnRecordAt(false, ofBytes(3, 4, 6));
}
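
The invariants the exceptions above enforce can be summarized in a short hedged sequence, reusing the test's ofBytes(...) helper:

GroupingShuffleRangeTracker t = new GroupingShuffleRangeTracker(ofBytes(3, 0, 0), ofBytes(5, 0, 0));
t.tryReturnRecordAt(true, ofBytes(3, 4, 5));   // first group must be at a split point
t.tryReturnRecordAt(false, ofBytes(3, 4, 5));  // same position, non-split point: allowed
t.tryReturnRecordAt(true, ofBytes(4, 0, 0));   // advancing requires a split point
t.tryReturnRecordAt(true, ofBytes(5, 0, 0));   // at or past the stop position: returns false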
|
@Config("session-property-manager.config-file")
public FileSessionPropertyManagerConfig setConfigFile(File configFile)
{
this.configFile = configFile;
return this;
}
|
@Test
public void testDefaults()
{
assertRecordedDefaults(recordDefaults(FileSessionPropertyManagerConfig.class)
.setConfigFile(null));
}
|
@Override
public void accept(Props props) {
if (isClusterEnabled(props)) {
checkClusterProperties(props);
}
}
|
@Test
@UseDataProvider("validIPv4andIPv6Addresses")
public void accept_does_not_verify_h2_on_search_node(String host) {
mockValidHost(host);
mockLocalNonLoopback(host);
TestAppSettings settings = newSettingsForSearchNode(host, of("sonar.jdbc.url", "jdbc:h2:mem"));
// do not fail
new ClusterSettings(network).accept(settings.getProps());
}
|
public static GenericData get() {
return INSTANCE;
}
|
@Test
void arraySet() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> array = new GenericData.Array<>(10, schema);
array.clear();
for (int i = 0; i < 10; ++i)
array.add(i);
assertEquals(10, array.size());
assertEquals(Integer.valueOf(0), array.get(0));
assertEquals(Integer.valueOf(5), array.get(5));
assertEquals(Integer.valueOf(5), array.set(5, 55));
assertEquals(10, array.size());
assertEquals(Integer.valueOf(55), array.get(5));
}
|
@Override
public KeyValueIterator<Windowed<K>, V> backwardFetch(final K key) {
Objects.requireNonNull(key, "key cannot be null");
return new MeteredWindowedKeyValueIterator<>(
wrapped().backwardFetch(keyBytes(key)),
fetchSensor,
iteratorDurationSensor,
streamsMetrics,
serdes::keyFrom,
serdes::valueFrom,
time,
numOpenIterators,
openIterators
);
}
|
@Test
public void shouldThrowNullPointerOnBackwardFetchIfToIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> store.backwardFetch("from", null));
}
|
public static String compareMd5ResultString(List<String> changedGroupKeys) throws IOException {
if (null == changedGroupKeys) {
return "";
}
StringBuilder sb = new StringBuilder();
for (String groupKey : changedGroupKeys) {
String[] dataIdGroupId = GroupKey.parseKey(groupKey);
sb.append(dataIdGroupId[0]);
sb.append(Constants.WORD_SEPARATOR);
sb.append(dataIdGroupId[1]);
// if a tenant is present, append it
boolean b = (dataIdGroupId.length == DATA_ID_GROUP_ID_THREE_LEN
|| dataIdGroupId.length == DATA_ID_GROUP_ID_FOUR_LEN)
&& StringUtil.isNotBlank(dataIdGroupId[2]);
if (b) {
sb.append(Constants.WORD_SEPARATOR);
sb.append(dataIdGroupId[2]);
}
sb.append(Constants.LINE_SEPARATOR);
}
// WORD_SEPARATOR and LINE_SEPARATOR are invisible characters; URL-encoding yields %02 and %01 respectively
return URLEncoder.encode(sb.toString(), "UTF-8");
}
|
@Test
public void assetCompareMd5ResultString() throws IOException {
Assert.isTrue("".equals(Md5Util.compareMd5ResultString(null)));
String result = "prescription%02dynamic-threadpool-example%02message-consume%01" +
"prescription%02dynamic-threadpool-example%02message-produce%01";
List<String> changedGroupKeys = new ArrayList<>(2);
changedGroupKeys.add("prescription+dynamic-threadpool-example+message-consume+12");
changedGroupKeys.add("prescription+dynamic-threadpool-example+message-produce+11");
Assert.isTrue(result.equals(Md5Util.compareMd5ResultString(changedGroupKeys)));
}
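
The %02 and %01 escapes in the expected string come from URL-encoding the separator characters. Assuming Constants.WORD_SEPARATOR is '\u0002' and Constants.LINE_SEPARATOR is '\u0001' (both invisible), a quick check is:

String encoded = java.net.URLEncoder.encode("dataId\u0002group\u0001", "UTF-8"); // "dataId%02group%01"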
|
public static String[] splitToSteps(String path, boolean preserveRootAsStep) {
if (path == null) {
return null;
}
if (preserveRootAsStep && path.equals(SHARE_ROOT)) {
return new String[] { SHARE_ROOT };
}
var includeRoot = preserveRootAsStep && path.startsWith(SHARE_ROOT);
if (!includeRoot) {
path = ensureRelative(path);
}
// no ambiguity such as "/|\\\\"
var pathSteps = path.split("" + PATH_SEPARATOR);
if (includeRoot) {
pathSteps[0] = SHARE_ROOT; // replace leading ""
}
return pathSteps;
}
|
@Test
void splitAbsoluteWithoutPreservingRootShouldReturnStepsOnly() {
assertArrayEquals(new String[] { "1", "2" }, FilesPath.splitToSteps("/1/2", false));
}
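
Assuming SHARE_ROOT is "/" and PATH_SEPARATOR is '/', and that ensureRelative merely strips a leading separator, the root-preserving branch would behave as follows (a sketch, not asserted by this row's test):

FilesPath.splitToSteps("/", true);    // ["/"]
FilesPath.splitToSteps("/1/2", true); // ["/", "1", "2"] -- the leading "" is replaced by the root
FilesPath.splitToSteps("1/2", false); // ["1", "2"]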
|
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
}
|
@Test
public void shouldFormatSearchedCaseExpression() {
final SearchedCaseExpression expression = new SearchedCaseExpression(
Collections.singletonList(
new WhenClause(new StringLiteral("foo"),
new LongLiteral(1))),
Optional.empty());
assertThat(ExpressionFormatter.formatExpression(expression), equalTo("(CASE WHEN 'foo' THEN 1 END)"));
}
|
public static URI parse(String gluePath) {
requireNonNull(gluePath, "gluePath may not be null");
if (gluePath.isEmpty()) {
return rootPackageUri();
}
// Legacy from the Cucumber Eclipse plugin
// Older versions of Cucumber allowed it.
if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) {
return rootPackageUri();
}
if (nonStandardPathSeparatorInUse(gluePath)) {
String standardized = replaceNonStandardPathSeparator(gluePath);
return parseAssumeClasspathScheme(standardized);
}
if (isProbablyPackage(gluePath)) {
String path = resourceNameOfPackageName(gluePath);
return parseAssumeClasspathScheme(path);
}
return parseAssumeClasspathScheme(gluePath);
}
|
@Test
void can_parse_absolute_path_form() {
URI uri = GluePath.parse("/com/example/app");
assertAll(
() -> assertThat(uri.getScheme(), is("classpath")),
() -> assertThat(uri.getSchemeSpecificPart(), is("/com/example/app")));
}
|
@Override
public void metricChange(final KafkaMetric metric) {
if (!THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
|| !StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group())) {
return;
}
addMetric(
metric,
getQueryId(metric),
getTopic(metric)
);
}
|
@Test
public void shouldAggregateMetricsByQueryIdInSharedRuntimes() {
// Given:
final Map<String, String> sharedRuntimeQueryTags = ImmutableMap.of(
"logical_cluster_id", "lksqlc-12345",
"query-id", "CTAS_TEST_5",
"member", "_confluent_blahblah_query-1-blahblah",
"topic", TOPIC_NAME
);
listener.metricChange(mockMetric(
BYTES_CONSUMED_TOTAL,
2D,
ImmutableMap.of(
"thread-id", "_confluent_blahblah_query-1-blahblah",
"task-id", "CTAS_TEST_5__" + TASK_ID_1,
"processor-node-id", PROCESSOR_NODE_ID,
"topic", TOPIC_NAME))
);
Measurable bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, sharedRuntimeQueryTags);
Object bytesConsumedValue =
bytesConsumed.measure(new MetricConfig().tags(sharedRuntimeQueryTags), 0L);
assertThat(bytesConsumedValue, equalTo(2D));
// When:
listener.metricChange(mockMetric(
BYTES_CONSUMED_TOTAL,
15D,
ImmutableMap.of(
"thread-id", "_confluent_blahblah_query-1-blahblah",
"task-id", "CTAS_TEST_5__" + TASK_ID_2,
"processor-node-id", PROCESSOR_NODE_ID,
"topic", TOPIC_NAME
))
);
// Then:
bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, sharedRuntimeQueryTags);
bytesConsumedValue = bytesConsumed.measure(new MetricConfig().tags(sharedRuntimeQueryTags), 0L);
assertThat(bytesConsumedValue, equalTo(17D));
}
|
@Override
public boolean isEmpty() {
return size() == 0;
}
|
@Test
public void isEmpty_whenEmpty() {
assertTrue(queue.isEmpty());
}
|