focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
    // Delegate the per-criterion JSON construction to the helper.
    return new EncodeCriterionCodecHelper(criterion, context).encode();
}
|
@Test
public void matchMetadataTest() {
    // Encode a metadata criterion and verify the JSON matches it.
    final Criterion metadataCriterion = Criteria.matchMetadata(0xabcdL);
    final ObjectNode encoded = criterionCodec.encode(metadataCriterion, context);
    assertThat(encoded, matchesCriterion(metadataCriterion));
}
|
/**
 * Puts {@code table} into the recycle bin of database {@code dbId}.
 *
 * If another table with the same name is already in the bin, that older table is
 * renamed to a unique illegal name, marked unrecoverable, and scheduled for
 * immediate deletion, so that id- and name-keyed bookkeeping stay the same size.
 *
 * @param dbId        database the table belonged to
 * @param table       the dropped table to recycle
 * @param recoverable false when the table was force dropped; it will then be
 *                    erased in the next cleanup round
 */
public synchronized void recycleTable(long dbId, Table table, boolean recoverable) {
    // Idempotence guard: a table id may only appear once in the bin.
    if (idToTableInfo.row(dbId).containsKey(table.getId())) {
        LOG.error("table[{}-{}] already in recycle bin.", table.getId(), table.getName());
        return;
    }
    String tableName = table.getName();
    RecycleTableInfo newTableInfo = new RecycleTableInfo(dbId, table, recoverable);
    RecycleTableInfo oldTableInfo = nameToTableInfo.get(dbId, tableName);
    // There's already a table with the same name in the recycle bin.
    if (oldTableInfo != null) {
        // Our serialization rules require `idToTableInfo` and `nameToTableInfo` to be the same size, so
        // we need to make sure that tables with different ids have different names (in the recycle bin), so
        // here we rename the previous table's name.
        Table oldTable = oldTableInfo.getTable();
        oldTable.setName(uniqueIllegalTableName(tableName, oldTable.getId()));
        // Mark the renamed table as unrecoverable, as if it had been force dropped, to maintain a similar
        // behavior as older versions.
        oldTableInfo.setRecoverable(false);
        // Re-key the old entry: remove it under the original name, re-insert under the new unique name.
        nameToTableInfo.remove(dbId, table.getName());
        nameToTableInfo.put(dbId, oldTable.getName(), oldTableInfo);
        // Speed up the deletion of this renamed table by modifying its recycle time to zero
        idToRecycleTime.put(oldTable.getId(), 0L);
    }
    // If the table was force dropped, set recycle time to zero so that this table will be deleted immediately
    // in the next cleanup round.
    idToRecycleTime.put(table.getId(), !recoverable ? 0 : System.currentTimeMillis());
    idToTableInfo.put(dbId, table.getId(), newTableInfo);
    nameToTableInfo.put(dbId, table.getName(), newTableInfo);
    LOG.info("Finished put table '{}' to recycle bin. tableId: {}", table.getName(), table.getId());
}
|
/**
 * End-to-end exercise of {@code CatalogRecycleBin}: recycling tables (including a
 * same-name collision), time-based erasure, and the delayed-erase ("erase later")
 * grace mechanism.
 */
@Test
public void testRecycleTable(@Mocked GlobalStateMgr globalStateMgr, @Mocked EditLog editLog) {
    Table table1 = new Table(111, "uno", Table.TableType.VIEW, null);
    // Same name "dos" as table2 but different id — triggers the rename-on-collision path.
    Table table2SameName = new Table(22, "dos", Table.TableType.VIEW, null);
    Table table2 = new Table(222, "dos", Table.TableType.VIEW, null);
    // Stub out global state / edit log so erase operations don't touch real services.
    new Expectations() {
        {
            GlobalStateMgr.getCurrentState();
            minTimes = 0;
            result = globalStateMgr;
        }
    };
    new Expectations() {
        {
            globalStateMgr.getEditLog();
            minTimes = 0;
            result = editLog;
        }
    };
    new Expectations() {
        {
            editLog.logEraseMultiTables((List<Long>) any);
            minTimes = 0;
            result = null;
        }
    };
    // 1. add 2 tables
    long dbId = 1;
    CatalogRecycleBin recycleBin = new CatalogRecycleBin();
    recycleBin.recycleTable(dbId, table1, true);
    recycleBin.recycleTable(dbId, table2SameName, true);
    recycleBin.recycleTable(dbId, table2, true);
    Assert.assertEquals(recycleBin.getTables(dbId), Arrays.asList(table1, table2SameName, table2));
    Assert.assertSame(recycleBin.getTable(dbId, table1.getId()), table1);
    Assert.assertSame(recycleBin.getTable(dbId, table2.getId()), table2);
    Assert.assertTrue(recycleBin.idToRecycleTime.containsKey(table1.getId()));
    Assert.assertTrue(recycleBin.idToRecycleTime.containsKey(table2.getId()));
    // 2. manually set table expire time & recycle table1
    Config.catalog_trash_expire_second = 3600;
    long now = System.currentTimeMillis();
    long expireFromNow = now - 3600 * 1000L;
    // Back-date table1 past the expiry threshold so the next eraseTable removes it.
    recycleBin.idToRecycleTime.put(table1.getId(), expireFromNow - 1000);
    recycleBin.eraseTable(now);
    Assert.assertEquals(recycleBin.getTables(dbId), List.of(table2));
    Assert.assertNull(recycleBin.getTable(dbId, table1.getId()));
    Assert.assertSame(recycleBin.getTable(dbId, table2.getId()), table2);
    // 3. set recycle later, check if recycle now
    CatalogRecycleBin.LATE_RECYCLE_INTERVAL_SECONDS = 10;
    Assert.assertFalse(recycleBin.ensureEraseLater(table1.getId(), now)); // already erased
    Assert.assertTrue(recycleBin.ensureEraseLater(table2.getId(), now));
    Assert.assertEquals(0, recycleBin.enableEraseLater.size());
    // Not yet expired -> table2 gets added to the erase-later set.
    recycleBin.idToRecycleTime.put(table2.getId(), expireFromNow + 1000);
    Assert.assertTrue(recycleBin.ensureEraseLater(table2.getId(), now));
    Assert.assertEquals(1, recycleBin.enableEraseLater.size());
    Assert.assertTrue(recycleBin.enableEraseLater.contains(table2.getId()));
    // 4. won't erase on expire time
    // Expired, but within the late-recycle grace interval, so the table survives.
    recycleBin.idToRecycleTime.put(table2.getId(), expireFromNow - 1000);
    recycleBin.eraseTable(now);
    Assert.assertEquals(recycleBin.getTable(dbId, table2.getId()), table2);
    Assert.assertEquals(1, recycleBin.idToRecycleTime.size());
    // 5. will erase after expire time + latency time
    recycleBin.idToRecycleTime.put(table2.getId(), expireFromNow - 11000);
    Assert.assertFalse(recycleBin.ensureEraseLater(table2.getId(), now));
    recycleBin.eraseTable(now);
    Assert.assertNull(recycleBin.getTable(dbId, table2.getId()));
    Assert.assertEquals(0, recycleBin.idToRecycleTime.size());
    Assert.assertEquals(0, recycleBin.enableEraseLater.size());
}
|
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId,
                                                final ProcessorContext context,
                                                final long streamTime) {
    // Fetch (or create) the live segment first, then sweep out anything expired.
    final KeyValueSegment liveSegment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    cleanupExpiredSegments(streamTime);
    return liveSegment;
}
|
@Test
public void shouldGetSegmentForTimestamp() {
    // Create two live segments; timestamp 0 must resolve to the first one.
    final KeyValueSegment expected = segments.getOrCreateSegmentIfLive(0, context, -1L);
    segments.getOrCreateSegmentIfLive(1, context, -1L);
    assertEquals(expected, segments.getSegmentForTimestamp(0L));
}
|
@Override
protected String getDestination(Exchange exchange, Endpoint endpoint) {
    // toD (dynamic destination) publishes the target in this header; if absent,
    // fall back to the endpoint-derived destination.
    final String headerDestination =
            exchange.getMessage().getHeader("CamelJmsDestinationName", String.class);
    return headerDestination != null ? headerDestination : super.getDestination(exchange, endpoint);
}
|
@Test
public void testGetDestinationDynamic() {
    // The header set by toD must win over the endpoint URI.
    Message message = Mockito.mock(Message.class);
    Exchange exchange = Mockito.mock(Exchange.class);
    Endpoint endpoint = Mockito.mock(Endpoint.class);
    Mockito.when(exchange.getIn()).thenReturn(message);
    Mockito.when(exchange.getMessage()).thenReturn(message);
    Mockito.when(message.getHeader("CamelJmsDestinationName", String.class)).thenReturn("gauda");
    Mockito.when(endpoint.getEndpointUri()).thenReturn("jms:${header.foo}?clientId=123");
    AbstractMessagingSpanDecorator decorator = new JmsSpanDecorator();
    assertEquals("gauda", decorator.getDestination(exchange, endpoint));
}
|
/**
 * Parses a string of {@code key=value} records, separated by commas and/or
 * whitespace, into an unmodifiable map.
 *
 * Only the FIRST '=' in each record splits key from value, so values may
 * themselves contain '=' (e.g. "k=a=b" maps k -> "a=b"). Keys and values are
 * trimmed. Note that a value therefore cannot contain spaces or commas, since
 * those act as record separators.
 *
 * @param str the encoded map, may be null
 * @return an unmodifiable map; empty when {@code str} is null
 * @throws RuntimeException if a record contains no '=' separator
 */
public static Map<String, String> parseMap(String str) {
    if (str == null) {
        return Collections.emptyMap();
    }
    StringTokenizer tok = new StringTokenizer(str, ", \t\n\r");
    HashMap<String, String> map = new HashMap<>();
    while (tok.hasMoreTokens()) {
        String record = tok.nextToken();
        int endIndex = record.indexOf('=');
        if (endIndex == -1) {
            // Include the offending record so callers can locate the bad input.
            throw new RuntimeException("Failed to parse Map from String: invalid record \"" + record + "\"");
        }
        String key = record.substring(0, endIndex);
        String value = record.substring(endIndex + 1);
        map.put(key.trim(), value.trim());
    }
    return Collections.unmodifiableMap(map);
}
|
/** Verifies that only the first '=' splits a record, so values may contain '='. */
@Test
public void testParseMapValueWithEquals() {
    String stringMap = "key1=value1\n" +
            "key2=value2=value3";
    Map<String, String> m = parseMap(stringMap);
    assertThat(m, aMapWithSize(2));
    assertThat(m, hasEntry("key1", "value1"));
    // The second '=' belongs to the value, not the separator.
    assertThat(m, hasEntry("key2", "value2=value3"));
}
|
/**
 * Imports the given classes. Varargs convenience overload that delegates to the
 * collection-based {@code importClasses(Iterable)}.
 */
@PublicAPI(usage = ACCESS)
public JavaClasses importClasses(Class<?>... classes) {
    return importClasses(Arrays.asList(classes));
}
|
/** Importing a JDK class (java.io.File) should yield exactly that class. */
@Test
public void imports_jdk_classes() {
    JavaClasses classes = new ClassFileImporter().importClasses(File.class);
    assertThatTypes(classes).matchExactly(File.class);
}
|
/** Removes the built-in role with the given name; delegates to the role remover. */
public void removeBuiltinRole(final String roleName) {
    this.roleRemover.removeBuiltinRole(roleName);
}
|
/** The helper must forward role removal to the injected role remover verbatim. */
@Test
public void usesRoleRemoverToRemoveRoles() {
    migrationHelpers.removeBuiltinRole("John");
    verify(roleRemover).removeBuiltinRole("John");
}
|
/**
 * Executes the DRL model identified by {@code toEvaluate} in a fresh KieSession.
 *
 * Returns {@code Optional.empty()} when the session cannot be loaded (logged as a
 * warning) or resolves to null; failures during rule firing are wrapped in a
 * {@link KieRuntimeServiceException}.
 *
 * NOTE(review): the KieSession does not appear to be disposed on any path here —
 * presumably cleanup happens elsewhere or the session is short-lived; verify there
 * is no session leak.
 */
public static Optional<EfestoOutputDrlMap> execute(BaseEfestoInput<EfestoMapInputDTO> toEvaluate, EfestoRuntimeContext context) {
    KieSession kieSession;
    try {
        kieSession = loadKieSession(toEvaluate.getModelLocalUriId(), context);
    } catch (Exception e) {
        // Loading failure is treated as "cannot execute", not as a hard error.
        logger.warn("{} can not execute {}",
                DrlRuntimeHelper.class.getName(),
                toEvaluate.getModelLocalUriId());
        return Optional.empty();
    }
    if (kieSession == null) {
        return Optional.empty();
    }
    try {
        MapInputSessionUtils.Builder builder = MapInputSessionUtils.builder(kieSession, toEvaluate.getInputData());
        final MapInputSessionUtils mapInputSessionUtils = builder.build();
        // Capture the identifier before firing: it keys the output's model URI.
        long identifier = kieSession.getIdentifier();
        mapInputSessionUtils.fireAllRules();
        LocalComponentIdDrlSession modelLocalUriId = new EfestoAppRoot()
                .get(KieDrlComponentRoot.class)
                .get(DrlSessionIdFactory.class)
                .get(toEvaluate.getModelLocalUriId().basePath(), identifier);
        // The payload is null: results are communicated via the session's globals.
        return Optional.of(new EfestoOutputDrlMap(modelLocalUriId, null));
    } catch (Exception e) {
        // Unlike load failures, execution failures are surfaced to the caller.
        throw new KieRuntimeServiceException(String.format("%s failed to execute %s",
                DrlRuntimeHelper.class.getName(),
                toEvaluate.getModelLocalUriId()), e);
    }
}
|
/**
 * Fires the loan-approval DRL model over three applications and checks that
 * exactly the first one lands in the {@code approvedApplications} global.
 */
@Test
void execute() {
    List<Object> inserts = new ArrayList<>();
    inserts.add(new LoanApplication("ABC10001", new Applicant("John", 45), 2000, 1000));
    inserts.add(new LoanApplication("ABC10002", new Applicant("Paul", 25), 5000, 100));
    inserts.add(new LoanApplication("ABC10015", new Applicant("George", 12), 1000, 100));
    // Rules publish approvals into this global list.
    List<LoanApplication> approvedApplications = new ArrayList<>();
    final Map<String, Object> globals = new HashMap<>();
    globals.put("approvedApplications", approvedApplications);
    globals.put("maxAmount", 5000);
    EfestoMapInputDTO darMapInputDTO = new EfestoMapInputDTO(inserts, globals, Collections.emptyMap(), Collections.emptyMap(), "modelname", "packageName");
    EfestoInputDrlMap darInputDrlMap =
            new EfestoInputDrlMap(new ModelLocalUriId(LocalUri.parse("/drl/" + basePath)), darMapInputDTO);
    EfestoRuntimeContext context = EfestoRuntimeContextUtils.buildWithParentClassLoader(Thread.currentThread().getContextClassLoader());
    Optional<EfestoOutputDrlMap> retrieved = DrlRuntimeHelper.execute(darInputDrlMap, context);
    assertThat(retrieved).isNotNull().isPresent();
    // Results arrive via the global, not via the returned output payload.
    assertThat(approvedApplications).hasSize(1);
    LoanApplication approvedApplication = approvedApplications.get(0);
    assertThat(approvedApplication).isEqualTo(inserts.get(0));
}
|
/**
 * Merges two property sets into a new {@link Properties} instance.
 *
 * Entries from {@code newProperties} override same-key entries from
 * {@code original}. Neither argument is modified. A {@code null} argument is
 * treated as empty (previously this threw a NullPointerException).
 *
 * @param original      base properties, may be null
 * @param newProperties overriding properties, may be null
 * @return a freshly allocated merged Properties object
 */
public static Properties mergeProps(Properties original, Properties newProperties) {
    Properties props = new Properties();
    if (original != null) {
        props.putAll(original);
    }
    if (newProperties != null) {
        props.putAll(newProperties);
    }
    return props;
}
|
@Test
public void test_mergeProps() {
    Properties existingProps = new Properties();
    existingProps.put("B", "D");
    existingProps.put("D", "E");
    Properties newProps = new Properties();
    newProps.put("A", "B");
    newProps.put("B", "C");
    Properties mergedProps = Util.mergeProps(existingProps, newProps);
    // One key ("B") collides, so the merged size is one less than the sum.
    assertThat(mergedProps.size()).isEqualTo(existingProps.size() + newProps.size() - 1);
    // The colliding key must carry the value from newProps.
    assertThat(mergedProps).contains(
            entry("A", "B"),
            entry("B", "C"),
            entry("D", "E")
    );
}
|
/** Returns the number of send failures recorded so far. */
@Override
public int getFailures() {
    return failures;
}
|
/** A freshly opened Graphite client must report zero failures. */
@Test
public void measuresFailures() throws IOException {
    try (Graphite graphite = new Graphite(address, socketFactory)) {
        assertThat(graphite.getFailures()).isZero();
    }
}
|
/**
 * Replays a single coordinator record into the appropriate in-memory manager.
 *
 * The record key's version acts as the record-type discriminator: versions 0-1
 * are offset-commit records handled by the offset metadata manager; everything
 * else (group metadata, consumer-group and share-group records) goes to the
 * group metadata manager. Tombstones arrive as a null value, which
 * {@code Utils.messageOrNull} converts to null for the managers.
 *
 * @param offset        log offset of the record
 * @param producerId    producer id of the batch containing the record
 * @param producerEpoch producer epoch of the batch containing the record
 * @param record        the record to replay
 * @throws IllegalStateException on an unknown key version
 */
@Override
public void replay(
    long offset,
    long producerId,
    short producerEpoch,
    CoordinatorRecord record
) throws RuntimeException {
    ApiMessageAndVersion key = record.key();
    ApiMessageAndVersion value = record.value();
    switch (key.version()) {
        // 0-1: offset commits (only these carry offset/producer info onward).
        case 0:
        case 1:
            offsetMetadataManager.replay(
                offset,
                producerId,
                (OffsetCommitKey) key.message(),
                (OffsetCommitValue) Utils.messageOrNull(value)
            );
            break;
        // 2: classic group metadata.
        case 2:
            groupMetadataManager.replay(
                (GroupMetadataKey) key.message(),
                (GroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        // 3-8: consumer-group records.
        case 3:
            groupMetadataManager.replay(
                (ConsumerGroupMetadataKey) key.message(),
                (ConsumerGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 4:
            groupMetadataManager.replay(
                (ConsumerGroupPartitionMetadataKey) key.message(),
                (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 5:
            groupMetadataManager.replay(
                (ConsumerGroupMemberMetadataKey) key.message(),
                (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 6:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMetadataKey) key.message(),
                (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 7:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMemberKey) key.message(),
                (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;
        case 8:
            groupMetadataManager.replay(
                (ConsumerGroupCurrentMemberAssignmentKey) key.message(),
                (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;
        // 9-14: share-group records.
        case 9:
            groupMetadataManager.replay(
                (ShareGroupPartitionMetadataKey) key.message(),
                (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 10:
            groupMetadataManager.replay(
                (ShareGroupMemberMetadataKey) key.message(),
                (ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 11:
            groupMetadataManager.replay(
                (ShareGroupMetadataKey) key.message(),
                (ShareGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 12:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMetadataKey) key.message(),
                (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 13:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMemberKey) key.message(),
                (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;
        case 14:
            groupMetadataManager.replay(
                (ShareGroupCurrentMemberAssignmentKey) key.message(),
                (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;
        default:
            throw new IllegalStateException("Received an unknown record type " + key.version()
                + " in " + record);
    }
}
|
/**
 * A record whose key version is 11 (ShareGroupMetadata) must be routed to the
 * group metadata manager with the cast key/value pair.
 */
@Test
public void testReplayShareGroupMetadata() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    ShareGroupMetadataKey key = new ShareGroupMetadataKey();
    ShareGroupMetadataValue value = new ShareGroupMetadataValue();
    // Key version 11 is the discriminator that selects the share-group-metadata branch.
    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
        new ApiMessageAndVersion(key, (short) 11),
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupMetadataManager, times(1)).replay(key, value);
}
|
/**
 * Parses a labeled metric name of the form {@code base<delim>labels} into a
 * {@link ParsedMetricName}.
 *
 * Returns empty for an empty input or an empty base name. When no label section
 * is present, or the label section cannot be parsed as key/value pairs, only the
 * base name is returned (malformed labels are deliberately dropped rather than
 * failing the whole metric).
 *
 * @param metricName the raw metric name, possibly carrying encoded labels
 * @return the parsed name, or empty when no base name can be extracted
 */
public static Optional<ParsedMetricName> parseMetricName(String metricName) {
    if (metricName.isEmpty()) {
        return Optional.empty();
    }
    // limit(2): split off the label section at the FIRST delimiter only.
    List<String> metricNameSplit =
        Splitter.on(METRIC_NAME_DELIMITER).limit(2).splitToList(metricName);
    if (metricNameSplit.isEmpty() || metricNameSplit.get(0).isEmpty()) {
        return Optional.empty();
    }
    if (metricNameSplit.size() == 1) {
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0)));
    }
    Splitter.MapSplitter splitter =
        Splitter.on(LABEL_DELIMITER).omitEmptyStrings().withKeyValueSeparator(METRIC_KV_DELIMITER);
    try {
        Map<String, String> labels = splitter.split(metricNameSplit.get(1));
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0), labels));
    } catch (IllegalArgumentException e) {
        // Malformed label section: keep the base name, discard the labels.
        return Optional.of(ParsedMetricName.create(metricNameSplit.get(0)));
    }
}
|
/**
 * Round-trip: build a labeled metric name with the builder, then parse it back
 * and check both the encoded string and the parsed base name + labels.
 */
@Test
public void testParseMetricName_successfulLabels() {
    String baseMetricName = "baseMetricName";
    LabeledMetricNameUtils.MetricNameBuilder builder =
        LabeledMetricNameUtils.MetricNameBuilder.baseNameBuilder(baseMetricName);
    builder.addLabel("key1", "val1");
    builder.addLabel("key2", "val2");
    builder.addLabel("key3", "val3");
    String metricName = builder.build("namespace").getName();
    Optional<LabeledMetricNameUtils.ParsedMetricName> parsedName =
        LabeledMetricNameUtils.parseMetricName(metricName);
    // Expected wire format: base '*' then ';'-separated 'key:value' pairs.
    String expectedMetricName = "baseMetricName*key1:val1;key2:val2;key3:val3;";
    ImmutableMap<String, String> expectedLabels =
        ImmutableMap.of("key1", "val1", "key2", "val2", "key3", "val3");
    LabeledMetricNameUtils.ParsedMetricName expectedParsedName =
        LabeledMetricNameUtils.ParsedMetricName.create(baseMetricName, expectedLabels);
    assertThat(metricName, equalTo(expectedMetricName));
    assertThat(parsedName.isPresent(), equalTo(true));
    assertThat(parsedName.get(), equalTo(expectedParsedName));
    assertThat(parsedName.get().getBaseName(), equalTo(baseMetricName));
    assertThat(parsedName.get().getMetricLabels(), equalTo(expectedLabels));
}
|
/**
 * Computes a combined hash of all TLS trusted certificates and client
 * authentication secrets, used to detect configuration changes.
 *
 * The TLS part sums the hash codes of every trusted cert; the auth part adds
 * hashes of whichever secrets the auth type can change (password for SCRAM/PLAIN,
 * cert+key for TLS, tokens/secret/extra trust certs for OAuth). Unknown auth
 * types contribute nothing beyond the TLS hash.
 *
 * @param secretOperations  secret accessor
 * @param namespace         namespace holding the secrets
 * @param auth              client authentication, may be null
 * @param certSecretSources TLS trusted certificate sources, may be null/empty
 * @return future completing with the combined hash
 */
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
    Future<Integer> tlsFuture;
    if (certSecretSources == null || certSecretSources.isEmpty()) {
        tlsFuture = Future.succeededFuture(0);
    } else {
        // get all TLS trusted certs, compute hash from each of them, sum hashes
        tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
                getCertificateAsync(secretOperations, namespace, certSecretSource)
                        .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
                .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
    }
    if (auth == null) {
        return tlsFuture;
    }
    // compute hash from Auth
    if (auth instanceof KafkaClientAuthenticationScram || auth instanceof KafkaClientAuthenticationPlain) {
        // SCRAM and PLAIN are handled identically: only passwordSecret can be changed
        return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
    } else if (auth instanceof KafkaClientAuthenticationTls) {
        // custom cert can be used (and changed)
        return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
                tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
                        .compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
    } else if (auth instanceof KafkaClientAuthenticationOAuth) {
        KafkaClientAuthenticationOAuth oauth = (KafkaClientAuthenticationOAuth) auth;
        // OAuth may carry its own set of trusted certificates plus up to three secrets.
        List<Future<Integer>> futureList = oauth.getTlsTrustedCertificates() == null ?
                new ArrayList<>() : oauth.getTlsTrustedCertificates().stream().map(certSecretSource ->
                getCertificateAsync(secretOperations, namespace, certSecretSource)
                        .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
        futureList.add(tlsFuture);
        futureList.add(addSecretHash(secretOperations, namespace, oauth.getAccessToken()));
        futureList.add(addSecretHash(secretOperations, namespace, oauth.getClientSecret()));
        futureList.add(addSecretHash(secretOperations, namespace, oauth.getRefreshToken()));
        return Future.join(futureList)
                .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
    } else {
        // unknown Auth type
        return tlsFuture;
    }
}
|
/**
 * All four secrets resolve to the same value, so the combined hash is four times
 * the hash of "value". The mocks return already-completed futures, so the result
 * future is complete by the time we assert on it — asserting directly on the
 * future (instead of inside an onComplete callback) guarantees the assertions
 * actually run and their failures are reported.
 */
@Test
void getHashOk() {
    String namespace = "ns";
    GenericSecretSource at = new GenericSecretSourceBuilder()
            .withSecretName("top-secret-at")
            .withKey("key")
            .build();
    GenericSecretSource cs = new GenericSecretSourceBuilder()
            .withSecretName("top-secret-cs")
            .withKey("key")
            .build();
    GenericSecretSource rt = new GenericSecretSourceBuilder()
            .withSecretName("top-secret-rt")
            .withKey("key")
            .build();
    KafkaClientAuthentication kcu = new KafkaClientAuthenticationOAuthBuilder()
            .withAccessToken(at)
            .withRefreshToken(rt)
            .withClientSecret(cs)
            .build();
    CertSecretSource css = new CertSecretSourceBuilder()
            .withCertificate("key")
            .withSecretName("css-secret")
            .build();
    Secret secret = new SecretBuilder()
            .withData(Map.of("key", "value"))
            .build();
    SecretOperator secretOps = mock(SecretOperator.class);
    when(secretOps.getAsync(eq(namespace), eq("top-secret-at"))).thenReturn(Future.succeededFuture(secret));
    when(secretOps.getAsync(eq(namespace), eq("top-secret-rt"))).thenReturn(Future.succeededFuture(secret));
    when(secretOps.getAsync(eq(namespace), eq("top-secret-cs"))).thenReturn(Future.succeededFuture(secret));
    when(secretOps.getAsync(eq(namespace), eq("css-secret"))).thenReturn(Future.succeededFuture(secret));
    Future<Integer> res = VertxUtil.authTlsHash(secretOps, "ns", kcu, singletonList(css));
    assertThat(res.succeeded(), is(true));
    // we are summing "value" hash four times
    assertThat(res.result(), is("value".hashCode() * 4));
}
|
@Override
protected Set<StepField> getUsedFields( final JsonInputMeta meta ) {
    final Set<StepField> usedFields = new HashSet<>();
    // A field is only "used" when the step reads its file names from an incoming field.
    if ( meta.isAcceptingFilenames() && StringUtils.isNotEmpty( meta.getAcceptingField() ) ) {
        for ( final String inputStepName : getInputStepNames( meta, meta.getAcceptingField() ) ) {
            usedFields.add( new StepField( inputStepName, meta.getAcceptingField() ) );
        }
    }
    return usedFields;
}
|
/**
 * When file names come from an incoming field, the analyzer must report exactly
 * one used field: (previous step, accepting field).
 */
@Test
public void testGetUsedFields_fileNameFromField() throws Exception {
    when( meta.isAcceptingFilenames() ).thenReturn( true );
    when( meta.getAcceptingField() ).thenReturn( "filename" );
    // NOTE(review): stepNames is built here but never wired into the analyzer —
    // presumably getInputStepNames is stubbed elsewhere in this fixture; verify.
    Set<String> stepNames = new HashSet<>();
    stepNames.add( "previousStep" );
    Set<StepField> usedFields = analyzer.getUsedFields( meta );
    assertNotNull( usedFields );
    assertEquals( 1, usedFields.size() );
    StepField used = usedFields.iterator().next();
    assertEquals( "previousStep", used.getStepName() );
    assertEquals( "filename", used.getFieldName() );
}
|
/**
 * Authenticates a request either as a trusted proxy acting on behalf of a doAs
 * user, or — when no doAs parameter is present and fallback is allowed — as a
 * plain SPNEGO principal.
 *
 * Returns null for non-HTTP requests. In the trusted-proxy path the returned
 * identity wraps a TrustedProxyPrincipal; its user identity is null when either
 * the service is not an authorized proxy or the doAs user cannot be authorized.
 */
@Override
public UserIdentity login(String username, Object credentials, ServletRequest request) {
    if (!(request instanceof HttpServletRequest)) {
        return null;
    }
    String doAsUser = request.getParameter(DO_AS);
    if (doAsUser == null && _fallbackToSpnegoAllowed) {
        // Plain SPNEGO fallback: no proxying involved.
        // NOTE(review): fallbackIdentity is dereferenced without a null check —
        // presumably the fallback login service never returns null; confirm.
        SpnegoUserIdentity fallbackIdentity = (SpnegoUserIdentity) _fallbackSpnegoLoginService.login(username, credentials, request);
        SpnegoUserPrincipal fallbackPrincipal = (SpnegoUserPrincipal) fallbackIdentity.getUserPrincipal();
        if (!fallbackIdentity.isEstablished()) {
            LOG.info("Service user {} isn't authorized as spnego fallback principal", fallbackPrincipal.getName());
        }
        return fallbackIdentity;
    } else {
        // Trusted-proxy path: authenticate the service, then authorize the doAs user.
        SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request);
        SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal();
        LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName());
        UserIdentity doAsIdentity = null;
        if (doAsUser != null && !doAsUser.isEmpty()) {
            doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser);
        }
        Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal);
        Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet());
        if (!serviceIdentity.isEstablished()) {
            // Service itself failed SPNEGO: return an identity with no end user attached.
            LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName());
            return new SpnegoUserIdentity(subject, principal, null);
        } else {
            if (doAsIdentity == null) {
                LOG.info("Couldn't authorize user {}", doAsUser);
            }
            return new SpnegoUserIdentity(subject, principal, doAsIdentity);
        }
    }
}
|
/**
 * With no doAs parameter and fallback enabled, login must delegate to the
 * fallback SPNEGO service and return its (established) identity unchanged.
 */
@Test
public void testFallbackToSpnego() {
    SpnegoLoginServiceWithAuthServiceLifecycle mockSpnegoLoginService = mock(SpnegoLoginServiceWithAuthServiceLifecycle.class);
    SpnegoLoginServiceWithAuthServiceLifecycle mockFallbackLoginService = mock(SpnegoLoginServiceWithAuthServiceLifecycle.class);
    SpnegoUserPrincipal servicePrincipal = new SpnegoUserPrincipal(TEST_SERVICE_USER, ENCODED_TOKEN);
    UserIdentity serviceDelegate = mock(UserIdentity.class);
    Subject subject = new Subject(true, Collections.singleton(servicePrincipal), Collections.emptySet(), Collections.emptySet());
    // Non-null delegate makes the identity "established".
    SpnegoUserIdentity result = new SpnegoUserIdentity(subject, servicePrincipal, serviceDelegate);
    expect(mockFallbackLoginService.login(anyString(), anyObject(), anyObject())).andReturn(result);
    TestAuthorizer userAuthorizer = new TestAuthorizer(TEST_USER);
    // The request mock returns null for the doAs parameter, selecting the fallback path.
    HttpServletRequest mockRequest = mock(HttpServletRequest.class);
    replay(mockFallbackLoginService);
    TrustedProxyLoginService trustedProxyLoginService = new TrustedProxyLoginService(mockSpnegoLoginService, mockFallbackLoginService,
        userAuthorizer, true);
    UserIdentity doAsIdentity = trustedProxyLoginService.login(null, ENCODED_TOKEN, mockRequest);
    assertNotNull(doAsIdentity);
    assertNotNull(doAsIdentity.getUserPrincipal());
    assertEquals(servicePrincipal, doAsIdentity.getUserPrincipal());
    assertTrue(((SpnegoUserIdentity) doAsIdentity).isEstablished());
    verify(mockFallbackLoginService);
}
|
/** Renders this SCM's configuration for display, flagging a missing plugin. */
public String getConfigForDisplay() {
    final String pluginId = getPluginId();
    final SCMMetadataStore metadataStore = SCMMetadataStore.getInstance();
    final List<ConfigurationProperty> displayProperties =
            ConfigurationDisplayUtil.getConfigurationPropertiesToBeUsedForDisplay(metadataStore, pluginId, configuration);
    // Prepend a warning when the plugin backing this SCM is not installed.
    final String prefix = metadataStore.hasPlugin(pluginId) ? "" : "WARNING! Plugin missing. ";
    return prefix + configuration.forDisplay(displayProperties);
}
|
/**
 * With an unknown plugin, the display string carries the missing-plugin warning
 * and shows only the non-secure properties (key2 is secure and is omitted).
 */
@Test
void shouldDisplayAllNonSecureFieldsInGetConfigForDisplayWhenPluginDoesNotExist() {
    Configuration configuration = new Configuration(create("key1", false, "value1"), create("key2", true, "value2"), create("key3", false, "value3"));
    SCM scm = SCMMother.create("scm", "scm-name", "some-plugin-which-does-not-exist", "1.0", configuration);
    assertThat(scm.getConfigForDisplay()).isEqualTo("WARNING! Plugin missing. [key1=value1, key3=value3]");
}
|
/** Returns a description of this topology; delegates to the internal builder. */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
|
/**
 * A windowed count with an anonymous (name-less) Materialized must not alter the
 * topology structure, and the resulting store must remain persistent (RocksDB).
 */
@Test
public void timeWindowAnonymousMaterializedCountShouldPreserveTopologyStructure() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>with(null, Serdes.Long())
            .withStoreType(Materialized.StoreType.ROCKS_DB));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    // Expected structure: source -> aggregate with one generated state store.
    assertEquals(
        "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> KSTREAM-AGGREGATE-0000000003\n" +
            "    Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
            "      --> none\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // The anonymous materialization must still produce a persistent local store.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
|
/** Builds the broker URL, appending security-tab SSL options when applicable. */
String buildUrl( JmsDelegate delegate, boolean debug ) {
    final StringBuilder urlBuilder = new StringBuilder( delegate.amqUrl.trim() );
    // Append SSL options from the security tab only when the user enabled SSL
    // *and* the URL does not already carry its own sslEnabled setting — URL
    // settings always win over the dialogue.
    final boolean urlAlreadyConfiguresSsl = urlBuilder.toString().contains( "sslEnabled" );
    if ( delegate.sslEnabled && !urlAlreadyConfiguresSsl ) {
        appendSslOptions( delegate, urlBuilder, debug );
    }
    return urlBuilder.toString();
}
|
/**
 * SSL options from the security tab must be appended to a URL that already has
 * query parameters, both without and with a trailing ';' separator.
 */
@Test public void testUrlBuildSslOptionsParamsExist() {
    ActiveMQProvider provider = new ActiveMQProvider();
    JmsDelegate delegate = new JmsDelegate( Collections.singletonList( provider ) );
    delegate.amqUrl = AMQ_URL_BASE + "?foo=bar";
    delegate.sslEnabled = true;
    delegate.sslTruststorePath = TRUST_STORE_PATH_VAL;
    delegate.sslTruststorePassword = TRUST_STORE_PASS_VAL;
    assertWellFormedSslUrl( provider.buildUrl( delegate, false ) );
    // Repeat with a trailing ';' on the base URL to cover the separator-handling path.
    delegate.amqUrl += ";";
    assertWellFormedSslUrl( provider.buildUrl( delegate, false ) );
}

/** Asserts the built URL parses as a URI and carries the expected SSL options. */
private void assertWellFormedSslUrl( final String urlString ) {
    try {
        new URI( urlString );
    } catch ( URISyntaxException e ) {
        fail( e.getMessage() );
    }
    assertTrue( "Missing trust store path", urlString.contains( "trustStorePath=" + TRUST_STORE_PATH_VAL ) );
    assertTrue( "Missing trust store password", urlString.contains( "trustStorePassword=" + TRUST_STORE_PASS_VAL ) );
    assertTrue( "URL base incorrect", urlString.startsWith( AMQ_URL_BASE + "?" ) );
}
|
/**
 * Adds an album to the displayed list. Non-classical albums never record a
 * composer; an empty string is stored instead.
 */
public void addAlbums(final String title,
                      final String artist, final boolean isClassical,
                      final String composer) {
    final String storedComposer = isClassical ? composer : "";
    this.albums.add(new Album(title, artist, isClassical, storedComposer));
}
|
/** A classical album must retain the composer that was passed in. */
@Test
void testAdd_true(){
    DisplayedAlbums displayedAlbums = new DisplayedAlbums();
    displayedAlbums.addAlbums("title", "artist", true, "composer");
    assertEquals("composer", displayedAlbums.getAlbums().get(0).getComposer());
}
|
/**
 * Encrypts {@code clearText} with AES/GCM using a fresh random IV per call, and
 * returns Base64(IV || ciphertext). The random IV guarantees different output
 * for identical inputs across calls.
 *
 * @param clearText the plaintext to encrypt (encoded as UTF-8)
 * @return Base64 of the IV concatenated with the GCM ciphertext (incl. tag)
 * @throws IllegalStateException wrapping any checked crypto failure
 */
@Override
public String encrypt(String clearText) {
    try {
        javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CRYPTO_ALGO);
        byte[] iv = new byte[GCM_IV_LENGTH_IN_BYTES];
        new SecureRandom().nextBytes(iv);
        cipher.init(javax.crypto.Cipher.ENCRYPT_MODE, loadSecretFile(), new GCMParameterSpec(GCM_TAG_LENGTH_IN_BITS, iv));
        // Use the Charset overload directly: unlike getBytes(String), it cannot
        // throw the checked UnsupportedEncodingException.
        byte[] encryptedText = cipher.doFinal(clearText.getBytes(StandardCharsets.UTF_8));
        // Prepend the IV so decryption can recover it from the payload.
        return Base64.encodeBase64String(
            ByteBuffer.allocate(GCM_IV_LENGTH_IN_BYTES + encryptedText.length)
                .put(iv)
                .put(encryptedText)
                .array());
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
|
/** The random per-call IV must make two encryptions of the same text differ. */
@Test
public void encrypt_should_generate_different_value_everytime() throws Exception {
    AesGCMCipher cipher = new AesGCMCipher(pathToSecretKey());
    String encryptedText1 = cipher.encrypt("this is a secret");
    String encryptedText2 = cipher.encrypt("this is a secret");
    assertThat(StringUtils.isNotBlank(encryptedText1)).isTrue();
    assertThat(StringUtils.isNotBlank(encryptedText2)).isTrue();
    assertThat(encryptedText1).isNotEqualTo(encryptedText2);
}
|
/**
 * Walks the ranked retry-reason categories and returns the abbreviation of the
 * last one that matches; later (higher-ranked) matches override earlier ones,
 * which is why the loop does not return on the first hit.
 */
static String getAbbreviation(Exception ex,
    Integer statusCode,
    String storageErrorMessage) {
    String matched = null;
    for (RetryReasonCategory category : rankedReasonCategories) {
        final String candidate = category.captureAndGetAbbreviation(ex,
            statusCode, storageErrorMessage);
        matched = (candidate != null) ? candidate : matched;
    }
    return matched;
}
|
/** An HTTP 500 status with no exception or message maps to the "500" abbreviation. */
@Test
public void test500RetryReason() {
    Assertions.assertThat(RetryReason.getAbbreviation(null, HTTP_INTERNAL_ERROR, null)).isEqualTo(
        "500"
    );
}
|
/** Returns true when the rule key's string form matches this pattern's rule glob. */
boolean matchRule(RuleKey rule) {
    return rulePattern.match(rule.toString());
}
|
/** Exercises wildcard matching of rule patterns against "repo:rule" keys. */
@Test
public void match_rule() {
    RuleKey rule = Rule.create("checkstyle", "IllegalRegexp", "").ruleKey();
    // Wildcards may match the whole key, the repository, or parts of the rule name.
    assertThat(new IssuePattern("*", "*").matchRule(rule)).isTrue();
    assertThat(new IssuePattern("*", "checkstyle:*").matchRule(rule)).isTrue();
    assertThat(new IssuePattern("*", "checkstyle:IllegalRegexp").matchRule(rule)).isTrue();
    assertThat(new IssuePattern("*", "checkstyle:Illegal*").matchRule(rule)).isTrue();
    assertThat(new IssuePattern("*", "*:*Illegal*").matchRule(rule)).isTrue();
    // Wrong repository or non-matching glob must not match.
    assertThat(new IssuePattern("*", "pmd:IllegalRegexp").matchRule(rule)).isFalse();
    assertThat(new IssuePattern("*", "pmd:*").matchRule(rule)).isFalse();
    assertThat(new IssuePattern("*", "*:Foo*IllegalRegexp").matchRule(rule)).isFalse();
}
|
/**
 * Builds a {@code Statement} from the given parse tree.
 *
 * @param parseTree the ANTLR parse tree to convert
 * @return the built statement
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
  // Extract the data sources referenced by the tree, then delegate to build().
  return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldBuildAssertNotExistsWithSubjectAndId() {
  // Given: an ASSERT NOT EXISTS statement with both a subject and an id.
  final SingleStatementContext stmt
      = givenQuery("ASSERT NOT EXISTS SCHEMA SUBJECT 'a-b-c' ID 33;");
  // When:
  final AssertSchema assertSchema = (AssertSchema) builder.buildStatement(stmt);
  // Then: subject and id are populated, no timeout was given, and
  // checkExists() is false because this asserts non-existence.
  assertThat(assertSchema.getSubject(), is(Optional.of("a-b-c")));
  assertThat(assertSchema.getId(), is(Optional.of(33)));
  assertThat(assertSchema.getTimeout(), is(Optional.empty()));
  assertThat(assertSchema.checkExists(), is(false));
}
|
/**
 * Hash code combining the same fields consulted by {@code equals}.
 *
 * <p>The append order is kept stable so equal signatures keep producing
 * equal hash codes across versions.
 */
@Override
public int hashCode()
{
  final HashCodeBuilder builder = new HashCodeBuilder();
  builder.append(_baseUriTemplate);
  builder.append(_pathKeys);
  builder.append(_id);
  builder.append(_queryParams);
  return builder.toHashCode();
}
|
@Test
public void testEquality()
{
  // Two requests stubbed with identical template, path keys, id and query
  // params must produce equal signatures (and equal hash codes).
  final GetRequest<?> equalRequest1 = Mockito.mock(GetRequest.class);
  Mockito.when(equalRequest1.getBaseUriTemplate()).thenReturn(BASE_URI_TEMPLATE);
  Mockito.when(equalRequest1.getPathKeys()).thenReturn(PATH_KEYS);
  Mockito.when(equalRequest1.getObjectId()).thenReturn(ID);
  Mockito.when(equalRequest1.getQueryParamsObjects()).thenReturn(QUERY_PARAMS_OBJECTS);
  final GetRequest<?> equalRequest2 = Mockito.mock(GetRequest.class);
  Mockito.when(equalRequest2.getBaseUriTemplate()).thenReturn(BASE_URI_TEMPLATE);
  Mockito.when(equalRequest2.getPathKeys()).thenReturn(PATH_KEYS);
  Mockito.when(equalRequest2.getObjectId()).thenReturn(ID);
  Mockito.when(equalRequest2.getQueryParamsObjects()).thenReturn(QUERY_PARAMS_OBJECTS);
  // A third request differing only in its id (null here).
  final GetRequest<?> idDifferRequest = Mockito.mock(GetRequest.class);
  Mockito.when(idDifferRequest.getBaseUriTemplate()).thenReturn(BASE_URI_TEMPLATE);
  Mockito.when(idDifferRequest.getPathKeys()).thenReturn(PATH_KEYS);
  Mockito.when(idDifferRequest.getObjectId()).thenReturn(null);
  Mockito.when(idDifferRequest.getQueryParamsObjects()).thenReturn(QUERY_PARAMS_OBJECTS);
  final RestliRequestUriSignature equalSignature1 = new RestliRequestUriSignature(equalRequest1, RestliRequestUriSignature.ALL_FIELDS);
  final RestliRequestUriSignature equalSignature2 = new RestliRequestUriSignature(equalRequest2, RestliRequestUriSignature.ALL_FIELDS);
  Assert.assertEquals(equalSignature1.hashCode(), equalSignature2.hashCode());
  Assert.assertEquals(equalSignature1, equalSignature2);
  // When the ID field is excluded from the signature, a request with a
  // different (null) id must still be considered equal.
  final Set<SignatureField> nonIDFields = new HashSet<>(Arrays.asList(SignatureField.BASE_URI_TEMPLATE,
      SignatureField.PATH_KEYS,
      SignatureField.QUERY_PARAMS));
  final RestliRequestUriSignature equalSignature3 = new RestliRequestUriSignature(equalRequest1, nonIDFields);
  final RestliRequestUriSignature equalSignature4 = new RestliRequestUriSignature(idDifferRequest, RestliRequestUriSignature.ALL_FIELDS);
  Assert.assertEquals(equalSignature3.hashCode(), equalSignature4.hashCode());
  Assert.assertEquals(equalSignature3, equalSignature4);
}
|
/**
 * Builds the registry path of one concrete version under a data source unit.
 *
 * @param databaseName database name
 * @param dataSourceName data source (unit) name
 * @param version version identifier
 * @return path of the given version node
 */
public static String getDataSourceUnitVersionNode(final String databaseName, final String dataSourceName, final String version) {
    // Append the concrete version segment to the unit's "versions" parent node.
    final String versionsNode = getDataSourceUnitVersionsNode(databaseName, dataSourceName);
    return versionsNode + "/" + version;
}
|
@Test
void assertGetDataSourceUnitNodeWithVersion() {
    // The version node path is <versions-parent>/<version>.
    assertThat(DataSourceMetaDataNode.getDataSourceUnitVersionNode("foo_db", "foo_ds", "0"), is("/metadata/foo_db/data_sources/units/foo_ds/versions/0"));
}
|
/**
 * Registers a persistent instance for the given service by replicating the
 * registration through the consensus protocol.
 *
 * @param service service the instance belongs to
 * @param instance instance to register
 * @param clientId id of the owning client
 * @throws NacosRuntimeException if the service is ephemeral, or if the
 *         consensus write fails
 */
@Override
public void registerInstance(Service service, Instance instance, String clientId) {
    // Persistent instances may only live under persistent services.
    Service singleton = ServiceManager.getInstance().getSingleton(service);
    if (singleton.isEphemeral()) {
        throw new NacosRuntimeException(NacosException.INVALID_PARAM,
                String.format("Current service %s is ephemeral service, can't register persistent instance.",
                        singleton.getGroupedServiceName()));
    }
    final InstanceStoreRequest storeRequest = new InstanceStoreRequest();
    storeRequest.setService(service);
    storeRequest.setInstance(instance);
    storeRequest.setClientId(clientId);
    // Serialize the request and submit it as an ADD operation to the
    // consensus group so all members apply the registration.
    final WriteRequest consensusRequest = WriteRequest.newBuilder()
            .setGroup(group())
            .setData(ByteString.copyFrom(serializer.serialize(storeRequest)))
            .setOperation(DataOperation.ADD.name())
            .build();
    try {
        protocol.write(consensusRequest);
        Loggers.RAFT.info("Client registered. service={}, clientId={}, instance={}", service, clientId, instance);
    } catch (Exception e) {
        throw new NacosRuntimeException(NacosException.SERVER_ERROR, e);
    }
}
|
@Test
void testRegisterPersistentInstance() {
    // Registering a persistent instance against an ephemeral service must be
    // rejected with a NacosRuntimeException.
    assertThrows(NacosRuntimeException.class, () -> {
        when(service.isEphemeral()).thenReturn(true);
        persistentClientOperationServiceImpl.registerInstance(service, instance, clientId);
    });
}
|
/**
 * Indicates whether this subscriber consumes events synchronously.
 *
 * @return the value of this subscriber's {@code sync} flag
 */
public boolean isSync() {
    return sync;
}
|
@Test
public void isSync() throws Exception {
    // A plain subscriber defaults to synchronous delivery...
    Subscriber subscriber = new TestSubscriber();
    Assert.assertTrue(subscriber.isSync());
    // ...while the async variant reports asynchronous delivery.
    subscriber = new TestAsyncSubscriber();
    Assert.assertFalse(subscriber.isSync());
}
|
/**
 * Processes a NodeManager heartbeat: validates the node, de-duplicates
 * heartbeats via the response id, forwards the node status to the RMNode,
 * and assembles the response (keys, tokens, resources, queuing limits, and
 * label/attribute acceptance results).
 *
 * @param request the heartbeat request sent by the NodeManager
 * @return response telling the NM what to do next (NORMAL, RESYNC or SHUTDOWN)
 * @throws YarnException on YARN-level failures
 * @throws IOException on I/O failures
 */
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
  NodeStatus remoteNodeStatus = request.getNodeStatus();
  /**
   * Here is the node heartbeat sequence...
   * 1. Check if it's a valid (i.e. not excluded) node
   * 2. Check if it's a registered node
   * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
   * 4. Send healthStatus to RMNode
   * 5. Update node's labels if distributed Node Labels configuration is enabled
   */
  NodeId nodeId = remoteNodeStatus.getNodeId();
  // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
  // in decommissioning.
  if (!this.nodesListManager.isValidNode(nodeId.getHost())
      && !isNodeInDecommissioning(nodeId)) {
    String message =
        "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
            + nodeId.getHost();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }
  // 2. Check if it's a registered node
  RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
  if (rmNode == null) {
    /* node does not exist */
    String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }
  // Send ping
  this.nmLivelinessMonitor.receivedPing(nodeId);
  this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
  // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
  NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
  if (getNextResponseId(
      remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
      .getResponseId()) {
    // The NM re-sent a heartbeat we already answered: replay the previous
    // response rather than processing the same status twice.
    LOG.info("Received duplicate heartbeat from node "
        + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId());
    return lastNodeHeartbeatResponse;
  } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
      .getResponseId()) {
    // The NM is out of sync by more than one heartbeat: force a resync.
    String message =
        "Too far behind rm response id:"
            + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
            + remoteNodeStatus.getResponseId();
    LOG.info(message);
    // TODO: Just sending reboot is not enough. Think more.
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }
  // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
  if (rmNode.getState() == NodeState.DECOMMISSIONING &&
      decommissioningWatcher.checkReadyToBeDecommissioned(
          rmNode.getNodeID())) {
    String message = "DECOMMISSIONING " + nodeId +
        " is ready to be decommissioned";
    LOG.info(message);
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
    this.nmLivelinessMonitor.unregister(nodeId);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }
  if (timelineServiceV2Enabled) {
    // Check & update collectors info from request.
    updateAppCollectorsMap(request);
  }
  // Heartbeat response: optionally scale the interval based on node state.
  long newInterval = nextHeartBeatInterval;
  if (heartBeatIntervalScalingEnable) {
    newInterval = rmNode.calculateHeartBeatInterval(
        nextHeartBeatInterval, heartBeatIntervalMin,
        heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
        heartBeatIntervalSlowdownFactor);
  }
  NodeHeartbeatResponse nodeHeartBeatResponse =
      YarnServerBuilderUtils.newNodeHeartbeatResponse(
          getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
          NodeAction.NORMAL, null, null, null, null, newInterval);
  rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
  populateKeys(request, nodeHeartBeatResponse);
  populateTokenSequenceNo(request, nodeHeartBeatResponse);
  if (timelineServiceV2Enabled) {
    // Return collectors' map that NM needs to know
    setAppCollectorsMapToResponse(rmNode.getRunningApps(),
        nodeHeartBeatResponse);
  }
  // 4. Send status to RMNode, saving the latest response.
  RMNodeStatusEvent nodeStatusEvent =
      new RMNodeStatusEvent(nodeId, remoteNodeStatus);
  if (request.getLogAggregationReportsForApps() != null
      && !request.getLogAggregationReportsForApps().isEmpty()) {
    nodeStatusEvent.setLogAggregationReportsForApps(request
        .getLogAggregationReportsForApps());
  }
  this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
  // 5. Update node's labels to RM's NodeLabelManager.
  if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
    try {
      updateNodeLabelsFromNMReport(
          NodeLabelsUtils.convertToStringSet(request.getNodeLabels()),
          nodeId);
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
    }
  }
  // 6. check if node's capacity is load from dynamic-resources.xml
  // if so, send updated resource back to NM.
  String nid = nodeId.toString();
  Resource capability = loadNodeResourceFromDRConfiguration(nid);
  // sync back with new resource if not null.
  if (capability != null) {
    nodeHeartBeatResponse.setResource(capability);
  }
  // Check if we got an event (AdminService) that updated the resources
  if (rmNode.isUpdatedCapability()) {
    nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
    rmNode.resetUpdatedCapability();
  }
  // 7. Send Container Queuing Limits back to the Node. This will be used by
  // the node to truncate the number of Containers queued for execution.
  if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
    nodeHeartBeatResponse.setContainerQueuingLimit(
        this.rmContext.getNodeManagerQueueLimitCalculator()
            .createContainerQueuingLimit());
  }
  // 8. Get node's attributes and update node-to-attributes mapping
  // in RMNodeAttributeManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg =
          nodeHeartBeatResponse.getDiagnosticsMessage() == null ?
              ex.getMessage() :
              nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex
                  .getMessage();
      nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
    }
  }
  return nodeHeartBeatResponse;
}
|
@Test (timeout = 50000)
public void testGetNextHeartBeatInterval() throws Exception {
  // With a fixed (non-scaling) configuration, every node should be told to
  // heartbeat at exactly the configured interval.
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000");
  rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("host1:1234", 5120);
  MockNM nm2 = rm.registerNode("host2:5678", 10240);
  NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
  Assert.assertEquals(4000, nodeHeartbeat.getNextHeartBeatInterval());
  NodeHeartbeatResponse nodeHeartbeat2 = nm2.nodeHeartbeat(true);
  Assert.assertEquals(4000, nodeHeartbeat2.getNextHeartBeatInterval());
}
|
/**
 * Serves a daemon log file as an HTTP download.
 *
 * @param host host the download is requested against
 * @param fileName name of the daemon log file
 * @param user user requesting the download
 * @return the HTTP response carrying the file or an error status
 * @throws IOException if reading the file fails
 */
public Response downloadDaemonLogFile(String host, String fileName, String user) throws IOException {
    // NOTE(review): the trailing 'true' presumably selects daemon (vs worker)
    // logs — confirm against LogFileDownloadHelper.downloadFile.
    return logFileDownloadHelper.downloadFile(host, fileName, user, true);
}
|
@Test
public void testDownloadDaemonLogFilePathOutsideLogRoot() throws IOException {
    // Path-traversal guard: a file name that escapes the log root ("../")
    // must yield 404 rather than serving the file.
    try (TmpPath rootPath = new TmpPath()) {
        LogviewerLogDownloadHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
        Response response = handler.downloadDaemonLogFile("host","../evil.sh", "user");
        Utils.forceDelete(rootPath.toString());
        assertThat(response.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode()));
    }
}
|
/**
 * Returns the timezone carried by a time-based initiator.
 *
 * @return the timezone of a TIME initiator, or null for any other type
 */
String getInitiatorTimeZone() {
  // Only time-based initiators carry a timezone.
  if (initiator.getType() != Initiator.Type.TIME) {
    return null;
  }
  return ((TimeInitiator) initiator).getTimezone();
}
|
@Test
public void testGetInitiatorTimeZone() throws Exception {
  // Load a sample instance and rebuild it with a TIME initiator that carries
  // an explicit timezone; the wrapper must surface that timezone.
  WorkflowInstance instance =
      loadObject(
          "fixtures/instances/sample-workflow-instance-created.json", WorkflowInstance.class);
  TimeInitiator initiator = new TimeInitiator();
  // Fixed: removed a stray empty statement (';') left after this call.
  initiator.setTimezone("Asia/Tokyo");
  RunRequest runRequest =
      RunRequest.builder()
          .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
          .initiator(initiator)
          .build();
  InstanceWrapper instanceWrapper = InstanceWrapper.from(instance, runRequest);
  assertEquals(runRequest.getInitiator(), instanceWrapper.getInitiator());
  assertTrue(instanceWrapper.isWorkflowParam());
  assertEquals("Asia/Tokyo", instanceWrapper.getInitiatorTimeZone());
  assertEquals(runRequest.getCurrentPolicy().name(), instanceWrapper.getRunPolicy());
  assertEquals(
      instance.getRunProperties().getOwner().getName(), instanceWrapper.getWorkflowOwner());
  // Not a trigger-based run, so no first-trigger timezone is present.
  assertNull(instanceWrapper.getFirstTimeTriggerTimeZone());
}
|
/**
 * Resolves the access privilege for the given client address.
 *
 * @param addr client address
 * @return the privilege granted to this address
 */
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
  // Resolve both textual forms once, then defer to the string-based overload.
  final String ipAddress = addr.getHostAddress();
  final String hostName = addr.getCanonicalHostName();
  return getAccessPrivilege(ipAddress, hostName);
}
|
@Test
public void testExactAddressRW() {
  // An export granting "rw" to exactly address1 must give READ_WRITE to that
  // address and not to a different address, even with the same hostname.
  NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1
      + " rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,
      matcher.getAccessPrivilege(address1, hostname1));
  Assert.assertFalse(AccessPrivilege.READ_WRITE == matcher
      .getAccessPrivilege(address2, hostname1));
}
|
/**
 * Attempts to unblock a term buffer at a blocked offset so consumers can
 * make progress past a stalled producer.
 *
 * @param logMetaDataBuffer metadata buffer for the log
 * @param termBuffer term buffer containing the blocked frame
 * @param blockedOffset offset at which progress is blocked
 * @param tailOffset current tail offset of the term
 * @param termId id of the term being examined
 * @return NO_ACTION if nothing could be done, UNBLOCKED if a frame was
 *         padded out, or UNBLOCKED_TO_END if padded to the end of the term
 */
public static Status unblock(
    final UnsafeBuffer logMetaDataBuffer,
    final UnsafeBuffer termBuffer,
    final int blockedOffset,
    final int tailOffset,
    final int termId)
{
    Status status = NO_ACTION;
    int frameLength = frameLengthVolatile(termBuffer, blockedOffset);
    if (frameLength < 0)
    {
        // Negative length: a frame was claimed but never committed — replace
        // its header (using the magnitude as the length) to pad it out.
        resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength);
        status = UNBLOCKED;
    }
    else if (0 == frameLength)
    {
        // Zero length: nothing written yet at the blocked offset. Scan forward
        // for the next non-zero frame to determine how much space to pad.
        int currentOffset = blockedOffset + FRAME_ALIGNMENT;
        while (currentOffset < tailOffset)
        {
            frameLength = frameLengthVolatile(termBuffer, currentOffset);
            if (frameLength != 0)
            {
                // Re-check that everything between blockedOffset and here is
                // still zero before padding; a racing producer may have
                // completed its write in the meantime.
                if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset))
                {
                    final int length = currentOffset - blockedOffset;
                    resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                    status = UNBLOCKED;
                }
                break;
            }
            currentOffset += FRAME_ALIGNMENT;
        }
        if (currentOffset == termBuffer.capacity())
        {
            // Reached the end of the term with only zeroes: pad all the way
            // to the end if the blocked slot is still untouched.
            if (0 == frameLengthVolatile(termBuffer, blockedOffset))
            {
                final int length = currentOffset - blockedOffset;
                resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                status = UNBLOCKED_TO_END;
            }
        }
    }
    return status;
}
|
@Test
void shouldScanForwardForNextNonCommittedMessage()
{
    final int messageLength = HEADER_LENGTH * 4;
    final int termOffset = 0;
    final int tailOffset = messageLength * 2;
    // The frame after the blocked one reports a negative (uncommitted) length,
    // so the scan should stop there and pad out the blocked region.
    when(mockTermBuffer.getIntVolatile(messageLength)).thenReturn(-messageLength);
    assertEquals(
        UNBLOCKED, TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID));
    // Padding must happen in order: type, then term offset, then the ordered
    // length write that publishes the padding frame.
    final InOrder inOrder = inOrder(mockTermBuffer);
    inOrder.verify(mockTermBuffer).putShort(typeOffset(termOffset), (short)HDR_TYPE_PAD, LITTLE_ENDIAN);
    inOrder.verify(mockTermBuffer).putInt(termOffsetOffset(termOffset), termOffset, LITTLE_ENDIAN);
    inOrder.verify(mockTermBuffer).putIntOrdered(termOffset, messageLength);
}
|
/**
 * Adds an element, evicting the oldest entry when capacity is exceeded.
 *
 * @param element element to add; must not be null
 */
public void add(T element) {
    Preconditions.checkNotNull(element);
    // Append first; if the insert pushed us past capacity, drop the head.
    final boolean inserted = elements.add(element);
    if (inserted && elements.size() > maxSize) {
        elements.poll();
    }
}
|
@Test
void testQueueWithMaxSize0() {
    // With capacity 0, an added element is immediately evicted again, so the
    // queue stays empty.
    final BoundedFIFOQueue<Integer> testInstance = new BoundedFIFOQueue<>(0);
    assertThat(testInstance).isEmpty();
    testInstance.add(1);
    assertThat(testInstance).isEmpty();
}
|
/**
 * Computes an https route for a plain http/https target.
 *
 * @throws IllegalArgumentException if the scheme is neither http nor https,
 *         or if a proxy is configured (proxies are unsupported here)
 */
@Override
@SuppressWarnings("deprecation")
public HttpRoute determineRoute(HttpHost target, HttpContext context) {
    // Only http(s) targets can be upgraded to https.
    if ( ! target.getSchemeName().equals("http") && ! target.getSchemeName().equals("https")) {
        throw new IllegalArgumentException("Scheme must be 'http' or 'https' when using HttpToHttpsRoutePlanner, was '" + target.getSchemeName() + "'");
    }
    if (HttpClientContext.adapt(context).getRequestConfig().getProxy() != null) {
        throw new IllegalArgumentException("Proxies are not supported with HttpToHttpsRoutePlanner");
    }
    // Resolve the scheme's default port when the target has none explicitly.
    final int resolvedPort = DefaultSchemePortResolver.INSTANCE.resolve(target);
    final HttpHost httpsTarget = new HttpHost("https", target.getAddress(), target.getHostName(), resolvedPort);
    return new HttpRoute(httpsTarget);
}
|
@Test
void verifyPortMustBeSet() {
    // A target without an explicit port (-1) must be rejected.
    try {
        planner.determineRoute(new HttpHost("http", "host", -1), new HttpClientContext());
    }
    catch (IllegalArgumentException e) {
        assertEquals("Port must be set when using HttpToHttpsRoutePlanner", e.getMessage());
        return;
    }
    // Fixed: the original test passed silently when no exception was thrown,
    // which made it a no-op on regression. Fail explicitly instead.
    throw new AssertionError("Expected IllegalArgumentException because no port was set");
}
|
/**
 * Creates an instruction that removes the outermost VLAN tag.
 *
 * @return an L2 modification instruction with the VLAN_POP subtype
 */
public static Instruction popVlan() {
    final L2ModificationInstruction.L2SubType subType =
            L2ModificationInstruction.L2SubType.VLAN_POP;
    return new L2ModificationInstruction.ModVlanHeaderInstruction(subType);
}
|
@Test
public void testPopVlanMethod() {
    // popVlan() must produce an L2MODIFICATION instruction whose subtype is
    // VLAN_POP.
    final Instruction instruction = Instructions.popVlan();
    final L2ModificationInstruction.ModVlanHeaderInstruction popVlanInstruction =
            checkAndConvert(instruction,
                    Instruction.Type.L2MODIFICATION,
                    L2ModificationInstruction.ModVlanHeaderInstruction.class);
    assertThat(popVlanInstruction.subtype(),
            is(L2ModificationInstruction.L2SubType.VLAN_POP));
}
|
/**
 * Prints a list of KSQL entities in the console's configured output format.
 *
 * @param entityList entities to print
 * @throws RuntimeException if the output format is not JSON or TABULAR
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
  switch (outputFormat) {
    case JSON:
      printAsJson(entityList);
      break;
    case TABULAR:
      // When several entities are printed, prefix each with its statement
      // text so the reader can match output to statement.
      final boolean printStatementText = entityList.size() > 1;
      for (final KsqlEntity entity : entityList) {
        writer().println();
        if (printStatementText) {
          writer().println(entity.getStatementText());
        }
        printAsTable(entity);
      }
      break;
    default:
      throw new RuntimeException(String.format(
          "Unexpected output format: '%s'",
          outputFormat.name()
      ));
  }
}
|
@Test
public void shouldPrintTypesList() {
  // Given: a TypeList containing array, struct and parameterized decimal
  // types, keyed by name (insertion order: typeB, typeA, typeC).
  final KsqlEntityList entities = new KsqlEntityList(ImmutableList.of(
      new TypeList("statement", ImmutableMap.of(
          "typeB", new SchemaInfo(
              SqlBaseType.ARRAY,
              null,
              new SchemaInfo(SqlBaseType.STRING, null, null)),
          "typeA", new SchemaInfo(
              SqlBaseType.STRUCT,
              ImmutableList.of(
                  new FieldInfo("f1", new SchemaInfo(SqlBaseType.STRING, null, null), Optional.empty())),
              null),
          "typeC", new SchemaInfo(
              SqlBaseType.DECIMAL,
              null,
              null,
              ImmutableMap.of("precision", 10, "scale", 9)
          )
      ))
  ));
  // When:
  console.printKsqlEntityList(entities);
  // Then: the rendered output matches the approved snapshot.
  final String output = terminal.getOutputString();
  Approvals.verify(output, approvalOptions);
}
|
/**
 * Visits a compound predicate (AND/OR/NOT), rebuilding it only if any of its
 * children were changed by the shuttle.
 */
@Override
public ScalarOperator visitCompoundPredicate(CompoundPredicateOperator predicate, Void context) {
    return shuttleIfUpdate(predicate);
}
|
@Test
void testCompoundPredicate() {
    // Build (id = 1) AND (id2 = 1) and run it through both shuttles; since
    // neither shuttle rewrites the children, the operator must come back
    // unchanged (equal to the original).
    BinaryPredicateOperator binary1 = new BinaryPredicateOperator(BinaryType.EQ,
            new ColumnRefOperator(1, INT, "id", true), ConstantOperator.createInt(1));
    BinaryPredicateOperator binary2 = new BinaryPredicateOperator(BinaryType.EQ,
            new ColumnRefOperator(2, INT, "id2", true), ConstantOperator.createInt(1));
    CompoundPredicateOperator compound =
            new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.AND, binary1, binary2);
    {
        ScalarOperator newOperator = shuttle.visitCompoundPredicate(compound, null);
        assertEquals(compound, newOperator);
    }
    {
        ScalarOperator newOperator = shuttle2.visitCompoundPredicate(compound, null);
        assertEquals(compound, newOperator);
    }
}
|
/**
 * Routes touch events between normal key handling, gesture detection, and
 * the extension-keyboard popup shown when the finger moves above the
 * keyboard's top edge.
 *
 * @param me the motion event to process
 * @return true if the event was consumed by gesture handling or the
 *         extension keyboard, otherwise whatever the superclass returns
 */
@Override
public boolean onTouchEvent(@NonNull MotionEvent me) {
    if (getKeyboard() == null) {
        // I mean, if there isn't any keyboard I'm handling, what's the point?
        return false;
    }
    if (areTouchesDisabled(me)) {
        mGestureTypingPathShouldBeDrawn = false;
        return super.onTouchEvent(me);
    }
    final int action = me.getActionMasked();
    PointerTracker pointerTracker = getPointerTracker(me);
    mGestureTypingPathShouldBeDrawn = pointerTracker.isInGestureTyping();
    mGestureDrawingHelper.handleTouchEvent(me);
    // Gesture detector must be enabled only when mini-keyboard is not
    // on the screen.
    if (!mMiniKeyboardPopup.isShowing()
        && !mGestureTypingPathShouldBeDrawn
        && mGestureDetector.onTouchEvent(me)) {
        Logger.d(TAG, "Gesture detected!");
        // A swipe-style gesture consumed the event: cancel pending key
        // timing and hide any key previews.
        mKeyPressTimingHandler.cancelAllMessages();
        dismissAllKeyPreviews();
        return true;
    }
    if (action == MotionEvent.ACTION_DOWN) {
        // Record where the interaction started; used below to suppress the
        // extension keyboard for touches that began on the space bar.
        mGestureTypingPathShouldBeDrawn = false;
        mFirstTouchPoint.x = (int) me.getX();
        mFirstTouchPoint.y = (int) me.getY();
        mIsFirstDownEventInsideSpaceBar =
            mSpaceBarKey != null && mSpaceBarKey.isInside(mFirstTouchPoint.x, mFirstTouchPoint.y);
    } else if (action != MotionEvent.ACTION_MOVE) {
        mGestureTypingPathShouldBeDrawn = false;
    }
    // If the motion event is above the keyboard and it's a MOVE event
    // coming even before the first MOVE event into the extension area
    if (!mIsFirstDownEventInsideSpaceBar
        && me.getY() < mExtensionKeyboardYActivationPoint
        && !mMiniKeyboardPopup.isShowing()
        && !mExtensionVisible
        && action == MotionEvent.ACTION_MOVE) {
        if (mExtensionKeyboardAreaEntranceTime <= 0) {
            mExtensionKeyboardAreaEntranceTime = SystemClock.uptimeMillis();
        }
        // Only pop the extension keyboard after the finger has lingered in
        // the activation area for the configured delay.
        if (SystemClock.uptimeMillis() - mExtensionKeyboardAreaEntranceTime
            > DELAY_BEFORE_POPPING_UP_EXTENSION_KBD) {
            KeyboardExtension extKbd = ((ExternalAnyKeyboard) getKeyboard()).getExtensionLayout();
            if (extKbd == null || extKbd.getKeyboardResId() == AddOn.INVALID_RES_ID) {
                Logger.i(TAG, "No extension keyboard");
                return super.onTouchEvent(me);
            } else {
                // telling the main keyboard that the last touch was
                // canceled
                MotionEvent cancel =
                    MotionEvent.obtain(
                        me.getDownTime(),
                        me.getEventTime(),
                        MotionEvent.ACTION_CANCEL,
                        me.getX(),
                        me.getY(),
                        0);
                super.onTouchEvent(cancel);
                cancel.recycle();
                mExtensionVisible = true;
                dismissAllKeyPreviews();
                // The extension key is a lazily created 1x1 placeholder key
                // whose popup resource is the extension keyboard layout.
                if (mExtensionKey == null) {
                    mExtensionKey = new AnyKey(new Row(getKeyboard()), getThemedKeyboardDimens());
                    mExtensionKey.edgeFlags = 0;
                    mExtensionKey.height = 1;
                    mExtensionKey.width = 1;
                    mExtensionKey.popupResId = extKbd.getKeyboardResId();
                    mExtensionKey.externalResourcePopupLayout = mExtensionKey.popupResId != 0;
                    mExtensionKey.x = getWidth() / 2;
                    mExtensionKey.y = mExtensionKeyboardPopupOffset;
                }
                // so the popup will be right above your finger.
                mExtensionKey.x = ((int) me.getX());
                onLongPress(extKbd, mExtensionKey, mIsStickyExtensionKeyboard, getPointerTracker(me));
                return true;
            }
        } else {
            return super.onTouchEvent(me);
        }
    } else if (mExtensionVisible && me.getY() > mExtensionKeyboardYDismissPoint) {
        // closing the popup
        dismissPopupKeyboard();
        return true;
    } else {
        return super.onTouchEvent(me);
    }
}
|
@Test
public void testKeyClickHappyPath() {
    // DOWN on the center of 'a' must report press/first-down/gesture-start,
    // and UP must then report onKey followed by onRelease, in that order.
    AnyKeyboard.AnyKey key = findKey('a');
    int primaryCode = key.getCodeAtIndex(0, false);
    Mockito.verifyZeroInteractions(mMockKeyboardListener);
    MotionEvent motionEvent =
        MotionEvent.obtain(
            100,
            100,
            MotionEvent.ACTION_DOWN,
            Keyboard.Key.getCenterX(key),
            Keyboard.Key.getCenterY(key),
            0);
    mViewUnderTest.onTouchEvent(motionEvent);
    motionEvent.recycle();
    Mockito.verify(mMockKeyboardListener).onPress(primaryCode);
    Mockito.verify(mMockKeyboardListener).onFirstDownKey(primaryCode);
    Mockito.verify(mMockKeyboardListener)
        .onGestureTypingInputStart(
            eq(Keyboard.Key.getCenterX(key)),
            eq(Keyboard.Key.getCenterY(key)),
            same(key),
            anyLong());
    Mockito.verifyNoMoreInteractions(mMockKeyboardListener);
    Mockito.reset(mMockKeyboardListener);
    motionEvent =
        MotionEvent.obtain(
            100,
            110,
            MotionEvent.ACTION_UP,
            Keyboard.Key.getCenterX(key),
            Keyboard.Key.getCenterY(key),
            0);
    mViewUnderTest.onTouchEvent(motionEvent);
    motionEvent.recycle();
    InOrder inOrder = Mockito.inOrder(mMockKeyboardListener);
    inOrder
        .verify(mMockKeyboardListener)
        .onKey(eq(primaryCode), same(key), eq(0), any(int[].class), eq(true));
    inOrder.verify(mMockKeyboardListener).onRelease(primaryCode);
    inOrder.verifyNoMoreInteractions();
}
|
/**
 * Compute Engine entry point: parses the single command-line argument
 * (configuration file path), configures logging, and launches the server.
 *
 * @param args command line; must contain the configuration file path
 */
public static void main(String[] args) {
  ProcessEntryPoint entryPoint = ProcessEntryPoint.createForArguments(args);
  Props props = entryPoint.getProps();
  // Logging must be configured before anything else writes output.
  new CeProcessLogging().configure(props);
  ComputeEngineImpl computeEngine = new ComputeEngineImpl(props, new ComputeEngineContainerImpl());
  CeSecurityManager securityManager = new CeSecurityManager(new PluginSecurityManager(), props);
  CeServer server = new CeServer(computeEngine, new MinimumViableSystem(), securityManager);
  entryPoint.launch(server);
}
|
@Test
public void staticMain_withoutAnyArguments_expectException() {
  // main() requires exactly one argument: the configuration file path.
  String[] emptyArray = {};
  assertThatThrownBy(() -> CeServer.main(emptyArray))
    .hasMessage("Only a single command-line argument is accepted (absolute path to configuration file)");
}
|
/**
 * Lists the tables visible in the given schema (or in all schemas when
 * {@code schemaNameOrNull} is null).
 *
 * @param session the connector session
 * @param schemaNameOrNull schema to restrict to, or null for all schemas
 * @return the tables of the matched special schema, or an empty list
 */
@Override
public List<SchemaTableName> listTables(ConnectorSession session, String schemaNameOrNull)
{
    // Consider only the requested schema, or all schemas when none is given.
    Set<String> schemasToScan;
    if (schemaNameOrNull == null) {
        schemasToScan = ImmutableSet.copyOf(listSchemaNames(session));
    }
    else {
        schemasToScan = ImmutableSet.of(schemaNameOrNull);
    }
    ImmutableList.Builder<SchemaTableName> collected = ImmutableList.builder();
    for (String schema : schemasToScan) {
        // NOTE(review): both branches return immediately, so when several
        // schemas are scanned the first special schema encountered wins and
        // the builder stays unused — preserved as-is from the original.
        if (JMX_SCHEMA_NAME.equals(schema)) {
            return listJmxTables();
        }
        else if (HISTORY_SCHEMA_NAME.equals(schema)) {
            return jmxHistoricalData.getTables().stream()
                    .map(tableName -> new SchemaTableName(JmxMetadata.HISTORY_SCHEMA_NAME, tableName))
                    .collect(toList());
        }
    }
    return collected.build();
}
|
@Test
public void testListTables()
{
    // Each special schema should expose its well-known runtime table.
    assertTrue(metadata.listTables(SESSION, JMX_SCHEMA_NAME).contains(RUNTIME_TABLE));
    assertTrue(metadata.listTables(SESSION, HISTORY_SCHEMA_NAME).contains(RUNTIME_HISTORY_TABLE));
}
|
/**
 * Materializes this stream to the given topic and reads it back.
 *
 * @param topic the topic to write to and re-read from
 * @return a new stream backed by the topic
 * @deprecated delegates to {@code through(topic, Produced)} with the
 *             stream's own serdes and no partitioner
 */
@Deprecated
@Override
public KStream<K, V> through(final String topic) {
    return through(topic, Produced.with(keySerde, valueSerde, null));
}
|
@Deprecated // specifically testing the deprecated variant
@Test
public void shouldNotAllowNullTopicOnThroughWithProduced() {
    // A null topic must be rejected with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.through(null, Produced.as("through")));
    assertThat(exception.getMessage(), equalTo("topic can't be null"));
}
|
/**
 * Closes both HTTP clients (best-effort) and, when owned, the Vert.x
 * instance. Failures while closing a client are swallowed so one client's
 * failure never prevents the remaining resources from being released.
 */
public void close() {
  try {
    httpTlsClient.close();
  } catch (Exception ignored) {
    // Best-effort cleanup: nothing useful to do with this failure.
  }
  try {
    httpNonTlsClient.close();
  } catch (Exception ignored) {
    // Best-effort cleanup: nothing useful to do with this failure.
  }
  // Only dispose the Vert.x instance when this object owns its lifecycle.
  if (vertx != null && ownedVertx) {
    vertx.close();
  }
}
|
@Test
public void shouldFailToStartClientRequestWithInvalidKeystorePassword() throws Exception {
  // Restart the server with TLS, then try to build a client using a wrong
  // truststore password.
  ksqlClient.close();
  stopServer();
  // Given:
  startServerWithTls();
  // When:
  final KsqlRestClientException e = assertThrows(
      KsqlRestClientException.class,
      () -> startClientWithTlsAndTruststorePassword("iquwhduiqhwd")
  );
  // Then: the underlying cause is the JDK's keystore integrity failure.
  assertThat(e.getCause().getMessage(), containsString(
      "java.io.IOException: Keystore was tampered with, or password was incorrect"
  ));
}
|
/**
 * Verifies on close that every scope opened through this decorator was also
 * closed, raising an error that points at the caller who leaked a scope.
 */
@Override public void close() {
  // toArray is synchronized while iterators are not
  pendingScopes.expungeStaleEntries();
  for (Map.Entry<Scope, CallerStackTrace> pending : pendingScopes) {
    CallerStackTrace opener = pending.getValue();
    if (!opener.closed) {
      // A scope was opened but never closed: surface the leak with the
      // stack trace captured at open time.
      throwCallerError(opener);
    }
  }
}
|
@Test void decorator_close_afterCorrectUsage() {
  // Properly nested, properly closed scopes must not trigger the leak check.
  try (Scope scope = currentTraceContext.newScope(null)) {
    try (Scope scope2 = currentTraceContext.newScope(context)) {
    }
  }
  decorator.close(); // doesn't error
}
|
/**
 * Extracts the first match of {@code pattern} from {@code input}.
 *
 * @param pattern the regex pattern to apply
 * @param input the string to search
 * @return the first full match, or whatever the three-arg overload returns
 *         when no match exists
 */
@Udf(description = "Returns first substring of the input that matches the given regex pattern")
public String regexpExtract(
    @UdfParameter(description = "The regex pattern") final String pattern,
    @UdfParameter(description = "The input string to apply regex on") final String input
) {
  // Group 0 is the entire match.
  return regexpExtract(pattern, input, 0);
}
|
@Test
public void shouldReturnSubstringCapturedByGroupNumber() {
  // Fixed: JUnit's assertEquals takes (expected, actual); the original call
  // had the arguments reversed, producing misleading failure messages.
  assertEquals("test", udf.regexpExtract("(.*) (.*)", "test string", 1));
  assertEquals("string", udf.regexpExtract("(.*) (.*)", "test string", 2));
}
|
/**
 * Applies {@code fn} to this stage's result on the given executor.
 *
 * @param fn the function to apply to the completed value
 * @param executor executor that runs the function
 * @return a new completion stage holding the function's result
 */
@Override
public <U> ParSeqBasedCompletionStage<U> thenApplyAsync(Function<? super T, ? extends U> fn, Executor executor)
{
  // Wrap fn in a blocking ParSeq task bound to the executor, and flat-map it
  // onto this stage's task to form the next stage.
  return nextStageByComposingTask(_task.flatMap("thenApplyAsync", (t) -> Task.blocking(() -> fn.apply(t), executor)));
}
|
@Test
public void testThenApplyAsync() throws Exception
{
  CompletionStage<String> completionStage = createTestStage(TESTVALUE1);
  CountDownLatch waitLatch = new CountDownLatch(1);
  completionStage.thenApplyAsync(r -> {
    // Must run on the mock executor's thread; the latch only counts down
    // after this assertion passes.
    assertEquals(THREAD_NAME_VALUE, Thread.currentThread().getName());
    waitLatch.countDown();
    return "";
  }, _mockExecutor);
  finish(completionStage);
  // Fixed: the original ignored await()'s result, so a timeout (callback
  // never ran, or its assertion failed on the other thread) silently passed.
  if (!waitLatch.await(1000, TimeUnit.MILLISECONDS)) {
    throw new AssertionError(
        "thenApplyAsync callback did not run on the expected executor within 1s");
  }
}
|
/**
 * Renders this struct type as SQL, truncating beyond the maximum nesting
 * depth to avoid unbounded output on deeply nested (or cyclic-looking) types.
 *
 * @param depth current nesting depth
 * @return e.g. {@code struct<a int, b string>} or {@code struct<...>}
 */
@Override
public String toSql(int depth) {
    if (depth >= MAX_NESTING_DEPTH) {
        return "struct<...>";
    }
    ArrayList<String> renderedFields = Lists.newArrayList();
    for (StructField field : fields) {
        renderedFields.add(field.toSql(depth + 1, true));
    }
    return String.format("struct<%s>", Joiner.on(", ").join(renderedFields));
}
|
@Test
public void testUnnamedStruct() {
    // Fields without explicit names get generated ones: col1, col2, ...
    StructType type = new StructType(Lists.newArrayList(Type.INT, Type.DATETIME));
    Assert.assertEquals("struct<col1 int(11), col2 datetime>", type.toSql());
}
|
/**
 * Writes {@code data} to the node at {@code path} regardless of the node's
 * current version.
 *
 * @param path the node to write
 * @param data the payload to store
 * @return the resulting node status
 */
public Stat set(Path path, byte[] data) {
    // -1 disables the optimistic version check (matches any version).
    return set(path, data, -1);
}
|
@Test
public void require_that_write_fails_if_data_is_more_than_jute_max_buffer() {
    CuratorConfig.Builder builder = new CuratorConfig.Builder();
    builder.server(createZKBuilder(localhost, port1));
    // juteMaxBuffer is set to 1 byte, so any realistic payload must be rejected.
    try (Curator curator = createCurator(new CuratorConfig(builder), 1)) {
        try {
            curator.set(Path.createRoot().append("foo"), Utf8.toBytes("more than 1 byte"));
            fail("Did not fail when writing more than juteMaxBuffer bytes");
        } catch (IllegalArgumentException e) {
            // Asserts the exact production message (including its wording).
            assertEquals("Cannot not set data at /foo, 16 bytes is too much, max number of bytes allowed per node is 1",
                         e.getMessage());
        }
    }
}
|
/**
 * Deletes the first occurrence matched by {@code regex} from {@code content}.
 *
 * @param regex the pattern whose first match is removed
 * @param content the text to process
 * @return the text with the first match removed, or the text unchanged when
 *         either argument is blank
 */
public static String delFirst(String regex, CharSequence content) {
	// Nothing to delete when either the pattern or the content is blank.
	if (StrUtil.hasBlank(regex, content)) {
		return StrUtil.str(content);
	}
	// DOTALL lets '.' match line terminators; PatternPool caches the pattern.
	final Pattern compiled = PatternPool.get(regex, Pattern.DOTALL);
	return delFirst(compiled, content);
}
|
@Test
public void delFirstTest() {
	// Delete only the first occurrence matched by the pattern.
	final String resultDelFirst = ReUtil.delFirst("(\\w)aa(\\w)", content);
	assertEquals("ZZbbbccc中文1234", resultDelFirst);
}
|
/**
 * Rounds {@code n} half-up to zero decimal places.
 *
 * @param n the number to round
 * @return the rounded value wrapped in a FEEL function result
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    // Delegate with a scale of 0 (whole-number rounding).
    return invoke(n, BigDecimal.ZERO);
}
|
@Test
void invokeRoundingUp() {
    // Scale defaults to 0 (10.27 -> 10); an explicit scale of 1 rounds the
    // second decimal half-up (10.27 -> 10.3).
    FunctionTestUtil.assertResult(roundHalfUpFunction.invoke(BigDecimal.valueOf(10.27)), BigDecimal.valueOf(10));
    FunctionTestUtil.assertResult(roundHalfUpFunction.invoke(BigDecimal.valueOf(10.27), BigDecimal.ONE),
                                  BigDecimal.valueOf(10.3));
}
|
/**
 * Derives the schema for a producing (sink) operation from the input schema
 * and an optionally declared schema.
 *
 * <p>Three cases: (1) no declared schema — derive everything from the input;
 * (2) declared schema without physical columns — start from the input's
 * physical columns and overlay the declared metadata/computed columns and
 * primary key; (3) declared schema with physical columns — use it verbatim.
 *
 * @param inputSchema the resolved schema of the input
 * @param declaredSchema schema declared by the user, may be null
 * @return the producing result carrying the effective schema
 */
public static ProducingResult createProducingResult(
        ResolvedSchema inputSchema, @Nullable Schema declaredSchema) {
    // no schema has been declared by the user,
    // the schema will be entirely derived from the input
    if (declaredSchema == null) {
        // go through data type to erase time attributes
        final DataType physicalDataType = inputSchema.toSourceRowDataType();
        final Schema schema = Schema.newBuilder().fromRowDataType(physicalDataType).build();
        return new ProducingResult(null, schema, null);
    }
    final List<UnresolvedColumn> declaredColumns = declaredSchema.getColumns();
    // the declared schema does not contain physical information,
    // thus, it only replaces physical columns with metadata rowtime or adds a primary key
    if (declaredColumns.stream().noneMatch(SchemaTranslator::isPhysical)) {
        // go through data type to erase time attributes
        final DataType sourceDataType = inputSchema.toSourceRowDataType();
        final DataType physicalDataType =
                patchDataTypeWithoutMetadataRowtime(sourceDataType, declaredColumns);
        final Schema.Builder builder = Schema.newBuilder();
        builder.fromRowDataType(physicalDataType);
        builder.fromSchema(declaredSchema);
        return new ProducingResult(null, builder.build(), null);
    }
    // declared schema contains physical columns: trust it as-is
    return new ProducingResult(null, declaredSchema, null);
}
|
@Test
void testOutputToPartialSchema() {
    // The declared schema has no physical columns, so the input's physical
    // columns are kept and the declared computed column, metadata rowtime
    // and primary key are overlaid on top.
    final ResolvedSchema tableSchema =
            ResolvedSchema.of(
                    Column.physical("id", BIGINT().notNull()),
                    Column.physical("name", STRING()),
                    Column.metadata("rowtime", TIMESTAMP_LTZ(3), null, false));
    final ProducingResult result =
            SchemaTranslator.createProducingResult(
                    tableSchema,
                    Schema.newBuilder()
                            .columnByExpression("computed", "f1 + 42")
                            .columnByMetadata("rowtime", TIMESTAMP_LTZ(3))
                            .primaryKey("id")
                            .build());
    assertThat(result.getSchema())
            .isEqualTo(
                    Schema.newBuilder()
                            .column("id", BIGINT().notNull())
                            .column("name", STRING())
                            .columnByExpression("computed", "f1 + 42")
                            .columnByMetadata("rowtime", TIMESTAMP_LTZ(3)) // becomes metadata
                            .primaryKey("id")
                            .build());
}
|
@Override
public void startScheduling() {
    // Arm the idle-slot timeout check before any state transition happens.
    checkIdleSlotTimeout();
    // Scheduling may only be started from the initial Created state; any
    // other state is a caller error.
    final Created createdState =
            state.as(Created.class)
                    .orElseThrow(
                            () ->
                                    new IllegalStateException(
                                            "Can only start scheduling when being in Created state."));
    createdState.startScheduling();
}
|
@Test
void testStartSchedulingSetsResourceRequirementsForDefaultMode() throws Exception {
    final JobGraph jobGraph = createJobGraph();
    final DefaultDeclarativeSlotPool declarativeSlotPool =
            createDeclarativeSlotPool(jobGraph.getJobID());
    final AdaptiveScheduler scheduler =
            new AdaptiveSchedulerBuilder(
                            jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .setDeclarativeSlotPool(declarativeSlotPool)
                    .build();
    scheduler.startScheduling();
    // Starting the scheduler must declare slot requirements matching the
    // job's parallelism (profile is UNKNOWN in default mode).
    assertThat(declarativeSlotPool.getResourceRequirements())
            .contains(ResourceRequirement.create(ResourceProfile.UNKNOWN, PARALLELISM));
}
|
/**
 * Validates a Chinese ID card number, dispatching on its length:
 * 18 digits (current mainland format), 15 digits (legacy format) or
 * 10 characters (Hong Kong / Macao / Taiwan formats).
 *
 * @param idCard candidate ID card number, may be {@code null} or blank
 * @return {@code true} if the number passes the format-specific validation
 */
public static boolean isValidCard(String idCard) {
    // Blank input can never be valid.
    if (StrUtil.isBlank(idCard)) {
        return false;
    }
    final int length = idCard.length();
    if (length == 18) {
        // Current 18-digit mainland ID card.
        return isValidCard18(idCard);
    }
    if (length == 15) {
        // Legacy 15-digit mainland ID card.
        return isValidCard15(idCard);
    }
    if (length == 10) {
        // HK/Macao/Taiwan 10-character format; the third element of the
        // result array is the validity flag as the string "true"/"false".
        final String[] cardVal = isValidCard10(idCard);
        return null != cardVal && "true".equals(cardVal[2]);
    }
    // Any other length is not a recognized ID card format.
    return false;
}
|
@Test
public void issueI88YKMTest() {
    // Regression test for issue I88YKM: a 15-digit all-ones number must be
    // accepted by the legacy 15-digit validation path.
    assertTrue(IdcardUtil.isValidCard("111111111111111"));
}
|
/**
 * Extracts partition values from a Hive-style partition name such as
 * {@code "k1=v1/k2=v2"}. Mimics Hive's {@code Warehouse.makeValsFromName}:
 * parsing stops at the first piece that has no '=' separator.
 *
 * @param partitionName slash-separated partition name
 * @return immutable list of unescaped partition values
 */
public static List<String> toPartitionValues(String partitionName) {
    final ImmutableList.Builder<String> values = ImmutableList.builder();
    for (String piece : Splitter.on('/').split(partitionName)) {
        final int eq = piece.indexOf('=');
        if (eq < 0) {
            // A piece without '=' terminates the well-formed prefix.
            break;
        }
        // Only the value part (after the first '=') is kept; any further
        // '=' characters belong to the value itself.
        values.add(unescapePathName(piece.substring(eq + 1)));
    }
    return values.build();
}
|
@Test
public void testToPartitionValues() {
    // Only the first '=' in each piece separates key from value; extra '='
    // characters are part of the value. Empty values are preserved.
    Assert.assertEquals(Lists.newArrayList("1", "2", "3"), toPartitionValues("a=1/b=2/c=3"));
    Assert.assertEquals(Lists.newArrayList("1", "2=1"), toPartitionValues("a=1/b=2=1"));
    Assert.assertEquals(Lists.newArrayList("1", "2=null"), toPartitionValues("a=1/b=2=null"));
    Assert.assertEquals(Lists.newArrayList("1", "2=null", "1"), toPartitionValues("a=1/b=2=null/3=1"));
    Assert.assertEquals(Lists.newArrayList("1", "2=null", ""), toPartitionValues("a=1/b=2=null/3="));
    Assert.assertEquals(Lists.newArrayList("1", "", "1"), toPartitionValues("a=1/b=/3=1"));
}
|
/**
 * Filters the machine's network interfaces to those on the given subnet.
 * Convenience overload that uses the default {@link NetworkInterfaceShim}.
 *
 * @param address address identifying the subnet
 * @param subnetPrefix subnet prefix length in bits
 * @return interfaces whose addresses fall within the subnet
 * @throws SocketException if the interfaces cannot be enumerated
 */
public static NetworkInterface[] filterBySubnet(final InetAddress address, final int subnetPrefix)
    throws SocketException
{
    return filterBySubnet(NetworkInterfaceShim.DEFAULT, address, subnetPrefix);
}
|
@Test
void shouldFilterBySubnetAndFindOneIpV6Result() throws Exception
{
    // Two stub interfaces on different /80 IPv6 subnets; only the one
    // matching the queried subnet must be returned.
    final NetworkInterfaceStub stub = new NetworkInterfaceStub();
    final NetworkInterface ifc1 = stub.add("fe80:0:0:0001:0002:0:0:1/80");
    stub.add("fe80:0:0:0002:0003:0:0:1/80");
    final NetworkInterface[] filteredBySubnet = filterBySubnet(stub, getByName("fe80:0:0:0001:0002:0:0:0"), 80);
    assertEquals(1, filteredBySubnet.length);
    assertEquals(ifc1, filteredBySubnet[0]);
}
|
/**
 * Merges another joiner's accumulated content into this one as a single
 * appended element. A {@code null} joiner or one that has never been
 * appended to ({@code appendable == null}) is ignored.
 *
 * @param strJoiner the joiner to merge from, may be {@code null}
 * @return this joiner, for chaining
 */
public StrJoiner merge(StrJoiner strJoiner){
    if(null != strJoiner && null != strJoiner.appendable){
        final String otherStr = strJoiner.toString();
        if(strJoiner.wrapElement){
            // Elements are individually wrapped, so the rendered string can
            // be appended verbatim.
            this.append(otherStr);
        }else{
            // Skip the other joiner's leading prefix so it is not duplicated
            // inside this joiner's own wrapping.
            // NOTE(review): this skips THIS joiner's prefix length, assuming
            // both joiners use same-length prefixes — confirm for mixed use.
            this.append(otherStr, this.prefix.length(), otherStr.length());
        }
    }
    return this;
}
|
@Test
public void mergeTest(){
    StrJoiner joiner1 = StrJoiner.of(",", "[", "]");
    joiner1.append("123");
    StrJoiner joiner2 = StrJoiner.of(",", "[", "]");
    // NOTE(review): the two appends below go to joiner1, so joiner2 is never
    // populated and merge() is exercised only with an empty joiner (a no-op).
    // The assertion then passes purely on joiner1's own content — confirm
    // whether "456"/"789" were meant to be appended to joiner2 instead.
    joiner1.append("456");
    joiner1.append("789");
    final StrJoiner merge = joiner1.merge(joiner2);
    assertEquals("[123,456,789]", merge.toString());
}
|
/**
 * Initializes the server: verifies directories, loads build information,
 * sets up logging and configuration, and initializes all services.
 *
 * <p>May only be called once; the server must be in {@code UNDEF} status.
 *
 * @throws ServerException if a service fails to initialize
 * @throws IllegalStateException if the server was already initialized
 */
public void init() throws ServerException {
    if (status != Status.UNDEF) {
        throw new IllegalStateException("Server already initialized");
    }
    status = Status.BOOTING;
    verifyDir(homeDir);
    verifyDir(tempDir);
    Properties serverInfo = new Properties();
    // try-with-resources guarantees the stream is closed even when load()
    // throws (the previous explicit close() leaked it on failure), and the
    // original exception is now chained as the cause instead of dropped.
    try (InputStream is = getResource(name + ".properties")) {
        serverInfo.load(is);
    } catch (IOException ex) {
        throw new RuntimeException("Could not load server information file: " + name + ".properties", ex);
    }
    initLog();
    log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
    log.info("Server [{}] starting", name);
    log.info("  Built information:");
    log.info("    Version           : {}", serverInfo.getProperty(name + ".version", "undef"));
    log.info("    Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
    log.info("    Source Revision   : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
    log.info("    Built by          : {}", serverInfo.getProperty(name + ".build.username", "undef"));
    log.info("    Built timestamp   : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
    log.info("  Runtime information:");
    log.info("    Home   dir: {}", homeDir);
    log.info("    Config dir: {}", (config == null) ? configDir : "-");
    log.info("    Log    dir: {}", logDir);
    log.info("    Temp   dir: {}", tempDir);
    initConfig();
    log.debug("Loading services");
    List<Service> list = loadServices();
    try {
        log.debug("Initializing services");
        initServices(list);
        log.info("Services initialized");
    } catch (ServerException ex) {
        // Roll back partially-initialized services before propagating.
        log.error("Services initialization failure, destroying initialized services");
        destroyServices();
        throw ex;
    }
    // Startup status is configurable (e.g. to start in admin mode).
    Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
    setStatus(status);
    log.info("Server [{}] started!, status [{}]", name, status);
}
|
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState4() throws Exception {
    // Calling init() a second time on the same server must be rejected.
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    Server server = new Server("server", dir, dir, dir, dir, new Configuration(false));
    server.init();
    server.init();
}
|
/**
 * Computes the average per-bar return of a position as the bars-th root of
 * the gross return, i.e. {@code grossReturn ^ (1 / bars)}.
 * Returns 1 (the series' numeric one) when the position spans zero bars.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    Num bars = numberOfBars.calculate(series, position);
    // If a simple division was used (grossreturn/bars), compounding would not be
    // considered, leading to inaccuracies in the calculation.
    // Therefore we need to use "pow" to accurately capture the compounding effect.
    return bars.isZero() ? series.one() : grossReturn.calculate(series, position).pow(series.one().dividedBy(bars));
}
|
@Test
public void calculateWithOnePosition() {
    // Buy at 100, sell at 105 over 2 bars: average per-bar return is
    // (105/100)^(1/2), i.e. the square root of the gross return.
    series = new MockBarSeries(numFunction, 100, 105);
    Position position = new Position(Trade.buyAt(0, series), Trade.sellAt(1, series));
    AnalysisCriterion average = getCriterion();
    assertNumEquals(numOf(105d / 100).pow(numOf(0.5)), average.calculate(series, position));
}
|
/**
 * Indicates whether the given connect point is part of the broadcast tree
 * of the given topology. Delegates to the default topology implementation.
 *
 * @throws NullPointerException if {@code connectPoint} is null
 */
@Override
public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
    checkNotNull(connectPoint, CONNECTION_POINT_NULL);
    return defaultTopology(topology).isBroadcastPoint(connectPoint);
}
|
@Test
public void testIsBroadcastPoint() {
    // Build a virtual network topology and query a port on one of its devices.
    VirtualNetwork virtualNetwork = setupVirtualNetworkTopology();
    TopologyService topologyService = manager.get(virtualNetwork.id(), TopologyService.class);
    Topology topology = topologyService.currentTopology();
    VirtualDevice srcVirtualDevice = getVirtualDevice(virtualNetwork.id(), DID1);
    ConnectPoint cp = new ConnectPoint(srcVirtualDevice.id(), PortNumber.portNumber(1));
    // test the isBroadcastPoint() method.
    Boolean isBroadcastPoint = topologyService.isBroadcastPoint(topology, cp);
    assertTrue("The connect point should be a broadcast point.", isBroadcastPoint);
}
|
/**
 * Restores the command topic from backed-up records: deletes any existing
 * command topic, recreates it, and replays the backup commands into it.
 *
 * @param backupCommands key/value pairs of the backed-up command records
 */
public void restore(final List<Pair<byte[], byte[]>> backupCommands) {
    // Delete the command topic
    deleteCommandTopicIfExists();
    // Create the command topic
    KsqlInternalTopicUtils.ensureTopic(commandTopicName, serverConfig, topicClient);
    // Restore the commands
    restoreCommandTopic(backupCommands);
}
|
@Test
public void shouldThrowWhenRestoreExecutionFails() throws Exception {
    // Given: the second record's send future fails.
    when(topicClient.isTopicExists(COMMAND_TOPIC_NAME)).thenReturn(false);
    doThrow(new RuntimeException()).when(future2).get();
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> restoreCommandTopic.restore(BACKUP_COMMANDS));
    // Then: the error names the failing command (line 2) ...
    assertThat(e.getMessage(),
        containsString(String.format("Failed restoring command (line 2): %s",
            new String(RECORD_2.key(), StandardCharsets.UTF_8))));
    verifyCreateCommandTopic();
    // ... record 1 was committed, record 2's transaction was aborted, and no
    // further records were attempted.
    final InOrder inOrder = inOrder(kafkaProducer, future1, future2);
    inOrder.verify(kafkaProducer).initTransactions();
    inOrder.verify(kafkaProducer).beginTransaction();
    inOrder.verify(kafkaProducer).send(RECORD_1);
    inOrder.verify(future1).get();
    inOrder.verify(kafkaProducer).commitTransaction();
    inOrder.verify(kafkaProducer).beginTransaction();
    inOrder.verify(kafkaProducer).send(RECORD_2);
    inOrder.verify(future2).get();
    inOrder.verify(kafkaProducer).abortTransaction();
    inOrder.verify(kafkaProducer).close();
    verifyNoMoreInteractions(kafkaProducer, future1, future2);
    verifyNoMoreInteractions(future3);
}
|
/**
 * Validates that the given config key is unique.
 *
 * <p>For a create ({@code id == null}) any existing record with the key is a
 * duplicate; for an update, only a record belonging to a different id is.
 *
 * @param id id of the config being saved, or {@code null} when creating
 * @param key config key to check
 */
@VisibleForTesting
public void validateConfigKeyUnique(Long id, String key) {
    final ConfigDO existing = configMapper.selectByKey(key);
    if (existing == null) {
        // No config uses this key yet, so it is unique.
        return;
    }
    if (id == null || !existing.getId().equals(id)) {
        throw exception(CONFIG_KEY_DUPLICATE);
    }
}
|
@Test
public void testValidateConfigKeyUnique_keyDuplicateForCreate() {
    // Prepare parameters
    String key = randomString();
    // Mock data: a config with the same key already exists
    configMapper.insert(randomConfigDO(o -> o.setConfigKey(key)));
    // Call with a null id (create case) and expect the duplicate-key error
    assertServiceException(() -> configService.validateConfigKeyUnique(null, key),
            CONFIG_KEY_DUPLICATE);
}
|
/**
 * Compresses the given bytes with GZIP.
 *
 * @param src bytes to compress; must not be {@code null}
 * @return the GZIP-compressed bytes
 * @throws IOException if compression fails
 */
public static byte[] compress(final byte[] src) throws IOException {
    final ByteArrayOutputStream bos = new ByteArrayOutputStream(src.length);
    // try-with-resources replaces the manual finish()/close() dance: closing
    // the GZIPOutputStream flushes and writes the trailer, and the stream is
    // released even if write() throws.
    try (GZIPOutputStream gos = new GZIPOutputStream(bos)) {
        gos.write(src);
    }
    return bos.toByteArray();
}
|
@Test
@DisabledOnJre({JRE.JAVA_8, JRE.JAVA_11})
public void testCompress2() throws IOException {
    // GZIP output differs slightly across JRE versions (deflater defaults),
    // hence the fixture is only valid on newer JREs.
    Assertions.assertArrayEquals(compressedBytes2,
            CompressUtil.compress(originBytes));
}
|
/**
 * Sends a record via the underlying producer, beginning a transaction first
 * if needed. Recoverable producer errors (e.g. fencing) are rethrown as
 * {@code TaskMigratedException}; everything else becomes a
 * {@code StreamsException}.
 */
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record,
                            final Callback callback) {
    maybeBeginTransaction();
    try {
        return producer.send(record, callback);
    } catch (final KafkaException uncaughtException) {
        if (isRecoverable(uncaughtException)) {
            // producer.send() call may throw a KafkaException which wraps a FencedException,
            // in this case we should throw its wrapped inner cause so that it can be
            // captured and re-wrapped as TaskMigratedException
            throw new TaskMigratedException(
                formatException("Producer got fenced trying to send a record"),
                uncaughtException.getCause()
            );
        } else {
            throw new StreamsException(
                formatException(String.format("Error encountered trying to send record to topic %s", record.topic())),
                uncaughtException
            );
        }
    }
}
|
@Test
public void shouldThrowStreamsExceptionOnSendError() {
    // A non-recoverable KafkaException from send() must surface as a
    // StreamsException that wraps the original exception as its cause.
    nonEosMockProducer.sendException = new KafkaException("KABOOM!");
    final StreamsException thrown = assertThrows(
        StreamsException.class,
        () -> nonEosStreamsProducer.send(record, null)
    );
    assertThat(thrown.getCause(), is(nonEosMockProducer.sendException));
    assertThat(thrown.getMessage(), is("Error encountered trying to send record to topic topic [test]"));
}
|
/**
 * Forces subclasses to provide a meaningful {@code toString}; the default
 * {@code Object} implementation is deliberately not inherited.
 */
@Override
public abstract String toString();
|
@Test
public void testFormattingDiffsUsing_toString() {
    // The toString behaviour should be the same as the wrapped correspondence.
    assertThat(LENGTHS_WITH_DIFF.toString()).isEqualTo("has a length of");
}
|
/**
 * Renders a rule description section as HTML. Markdown content is converted;
 * any other format is assumed to already be HTML and returned unchanged.
 *
 * @param descriptionFormat format of the description, may be {@code null}
 * @param ruleDescriptionSectionDto section whose content is rendered
 * @return HTML representation of the section content
 */
public String toHtml(@Nullable RuleDto.Format descriptionFormat, RuleDescriptionSectionDto ruleDescriptionSectionDto) {
    final String content = ruleDescriptionSectionDto.getContent();
    return MARKDOWN.equals(descriptionFormat) ? Markdown.convertToHtml(content) : content;
}
|
@Test
public void toHtmlWithMarkdownFormat() {
    // Markdown-formatted sections must be converted to HTML markup.
    String result = ruleDescriptionFormatter.toHtml(MARKDOWN, MARKDOWN_SECTION);
    assertThat(result).isEqualTo("<strong>md</strong> <code>description</code>");
}
|
/**
 * Handles a failed work item: either re-queues it locally after a delay, or
 * marks it invalid so Windmill can redeliver it if still needed.
 *
 * @param computationId computation the work item belongs to
 * @param executableWork the failed work item
 * @param t the failure
 * @param onInvalidWork callback invoked when the item is deemed invalid
 */
public void logAndProcessFailure(
    String computationId,
    ExecutableWork executableWork,
    Throwable t,
    Consumer<Work> onInvalidWork) {
    if (shouldRetryLocally(computationId, executableWork.work(), t)) {
        // Try again after some delay and at the end of the queue to avoid a tight loop.
        executeWithDelay(retryLocallyDelayMs, executableWork);
    } else {
        // Consider the item invalid. It will eventually be retried by Windmill if it still needs to
        // be processed.
        onInvalidWork.accept(executableWork.work());
    }
}
|
@Test
public void logAndProcessFailure_doesNotRetryWhenFailureReporterMarksAsNonRetryable() {
    Set<Work> executedWork = new HashSet<>();
    ExecutableWork work = createWork(executedWork::add);
    // Failure reporter flags the failure as non-retryable.
    WorkFailureProcessor workFailureProcessor =
        createWorkFailureProcessor(streamingApplianceFailureReporter(true));
    Set<Work> invalidWork = new HashSet<>();
    workFailureProcessor.logAndProcessFailure(
        DEFAULT_COMPUTATION_ID, work, new RuntimeException(), invalidWork::add);
    // The work must not be re-executed; it must be routed to the invalid-work callback.
    assertThat(executedWork).isEmpty();
    assertThat(invalidWork).containsExactly(work.work());
}
|
/**
 * Asks the broker at {@code address} to reset consumer offsets and returns
 * the resulting per-queue offset table.
 *
 * <p>The returned future completes exceptionally with
 * {@link MQClientException} on a broker-side error, or with the transport
 * failure if the remoting invocation itself fails.
 */
@Override
public CompletableFuture<Map<MessageQueue, Long>> invokeBrokerToResetOffset(String address,
    ResetOffsetRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<Map<MessageQueue, Long>> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_RESET_OFFSET, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
        if (response.getCode() == ResponseCode.SUCCESS && null != response.getBody()) {
            Map<MessageQueue, Long> offsetTable = ResetOffsetBody.decode(response.getBody(), ResetOffsetBody.class).getOffsetTable();
            future.complete(offsetTable);
            log.info("Invoke broker to reset offset success. address:{}, header:{}, offsetTable:{}",
                address, requestHeader, offsetTable);
        } else {
            log.warn("invokeBrokerToResetOffset getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    }).exceptionally(t -> {
        // Bug fix: without this, a transport-level failure (timeout, broken
        // connection) left the returned future forever incomplete, blocking
        // any caller waiting on it.
        log.warn("invokeBrokerToResetOffset transport failure. address:{}, header={}", address, requestHeader, t);
        future.completeExceptionally(t);
        return null;
    });
    return future;
}
|
@Test
public void assertInvokeBrokerToResetOffsetWithError() {
    // Broker replies with an error code: the future must complete
    // exceptionally with an MQClientException carrying that code.
    setResponseError();
    ResetOffsetRequestHeader requestHeader = mock(ResetOffsetRequestHeader.class);
    CompletableFuture<Map<MessageQueue, Long>> actual = mqClientAdminImpl.invokeBrokerToResetOffset(defaultBrokerAddr, requestHeader, defaultTimeout);
    Throwable thrown = assertThrows(ExecutionException.class, actual::get);
    assertTrue(thrown.getCause() instanceof MQClientException);
    MQClientException mqException = (MQClientException) thrown.getCause();
    assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode());
    assertTrue(mqException.getMessage().contains("CODE: 1  DESC: null"));
}
|
/**
 * Creates a Nextcloud download share for the given file via the OCS
 * files_sharing API and returns the resulting share URL.
 *
 * <p>A public link is created for {@code Sharee.world}; otherwise a
 * user-share for the sharee's identifier. The callback may supply an
 * optional share passphrase (anonymous prompt, not persisted to keychain).
 *
 * @throws BackgroundException mapped from HTTP or I/O failures
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException {
    final Host bookmark = session.getHost();
    // Build the OCS share-creation URL; the path is relative to the user's files home.
    final StringBuilder request = new StringBuilder(String.format("https://%s%s/apps/files_sharing/api/v1/shares?path=%s&shareType=%d&shareWith=%s",
        bookmark.getHostname(), new NextcloudHomeFeature(bookmark).find(NextcloudHomeFeature.Context.ocs).getAbsolute(),
        URIEncoder.encode(PathRelativizer.relativize(NextcloudHomeFeature.Context.files.home(bookmark).find().getAbsolute(), file.getAbsolute())),
        Sharee.world.equals(sharee) ? SHARE_TYPE_PUBLIC_LINK : SHARE_TYPE_USER,
        Sharee.world.equals(sharee) ? StringUtils.EMPTY : sharee.getIdentifier()
    ));
    final Credentials password = callback.prompt(bookmark,
        LocaleFactory.localizedString("Passphrase", "Cryptomator"),
        MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()),
        new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk()));
    if(password.isPasswordAuthentication()) {
        // Optional share password; server-side policy may reject weak ones.
        request.append(String.format("&password=%s", URIEncoder.encode(password.getPassword())));
    }
    final HttpPost resource = new HttpPost(request.toString());
    // OCS API requires this header; response is requested as XML.
    resource.setHeader("OCS-APIRequest", "true");
    resource.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
    try {
        return session.getClient().execute(resource, new OcsDownloadShareResponseHandler());
    }
    catch(HttpResponseException e) {
        throw new DefaultHttpResponseExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
|
@Test
public void testToDownloadUrlPasswordTooShort() throws Exception {
    // A 5-character share passphrase violates the server's minimum-length
    // policy and must be rejected with an AccessDeniedException.
    final Path home = new NextcloudHomeFeature(session.getHost()).find();
    final Path folder = new DAVDirectoryFeature(session, new NextcloudAttributesFinderFeature(session)).mkdir(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    try {
        new NextcloudShareFeature(session).toDownloadUrl(folder, Share.Sharee.world, null, new DisabledPasswordCallback() {
            @Override
            public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
                return new Credentials(null, new AlphanumericRandomStringService(5).random());
            }
        });
        fail();
    }
    catch(AccessDeniedException e) {
        assertEquals("Password needs to be at least 10 characters long. Please contact your web hosting service provider for assistance.", e.getDetail());
    }
    // Clean up the test folder regardless of the share outcome.
    new DAVDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
/**
 * Marks this service thread as stopped without interrupting it.
 * A thread that was never started is left untouched.
 */
public void makeStop() {
    // Nothing to stop if the thread never started.
    if (!started.get()) {
        return;
    }
    stopped = true;
    log.info("makestop thread[{}] ", this.getServiceName());
}
|
@Test
public void testMakeStop() {
    // A started service must report stopped after makeStop() is invoked.
    final ServiceThread thread = startTestServiceThread();
    thread.makeStop();
    assertEquals(true, thread.isStopped());
}
|
/**
 * Handles a menu selection: dispatches the menu action itself, followed by
 * the matching content-change action (COMPANY shows the company page, every
 * other menu item shows the products page).
 *
 * @param menuItem the selected menu item
 */
public void menuItemSelected(MenuItem menuItem) {
    dispatchAction(new MenuAction(menuItem));
    final Content content =
            menuItem == MenuItem.COMPANY ? Content.COMPANY : Content.PRODUCTS;
    dispatchAction(new ContentAction(content));
}
|
@Test
void testMenuItemSelected() {
    final var dispatcher = Dispatcher.getInstance();
    final var store = mock(Store.class);
    dispatcher.registerStore(store);
    dispatcher.menuItemSelected(MenuItem.HOME);
    dispatcher.menuItemSelected(MenuItem.COMPANY);
    // We expect 4 events, 2 menu selections and 2 content change actions
    final var actionCaptor = ArgumentCaptor.forClass(Action.class);
    verify(store, times(4)).onAction(actionCaptor.capture());
    verifyNoMoreInteractions(store);
    // Partition the captured actions by type to verify each independently.
    final var actions = actionCaptor.getAllValues();
    final var menuActions = actions.stream()
        .filter(a -> a.getType().equals(ActionType.MENU_ITEM_SELECTED))
        .map(a -> (MenuAction) a)
        .toList();
    final var contentActions = actions.stream()
        .filter(a -> a.getType().equals(ActionType.CONTENT_CHANGED))
        .map(a -> (ContentAction) a)
        .toList();
    assertEquals(2, menuActions.size());
    assertEquals(1, menuActions.stream().map(MenuAction::getMenuItem).filter(MenuItem.HOME::equals)
        .count());
    assertEquals(1, menuActions.stream().map(MenuAction::getMenuItem)
        .filter(MenuItem.COMPANY::equals).count());
    // HOME maps to PRODUCTS content, COMPANY maps to COMPANY content.
    assertEquals(2, contentActions.size());
    assertEquals(1, contentActions.stream().map(ContentAction::getContent)
        .filter(Content.PRODUCTS::equals).count());
    assertEquals(1, contentActions.stream().map(ContentAction::getContent)
        .filter(Content.COMPANY::equals).count());
}
|
/**
 * Returns whether the given version lies within this range, inclusive of
 * both the {@code lowest} and {@code highest} bounds.
 */
public boolean contains(short version) {
    return version >= lowest && version <= highest;
}
|
@Test
public void testContains() {
    // Single-version membership: bounds are inclusive.
    assertTrue(newVersions(2, 3).contains((short) 3));
    assertTrue(newVersions(2, 3).contains((short) 2));
    assertFalse(newVersions(0, 1).contains((short) 2));
    assertTrue(newVersions(0, Short.MAX_VALUE).contains((short) 100));
    assertFalse(newVersions(2, Short.MAX_VALUE).contains((short) 0));
    // Range containment: a range contains any sub-range, and NONE trivially.
    assertTrue(newVersions(2, 3).contains(newVersions(2, 3)));
    assertTrue(newVersions(2, 3).contains(newVersions(2, 2)));
    assertFalse(newVersions(2, 3).contains(newVersions(2, 4)));
    assertTrue(newVersions(2, 3).contains(Versions.NONE));
    assertTrue(Versions.ALL.contains(newVersions(1, 2)));
}
|
/**
 * OOM-handling loop: keeps killing containers until the memory cgroup no
 * longer reports an under-OOM condition.
 */
@Override
public void run() {
    try {
        // We kill containers until the kernel reports the OOM situation resolved
        // Note: If the kernel has a delay this may kill more than necessary
        while (true) {
            // Re-read the cgroup OOM status on every iteration.
            String status = cgroups.getCGroupParam(
                CGroupsHandler.CGroupController.MEMORY,
                "",
                CGROUP_PARAM_MEMORY_OOM_CONTROL);
            if (!status.contains(CGroupsHandler.UNDER_OOM)) {
                break;
            }
            boolean containerKilled = killContainer();
            if (!containerKilled) {
                // This can happen, if SIGKILL did not clean up
                // non-PGID or containers or containers launched by other users
                // or if a process was put to the root YARN cgroup.
                throw new YarnRuntimeException(
                    "Could not find any containers but CGroups " +
                        "reserved for containers ran out of memory. " +
                        "I am giving up");
            }
        }
    } catch (ResourceHandlerException ex) {
        // Reading cgroup params fails during teardown; treat as benign.
        LOG.warn("Could not fetch OOM status. " +
            "This is expected at shutdown. Exiting.", ex);
    }
}
|
@Test
public void testBothRunningGuaranteedContainersOverLimitUponOOM()
    throws Exception {
    // Two guaranteed containers, both over their memory limit; the handler
    // should kill only the later-launched one (c2) to resolve the OOM.
    ConcurrentHashMap<ContainerId, Container> containers =
        new ConcurrentHashMap<>();
    Container c1 = createContainer(1, true, 1L, true);
    containers.put(c1.getContainerId(), c1);
    Container c2 = createContainer(2, true, 2L, true);
    containers.put(c2.getContainerId(), c2);
    ContainerExecutor ex = createContainerExecutor(containers);
    Context context = mock(Context.class);
    when(context.getContainers()).thenReturn(containers);
    when(context.getContainerExecutor()).thenReturn(ex);
    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
    // First poll reports under_oom, second reports resolved.
    when(cGroupsHandler.getCGroupParam(
        CGroupsHandler.CGroupController.MEMORY,
        "",
        CGROUP_PARAM_MEMORY_OOM_CONTROL))
        .thenReturn("under_oom 1").thenReturn("under_oom 0");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PROCS_FILE))
        .thenReturn("1234").thenReturn("");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
        .thenReturn(getMB(11));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
        .thenReturn(getMB(11));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PROCS_FILE))
        .thenReturn("1235").thenReturn("");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
        .thenReturn(getMB(11));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
        .thenReturn(getMB(11));
    DefaultOOMHandler handler =
        new DefaultOOMHandler(context, false) {
            @Override
            protected CGroupsHandler getCGroupsHandler() {
                return cGroupsHandler;
            }
        };
    handler.run();
    // Exactly one kill signal, targeted at c2 (pid 1235).
    verify(ex, times(1)).signalContainer(
        new ContainerSignalContext.Builder()
            .setPid("1235")
            .setContainer(c2)
            .setSignal(ContainerExecutor.Signal.KILL)
            .build()
    );
    verify(ex, times(1)).signalContainer(any());
}
|
/**
 * Exposes unrecognized JSON properties captured during deserialization;
 * {@code @JsonAnyGetter} flattens them back into the serialized output.
 */
@JsonAnyGetter
public Map<String, Object> getExtraInfo() {
    return extraInfo;
}
|
@Test
public void testRoundTripSerde() throws Exception {
    // Serialize-then-deserialize must be lossless, including unknown
    // properties captured via the extraInfo any-getter/any-setter pair.
    WorkflowCreateRequest request =
        loadObject("fixtures/api/sample-workflow-create-request.json", WorkflowCreateRequest.class);
    assertEquals(
        request, MAPPER.readValue(MAPPER.writeValueAsString(request), WorkflowCreateRequest.class));
    assertEquals("foo", request.getExtraInfo().get("additional_meta"));
}
|
/**
 * Computes TURN routing for a client, falling back to a randomized default
 * URL list if routing fails for any reason (routing must never take down
 * the calling request, hence the deliberately broad catch).
 *
 * @param aci account identifier of the requesting client
 * @param clientAddress client IP used for locality-based routing, if known
 * @param instanceLimit maximum number of TURN instances to return
 */
public TurnServerOptions getRoutingFor(
    @Nonnull final UUID aci,
    @Nonnull final Optional<InetAddress> clientAddress,
    final int instanceLimit
) {
    try {
        return getRoutingForInner(aci, clientAddress, instanceLimit);
    } catch(Exception e) {
        logger.error("Failed to perform routing", e);
        // Fallback: hostname plus randomized config URLs, no per-IP routing.
        return new TurnServerOptions(this.configTurnRouter.getHostname(), null, this.configTurnRouter.randomUrls());
    }
}
|
@Test
public void testHandlesDatacenterNotInDnsRecords() throws UnknownHostException {
    // The performance table recommends a datacenter that has no DNS entries;
    // routing must degrade gracefully to an empty URL list, not fail.
    when(performanceTable.getDatacentersFor(any(), any(), any(), any()))
        .thenReturn(List.of("unsynced-datacenter"));
    assertThat(router().getRoutingFor(aci, Optional.of(InetAddress.getByName("0.0.0.1")), 10))
        .isEqualTo(optionsWithUrls(List.of()));
}
|
/**
 * Processes a multiple-sessions request: validates the referenced app
 * session and its authenticator, then either returns the eIDAS check
 * response or the session information enriched with details.
 *
 * @return {@code NokResponse} when the session or authenticator is invalid
 */
@Override
public AppResponse process(Flow flow, MultipleSessionsRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    var authAppSession = appSessionService.getSession(request.getAuthSessionId());
    if (!isAppSessionAuthenticated(authAppSession)) return new NokResponse();
    appAuthenticator = appAuthenticatorService.findByUserAppId(authAppSession.getUserAppId());
    if (!isAppAuthenticatorActivated(appAuthenticator)) return new NokResponse();
    // A non-empty eIDAS check result takes precedence over the session info.
    var response = checkEidasUIT();
    return response.orElseGet(() -> addDetailsToResponse(new WebSessionInformationResponse()));
}
|
@Test
public void processSessionInformationReceivedActivateResponseFaultReason() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    //given: BSNk activation reports NOK with the default fault reason
    Map<String, String> activateResponse = Map.of("status", "NOK", "faultReason", "default");
    when(appAuthenticatorService.findByUserAppId(authAppSession.getUserAppId())).thenReturn(mockedAppAuthenticator);
    when(dwsClient.bsnkActivate(response.get("bsn"))).thenReturn(activateResponse);
    //when
    AppResponse appResponse = sessionInformationReceived.process(mockedFlow, multipleSessionsRequest);
    //then: the flow is aborted with a status response
    assertTrue(appResponse instanceof StatusResponse);
    assertEquals("ABORTED", ((StatusResponse) appResponse).getStatus());
}
|
/**
 * Sends a single SMS: validates template, channel and mobile number, always
 * records a send log, and only dispatches the async send message when both
 * template and channel are enabled.
 *
 * @return id of the created send-log record
 */
@Override
public Long sendSingleSms(String mobile, Long userId, Integer userType,
                          String templateCode, Map<String, Object> templateParams) {
    // Validate that the SMS template exists and is legal.
    SmsTemplateDO template = validateSmsTemplate(templateCode);
    // Validate that the SMS channel referenced by the template is legal.
    SmsChannelDO smsChannel = validateSmsChannel(template.getChannelId());
    // Validate that the mobile number is present.
    mobile = validateMobile(mobile);
    // Build the ordered template parameters. Done here so parameter problems
    // surface before the send log is written.
    List<KeyValue<String, Object>> newTemplateParams = buildTemplateParams(template, templateParams);
    // Create the send log. If the template or channel is disabled, the SMS
    // is not sent but the log is still recorded.
    Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus())
        && CommonStatusEnum.ENABLE.getStatus().equals(smsChannel.getStatus());
    String content = smsTemplateService.formatSmsTemplateContent(template.getContent(), templateParams);
    Long sendLogId = smsLogService.createSmsLog(mobile, userId, userType, isSend, template, content, templateParams);
    // Publish an MQ message so the actual SMS send happens asynchronously.
    if (isSend) {
        smsProducer.sendSmsSendMessage(sendLogId, mobile, template.getChannelId(),
            template.getApiTemplateId(), newTemplateParams);
    }
    return sendLogId;
}
|
@Test
public void testSendSingleSms_successWhenSmsTemplateEnable() {
    // Prepare parameters
    String mobile = randomString();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock SmsTemplateService: enabled template with two parameters
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock SmsChannelService: enabled channel
    SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
    // Mock SmsLogService: return a fixed log id
    Long smsLogId = randomLongId();
    when(smsLogService.createSmsLog(eq(mobile), eq(userId), eq(userType), eq(Boolean.TRUE), eq(template),
        eq(content), eq(templateParams))).thenReturn(smsLogId);
    // Call
    Long resultSmsLogId = smsSendService.sendSingleSms(mobile, userId, userType, templateCode, templateParams);
    // Assert: returned log id matches
    assertEquals(smsLogId, resultSmsLogId);
    // Assert: the async send message was published with ordered params
    verify(smsProducer).sendSmsSendMessage(eq(smsLogId), eq(mobile),
        eq(template.getChannelId()), eq(template.getApiTemplateId()),
        eq(Lists.newArrayList(new KeyValue<>("code", "1234"), new KeyValue<>("op", "login"))));
}
|
/**
 * Tokenizes the pattern string by driving a per-character state machine.
 * After the main loop, the end-of-stream handling flushes whatever state
 * remains; a pattern ending inside a format modifier or option is malformed.
 *
 * @return the token list
 * @throws ScanException if the pattern ends in an incomplete construct
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();
    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;
        // Dispatch on the current lexer state; each handler may emit tokens,
        // accumulate into buf, and/or switch state.
        switch (state) {
            case LITERAL_STATE:
                handleLiteralState(c, tokenList, buf);
                break;
            case FORMAT_MODIFIER_STATE:
                handleFormatModifierState(c, tokenList, buf);
                break;
            case OPTION_STATE:
                processOption(c, tokenList, buf);
                break;
            case KEYWORD_STATE:
                handleKeywordState(c, tokenList, buf);
                break;
            case RIGHT_PARENTHESIS_STATE:
                handleRightParenthesisState(c, tokenList, buf);
                break;
            default:
        }
    }
    // EOS
    switch (state) {
        case LITERAL_STATE:
            addValuedToken(Token.LITERAL, buf, tokenList);
            break;
        case KEYWORD_STATE:
            tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
            break;
        case RIGHT_PARENTHESIS_STATE:
            tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
            break;
        case FORMAT_MODIFIER_STATE:
        case OPTION_STATE:
            // A pattern may not end mid-modifier or mid-option.
            throw new ScanException("Unexpected end of pattern string");
    }
    return tokenList;
}
|
@Test
public void testSimpleP2() throws ScanException {
    // Exercises literal, keyword, format modifier, bare composite keyword,
    // nested keyword with option, and right parenthesis in one pattern.
    List<Token> tl = new TokenStream("X %a %-12.550(hello %class{.4?})").tokenize();
    List<Token> witness = new ArrayList<Token>();
    witness.add(new Token(Token.LITERAL, "X "));
    witness.add(Token.PERCENT_TOKEN);
    witness.add(new Token(Token.SIMPLE_KEYWORD, "a"));
    witness.add(new Token(Token.LITERAL, " "));
    witness.add(Token.PERCENT_TOKEN);
    witness.add(new Token(Token.FORMAT_MODIFIER, "-12.550"));
    witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
    witness.add(new Token(Token.LITERAL, "hello "));
    witness.add(Token.PERCENT_TOKEN);
    witness.add(new Token(Token.SIMPLE_KEYWORD, "class"));
    List<String> ol = new ArrayList<String>();
    ol.add(".4?");
    witness.add(new Token(Token.OPTION, ol));
    witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
    assertEquals(witness, tl);
}
|
/**
 * Looks up a global state store by name and wraps it for read-write access.
 * NOTE(review): when no store is registered under {@code name},
 * {@code getGlobalStore} appears to yield {@code null} and this returns
 * {@code null} rather than throwing — confirm against the test expectations.
 */
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
    final StateStore store = stateManager.getGlobalStore(name);
    return (S) getReadWriteStore(store);
}
|
@Test
public void shouldReturnGlobalOrNullStore() {
    // A registered global store is returned; an unknown name yields null.
    when(stateManager.getGlobalStore(GLOBAL_STORE_NAME)).thenReturn(mock(StateStore.class));
    assertThat(globalContext.getStateStore(GLOBAL_STORE_NAME), new IsInstanceOf(StateStore.class));
    assertNull(globalContext.getStateStore(UNKNOWN_STORE));
}
|
/**
 * Opens the interpreter: groups flat {@code prefix.key} properties into
 * per-prefix {@link Properties}, drops prefixes that lack the mandatory
 * driver/url entries, and initializes result limits and the SQL splitter.
 */
@Override
public void open() {
    super.open();
    for (String propertyKey : properties.stringPropertyNames()) {
        LOGGER.debug("propertyKey: {}", propertyKey);
        // Split "prefix.rest" on the FIRST dot only; keys without a dot are ignored.
        String[] keyValue = propertyKey.split("\\.", 2);
        if (2 == keyValue.length) {
            LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]);
            Properties prefixProperties;
            if (basePropertiesMap.containsKey(keyValue[0])) {
                prefixProperties = basePropertiesMap.get(keyValue[0]);
            } else {
                prefixProperties = new Properties();
                basePropertiesMap.put(keyValue[0].trim(), prefixProperties);
            }
            prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey));
        }
    }
    // Any non-"common" prefix must define both driver and url, otherwise it
    // cannot create connections and is removed.
    Set<String> removeKeySet = new HashSet<>();
    for (String key : basePropertiesMap.keySet()) {
        if (!COMMON_KEY.equals(key)) {
            Properties properties = basePropertiesMap.get(key);
            if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) {
                LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.",
                    key, DRIVER_KEY, key, key, URL_KEY);
                removeKeySet.add(key);
            }
        }
    }
    // Remove outside the iteration to avoid ConcurrentModificationException.
    for (String key : removeKeySet) {
        basePropertiesMap.remove(key);
    }
    LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap);
    setMaxLineResults();
    setMaxRows();
    //TODO(zjffdu) Set different sql splitter for different sql dialects.
    this.sqlSplitter = new SqlSplitter();
}
|
// Exercises SQL splitting: the query mixes line comments ("--"), block comments
// ("/* ... */") and string literals containing ';', '--' and '/*'. Only the three
// real statements outside comments/literals must be executed.
@Test
void testSplitSqlQueryWithComments() throws IOException,
    InterpreterException {
    Properties properties = new Properties();
    properties.setProperty("common.max_count", "1000");
    properties.setProperty("common.max_retry", "3");
    properties.setProperty("default.driver", "org.h2.Driver");
    properties.setProperty("default.url", getJdbcConnection());
    properties.setProperty("default.user", "");
    properties.setProperty("default.password", "");
    properties.setProperty("default.splitQueries", "true");
    JDBCInterpreter t = new JDBCInterpreter(properties);
    t.open();
    String sqlQuery = "/* ; */\n" +
        "-- /* comment\n" +
        "--select * from test_table\n" +
        "select * from test_table; /* some comment ; */\n" +
        "/*\n" +
        "select * from test_table;\n" +
        "*/\n" +
        "-- a ; b\n" +
        "select * from test_table WHERE ID = ';--';\n" +
        "select * from test_table WHERE ID = '/*'; -- test";
    InterpreterResult interpreterResult = t.interpret(sqlQuery, context);
    assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    // One result message per executed statement -> exactly 3 statements survived splitting.
    assertEquals(3, resultMessages.size());
}
|
public DataTable subTable(int fromRow, int fromColumn) {
    // Convenience overload: extend the view from (fromRow, fromColumn) to the
    // table's full height and width.
    final int toRow = height();
    final int toColumn = width();
    return subTable(fromRow, fromColumn, toRow, toColumn);
}
|
// subTable must range-check its arguments: a negative fromColumn is rejected
// with IndexOutOfBoundsException rather than producing a skewed view.
@Test
void subTable_throws_for_negative_from_column() {
    DataTable table = createSimpleTable();
    assertThrows(IndexOutOfBoundsException.class, () -> table.subTable(0, -1, 1, 1));
}
|
@Override
public void onEvent(Event event) {
    // Distro data sync is meaningless in standalone deployments.
    if (EnvUtil.getStandaloneMode()) {
        return;
    }
    // Verify failures are re-synced to the single failed server; every other
    // client event is broadcast to all servers.
    final boolean isVerifyFailure = event instanceof ClientEvent.ClientVerifyFailedEvent;
    if (isVerifyFailure) {
        syncToVerifyFailedServer((ClientEvent.ClientVerifyFailedEvent) event);
        return;
    }
    syncToAllServer((ClientEvent) event);
}
|
// A ClientVerifyFailedEvent must trigger a targeted ADD re-sync to the server that
// failed verification (with zero delay), and never a broadcast sync to all servers.
@Test
void testOnClientVerifyFailedEventSuccess() {
    distroClientDataProcessor.onEvent(new ClientEvent.ClientVerifyFailedEvent(CLIENT_ID, MOCK_TARGET_SERVER));
    verify(distroProtocol).syncToTarget(any(), eq(DataOperation.ADD), eq(MOCK_TARGET_SERVER), eq(0L));
    verify(distroProtocol, never()).sync(any(), any());
}
|
@Override
public BrokerResponse executeQuery(String brokerAddress, String query)
    throws PinotClientException {
  // Synchronous facade over the async path: block until the broker responds or
  // the configured read timeout elapses. Any failure (timeout, interruption,
  // execution error) is wrapped in PinotClientException.
  try {
    var pendingResponse = executeQueryAsync(brokerAddress, query);
    return pendingResponse.get(_brokerReadTimeout, TimeUnit.MILLISECONDS);
  } catch (Exception cause) {
    throw new PinotClientException(cause);
  }
}
|
@Test
public void validJsonResponse() {
  // Serve the canned valid broker payload from the dummy HTTP server.
  _responseJson = _VALID_RESPONSE_JSON;
  JsonAsyncHttpPinotClientTransportFactory factory = new JsonAsyncHttpPinotClientTransportFactory();
  JsonAsyncHttpPinotClientTransport transport = (JsonAsyncHttpPinotClientTransport) factory.buildTransport();
  BrokerResponse response =
      transport.executeQuery("localhost:" + _dummyServer.getAddress().getPort(), "select * from planets");
  // The expectations below mirror the values hard-coded in _VALID_RESPONSE_JSON.
  assertFalse(response.hasExceptions());
  assertEquals(response.getRequestId(), "4567");
  ExecutionStats stats = response.getExecutionStats();
  assertEquals(stats.getTotalDocs(), 115545);
  assertEquals(stats.getNumServersResponded(), 99);
}
|
@Override
protected Integer convertFromString(final String value) throws ConversionException {
    // Parse and validate the subtask index; zero is accepted, negatives are not.
    // Note: Integer.parseInt may additionally throw NumberFormatException for
    // non-numeric input.
    final int subtaskIndex = Integer.parseInt(value);
    if (subtaskIndex < 0) {
        throw new ConversionException("subtaskindex must be positive, was: " + subtaskIndex);
    }
    return subtaskIndex;
}
|
// Negative subtask indices (including Integer.MIN_VALUE) must be rejected with a
// ConversionException carrying the offending value in its message.
@Test
void testConversionFromStringNegativeNumber() {
    assertThatThrownBy(() -> subtaskIndexPathParameter.convertFromString("-2147483648"))
            .isInstanceOf(ConversionException.class)
            .hasMessage("subtaskindex must be positive, was: " + Integer.MIN_VALUE);
}
|
@Override
public Collection<String> getXADriverClassNames() {
    // openGauss reuses the PostgreSQL-style XA data source implementation.
    final String xaDataSourceClassName = "org.opengauss.xa.PGXADataSource";
    return Collections.singletonList(xaDataSourceClassName);
}
|
// The openGauss XA definition must expose exactly the single PGXADataSource class.
@Test
void assertGetXADriverClassName() {
    assertThat(new OpenGaussXADataSourceDefinition().getXADriverClassNames(), is(Collections.singletonList("org.opengauss.xa.PGXADataSource")));
}
|
@PostMapping("confirm")
@Operation(summary = "User confirms wether emailaddress is (still) correct")
public AccountResult confirmEmail(@RequestBody DEmailConfirmRequest deprecatedRequest) {
    // Validate the confirmation payload and resolve the app session before
    // delegating the actual confirmation to the account service.
    validateEmailAddressConfirmed(deprecatedRequest);
    final AppSession appSession = validate(deprecatedRequest);
    return accountService.confirmEmail(appSession.getAccountId(), deprecatedRequest.getRequest());
}
|
// A request carrying only an app-session id (no confirmation payload) must be
// rejected with HTTP 400 and the generic "Missing parameters." account error.
@Test
public void invalidEmailConfirm() {
    DEmailConfirmRequest request = new DEmailConfirmRequest();
    request.setAppSessionId("id");
    DAccountException exc = assertThrows(DAccountException.class, () -> {
        emailController.confirmEmail(request);
    });
    assertEquals(HttpStatus.BAD_REQUEST, exc.getAccountErrorMessage().getHttpStatus());
    assertEquals("Missing parameters.", exc.getAccountErrorMessage().getMessage());
}
|
public static ExtensibleLoadManagerImpl get(LoadManager loadManager) {
    // Unwrap the ExtensibleLoadManagerWrapper; any other LoadManager type is a
    // caller error.
    if (loadManager instanceof ExtensibleLoadManagerWrapper wrapper) {
        return wrapper.get();
    }
    throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'.");
}
|
// Splits the first namespace bundle at an explicitly supplied position
// (mid + 100 rather than the default midpoint) via the admin API, then verifies
// the bundle count grew by one and all three expected boundaries exist.
@Test(timeOut = 30 * 1000)
public void testSplitBundleWithSpecificPositionAdminAPI() throws Exception {
    String namespace = defaultTestNamespace;
    String topic = "persistent://" + namespace + "/test-split-with-specific-position";
    admin.topics().createPartitionedTopic(topic, 1024);
    BundlesData bundles = admin.namespaces().getBundles(namespace);
    int numBundles = bundles.getNumBundles();
    // Boundaries are hex strings; decode and sort to find the first bundle's range.
    var bundleRanges = bundles.getBoundaries().stream().map(Long::decode).sorted().toList();
    String firstBundle = bundleRanges.get(0) + "_" + bundleRanges.get(1);
    long mid = bundleRanges.get(0) + (bundleRanges.get(1) - bundleRanges.get(0)) / 2;
    // Deliberately off-center split point to prove the supplied position is honored.
    long splitPosition = mid + 100;
    admin.namespaces().splitNamespaceBundle(namespace, firstBundle, true,
        "specified_positions_divide", List.of(bundleRanges.get(0), bundleRanges.get(1), splitPosition));
    BundlesData bundlesData = admin.namespaces().getBundles(namespace);
    Awaitility.waitAtMost(15, TimeUnit.SECONDS)
        .untilAsserted(() -> assertEquals(bundlesData.getNumBundles(), numBundles + 1));
    // Boundary strings are zero-padded 8-digit hex.
    String lowBundle = String.format("0x%08x", bundleRanges.get(0));
    String midBundle = String.format("0x%08x", splitPosition);
    String highBundle = String.format("0x%08x", bundleRanges.get(1));
    assertTrue(bundlesData.getBoundaries().contains(lowBundle));
    assertTrue(bundlesData.getBoundaries().contains(midBundle));
    assertTrue(bundlesData.getBoundaries().contains(highBundle));
}
|
public static IRubyObject deep(final Ruby runtime, final Object input) {
    // Java null maps to Ruby nil.
    if (input == null) {
        return runtime.getNil();
    }
    // Fast path: a converter registered for the exact concrete class; otherwise
    // defer to the slower fallback resolution.
    final Class<?> inputClass = input.getClass();
    final Rubyfier.Converter directConverter = CONVERTER_MAP.get(inputClass);
    return directConverter == null
            ? fallbackConvert(runtime, input, inputClass)
            : directConverter.convert(runtime, input);
}
|
// A Java Float converts to RubyFloat; 1.0f widens to 1.0d exactly, so a zero
// delta is safe in the comparison.
@Test
public void testDeepWithFloat() {
    Object result = Rubyfier.deep(RubyUtil.RUBY, 1.0F);
    assertEquals(RubyFloat.class, result.getClass());
    assertEquals(1.0D, ((RubyFloat)result).getDoubleValue(), 0);
}
|
// Returns query terms referencing fields that are neither built-in searchable ES
// fields, nor reserved settable fields, nor present in the provided available
// fields — i.e. likely typos or unknown fields. The limiter then trims the grouped
// result to the entries it deems useful to report.
List<ParsedTerm> identifyUnknownFields(final Set<String> availableFields, final List<ParsedTerm> terms) {
    final Map<String, List<ParsedTerm>> groupedByField = terms.stream()
            .filter(t -> !t.isDefaultField())
            .filter(term -> !SEARCHABLE_ES_FIELDS.contains(term.getRealFieldName()))
            .filter(term -> !RESERVED_SETTABLE_FIELDS.contains(term.getRealFieldName()))
            .filter(term -> !availableFields.contains(term.getRealFieldName()))
            // distinct() before grouping — presumably to collapse duplicate terms
            // so each is reported once; relies on ParsedTerm equals/hashCode.
            .distinct()
            .collect(Collectors.groupingBy(ParsedTerm::getRealFieldName));
    return unknownFieldsListLimiter.filterElementsContainingUsefulInformation(groupedByField);
}
|
// "_index" is a reserved/searchable index field, so it must not be reported as
// unknown even though it is absent from the available-fields set.
@Test
void testDoesNotIdentifySpecialIndexFieldAsUnknown() {
    final List<ParsedTerm> unknownFields = toTest.identifyUnknownFields(
            Set.of("some_normal_field"),
            List.of(ParsedTerm.create("_index", "graylog_0"))
    );
    assertTrue(unknownFields.isEmpty());
}
|
// Resolves the inter-node listener URL using the supplied port resolver,
// delegating to the two-arg overload with the class-level LOGGER.
public URL getInterNodeListener(
    final Function<URL, Integer> portResolver
) {
    return getInterNodeListener(portResolver, LOGGER);
}
|
// An explicitly configured inter-node (advertised) listener may not use port 0
// (auto assignment): other nodes could not derive a reachable address from it,
// so configuration loading must fail with a ConfigException naming the setting.
@Test
public void shouldThrowIfExplicitInterNodeListenerHasAutoPortAssignment() {
    // Given:
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .putAll(MIN_VALID_CONFIGS)
        .put(ADVERTISED_LISTENER_CONFIG, "https://unresolvable.host:0")
        .build()
    );
    // When:
    final Exception e = assertThrows(
        ConfigException.class,
        () -> config.getInterNodeListener(portResolver, logger)
    );
    // Then:
    assertThat(e.getMessage(), containsString("Invalid value https://unresolvable.host:0 for configuration "
        + ADVERTISED_LISTENER_CONFIG
        + ": Must have valid port"));
}
|
public static String UU64() {
    // Generate a fresh random UUID and delegate to the encoding overload.
    final java.util.UUID randomId = java.util.UUID.randomUUID();
    return UU64(randomId);
}
|
// Verifies UU64() produces a usable identifier. The original test only printed
// the value and asserted nothing, so it could never fail.
@Test
public void testUU64(){
    String uu64 = UUID.UU64();
    System.out.println(uu64);
    // Minimal framework-independent assertion: the id must be non-null and non-empty.
    if (uu64 == null || uu64.isEmpty()) {
        throw new AssertionError("UU64() must return a non-empty string, was: " + uu64);
    }
}
|
@GET
@Produces(MediaType.APPLICATION_JSON)
@Operation(summary = "Get prekey count",
    description = "Gets the number of one-time prekeys uploaded for this device and still available")
@ApiResponse(responseCode = "200", description = "Body contains the number of available one-time prekeys for the device.", useReturnTypeSchema = true)
@ApiResponse(responseCode = "401", description = "Account authentication check failed.")
public CompletableFuture<PreKeyCount> getStatus(@ReadOnly @Auth final AuthenticatedDevice auth,
    @QueryParam("identity") @DefaultValue("aci") final IdentityType identityType) {
  // Fetch the EC and post-quantum one-time prekey counts concurrently for the
  // authenticated device, then combine both into a single PreKeyCount response.
  final var identifier = auth.getAccount().getIdentifier(identityType);
  final var deviceId = auth.getAuthenticatedDevice().getId();
  final CompletableFuture<Integer> ecCountFuture = keysManager.getEcCount(identifier, deviceId);
  final CompletableFuture<Integer> pqCountFuture = keysManager.getPqCount(identifier, deviceId);
  return ecCountFuture.thenCombine(pqCountFuture, PreKeyCount::new);
}
|
// Uploading a KEM (post-quantum) signed prekey in the EC signed-prekey slot is
// structurally invalid; the endpoint must reject it with HTTP 400.
@Test
void putKeysStructurallyInvalidSignedECKey() {
    final ECKeyPair identityKeyPair = Curve.generateKeyPair();
    final IdentityKey identityKey = new IdentityKey(identityKeyPair.getPublicKey());
    // Wrong key type on purpose: a KEM prekey where an EC signed prekey belongs.
    final KEMSignedPreKey wrongPreKey = KeysHelper.signedKEMPreKey(1, identityKeyPair);
    final WeaklyTypedPreKeyState preKeyState =
        new WeaklyTypedPreKeyState(null, WeaklyTypedSignedPreKey.fromSignedPreKey(wrongPreKey), null, null, identityKey.serialize());
    Response response =
        resources.getJerseyTest()
            .target("/v2/keys")
            .request()
            .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
            .put(Entity.entity(preKeyState, MediaType.APPLICATION_JSON_TYPE));
    assertThat(response.getStatus()).isEqualTo(400);
}
|
public static ServerId of(@Nullable String databaseId, String datasetId) {
    // databaseId is optional, but when present it must have the fixed length.
    if (databaseId != null) {
        final int databaseIdLength = databaseId.length();
        checkArgument(databaseIdLength == DATABASE_ID_LENGTH, "Illegal databaseId length (%s)", databaseIdLength);
    }
    // datasetId comes in three historical formats, each identified by its length.
    final int datasetIdLength = datasetId.length();
    final boolean validDatasetIdLength = datasetIdLength == DEPRECATED_SERVER_ID_LENGTH
            || datasetIdLength == NOT_UUID_DATASET_ID_LENGTH
            || datasetIdLength == UUID_DATASET_ID_LENGTH;
    checkArgument(validDatasetIdLength, "Illegal datasetId length (%s)", datasetIdLength);
    return new ServerId(databaseId, datasetId);
}
|
// A present-but-empty databaseId must fail the fixed-length check with an IAE
// that reports the offending length (0).
@Test
public void of_throws_IAE_if_databaseId_is_empty() {
    assertThatThrownBy(() -> ServerId.of("", randomAlphabetic(UUID_DATASET_ID_LENGTH)))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Illegal databaseId length (0)");
}
|
@Override
public URL getApiRoute(String apiRouteBase) throws MalformedURLException {
  // Route shape: <base><imageName>/blobs/<digest>
  final String blobRoute =
      apiRouteBase + registryEndpointRequestProperties.getImageName() + "/blobs/" + blobDigest;
  return new URL(blobRoute);
}
|
// The blob route must be <base><imageName>/blobs/<digest>; note the base already
// ends with '/' so no extra separator is inserted.
@Test
public void testGetApiRoute() throws MalformedURLException {
  Assert.assertEquals(
      new URL("http://someApiBase/someImageName/blobs/" + fakeDigest),
      testBlobPuller.getApiRoute("http://someApiBase/"));
}
|
@Override
public void createNetwork(Network osNet) {
    // Validate input before touching any state.
    checkNotNull(osNet, ERR_NULL_NETWORK);
    checkArgument(!Strings.isNullOrEmpty(osNet.getId()), ERR_NULL_NETWORK_ID);
    // Persist the raw network first, then register the type-augmented view.
    osNetworkStore.createNetwork(osNet);
    OpenstackNetwork finalAugmentedNetwork = buildAugmentedNetworkFromType(osNet);
    // compute() makes the duplicate check and insert atomic on the augmented map;
    // a pre-existing entry for this id is treated as a caller error.
    augmentedNetworkMap.compute(osNet.getId(), (id, existing) -> {
        final String error = osNet.getId() + ERR_DUPLICATE;
        checkArgument(existing == null, error);
        return finalAugmentedNetwork;
    });
    log.info(String.format(MSG_NETWORK, deriveResourceName(osNet), MSG_CREATED));
}
|
// A null network must fail fast via the checkNotNull precondition.
@Test(expected = NullPointerException.class)
public void testCreateNullNetwork() {
    target.createNetwork(null);
}
|
public static boolean hasLeadership(KubernetesConfigMap configMap, String lockIdentity) {
    // The leader annotation holds the current holder info; no annotation (or one
    // not mentioning this identity) means we do not hold leadership.
    final String leaderAnnotation = configMap.getAnnotations().get(LEADER_ANNOTATION_KEY);
    if (leaderAnnotation == null) {
        return false;
    }
    return leaderAnnotation.contains(lockIdentity);
}
|
// A config map without the leader annotation implies nobody holds leadership.
@Test
void testNoAnnotation() {
    assertThat(KubernetesLeaderElector.hasLeadership(leaderConfigMap, lockIdentity)).isFalse();
}
|
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof Point)) {
        return false;
    }
    Point other = (Point) obj;
    // Compare via doubleToLongBits so NaN equals NaN and +0.0 differs from -0.0,
    // matching the semantics of Double.equals().
    return Double.doubleToLongBits(this.x) == Double.doubleToLongBits(other.x)
            && Double.doubleToLongBits(this.y) == Double.doubleToLongBits(other.y);
}
|
// equals(): identical coordinates are equal; differing x, differing y, a foreign
// type, and null must all be unequal. TestUtils presumably also checks symmetry
// and hashCode consistency — confirm against the TestUtils implementation.
@Test
public void equalsTest() {
    Point point1 = new Point(1, 2);
    Point point2 = new Point(1, 2);
    Point point3 = new Point(1, 1);
    Point point4 = new Point(2, 2);
    TestUtils.equalsTest(point1, point2);
    TestUtils.notEqualsTest(point1, point3);
    TestUtils.notEqualsTest(point1, point4);
    TestUtils.notEqualsTest(point1, new Object());
    TestUtils.notEqualsTest(point1, null);
}
|
@Override
public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor(
        List<ExecutionAttemptID> executionAttemptIds) {
    Map<ExecutionAttemptID, ExecutionSlotAssignment> result = new HashMap<>();
    // Executions that still need a physical slot, keyed by their fresh slot request id.
    Map<SlotRequestId, ExecutionAttemptID> remainingExecutionsToSlotRequest =
            new HashMap<>(executionAttemptIds.size());
    List<PhysicalSlotRequest> physicalSlotRequests =
            new ArrayList<>(executionAttemptIds.size());
    for (ExecutionAttemptID executionAttemptId : executionAttemptIds) {
        if (requestedPhysicalSlots.containsKeyA(executionAttemptId)) {
            // A slot was already requested for this execution: reuse the pending
            // assignment instead of issuing a duplicate request.
            result.put(
                    executionAttemptId,
                    new ExecutionSlotAssignment(
                            executionAttemptId,
                            requestedPhysicalSlots.getValueByKeyA(executionAttemptId)));
        } else {
            // First request for this execution: build a slot profile from its
            // resource requirements and preferred locations, then queue a new
            // physical slot request for batched allocation below.
            final SlotRequestId slotRequestId = new SlotRequestId();
            final ResourceProfile resourceProfile =
                    resourceProfileRetriever.apply(executionAttemptId);
            Collection<TaskManagerLocation> preferredLocations =
                    preferredLocationsRetriever.getPreferredLocations(
                            executionAttemptId.getExecutionVertexId(), Collections.emptySet());
            final SlotProfile slotProfile =
                    SlotProfile.priorAllocation(
                            resourceProfile,
                            resourceProfile,
                            preferredLocations,
                            Collections.emptyList(),
                            Collections.emptySet());
            final PhysicalSlotRequest request =
                    new PhysicalSlotRequest(
                            slotRequestId, slotProfile, slotWillBeOccupiedIndefinitely);
            physicalSlotRequests.add(request);
            remainingExecutionsToSlotRequest.put(slotRequestId, executionAttemptId);
        }
    }
    // Fire the batched physical slot requests and merge their assignments in.
    result.putAll(
            allocatePhysicalSlotsFor(remainingExecutionsToSlotRequest, physicalSlotRequests));
    return result;
}
|
// A single execution attempt produces exactly one physical slot request whose
// slot profile carries the expected resource profile, and the logical slot
// future completes immediately in this test harness.
@Test
void testSlotAllocation() {
    final AllocationContext context = new AllocationContext();
    final CompletableFuture<LogicalSlot> slotFuture =
            context.allocateSlotsFor(EXECUTION_ATTEMPT_ID);
    assertThat(slotFuture).isCompleted();
    assertThat(context.getSlotProvider().getRequests()).hasSize(1);
    final PhysicalSlotRequest slotRequest =
            context.getSlotProvider().getRequests().values().iterator().next();
    assertThat(slotRequest.getSlotProfile().getPhysicalSlotResourceProfile())
            .isEqualTo(RESOURCE_PROFILE);
}
|
@Override
protected int command() {
    // Exit code 1 when no migrations config file is present.
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        // Config could not be loaded/parsed: log the message and fail.
        LOGGER.error(e.getMessage());
        return 1;
    }
    // Delegate the actual work, injecting the default ksql client factory.
    return command(config, MigrationsUtil::getKsqlClient);
}
|
// An incompatible ksqlDB server version must abort initialization (exit code 1)
// before any metadata stream/table statements are issued.
@Test
public void shouldNotInitializeIfServerVersionIncompatible() {
    // Given:
    when(serverInfo.getServerVersion()).thenReturn("v0.9.0");
    // When:
    final int status = command.command(config, cfg -> client);
    // Then:
    assertThat(status, is(1));
    verify(client, never()).executeStatement(EXPECTED_CS_STATEMENT);
    verify(client, never()).executeStatement(EXPECTED_CTAS_STATEMENT);
}
|
// Convenience overload of newMetadataSelector with the boolean flag defaulted to
// false — presumably selecting instances whose metadata contains ALL given
// entries; confirm the flag's meaning against the two-arg overload.
public static NamingSelector newMetadataSelector(Map<String, String> metadata) {
    return newMetadataSelector(metadata, false);
}
|
// Selector built from {a=1, b=2} must match only the instance carrying BOTH entries.
@Test
public void testNewMetadataSelector() {
    Instance ins1 = new Instance();
    ins1.addMetadata("a", "1");
    ins1.addMetadata("b", "2");
    Instance ins2 = new Instance();
    ins2.addMetadata("a", "1");
    Instance ins3 = new Instance();
    ins3.addMetadata("b", "2");
    NamingContext namingContext = mock(NamingContext.class);
    when(namingContext.getInstances()).thenReturn(Arrays.asList(ins1, ins2, ins3));
    // Use a plain, properly-typed map instead of the original raw, double-brace
    // initialized HashMap (an anonymous subclass that pins a reference to the
    // enclosing test instance and trips raw-type warnings).
    HashMap<String, String> requiredMetadata = new HashMap<>();
    requiredMetadata.put("a", "1");
    requiredMetadata.put("b", "2");
    NamingSelector metadataSelector = NamingSelectorFactory.newMetadataSelector(requiredMetadata);
    List<Instance> result = metadataSelector.select(namingContext).getResult();
    // Only ins1 has both required metadata entries.
    assertEquals(1, result.size());
    assertEquals(ins1, result.get(0));
}
|
// Creates the backend and restores it from the first restore alternative that
// succeeds. Alternatives are tried in order; exceptions are collected (first or
// suppressed) and rethrown only after every alternative failed, or immediately
// if the task was cancelled (closeable registry closed). On success, size stats
// are collected for the state objects that were actually used.
@Nonnull
public T createAndRestore(
        @Nonnull List<? extends Collection<S>> restoreOptions,
        @Nonnull StateObject.StateObjectSizeStatsCollector stats)
        throws Exception {
    // No alternatives means "create with empty state" — model it as one empty option.
    if (restoreOptions.isEmpty()) {
        restoreOptions = Collections.singletonList(Collections.emptyList());
    }
    int alternativeIdx = 0;
    Exception collectedException = null;
    while (alternativeIdx < restoreOptions.size()) {
        Collection<S> restoreState = restoreOptions.get(alternativeIdx);
        ++alternativeIdx;
        // IMPORTANT: please be careful when modifying the log statements because they are used
        // for validation in
        // the automatic end-to-end tests. Those tests might fail if they are not aligned with
        // the log message!
        if (restoreState.isEmpty()) {
            LOG.debug("Creating {} with empty state.", logDescription);
        } else {
            if (LOG.isTraceEnabled()) {
                LOG.trace(
                        "Creating {} and restoring with state {} from alternative ({}/{}).",
                        logDescription,
                        restoreState,
                        alternativeIdx,
                        restoreOptions.size());
            } else {
                LOG.debug(
                        "Creating {} and restoring with state from alternative ({}/{}).",
                        logDescription,
                        alternativeIdx,
                        restoreOptions.size());
            }
        }
        try {
            T successfullyRestored = attemptCreateAndRestore(restoreState);
            // Obtain and report stats for the state objects used in our successful restore
            restoreState.forEach(handle -> handle.collectSizeStats(stats));
            return successfullyRestored;
        } catch (Exception ex) {
            // Keep the first failure, suppress the rest.
            collectedException = ExceptionUtils.firstOrSuppressed(ex, collectedException);
            if (backendCloseableRegistry.isClosed()) {
                // Task was cancelled: stop retrying and surface what we have.
                throw new FlinkException(
                        "Stopping restore attempts for already cancelled task.",
                        collectedException);
            }
            LOG.warn(
                    "Exception while restoring {} from alternative ({}/{}), will retry while more "
                            + "alternatives are available.",
                    logDescription,
                    alternativeIdx,
                    restoreOptions.size(),
                    ex);
        }
    }
    // Every alternative failed: fail with the collected cause chain.
    throw new FlinkException(
            "Could not restore "
                    + logDescription
                    + " from any of the "
                    + restoreOptions.size()
                    + " provided restore options.",
            collectedException);
}
|
// Cancelling (closing) the closeable registry while a restore is blocked on I/O
// must abort the restore attempt and surface a FlinkException to the caller.
@Test
void testCanBeCanceledViaRegistry() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    OneShotLatch waitForBlock = new OneShotLatch();
    OneShotLatch unblock = new OneShotLatch();
    // Restore handle whose input stream blocks until 'unblock' is triggered,
    // signalling 'waitForBlock' once the restore thread is inside the read.
    OperatorStateHandle blockingRestoreHandle = mock(OperatorStateHandle.class);
    when(blockingRestoreHandle.openInputStream())
            .thenReturn(new BlockingFSDataInputStream(waitForBlock, unblock));
    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
            Collections.singletonList(
                    new StateObjectCollection<>(
                            Collections.singletonList(blockingRestoreHandle)));
    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
            new BackendRestorerProcedure<>(
                    backendSupplier, closeableRegistry, "test op state backend");
    AtomicReference<Exception> exceptionReference = new AtomicReference<>(null);
    Thread restoreThread =
            new Thread(
                    () -> {
                        try {
                            restorerProcedure.createAndRestore(
                                    sortedRestoreOptions,
                                    StateObject.StateObjectSizeStatsCollector.create());
                        } catch (Exception e) {
                            exceptionReference.set(e);
                        }
                    });
    restoreThread.start();
    // Wait until the restore is genuinely blocked, then cancel and release it.
    waitForBlock.await();
    closeableRegistry.close();
    unblock.trigger();
    restoreThread.join();
    // The aborted restore must have failed with a FlinkException.
    Exception exception = exceptionReference.get();
    assertThat(exception).isInstanceOf(FlinkException.class);
}
|
@Override
public void killProcess(final String processId) throws SQLException {
    // Unknown process ids are ignored silently (the process may have finished already).
    final Process process = ProcessRegistry.getInstance().get(processId);
    if (process == null) {
        return;
    }
    // Cancel every statement currently executing on behalf of this process.
    for (final Statement statement : process.getProcessStatements().values()) {
        statement.cancel();
    }
}
|
// Killing a registered process must cancel each of its active statements.
// NOTE(review): stubbing the static ProcessRegistry.getInstance() presumably
// relies on mockito-inline static mocking configured elsewhere in this class.
@Test
void assertKillProcess() throws SQLException {
    ProcessRegistry processRegistry = mock(ProcessRegistry.class);
    when(ProcessRegistry.getInstance()).thenReturn(processRegistry);
    Process process = mock(Process.class);
    Statement statement = mock(Statement.class);
    when(process.getProcessStatements()).thenReturn(Collections.singletonMap(1, statement));
    when(processRegistry.get(any())).thenReturn(process);
    processPersistService.killProcess("foo_process_id");
    verify(statement).cancel();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.