| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
@Override
public void configureAuthDataStatefulSet(V1StatefulSet statefulSet, Optional<FunctionAuthData> functionAuthData) {
    // Nothing to configure when no auth data was provisioned for this function.
    if (!functionAuthData.isPresent()) {
        return;
    }
    V1PodSpec podSpec = statefulSet.getSpec().getTemplate().getSpec();
    // configure pod mount secret with auth token
    // NOTE(review): setVolumes/setVolumeMounts REPLACE any existing volumes and
    // mounts with a single-element list rather than appending — confirm no other
    // volumes are expected on this pod spec at this point.
    podSpec.setVolumes(Collections.singletonList(
        new V1Volume()
            .name(SECRET_NAME)
            .secret(
                new V1SecretVolumeSource()
                    // The secret name is derived from the auth data payload.
                    .secretName(getSecretName(new String(functionAuthData.get().getData()))))));
    // Mount the secret read-only into every container of the pod.
    podSpec.getContainers().forEach(container -> container.setVolumeMounts(Collections.singletonList(
        new V1VolumeMount()
            .name(SECRET_NAME)
            .mountPath(DEFAULT_SECRET_MOUNT_DIR)
            .readOnly(true))));
}
|
@Test
public void testConfigureAuthDataStatefulSet() {
    byte[] testBytes = new byte[]{0, 1, 2, 3, 4};
    CoreV1Api coreV1Api = mock(CoreV1Api.class);
    KubernetesSecretsTokenAuthProvider kubernetesSecretsTokenAuthProvider = new KubernetesSecretsTokenAuthProvider();
    kubernetesSecretsTokenAuthProvider.initialize(coreV1Api, testBytes, (fd) -> "default");
    // Minimal StatefulSet with a single empty container for the provider to decorate.
    V1StatefulSet statefulSet = new V1StatefulSet();
    statefulSet.setSpec(
        new V1StatefulSetSpec().template(
            new V1PodTemplateSpec().spec(
                new V1PodSpec().containers(
                    Collections.singletonList(new V1Container())))));
    FunctionAuthData functionAuthData = FunctionAuthData.builder().data("foo".getBytes()).build();
    kubernetesSecretsTokenAuthProvider.configureAuthDataStatefulSet(statefulSet, Optional.of(functionAuthData));
    // Exactly one secret volume, named after the auth data payload ("foo").
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getVolumes().size(), 1);
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getVolumes().get(0).getName(), "function-auth");
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getVolumes().get(0).getSecret().getSecretName(), "pf-secret-foo");
    // The single container gets one mount at the default secret mount dir.
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().size(), 1);
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().size(), 1);
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), "function-auth");
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), "/etc/auth");
}
|
/**
 * Filters the given names out of the message's properties by delegating to the
 * three-argument overload, using a reusable buffer for the retained properties.
 */
static void filterProperties(Message message, Set<String> namesToClear) {
    List<Object> retainedProperties = messagePropertiesBuffer();
    try {
        filterProperties(message, namesToClear, retainedProperties);
    } finally {
        retainedProperties.clear(); // ensure no object references are held due to any exception
    }
}
|
@Test void filterProperties_message_allTypes() throws JMSException {
    TextMessage message = newMessageWithAllTypes();
    // Add the property to be filtered on top of the full set of typed properties.
    message.setStringProperty("b3", "00f067aa0ba902b7-00f067aa0ba902b7-1");
    PropertyFilter.filterProperties(message, Collections.singleton("b3"));
    // After filtering, the message should match a fresh one that never had "b3".
    assertThat(message).isEqualToIgnoringGivenFields(newMessageWithAllTypes(), "processAsExpired");
}
|
/**
 * Asserts that the actual multimap contains exactly the entries of
 * {@code expectedMultimap}. Returns an {@link Ordered} that can additionally
 * verify entry order; if the assertion already failed, returns ALREADY_FAILED
 * so chained inOrder() calls do not double-report.
 */
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
    checkNotNull(expectedMultimap, "expectedMultimap");
    checkNotNull(actual);
    // Symmetric difference: entries expected but absent, and present but unexpected.
    ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
    ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
    // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
    // the subject but not enough times. Similarly for unexpected extra items.
    if (!missing.isEmpty()) {
        if (!extra.isEmpty()) {
            // Add type info when a missing and an extra entry print identically,
            // so the failure message is not ambiguous.
            boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
            // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
            // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
            // show the actual and expected multimaps in the standard format).
            String missingDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
            String extraDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
            failWithActual(
                fact("missing", missingDisplay),
                fact("unexpected", extraDisplay),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        } else {
            // Only missing entries: simpler failure message.
            failWithActual(
                fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        }
    } else if (!extra.isEmpty()) {
        // Only unexpected entries.
        failWithActual(
            fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
            simpleFact("---"),
            fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
        return ALREADY_FAILED;
    }
    // Contents match; a subsequent inOrder() will verify ordering.
    return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
|
@Test
public void containsExactlyInOrder() {
    // Identical key/value sequences: both the contents and the order must match.
    ImmutableMultimap<Integer, String> actual =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    ImmutableMultimap<Integer, String> expected =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    assertThat(actual).containsExactlyEntriesIn(expected).inOrder();
}
|
/**
 * Tells whether the given string parses as a signed 32-bit integer.
 * Blank or {@code null} input is never an integer.
 */
public static boolean isInteger(String s) {
    if (StrUtil.isBlank(s)) {
        return false;
    }
    try {
        // Only the parse outcome matters; the value itself is discarded.
        Integer.parseInt(s);
        return true;
    } catch (NumberFormatException e) {
        return false;
    }
}
|
@Test
public void isIntegerTest() {
    // Valid: negative, positive, leading zeros, and zero itself.
    assertTrue(NumberUtil.isInteger("-12"));
    assertTrue(NumberUtil.isInteger("256"));
    assertTrue(NumberUtil.isInteger("0256"));
    assertTrue(NumberUtil.isInteger("0"));
    // Invalid: decimals, null, empty, and whitespace-only input.
    assertFalse(NumberUtil.isInteger("23.4"));
    assertFalse(NumberUtil.isInteger(null));
    assertFalse(NumberUtil.isInteger(""));
    assertFalse(NumberUtil.isInteger(" "));
}
|
/**
 * Reads a MySQL binary-protocol DATE/DATETIME value from the payload.
 * The first byte is the value's length: 0 (all-zero value, unsupported),
 * 4 (date only), 7 (date and time), or 11 (date, time and microseconds).
 *
 * @throws SQLFeatureNotSupportedException on a zero-length or unknown-length value
 */
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) throws SQLException {
    int length = payload.readInt1();
    switch (length) {
        case 0:
            throw new SQLFeatureNotSupportedException("Can not support date format if year, month, day is absent.");
        case 4:
            return getTimestampForDate(payload);
        case 7:
            return getTimestampForDatetime(payload);
        case 11:
            Timestamp result = getTimestampForDatetime(payload);
            // Trailing 4 bytes carry microseconds; Timestamp stores nanoseconds.
            result.setNanos(payload.readInt4() * 1000);
            return result;
        default:
            // Fix: this value decodes MYSQL_TYPE_DATE/DATETIME; the old message
            // incorrectly blamed MYSQL_TYPE_TIME.
            throw new SQLFeatureNotSupportedException(String.format("Wrong length `%d` of MYSQL_TYPE_DATE", length));
    }
}
|
@Test
void assertReadWithElevenBytes() throws SQLException {
    // 11-byte layout: length=11, then day/month-order fields via readInt1,
    // year via readInt2, and microseconds via readInt4.
    when(payload.readInt1()).thenReturn(11, 12, 31, 10, 59, 0);
    when(payload.readInt2()).thenReturn(2018);
    when(payload.readInt4()).thenReturn(230000);
    LocalDateTime actual = LocalDateTime.ofInstant(Instant.ofEpochMilli(((Timestamp) new MySQLDateBinaryProtocolValue().read(payload, false)).getTime()), ZoneId.systemDefault());
    assertThat(actual.getYear(), is(2018));
    assertThat(actual.getMonthValue(), is(12));
    assertThat(actual.getDayOfMonth(), is(31));
    assertThat(actual.getHour(), is(10));
    assertThat(actual.getMinute(), is(59));
    assertThat(actual.getSecond(), is(0));
    // 230000 microseconds == 230000000 nanoseconds.
    assertThat(actual.getNano(), is(230000000));
}
|
/** Returns {@code true} while at least one checkpoint is still pending. */
public boolean isCheckpointPending() {
    return !pendingCheckpoints.isEmpty();
}
|
@Test
void testNoFastPathWithChannelFinishedDuringCheckpointsCancel() throws Exception {
    // Barrier on channel 0, then channel 0 finishes, then checkpoint 1 is cancelled.
    BufferOrEvent[] sequence = {
        createBarrier(1, 0, 0), createEndOfPartition(0), createCancellationBarrier(1, 1)
    };
    ValidatingCheckpointHandler checkpointHandler = new ValidatingCheckpointHandler();
    inputGate = createCheckpointedInputGate(2, sequence, checkpointHandler);
    for (BufferOrEvent boe : sequence) {
        assertThat(inputGate.pollNext()).hasValue(boe);
    }
    assertThat(checkpointHandler.getLastCanceledCheckpointId()).isOne();
    // If go with the fast path, the pending checkpoint would not be removed normally.
    assertThat(inputGate.getCheckpointBarrierHandler().isCheckpointPending()).isFalse();
}
|
/**
 * Tells whether the given file path matches this pattern's file matcher.
 * A {@code null} path never matches.
 */
public boolean matchFile(@Nullable String filePath) {
    if (filePath == null) {
        return false;
    }
    return filePattern.match(filePath);
}
|
@Test
public void shouldMatchJavaFile() {
    String javaFile = "org/foo/Bar.java";
    // Exact path, directory glob, ** prefix, catch-all, and single-char wildcard all match.
    assertThat(new IssuePattern("org/foo/Bar.java", "*").matchFile(javaFile)).isTrue();
    assertThat(new IssuePattern("org/foo/*", "*").matchFile(javaFile)).isTrue();
    assertThat(new IssuePattern("**Bar.java", "*").matchFile(javaFile)).isTrue();
    assertThat(new IssuePattern("**", "*").matchFile(javaFile)).isTrue();
    assertThat(new IssuePattern("org/*/?ar.java", "*").matchFile(javaFile)).isTrue();
    // Non-matching patterns, a null path, and an unrelated path all fail.
    assertThat(new IssuePattern("org/other/Hello.java", "*").matchFile(javaFile)).isFalse();
    assertThat(new IssuePattern("org/foo/Hello.java", "*").matchFile(javaFile)).isFalse();
    assertThat(new IssuePattern("org/*/??ar.java", "*").matchFile(javaFile)).isFalse();
    assertThat(new IssuePattern("org/*/??ar.java", "*").matchFile(null)).isFalse();
    assertThat(new IssuePattern("org/*/??ar.java", "*").matchFile("plop")).isFalse();
}
|
/**
 * Performs an HTTP call against the given naming server.
 *
 * @param api       API path appended to the server address
 * @param params    query parameters; namespace/group/service entries are used
 *                  to build security headers
 * @param body      form body entries
 * @param curServer server address; scheme and port are filled in when absent
 * @param method    HTTP method name
 * @return response body on success, empty string on HTTP 304
 * @throws NacosException when the server returns a non-OK status or the call fails
 */
public String callServer(String api, Map<String, String> params, Map<String, String> body, String curServer,
        String method) throws NacosException {
    long start = System.currentTimeMillis();
    long end = 0;
    String namespace = params.get(CommonParams.NAMESPACE_ID);
    String group = params.get(CommonParams.GROUP_NAME);
    String serviceName = params.get(CommonParams.SERVICE_NAME);
    // Security headers are injected into the request parameters.
    params.putAll(getSecurityHeaders(namespace, group, serviceName));
    Header header = NamingHttpUtil.builderHeader();
    String url;
    if (curServer.startsWith(HTTPS_PREFIX) || curServer.startsWith(HTTP_PREFIX)) {
        // Address already carries a scheme: use it verbatim.
        url = curServer + api;
    } else {
        // Append the default server port when none is present.
        if (!InternetAddressUtil.containsPort(curServer)) {
            curServer = curServer + InternetAddressUtil.IP_PORT_SPLITER + serverPort;
        }
        url = NamingHttpClientManager.getInstance().getPrefix() + curServer + api;
    }
    try {
        HttpRestResult<String> restResult = nacosRestTemplate.exchangeForm(url, header,
                Query.newInstance().initParams(params), body, method, String.class);
        end = System.currentTimeMillis();
        // Record request latency per method/url/status.
        MetricsMonitor.getNamingRequestMonitor(method, url, String.valueOf(restResult.getCode()))
                .observe(end - start);
        if (restResult.ok()) {
            return restResult.getData();
        }
        // 304 Not Modified is treated as "no new data", not an error.
        if (HttpStatus.SC_NOT_MODIFIED == restResult.getCode()) {
            return StringUtils.EMPTY;
        }
        throw new NacosException(restResult.getCode(), restResult.getMessage());
    } catch (NacosException e) {
        NAMING_LOGGER.error("[NA] failed to request", e);
        throw e;
    } catch (Exception e) {
        // Wrap any unexpected failure as a server error, preserving the cause.
        NAMING_LOGGER.error("[NA] failed to request", e);
        throw new NacosException(NacosException.SERVER_ERROR, e);
    }
}
|
@Test
void testCallServerFail() throws Exception {
    // A non-OK, non-304 response (HTTP 400) must surface as a NacosException.
    assertThrows(NacosException.class, () -> {
        //given
        NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
        when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenAnswer(
                invocationOnMock -> {
                    //return url
                    HttpRestResult<Object> res = new HttpRestResult<Object>();
                    res.setMessage("fail");
                    res.setCode(400);
                    return res;
                });
        // Inject the mocked template via reflection (field is private).
        final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
        nacosRestTemplateField.setAccessible(true);
        nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
        String api = "/api";
        Map<String, String> params = new HashMap<>();
        Map<String, String> body = new HashMap<>();
        String method = HttpMethod.GET;
        String curServer = "127.0.0.1";
        //when
        clientProxy.callServer(api, params, body, curServer, method);
    });
}
|
/**
 * Returns the byte at {@code pos}, walking the segment chain from the head
 * and skipping whole segments until the position falls inside one.
 *
 * @throws IndexOutOfBoundsException when {@code pos} is outside the buffer
 */
public byte getByte(long pos) {
    checkOffsetAndCount(size, pos, 1);
    Segment segment = head;
    while (true) {
        int bytesInSegment = segment.limit - segment.pos;
        if (pos < bytesInSegment) {
            return segment.data[segment.pos + (int) pos];
        }
        // Target byte lies beyond this segment; advance and rebase pos.
        pos -= bytesInSegment;
        segment = segment.next;
    }
}
|
@Test public void getByteOfEmptyBuffer() throws Exception {
    // Reading any position of an empty buffer must fail the bounds check.
    Buffer buffer = new Buffer();
    try {
        buffer.getByte(0);
        fail();
    } catch (IndexOutOfBoundsException expected) {
    }
}
|
/**
 * Deletes the like on the given checklist after verifying the user owns the
 * checklist. Runs in a transaction so lookup and delete are atomic.
 */
@Transactional
public void deleteChecklistLikeByChecklistId(User user, long checklistId) {
    Checklist checklist = checklistRepository.getById(checklistId);
    validateChecklistOwnership(user, checklist);
    ChecklistLike checklistLike = checklistLikeRepository.getByChecklistId(checklistId);
    checklistLikeRepository.deleteById(checklistLike.getId());
}
|
@DisplayName("체크리스트 좋아요 삭제 성공")
@Test
void deleteChecklistLikeByChecklistId() {
    // given: a persisted checklist owned by USER1 with an existing like
    Checklist checklist = checklistRepository.save(ChecklistFixture.CHECKLIST1_USER1);
    ChecklistLike checklistLike = checklistLikeRepository.save(ChecklistFixture.CHECKLIST1_LIKE);
    // when
    checklistService.deleteChecklistLikeByChecklistId(USER1, checklist.getId());
    // then: the like row is gone
    assertThat(checklistLikeRepository.existsById(checklistLike.getId())).isFalse();
}
|
/**
 * Applies the given {@link OpenAPISpecFilter} to an OpenAPI definition and
 * returns a filtered copy, or {@code null} when the filter rejects the whole
 * spec. Paths, webhooks, tags and components are filtered; tags that are no
 * longer referenced by any surviving operation are dropped, and broken
 * references are optionally removed.
 */
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }
    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    // Fix: copy tags from the filtered spec so the null check and the copied
    // list agree on their source (previously copied from the unfiltered spec).
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(filteredOpenAPI.getTags()));
    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();
    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                // Keep the path only when at least one operation survived filtering.
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }
    // Tags referenced only by filtered-out operations are removed from the clone.
    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }
    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            // Fix: look the item up in the webhooks map. The previous code read
            // it from getPaths(), which returned the wrong item (or threw an NPE
            // when the spec had webhooks but no paths).
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }
    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        // Only schemas are filtered; the remaining component maps are copied as-is.
        clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }
    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }
    return clone;
}
|
@Test
public void shouldRemoveBrokenRefs() throws IOException {
    final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH);
    // Remove the only header referencing PetHeader, making that schema unreferenced.
    openAPI.getPaths().get("/pet/{petId}").getGet().getResponses().getDefault().getHeaders().remove("X-Rate-Limit-Limit");
    assertNotNull(openAPI.getComponents().getSchemas().get("PetHeader"));
    final RemoveUnreferencedDefinitionsFilter remover = new RemoveUnreferencedDefinitionsFilter();
    final OpenAPI filtered = new SpecFilter().filter(openAPI, remover, null, null, null);
    // Unreferenced schema is dropped; still-referenced schemas survive.
    assertNull(filtered.getComponents().getSchemas().get("PetHeader"));
    assertNotNull(filtered.getComponents().getSchemas().get("Category"));
    assertNotNull(filtered.getComponents().getSchemas().get("Pet"));
}
|
/**
 * Downloads an artifact from the given URL (optionally authenticated via a
 * named secret) and imports the service definitions it contains.
 *
 * @return 201 with the first imported service name/version, 400 on import
 *         error, 500 on download error, 204 when the URL is empty or nothing
 *         was imported
 */
@PostMapping(value = "/artifact/download")
public ResponseEntity<String> importArtifact(@RequestParam(value = "url", required = true) String url,
        @RequestParam(value = "mainArtifact", defaultValue = "true") boolean mainArtifact,
        @RequestParam(value = "secretName", required = false) String secretName) {
    if (!url.isEmpty()) {
        List<Service> services = null;
        Secret secret = null;
        if (secretName != null) {
            // Missing secrets are tolerated: the download proceeds unauthenticated.
            secret = secretRepository.findByName(secretName).stream().findFirst().orElse(null);
            log.debug("Secret {} was requested. Have we found it? {}", secretName, (secret != null));
        }
        try {
            // Download remote to local file before import.
            HTTPDownloader.FileAndHeaders fileAndHeaders = HTTPDownloader.handleHTTPDownloadToFileAndHeaders(url,
                    secret, true);
            File localFile = fileAndHeaders.getLocalFile();
            // Now try importing services.
            services = serviceService.importServiceDefinition(localFile,
                    new ReferenceResolver(url, secret, true,
                            RelativeReferenceURLBuilderFactory
                                    .getRelativeReferenceURLBuilder(fileAndHeaders.getResponseHeaders())),
                    new ArtifactInfo(url, mainArtifact));
        } catch (IOException ioe) {
            log.error("Exception while retrieving remote item " + url, ioe);
            return new ResponseEntity<>("Exception while retrieving remote item", HttpStatus.INTERNAL_SERVER_ERROR);
        } catch (MockRepositoryImportException mrie) {
            // Import errors carry a user-facing message back as 400.
            return new ResponseEntity<>(mrie.getMessage(), HttpStatus.BAD_REQUEST);
        }
        if (services != null && !services.isEmpty()) {
            return new ResponseEntity<>(
                    "{\"name\": \"" + services.get(0).getName() + ":" + services.get(0).getVersion() + "\"}",
                    HttpStatus.CREATED);
        }
    }
    return new ResponseEntity<>(HttpStatus.NO_CONTENT);
}
|
@Test
void shouldReturnBadRequest() throws MockRepositoryImportException {
    // arrange: any import attempt fails with a MockRepositoryImportException
    String apiPastry = "https://raw.githubusercontent.com/microcks/microcks/master/samples/APIPastry-openapi.yaml";
    Mockito.when(serviceService.importServiceDefinition(Mockito.any(File.class), Mockito.any(ReferenceResolver.class),
            Mockito.any(ArtifactInfo.class))).thenThrow(new MockRepositoryImportException("Intentional error"));
    // act
    ResponseEntity<String> responseEntity = sut.importArtifact(apiPastry, false, null);
    // assert: import failure maps to HTTP 400 carrying the exception message
    SoftAssertions.assertSoftly(softly -> {
        softly.assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);
        softly.assertThat(responseEntity.getBody()).contains("Intentional error");
    });
}
|
/**
 * Returns a {@link Read} transform that parses Pub/Sub payloads as protobuf
 * messages of the given class, using {@link ProtoCoder} for both parsing and
 * the output coder.
 */
public static <T extends Message> Read<T> readProtos(Class<T> messageClass) {
    // TODO: Stop using ProtoCoder and instead parse the payload directly.
    // We should not be relying on the fact that ProtoCoder's wire format is identical to
    // the protobuf wire format, as the wire format is not part of a coder's API.
    ProtoCoder<T> coder = ProtoCoder.of(messageClass);
    return Read.newBuilder(parsePayloadUsingCoder(coder)).setCoder(coder).build();
}
|
@Test
public void testProto() {
    ProtoCoder<Primitive> coder = ProtoCoder.of(Primitive.class);
    // Three messages exercising int, bool and string fields.
    ImmutableList<Primitive> inputs =
        ImmutableList.of(
            Primitive.newBuilder().setPrimitiveInt32(42).build(),
            Primitive.newBuilder().setPrimitiveBool(true).build(),
            Primitive.newBuilder().setPrimitiveString("Hello, World!").build());
    setupTestClient(inputs, coder);
    PCollection<Primitive> read =
        pipeline.apply(
            PubsubIO.readProtos(Primitive.class)
                .fromSubscription(SUBSCRIPTION.getPath())
                .withClock(CLOCK)
                .withClientFactory(clientFactory));
    // Order is not guaranteed by Pub/Sub, so compare as a multiset.
    PAssert.that(read).containsInAnyOrder(inputs);
    pipeline.run();
}
|
@ApiOperation(value = "Get a single table", tags = { "Database tables" })
@ApiResponses(value = {
    @ApiResponse(code = 200, message = "Indicates the table exists and the table count is returned."),
    @ApiResponse(code = 404, message = "Indicates the requested table does not exist.")
})
@GetMapping(value = "/management/tables/{tableName}", produces = "application/json")
public TableResponse getTable(@ApiParam(name = "tableName") @PathVariable String tableName) {
    if (restApiInterceptor != null) {
        restApiInterceptor.accessTableInfo();
    }
    // Direct map lookup instead of scanning every entry for an equal key.
    Long count = managementService.getTableCount().get(tableName);
    if (count == null) {
        // Unknown table -> 404 via FlowableObjectNotFoundException.
        throw new FlowableObjectNotFoundException("Could not find a table with name '" + tableName + "'.", String.class);
    }
    return restResponseFactory.createTableResponse(tableName, count);
}
|
@Test
public void testGetTable() throws Exception {
    // Pick any existing table name from the management service.
    Map<String, Long> tableCounts = managementService.getTableCount();
    String tableNameToGet = tableCounts.keySet().iterator().next();
    CloseableHttpResponse response = executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_TABLE, tableNameToGet)),
            HttpStatus.SC_OK);
    // Check table
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThat(responseNode).isNotNull();
    // Response must echo the name, count and resource URL of the table.
    assertThatJson(responseNode)
            .isEqualTo("{"
                    + "name: '" + tableNameToGet + "',"
                    + "count: " + tableCounts.get(tableNameToGet) + ","
                    + "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_TABLE, tableNameToGet) + "'"
                    + "}");
}
|
/**
 * Updates the subnet with the given id from the JSON request body.
 * On a non-active HA node the request is forwarded to the active node;
 * otherwise the subnet is updated locally.
 *
 * @return 200 OK on success
 */
@PUT
@Path("{id}")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public Response updateSubnet(@PathParam("id") String id, InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "UPDATE " + id));
    String inputStr = IOUtils.toString(input, REST_UTF8);
    // Non-active node with a real active peer: relay the PUT to the active node.
    if (!haService.isActive()
            && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        return syncPut(haService, SUBNETS, id, inputStr);
    }
    final NeutronSubnet subnet = (NeutronSubnet)
            jsonToModelEntity(inputStr, NeutronSubnet.class);
    adminService.updateSubnet(subnet);
    return status(Response.Status.OK).build();
}
|
@Test
public void testUpdateSubnetWithUpdatingOperation() {
    // Active HA node: the update must be handled locally, not relayed.
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    mockOpenstackNetworkAdminService.updateSubnet(anyObject());
    replay(mockOpenstackNetworkAdminService);
    final WebTarget wt = target();
    InputStream jsonStream = OpenstackSubnetWebResourceTest.class
            .getResourceAsStream("openstack-subnet.json");
    Response response = wt.path(PATH + "/d32019d3-bc6e-4319-9c1d-6722fc136a22")
            .request(MediaType.APPLICATION_JSON_TYPE)
            .put(Entity.json(jsonStream));
    final int status = response.getStatus();
    assertThat(status, is(200));
    // Verifies updateSubnet() was actually invoked on the admin service.
    verify(mockOpenstackNetworkAdminService);
}
|
/**
 * Maps an activation method to the name of the flow that handles it.
 *
 * @throws IllegalStateException for an unknown activation method
 */
public static String getFlowName(String activationMethod) {
    return switch (activationMethod) {
        case ActivationMethod.ACCOUNT -> ActivateAppWithRequestWebsite.NAME;
        case ActivationMethod.PASSWORD -> ActivateAppWithPasswordLetterFlow.NAME;
        case ActivationMethod.SMS -> ActivateAppWithPasswordSmsFlow.NAME;
        case ActivationMethod.RDA -> ActivateAppWithPasswordRdaFlow.NAME;
        case ActivationMethod.APP -> ActivateAppWithOtherAppFlow.NAME;
        case ActivationMethod.LETTER -> ActivateAccountAndAppFlow.NAME;
        case ActivationMethod.UNDEFINED -> UndefinedFlow.NAME;
        default -> throw new IllegalStateException("Unexpected value: " + activationMethod);
    };
}
|
@Test
void getFlowNameNonexistentTest() {
    // An unmapped activation method must raise IllegalStateException with the value.
    Exception exception = assertThrows(IllegalStateException.class, () ->
        flowService.getFlowName("nope")
    );
    assertEquals("Unexpected value: nope", exception.getMessage());
}
|
/**
 * Convenience overload: generates the SQL for a single primary-key column by
 * delegating to the collection-based variant.
 */
public List<String> generate(String tableName, String columnName, boolean isAutoGenerated) throws SQLException {
    return generate(tableName, singleton(columnName), isAutoGenerated);
}
|
@Test
public void generate_for_oracle_autogenerated_false() throws SQLException {
    // Oracle dialect with an existing PK constraint and no auto-generated id.
    when(dbConstraintFinder.findConstraintName(TABLE_NAME)).thenReturn(Optional.of(CONSTRAINT));
    when(db.getDialect()).thenReturn(ORACLE);
    List<String> sqls = underTest.generate(TABLE_NAME, PK_COLUMN, false);
    assertThat(sqls).containsExactly("ALTER TABLE issues DROP CONSTRAINT pk_id DROP INDEX");
}
|
/**
 * Removes and returns the tracked value for the given metric key, or
 * {@code null} when no value was being tracked for it.
 */
public InstantAndValue<T> remove(MetricKey metricKey) {
    return counters.remove(metricKey);
}
|
@Test
public void testRemove() {
    LastValueTracker<Double> lastValueTracker = new LastValueTracker<>();
    lastValueTracker.getAndSet(METRIC_NAME, instant1, 1d);
    assertTrue(lastValueTracker.contains(METRIC_NAME));
    // remove() returns the previously tracked instant/value pair...
    InstantAndValue<Double> result = lastValueTracker.remove(METRIC_NAME);
    assertNotNull(result);
    assertEquals(instant1, result.getIntervalStart());
    assertEquals(1d, result.getValue().longValue());
}
|
/**
 * Determines a file extension from the hex-encoded head of a file stream.
 * The configurable prefix table is consulted first; when no prefix matches,
 * detection falls back to magic-number analysis of the decoded bytes.
 * Returns {@code null} for blank input.
 */
public static String getType(String fileStreamHexHead) {
    if (StrUtil.isBlank(fileStreamHexHead)) {
        return null;
    }
    // First pass: case-insensitive hex-prefix lookup in the registered table.
    if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) {
        for (final Entry<String, String> typeEntry : FILE_TYPE_MAP.entrySet()) {
            final String hexPrefix = typeEntry.getKey();
            if (StrUtil.startWithIgnoreCase(fileStreamHexHead, hexPrefix)) {
                return typeEntry.getValue();
            }
        }
    }
    // Second pass: decode the hex head and use magic-number detection.
    final byte[] headBytes = HexUtil.decodeHex(fileStreamHexHead);
    return FileMagicNumber.getMagicNumber(headBytes).getExtension();
}
|
// NOTE(review): disabled manual check against a local file; it performs no
// assertions, so it only verifies that getType() does not throw.
@Test
@Disabled
public void getTypeFromInputStream() throws IOException {
    final File file = FileUtil.file("d:/test/pic.jpg");
    final BufferedInputStream inputStream = FileUtil.getInputStream(file);
    // mark/reset so the stream could be re-read after type sniffing.
    inputStream.mark(0);
    final String type = FileTypeUtil.getType(inputStream);
    inputStream.reset();
}
|
/**
 * Equality based on exact runtime class, prefix length, match value and
 * field id (getClass comparison intentionally excludes subclasses).
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    PiLpmFieldMatch that = (PiLpmFieldMatch) o;
    return prefixLength == that.prefixLength &&
            Objects.equal(value, that.value) &&
            Objects.equal(this.fieldId(), that.fieldId());
}
|
@Test
public void testEquals() {
    // EqualsTester verifies reflexivity, symmetry, transitivity and hashCode
    // consistency across the equality groups.
    new EqualsTester()
            .addEqualityGroup(piLpmFieldMatch1, sameAsPiLpmFieldMatch1)
            .addEqualityGroup(piLpmFieldMatch2)
            .testEquals();
}
|
/**
 * Fetches this EC2 instance's availability zone from the instance
 * metadata endpoint and returns the raw response body.
 */
String availabilityZoneEc2() {
    String uri = ec2MetadataEndpoint.concat("/placement/availability-zone/");
    return metadataClient(uri, awsConfig).get().getBody();
}
|
@Test
public void availabilityZoneEc2() {
    // given: the stubbed metadata endpoint returns a fixed AZ string
    String availabilityZone = "eu-central-1b";
    stubFor(get(urlEqualTo("/placement/availability-zone/"))
            .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(availabilityZone)));
    // when
    String result = awsMetadataApi.availabilityZoneEc2();
    // then: the raw body is returned unchanged
    assertEquals(availabilityZone, result);
}
|
/**
 * Collects container log metadata for all applications of the requested user
 * that match the request's app id, node id and container id filters.
 * Aggregated .har archives are expanded in place by swapping the node-file
 * iterator for a HarFs listing. Per-file read errors are logged and skipped.
 */
public List<ContainerLogMeta> collect(
        LogAggregationFileController fileController) throws IOException {
    List<ContainerLogMeta> containersLogMeta = new ArrayList<>();
    RemoteIterator<FileStatus> appDirs = fileController.
            getApplicationDirectoriesOfUser(logsRequest.getUser());
    while (appDirs.hasNext()) {
        FileStatus currentAppDir = appDirs.next();
        // A null app id in the request means "all applications".
        if (logsRequest.getAppId() == null ||
                logsRequest.getAppId().equals(currentAppDir.getPath().getName())) {
            ApplicationId appId = ApplicationId.fromString(
                    currentAppDir.getPath().getName());
            RemoteIterator<FileStatus> nodeFiles = fileController
                    .getNodeFilesOfApplicationDirectory(currentAppDir);
            while (nodeFiles.hasNext()) {
                FileStatus currentNodeFile = nodeFiles.next();
                if (!logsRequest.getNodeId().match(currentNodeFile.getPath()
                        .getName())) {
                    continue;
                }
                // Aggregated har archive: replace the iterator with the
                // archive's own listing and keep iterating.
                if (currentNodeFile.getPath().getName().equals(
                        logsRequest.getAppId() + ".har")) {
                    Path p = new Path("har:///"
                            + currentNodeFile.getPath().toUri().getRawPath());
                    nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
                    continue;
                }
                try {
                    Map<String, List<ContainerLogFileInfo>> metaFiles = fileController
                            .getLogMetaFilesOfNode(logsRequest, currentNodeFile, appId);
                    if (metaFiles == null) {
                        continue;
                    }
                    // Keep only the requested container (null means all).
                    metaFiles.entrySet().removeIf(entry ->
                            !(logsRequest.getContainerId() == null ||
                                    logsRequest.getContainerId().equals(entry.getKey())));
                    containersLogMeta.addAll(createContainerLogMetas(
                            currentNodeFile.getPath().getName(), metaFiles));
                } catch (IOException ioe) {
                    // Best effort: a bad log file must not abort the whole collection.
                    LOG.warn("Can not get log meta from the log file:"
                            + currentNodeFile.getPath() + "\n" + ioe.getMessage());
                }
            }
        }
    }
    return containersLogMeta;
}
|
@Test
void testMultipleFileRegex() throws IOException {
    // Request with only a file-name regex set; all other filters are wildcards.
    ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder request =
        new ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder();
    request.setAppId(null);
    request.setContainerId(null);
    request.setFileName(String.format("%s.*", BIG_FILE_NAME));
    request.setFileSize(null);
    request.setModificationTime(null);
    request.setNodeId(null);
    request.setUser(null);
    LogAggregationMetaCollector collector = new LogAggregationMetaCollector(
        request.build(), new YarnConfiguration());
    List<ContainerLogMeta> res = collector.collect(fileController);
    List<ContainerLogFileInfo> allFile = res.stream()
        .flatMap(m -> m.getContainerLogMeta().stream())
        .collect(Collectors.toList());
    // All four matching files are returned and each name matches the regex prefix.
    assertEquals(4, allFile.size());
    assertTrue(allFile.stream().allMatch(
        f -> f.getFileName().contains(BIG_FILE_NAME)));
}
|
/**
 * Splits {@code input} into entries on {@code entryDelimiter}, then each entry
 * into a key/value pair on {@code kvDelimiter}. Entries without the key/value
 * delimiter are dropped; on duplicate keys the last value wins. Returns
 * {@code null} when any argument is null, a delimiter is empty, or the two
 * delimiters are equal (the split would be ambiguous).
 */
@Udf
public Map<String, String> splitToMap(
    @UdfParameter(
        description = "Separator string and values to join") final String input,
    @UdfParameter(
        description = "Separator string and values to join") final String entryDelimiter,
    @UdfParameter(
        description = "Separator string and values to join") final String kvDelimiter) {
    if (input == null || entryDelimiter == null || kvDelimiter == null) {
        return null;
    }
    if (entryDelimiter.isEmpty() || kvDelimiter.isEmpty() || entryDelimiter.equals(kvDelimiter)) {
        return null;
    }
    final Iterable<String> entries = Splitter.on(entryDelimiter).omitEmptyStrings().split(input);
    return StreamSupport.stream(entries.spliterator(), false)
        .filter(e -> e.contains(kvDelimiter))
        // Fix: extract key and value explicitly. The previous code handed a shared
        // Iterator to Collectors.toMap with Iterator::next as both mappers, which
        // relied on the unspecified order in which toMap invokes its key and value
        // functions. Java argument evaluation is left-to-right, so here the first
        // token is always the key and the second the value.
        .map(kv -> {
            final java.util.Iterator<String> parts = Splitter.on(kvDelimiter).split(kv).iterator();
            return new java.util.AbstractMap.SimpleImmutableEntry<>(parts.next(), parts.next());
        })
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v2));
}
|
@Test
public void shouldDropEntriesWithoutKeyAndValue() {
    // "cherry" has no ":=" delimiter, so only "foo:=apple" survives.
    Map<String, String> result = udf.splitToMap("foo:=apple/cherry", "/", ":=");
    assertThat(result, hasEntry("foo", "apple"));
    assertThat(result.size(), equalTo(1));
}
|
/**
 * Handles placeholders in this parser's configured inline expression by
 * delegating to the single-argument overload.
 */
@Override
public String handlePlaceHolder() {
    return handlePlaceHolder(inlineExpression);
}
|
@Test
void assertHandlePlaceHolder() {
    // "$->{...}" placeholders are rewritten to "${...}"; existing "${...}" stays intact.
    assertThat(TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build(
            new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "t_$->{[\"new$->{1+2}\"]}"))).handlePlaceHolder(), is("t_${[\"new${1+2}\"]}"));
    assertThat(TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build(
            new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "t_${[\"new$->{1+2}\"]}"))).handlePlaceHolder(), is("t_${[\"new${1+2}\"]}"));
}
|
/**
 * Prints the plain file targets followed by each target directory's children.
 * Directory headers ("name:") are emitted only when needed to disambiguate,
 * with a blank line between consecutive sections.
 */
static void printTargets(PrintWriter writer,
                         OptionalInt screenWidth,
                         List<String> targetFiles,
                         List<TargetDirectory> targetDirectories) {
    // File targets are printed first, without any introductory header.
    printEntries(writer, "", screenWidth, targetFiles);
    // Headers are required when directories must be told apart from the files
    // or from each other.
    final boolean needIntro = !targetFiles.isEmpty() || targetDirectories.size() > 1;
    boolean firstIntro = targetFiles.isEmpty();
    for (TargetDirectory directory : targetDirectories) {
        String intro = "";
        if (needIntro) {
            if (!firstIntro) {
                // Separate this section from the previous one.
                intro += String.format("%n");
            }
            intro += directory.name + ":";
            firstIntro = false;
        }
        log.trace("LS : targetDirectory name = {}, children = {}",
                directory.name, directory.children);
        printEntries(writer, intro, screenWidth, directory.children);
    }
}
|
@Test
public void testPrintTargets() throws Exception {
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        try (PrintWriter writer = new PrintWriter(new OutputStreamWriter(
                stream, StandardCharsets.UTF_8))) {
            // Width 18 forces the file list into two columns and each directory
            // listing into a single column.
            LsCommandHandler.printTargets(writer, OptionalInt.of(18),
                Arrays.asList("foo", "foobarbaz", "quux"), Arrays.asList(
                    new TargetDirectory("/some/dir",
                        Collections.singletonList("supercalifragalistic")),
                    new TargetDirectory("/some/other/dir",
                        Arrays.asList("capability", "delegation", "elephant",
                            "fungible", "green"))));
        }
        // Sections are separated by blank lines and each directory gets a "name:" header.
        assertEquals(String.join(String.format("%n"), Arrays.asList(
            "foo       quux",
            "foobarbaz ",
            "",
            "/some/dir:",
            "supercalifragalistic",
            "",
            "/some/other/dir:",
            "capability",
            "delegation",
            "elephant",
            "fungible",
            "green")), stream.toString().trim());
    }
}
|
/**
 * Creates sharding conditions from the WHERE clauses of the given SQL statement.
 *
 * @param sqlStatementContext SQL statement context; only {@code WhereAvailable}
 *                            statements can yield conditions
 * @param params SQL parameter values used to resolve parameter-marker expressions
 * @return sharding conditions, empty when the statement has no WHERE support
 */
public List<ShardingCondition> createShardingConditions(final SQLStatementContext sqlStatementContext, final List<Object> params) {
    if (!(sqlStatementContext instanceof WhereAvailable)) {
        return Collections.emptyList();
    }
    WhereAvailable whereAvailable = (WhereAvailable) sqlStatementContext;
    Collection<ColumnSegment> columnSegments = whereAvailable.getColumnSegments();
    ShardingSphereSchema schema = getSchema(sqlStatementContext, database);
    // Table names per column expression are only resolvable for table-aware statements.
    Map<String, String> columnExpressionTableNames;
    if (sqlStatementContext instanceof TableAvailable) {
        columnExpressionTableNames = ((TableAvailable) sqlStatementContext).getTablesContext().findTableNames(columnSegments, schema);
    } else {
        columnExpressionTableNames = Collections.emptyMap();
    }
    List<ShardingCondition> result = new ArrayList<>();
    for (WhereSegment each : whereAvailable.getWhereSegments()) {
        result.addAll(createShardingConditions(each.getExpr(), params, columnExpressionTableNames));
    }
    return result;
}
|
@Test
void assertCreateShardingConditionsForSelectInStatement() {
    // Builds "foo_sharding_col IN (5)" and checks it is turned into a
    // ListShardingConditionValue at the expression's start index.
    ColumnSegment left = new ColumnSegment(0, 0, new IdentifierValue("foo_sharding_col"));
    ListExpression right = new ListExpression(0, 0);
    LiteralExpressionSegment literalExpressionSegment = new LiteralExpressionSegment(0, 0, 5);
    right.getItems().add(literalExpressionSegment);
    InExpression inExpression = new InExpression(0, 0, left, right, false);
    when(whereSegment.getExpr()).thenReturn(inExpression);
    when(shardingRule.findShardingColumn(any(), any())).thenReturn(Optional.of("foo_sharding_col"));
    List<ShardingCondition> actual = shardingConditionEngine.createShardingConditions(sqlStatementContext, Collections.emptyList());
    assertThat(actual.get(0).getStartIndex(), is(0));
    assertTrue(actual.get(0).getValues().get(0) instanceof ListShardingConditionValue);
}
|
/**
 * Creates an access-log entry with the given format string and arguments.
 *
 * @param logFormat log format, never {@code null}
 * @param args format arguments (stored as-is, may be empty)
 * @throws NullPointerException if {@code logFormat} is {@code null}
 */
private AccessLog(String logFormat, Object... args) {
    // requireNonNull returns its argument, so validation and assignment fuse.
    this.logFormat = Objects.requireNonNull(logFormat, "logFormat");
    this.args = args;
}
|
@Test
void accessLogFiltering() {
    // Enables access logging with a filter that excludes URIs under "/filtered/",
    // then checks that a non-filtered request is logged (assertAccessLogging).
    disposableServer = createServer()
        .handle((req, resp) -> {
            // Report whether the access-log handler is installed on the pipeline
            // via a response header so the client side can assert on it.
            resp.withConnection(conn -> {
                ChannelHandler handler = conn.channel().pipeline().get(NettyPipeline.AccessLogHandler);
                resp.header(ACCESS_LOG_HANDLER, handler != null ? FOUND : NOT_FOUND);
            });
            return resp.send();
        })
        .accessLog(true, AccessLogFactory.createFilter(p -> !String.valueOf(p.uri()).startsWith("/filtered/")))
        .bindNow();
    Tuple2<String, String> response = getHttpClientResponse(URI_1);
    getHttpClientResponse(URI_2);
    assertAccessLogging(response, true, true, null);
}
|
/**
 * Returns the permission mode bits backing this object.
 *
 * @return the mode as a short
 */
public short getMode() {
    return mMode;
}
|
@Test
public void getMode() {
    // A fresh ACL has mode 0; adding owner-read, group-write and other-execute
    // entries must produce the corresponding rwx mode bits.
    AccessControlList acl = new AccessControlList();
    assertEquals(0, acl.getMode());
    acl.setEntry(new AclEntry.Builder().setType(AclEntryType.OWNING_USER).setSubject(OWNING_USER)
        .addAction(AclAction.READ).build());
    acl.setEntry(new AclEntry.Builder().setType(AclEntryType.OWNING_GROUP).setSubject(OWNING_GROUP)
        .addAction(AclAction.WRITE).build());
    acl.setEntry(new AclEntry.Builder().setType(AclEntryType.OTHER)
        .addAction(AclAction.EXECUTE).build());
    assertEquals(new Mode(Mode.Bits.READ, Mode.Bits.WRITE, Mode.Bits.EXECUTE).toShort(),
        acl.getMode());
}
|
/**
 * Converts a JDBC boolean value to its PostgreSQL text representation.
 *
 * @param jdbcBoolValue JDBC boolean value, expected to be a {@link Boolean} when non-null
 * @return {@code "t"} for true, {@code "f"} for false, or {@code null} when the input is {@code null}
 * @throws ClassCastException if a non-null value is not a {@link Boolean}
 */
public static String getTextValue(final Object jdbcBoolValue) {
    if (jdbcBoolValue == null) {
        return null;
    }
    if ((Boolean) jdbcBoolValue) {
        return "t";
    }
    return "f";
}
|
@Test
void assertGetTextValue() {
    // true must map to PostgreSQL's textual boolean "t".
    Object jdbcBoolValue = true;
    String textValue = PostgreSQLTextBoolUtils.getTextValue(jdbcBoolValue);
    assertThat(textValue, is("t"));
}
|
/**
 * Two {@code ItemCounter}s are equal iff they are of the same class and their
 * backing maps are equal.
 *
 * @param o the object to compare against
 * @return {@code true} if {@code o} is an equal {@code ItemCounter}
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    ItemCounter that = (ItemCounter) o;
    // Equality is fully determined by the backing map; return its comparison
    // directly instead of the redundant if/return-true form.
    return map.equals(that.map);
}
|
@Test
public void testEquals_returnsFalseDifferentClass() {
    // equals must reject instances of an unrelated class.
    assertFalse(counter.equals(new Object()));
}
|
/**
 * Convenience overload of {@code retryUntilTimeout} that uses the wall-clock
 * time source {@code Time.SYSTEM}; all other behavior is delegated unchanged.
 */
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description, Duration timeoutDuration, long retryBackoffMs) throws Exception {
    return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM);
}
|
@Test
public void testNoBackoffTimeAndSucceed() throws Exception {
    // With zero backoff, three consecutive timeouts are retried immediately and
    // the fourth attempt's success value is returned; exactly 4 calls are made.
    Mockito.when(mockCallable.call())
        .thenThrow(new TimeoutException())
        .thenThrow(new TimeoutException())
        .thenThrow(new TimeoutException())
        .thenReturn("success");
    assertEquals("success", RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(100), 0, mockTime));
    Mockito.verify(mockCallable, Mockito.times(4)).call();
}
|
/**
 * Two {@code PiActionParam}s are equal iff they are of the same class and have
 * equal {@code id} and {@code value} fields (null-safe comparison).
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    PiActionParam other = (PiActionParam) o;
    return Objects.equal(id, other.id)
            && Objects.equal(value, other.value);
}
|
@Test
public void testEquals() {
    // Guava's EqualsTester verifies the full equals/hashCode contract:
    // members of one group are equal to each other and unequal across groups.
    new EqualsTester()
        .addEqualityGroup(piActionParam1, sameAsPiActionParam1)
        .addEqualityGroup(piActionParam2)
        .testEquals();
}
|
/**
 * Retrieves every document id supplied in {@code params} over a synchronous
 * message-bus session and prints each reply to stdout.
 * <p>
 * When JSON output is requested (and not ids-only), replies are wrapped in a
 * JSON array: '[' before the first reply, ',' between replies, ']' after the
 * last — the comma bookkeeping via {@code first} must stay in this order.
 *
 * @throws DocumentRetrieverException on retrieval failure
 */
public void retrieveDocuments() throws DocumentRetrieverException {
    boolean first = true;
    // An explicit cluster selection overrides any route given directly.
    String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
    MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
    documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
    session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
    int trace = params.traceLevel;
    if (trace > 0) {
        session.setTraceLevel(trace);
    }
    Iterator<String> iter = params.documentIds;
    if (params.jsonOutput && !params.printIdsOnly) {
        System.out.println('[');
    }
    while (iter.hasNext()) {
        // Emit the separating comma before every reply except the first.
        if (params.jsonOutput && !params.printIdsOnly) {
            if (!first) {
                System.out.println(',');
            } else {
                first = false;
            }
        }
        String docid = iter.next();
        Message msg = createDocumentRequest(docid);
        Reply reply = session.syncSend(msg);
        printReply(reply);
    }
    if (params.jsonOutput && !params.printIdsOnly) {
        System.out.println(']');
    }
}
|
@Test
void testShowDocSize() throws DocumentRetrieverException {
    // With showDocSize enabled, the output must include the document's
    // serialized byte size.
    ClientParameters params = createParameters()
        .setDocumentIds(asIterator(DOC_ID_1))
        .setShowDocSize(true)
        .build();
    Document document = new Document(DataType.DOCUMENT, new DocumentId(DOC_ID_1));
    when(mockedSession.syncSend(any())).thenReturn(new GetDocumentReply(document));
    DocumentRetriever documentRetriever = createDocumentRetriever(params);
    documentRetriever.retrieveDocuments();
    assertTrue(outContent.toString().contains(String.format("Document size: %d bytes", document.getSerializedSize())));
}
|
/**
 * Resolves the effective {@link CheckpointStorage} for a job.
 * <p>
 * Precedence (highest first):
 * <ol>
 *   <li>a legacy state backend that itself implements {@code CheckpointStorage}
 *       (kept for backwards compatibility — overrides everything else);</li>
 *   <li>a storage instance passed via the application
 *       ({@code fromApplication}), configured with the cluster config when it
 *       is {@link ConfigurableCheckpointStorage};</li>
 *   <li>the storage named in the merged job/cluster configuration;</li>
 *   <li>a default storage derived from the merged configuration.</li>
 * </ol>
 *
 * @param fromApplication storage set on the StreamExecutionEnvironment, may be null
 * @param configuredStateBackend the resolved state backend, never null
 * @param jobConfig job-level configuration (overrides cluster-level entries)
 * @param clusterConfig cluster-level configuration
 * @param classLoader class loader for dynamically loaded storage implementations
 * @param logger optional logger for precedence diagnostics, may be null
 * @throws IllegalConfigurationException on invalid configuration
 * @throws DynamicCodeLoadingException if a configured implementation cannot be loaded
 */
public static CheckpointStorage load(
        @Nullable CheckpointStorage fromApplication,
        StateBackend configuredStateBackend,
        Configuration jobConfig,
        Configuration clusterConfig,
        ClassLoader classLoader,
        @Nullable Logger logger)
        throws IllegalConfigurationException, DynamicCodeLoadingException {
    Preconditions.checkNotNull(jobConfig, "jobConfig");
    Preconditions.checkNotNull(clusterConfig, "clusterConfig");
    Preconditions.checkNotNull(classLoader, "classLoader");
    Preconditions.checkNotNull(configuredStateBackend, "statebackend");
    // Job level config can override the cluster level config.
    Configuration mergedConfig = new Configuration(clusterConfig);
    mergedConfig.addAll(jobConfig);
    // Legacy state backends always take precedence for backwards compatibility.
    StateBackend rootStateBackend =
            (configuredStateBackend instanceof DelegatingStateBackend)
                    ? ((DelegatingStateBackend) configuredStateBackend)
                            .getDelegatedStateBackend()
                    : configuredStateBackend;
    if (rootStateBackend instanceof CheckpointStorage) {
        if (logger != null) {
            logger.info(
                    "Using legacy state backend {} as Job checkpoint storage",
                    rootStateBackend);
            if (fromApplication != null) {
                logger.warn(
                        "Checkpoint storage passed via StreamExecutionEnvironment is ignored because legacy state backend '{}' is used. {}",
                        rootStateBackend.getClass().getName(),
                        LEGACY_PRECEDENCE_LOG_MESSAGE);
            }
            if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) {
                logger.warn(
                        "Config option '{}' is ignored because legacy state backend '{}' is used. {}",
                        CheckpointingOptions.CHECKPOINT_STORAGE.key(),
                        rootStateBackend.getClass().getName(),
                        LEGACY_PRECEDENCE_LOG_MESSAGE);
            }
        }
        return (CheckpointStorage) rootStateBackend;
    }
    // In the FLINK-2.0, the checkpoint storage from application will not be supported
    // anymore.
    if (fromApplication != null) {
        if (fromApplication instanceof ConfigurableCheckpointStorage) {
            if (logger != null) {
                logger.info(
                        "Using job/cluster config to configure application-defined checkpoint storage: {}",
                        fromApplication);
                if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) {
                    logger.warn(
                            "Config option '{}' is ignored because the checkpoint storage passed via StreamExecutionEnvironment takes precedence.",
                            CheckpointingOptions.CHECKPOINT_STORAGE.key());
                }
            }
            return ((ConfigurableCheckpointStorage) fromApplication)
                    // Use cluster config for backwards compatibility.
                    .configure(clusterConfig, classLoader);
        }
        if (logger != null) {
            logger.info("Using application defined checkpoint storage: {}", fromApplication);
        }
        return fromApplication;
    }
    return fromConfig(mergedConfig, classLoader, logger)
            .orElseGet(() -> createDefaultCheckpointStorage(mergedConfig, classLoader, logger));
}
|
@Test
void testConfigureJobManagerStorage() throws Exception {
    // An application-supplied JobManagerCheckpointStorage must win over the
    // "filesystem" storage named in config, while still picking up the
    // configured savepoint directory and keeping its own max state size.
    final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString();
    final Path expectedSavepointPath = new Path(savepointDir);
    final int maxSize = 100;
    final Configuration config = new Configuration();
    config.set(
            CheckpointingOptions.CHECKPOINT_STORAGE,
            "filesystem"); // check that this is not accidentally picked up
    config.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);
    CheckpointStorage storage1 =
            CheckpointStorageLoader.load(
                    new JobManagerCheckpointStorage(maxSize),
                    new ModernStateBackend(),
                    new Configuration(),
                    config,
                    cl,
                    LOG);
    assertThat(storage1).isInstanceOf(JobManagerCheckpointStorage.class);
    JobManagerCheckpointStorage jmStorage = (JobManagerCheckpointStorage) storage1;
    assertThat(jmStorage.getSavepointPath())
            .is(matching(normalizedPath(expectedSavepointPath)));
    assertThat(jmStorage.getMaxStateSize()).isEqualTo(maxSize);
}
|
/**
 * Releases the MQTT connection, if one was ever established.
 * Safe to call when no client exists.
 */
@Override
public void destroy() {
    if (mqttClient != null) {
        mqttClient.disconnect();
    }
}
|
@Test
public void givenMqttClientIsNotNull_whenDestroy_thenDisconnect() {
    // Injects a mock client and verifies destroy() disconnects it.
    ReflectionTestUtils.setField(mqttNode, "mqttClient", mqttClientMock);
    mqttNode.destroy();
    then(mqttClientMock).should().disconnect();
}
|
/**
 * Returns the table name for the given column, or an empty string when the
 * underlying driver does not support the lookup.
 *
 * @param columnIndex 1-based column index
 * @throws SQLException propagated from the delegate for other failures
 */
@Override
public String getTableName(final int columnIndex) throws SQLException {
    try {
        return resultSetMetaData.getTableName(columnIndex);
    } catch (final SQLFeatureNotSupportedException ignored) {
        // Some drivers cannot resolve table names; fall back to empty.
        return "";
    }
}
|
@Test
void assertGetTableName() throws SQLException {
    // Delegated lookup should surface the underlying metadata's table name.
    assertThat(queryResultMetaData.getTableName(1), is("order"));
}
|
@Override
public WorkAttempt tryDoWork() {
    // Make sure the authentication cookie is in place before attempting work
    // (presumably a no-op when already retrieved — TODO confirm).
    retrieveCookieIfNecessary();
    return doWork();
}
|
@Test
void workStatusShouldDeriveFromWorkTypeForNoWork() {
    // A NoWork assignment must result in a NOTHING_TO_DO attempt.
    work = mock(NoWork.class);
    prepareForWork();
    assertThat(agentController.tryDoWork()).isEqualTo(WorkAttempt.NOTHING_TO_DO);
}
|
@Override
public String getFingerprint(Schema schema) {
    // Pure map lookup: returns null for schemas that were never registered.
    return schema2fingerprintMap.get(schema);
}
|
@Test
public void testDifferentFPs() {
    // Distinct schemas must map to distinct fingerprints.
    String fp1 = reg.getFingerprint(schema1);
    String fp2 = reg.getFingerprint(schema2);
    assertNotEquals(fp1, fp2);
}
|
/**
 * Reads the entry timestamp of the last complete event stored in the given DLQ
 * segment file, scanning blocks backwards from the end of the file.
 *
 * @param segmentPath path of the segment file
 * @return the timestamp of the last event; empty when the file has been removed
 *         concurrently or contains no complete event (corruption)
 * @throws IOException on I/O errors other than the file being missing
 */
static Optional<Timestamp> readTimestampOfLastEventInSegment(Path segmentPath) throws IOException {
    byte[] eventBytes = null;
    try (RecordIOReader recordReader = new RecordIOReader(segmentPath)) {
        // Index of the last block: the payload after the version header is laid
        // out in fixed-size blocks. (Stray duplicate ';' removed.)
        int blockId = (int) Math.ceil(((Files.size(segmentPath) - VERSION_SIZE) / (double) BLOCK_SIZE)) - 1;
        while (eventBytes == null && blockId >= 0) { // no event present in last block, try with the one before
            recordReader.seekToBlock(blockId);
            eventBytes = recordReader.readEvent();
            blockId--;
        }
    } catch (NoSuchFileException nsfex) {
        // the segment file may have been removed by the clean consumed feature on the reader side
        return Optional.empty();
    }
    if (eventBytes == null) {
        logger.warn("Cannot find a complete event into the segment file [{}], this is a DLQ segment corruption", segmentPath);
        return Optional.empty();
    }
    return Optional.of(DLQEntry.deserialize(eventBytes).getEntryTime());
}
|
@Test
public void testReadTimestampOfLastEventInSegmentWithDeletedSegment() throws IOException {
    // A missing segment file must yield an empty Optional, not an exception.
    // Exercise
    Optional<Timestamp> timestamp = DeadLetterQueueWriter.readTimestampOfLastEventInSegment(Path.of("non_existing_file.txt"));
    // Verify
    assertTrue(timestamp.isEmpty());
}
|
/**
 * Loads candidate member addresses via the configured supplier, caches the
 * private-to-public mapping, and notifies the listener registry before
 * returning the collected address set.
 *
 * @throws Exception propagated from the address supplier
 */
@Override
public Addresses loadAddresses(ClientConnectionProcessListenerRegistry listenerRunner)
        throws Exception {
    privateToPublic = getAddresses.call();
    Set<Address> possibleAddresses = privateToPublic.keySet();
    listenerRunner.onPossibleAddressesCollected(possibleAddresses);
    return new Addresses(possibleAddresses);
}
|
@Test(expected = IllegalStateException.class)
public void testLoadAddresses_whenExceptionIsThrown() throws Exception {
    // A supplier failure must propagate unchanged out of loadAddresses.
    RemoteAddressProvider provider = new RemoteAddressProvider(() -> {
        throw new IllegalStateException("Expected exception");
    }, true);
    provider.loadAddresses(createConnectionProcessListenerRunner());
}
|
/**
 * Creates a new deployment helper for building and deploying a single KJar.
 * The redundant {@code final} modifier has been removed: static methods cannot
 * be overridden, so {@code final} has no effect on them.
 *
 * @return a fresh {@link SingleKieModuleDeploymentHelper} instance
 */
public static SingleKieModuleDeploymentHelper newSingleInstance() {
    return new KieModuleDeploymentHelperImpl();
}
|
@Test
public void testSingleDeploymentHelper() throws Exception {
    // Builds a KJar from resource paths and classes, deploys it to the local
    // Maven repository, then re-opens the deployed jar and checks that the
    // number of content files matches the running tally in numFiles.
    // (numDirs is tallied alongside but only numFiles is asserted here.)
    int numFiles = 0;
    int numDirs = 0;
    SingleKieModuleDeploymentHelper deploymentHelper = KieModuleDeploymentHelper.newSingleInstance();
    List<String> resourceFilePaths = new ArrayList<String>();
    resourceFilePaths.add("builder/test/");
    numFiles += 2;
    resourceFilePaths.add("builder/simple_query_test.drl");
    ++numFiles;
    List<Class<?>> kjarClasses = new ArrayList<Class<?>>();
    kjarClasses.add(KieModuleDeploymentHelper.class);
    numDirs += 5; // org.kie.api.builder.helper
    kjarClasses.add(EnvironmentImpl.class);
    numDirs += 3; // (org.)drools.core.impl
    kjarClasses.add( Cheese.class);
    numDirs += 1; // (org.drools.)compiler
    numFiles += 3;
    String groupId = "org.kie.api.builder";
    String artifactId = "test-kjar";
    String version = "0.1-SNAPSHOT";
    deploymentHelper.createKieJarAndDeployToMaven(groupId, artifactId, version,
            "defaultKieBase", "defaultKieSession",
            resourceFilePaths, kjarClasses);
    // pom.xml, pom.properties
    numFiles += 2;
    // kmodule.xml, kmodule.info, kbase.cache
    numFiles +=3;
    // META-INF/maven/org.kie.api.builder/test-kjar
    numDirs += 4;
    // defaultKiebase, META-INF/defaultKieBase
    numDirs += 2;
    File artifactFile = MavenRepository.getMavenRepository().resolveArtifact(groupId + ":" + artifactId + ":" + version).getFile();
    zip = new ZipInputStream(new FileInputStream(artifactFile));
    Set<String> jarFiles = new HashSet<String>();
    Set<String> jarDirs = new HashSet<String>();
    ZipEntry ze = zip.getNextEntry();
    logger.debug("Getting files from deployed jar: " );
    while( ze != null ) {
        String fileName = ze.getName();
        // Entries with known content extensions count as files; everything
        // else (directory entries) is tracked separately.
        if( fileName.endsWith("drl")
            || fileName.endsWith("class")
            || fileName.endsWith("xml")
            || fileName.endsWith("info")
            || fileName.endsWith("properties")
            || fileName.endsWith("cache") ) {
            jarFiles.add(fileName);
            logger.debug("> " + fileName);
        } else {
            jarDirs.add(fileName);
            logger.debug("] " + fileName);
        }
        ze = zip.getNextEntry();
    }
    assertThat(jarFiles.size()).as("Num files in kjar").isEqualTo(numFiles);
}
|
/**
 * Validates each column's control/test checksums with the validator registered
 * for the column's category and returns only the results that did not match.
 *
 * @param columns columns to validate
 * @param controlChecksum checksum of the control run
 * @param testChecksum checksum of the test run
 * @return mismatched results only; empty when everything matched
 */
public List<ColumnMatchResult<?>> getMismatchedColumns(List<Column> columns, ChecksumResult controlChecksum, ChecksumResult testChecksum)
{
    return columns.stream()
            // A single column may produce several match results (one per validated facet).
            .flatMap(column -> columnValidators.get(column.getCategory()).get().validate(column, controlChecksum, testChecksum).stream())
            .filter(columnMatchResult -> !columnMatchResult.isMatched())
            .collect(toImmutableList());
}
|
@Test
public void testValidateVarcharArrayAsDoubleArray() {
    // A varchar array column is accepted when it matches EITHER as a double
    // array (sum within the relative error margin) OR as a varchar array
    // (checksums/cardinality equal); it is reported only when both fail.
    Map<String, Object> equalNullCount = ImmutableMap.<String, Object>builder()
            .put("varchar_array$null_count", 1L)
            .put("varchar_array_as_double$null_count", 1L)
            .build();
    Map<String, Object> unequalNullCount = ImmutableMap.<String, Object>builder()
            .put("varchar_array$null_count", 1L)
            .put("varchar_array_as_double$null_count", 2L)
            .build();
    Map<String, Object> asDoubleArrayCounts = ImmutableMap.<String, Object>builder()
            .put("varchar_array_as_double$nan_count", 2L)
            .put("varchar_array_as_double$pos_inf_count", 3L)
            .put("varchar_array_as_double$neg_inf_count", 4L)
            .build();
    List<Column> columns = ImmutableList.of(VARCHAR_ARRAY_COLUMN);
    ChecksumResult controlChecksum = new ChecksumResult(
            10,
            ImmutableMap.<String, Object>builder()
                    .put("varchar_array$checksum", new SqlVarbinary(new byte[] {0xb}))
                    .putAll(equalNullCount)
                    .put("varchar_array_as_double$sum", 1.0)
                    .putAll(asDoubleArrayCounts)
                    .put("varchar_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("varchar_array$cardinality_sum", 10L)
                    .build());
    // Matched as double.
    ChecksumResult testChecksum = new ChecksumResult(
            10,
            ImmutableMap.<String, Object>builder()
                    .put("varchar_array$checksum", new SqlVarbinary(new byte[] {0x1b}))
                    .putAll(equalNullCount)
                    .put("varchar_array_as_double$sum", 1 + RELATIVE_ERROR_MARGIN)
                    .putAll(asDoubleArrayCounts)
                    .put("varchar_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("varchar_array$cardinality_sum", 10L)
                    .build());
    assertTrue(stringAsDoubleValidator.getMismatchedColumns(columns, controlChecksum, testChecksum).isEmpty());
    // Mismatched as varchar.
    testChecksum = new ChecksumResult(
            10,
            ImmutableMap.<String, Object>builder()
                    .put("varchar_array$checksum", new SqlVarbinary(new byte[] {0xb}))
                    .putAll(equalNullCount)
                    .put("varchar_array_as_double$sum", 1.0)
                    .putAll(asDoubleArrayCounts)
                    .put("varchar_array$cardinality_checksum", new SqlVarbinary(new byte[] {0x1d}))
                    .put("varchar_array$cardinality_sum", 10L)
                    .build());
    assertMismatchedColumns(columns, controlChecksum, testChecksum, stringAsDoubleValidator, VARCHAR_ARRAY_COLUMN);
    // Mismatched as varchar.
    testChecksum = new ChecksumResult(
            10,
            ImmutableMap.<String, Object>builder()
                    .put("varchar_array$checksum", new SqlVarbinary(new byte[] {0x1b}))
                    .putAll(unequalNullCount)
                    .put("varchar_array_as_double$sum", 1.0)
                    .putAll(asDoubleArrayCounts)
                    .put("varchar_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("varchar_array$cardinality_sum", 10L)
                    .build());
    assertMismatchedColumns(columns, controlChecksum, testChecksum, stringAsDoubleValidator, VARCHAR_ARRAY_COLUMN);
    // Mismatched as either double or varchar.
    testChecksum = new ChecksumResult(
            10,
            ImmutableMap.<String, Object>builder()
                    .put("varchar_array$checksum", new SqlVarbinary(new byte[] {0x1b}))
                    .putAll(equalNullCount)
                    .put("varchar_array_as_double$sum", 1 - RELATIVE_ERROR_MARGIN * 10)
                    .putAll(asDoubleArrayCounts)
                    .put("varchar_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("varchar_array$cardinality_sum", 10L)
                    .build());
    assertMismatchedColumns(columns, controlChecksum, testChecksum, stringAsDoubleValidator, VARCHAR_ARRAY_COLUMN);
}
|
/**
 * Shell command that resolves the latest Avro schema of the current Hudi table.
 * With {@code --outputFilePath} the schema is written to that file; otherwise
 * it is returned inline in the command result.
 */
@ShellMethod(key = "fetch table schema", value = "Fetches latest table schema")
public String fetchTableSchema(
        @ShellOption(value = {"--outputFilePath"}, defaultValue = ShellOption.NULL,
                help = "File path to write schema") final String outputFilePath) throws Exception {
    HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
    Schema schema = new TableSchemaResolver(client).getTableAvroSchema();
    String prettySchema = schema.toString(true);
    if (outputFilePath == null) {
        return String.format("Latest table schema %s", prettySchema);
    }
    LOG.info("Latest table schema : " + prettySchema);
    writeToFile(outputFilePath, prettySchema);
    return String.format("Latest table schema written to %s", outputFilePath);
}
|
@Test
public void testFetchTableSchema() throws Exception {
    // Exercises "fetch table schema" twice: inline output, then file output via
    // --outputFilePath; both must equal the write schema plus Hudi metadata fields.
    // Create table and connect
    HoodieCLI.conf = storageConf();
    new TableCommand().createTable(
        tablePath, tableName, HoodieTableType.COPY_ON_WRITE.name(),
        "", TimelineLayoutVersion.VERSION_1, "org.apache.hudi.common.model.HoodieAvroPayload");
    String schemaStr = "{\n"
        + "  \"type\" : \"record\",\n"
        + "  \"name\" : \"SchemaName\",\n"
        + "  \"namespace\" : \"SchemaNS\",\n"
        + "  \"fields\" : [ {\n"
        + "    \"name\" : \"key\",\n"
        + "    \"type\" : \"int\"\n"
        + "  }, {\n"
        + "    \"name\" : \"val\",\n"
        + "    \"type\" : [ \"null\", \"string\" ],\n"
        + "    \"default\" : null\n"
        + "  }]};";
    generateData(schemaStr);
    Object result = shell.evaluate(() -> "fetch table schema");
    assertTrue(ShellEvaluationResultUtil.isSuccess(result));
    // The command result prefixes the schema with text; parse from the first '{'.
    String actualSchemaStr = result.toString().substring(result.toString().indexOf("{"));
    Schema actualSchema = new Schema.Parser().parse(actualSchemaStr);
    Schema expectedSchema = new Schema.Parser().parse(schemaStr);
    expectedSchema = HoodieAvroUtils.addMetadataFields(expectedSchema);
    assertEquals(actualSchema, expectedSchema);
    File file = File.createTempFile("temp", null);
    result = shell.evaluate(() -> "fetch table schema --outputFilePath " + file.getAbsolutePath());
    assertTrue(ShellEvaluationResultUtil.isSuccess(result));
    actualSchemaStr = getFileContent(file.getAbsolutePath());
    actualSchema = new Schema.Parser().parse(actualSchemaStr);
    assertEquals(actualSchema, expectedSchema);
}
|
/**
 * Maps a protobuf {@code NamenodeRoleProto} to the internal
 * {@code NamenodeRole} enum.
 * Returns {@code null} for any proto value not covered by the switch; a
 * {@code null} argument throws {@code NullPointerException} (language-defined
 * behavior of switching on a null enum reference).
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
        return NamenodeRole.NAMENODE;
    case BACKUP:
        return NamenodeRole.BACKUP;
    case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
    }
    return null;
}
|
@Test
public void testConvertBlockType() {
    // Round-trips both BlockType values through the proto conversion and back.
    BlockType bContiguous = BlockType.CONTIGUOUS;
    BlockTypeProto bContiguousProto = PBHelperClient.convert(bContiguous);
    BlockType bContiguous2 = PBHelperClient.convert(bContiguousProto);
    assertEquals(bContiguous, bContiguous2);
    BlockType bStriped = BlockType.STRIPED;
    BlockTypeProto bStripedProto = PBHelperClient.convert(bStriped);
    BlockType bStriped2 = PBHelperClient.convert(bStripedProto);
    assertEquals(bStriped, bStriped2);
}
|
/**
 * Extracts the column names from a KSQL schema string: each top-level
 * comma-separated entry is stripped of backticks/whitespace and reduced to its
 * leading identifier.
 *
 * @param schema schema string such as {@code "`A` INT, `B` STRING"}
 * @return the column names in declaration order
 */
public static List<String> colNamesFromSchema(final String schema) {
    return splitAcrossOneLevelDeepComma(schema).stream()
        .map(column -> RowUtil.splitAndGetFirst(RowUtil.removeBackTickAndKeyTrim(column)))
        .collect(Collectors.toList());
}
|
@Test
public void shouldGetColumnNamesFromSchema() {
    // Column extraction must honor only top-level commas: commas nested inside
    // DECIMAL(...), MAP<...>, STRUCT<...> etc. must not split entries.
    // Given
    final String schema = "`K` STRUCT<`F1` ARRAY<STRING>>, "
        + "`STR` STRING, "
        + "`LONG` BIGINT, "
        + "`DEC` DECIMAL(4, 2),"
        + "`BYTES_` BYTES, "
        + "`ARRAY` ARRAY<STRING>, "
        + "`MAP` MAP<STRING, STRING>, "
        + "`STRUCT` STRUCT<`F1` INTEGER>, "
        + "`COMPLEX` STRUCT<`DECIMAL` DECIMAL(2, 1), `STRUCT` STRUCT<`F1` STRING, `F2` INTEGER>, `ARRAY_ARRAY` ARRAY<ARRAY<STRING>>, `ARRAY_STRUCT` ARRAY<STRUCT<`F1` STRING>>, `ARRAY_MAP` ARRAY<MAP<STRING, INTEGER>>, `MAP_ARRAY` MAP<STRING, ARRAY<STRING>>, `MAP_MAP` MAP<STRING, MAP<STRING, INTEGER>>, `MAP_STRUCT` MAP<STRING, STRUCT<`F1` STRING>>>, "
        + "`TIMESTAMP` TIMESTAMP, "
        + "`DATE` DATE, "
        + "`TIME` TIME, "
        + "`HEAD` BYTES";
    // When
    final List<String> columnNames = RowUtil.colNamesFromSchema(schema);
    // Then
    assertThat(
        columnNames,
        contains(
            "K",
            "STR",
            "LONG",
            "DEC",
            "BYTES_",
            "ARRAY",
            "MAP",
            "STRUCT",
            "COMPLEX",
            "TIMESTAMP",
            "DATE",
            "TIME",
            "HEAD"
        ));
}
|
@Override
public Health health() {
    // Per-breaker health keyed by breaker name, restricted to breakers whose
    // configuration opts into health-indicator registration.
    Map<String, Health> healths = circuitBreakerRegistry.getAllCircuitBreakers().stream()
        .filter(this::isRegisterHealthIndicator)
        .collect(Collectors.toMap(CircuitBreaker::getName,
            this::mapBackendMonitorState));
    // Overall status is the aggregate of the individual breaker statuses.
    Status status = this.statusAggregator.getAggregateStatus(healths.values().stream().map(Health::getStatus).collect(Collectors.toSet()));
    return Health.status(status).withDetails(healths).build();
}
|
@Test
public void testHealthStatus() {
    // Registers one breaker per state (OPEN/HALF_OPEN/CLOSED) and verifies the
    // aggregate is DOWN while each breaker's detail reflects its own state.
    CircuitBreaker openCircuitBreaker = mock(CircuitBreaker.class);
    CircuitBreaker halfOpenCircuitBreaker = mock(CircuitBreaker.class);
    CircuitBreaker closeCircuitBreaker = mock(CircuitBreaker.class);
    Map<CircuitBreaker.State, CircuitBreaker> expectedStateToCircuitBreaker = new HashMap<>();
    expectedStateToCircuitBreaker.put(OPEN, openCircuitBreaker);
    expectedStateToCircuitBreaker.put(HALF_OPEN, halfOpenCircuitBreaker);
    expectedStateToCircuitBreaker.put(CLOSED, closeCircuitBreaker);
    CircuitBreakerConfigurationProperties.InstanceProperties instanceProperties =
        mock(CircuitBreakerConfigurationProperties.InstanceProperties.class);
    CircuitBreakerConfigurationProperties circuitBreakerProperties = mock(
        CircuitBreakerConfigurationProperties.class);
    // given
    CircuitBreakerRegistry registry = mock(CircuitBreakerRegistry.class);
    CircuitBreakerConfig config = mock(CircuitBreakerConfig.class);
    CircuitBreaker.Metrics metrics = mock(CircuitBreaker.Metrics.class);
    // when
    when(registry.getAllCircuitBreakers()).thenReturn(new HashSet<>(expectedStateToCircuitBreaker.values()));
    boolean allowHealthIndicatorToFail = true;
    expectedStateToCircuitBreaker.forEach(
        (state, circuitBreaker) -> setCircuitBreakerWhen(state, circuitBreaker, config, metrics, instanceProperties, circuitBreakerProperties, allowHealthIndicatorToFail));
    CircuitBreakersHealthIndicator healthIndicator =
        new CircuitBreakersHealthIndicator(registry, circuitBreakerProperties, new SimpleStatusAggregator());
    // then
    Health health = healthIndicator.health();
    then(health.getStatus()).isEqualTo(Status.DOWN);
    then(health.getDetails()).containsKeys(OPEN.name(), HALF_OPEN.name(), CLOSED.name());
    assertState(OPEN, Status.DOWN, health.getDetails());
    assertState(HALF_OPEN, new Status("CIRCUIT_HALF_OPEN"), health.getDetails());
    assertState(CLOSED, Status.UP, health.getDetails());
}
|
/**
 * Returns a {@code Permutor} that reorders a list of feed items according to
 * the requested sort order.
 * <p>
 * Comparator-based orders are wrapped in a sorting permutor; RANDOM and the
 * smart-shuffle orders produce dedicated permutors instead.
 *
 * @param sortOrder the sort order to realize; must be one of the handled values
 * @return a permutor implementing the order
 * @throws IllegalArgumentException for unhandled sort orders
 */
@NonNull
public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) {
    Comparator<FeedItem> comparator = null;
    Permutor<FeedItem> permutor = null;
    switch (sortOrder) {
        case EPISODE_TITLE_A_Z:
            comparator = (f1, f2) -> itemTitle(f1).compareTo(itemTitle(f2));
            break;
        case EPISODE_TITLE_Z_A:
            comparator = (f1, f2) -> itemTitle(f2).compareTo(itemTitle(f1));
            break;
        case DATE_OLD_NEW:
            comparator = (f1, f2) -> pubDate(f1).compareTo(pubDate(f2));
            break;
        case DATE_NEW_OLD:
            comparator = (f1, f2) -> pubDate(f2).compareTo(pubDate(f1));
            break;
        case DURATION_SHORT_LONG:
            comparator = (f1, f2) -> Integer.compare(duration(f1), duration(f2));
            break;
        case DURATION_LONG_SHORT:
            comparator = (f1, f2) -> Integer.compare(duration(f2), duration(f1));
            break;
        case EPISODE_FILENAME_A_Z:
            comparator = (f1, f2) -> itemLink(f1).compareTo(itemLink(f2));
            break;
        case EPISODE_FILENAME_Z_A:
            comparator = (f1, f2) -> itemLink(f2).compareTo(itemLink(f1));
            break;
        case FEED_TITLE_A_Z:
            comparator = (f1, f2) -> feedTitle(f1).compareTo(feedTitle(f2));
            break;
        case FEED_TITLE_Z_A:
            comparator = (f1, f2) -> feedTitle(f2).compareTo(feedTitle(f1));
            break;
        case RANDOM:
            permutor = Collections::shuffle;
            break;
        case SMART_SHUFFLE_OLD_NEW:
            permutor = (queue) -> smartShuffle(queue, true);
            break;
        case SMART_SHUFFLE_NEW_OLD:
            permutor = (queue) -> smartShuffle(queue, false);
            break;
        case SIZE_SMALL_LARGE:
            comparator = (f1, f2) -> Long.compare(size(f1), size(f2));
            break;
        case SIZE_LARGE_SMALL:
            comparator = (f1, f2) -> Long.compare(size(f2), size(f1));
            break;
        case COMPLETION_DATE_NEW_OLD:
            // NOTE(review): unlike the other comparators this dereferences
            // getMedia()/getPlaybackCompletionDate() without a null guard —
            // confirm callers only use it on items with playback media.
            comparator = (f1, f2) -> f2.getMedia().getPlaybackCompletionDate()
                .compareTo(f1.getMedia().getPlaybackCompletionDate());
            break;
        default:
            throw new IllegalArgumentException("Permutor not implemented");
    }
    if (comparator != null) {
        // Wrap the chosen comparator in a sorting permutor.
        final Comparator<FeedItem> comparator2 = comparator;
        permutor = (queue) -> Collections.sort(queue, comparator2);
    }
    return permutor;
}
|
@Test
public void testPermutorForRule_EPISODE_TITLE_ASC_NullTitle() {
    // An item with a null title must still sort deterministically (it ends up
    // first here) rather than causing a crash.
    Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.EPISODE_TITLE_A_Z);
    List<FeedItem> itemList = getTestList();
    itemList.get(2) // itemId 2
        .setTitle(null);
    assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
    permutor.reorder(itemList);
    assertTrue(checkIdOrder(itemList, 2, 1, 3)); // after sorting
}
|
/**
 * Converts a Flink table expression into an equivalent Iceberg filter
 * {@code Expression}, when a translation exists.
 * <p>
 * Only {@code CallExpression}s whose function is registered in {@code FILTERS}
 * are translated; anything else yields {@code Optional.empty()} so the caller
 * can fall back to un-pushed-down filtering. Comparison operators get a
 * mirrored variant (e.g. {@code lessThan}/{@code greaterThan}) because the
 * field reference may appear on either side of the literal. Equality against
 * NaN is rewritten to Iceberg's dedicated isNaN/notNaN predicates.
 *
 * @param flinkExpression the Flink expression to translate
 * @return the Iceberg expression, or empty if no translation applies
 */
public static Optional<Expression> convert(
        org.apache.flink.table.expressions.Expression flinkExpression) {
    if (!(flinkExpression instanceof CallExpression)) {
        return Optional.empty();
    }
    CallExpression call = (CallExpression) flinkExpression;
    Operation op = FILTERS.get(call.getFunctionDefinition());
    if (op != null) {
        switch (op) {
            case IS_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::isNull);
            case NOT_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::notNull);
            case LT:
                return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
            case LT_EQ:
                return convertFieldAndLiteral(
                    Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
            case GT:
                return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
            case GT_EQ:
                return convertFieldAndLiteral(
                    Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
            case EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        // "field = NaN" is not expressible as equal(); use isNaN instead.
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.isNaN(ref);
                        } else {
                            return Expressions.equal(ref, lit);
                        }
                    },
                    call);
            case NOT_EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.notNaN(ref);
                        } else {
                            return Expressions.notEqual(ref, lit);
                        }
                    },
                    call);
            case NOT:
                // Recursively convert the negated child expression.
                return onlyChildAs(call, CallExpression.class)
                    .flatMap(FlinkFilters::convert)
                    .map(Expressions::not);
            case AND:
                return convertLogicExpression(Expressions::and, call);
            case OR:
                return convertLogicExpression(Expressions::or, call);
            case STARTS_WITH:
                return convertLike(call);
        }
    }
    return Optional.empty();
}
|
@Test
public void testLessThanEquals() {
    // "field1 <= 1" and its mirrored form "1 >= field1" must both translate to
    // the same Iceberg lessThanOrEqual predicate.
    UnboundPredicate<Integer> expected =
        org.apache.iceberg.expressions.Expressions.lessThanOrEqual("field1", 1);
    Optional<org.apache.iceberg.expressions.Expression> actual =
        FlinkFilters.convert(resolve(Expressions.$("field1").isLessOrEqual(Expressions.lit(1))));
    assertThat(actual).isPresent();
    assertPredicatesMatch(expected, actual.get());
    Optional<org.apache.iceberg.expressions.Expression> actual1 =
        FlinkFilters.convert(resolve(Expressions.lit(1).isGreaterOrEqual(Expressions.$("field1"))));
    assertThat(actual1).isPresent();
    assertPredicatesMatch(expected, actual1.get());
}
|
/**
 * Creates a new, empty {@link JSONObject}.
 *
 * @return a fresh empty JSON object
 */
public static JSONObject createObj() {
    return new JSONObject();
}
|
@Test
public void customValueTest() {
    // A JSONString value supplies its own raw serialization, so the formatted
    // number must be embedded without quotes.
    final JSONObject jsonObject = JSONUtil.createObj()
        .set("test2", (JSONString) () -> NumberUtil.decimalFormat("#.0", 12.00D));
    assertEquals("{\"test2\":12.0}", jsonObject.toString());
}
|
/**
 * A client is expired only when it is ephemeral, publishes no services, and has
 * been idle longer than the configured client expiry window.
 *
 * @param currentTime current time in the same unit as the last-updated time
 * @return {@code true} when the client should be treated as expired
 */
@Override
public boolean isExpire(long currentTime) {
    // Guard clause preserves the original short-circuit order.
    if (!isEphemeral() || !getAllPublishedService().isEmpty()) {
        return false;
    }
    long idleTime = currentTime - getLastUpdatedTime();
    return idleTime > ClientConfig.getInstance().getClientExpiredTime();
}
|
@Test
void testIsExpire() {
    // A timestamp well past the expiry window (twice the configured interval
    // beyond the last update) must be reported as expired.
    long mustExpireTime = ipPortBasedClient.getLastUpdatedTime() + ClientConfig.getInstance().getClientExpiredTime() * 2;
    assertTrue(ipPortBasedClient.isExpire(mustExpireTime));
}
|
@Override
public void addListener(String key, String group, ConfigurationListener listener) {
    // Intentional no-op: this configuration implementation does not support
    // change notifications.
}
|
@Test
void testAddListener() {
    // Both addListener overloads are no-ops; calling them with nulls must not throw.
    configuration.addListener(null, null);
    configuration.addListener(null, null, null);
}
|
/**
 * Wraps the given varargs array in a sequential {@link Stream}.
 *
 * @param array elements to stream; must not be {@code null} (individual
 *              elements may be {@code null})
 * @param <T> element type
 * @return a sequential stream over the array
 */
@SafeVarargs
public static <T> Stream<T> of(T... array) {
    Assert.notNull(array, "Array must be not null!");
    return Stream.of(array);
}
|
@Test
public void ofTest(){
    // Iterative overload: starting from 2 and doubling, bounded by limit 4,
    // produces the first four terms joined by commas.
    final Stream<Integer> stream = StreamUtil.of(2, x -> x * 2, 4);
    final String result = stream.collect(CollectorUtil.joining(","));
    assertEquals("2,4,8,16", result);
}
|
public QueueCapacityVector parse(String capacityString, QueuePath queuePath) {
    // The root queue always owns 100% of every resource.
    if (queuePath.isRoot()) {
        return QueueCapacityVector.of(100f, ResourceUnitCapacityType.PERCENTAGE);
    }
    // A missing capacity definition yields an empty vector.
    if (capacityString == null) {
        return new QueueCapacityVector();
    }
    // Spaces carry no meaning inside a capacity definition.
    final String normalized = capacityString.replaceAll(" ", "");
    // The first parser whose pattern matches the string wins.
    for (Parser candidate : parsers) {
        Matcher matcher = candidate.regex.matcher(normalized);
        if (matcher.find()) {
            return candidate.parser.apply(matcher);
        }
    }
    return new QueueCapacityVector();
}
|
@Test
public void testPercentageCapacityConfig() {
    // A plain numeric string on a non-root queue parses as PERCENTAGE for
    // every resource dimension.
    QueueCapacityVector percentageCapacityVector =
        capacityConfigParser.parse(Float.toString(PERCENTAGE_VALUE), QUEUE_PATH);
    QueueCapacityVectorEntry memory = percentageCapacityVector.getResource(MEMORY_URI);
    QueueCapacityVectorEntry vcore = percentageCapacityVector.getResource(VCORES_URI);
    Assert.assertEquals(ResourceUnitCapacityType.PERCENTAGE, memory.getVectorResourceType());
    Assert.assertEquals(PERCENTAGE_VALUE, memory.getResourceValue(), EPSILON);
    Assert.assertEquals(ResourceUnitCapacityType.PERCENTAGE, vcore.getVectorResourceType());
    Assert.assertEquals(PERCENTAGE_VALUE, vcore.getResourceValue(), EPSILON);

    // The root queue ignores the configured string and is always fixed at 100%.
    QueueCapacityVector rootCapacityVector =
        capacityConfigParser.parse(Float.toString(PERCENTAGE_VALUE), ROOT);
    QueueCapacityVectorEntry memoryRoot = rootCapacityVector.getResource(MEMORY_URI);
    QueueCapacityVectorEntry vcoreRoot = rootCapacityVector.getResource(VCORES_URI);
    Assert.assertEquals(ResourceUnitCapacityType.PERCENTAGE, memoryRoot.getVectorResourceType());
    Assert.assertEquals(100f, memoryRoot.getResourceValue(), EPSILON);
    Assert.assertEquals(ResourceUnitCapacityType.PERCENTAGE, vcoreRoot.getVectorResourceType());
    Assert.assertEquals(100f, vcoreRoot.getResourceValue(), EPSILON);
}
|
public static Map<String, String> resolveAttachments(Object invocation, boolean isApache) {
    if (invocation == null) {
        return Collections.emptyMap();
    }
    // Seed with the RPC-context attachments of the matching Dubbo flavor.
    final Map<String, String> merged = new HashMap<>(
            getAttachmentsFromContext(isApache ? APACHE_RPC_CONTEXT : ALIBABA_RPC_CONTEXT));
    // Invocation-level attachments (read reflectively) override context entries.
    final Optional<Object> fieldValue = ReflectUtils.getFieldValue(invocation, ATTACHMENTS_FIELD);
    if (fieldValue.isPresent() && fieldValue.get() instanceof Map) {
        merged.putAll((Map<String, String>) fieldValue.get());
    }
    return Collections.unmodifiableMap(merged);
}
|
@Test
public void testObjectAttachments() {
    final TestObjectInvocation testObjectInvocation = new TestObjectInvocation(buildObjectAttachments());
    // Attachments held on the invocation object must be resolved identically
    // for both the Alibaba (false) and Apache (true) Dubbo flavors.
    final Map<String, String> alibabaResult =
            DubboAttachmentsHelper.resolveAttachments(testObjectInvocation, false);
    Assert.assertEquals(alibabaResult, testObjectInvocation.attachments);
    final Map<String, String> apacheResult =
            DubboAttachmentsHelper.resolveAttachments(testObjectInvocation, true);
    Assert.assertEquals(apacheResult, testObjectInvocation.attachments);
}
|
/**
 * No-arg constructor; presumably required by the framework's endpoint
 * instantiation machinery — TODO confirm against the component factory.
 */
public SqlEndpoint() {
}
|
@Test
public void testSQLEndpoint() throws Exception {
    MockEndpoint mock = getMockEndpoint("mock:result");
    mock.expectedMessageCount(1);
    // An empty body is enough to trigger the route; one message must arrive.
    template.sendBody("direct:start", "");
    MockEndpoint.assertIsSatisfied(context);
}
|
public Optional<UserDto> authenticate(HttpRequest request) {
    // Optional.map wraps a null authentication result into Optional.empty(),
    // which is exactly equivalent to flatMap(Optional.ofNullable(...)).
    return extractCredentialsFromHeader(request)
            .map(credentials -> authenticate(credentials, request));
}
|
@Test
public void authenticate_from_user_token() {
    UserDto user = db.users().insertUser();
    // Stub a basic-auth header carrying a user token that resolves to the user.
    when(userTokenAuthentication.authenticate(request))
        .thenReturn(Optional.of(new UserAuthResult(user, new UserTokenDto().setName("my-token"), UserAuthResult.AuthType.TOKEN)));
    when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn("Basic " + toBase64("token:"));

    Optional<UserDto> authenticated = underTest.authenticate(request);

    assertThat(authenticated).isPresent();
    assertThat(authenticated.get().getLogin()).isEqualTo(user.getLogin());
}
|
/**
 * Sets the parent version string.
 *
 * @param parentVersion the parent version, e.g. {@code "1.0"}; may be null
 */
public void setParentVersion(String parentVersion) {
    this.parentVersion = parentVersion;
}
|
@Test
public void testSetParentVersion() {
    // The setter must store the value verbatim; assert the exact round-trip
    // rather than mere non-nullness, which would pass for any wrong value.
    String parentVersion = "1.0";
    Model instance = new Model();
    instance.setParentVersion(parentVersion);
    assertEquals(parentVersion, instance.getParentVersion());
}
|
/**
 * Serializes a multipart payload into RFC 2046 wire format: optional preamble,
 * then each body part framed by "--boundary" lines with its headers, and a
 * closing "--boundary--" followed by an optional epilogue.
 *
 * NOTE(review): all getBytes() calls use the platform default charset —
 * presumably safe because boundaries/headers are ASCII, but confirm for
 * non-ASCII header values.
 *
 * @param multipartPayload the payload to serialize
 * @return a stream containing the serialized bytes
 * @throws IOException if writing to the in-memory stream fails
 */
public static ByteArrayOutputStream getPayload(MultipartPayload multipartPayload) throws IOException {
    final ByteArrayOutputStream os = new ByteArrayOutputStream();
    final String preamble = multipartPayload.getPreamble();
    if (preamble != null) {
        os.write((preamble + "\r\n").getBytes());
    }
    final List<BodyPartPayload> bodyParts = multipartPayload.getBodyParts();
    if (!bodyParts.isEmpty()) {
        final String boundary = multipartPayload.getBoundary();
        final byte[] startBoundary = ("--" + boundary + "\r\n").getBytes();
        for (BodyPartPayload bodyPart : bodyParts) {
            os.write(startBoundary);
            // Each part's headers precede its body, separated by a blank line.
            final Map<String, String> bodyPartHeaders = bodyPart.getHeaders();
            if (bodyPartHeaders != null) {
                for (Map.Entry<String, String> header : bodyPartHeaders.entrySet()) {
                    os.write((header.getKey() + ": " + header.getValue() + "\r\n").getBytes());
                }
            }
            os.write("\r\n".getBytes());
            // Nested multiparts are serialized recursively; raw parts are copied.
            if (bodyPart instanceof MultipartPayload) {
                getPayload((MultipartPayload) bodyPart).writeTo(os);
            } else if (bodyPart instanceof ByteArrayBodyPartPayload) {
                final ByteArrayBodyPartPayload byteArrayBodyPart = (ByteArrayBodyPartPayload) bodyPart;
                os.write(byteArrayBodyPart.getPayload(), byteArrayBodyPart.getOff(), byteArrayBodyPart.getLen());
            } else {
                // Unknown subtype indicates a programming error, not bad input.
                throw new AssertionError(bodyPart.getClass());
            }
            os.write("\r\n".getBytes()); //CRLF for the next (starting or closing) boundary
        }
        os.write(("--" + boundary + "--").getBytes());
        final String epilogue = multipartPayload.getEpilogue();
        if (epilogue != null) {
            os.write(("\r\n" + epilogue).getBytes());
        }
    }
    return os;
}
|
@Test
public void testEmptyMultipartPayload() throws IOException {
    final MultipartPayload mP = new MultipartPayload();
    // Render every header as a "Name: value\r\n" line.
    final StringBuilder renderedHeaders = new StringBuilder();
    mP.getHeaders().forEach((name, value) ->
            renderedHeaders.append(name).append(": ").append(value).append("\r\n"));
    assertEquals("Content-Type: multipart/form-data; boundary=\"" + mP.getBoundary() + "\"\r\n",
            renderedHeaders.toString());
    // A payload with no body parts serializes to nothing at all.
    assertEquals("", MultipartUtils.getPayload(mP).toString());
}
|
public static URL parseDecodedStr(String decodedURLStr) {
    // Everything after the first '?' is the query string; the rest is the body.
    int queryStart = decodedURLStr.indexOf('?');
    Map<String, String> parameters =
            queryStart >= 0 ? parseDecodedParams(decodedURLStr, queryStart + 1) : null;
    int bodyEnd = queryStart >= 0 ? queryStart : decodedURLStr.length();
    String decodedBody = decodedURLStr.substring(0, bodyEnd);
    return parseURLBody(decodedURLStr, decodedBody, parameters);
}
|
@Test
void testPond() {
    // A '#' appearing before '@' must not confuse protocol/host parsing.
    String raw = "https://a#@b";
    URL viaValueOf = URL.valueOf(raw);
    URL viaParser = URLStrParser.parseDecodedStr(raw);
    Assertions.assertEquals("https", viaValueOf.getProtocol());
    Assertions.assertEquals("https", viaParser.getProtocol());
    Assertions.assertEquals("a", viaValueOf.getHost());
    Assertions.assertEquals("a", viaParser.getHost());
}
|
public static String getRemoteAddrFromRequest(Request request, Set<IpSubnet> trustedSubnets) {
    final String remoteAddr = request.getRemoteAddr();
    final String forwardedFor = request.getHeader("X-Forwarded-For");
    if (forwardedFor == null) {
        // No proxy header present; use the socket-level peer address.
        return remoteAddr;
    }
    for (IpSubnet subnet : trustedSubnets) {
        try {
            if (subnet.contains(remoteAddr)) {
                // The direct peer is a trusted proxy, so X-Forwarded-For can be believed.
                return forwardedFor;
            }
        } catch (UnknownHostException e) {
            // ignore silently, probably not worth logging
        }
    }
    // Peer is outside every trusted subnet; ignore the header.
    return remoteAddr;
}
|
@Test
public void getRemoteAddrFromRequestReturnsHeaderContentWithXForwardedForHeaderFromTrustedNetwork() throws Exception {
    // The peer (127.0.0.1) lies inside the trusted subnet, so the
    // X-Forwarded-For value must be returned instead of the socket address.
    final Request request = mock(Request.class);
    when(request.getRemoteAddr()).thenReturn("127.0.0.1");
    when(request.getHeader("X-Forwarded-For")).thenReturn("192.168.100.42");
    final String s = RestTools.getRemoteAddrFromRequest(request, Collections.singleton(new IpSubnet("127.0.0.0/8")));
    assertThat(s).isEqualTo("192.168.100.42");
}
|
/**
 * Validates every task-type configuration attached to the table config:
 * non-null config maps, parseable cron schedules, server-download
 * prerequisites, and task-specific rules for RealtimeToOfflineTask and
 * UpsertCompactionTask.
 *
 * @param tableConfig table configuration carrying the task configs
 * @param schema      table schema, used to validate column references
 * @throws IllegalStateException if any task configuration is invalid
 */
@VisibleForTesting
static void validateTaskConfigs(TableConfig tableConfig, Schema schema) {
    TableTaskConfig taskConfig = tableConfig.getTaskConfig();
    if (taskConfig != null) {
        for (Map.Entry<String, Map<String, String>> taskConfigEntry : taskConfig.getTaskTypeConfigsMap().entrySet()) {
            String taskTypeConfigName = taskConfigEntry.getKey();
            Map<String, String> taskTypeConfig = taskConfigEntry.getValue();
            // Task configuration cannot be null.
            Preconditions.checkNotNull(taskTypeConfig,
                String.format("Task configuration for \"%s\" cannot be null!", taskTypeConfigName));
            // Schedule key for task config has to be set.
            if (taskTypeConfig.containsKey(SCHEDULE_KEY)) {
                String cronExprStr = taskTypeConfig.get(SCHEDULE_KEY);
                try {
                    // Parsing is the validation; the builder result is discarded.
                    CronScheduleBuilder.cronSchedule(cronExprStr);
                } catch (Exception e) {
                    throw new IllegalStateException(
                        String.format("Task %s contains an invalid cron schedule: %s", taskTypeConfigName, cronExprStr), e);
                }
            }
            // Downloading segments from servers requires a peer download scheme.
            boolean isAllowDownloadFromServer = Boolean.parseBoolean(
                taskTypeConfig.getOrDefault(TableTaskConfig.MINION_ALLOW_DOWNLOAD_FROM_SERVER,
                    String.valueOf(TableTaskConfig.DEFAULT_MINION_ALLOW_DOWNLOAD_FROM_SERVER)));
            if (isAllowDownloadFromServer) {
                Preconditions.checkState(tableConfig.getValidationConfig().getPeerSegmentDownloadScheme() != null,
                    String.format("Table %s has task %s with allowDownloadFromServer set to true, but "
                        + "peerSegmentDownloadScheme is not set in the table config", tableConfig.getTableName(),
                        taskTypeConfigName));
            }
            // Task Specific validation for REALTIME_TO_OFFLINE_TASK_TYPE
            // TODO task specific validate logic should directly call to PinotTaskGenerator API
            if (taskTypeConfigName.equals(REALTIME_TO_OFFLINE_TASK_TYPE)) {
                // check table is not upsert
                Preconditions.checkState(tableConfig.getUpsertMode() == UpsertConfig.Mode.NONE,
                    "RealtimeToOfflineTask doesn't support upsert table!");
                // check no malformed period (conversion throws on bad input)
                TimeUtils.convertPeriodToMillis(taskTypeConfig.getOrDefault("bufferTimePeriod", "2d"));
                TimeUtils.convertPeriodToMillis(taskTypeConfig.getOrDefault("bucketTimePeriod", "1d"));
                TimeUtils.convertPeriodToMillis(taskTypeConfig.getOrDefault("roundBucketTimePeriod", "1s"));
                // check mergeType is correct
                Preconditions.checkState(ImmutableSet.of("CONCAT", "ROLLUP", "DEDUP")
                    .contains(taskTypeConfig.getOrDefault("mergeType", "CONCAT").toUpperCase()),
                    "MergeType must be one of [CONCAT, ROLLUP, DEDUP]!");
                // check no mis-configured columns
                Set<String> columnNames = schema.getColumnNames();
                for (Map.Entry<String, String> entry : taskTypeConfig.entrySet()) {
                    if (entry.getKey().endsWith(".aggregationType")) {
                        Preconditions.checkState(columnNames.contains(StringUtils.removeEnd(entry.getKey(), ".aggregationType")),
                            String.format("Column \"%s\" not found in schema!", entry.getKey()));
                        try {
                            // check that it's a valid aggregation function type
                            AggregationFunctionType aft = AggregationFunctionType.getAggregationFunctionType(entry.getValue());
                            // check that a value aggregator is available
                            if (!AVAILABLE_CORE_VALUE_AGGREGATORS.contains(aft)) {
                                throw new IllegalArgumentException("ValueAggregator not enabled for type: " + aft.toString());
                            }
                        } catch (IllegalArgumentException e) {
                            String err =
                                String.format("Column \"%s\" has invalid aggregate type: %s", entry.getKey(), entry.getValue());
                            throw new IllegalStateException(err);
                        }
                    }
                }
            } else if (taskTypeConfigName.equals(UPSERT_COMPACTION_TASK_TYPE)) {
                // check table is realtime
                Preconditions.checkState(tableConfig.getTableType() == TableType.REALTIME,
                    "UpsertCompactionTask only supports realtime tables!");
                // check upsert enabled
                Preconditions.checkState(tableConfig.isUpsertEnabled(), "Upsert must be enabled for UpsertCompactionTask");
                // check no malformed period
                if (taskTypeConfig.containsKey("bufferTimePeriod")) {
                    TimeUtils.convertPeriodToMillis(taskTypeConfig.get("bufferTimePeriod"));
                }
                // check invalidRecordsThresholdPercent
                if (taskTypeConfig.containsKey("invalidRecordsThresholdPercent")) {
                    Preconditions.checkState(Double.parseDouble(taskTypeConfig.get("invalidRecordsThresholdPercent")) >= 0
                        && Double.parseDouble(taskTypeConfig.get("invalidRecordsThresholdPercent")) <= 100,
                        "invalidRecordsThresholdPercent must be >= 0 and <= 100");
                }
                // check invalidRecordsThresholdCount
                if (taskTypeConfig.containsKey("invalidRecordsThresholdCount")) {
                    Preconditions.checkState(Long.parseLong(taskTypeConfig.get("invalidRecordsThresholdCount")) >= 1,
                        "invalidRecordsThresholdCount must be >= 1");
                }
                // check that either invalidRecordsThresholdPercent or invalidRecordsThresholdCount was provided
                Preconditions.checkState(
                    taskTypeConfig.containsKey("invalidRecordsThresholdPercent") || taskTypeConfig.containsKey(
                        "invalidRecordsThresholdCount"),
                    "invalidRecordsThresholdPercent or invalidRecordsThresholdCount or both must be provided");
                // Snapshot-based doc id validation needs snapshots enabled;
                // in-memory-with-delete needs a delete-record column.
                String validDocIdsType = taskTypeConfig.getOrDefault("validDocIdsType", "snapshot");
                if (validDocIdsType.equals(ValidDocIdsType.SNAPSHOT.toString())) {
                    UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
                    Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
                    Preconditions.checkState(upsertConfig.isEnableSnapshot(), String.format(
                        "'enableSnapshot' from UpsertConfig must be enabled for UpsertCompactionTask with validDocIdsType = "
                            + "%s", validDocIdsType));
                } else if (validDocIdsType.equals(ValidDocIdsType.IN_MEMORY_WITH_DELETE.toString())) {
                    UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
                    Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
                    Preconditions.checkNotNull(upsertConfig.getDeleteRecordColumn(), String.format(
                        "deleteRecordColumn must be provided for " + "UpsertCompactionTask with validDocIdsType = %s",
                        validDocIdsType));
                }
            }
        }
    }
}
|
// Exercises UpsertCompactionTask validation: a fully valid config, boundary
// threshold values (0%), and each invalid combination that must throw.
@Test
public void testUpsertCompactionTaskConfig() {
    Schema schema =
        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
            .addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS")
            .setPrimaryKeyColumns(Lists.newArrayList("myCol")).build();
    Map<String, String> upsertCompactionTaskConfig =
        ImmutableMap.of("bufferTimePeriod", "5d", "invalidRecordsThresholdPercent", "1", "invalidRecordsThresholdCount",
            "1");
    UpsertConfig upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
    upsertConfig.setEnableSnapshot(true);
    TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(upsertConfig)
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    // Valid config: must not throw.
    TableConfigUtils.validateTaskConfigs(tableConfig, schema);
    // test with invalidRecordsThresholdPercents as 0 (lower boundary is legal)
    upsertCompactionTaskConfig = ImmutableMap.of("invalidRecordsThresholdPercent", "0");
    TableConfig zeroPercentTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(upsertConfig)
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    TableConfigUtils.validateTaskConfigs(zeroPercentTableConfig, schema);
    // test with invalid invalidRecordsThresholdPercents as -1 and 110
    upsertCompactionTaskConfig = ImmutableMap.of("invalidRecordsThresholdPercent", "-1");
    TableConfig negativePercentTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(upsertConfig)
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    Assert.assertThrows(IllegalStateException.class,
        () -> TableConfigUtils.validateTaskConfigs(negativePercentTableConfig, schema));
    upsertCompactionTaskConfig = ImmutableMap.of("invalidRecordsThresholdPercent", "110");
    TableConfig hundredTenPercentTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL))
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    Assert.assertThrows(IllegalStateException.class,
        () -> TableConfigUtils.validateTaskConfigs(hundredTenPercentTableConfig, schema));
    // test with invalid invalidRecordsThresholdCount (must be >= 1)
    upsertCompactionTaskConfig = ImmutableMap.of("invalidRecordsThresholdCount", "0");
    TableConfig invalidCountTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL))
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    Assert.assertThrows(IllegalStateException.class,
        () -> TableConfigUtils.validateTaskConfigs(invalidCountTableConfig, schema));
    // test without invalidRecordsThresholdPercent or invalidRecordsThresholdCount
    upsertCompactionTaskConfig = ImmutableMap.of("bufferTimePeriod", "5d");
    TableConfig invalidTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
        .setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL))
        .setTaskConfig(new TableTaskConfig(ImmutableMap.of("UpsertCompactionTask", upsertCompactionTaskConfig)))
        .build();
    Assert.assertThrows(IllegalStateException.class,
        () -> TableConfigUtils.validateTaskConfigs(invalidTableConfig, schema));
}
|
/**
 * Starts execution of the given tasklets, partitioning them into cooperative
 * and blocking groups (the cooperative check runs under the job classloader),
 * and returns a future tracking overall completion.
 *
 * @param tasklets           tasklets to execute
 * @param cancellationFuture future whose cancellation aborts the execution
 * @param jobClassLoader     classloader to use for tasklet code
 * @return future completed when all tasklets finish (or fail)
 */
CompletableFuture<Void> beginExecute(
        @Nonnull List<? extends Tasklet> tasklets,
        @Nonnull CompletableFuture<Void> cancellationFuture,
        @Nonnull ClassLoader jobClassLoader
) {
    final ExecutionTracker executionTracker = new ExecutionTracker(tasklets.size(), cancellationFuture);
    try {
        final Map<Boolean, List<Tasklet>> byCooperation =
                tasklets.stream().collect(partitioningBy(
                        tasklet -> doWithClassLoader(jobClassLoader, tasklet::isCooperative)
                ));
        submitCooperativeTasklets(executionTracker, jobClassLoader, byCooperation.get(true));
        submitBlockingTasklets(executionTracker, jobClassLoader, byCooperation.get(false));
    } catch (Throwable t) {
        // Any submission failure is routed into the tracker future rather than
        // thrown, so callers observe failures uniformly through the future.
        executionTracker.future.internalCompleteExceptionally(t);
    }
    return executionTracker.future;
}
|
// Cancellation must not complete the execution future before the tasklets
// themselves have actually finished.
@Test
public void when_nonBlockingCancelled_then_doneCallBackFiredAfterActualDone() {
    // Given
    CountDownLatch proceedLatch = new CountDownLatch(1);
    final List<MockTasklet> tasklets =
            Stream.generate(() -> new MockTasklet().waitOnLatch(proceedLatch).callsBeforeDone(Integer.MAX_VALUE))
                    .limit(100).collect(toList());
    // When
    CompletableFuture<Void> f = tes.beginExecute(tasklets, cancellationFuture, classLoader);
    cancellationFuture.cancel(true);
    // Then
    assertFalse("future should not be completed until tasklets are completed.", f.isDone());
    // Release the tasklets; only now may the future complete.
    proceedLatch.countDown();
    assertTrueEventually(() -> assertTrue("future should be completed eventually", f.isDone()));
    assertThrows(CancellationException.class, cancellationFuture::get);
}
|
@Override
public boolean next() throws SQLException {
    if (orderByValuesQueue.isEmpty()) {
        return false;
    }
    // The queue head is already positioned on the first call, so just report it.
    if (isFirstNext) {
        isFirstNext = false;
        return true;
    }
    // Advance the current head; re-queue it only if it still has rows, letting
    // the priority queue pick the next smallest ordered value.
    OrderByValue head = orderByValuesQueue.poll();
    if (head.next()) {
        orderByValuesQueue.offer(head);
    }
    if (orderByValuesQueue.isEmpty()) {
        return false;
    }
    setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult());
    return true;
}
|
@Test
void assertNextForCaseInsensitive() throws SQLException {
List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class), mock(QueryResult.class), mock(QueryResult.class));
for (int i = 0; i < 3; i++) {
QueryResultMetaData metaData = mock(QueryResultMetaData.class);
when(queryResults.get(i).getMetaData()).thenReturn(metaData);
when(metaData.getColumnName(1)).thenReturn("col1");
when(metaData.getColumnName(2)).thenReturn("col2");
}
when(queryResults.get(0).next()).thenReturn(true, false);
when(queryResults.get(0).getValue(2, Object.class)).thenReturn("b");
when(queryResults.get(1).next()).thenReturn(true, true, false);
when(queryResults.get(1).getValue(2, Object.class)).thenReturn("a", "a", "B", "B");
when(queryResults.get(2).next()).thenReturn(true, false);
when(queryResults.get(2).getValue(2, Object.class)).thenReturn("A");
ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
MergedResult actual = resultMerger.merge(queryResults, selectStatementContext, createDatabase(), mock(ConnectionContext.class));
assertTrue(actual.next());
assertThat(actual.getValue(2, Object.class).toString(), is("a"));
assertTrue(actual.next());
assertThat(actual.getValue(2, Object.class).toString(), is("A"));
assertTrue(actual.next());
assertThat(actual.getValue(2, Object.class).toString(), is("B"));
assertTrue(actual.next());
assertThat(actual.getValue(2, Object.class).toString(), is("b"));
assertFalse(actual.next());
}
|
/**
 * Reports the health of every registered server thread pool. A pool is
 * flagged WARN when its active count is within one thread of its maximum.
 * The exact message format is relied upon by tests — do not change it.
 *
 * @return UNKNOWN when no pools are registered, otherwise OK or WARN with a
 *         per-pool status message
 */
@Override
public Status check() {
    DataStore dataStore =
            applicationModel.getExtensionLoader(DataStore.class).getDefaultExtension();
    Map<String, Object> executors = dataStore.get(CommonConstants.EXECUTOR_SERVICE_COMPONENT_KEY);

    StringBuilder msg = new StringBuilder();
    Status.Level level = Status.Level.OK;
    for (Map.Entry<String, Object> entry : executors.entrySet()) {
        String port = entry.getKey();
        ExecutorService executor = (ExecutorService) entry.getValue();
        // Only ThreadPoolExecutor exposes the metrics we report on.
        if (executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor tp = (ThreadPoolExecutor) executor;
            boolean ok = tp.getActiveCount() < tp.getMaximumPoolSize() - 1;
            Status.Level lvl = Status.Level.OK;
            if (!ok) {
                // One saturated pool degrades the overall level to WARN.
                level = Status.Level.WARN;
                lvl = Status.Level.WARN;
            }

            if (msg.length() > 0) {
                msg.append(';');
            }
            msg.append("Pool status:")
                    .append(lvl)
                    .append(", max:")
                    .append(tp.getMaximumPoolSize())
                    .append(", core:")
                    .append(tp.getCorePoolSize())
                    .append(", largest:")
                    .append(tp.getLargestPoolSize())
                    .append(", active:")
                    .append(tp.getActiveCount())
                    .append(", task:")
                    .append(tp.getTaskCount())
                    .append(", service port: ")
                    .append(port);
        }
    }
    return msg.length() == 0 ? new Status(Status.Level.UNKNOWN) : new Status(level, msg.toString());
}
|
// A single-thread pool (active within 1 of max) must yield WARN; a 10-thread
// pool must stay OK; the overall level is the worst of the two.
@Test
void test() {
    DataStore dataStore =
            ExtensionLoader.getExtensionLoader(DataStore.class).getDefaultExtension();
    ExecutorService executorService1 = Executors.newFixedThreadPool(1);
    ExecutorService executorService2 = Executors.newFixedThreadPool(10);
    dataStore.put(CommonConstants.EXECUTOR_SERVICE_COMPONENT_KEY, "8888", executorService1);
    dataStore.put(CommonConstants.EXECUTOR_SERVICE_COMPONENT_KEY, "8889", executorService2);
    ThreadPoolStatusChecker threadPoolStatusChecker = new ThreadPoolStatusChecker(ApplicationModel.defaultModel());
    Status status = threadPoolStatusChecker.check();
    Assertions.assertEquals(status.getLevel(), Status.Level.WARN);
    Assertions.assertEquals(
            status.getMessage(),
            "Pool status:WARN, max:1, core:1, largest:0, active:0, task:0, service port: 8888;"
                    + "Pool status:OK, max:10, core:10, largest:0, active:0, task:0, service port: 8889");
    // reset
    executorService1.shutdown();
    executorService2.shutdown();
    dataStore.remove(CommonConstants.EXECUTOR_SERVICE_COMPONENT_KEY, "8888");
    dataStore.remove(CommonConstants.EXECUTOR_SERVICE_COMPONENT_KEY, "8889");
}
|
/**
 * Merges {@code paramsToMerge} into {@code params} in place. Literal MAP
 * params are merged recursively, literal STRING_MAP params are merged
 * entry-by-entry (incoming entries win), and all other params are replaced
 * via {@code buildMergedParamDefinition}.
 *
 * @param params        base parameters, mutated to hold the merged result
 * @param paramsToMerge parameters to merge in; no-op when null
 * @param context       merge context carrying mode/source information
 */
public static void mergeParams(
        Map<String, ParamDefinition> params,
        Map<String, ParamDefinition> paramsToMerge,
        MergeContext context) {
    if (paramsToMerge == null) {
        return;
    }
    // Visit the union of key sets; keys present only in the base need no work.
    Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
        .forEach(
            name -> {
                ParamDefinition paramToMerge = paramsToMerge.get(name);
                if (paramToMerge == null) {
                    return;
                }
                if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
                    // Recursive merge of nested literal maps; the child merge
                    // inherits the mode of the existing (or incoming) param.
                    Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
                    Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
                    mergeParams(
                        baseMap,
                        toMergeMap,
                        MergeContext.copyWithParentMode(
                            context, params.getOrDefault(name, paramToMerge).getMode()));
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, baseMap));
                } else if (paramToMerge.getType() == ParamType.STRING_MAP
                    && paramToMerge.isLiteral()) {
                    // Shallow merge: incoming string entries overwrite base ones.
                    Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
                    Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
                    baseMap.putAll(toMergeMap);
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, baseMap));
                } else {
                    // Non-map (or non-literal) params are replaced wholesale.
                    params.put(
                        name,
                        buildMergedParamDefinition(
                            name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
                }
            });
}
|
// Merging a nested MAP param must overwrite the inner value and validator
// while stamping the DEFINITION source on both the leaf and the map itself.
@Test
public void testMergeMapOverwrite() throws JsonProcessingException {
    Map<String, ParamDefinition> allParams =
        parseParamDefMap(
            "{'tomergemap': {'type': 'MAP', 'value': {'tomerge': {'type': 'STRING','value': 'hello', 'validator': '@notEmpty'}}}}");
    Map<String, ParamDefinition> paramsToMerge =
        parseParamDefMap(
            "{'tomergemap': {'type': 'MAP', 'value': {'tomerge':{'type': 'STRING', 'value': 'goodbye', 'validator': 'param != null'}}}}");
    ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);

    assertEquals(1, allParams.size());
    MapParamDefinition tomergemap = allParams.get("tomergemap").asMapParamDef();
    assertEquals("goodbye", tomergemap.getValue().get("tomerge").getValue());
    assertEquals(
        "param != null",
        tomergemap.getValue().get("tomerge").asStringParamDef().getValidator().getRule());
    assertEquals(
        ParamSource.DEFINITION,
        tomergemap.getValue().get("tomerge").asStringParamDef().getSource());
    assertEquals(ParamSource.DEFINITION, tomergemap.getSource());
}
|
@Override
public String getNodeHttpAddress(HttpServletRequest req, String appId,
    String appAttemptId, String containerId, String clusterId) {
    UserGroupInformation callerUGI = LogWebServiceUtils.getUser(req);
    // Fall back to the configured default cluster when none was supplied.
    String effectiveCluster = clusterId != null ? clusterId : defaultClusterid;
    MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
    queryParams.add("fields", "INFO");
    String entityPath = JOINER.join("clusters/", effectiveCluster, "/apps/", appId, "/entities/",
        TimelineEntityType.YARN_CONTAINER.toString(), "/", containerId);
    TimelineEntity containerEntity = null;
    try {
        if (callerUGI == null) {
            containerEntity = getEntity(entityPath, queryParams);
        } else {
            // Query the timeline service on behalf of the calling user.
            setUserName(queryParams, callerUGI.getShortUserName());
            containerEntity =
                callerUGI.doAs(new PrivilegedExceptionAction<TimelineEntity>() {
                    @Override public TimelineEntity run() throws Exception {
                        return getEntity(entityPath, queryParams);
                    }
                });
        }
    } catch (Exception e) {
        LogWebServiceUtils.rewrapAndThrowException(e);
    }
    if (containerEntity == null) {
        return null;
    }
    return (String) containerEntity.getInfo()
        .get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO);
}
|
@Test
public void testGetContainer() {
    // The container entity's INFO field must resolve to the node HTTP address.
    String address = logWebService
        .getNodeHttpAddress(request, appId.toString(), null, cId.toString(),
            null);
    Assert.assertEquals(this.nodeHttpAddress, address);
}
|
public Class<?> getClass(String name, Class<?> defaultValue) {
    // Absent property: fall back to the caller-supplied default.
    String className = getTrimmed(name);
    if (className == null) {
        return defaultValue;
    }
    try {
        return getClassByName(className);
    } catch (ClassNotFoundException e) {
        // A configured-but-unloadable class is a deployment error, not input.
        throw new RuntimeException(e);
    }
}
|
@Test
public void testGetClass() throws IOException {
    out=new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("test.class1", "java.lang.Integer");
    // Surrounding whitespace must be trimmed before class lookup.
    appendProperty("test.class2", " java.lang.Integer ");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    assertEquals("java.lang.Integer",
        conf.getClass("test.class1", null).getCanonicalName());
    assertEquals("java.lang.Integer",
        conf.getClass("test.class2", null).getCanonicalName());
}
|
/**
 * Evaluates this expression against a single row.
 *
 * @param data the row to test
 * @return true if the row satisfies the expression
 */
public boolean eval(StructLike data) {
    return new EvalVisitor().eval(data);
}
|
@Test
public void testOr() {
    Evaluator evaluator = new Evaluator(STRUCT, or(equal("x", 7), notNull("z")));
    assertThat(evaluator.eval(TestHelpers.Row.of(7, 0, 3))).as("7, 3 => true").isTrue();
    assertThat(evaluator.eval(TestHelpers.Row.of(8, 0, 3))).as("8, 3 => true").isTrue();
    assertThat(evaluator.eval(TestHelpers.Row.of(7, 0, null))).as("7, null => true").isTrue();
    assertThat(evaluator.eval(TestHelpers.Row.of(8, 0, null))).as("8, null => false").isFalse();

    // or(equal(s4.i, 7), notNull(s4.i)): whenever s4.i is present the notNull
    // branch holds, so every case below evaluates to true.
    Evaluator structEvaluator =
        new Evaluator(STRUCT, or(equal("s1.s2.s3.s4.i", 7), notNull("s1.s2.s3.s4.i")));
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    7,
                    0,
                    3,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
        .as("7, 7 => true")
        .isTrue();
    // Fixed description: equal(8,7) is false but notNull(8) is true, so the
    // disjunction is true (the old label "8, 8 => false" contradicted isTrue()).
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    8,
                    0,
                    3,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(8)))))))
        .as("8, notnull => true")
        .isTrue();
    // Fixed description: s4.i == 7 makes the equal branch true regardless of
    // the null z column (the old label "7, notnull => false" contradicted isTrue()).
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    7,
                    0,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
        .as("7, notnull => true")
        .isTrue();
}
|
/**
 * Advances the timer. Returns EXPIRED past the event horizon, NOT_READY if
 * the next event time has not arrived, otherwise schedules the next event
 * with an exponentially growing (capped) interval and returns READY.
 *
 * Fix: the original read {@code System.currentTimeMillis()} three separate
 * times, so the expiry check, readiness check, and next-event scheduling
 * could observe different clock values; read the clock once instead.
 *
 * @return the timer state for this tick
 */
public Result tick() {
    final long now = System.currentTimeMillis();
    if (now >= mLastEventHorizonMs) {
        return Result.EXPIRED;
    }
    if (now < mNextEventMs) {
        return Result.NOT_READY;
    }
    mNextEventMs = now + mIntervalMs;
    long next = Math.min(mIntervalMs * 2, mMaxIntervalMs);
    // Account for overflow.
    if (next < mIntervalMs) {
        next = Integer.MAX_VALUE;
    }
    mIntervalMs = next;
    mNumEvents++;
    return Result.READY;
}
|
@Test(timeout = 2000)
public void backoff() {
    int n = 10;
    ExponentialTimer timer = new ExponentialTimer(1, 1000, 0, 1000);
    long start = System.currentTimeMillis();
    for (int i = 0; i < n; i++) {
        while (timer.tick() == ExponentialTimer.Result.NOT_READY) {
            CommonUtils.sleepMs(10);
        }
        long now = System.currentTimeMillis();
        // Intervals double (1, 2, 4, ...), so at least (2^i - 1) ms must have
        // elapsed before event i. The original wrote `1 << i - 1`, which parses
        // as `1 << (i - 1)` and at i == 0 shifts by -1 (i.e. 1 << 31, negative),
        // making the assertion vacuous; parenthesize to assert the real bound.
        assertTrue(now - start >= (1 << i) - 1);
    }
}
|
public boolean isEnabled() {
    // The feature counts as enabled only when the flag is on AND both OAuth
    // credentials are configured. The flag lookup throws if the default is missing.
    boolean flagEnabled = config.getBoolean(ENABLED).orElseThrow(DEFAULT_VALUE_MISSING);
    return flagEnabled && clientId() != null && clientSecret() != null;
}
|
@Test
public void is_enabled_always_return_false_when_client_secret_is_null() {
    // Flag and client id are set, but the secret is missing — must be disabled.
    settings.setProperty("sonar.auth.bitbucket.enabled", true);
    settings.setProperty("sonar.auth.bitbucket.clientId.secured", "id");
    settings.setProperty("sonar.auth.bitbucket.clientSecret.secured", (String) null);
    assertThat(underTest.isEnabled()).isFalse();
}
|
/**
 * Reads OpenAPI annotations from the given class using default settings
 * (resolved application path, no parent context, fresh scan state).
 *
 * @param cls annotated resource class to scan
 * @return the resulting OpenAPI model
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
// Verifies that a parameter example referencing a component example ($ref)
// serializes alongside inline examples in the expected YAML.
@Test(description = "Example with ref")
public void testExampleWithRef() {
    Components components = new Components();
    components.addExamples("Id", new Example().description("Id Example").summary("Id Example").value("1"));
    OpenAPI oas = new OpenAPI()
            .info(new Info().description("info"))
            .components(components);
    Reader reader = new Reader(oas);
    OpenAPI openAPI = reader.read(RefExamplesResource.class);
    // Expected serialization, including the $ref into #/components/examples/Id.
    String yaml = "openapi: 3.0.1\n" +
            "info:\n" +
            "  description: info\n" +
            "paths:\n" +
            "  /example:\n" +
            "    post:\n" +
            "      description: subscribes a client to updates relevant to the requestor's account\n" +
            "      operationId: subscribe\n" +
            "      parameters:\n" +
            "      - name: subscriptionId\n" +
            "        in: path\n" +
            "        required: true\n" +
            "        style: simple\n" +
            "        schema:\n" +
            "          type: string\n" +
            "          description: Schema\n" +
            "          example: Subscription example\n" +
            "        examples:\n" +
            "          subscriptionId_1:\n" +
            "            summary: Subscription number 12345\n" +
            "            description: subscriptionId_1\n" +
            "            value: 12345\n" +
            "            externalValue: Subscription external value 1\n" +
            "            $ref: '#/components/examples/Id'\n" +
            "        example: example\n" +
            "      requestBody:\n" +
            "        content:\n" +
            "          '*/*':\n" +
            "            schema:\n" +
            "              type: integer\n" +
            "              format: int32\n" +
            "      responses:\n" +
            "        default:\n" +
            "          description: default response\n" +
            "          content:\n" +
            "            '*/*':\n" +
            "              schema:\n" +
            "                $ref: '#/components/schemas/SubscriptionResponse'\n" +
            "components:\n" +
            "  schemas:\n" +
            "    SubscriptionResponse:\n" +
            "      type: object\n" +
            "      properties:\n" +
            "        subscriptionId:\n" +
            "          type: string\n" +
            "  examples:\n" +
            "    Id:\n" +
            "      summary: Id Example\n" +
            "      description: Id Example\n" +
            "      value: \"1\"\n";
    SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
/**
 * Returns the event name associated with this exposure data.
 *
 * @return the event name
 */
public String getEvent() {
    return event;
}
|
@Test
public void getEvent() {
    // The constructor argument must be exposed verbatim via getEvent().
    SAExposureData data = new SAExposureData("ExposeEvent");
    Assert.assertEquals("ExposeEvent", data.getEvent());
}
|
/**
 * Renders a collection as {@code [e1, e2, ...]}, guarding against self-referencing
 * collections via {@code CycleDependencyHandler} and printing a self-reference as
 * {@code (this SimpleName)} instead of recursing.
 *
 * @param col the collection to render; may be {@code null}
 * @return {@code "null"} for a null collection, {@code "[]"} for an empty one,
 *         otherwise the bracketed, comma-separated element rendering
 */
public static String toString(final Collection<?> col) {
    if (col == null) {
        return "null";
    }
    if (col.isEmpty()) {
        return "[]";
    }
    // wrap() tracks the collection to break rendering cycles across nested toString calls.
    return CycleDependencyHandler.wrap(col, o -> {
        StringBuilder buf = new StringBuilder(32);
        buf.append('[');
        boolean first = true;
        for (Object item : col) {
            if (!first) {
                buf.append(", ");
            }
            first = false;
            if (item == col) {
                // Direct self-reference: print a marker rather than recursing forever.
                buf.append("(this ").append(item.getClass().getSimpleName()).append(")");
            } else {
                buf.append(StringUtils.toString(item));
            }
        }
        return buf.append(']').toString();
    });
}
|
/**
 * Covers the three rendering cases of {@code CollectionUtils.toString}:
 * null input, empty collection, and a collection containing itself.
 */
@Test
public void testCollectionToString() {
    List<String> nullCollection = null;
    List<String> emptyCollection = new ArrayList<>();
    List<Object> filledCollection = new ArrayList<>();
    filledCollection.add("Foo");
    filledCollection.add("Bar");
    // Self-reference: must render as "(this ArrayList)" instead of recursing.
    filledCollection.add(filledCollection);
    Assertions.assertEquals("null", CollectionUtils.toString(nullCollection));
    Assertions.assertEquals("[]", CollectionUtils.toString(emptyCollection));
    Assertions.assertEquals("[\"Foo\", \"Bar\", (this ArrayList)]", CollectionUtils.toString(filledCollection));
}
|
/**
 * Refreshes the resolver's cached node and NEP references from the given context.
 * Exposed as protected only so tests can drive it directly.
 *
 * @param resolver the resolver whose caches are updated
 * @param context  the DCS context the nodes/NEPs are extracted from
 */
@VisibleForTesting
protected void updateCache(DefaultTapiResolver resolver, DefaultContext context) {
    updateNodes(resolver, getNodes(context));
    updateNeps(resolver, getNeps(context));
}
|
/**
 * Verifies updateCache() pushes node refs for a topology with two nodes and no NEPs:
 * the resolver must receive both node refs and an empty NEP ref list.
 */
@Test
public void testUpdateCacheWithoutNep() {
    DcsBasedTapiDataProducer dataProvider = new DcsBasedTapiDataProducer();
    DefaultTapiResolver mockResolver = EasyMock.createMock(DefaultTapiResolver.class);
    topology.addToNode(node1);
    topology.addToNode(node2);
    List<TapiNodeRef> expectNodes = Arrays.asList(
            DcsBasedTapiObjectRefFactory.create(topology, node1).setDeviceId(did1),
            DcsBasedTapiObjectRefFactory.create(topology, node2).setDeviceId(did2)
    );
    List<TapiNepRef> expectNeps = Collections.emptyList();
    // Record the expected resolver interactions before replaying the mock.
    mockResolver.addNodeRefList(expectNodes);
    expectLastCall().once();
    mockResolver.addNepRefList(expectNeps);
    expectLastCall().once();
    replay(mockResolver);
    dataProvider.updateCache(mockResolver, context);
    verify(mockResolver);
}
|
/**
 * Selects the sharding route engine for a statement by SQL statement category.
 * The dispatch order matters: TCL first (database broadcast), then DDL (with a
 * special case for cursor-capable contexts), DAL, DCL, and finally DQL as default.
 *
 * @return the route engine appropriate for the statement type
 */
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
                                              final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    // Transaction control statements are broadcast to every database.
    if (sqlStatement instanceof TCLStatement) {
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        // Cursor-related DDL routes through the cursor engine rather than plain DDL routing.
        if (sqlStatementContext instanceof CursorAvailable) {
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    // Anything else is treated as a query (DQL).
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
|
/**
 * A DCL statement that touches no single table should route via instance broadcast.
 */
@Test
void assertNewInstanceForDCLForNoSingleTable() {
    DCLStatement dclStatement = mock(DCLStatement.class);
    when(sqlStatementContext.getSqlStatement()).thenReturn(dclStatement);
    QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    ShardingRouteEngine actual =
            ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
    assertThat(actual, instanceOf(ShardingInstanceBroadcastRoutingEngine.class));
}
|
/**
 * Runs {@code execute()} with retries and a linearly growing delay.
 * Only exceptions whose exact class matches {@code retryExceptionType} are retried;
 * any other exception propagates immediately. After {@code MAX_RETRIES} attempts the
 * matching exception is rethrown. The delay before attempt n+1 is {@code DELAY * n}.
 *
 * @return the result of the first successful {@code execute()} call
 * @throws Exception the last retried exception, or any non-retryable exception
 */
public T runWithDelay() throws Exception {
    // Iterative equivalent of the retry recursion: loop until success or exhaustion.
    while (true) {
        try {
            return execute();
        } catch (Exception e) {
            // Exact class match only — subclasses of retryExceptionType are NOT retried.
            if (!e.getClass().equals(retryExceptionType)) {
                throw e;
            }
            tries++;
            if (MAX_RETRIES == tries) {
                throw e;
            }
            // Linear back-off: delay grows with the attempt count.
            Thread.sleep((long) DELAY * tries);
        }
    }
}
|
/**
 * A permanently failing retryable must exhaust all retries, rethrow the original
 * exception, and accumulate at least the total back-off delay before giving up.
 * NOTE(review): the 180s lower bound implies MAX_RETRIES/DELAY sum to >= 180s — confirm
 * against the Retry class constants; this test takes minutes of wall time by design.
 */
@Test
public void testRetryFailureWithDelayMoreThanTimeout() {
    Retry<Void> retriable = new Retry<Void>(NullPointerException.class) {
        @Override
        public Void execute() {
            // Always fails with the retryable type, forcing full retry exhaustion.
            throw new NullPointerException();
        }
    };
    long startTime = System.currentTimeMillis();
    try {
        retriable.runWithDelay();
        Assert.fail();
    } catch (Exception e) {
        Assert.assertEquals(NullPointerException.class, e.getClass());
        // The cumulative sleep across retries must be at least 180 seconds.
        Assert.assertTrue(System.currentTimeMillis() - startTime >= 180 * 1000);
    }
}
|
/**
 * Computes the row count to push down after pagination rewrite.
 * When the statement needs "all rows" the count is capped at {@code Integer.MAX_VALUE};
 * for a LIMIT-style segment the offset is folded into the count so enough rows reach
 * the merger; otherwise the plain row count is used.
 *
 * @param selectStatementContext the select statement being routed
 * @return the revised row count for the rewritten query
 */
public long getRevisedRowCount(final SelectStatementContext selectStatementContext) {
    if (isMaxRowCount(selectStatementContext)) {
        return Integer.MAX_VALUE;
    }
    if (rowCountSegment instanceof LimitValueSegment) {
        // LIMIT semantics: fetch offset + count rows so skipping the offset still yields count rows.
        return actualOffset + actualRowCount;
    }
    return actualRowCount;
}
|
/**
 * Smoke test: exercises the Oracle code path of getRevisedRowCount without asserting
 * a value. NOTE(review): no assertion here — presumably the shared helper it calls
 * performs the verification; confirm, otherwise this only checks "does not throw".
 */
@Test
void assertGetRevisedRowCountForOracle() {
    getRevisedRowCount(new OracleSelectStatement());
}
|
/**
 * Converts a Flink table {@code Expression} into an Iceberg filter {@code Expression}.
 * Only {@code CallExpression}s whose function maps to a supported operation are
 * converted; anything else yields {@code Optional.empty()} so the caller can fall
 * back to unfiltered scanning.
 *
 * @param flinkExpression the Flink expression to convert
 * @return the equivalent Iceberg expression, or empty if unsupported
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }

  CallExpression call = (CallExpression) flinkExpression;
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);

      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);

      // Comparison ops pass a flipped-operand variant so `lit < field` becomes `field > lit`.
      case LT:
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);

      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);

      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);

      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);

      case EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              // NaN never compares equal, so equality against NaN maps to isNaN.
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);

      case NOT_EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              // Symmetric NaN handling for the not-equal case.
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);

      case NOT:
        // Recursively convert the negated child expression.
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);

      case AND:
        return convertLogicExpression(Expressions::and, call);

      case OR:
        return convertLogicExpression(Expressions::or, call);

      case STARTS_WITH:
        return convertLike(call);
    }
  }

  return Optional.empty();
}
|
/**
 * notEqual conversion must be symmetric: `field != lit` and `lit != field` both
 * produce the same Iceberg notEqual predicate.
 */
@Test
public void testNotEquals() {
  for (Pair<String, Object> pair : FIELD_VALUE_LIST) {
    UnboundPredicate<?> expected =
        org.apache.iceberg.expressions.Expressions.notEqual(pair.first(), pair.second());

    // field on the left of the comparison
    Optional<org.apache.iceberg.expressions.Expression> actual =
        FlinkFilters.convert(
            resolve(Expressions.$(pair.first()).isNotEqual(Expressions.lit(pair.second()))));
    assertThat(actual).isPresent();
    assertPredicatesMatch(expected, actual.get());

    // literal on the left — converter must flip operands to the same predicate
    Optional<org.apache.iceberg.expressions.Expression> actual1 =
        FlinkFilters.convert(
            resolve(Expressions.lit(pair.second()).isNotEqual(Expressions.$(pair.first()))));
    assertThat(actual1).isPresent();
    assertPredicatesMatch(expected, actual1.get());
  }
}
|
/**
 * Creates a single-element local-property list containing a grouping over the given columns.
 *
 * @param columns the columns forming the group
 * @return an immutable list with one {@code GroupingProperty}
 */
public static <T> List<LocalProperty<T>> grouped(Collection<T> columns)
{
    return ImmutableList.of(new GroupingProperty<>(columns));
}
|
/**
 * Matching semantics for grouped({a,b}) followed by grouped({c}): checks which
 * desired groupings are satisfied and what residual grouping (if any) remains.
 */
@Test
public void testGroupedDoubleThenSingle()
{
    List<LocalProperty<String>> actual = builder()
            .grouped("a", "b")
            .grouped("c")
            .build();

    // {a,b,c} satisfied; extra column d remains unmatched.
    assertMatch(
            actual,
            builder().grouped("a", "b", "c", "d").build(),
            Optional.of(grouped("d")));

    // exact prefix groupings are fully satisfied
    assertMatch(
            actual,
            builder().grouped("a", "b", "c").build(),
            Optional.empty());

    assertMatch(
            actual,
            builder().grouped("a", "b").build(),
            Optional.empty());

    // {a,c} does not align with the actual grouping order, so it is residual in full.
    assertMatch(
            actual,
            builder().grouped("a", "c").build(),
            Optional.of(grouped("a", "c")));

    assertMatch(
            actual,
            builder().grouped("a").build(),
            Optional.of(grouped("a")));

    assertMatch(
            actual,
            builder().grouped("c").build(),
            Optional.of(grouped("c")));
}
|
/**
 * Lazily creates and caches the SSL server socket factory from the configured
 * SSL settings. Subsequent calls return the cached instance.
 * NOTE(review): the lazy init is not synchronized — presumably this is only called
 * from a single thread during receiver startup; confirm before concurrent use.
 *
 * @return the configured SSL server socket factory
 * @throws Exception if the SSL context cannot be created
 */
@Override
protected ServerSocketFactory getServerSocketFactory() throws Exception {
  if (socketFactory == null) {
    SSLContext sslContext = getSsl().createContext(this);
    SSLParametersConfiguration parameters = getSsl().getParameters();
    parameters.setContext(getContext());
    socketFactory = new ConfigurableSSLServerSocketFactory(parameters, sslContext.getServerSocketFactory());
  }
  return socketFactory;
}
|
/**
 * The factory must be non-null, and building it must both create the SSL context
 * and inject it into the SSL parameters configuration.
 */
@Test
public void testGetServerSocketFactory() throws Exception {
  ServerSocketFactory socketFactory = receiver.getServerSocketFactory();
  assertNotNull(socketFactory);
  assertTrue(ssl.isContextCreated());
  assertTrue(parameters.isContextInjected());
}
|
/**
 * Reconciles a system-setting ConfigMap: ignores non-system names, otherwise
 * ensures the finalizer is present, reconciles route rules, and applies system
 * customization. Always returns a non-requeueing result.
 *
 * @param request the reconcile request carrying the resource name
 * @return a result that never requeues
 */
@Override
public Result reconcile(Request request) {
    String name = request.name();
    // Only the designated system setting ConfigMap is handled by this reconciler.
    if (!isSystemSetting(name)) {
        return new Result(false, null);
    }
    client.fetch(ConfigMap.class, name)
        .ifPresent(configMap -> {
            addFinalizerIfNecessary(configMap);
            routeRuleReconciler.reconcile(name);
            customizeSystem(name);
        });
    return new Result(false, null);
}
|
/**
 * Changing the categories route rule must update the ConfigMap (both the live rules
 * and the old-rules annotation) and publish exactly one event.
 */
@Test
void reconcileCategoriesRule() {
    ConfigMap configMap = systemConfigMapForRouteRule(rules -> {
        rules.setCategories("categories-new");
        return rules;
    });
    when(environmentFetcher.getConfigMapBlocking()).thenReturn(Optional.of(configMap));
    when(client.fetch(eq(ConfigMap.class), eq(SystemSetting.SYSTEM_CONFIG)))
        .thenReturn(Optional.of(configMap));

    systemSettingReconciler.reconcile(new Reconciler.Request(SystemSetting.SYSTEM_CONFIG));

    // Capture the updated ConfigMap to inspect both rule locations.
    ArgumentCaptor<ConfigMap> captor = ArgumentCaptor.forClass(ConfigMap.class);
    verify(client, times(1)).update(captor.capture());
    ConfigMap updatedConfigMap = captor.getValue();
    assertThat(rulesFrom(updatedConfigMap).getCategories()).isEqualTo("categories-new");
    assertThat(oldRulesFromAnno(updatedConfigMap).getCategories()).isEqualTo("categories-new");

    verify(applicationContext, times(1)).publishEvent(any());
}
|
/**
 * Polls the local cache for the next message and converts it to the OMS format.
 *
 * @return the next message, or {@code null} if the cache is empty
 */
@Override
public Message receive() {
    MessageExt polled = localMessageCache.poll();
    if (polled == null) {
        return null;
    }
    return OMSUtil.msgConvert(polled);
}
|
/**
 * receive() must convert the cached MessageExt into an OMS message, preserving
 * the message id (as a sys header) and the raw body bytes.
 */
@Test
public void testPoll() {
    final byte[] testBody = new byte[] {'a', 'b'};
    MessageExt consumedMsg = new MessageExt();
    consumedMsg.setMsgId("NewMsgId");
    consumedMsg.setBody(testBody);
    consumedMsg.putUserProperty(NonStandardKeys.MESSAGE_DESTINATION, "TOPIC");
    consumedMsg.setTopic(queueName);
    when(localMessageCache.poll()).thenReturn(consumedMsg);
    Message message = consumer.receive();
    assertThat(message.sysHeaders().getString(Message.BuiltinKeys.MESSAGE_ID)).isEqualTo("NewMsgId");
    assertThat(((BytesMessage) message).getBody(byte[].class)).isEqualTo(testBody);
}
|
/**
 * Serializes a Connect value (optionally with its schema envelope) to JSON bytes.
 * A null schema with a null value is a tombstone and serializes to {@code null}.
 *
 * @param topic  the topic the data is destined for (passed to the serializer)
 * @param schema the Connect schema, may be null for schemaless data
 * @param value  the Connect value, may be null
 * @return serialized JSON bytes, or null for a null schema/value pair
 * @throws DataException if JSON serialization fails
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema == null && value == null) {
        return null;
    }
    // With schemas enabled the payload is wrapped in a {schema, payload} envelope.
    JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
    try {
        return serializer.serialize(topic, jsonValue);
    } catch (SerializationException e) {
        throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
    }
}
|
/**
 * A map with string keys must serialize as a JSON object (not an array of pairs),
 * with the envelope carrying the full map schema.
 */
@Test
public void mapToJsonStringKeys() {
    Schema stringIntMap = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).build();
    Map<String, Integer> input = new HashMap<>();
    input.put("key1", 12);
    input.put("key2", 15);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, stringIntMap, input));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"map\", \"keys\": { \"type\" : \"string\", \"optional\": false }, \"values\": { \"type\" : \"int32\", \"optional\": false }, \"optional\": false }"),
            converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    // ObjectNode equality is key-order independent, so HashMap iteration order is safe here.
    assertEquals(JsonNodeFactory.instance.objectNode().put("key1", 12).put("key2", 15),
            converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}
|
/**
 * Derives the default schema name from the connection's user name, upper-cased.
 * Any SQL error (or a null user name) yields {@code null} rather than propagating.
 *
 * @param connection the connection to inspect
 * @return the upper-cased user name, or {@code null} if unavailable
 */
@Override
public String getSchema(final Connection connection) {
    try {
        String userName = connection.getMetaData().getUserName();
        if (null == userName) {
            return null;
        }
        return userName.toUpperCase();
    } catch (final SQLException ignored) {
        // Metadata lookup failures are deliberately swallowed: no schema is derivable.
        return null;
    }
}
|
/**
 * A SQLException during metadata access must be swallowed and map to a null schema.
 */
@Test
void assertGetSchemaIfExceptionThrown() throws SQLException {
    Connection connection = mock(Connection.class, RETURNS_DEEP_STUBS);
    when(connection.getMetaData().getUserName()).thenThrow(SQLException.class);
    assertNull(dialectDatabaseMetaData.getSchema(connection));
}
|
/**
 * Closes the coordinator within the given timer budget: commits offsets synchronously
 * if auto-commit is on, then drains any pending async commits (polling the client and
 * firing their callbacks) until done or the timer expires, and finally delegates to
 * the superclass close.
 *
 * @param timer bounds the total time spent closing
 */
public void close(final Timer timer) {
    // we do not need to re-enable wakeups since we are closing already
    client.disableWakeups();
    try {
        maybeAutoCommitOffsetsSync(timer);
        // Drain outstanding async commits so their callbacks are invoked before close.
        while (pendingAsyncCommits.get() > 0 && timer.notExpired()) {
            ensureCoordinatorReady(timer);
            client.poll(timer);
            invokeCompletedOffsetCommitCallbacks();
        }
    } finally {
        // Always run the superclass close, even if draining timed out or threw.
        super.close(timer);
    }
}
|
/**
 * Closing the coordinator while a member of a group must send a LeaveGroup request
 * for this consumer, even with a zero-duration timer.
 */
@Test
public void testLeaveGroupOnClose() {
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));

    final AtomicBoolean received = new AtomicBoolean(false);
    // Match and validate the LeaveGroup request the close path must issue.
    client.prepareResponse(body -> {
        received.set(true);
        LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body;
        return validateLeaveGroup(groupId, consumerId, leaveRequest);
    }, new LeaveGroupResponse(
        new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())));

    coordinator.close(time.timer(0));
    assertTrue(received.get());
}
|
/**
 * Chooses the execution schedule adaptively: small plans (at most the configured
 * eager-scheduling stage count) run all stages at once; larger plans are phased.
 *
 * @param session the session supplying the stage-count threshold
 * @param stages  the stages to schedule
 * @return a phased schedule for large plans, otherwise an all-at-once schedule
 */
@Override
public ExecutionSchedule createExecutionSchedule(Session session, Collection<StageExecutionAndScheduler> stages)
{
    return stages.size() > getMaxStageCountForEagerScheduling(session)
            ? new PhasedExecutionSchedule(stages)
            : new AllAtOnceExecutionSchedule(stages);
}
|
/**
 * With the eager-scheduling threshold set to 5 stages: 4 and 5 stages use
 * all-at-once scheduling, 6 switches to phased.
 */
@Test
public void testCreateExecutionSchedule()
{
    // Session whose only relevant property is max_stage_count_for_eager_scheduling = 5.
    Session session = testSessionBuilder(new SessionPropertyManager(new SystemSessionProperties(
            new QueryManagerConfig(),
            new TaskManagerConfig(),
            new MemoryManagerConfig(),
            new FeaturesConfig().setMaxStageCountForEagerScheduling(5),
            new NodeMemoryConfig(),
            new WarningCollectorConfig(),
            new NodeSchedulerConfig(),
            new NodeSpillConfig(),
            new TracingConfig(),
            new CompilerConfig(),
            new HistoryBasedOptimizationConfig()))).build();
    AdaptivePhasedExecutionPolicy policy = new AdaptivePhasedExecutionPolicy();

    Collection<StageExecutionAndScheduler> schedulers = getStageExecutionAndSchedulers(4);
    assertTrue(policy.createExecutionSchedule(session, schedulers) instanceof AllAtOnceExecutionSchedule);

    // At exactly the threshold the plan is still scheduled eagerly.
    schedulers = getStageExecutionAndSchedulers(5);
    assertTrue(policy.createExecutionSchedule(session, schedulers) instanceof AllAtOnceExecutionSchedule);

    schedulers = getStageExecutionAndSchedulers(6);
    assertTrue(policy.createExecutionSchedule(session, schedulers) instanceof PhasedExecutionSchedule);
}
|
/**
 * Maps the optional expose-externally property to its tri-state mode:
 * absent → AUTO, true → ENABLED, false → DISABLED.
 *
 * @param properties the discovery configuration properties
 * @return the resolved expose-externally mode
 */
private ExposeExternallyMode getExposeExternallyMode(Map<String, Comparable> properties) {
    Boolean exposeExternally = getOrNull(properties, KUBERNETES_SYSTEM_PREFIX, EXPOSE_EXTERNALLY);
    if (exposeExternally == null) {
        // Property not set: let the plugin decide automatically.
        return ExposeExternallyMode.AUTO;
    }
    return exposeExternally ? ExposeExternallyMode.ENABLED : ExposeExternallyMode.DISABLED;
}
|
/**
 * An explicit expose-externally=true property must resolve to ENABLED mode.
 */
@Test
public void kubernetesApiExposeExternally() {
    // given
    Map<String, Comparable> properties = createProperties();
    properties.put(KubernetesProperties.EXPOSE_EXTERNALLY.key(), true);

    // when
    KubernetesConfig config = new KubernetesConfig(properties);

    // then
    assertEquals(ExposeExternallyMode.ENABLED, config.getExposeExternallyMode());
}
|
/**
 * Extracts transitive metadata from request headers: any header whose name starts
 * with the SCT or Polaris transitive prefix contributes its first value, keyed by
 * the name with the prefix stripped. If a key matches both prefixes, the Polaris
 * entry wins (it is processed second), matching the original scan order.
 *
 * @param exchange the server exchange carrying the request headers
 * @return the resolved metadata key/value pairs (possibly empty)
 */
public static Map<String, String> resolve(ServerWebExchange exchange) {
    Map<String, String> metadata = new HashMap<>();
    HttpHeaders headers = exchange.getRequest().getHeaders();
    for (Map.Entry<String, List<String>> header : headers.entrySet()) {
        String headerName = header.getKey();
        if (StringUtils.isBlank(headerName)) {
            continue;
        }
        // resolve sct transitive header
        putIfTransitive(metadata, headerName, header.getValue(),
                MetadataConstant.SCT_TRANSITIVE_HEADER_PREFIX, MetadataConstant.SCT_TRANSITIVE_HEADER_PREFIX_LENGTH);
        // resolve polaris transitive header
        putIfTransitive(metadata, headerName, header.getValue(),
                MetadataConstant.POLARIS_TRANSITIVE_HEADER_PREFIX, MetadataConstant.POLARIS_TRANSITIVE_HEADER_PREFIX_LENGTH);
    }
    return metadata;
}

/**
 * Stores the first header value under the prefix-stripped key when the header name
 * matches the given transitive prefix (case-insensitively) and has at least one value.
 */
private static void putIfTransitive(Map<String, String> target, String headerName, List<String> values,
        String prefix, int prefixLength) {
    if (StringUtils.startsWithIgnoreCase(headerName, prefix) && !CollectionUtils.isEmpty(values)) {
        String sourceKey = StringUtils.substring(headerName, prefixLength);
        target.put(sourceKey, values.get(0));
    }
}
|
/**
 * An X-SCT-Metadata-Transitive-* header must be resolved with the prefix stripped,
 * yielding a single metadata entry.
 */
@Test
public void testSCTTransitiveMetadata() {
    MockServerHttpRequest.BaseBuilder<?> builder = MockServerHttpRequest.get("");
    builder.header("X-SCT-Metadata-Transitive-a", "test");
    MockServerWebExchange exchange = MockServerWebExchange.from(builder);
    Map<String, String> resolve = CustomTransitiveMetadataResolver.resolve(exchange);
    assertThat(resolve.size()).isEqualTo(1);
    // Key "a" is the header name minus the SCT transitive prefix.
    assertThat(resolve.get("a")).isEqualTo("test");
}
|
/**
 * Selects a resource-manager address. If an explicit context is given it is parsed
 * as {@code host:port} (throwing {@code IllegalArgumentException} for malformed
 * input); otherwise a host is chosen from the known resource managers that expose
 * a thrift port.
 *
 * @param addressSelectionContext optional explicit {@code host:port} string
 * @return the selected address, or empty if no candidate is available
 */
@Override
public Optional<SimpleAddress> selectAddress(Optional<String> addressSelectionContext)
{
    if (addressSelectionContext.isPresent()) {
        // Explicit context bypasses discovery; fromString validates the host:port format.
        return addressSelectionContext
                .map(HostAndPort::fromString)
                .map(SimpleAddress::new);
    }
    // Only resource managers advertising a thrift port are eligible candidates.
    List<HostAndPort> resourceManagers = internalNodeManager.getResourceManagers().stream()
            .filter(node -> node.getThriftPort().isPresent())
            .map(resourceManagerNode -> {
                HostAddress hostAndPort = resourceManagerNode.getHostAndPort();
                return HostAndPort.fromParts(hostAndPort.getHostText(), resourceManagerNode.getThriftPort().getAsInt());
            })
            .collect(toImmutableList());
    return hostSelector.apply(resourceManagers).map(SimpleAddress::new);
}
|
/**
 * A malformed host:port context ("host:123.456" — non-numeric port) must raise
 * IllegalArgumentException from HostAndPort parsing.
 */
@Test(expectedExceptions = IllegalArgumentException.class)
public void testAddressSelectionContextPresentWithInvalidAddress()
{
    InMemoryNodeManager internalNodeManager = new InMemoryNodeManager();
    RandomResourceManagerAddressSelector selector = new RandomResourceManagerAddressSelector(internalNodeManager);
    selector.selectAddress(Optional.of("host:123.456"));
}
|
/**
 * Serializes an XML node to the given writer using the specified charset and
 * indent width; delegates to the generic DOM-to-stream transform.
 *
 * @param node    the DOM node to serialize
 * @param writer  the destination writer
 * @param charset the output encoding declared in the XML header
 * @param indent  the indentation width in spaces
 */
public static void write(Node node, Writer writer, String charset, int indent) {
	transform(new DOMSource(node), new StreamResult(writer), charset, indent);
}
|
/**
 * Manual round-trip check: parse an XML string and write it to disk.
 * NOTE(review): @Disabled presumably because of the hard-coded Windows path
 * "e:/aaa.xml" and the lack of assertions — this only verifies no exception;
 * consider a temp file and a read-back assertion before re-enabling.
 */
@Test
@Disabled
public void writeTest() {
	final String result = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>"//
			+ "<returnsms>"//
			+ "<returnstatus>Success(成功)</returnstatus>"//
			+ "<message>ok</message>"//
			+ "<remainpoint>1490</remainpoint>"//
			+ "<taskID>885</taskID>"//
			+ "<successCounts>1</successCounts>"//
			+ "</returnsms>";
	final Document docResult = XmlUtil.parseXml(result);
	XmlUtil.toFile(docResult, "e:/aaa.xml", "utf-8");
}
|
/**
 * Validates these credentials against the protocol's own rules (double dispatch:
 * the protocol decides what constitutes valid credentials for the given options).
 *
 * @param protocol the protocol performing the validation
 * @param options  the login options in effect
 * @return true if the credentials are acceptable for the protocol
 */
public boolean validate(final Protocol protocol, final LoginOptions options) {
    return protocol.validate(this, options);
}
|
/**
 * The configured anonymous user name with a null password must validate against
 * an FTP protocol with default login options.
 */
@Test
public void testLoginAnonymous2() {
    Credentials credentials = new Credentials(PreferencesFactory.get().getProperty("connection.login.anon.name"),
        null);
    assertTrue(credentials.validate(new TestProtocol(Scheme.ftp), new LoginOptions()));
}
|
/**
 * Applies the selected source file attributes (ACLs, permissions, xattrs,
 * replication, owner/group, times) onto the target path, fetching the target's
 * current status only when some attribute actually needs comparing. Raw-namespace
 * xattrs can be preserved independently of the XATTR attribute flag.
 *
 * @param targetFS          filesystem holding the target path
 * @param path              target path to update
 * @param srcFileStatus     attributes captured from the source
 * @param attributes        which attributes to preserve (BLOCKSIZE/CHECKSUMTYPE are ignored)
 * @param preserveRawXattrs whether raw.* xattrs are copied even without XATTR
 * @throws IOException on any filesystem operation failure
 */
public static void preserve(FileSystem targetFS, Path path,
                            CopyListingFileStatus srcFileStatus,
                            EnumSet<FileAttribute> attributes,
                            boolean preserveRawXattrs) throws IOException {
  // strip out those attributes we don't need any more
  attributes.remove(FileAttribute.BLOCKSIZE);
  attributes.remove(FileAttribute.CHECKSUMTYPE);
  // If not preserving anything from FileStatus, don't bother fetching it.
  FileStatus targetFileStatus = attributes.isEmpty() ? null :
      targetFS.getFileStatus(path);
  String group = targetFileStatus == null ? null :
      targetFileStatus.getGroup();
  String user = targetFileStatus == null ? null :
      targetFileStatus.getOwner();
  // Deferred chown: owner and group changes are batched into one setOwner call below.
  boolean chown = false;

  if (attributes.contains(FileAttribute.ACL)) {
    List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
    List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
    if (!srcAcl.equals(targetAcl)) {
      targetFS.removeAcl(path);
      targetFS.setAcl(path, srcAcl);
    }
    // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
    if (srcFileStatus.getPermission().getStickyBit() !=
        targetFileStatus.getPermission().getStickyBit()) {
      targetFS.setPermission(path, srcFileStatus.getPermission());
    }
  } else if (attributes.contains(FileAttribute.PERMISSION) &&
    !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
    targetFS.setPermission(path, srcFileStatus.getPermission());
  }

  final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
  if (preserveXAttrs || preserveRawXattrs) {
    final String rawNS =
        StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
    Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
    Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
    if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
      for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
        String xattrName = entry.getKey();
        // raw.* xattrs are copied when preserveRawXattrs is set, even without XATTR.
        if (xattrName.startsWith(rawNS) || preserveXAttrs) {
          targetFS.setXAttr(path, xattrName, entry.getValue());
        }
      }
    }
  }

  // The replication factor can only be preserved for replicated files.
  // It is ignored when either the source or target file are erasure coded.
  if (attributes.contains(FileAttribute.REPLICATION) &&
      !targetFileStatus.isDirectory() &&
      !targetFileStatus.isErasureCoded() &&
      !srcFileStatus.isErasureCoded() &&
      srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
    targetFS.setReplication(path, srcFileStatus.getReplication());
  }

  if (attributes.contains(FileAttribute.GROUP) &&
      !group.equals(srcFileStatus.getGroup())) {
    group = srcFileStatus.getGroup();
    chown = true;
  }

  if (attributes.contains(FileAttribute.USER) &&
      !user.equals(srcFileStatus.getOwner())) {
    user = srcFileStatus.getOwner();
    chown = true;
  }

  if (chown) {
    targetFS.setOwner(path, user, group);
  }
  
  if (attributes.contains(FileAttribute.TIMES)) {
    targetFS.setTimes(path,
        srcFileStatus.getModificationTime(),
        srcFileStatus.getAccessTime());
  }
}
|
/**
 * Preserving only USER must copy the owner and leave permission, group, times,
 * and replication untouched on the destination.
 */
@Test
public void testPreserveUserOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  // Give src and dst deliberately different values for every attribute.
  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
|
/**
 * Static factory for a binary-expression template node.
 *
 * @param binaryOp the binary operator kind; must be one of the supported OP_CODES
 * @param lhs      the left operand template
 * @param rhs      the right operand template
 * @return the constructed UBinary
 * @throws IllegalArgumentException if {@code binaryOp} is not a supported operation
 */
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
  checkArgument(
      OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
  return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
|
/** An EQUAL_TO UBinary over two int literals must unify with and inline to "4 == 17". */
@Test
public void equal() {
  assertUnifiesAndInlines(
      "4 == 17", UBinary.create(Kind.EQUAL_TO, ULiteral.intLit(4), ULiteral.intLit(17)));
}
|
/**
 * Validates an OAuth scope collection: it must be non-null, every scope must pass
 * {@code validateString} (non-empty, trimmed), and no duplicates are allowed.
 *
 * @param scopeClaimName the claim name used in error messages
 * @param scopes         the scopes to validate
 * @return an unmodifiable set of the validated scopes
 * @throws ValidateException if the collection is null, a scope is invalid, or a duplicate occurs
 */
public static Set<String> validateScopes(String scopeClaimName, Collection<String> scopes) throws ValidateException {
    if (scopes == null)
        throw new ValidateException(String.format("%s value must be non-null", scopeClaimName));

    Set<String> validated = new HashSet<>();

    for (String rawScope : scopes) {
        String scope = validateString(scopeClaimName, rawScope);

        // Set.add returns false for an already-present element, flagging the duplicate.
        if (!validated.add(scope))
            throw new ValidateException(String.format("%s value must not contain duplicates - %s already present", scopeClaimName, scope));
    }

    return Collections.unmodifiableSet(validated);
}
|
/**
 * Empty, null, and whitespace-only scope entries must each be rejected with
 * a ValidateException.
 */
@Test
public void testValidateScopesDisallowsEmptyNullAndWhitespace() {
    assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "")));
    assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", null)));
    assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", " ")));
}
|
/**
 * Detects an IBM JDK by inspecting the {@code java.vendor} system property.
 *
 * @return true if the vendor string contains "IBM"
 */
public static boolean isIbmJdk() {
    String vendor = System.getProperty("java.vendor");
    return vendor.contains("IBM");
}
|
/**
 * isIbmJdk() must be driven solely by the {@code java.vendor} system property.
 * The original test mutated the global property without restoring it, leaking
 * a fake vendor string into every subsequently run test in the same JVM; the
 * property is now saved and restored in a finally block.
 */
@Test
public void testIsIBMJdk() {
    String originalVendor = System.getProperty("java.vendor");
    try {
        System.setProperty("java.vendor", "Oracle Corporation");
        assertFalse(Java.isIbmJdk());

        System.setProperty("java.vendor", "IBM Corporation");
        assertTrue(Java.isIbmJdk());
    } finally {
        // Restore the real vendor so other tests see the genuine JVM property.
        if (originalVendor == null) {
            System.clearProperty("java.vendor");
        } else {
            System.setProperty("java.vendor", originalVendor);
        }
    }
}
|
/**
 * Generates the Java source map for a PMML tree node (class name → source text),
 * recursively populating sources for the node and its children.
 *
 * @param nodeNamesDTO node/class-name mapping for the tree node being compiled
 * @param fields       the PMML fields visible to the generated code
 * @param packageName  the target package for the generated classes
 * @return a map from fully-qualified class name to generated source
 */
public static Map<String, String> getKiePMMLNodeSourcesMap(final NodeNamesDTO nodeNamesDTO,
                                                           final List<Field<?>> fields,
                                                           final String packageName) {
    logger.trace("getKiePMMLNodeSourcesMap {} {}", nodeNamesDTO, packageName);
    final JavaParserDTO javaParserDTO = new JavaParserDTO(nodeNamesDTO, packageName);
    final Map<String, String> toReturn = new HashMap<>();
    // The trailing `true` marks this as the root invocation of the recursive population.
    populateJavaParserDTOAndSourcesMap(javaParserDTO, toReturn, nodeNamesDTO, fields,true);
    return toReturn;
}
|
/**
 * The generated sources map for a simple node must be non-null and pass the
 * shared structural verification for the target package.
 */
@Test
void getKiePMMLNodeSourcesMap() {
    final KiePMMLNodeFactory.NodeNamesDTO nodeNamesDTO = new KiePMMLNodeFactory.NodeNamesDTO(node1,
                                                                                            createNodeClassName(),
                                                                                            null,
                                                                                            1.0);
    Map<String, String> retrieved = KiePMMLNodeFactory.getKiePMMLNodeSourcesMap(nodeNamesDTO,
                                                                                getFieldsFromDataDictionaryAndDerivedFields(dataDictionary1, derivedFields1),
                                                                                PACKAGE_NAME);
    assertThat(retrieved).isNotNull();
    // Delegates detailed source-content checks to the shared helper.
    commonVerifyNodeSource(retrieved, PACKAGE_NAME);
}
|
/**
 * Expands a (possibly quoted) glob pattern into sorted PathData items. A pattern
 * that matches nothing and is not a real glob yields a single item with a null
 * stat; matched paths are normalized to the same "shape" as the pattern
 * (with-scheme, schemeless-absolute, or relative to the working directory) so
 * relativity is preserved in output.
 *
 * @param pattern the glob pattern to expand
 * @param conf    configuration used to resolve the filesystem
 * @return sorted PathData items for the matches (or one unmatched placeholder)
 * @throws IOException on filesystem access failure
 */
public static PathData[] expandAsGlob(String pattern, Configuration conf)
throws IOException {
  Path globPath = new Path(pattern);
  FileSystem fs = globPath.getFileSystem(conf);    
  FileStatus[] stats = fs.globStatus(globPath);
  PathData[] items = null;
  
  if (stats == null) {
    // remove any quoting in the glob pattern
    pattern = pattern.replaceAll("\\\\(.)", "$1");
    // not a glob & file not found, so add the path with a null stat
    items = new PathData[]{ new PathData(fs, pattern, null) };
  } else {
    // figure out what type of glob path was given, will convert globbed
    // paths to match the type to preserve relativity
    PathType globType;
    URI globUri = globPath.toUri();
    if (globUri.getScheme() != null) {
      globType = PathType.HAS_SCHEME;
    } else if (!globUri.getPath().isEmpty() &&
               new Path(globUri.getPath()).isAbsolute()) {
      globType = PathType.SCHEMELESS_ABSOLUTE;
    } else {
      globType = PathType.RELATIVE;
    }

    // convert stats to PathData
    items = new PathData[stats.length];
    int i=0;
    for (FileStatus stat : stats) {
      URI matchUri = stat.getPath().toUri();
      String globMatch = null;
      switch (globType) {
        case HAS_SCHEME: // use as-is, but remove authority if necessary
          if (globUri.getAuthority() == null) {
            matchUri = removeAuthority(matchUri);
          }
          globMatch = uriToString(matchUri, false);
          break;
        case SCHEMELESS_ABSOLUTE: // take just the uri's path
          globMatch = matchUri.getPath();
          break;
        case RELATIVE: // make it relative to the current working dir
          URI cwdUri = fs.getWorkingDirectory().toUri();
          globMatch = relativize(cwdUri, matchUri, stat.isDirectory());
          break;
      }
      items[i++] = new PathData(fs, globMatch, stat);
    }
  }
  // Deterministic output order regardless of globStatus ordering.
  Arrays.sort(items);
  return items;
}
|
/**
 * A relative glob must produce matches that stay relative (no absolute paths),
 * covering both the exact name and the glob expansion.
 */
@Test (timeout = 30000)
public void testRelativeGlob() throws Exception {
  PathData[] items = PathData.expandAsGlob("d1/f1*", conf);
  assertEquals(
      sortedString("d1/f1", "d1/f1.1"),
      sortedString(items)
  );
}
|
/**
 * Creates a CCITT-compressed image XObject from the first image (index 0) of a
 * TIFF file; convenience overload of the indexed variant.
 *
 * @param document the document the image will belong to
 * @param file     the TIFF file to read
 * @return the image XObject for the first TIFF image
 * @throws IOException if the file cannot be read or converted
 */
public static PDImageXObject createFromFile(PDDocument document, File file)
        throws IOException
{
    return createFromFile(document, file, 0);
}
|
/**
 * Converts every page of a multi-page TIFF into a PDF page and checks the round
 * trip (page count and pixel identity per page).
 * Fixes two resource leaks in the original: the {@code ImageInputStream} was never
 * closed, and {@code imageReader.dispose()} was skipped whenever an assertion or
 * I/O error fired mid-test. The stream is now managed by try-with-resources and
 * the reader is disposed in a finally block.
 */
@Test
void testCreateFromRandomAccessMulti() throws IOException
{
    String tiffPath = "src/test/resources/org/apache/pdfbox/pdmodel/graphics/image/ccittg4multi.tif";

    int countTiffImages;
    try (ImageInputStream is = ImageIO.createImageInputStream(new File(tiffPath)))
    {
        ImageReader imageReader = ImageIO.getImageReaders(is).next();
        try
        {
            imageReader.setInput(is);
            countTiffImages = imageReader.getNumImages(true);
            // Sanity: the fixture must actually be multi-page.
            assertTrue(countTiffImages > 1);

            try (PDDocument document = new PDDocument())
            {
                int pdfPageNum = 0;
                while (true)
                {
                    // createFromFile returns null once the TIFF page index is exhausted.
                    PDImageXObject ximage = CCITTFactory.createFromFile(document, new File(tiffPath), pdfPageNum);
                    if (ximage == null)
                    {
                        break;
                    }
                    BufferedImage bim = imageReader.read(pdfPageNum);
                    validate(ximage, 1, bim.getWidth(), bim.getHeight(), "tiff", PDDeviceGray.INSTANCE.getName());
                    checkIdent(bim, ximage.getOpaqueImage(null, 1));
                    PDPage page = new PDPage(PDRectangle.A4);
                    // Scale the image down uniformly so it fits within the A4 media box.
                    float fX = ximage.getWidth() / page.getMediaBox().getWidth();
                    float fY = ximage.getHeight() / page.getMediaBox().getHeight();
                    float factor = Math.max(fX, fY);
                    document.addPage(page);
                    try (PDPageContentStream contentStream = new PDPageContentStream(document, page, AppendMode.APPEND, false))
                    {
                        contentStream.drawImage(ximage, 0, 0, ximage.getWidth() / factor, ximage.getHeight() / factor);
                    }
                    ++pdfPageNum;
                }
                assertEquals(countTiffImages, pdfPageNum);
                document.save(TESTRESULTSDIR + "/multitiff.pdf");
            }
        }
        finally
        {
            // Always release the reader's native/cached resources, even on failure.
            imageReader.dispose();
        }
    }

    try (PDDocument document = Loader.loadPDF(new File(TESTRESULTSDIR, "multitiff.pdf"), (String) null))
    {
        assertEquals(countTiffImages, document.getNumberOfPages());
    }
}
|
/**
 * Builds overlay color data by reading the remote app's theme: resolves the
 * activity's theme resource in a package context for the remote app and extracts
 * its colors. Returns EMPTY when the OS lacks accent support or on any lookup
 * failure (missing package/activity, security restriction, etc.).
 *
 * @param remoteApp the component whose theme colors are fetched
 * @return the fetched overlay data, or EMPTY on unsupported OS / failure
 */
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
  if (!OS_SUPPORT_FOR_ACCENT) {
    return EMPTY;
  }

  try {
    final ActivityInfo activityInfo =
        mLocalContext
            .getPackageManager()
            .getActivityInfo(remoteApp, PackageManager.GET_META_DATA);

    // Build a context in the remote package so its theme resources resolve.
    final Context context =
        mLocalContext.createPackageContext(remoteApp.getPackageName(), CONTEXT_IGNORE_SECURITY);
    context.setTheme(activityInfo.getThemeResource());
    fetchRemoteColors(mCurrentOverlayData, context);

    Logger.d(
        "OverlyDataCreatorForAndroid",
        "For component %s we fetched %s",
        remoteApp,
        mCurrentOverlayData);

    return mCurrentOverlayData;
  } catch (Exception e) {
    // Any failure (package gone, theme unreadable) degrades to no overlay.
    Logger.w("OverlyDataCreatorForAndroid", e, "Failed to fetch colors for %s", remoteApp);
    return EMPTY;
  }
}
|
/**
 * A fully transparent themed text color must be forced to full opacity
 * (alpha 0xFF) in the resulting overlay data.
 */
@Test
public void testAddsFullOpaqueToTextColor() throws Exception {
  setupReturnedColors(R.style.CompletelyTransparentAttribute);
  final OverlayData overlayData = mUnderTest.createOverlayData(mComponentName);
  Assert.assertEquals(Color.parseColor("#FF112233"), overlayData.getPrimaryTextColor());
}
|
/**
 * Processes one input line: null or blank lines are silently ignored,
 * anything else is trimmed and forwarded to the statement handler.
 *
 * @param line the raw input line; may be {@code null}
 */
void handleLine(final String line) {
  // Treat null like an empty line, then strip surrounding whitespace.
  final String trimmedLine = (line == null ? "" : line).trim();
  if (!trimmedLine.isEmpty()) {
    handleStatements(trimmedLine);
  }
}
|
/**
 * "describe function explode" must print the table-function header and include
 * both known variations; variation order is non-deterministic, so each is
 * asserted with containsString rather than positionally.
 */
@Test
public void shouldDescribeTableFunction() {
  final String expectedOutput =
      "Name        : EXPLODE\n"
          + "Author      : Confluent\n"
          + "Overview    : Explodes an array. This function outputs one value for each element of the array.\n"
          + "Type        : TABLE\n"
          + "Jar         : internal\n"
          + "Variations  : ";

  localCli.handleLine("describe function explode;");
  final String outputString = terminal.getOutputString();
  assertThat(outputString, containsString(expectedOutput));

  // variations for Udfs are loaded non-deterministically. Don't assume which variation is first
  String expectedVariation =
      "\tVariation   : EXPLODE(list ARRAY<T>)\n"
      + "\tReturns     : T";

  assertThat(outputString, containsString(expectedVariation));

  expectedVariation = "\tVariation   : EXPLODE(input ARRAY<DECIMAL>)\n"
      + "\tReturns     : DECIMAL";
  assertThat(outputString, containsString(expectedVariation));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.