focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@Operation(summary = "grantDataSource", description = "GRANT_DATASOURCE_NOTES")
@Parameters({
@Parameter(name = "userId", description = "USER_ID", required = true, schema = @Schema(implementation = int.class, example = "100")),
@Parameter(name = "datasourceIds", description = "DATASOURCE_IDS", required = true, schema = @Schema(implementation = String.class))
})
@PostMapping(value = "/grant-datasource")
@ResponseStatus(HttpStatus.OK)
@ApiException(GRANT_DATASOURCE_ERROR)
public Result grantDataSource(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "userId") int userId,
@RequestParam(value = "datasourceIds") String datasourceIds) {
    // Delegate the grant to the user service and wrap its result map into the Result payload.
    return returnDataList(usersService.grantDataSource(loginUser, userId, datasourceIds));
}
|
@Test
public void testGrantDataSource() throws Exception {
    // Post the grant request with explicit single-value parameters
    // (equivalent to the MultiValueMap form, just spelled out per key).
    MvcResult mvcResult = mockMvc.perform(post("/users/grant-datasource")
            .header(SESSION_ID, sessionId)
            .param("userId", "32")
            .param("datasourceIds", "5"))
        .andExpect(status().isOk())
        .andExpect(content().contentType(MediaType.APPLICATION_JSON))
        .andReturn();
    // The endpoint is expected to report USER_NOT_EXIST for user id 32 in this fixture.
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
    logger.info(mvcResult.getResponse().getContentAsString());
}
|
/**
 * Opens a download stream for the given file over WebDAV.
 * <p>
 * When the transfer status requests a resume (append), a Range header is added
 * and compression is disabled so the returned byte count matches the range. If
 * the server ignores the Range header (responds 200 with the full length), the
 * already-transferred prefix is skipped client-side instead.
 *
 * @param file     remote file to download
 * @param status   transfer status; offset/length drive the Range header
 * @param callback connection prompt callback (unused here)
 * @return stream positioned at the requested offset; closing it before EOF aborts the request
 * @throws BackgroundException mapped from HTTP or WebDAV failures
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final List<Header> headers = new ArrayList<Header>(this.headers());
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
final String header;
// Open-ended range when the total length is unknown, bounded range otherwise.
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
header = String.format("bytes=%d-", range.getStart());
}
else {
header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
}
if(log.isDebugEnabled()) {
log.debug(String.format("Add range header %s for file %s", header, file));
}
headers.add(new BasicHeader(HttpHeaders.RANGE, header));
// Disable compression
headers.add(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
}
try {
final HttpRequestBase request = this.toRequest(file, status);
for(Header header : headers) {
request.addHeader(header);
}
final HttpResponse response = session.getClient().execute(request);
final VoidResponseHandler handler = new VoidResponseHandler();
try {
// Throws for non-success status codes before we hand out the stream.
handler.handleResponse(response);
// Will abort the read when closed before EOF.
final ContentLengthStatusInputStream stream = new ContentLengthStatusInputStream(new HttpMethodReleaseInputStream(response, status),
response.getEntity().getContentLength(),
response.getStatusLine().getStatusCode());
if(status.isAppend()) {
// A 200 (not 206) with a full-length body means the server ignored the
// Range header; skip the prefix manually to honor the resume offset.
if(stream.getCode() == HttpStatus.SC_OK) {
if(TransferStatus.UNKNOWN_LENGTH != status.getLength()) {
if(stream.getLength() != status.getLength()) {
log.warn(String.format("Range header not supported. Skipping %d bytes in file %s.", status.getOffset(), file));
stream.skip(status.getOffset());
}
}
}
}
return stream;
}
catch(IOException ex) {
// Release the connection; the response body was never fully consumed.
request.abort();
throw ex;
}
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Download {0} failed", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map("Download {0} failed", e, file);
}
}
|
/**
 * Integration test: uploads 1023 random bytes, then reads with append=true and
 * offset=100 and verifies the returned stream yields exactly the bytes after
 * the first 100 — i.e. the Range resume path of DAVReadFeature works end-to-end.
 */
@Test
public void testReadRange() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
final byte[] content = RandomUtils.nextBytes(1023);
final OutputStream out = local.getOutputStream(false);
assertNotNull(out);
IOUtils.write(content, out);
out.close();
// Upload the full fixture file first.
new DAVUploadFeature(session).upload(
test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(),
new TransferStatus().withLength(content.length),
new DisabledConnectionCallback());
// Request a resumed read starting at byte 100.
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setAppend(true);
status.setOffset(100L);
final InputStream in = new DAVReadFeature(session).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback());
assertNotNull(in);
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
new StreamCopier(status, status).transfer(in, buffer);
// Expected payload is the suffix of the uploaded content beginning at offset 100.
final byte[] reference = new byte[content.length - 100];
System.arraycopy(content, 100, reference, 0, content.length - 100);
assertArrayEquals(reference, buffer.toByteArray());
in.close();
// Cleanup of the remote fixture.
new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Lists all house tables, optionally filtered by database id, retrying on
 * transient repository-state errors and blocking with a bounded timeout.
 */
@Override
public List<HouseTable> findAllByDatabaseId(String databaseId) {
    // Only constrain the query when a database id was actually supplied.
    Map<String, String> queryParams = new HashMap<>();
    if (Strings.isNotEmpty(databaseId)) {
        queryParams.put("databaseId", databaseId);
    }
    return getHtsRetryTemplate(
            Arrays.asList(
                HouseTableRepositoryStateUnkownException.class, IllegalStateException.class))
        .execute(
            context -> {
                // Fetch matching tables, unwrap the response payload, map every
                // entry to the repository model, then block with a timeout.
                return apiInstance
                    .getUserTables(queryParams)
                    .map(GetAllEntityResponseBodyUserTable::getResults)
                    .flatMapMany(Flux::fromIterable)
                    .map(houseTableMapper::toHouseTable)
                    .collectList()
                    .block(Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS));
            });
}
|
/**
 * Verifies that findAllByDatabaseId returns every table the HTS server reports
 * for the requested database (two fixtures sharing the same database id here).
 */
@Test
public void testListOfTablesInDatabase() {
List<UserTable> tables = new ArrayList<>();
tables.add(houseTableMapper.toUserTable(HOUSE_TABLE));
tables.add(houseTableMapper.toUserTable(HOUSE_TABLE_SAME_DB));
GetAllEntityResponseBodyUserTable listResponse = new GetAllEntityResponseBodyUserTable();
/**
 * Need to use the reflection trick to help initializing the object with generated class {@link
 * GetAllUserTablesResponseBody}, which somehow doesn't provided proper setter in the generated
 * code.
 */
Field resultField =
ReflectionUtils.findField(GetAllEntityResponseBodyUserTable.class, "results");
Assertions.assertNotNull(resultField);
ReflectionUtils.makeAccessible(resultField);
ReflectionUtils.setField(resultField, listResponse, tables);
// Serve the canned response from the mock HTS server; the repository should parse it.
mockHtsServer.enqueue(
new MockResponse()
.setResponseCode(200)
.setBody((new Gson()).toJson(listResponse))
.addHeader("Content-Type", "application/json"));
List<HouseTable> returnList = htsRepo.findAllByDatabaseId(HOUSE_TABLE.getDatabaseId());
assertThat(returnList).hasSize(2);
}
|
/**
 * Reads from the channel at {@code position} until {@code destinationBuffer}
 * is full, throwing {@link EOFException} if the end of file is reached first.
 *
 * @param channel           channel to read from
 * @param destinationBuffer buffer to fill completely
 * @param position          absolute file position to start reading at; must be non-negative
 * @param description       what is being read, used in the failure message
 * @throws IOException on read failure or premature end of file
 */
public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
                                   String description) throws IOException {
    if (position < 0) {
        throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
    }
    // Capture the expected count before readFully drains the buffer.
    final int bytesExpected = destinationBuffer.remaining();
    readFully(channel, destinationBuffer, position);
    if (!destinationBuffer.hasRemaining()) {
        return;
    }
    final int bytesRead = bytesExpected - destinationBuffer.remaining();
    throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " +
            "but reached end of file after reading %d bytes. Started read from position %d.",
            description, channel, bytesExpected, bytesRead, position));
}
|
/**
 * Verifies readFullyOrFail keeps issuing reads until the buffer is full even
 * when the mocked channel returns data in partial chunks.
 */
@Test
public void testReadFullyOrFailWithPartialFileChannelReads() throws IOException {
FileChannel channelMock = mock(FileChannel.class);
final int bufferSize = 100;
ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
// Helper stubs the mock to answer read() with random bytes in several pieces
// and returns the concatenated content we expect to end up in the buffer.
String expectedBufferContent = fileChannelMockExpectReadWithRandomBytes(channelMock, bufferSize);
Utils.readFullyOrFail(channelMock, buffer, 0L, "test");
assertEquals(expectedBufferContent, new String(buffer.array()), "The buffer should be populated correctly");
assertFalse(buffer.hasRemaining(), "The buffer should be filled");
verify(channelMock, atLeastOnce()).read(any(), anyLong());
}
|
/**
 * Lower-case variant of {@link #asAlphaNumeric(int)}; uses Locale.ROOT so the
 * mapping is locale-independent.
 */
public static String asAlphaNumericLower(int i) {
    final String alpha = asAlphaNumeric(i);
    return alpha.toLowerCase(Locale.ROOT);
}
|
/**
 * Spot-checks the lower-case alpha numbering at its boundaries. Note the
 * scheme repeats the same letter per block (26->"z", 27->"aa", 52->"zz",
 * 53->"aaa"), i.e. letters are doubled/tripled rather than counted like
 * spreadsheet columns.
 */
@Test
public void testAlphaLower() {
assertEquals("a", AutoPageNumberUtils.asAlphaNumericLower(1));
assertEquals("z", AutoPageNumberUtils.asAlphaNumericLower(26));
assertEquals("aa", AutoPageNumberUtils.asAlphaNumericLower(27));
assertEquals("zz", AutoPageNumberUtils.asAlphaNumericLower(52));
assertEquals("aaa", AutoPageNumberUtils.asAlphaNumericLower(53));
assertEquals("zzz", AutoPageNumberUtils.asAlphaNumericLower(78));
}
|
/**
 * AspectJ pointcut matching any method annotated with {@code @Pageable};
 * intentionally empty — it exists only to carry the {@code @Pointcut} annotation.
 */
@Pointcut("@annotation(org.apache.shenyu.admin.aspect.annotation.Pageable)")
public void pageableCut() {
}
|
/**
 * Smoke test: invoking the (empty) pointcut marker method must not throw.
 */
@Test
public void pageableCutTest() {
assertDoesNotThrow(() -> pageableAspect.pageableCut());
}
|
/**
 * Dispatches a containers-launcher event to the matching launch, recovery,
 * cleanup or signalling action. Launch/relaunch/recover submissions are
 * tracked in {@code running} keyed by container id so that later
 * signal/pause/resume events can find the in-flight launch.
 */
@Override
public void handle(ContainersLauncherEvent event) {
// TODO: ContainersLauncher launches containers one by one!!
Container container = event.getContainer();
ContainerId containerId = container.getContainerId();
switch (event.getType()) {
case LAUNCH_CONTAINER:
Application app =
context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
ContainerLaunch launch =
new ContainerLaunch(context, getConfig(), dispatcher, exec, app,
event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(launch);
running.put(containerId, launch);
break;
case RELAUNCH_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
ContainerRelaunch relaunch =
new ContainerRelaunch(context, getConfig(), dispatcher, exec, app,
event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(relaunch);
running.put(containerId, relaunch);
break;
case RECOVER_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher,
exec, app, event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(launch);
running.put(containerId, launch);
break;
case RECOVER_PAUSED_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
launch = new RecoverPausedContainerLaunch(context, getConfig(),
dispatcher, exec, app, event.getContainer(), dirsHandler,
containerManager);
containerLauncher.submit(launch);
// NOTE(review): unlike the other launch paths this one is not recorded in
// `running` — presumably intentional, but worth confirming.
break;
case CLEANUP_CONTAINER:
// true: mark the container as fully cleaned up (see cleanup()).
cleanup(event, containerId, true);
break;
case CLEANUP_CONTAINER_FOR_REINIT:
cleanup(event, containerId, false);
break;
case SIGNAL_CONTAINER:
SignalContainersLauncherEvent signalEvent =
(SignalContainersLauncherEvent) event;
ContainerLaunch runningContainer = running.get(containerId);
if (runningContainer == null) {
// Container not launched. So nothing needs to be done.
LOG.info("Container " + containerId + " not running, nothing to signal.");
return;
}
try {
runningContainer.signalContainer(signalEvent.getCommand());
} catch (IOException e) {
// Signal delivery is best-effort; failure is logged, not propagated.
LOG.warn("Got exception while signaling container " + containerId
+ " with command " + signalEvent.getCommand());
}
break;
case PAUSE_CONTAINER:
ContainerLaunch launchedContainer = running.get(containerId);
if (launchedContainer == null) {
// Container not launched. So nothing needs to be done.
return;
}
// Pause the container
try {
launchedContainer.pauseContainer();
} catch (Exception e) {
LOG.info("Got exception while pausing container: " +
StringUtils.stringifyException(e));
}
break;
case RESUME_CONTAINER:
ContainerLaunch launchCont = running.get(containerId);
if (launchCont == null) {
// Container not launched. So nothing needs to be done.
return;
}
// Resume the container.
try {
launchCont.resumeContainer();
} catch (Exception e) {
LOG.info("Got exception while resuming container: " +
StringUtils.stringifyException(e));
}
break;
}
}
|
/**
 * A PAUSE_CONTAINER event for a tracked container must invoke pauseContainer()
 * exactly once and must not remove the launch from the running map.
 */
@Test
public void testPauseContainerEvent()
throws IllegalArgumentException, IllegalAccessException, IOException {
// Seed the running map with exactly the container the event targets.
spy.running.clear();
spy.running.put(containerId, containerLaunch);
when(event.getType())
.thenReturn(ContainersLauncherEventType.PAUSE_CONTAINER);
doNothing().when(containerLaunch).pauseContainer();
spy.handle(event);
assertEquals(1, spy.running.size());
Mockito.verify(containerLaunch, Mockito.times(1)).pauseContainer();
}
|
/**
 * Creates a single-table route engine: a standard engine when any single
 * tables are involved, a database-broadcast engine for schema-level DDL,
 * otherwise empty.
 */
public static Optional<SingleRouteEngine> newInstance(final Collection<QualifiedTable> singleTables, final SQLStatement sqlStatement) {
    if (!singleTables.isEmpty()) {
        return Optional.of(new SingleStandardRouteEngine(singleTables, sqlStatement));
    }
    // TODO move this logic to common route logic
    return isSchemaDDLStatement(sqlStatement)
            ? Optional.of(new SingleDatabaseBroadcastRouteEngine())
            : Optional.empty();
}
|
/**
 * With no single tables but a CREATE SCHEMA statement, the factory must still
 * produce an engine (the database-broadcast branch).
 */
@Test
void assertNewInstanceWithEmptySingleTableNameAndCreateSchemaStatement() {
assertTrue(SingleRouteEngineFactory.newInstance(Collections.emptyList(), mock(CreateSchemaStatement.class)).isPresent());
}
|
/**
 * Compresses the given string with the named compressor (or the default one
 * when {@code compressorName} is null) and returns the result base64-encoded.
 *
 * @throws NullPointerException if {@code uncompressedString} is null
 * @throws IOException          if compression fails
 */
public String compress(String compressorName, String uncompressedString) throws IOException {
    Checks.notNull(uncompressedString, "uncompressedString cannot be null");
    // Fall back to the default compressor when no name was supplied.
    final String effectiveName = compressorName == null ? DEFAULT_COMPRESSOR_NAME : compressorName;
    final Compressor compressor = getCompressor(effectiveName);
    final byte[] compressed = compressor.compress(uncompressedString.getBytes(DEFAULT_ENCODING));
    return base64Encode(compressed);
}
|
/**
 * compress must reject a null input string with an NPE carrying the documented
 * message. (The method name keeps its historical "Decompressed" wording for
 * stability, but the argument under test is the uncompressed input — the
 * assertion description now says so correctly.)
 */
@Test
public void compressShouldThrowExceptionIfDecompressedStringIsNull() {
    AssertHelper.assertThrows(
        "compress should throw exception if uncompressed string is null",
        NullPointerException.class,
        "uncompressedString cannot be null",
        () -> stringCodec.compress("abcd", null));
}
|
/**
 * Adds the "signal" argument to this docker kill command.
 *
 * @param signal signal name to deliver (e.g. a value like "SIGUSR2" as used in the tests)
 * @return this command, for chaining
 */
public DockerKillCommand setSignal(String signal) {
super.addCommandArguments("signal", signal);
return this;
}
|
/**
 * Verifies setSignal adds exactly one "signal" argument alongside the
 * preconfigured command ("kill") and container name ("foo"), and nothing else.
 * NOTE(review): the method name mentions a grace period but the test exercises
 * setSignal — looks like a copy-paste name; confirm before renaming.
 */
@Test
public void testSetGracePeriod() {
dockerKillCommand.setSignal(SIGNAL);
assertEquals("kill", StringUtils.join(",",
dockerKillCommand.getDockerCommandWithArguments()
.get("docker-command")));
assertEquals("foo", StringUtils.join(",",
dockerKillCommand.getDockerCommandWithArguments().get("name")));
assertEquals("SIGUSR2", StringUtils.join(",",
dockerKillCommand.getDockerCommandWithArguments().get("signal")));
// Exactly three entries: docker-command, name, signal.
assertEquals(3, dockerKillCommand.getDockerCommandWithArguments().size());
}
|
/**
 * Creates a database in the backing Iceberg catalog.
 *
 * @throws AlreadyExistsException if a database with the same name already exists
 */
@Override
public void createDb(String dbName, Map<String, String> properties) throws AlreadyExistsException {
// Existence check first so the catalog is never asked to create a duplicate.
if (dbExists(dbName)) {
throw new AlreadyExistsException("Database Already Exists");
}
icebergCatalog.createDb(dbName, properties);
}
|
/**
 * Creating a database whose name the catalog already reports must throw
 * AlreadyExistsException (declared via the @Test expected attribute).
 */
@Test(expected = AlreadyExistsException.class)
public void testCreateDuplicatedDb(@Mocked IcebergHiveCatalog icebergHiveCatalog) throws AlreadyExistsException {
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
// Stub the catalog to already contain "iceberg_db"; dbExists presumably
// consults listAllDatabases — confirm against IcebergMetadata internals.
new Expectations() {
{
icebergHiveCatalog.listAllDatabases();
result = Lists.newArrayList("iceberg_db");
minTimes = 0;
}
};
metadata.createDb("iceberg_db", new HashMap<>());
}
|
/**
 * Verifies the client certificate chain by selecting its end-entity
 * certificate and attempting to build a trusted cert path for it, wrapping
 * any path-building failure in a CertificateException as the
 * X509TrustManager contract requires.
 */
@Override
public void checkClientTrusted( X509Certificate[] chain, String authType ) throws CertificateException
{
// Find and use the end entity as the selector for verification.
final X509Certificate endEntityCert = CertificateUtils.identifyEndEntityCertificate( Arrays.asList( chain ) );
final X509CertSelector selector = new X509CertSelector();
selector.setCertificate( endEntityCert );
try
{
checkChainTrusted( selector, chain );
}
catch ( InvalidAlgorithmParameterException | NoSuchAlgorithmException | CertPathBuilderException ex )
{
// Callers only understand CertificateException; preserve the cause.
throw new CertificateException( ex );
}
}
|
/**
 * A chain with its intermediate certificate removed must be rejected with a
 * CertificateException.
 */
@Test
public void testInvalidChainMissingIntermediate() throws Exception
{
    // Setup fixture. The original used a bare `assert`, which is a no-op unless
    // the JVM runs with -ea, so a broken fixture could slip through silently.
    if ( validChain.length != 4 )
    {
        throw new IllegalStateException( "Test fixture is expected to contain a chain of exactly 4 certificates, but contains: " + validChain.length );
    }
    // Drop the intermediate at index 1, leaving a gap in the chain.
    final X509Certificate[] input = new X509Certificate[ 3 ];
    input[ 0 ] = validChain[ 0 ];
    input[ 1 ] = validChain[ 2 ];
    input[ 2 ] = validChain[ 3 ];
    // Execute system under test & verify.
    assertThrows(CertificateException.class, () -> systemUnderTest.checkClientTrusted( input, "RSA" ) );
}
|
/**
 * Resolves secret placeholders in {@code toInterpolate} via the context's
 * secret source resolver.
 *
 * @deprecated retained for binary compatibility; restricted from new use —
 *             call the context's SecretSourceResolver directly instead.
 */
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
}
|
/**
 * Escaped placeholders (^${...}) must pass through as literal ${...} text
 * rather than being interpolated — here for two entries in one string.
 */
@Test
public void resolve_mixedMultipleEntriesEscaped() {
assertThat(resolve("http://^${FOO}:^${BAR}"), equalTo("http://${FOO}:${BAR}"));
}
|
/**
 * Returns the counter for the operation whose symbol is {@code key}, or null
 * when the key maps to no known operation (including a null key).
 * NOTE(review): assumes opsCount holds an entry for every OpType — otherwise
 * the .longValue() dereference would NPE; confirm at the map's construction.
 */
@Override
public Long getLong(String key) {
final OpType type = OpType.fromSymbol(key);
return type == null ? null : opsCount.get(type).longValue();
}
|
/**
 * Unknown keys — null and a symbol with no matching operation — must yield
 * null rather than zero; the remaining counters are checked by verifyStatistics().
 */
@Test
public void testGetLong() {
assertNull(statistics.getLong(null));
assertNull(statistics.getLong(NO_SUCH_OP));
verifyStatistics();
}
|
/**
 * Marks each assigned user-topic-partition as initialized once the consumer's
 * read offset for its metadata partition has caught up to the end offset
 * fetched at assignment time, and flips the global
 * isAllUserTopicPartitionsInitialized flag when every assigned partition is
 * initialized. No-op if that flag is already set.
 */
private void maybeMarkUserPartitionsAsReady() {
if (isAllUserTopicPartitionsInitialized) {
return;
}
// Ensure start/end offsets exist for any newly assigned metadata partitions.
maybeFetchStartAndEndOffsets();
boolean isAllInitialized = true;
for (final UserTopicIdPartition utp : assignedUserTopicIdPartitions.values()) {
if (utp.isAssigned && !utp.isInitialized) {
final Integer metadataPartition = utp.metadataPartition;
final StartAndEndOffsetHolder holder = offsetHolderByMetadataPartition.get(toRemoteLogPartition(metadataPartition));
// The offset-holder can be null, when the recent assignment wasn't picked up by the consumer.
if (holder != null) {
// Defaults to -1 so a partition that was never read is treated as at offset -1.
final Long readOffset = readOffsetsByMetadataPartition.getOrDefault(metadataPartition, -1L);
// 1) The end-offset was fetched only once during reassignment. The metadata-partition can receive
// new stream of records, so the consumer can read records more than the last-fetched end-offset.
// 2) When the internal topic becomes empty due to breach by size/time/start-offset, then there
// are no records to read.
if (readOffset + 1 >= holder.endOffset || holder.endOffset.equals(holder.startOffset)) {
markInitialized(utp);
} else {
log.debug("The user-topic-partition {} could not be marked initialized since the read-offset is {} " +
"but the end-offset is {} for the metadata-partition {}", utp, readOffset, holder.endOffset,
metadataPartition);
}
} else {
log.debug("The offset-holder is null for the metadata-partition {}. The consumer may not have picked" +
" up the recent assignment", metadataPartition);
}
}
// Re-evaluated for every partition: one lagging partition keeps the global flag false.
isAllInitialized = isAllInitialized && utp.isAssigned && utp.isInitialized;
}
if (isAllInitialized) {
log.info("Initialized for all the {} assigned user-partitions mapped to the {} meta-partitions in {} ms",
assignedUserTopicIdPartitions.size(), assignedMetadataPartitions.size(),
time.milliseconds() - uninitializedAt);
}
isAllUserTopicPartitionsInitialized = isAllInitialized;
}
|
/**
 * After the consumer reads past the end offset captured at assignment time,
 * the user partition must be reported as initialized via the handler.
 */
@Test
public void testMaybeMarkUserPartitionsAsReady() {
final TopicIdPartition tpId = getIdPartitions("hello", 1).get(0);
final int metadataPartition = partitioner.metadataPartition(tpId);
// End offset at assignment time is 2; records below will push the read offset past it.
consumer.updateEndOffsets(Collections.singletonMap(toRemoteLogPartition(metadataPartition), 2L));
consumerTask.addAssignmentsForPartitions(Collections.singleton(tpId));
consumerTask.ingestRecords();
assertTrue(consumerTask.isUserPartitionAssigned(tpId), "Partition " + tpId + " has not been assigned");
assertTrue(consumerTask.isMetadataPartitionAssigned(metadataPartition));
// Not initialized yet: nothing has been read.
assertFalse(handler.isPartitionInitialized.containsKey(tpId));
IntStream.range(0, 5).forEach(offset -> addRecord(consumer, metadataPartition, tpId, offset));
consumerTask.ingestRecords();
assertEquals(Optional.of(4L), consumerTask.readOffsetForMetadataPartition(metadataPartition));
assertTrue(handler.isPartitionInitialized.get(tpId));
}
|
/**
 * Creates the common database indexes, then applies the PostgreSQL-specific
 * index additions from the addon SQL file.
 */
@Override
public void createDatabaseIndexes() throws Exception {
super.createDatabaseIndexes();
log.info("Installing SQL DataBase schema PostgreSQL specific indexes part: " + SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
executeQueryFromFile(SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
}
|
/**
 * createDatabaseSchema must execute exactly three SQL files — entities,
 * common indexes, and the PostgreSQL-specific index addon — with file
 * execution itself stubbed out.
 */
@Test
public void givenPsqlDbSchemaService_whenCreateDatabaseSchema_thenVerifyPsqlIndexSpecificCall() throws Exception {
SqlEntityDatabaseSchemaService service = spy(new SqlEntityDatabaseSchemaService());
willDoNothing().given(service).executeQueryFromFile(anyString());
service.createDatabaseSchema();
verify(service, times(1)).createDatabaseIndexes();
verify(service, times(1)).executeQueryFromFile(SqlEntityDatabaseSchemaService.SCHEMA_ENTITIES_SQL);
verify(service, times(1)).executeQueryFromFile(SqlEntityDatabaseSchemaService.SCHEMA_ENTITIES_IDX_SQL);
verify(service, times(1)).executeQueryFromFile(SqlEntityDatabaseSchemaService.SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
// No other SQL files beyond the three verified above.
verify(service, times(3)).executeQueryFromFile(anyString());
}
|
/**
 * Callback invoked when the user opens the notification; forwards to
 * digestNotification() for processing.
 */
@Override
public void onOpened() {
digestNotification();
}
|
/**
 * Opening a notification while the app is in the background must register a
 * visibility listener, and once the app becomes visible the opened event must
 * be forwarded to JS with the notification payload.
 */
@Test
public void onOpened_appGoesVisible_resumeAppAndNotifyJs() throws Exception {
// Arrange
setUpBackgroundApp();
// Act
final PushNotification uut = createUUT();
uut.onOpened();
// Hijack and invoke visibility listener
ArgumentCaptor<AppVisibilityListener> listenerCaptor = ArgumentCaptor.forClass(AppVisibilityListener.class);
verify(mAppLifecycleFacade).addVisibilityListener(listenerCaptor.capture());
AppVisibilityListener listener = listenerCaptor.getValue();
listener.onAppVisible();
// Assert
verify(mJsIOHelper).sendEventToJS(eq(NOTIFICATION_OPENED_EVENT_NAME), argThat(new isValidResponse(mResponseBundle)), eq(mReactContext));
}
|
/**
 * Builds the default map of supported feature version ranges: one entry for
 * metadata.version plus one per production feature whose maximum supported
 * level is greater than zero.
 *
 * @param enableUnstable when true, ranges extend to the latest testing level
 *                       instead of the latest production level
 * @return feature name to supported version range
 */
public static Map<String, VersionRange> defaultFeatureMap(boolean enableUnstable) {
    // Holds 1 + |PRODUCTION_FEATURES| entries; the original pre-sized this to 1,
    // which forced an immediate rehash on the second put. Let it size itself.
    Map<String, VersionRange> features = new HashMap<>();
    features.put(MetadataVersion.FEATURE_NAME, VersionRange.of(
        MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(),
        enableUnstable ?
            MetadataVersion.latestTesting().featureLevel() :
            MetadataVersion.latestProduction().featureLevel()));
    for (Features feature : Features.PRODUCTION_FEATURES) {
        short maxVersion = enableUnstable ? feature.latestTesting() : feature.latestProduction();
        // A max of 0 means the feature is not supported at all — omit it.
        if (maxVersion > 0) {
            features.put(feature.featureName(), VersionRange.of(feature.minimumProduction(), maxVersion));
        }
    }
    return features;
}
|
/**
 * With unstable features disabled, defaultFeatureMap must cap every range at
 * the latest production level; this mirrors the production computation and
 * compares the whole map.
 */
@Test
public void testDefaultFeatureMap() {
Map<String, VersionRange> expectedFeatures = new HashMap<>(1);
expectedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of(
MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(),
MetadataVersion.LATEST_PRODUCTION.featureLevel()));
for (Features feature : Features.PRODUCTION_FEATURES) {
short maxVersion = feature.defaultValue(MetadataVersion.LATEST_PRODUCTION);
// Features with a zero max are expected to be absent from the map.
if (maxVersion > 0) {
expectedFeatures.put(feature.featureName(), VersionRange.of(
feature.minimumProduction(),
maxVersion
));
}
}
assertEquals(expectedFeatures, QuorumFeatures.defaultFeatureMap(false));
}
|
/**
 * Validates that the collected PNG chunks form a convertible image: a sane
 * mandatory IHDR chunk, sane optional chunks where present, and at least one
 * sane IDAT chunk.
 *
 * @param state parser state holding the chunks found so far; may be null
 * @return true when the state is convertible, false otherwise (with the
 *         offending chunk logged)
 */
static boolean checkConverterState(PNGConverterState state)
{
    if (state == null)
    {
        return false;
    }
    // IHDR is mandatory and must be sane.
    if (state.IHDR == null || !checkChunkSane(state.IHDR))
    {
        LOG.error("Invalid IHDR chunk.");
        return false;
    }
    // The remaining single chunks are optional (checkChunkSane accepts null)
    // but must be sane when present. Checked in the same order as before.
    if (!isOptionalChunkSane(state.PLTE, "PLTE")
        || !isOptionalChunkSane(state.iCCP, "iCCP")
        || !isOptionalChunkSane(state.tRNS, "tRNS")
        || !isOptionalChunkSane(state.sRGB, "sRGB")
        || !isOptionalChunkSane(state.cHRM, "cHRM")
        || !isOptionalChunkSane(state.gAMA, "gAMA"))
    {
        return false;
    }
    // Check the IDATs: at least one is required and all must be sane.
    if (state.IDATs.isEmpty())
    {
        LOG.error("No IDAT chunks.");
        return false;
    }
    for (Chunk idat : state.IDATs)
    {
        if (!checkChunkSane(idat))
        {
            LOG.error("Invalid IDAT chunk.");
            return false;
        }
    }
    return true;
}

/**
 * Checks a single optional chunk, logging an error naming the chunk when it
 * is present but not sane.
 */
private static boolean isOptionalChunkSane(Chunk chunk, String chunkName)
{
    if (checkChunkSane(chunk))
    {
        return true;
    }
    LOG.error("Invalid " + chunkName + " chunk.");
    return false;
}
|
/**
 * Walks checkConverterState through every validation branch: null state,
 * missing IHDR/IDATs, then for each optional chunk field sets an invalid
 * chunk (expect false), restores a valid one (expect true), and finally an
 * IDAT list containing one invalid chunk.
 */
@Test
void testCheckConverterState()
{
assertFalse(PNGConverter.checkConverterState(null));
PNGConverter.PNGConverterState state = new PNGConverter.PNGConverterState();
assertFalse(PNGConverter.checkConverterState(state));
// Zero-length byte array makes the chunk fail checkChunkSane.
PNGConverter.Chunk invalidChunk = new PNGConverter.Chunk();
invalidChunk.bytes = new byte[0];
assertFalse(PNGConverter.checkChunkSane(invalidChunk));
// Valid Dummy Chunk
PNGConverter.Chunk validChunk = new PNGConverter.Chunk();
validChunk.bytes = new byte[16];
validChunk.start = 4;
validChunk.length = 8;
validChunk.crc = 2077607535;
assertTrue(PNGConverter.checkChunkSane(validChunk));
// IHDR and IDAT requirements.
state.IHDR = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.IDATs = Collections.singletonList(validChunk);
assertFalse(PNGConverter.checkConverterState(state));
state.IHDR = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.IDATs = new ArrayList<>();
assertFalse(PNGConverter.checkConverterState(state));
state.IDATs = Collections.singletonList(validChunk);
assertTrue(PNGConverter.checkConverterState(state));
// Each optional chunk: invalid fails, valid restores convertibility.
state.PLTE = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.PLTE = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.cHRM = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.cHRM = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.tRNS = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.tRNS = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.iCCP = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.iCCP = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.sRGB = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.sRGB = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
state.gAMA = invalidChunk;
assertFalse(PNGConverter.checkConverterState(state));
state.gAMA = validChunk;
assertTrue(PNGConverter.checkConverterState(state));
// A single invalid chunk anywhere in the IDAT list invalidates the state.
state.IDATs = Arrays.asList(validChunk, invalidChunk);
assertFalse(PNGConverter.checkConverterState(state));
}
|
/**
 * Writes a dex reference in its textual form, dispatching on the concrete
 * reference subtype; rejects unknown subtypes.
 * NOTE(review): dispatch order matters if any reference implements several of
 * these interfaces — preserved as-is.
 *
 * @throws IllegalArgumentException if the reference type is not one of the known kinds
 * @throws IOException              if the underlying writer fails
 */
public void writeReference(Reference reference) throws IOException {
if (reference instanceof StringReference) {
writeQuotedString((StringReference) reference);
} else if (reference instanceof TypeReference) {
writeType((TypeReference) reference);
} else if (reference instanceof FieldReference) {
writeFieldDescriptor((FieldReference) reference);
} else if (reference instanceof MethodReference) {
writeMethodDescriptor((MethodReference) reference);
} else if (reference instanceof MethodProtoReference) {
writeMethodProtoDescriptor((MethodProtoReference) reference);
} else if (reference instanceof MethodHandleReference) {
writeMethodHandle((MethodHandleReference) reference);
} else if (reference instanceof CallSiteReference) {
writeCallSite((CallSiteReference) reference);
} else {
throw new IllegalArgumentException(String.format("Not a known reference type: %s", reference.getClass()));
}
}
|
/**
 * A TypeReference must be written verbatim as its type descriptor.
 */
@Test
public void testWriteReference_type() throws IOException {
DexFormattedWriter writer = new DexFormattedWriter(output);
writer.writeReference(new ImmutableTypeReference("Ltest/type;"));
Assert.assertEquals(
"Ltest/type;",
output.toString());
}
|
/**
 * Closes the given resource, swallowing any exception thrown by close().
 * Null-safe: a null argument is a no-op.
 *
 * @param closeable resource to close; may be null
 */
@SuppressWarnings("java:S108")
public static void closeQuietly(AutoCloseable closeable) {
    if (closeable != null) {
        try {
            closeable.close();
        } catch (Exception ignored) {
            // Best-effort close: failures are deliberately suppressed.
        }
    }
}
|
/**
 * closeQuietly must swallow an exception thrown by close() and must invoke
 * close() exactly once, with no further interactions on the resource.
 */
@Test
public void test_closeResource_whenException() throws Exception {
Closeable closeable = mock(Closeable.class);
doThrow(new IOException("expected")).when(closeable).close();
closeQuietly(closeable);
verify(closeable).close();
verifyNoMoreInteractions(closeable);
}
|
/**
 * Formats a method as {@code name(Type1,Type2,...)} using the simple
 * (unqualified) names of its parameter types, joined by COMMA_SEPARATOR.
 */
public static String formatMethod(Method input) {
    StringBuilder parameterTypes = new StringBuilder();
    for (Class<?> parameterType : input.getParameterTypes()) {
        if (parameterTypes.length() > 0) {
            parameterTypes.append(COMMA_SEPARATOR);
        }
        parameterTypes.append(parameterType.getSimpleName());
    }
    return String.format("%s(%s)", input.getName(), parameterTypes);
}
|
/**
 * formatMethod must render zero, one and multiple parameters with simple
 * type names — e.g. "twoArg(String, List)" rather than fully-qualified names.
 */
@Test
public void testMethodFormatter() throws Exception {
assertEquals(
"testMethodFormatter()",
ReflectHelpers.formatMethod(getClass().getMethod("testMethodFormatter")));
assertEquals(
"oneArg(int)",
ReflectHelpers.formatMethod(getClass().getDeclaredMethod("oneArg", int.class)));
assertEquals(
"twoArg(String, List)",
ReflectHelpers.formatMethod(
getClass().getDeclaredMethod("twoArg", String.class, List.class)));
}
|
/**
 * UDF: reports whether {@code array} contains {@code val}. A null array never
 * contains anything; otherwise membership is List#contains semantics (so a
 * value of a different type simply compares unequal).
 */
@Udf
public <T> boolean contains(
    @UdfParameter final List<T> array,
    @UdfParameter final T val
) {
    if (array == null) {
        return false;
    }
    return array.contains(val);
}
|
/**
 * Membership over an integer list: present values are found, absent values and
 * values of a different runtime type (the strings "1", "aa") are not.
 */
@Test
public void shouldFindIntegersInList() {
assertTrue(udf.contains(Arrays.asList(1, 2, 3), 2));
assertFalse(udf.contains(Arrays.asList(1, 2, 3), 0));
assertFalse(udf.contains(Arrays.asList(1, 2, 3), "1"));
assertFalse(udf.contains(Arrays.asList(1, 2, 3), "aa"));
}
|
/**
 * Merges {@code other} into this statistics object in place: sums the
 * unencoded byte counts and adds the histograms element-wise (overflow-checked).
 * If this side is already invalid the call is a no-op; if {@code other} is
 * null or invalid, this side is invalidated and its accumulated values reset
 * so they cannot be mistaken for real statistics.
 *
 * @param other statistics to fold in; must be of the same type when valid
 * @throws IllegalArgumentException if the types differ
 * @throws ArithmeticException      if any sum overflows a long
 */
public void mergeStatistics(SizeStatistics other) {
    if (!valid) {
        return;
    }
    // Stop merge if other is invalid.
    if (other == null || !other.isValid()) {
        valid = false;
        unencodedByteArrayDataBytes = 0L;
        repetitionLevelHistogram.clear();
        definitionLevelHistogram.clear();
        return;
    }
    Preconditions.checkArgument(type.equals(other.type), "Cannot merge SizeStatistics of different types");
    unencodedByteArrayDataBytes = Math.addExact(unencodedByteArrayDataBytes, other.unencodedByteArrayDataBytes);
    // Element-wise histogram addition. Access the other side's fields directly
    // in both loops (the original inconsistently went through the getter for
    // definition levels only).
    for (int i = 0; i < repetitionLevelHistogram.size(); i++) {
        repetitionLevelHistogram.set(
            i, Math.addExact(repetitionLevelHistogram.get(i), other.repetitionLevelHistogram.get(i)));
    }
    for (int i = 0; i < definitionLevelHistogram.size(); i++) {
        definitionLevelHistogram.set(
            i, Math.addExact(definitionLevelHistogram.get(i), other.definitionLevelHistogram.get(i)));
    }
}
|
/**
 * Merging two built statistics must sum the byte counts (3 + 2 = 5) and add
 * the repetition/definition level histograms element-wise.
 */
@Test
public void testMergeStatistics() {
PrimitiveType type = Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
.as(LogicalTypeAnnotation.stringType())
.named("a");
final int maxRepetitionLevel = 2;
final int maxDefinitionLevel = 2;
// First side: one value at each (rep, def) level pair (0,0), (1,1), (2,2).
SizeStatistics.Builder builder1 = SizeStatistics.newBuilder(type, maxRepetitionLevel, maxDefinitionLevel);
builder1.add(0, 0, Binary.fromString("a"));
builder1.add(1, 1, Binary.fromString("b"));
builder1.add(2, 2, Binary.fromString("c"));
SizeStatistics statistics1 = builder1.build();
// Second side: two values at (0,1).
SizeStatistics.Builder builder2 = SizeStatistics.newBuilder(type, maxRepetitionLevel, maxDefinitionLevel);
builder2.add(0, 1, Binary.fromString("d"));
builder2.add(0, 1, Binary.fromString("e"));
SizeStatistics statistics2 = builder2.build();
statistics1.mergeStatistics(statistics2);
Assert.assertEquals(Optional.of(5L), statistics1.getUnencodedByteArrayDataBytes());
Assert.assertEquals(Arrays.asList(3L, 1L, 1L), statistics1.getRepetitionLevelHistogram());
Assert.assertEquals(Arrays.asList(1L, 3L, 1L), statistics1.getDefinitionLevelHistogram());
}
|
/**
 * Routes the logic table: resolves its physical data nodes under the sharding
 * rule and emits one route unit per node, carrying over the original data
 * nodes into the result context.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    RouteContext routeContext = new RouteContext();
    Collection<DataNode> dataNodes = getDataNodes(shardingRule, shardingRule.getShardingTable(logicTableName));
    routeContext.getOriginalDataNodes().addAll(originalDataNodes);
    for (DataNode dataNode : dataNodes) {
        // Data source maps to itself; the logic table maps to the node's actual table.
        RouteMapper dataSourceMapper = new RouteMapper(dataNode.getDataSourceName(), dataNode.getDataSourceName());
        RouteMapper tableMapper = new RouteMapper(logicTableName, dataNode.getTableName());
        routeContext.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.singleton(tableMapper)));
    }
    return routeContext;
}
|
/**
 * With a database sharding hint of 1, routing must target ds_1 and the actual
 * table t_hint_ds_test_1 in a single route unit.
 */
@Test
void assertRouteByMixedWithHintDataSource() {
    SQLStatementContext sqlStatementContext = mock(SQLStatementContext.class, withSettings().extraInterfaces(TableAvailable.class).defaultAnswer(RETURNS_DEEP_STUBS));
    when(((TableAvailable) sqlStatementContext).getTablesContext().getTableNames()).thenReturn(Collections.singleton("t_hint_ds_test"));
    ShardingStandardRoutingEngine standardRoutingEngine = createShardingStandardRoutingEngine("t_hint_ds_test",
            ShardingRoutingEngineFixtureBuilder.createShardingConditions("t_hint_ds_test"), sqlStatementContext, new HintValueContext());
    // Close the HintManager when done so its thread-local sharding values
    // cannot leak into other tests (the original never released it).
    try (HintManager hintManager = HintManager.getInstance()) {
        hintManager.addDatabaseShardingValue("t_hint_ds_test", 1);
        RouteContext routeContext = standardRoutingEngine.route(ShardingRoutingEngineFixtureBuilder.createMixedShardingRule());
        List<RouteUnit> routeUnits = new ArrayList<>(routeContext.getRouteUnits());
        assertThat(routeContext.getRouteUnits().size(), is(1));
        assertThat(routeUnits.get(0).getDataSourceMapper().getActualName(), is("ds_1"));
        assertThat(routeUnits.get(0).getTableMappers().size(), is(1));
        assertThat(routeUnits.get(0).getTableMappers().iterator().next().getActualName(), is("t_hint_ds_test_1"));
        assertThat(routeUnits.get(0).getTableMappers().iterator().next().getLogicName(), is("t_hint_ds_test"));
    }
}
|
/**
 * Supplies Windows-integrated credentials for the given host when available:
 * if no usable credentials are already configured, returns the current
 * Windows user's SSPI credentials, optionally stripping the DOMAIN\ prefix.
 * Falls back to the disabled configurator otherwise.
 */
@Override
public Credentials configure(final Host host) {
if(WinHttpClients.isWinAuthAvailable()) {
// Only take over when the bookmark's own credentials are incomplete
// (validated without requiring a password).
if(!host.getCredentials().validate(host.getProtocol(), new LoginOptions(host.getProtocol()).password(false))) {
final String nameSamCompatible = CurrentWindowsCredentials.INSTANCE.getName();
final Credentials credentials = new Credentials(host.getCredentials())
.withPassword(CurrentWindowsCredentials.INSTANCE.getPassword());
// SAM-compatible names look like DOMAIN\user; keep only the user part
// unless the domain is explicitly wanted.
if(!includeDomain && StringUtils.contains(nameSamCompatible, '\\')) {
credentials.setUsername(StringUtils.split(nameSamCompatible, '\\')[1]);
}
else {
credentials.setUsername(nameSamCompatible);
}
if(log.isDebugEnabled()) {
log.debug(String.format("Configure %s with username %s", host, credentials));
}
return credentials;
}
}
return CredentialsConfigurator.DISABLED.configure(host);
}
|
@Test
public void testConfigureWindows() {
    // Windows-only: integrated authentication is unavailable on other platforms.
    assumeTrue(Factory.Platform.getDefault().equals(Factory.Platform.Name.windows));
    final Host host = new Host(new TestProtocol());
    final Credentials resolved = new WindowsIntegratedCredentialsConfigurator().configure(host);
    // A distinct credentials object carrying the current Windows user is expected.
    assertNotSame(host.getCredentials(), resolved);
    assertFalse(resolved.getUsername().isEmpty());
}
|
public List<CompactionTask> produce() {
    // Collect all column-family files above level 0, ordered by CF and start key
    // (L0 compaction is left to RocksDB itself).
    List<SstFileMetaData> candidates =
            metadataSupplier.get().stream()
                    .filter(file -> file.level() > 0)
                    .sorted(SST_COMPARATOR)
                    .collect(Collectors.toList());
    LOG.trace("Input files: {}", candidates.size());
    List<CompactionTask> tasks = groupIntoTasks(candidates);
    // Largest tasks first, capped at the configured number of manual compactions.
    tasks.sort(Comparator.comparingInt((CompactionTask task) -> task.files.size()).reversed());
    int taskLimit = Math.min(tasks.size(), settings.maxManualCompactions);
    return tasks.subList(0, taskLimit);
}
|
@Test
void testMaxFileSizeToCompact() {
    // With the max compactable file size set to a single byte, every SST file is
    // filtered out and no compaction tasks are produced.
    assertThat(
            produce(
                    configBuilder().setMaxFileSizeToCompact(new MemorySize(1)).build(),
                    sstBuilder().build()))
            .isEmpty();
}
|
/**
 * Extracts {@code archive} into {@code destination}.
 *
 * <p>Delegates with the third flag set to {@code false}; based on the sibling test this
 * flag presumably enables reproducible timestamps — TODO confirm against the overload.
 *
 * @param archive zip archive to extract
 * @param destination directory to extract into
 * @throws IOException if the archive cannot be read or entries cannot be written
 */
public static void unzip(Path archive, Path destination) throws IOException {
    unzip(archive, destination, false);
}
|
@Test
public void testUnzip_reproducibleTimestampsEnabled() throws URISyntaxException, IOException {
    // The zipfile has only level1/level2/level3/file.txt packaged
    Path zip =
        Paths.get(
            Resources.getResource("plugins-common/test-archives/zip-only-file-packaged.zip")
                .toURI());
    Path target = tempFolder.getRoot().toPath();
    ZipUtil.unzip(zip, target, true);
    // Every intermediate directory receives the fixed, reproducible timestamp.
    for (String dir : new String[] {"level-1", "level-1/level-2", "level-1/level-2/level-3"}) {
      assertThat(Files.getLastModifiedTime(target.resolve(dir)))
          .isEqualTo(FileTime.fromMillis(1000L));
    }
    // The packaged file keeps the modification time recorded in the archive.
    assertThat(Files.getLastModifiedTime(target.resolve("level-1/level-2/level-3/file.txt")))
        .isEqualTo(FileTime.from(Instant.parse("2021-01-29T21:10:02Z")));
}
|
/**
 * Returns reports for all applications matching every filter carried by
 * {@code request}: application types, states, users, queues, tags, name,
 * start/finish time ranges, request scope and result limit. Results are
 * additionally restricted to applications the caller is allowed to view.
 *
 * @param request filters to apply; unset filters match everything
 * @return response holding the matching application reports
 * @throws YarnException if the caller UGI cannot be resolved
 */
@Override
public GetApplicationsResponse getApplications(GetApplicationsRequest request)
    throws YarnException {
  UserGroupInformation callerUGI = getCallerUgi(null,
      AuditConstants.GET_APPLICATIONS_REQUEST);
  Set<String> applicationTypes = getLowerCasedAppTypes(request);
  EnumSet<YarnApplicationState> applicationStates =
      request.getApplicationStates();
  Set<String> users = request.getUsers();
  Set<String> queues = request.getQueues();
  Set<String> tags = request.getApplicationTags();
  long limit = request.getLimit();
  Range<Long> start = request.getStartRange();
  Range<Long> finish = request.getFinishRange();
  ApplicationsRequestScope scope = request.getScope();
  String name = request.getName();
  final Map<ApplicationId, RMApp> apps = rmContext.getRMApps();
  final Set<ApplicationId> runningAppsFilteredByQueues =
      getRunningAppsFilteredByQueues(apps, queues);
  // Resolve each requested queue name to its full path, falling back to the
  // raw name when no path is known. Guard against a request without a queue
  // filter: getQueues() may return null (e.g. a plain newInstance() request),
  // which previously caused an NPE in the loop below.
  Set<String> queuePaths = new HashSet<>();
  if (queues != null) {
    for (String queue : queues) {
      String queuePath = rmAppManager.getQueuePath(queue);
      if (queuePath != null) {
        queuePaths.add(queuePath);
      } else {
        queuePaths.add(queue);
      }
    }
  }
  Iterator<RMApp> appsIter = apps.values().iterator();
  List<ApplicationReport> reports = new ArrayList<ApplicationReport>();
  while (appsIter.hasNext() && reports.size() < limit) {
    RMApp application = appsIter.next();
    // Check if current application falls under the specified scope
    if (scope == ApplicationsRequestScope.OWN &&
        !callerUGI.getUserName().equals(application.getUser())) {
      continue;
    }
    // Queue filter: match either a running app in one of the requested queues
    // or an app whose queue (path) is explicitly listed.
    if (!queuePaths.isEmpty()) {
      if (!runningAppsFilteredByQueues.contains(application.getApplicationId()) &&
          !queuePaths.contains(application.getQueue())) {
        continue;
      }
    }
    if (applicationTypes != null && !applicationTypes.isEmpty()) {
      // Application types are compared case-insensitively.
      String appTypeToMatch =
          StringUtils.toLowerCase(application.getApplicationType());
      if (!applicationTypes.contains(appTypeToMatch)) {
        continue;
      }
    }
    if (applicationStates != null && !applicationStates.isEmpty()) {
      if (!applicationStates.contains(application
          .createApplicationState())) {
        continue;
      }
    }
    if (users != null && !users.isEmpty() &&
        !users.contains(application.getUser())) {
      continue;
    }
    if (start != null && !start.contains(application.getStartTime())) {
      continue;
    }
    if (finish != null && !finish.contains(application.getFinishTime())) {
      continue;
    }
    // Tag filter: any single matching tag is sufficient.
    if (tags != null && !tags.isEmpty()) {
      Set<String> appTags = application.getApplicationTags();
      if (appTags == null || appTags.isEmpty()) {
        continue;
      }
      boolean match = false;
      for (String tag : tags) {
        if (appTags.contains(tag)) {
          match = true;
          break;
        }
      }
      if (!match) {
        continue;
      }
    }
    // checkAccess can grab the scheduler lock so call it last
    boolean allowAccess = checkAccess(callerUGI, application.getUser(),
        ApplicationAccessType.VIEW_APP, application);
    if (scope == ApplicationsRequestScope.VIEWABLE && !allowAccess) {
      continue;
    }
    // Given RM is configured to display apps per user, skip apps to which
    // this caller doesn't have access to view.
    if (filterAppsByUser && !allowAccess) {
      continue;
    }
    if (name != null && !name.equals(application.getName())) {
      continue;
    }
    reports.add(application.createAndGetApplicationReport(
        callerUGI.getUserName(), allowAccess));
  }
  RMAuditLogger.logSuccess(callerUGI.getUserName(),
      AuditConstants.GET_APPLICATIONS_REQUEST, "ClientRMService");
  GetApplicationsResponse response =
      recordFactory.newRecordInstance(GetApplicationsResponse.class);
  response.setApplicationList(reports);
  return response;
}
|
@Test
public void testGetApplications() throws Exception {
  /**
   * 1. Submit 3 applications alternately in two queues
   * 2. Test each of the filters
   */
  // Basic setup
  ResourceScheduler scheduler = mockResourceScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(scheduler, rmContext);
  RMStateStore stateStore = mock(RMStateStore.class);
  when(rmContext.getStateStore()).thenReturn(stateStore);
  doReturn(mock(RMTimelineCollectorManager.class)).when(rmContext)
      .getRMTimelineCollectorManager();
  RMAppManager appManager = new RMAppManager(rmContext, scheduler,
      null, mock(ApplicationACLsManager.class), new Configuration());
  // Events are swallowed; the test drives state purely through the services.
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(
      new EventHandler<Event>() {
        public void handle(Event event) {}
      });
  ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class);
  // Queue ACLs always allow access, so filtering results come from the
  // request filters themselves, not from authorization.
  QueueACLsManager mockQueueACLsManager = mock(QueueACLsManager.class);
  when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),
      any(QueueACL.class), any(RMApp.class), any(),
      any()))
      .thenReturn(true);
  ClientRMService rmService =
      new ClientRMService(rmContext, scheduler, appManager,
          mockAclsManager, mockQueueACLsManager, null);
  rmService.init(new Configuration());
  // Initialize appnames and queues
  String[] queues = {QUEUE_1, QUEUE_2};
  String[] appNames =
      {MockApps.newAppName(), MockApps.newAppName(), MockApps.newAppName()};
  ApplicationId[] appIds =
      {getApplicationId(101), getApplicationId(102), getApplicationId(103)};
  List<String> tags = Arrays.asList("Tag1", "Tag2", "Tag3");
  long[] submitTimeMillis = new long[3];
  // Submit applications
  // App i carries tags[0..i], i.e. app 0 has Tag1; app 2 has Tag1..Tag3.
  for (int i = 0; i < appIds.length; i++) {
    ApplicationId appId = appIds[i];
    when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),
        ApplicationAccessType.VIEW_APP, null, appId)).thenReturn(true);
    SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
        appId, appNames[i], queues[i % queues.length],
        new HashSet<String>(tags.subList(0, i + 1)));
    // make sure each app is submitted at a different time
    Thread.sleep(1);
    rmService.submitApplication(submitRequest);
    submitTimeMillis[i] = rmService.getApplicationReport(
        GetApplicationReportRequest.newInstance(appId))
        .getApplicationReport().getStartTime();
  }
  // Test different cases of ClientRMService#getApplications()
  // NOTE(review): totals of 6 suggest mockRMContext pre-populates 3 more
  // apps besides the 3 submitted here — verify against the helper.
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  assertEquals("Incorrect total number of apps", 6,
      rmService.getApplications(request).getApplicationList().size());
  // Check limit
  request.setLimit(1L);
  assertEquals("Failed to limit applications", 1,
      rmService.getApplications(request).getApplicationList().size());
  // Check start range
  request = GetApplicationsRequest.newInstance();
  request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
  // 2 applications are submitted after first timeMills
  assertEquals("Incorrect number of matching start range",
      2, rmService.getApplications(request).getApplicationList().size());
  // 1 application is submitted after the second timeMills
  request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
  assertEquals("Incorrect number of matching start range",
      1, rmService.getApplications(request).getApplicationList().size());
  // no application is submitted after the third timeMills
  request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
  assertEquals("Incorrect number of matching start range",
      0, rmService.getApplications(request).getApplicationList().size());
  // Check queue
  request = GetApplicationsRequest.newInstance();
  Set<String> queueSet = new HashSet<String>();
  request.setQueues(queueSet);
  queueSet.add(queues[0]);
  assertEquals("Incorrect number of applications in queue", 3,
      rmService.getApplications(request).getApplicationList().size());
  assertEquals("Incorrect number of applications in queue", 3,
      rmService.getApplications(request).getApplicationList().size());
  queueSet.add(queues[1]);
  assertEquals("Incorrect number of applications in queue", 3,
      rmService.getApplications(request).getApplicationList().size());
  // Check user
  request = GetApplicationsRequest.newInstance();
  Set<String> userSet = new HashSet<String>();
  request.setUsers(userSet);
  userSet.add("random-user-name");
  assertEquals("Incorrect number of applications for user", 0,
      rmService.getApplications(request).getApplicationList().size());
  userSet.add(UserGroupInformation.getCurrentUser().getShortUserName());
  assertEquals("Incorrect number of applications for user", 3,
      rmService.getApplications(request).getApplicationList().size());
  // With per-user display enabled and no user filter, all apps are visible.
  rmService.setDisplayPerUserApps(true);
  userSet.clear();
  assertEquals("Incorrect number of applications for user", 6,
      rmService.getApplications(request).getApplicationList().size());
  rmService.setDisplayPerUserApps(false);
  // Check tags
  request = GetApplicationsRequest.newInstance(
      ApplicationsRequestScope.ALL, null, null, null, null, null, null,
      null, null);
  Set<String> tagSet = new HashSet<String>();
  request.setApplicationTags(tagSet);
  assertEquals("Incorrect number of matching tags", 6,
      rmService.getApplications(request).getApplicationList().size());
  tagSet = Sets.newHashSet(tags.get(0));
  request.setApplicationTags(tagSet);
  assertEquals("Incorrect number of matching tags", 3,
      rmService.getApplications(request).getApplicationList().size());
  tagSet = Sets.newHashSet(tags.get(1));
  request.setApplicationTags(tagSet);
  assertEquals("Incorrect number of matching tags", 2,
      rmService.getApplications(request).getApplicationList().size());
  tagSet = Sets.newHashSet(tags.get(2));
  request.setApplicationTags(tagSet);
  assertEquals("Incorrect number of matching tags", 1,
      rmService.getApplications(request).getApplicationList().size());
  // Check scope
  request = GetApplicationsRequest.newInstance(
      ApplicationsRequestScope.VIEWABLE);
  assertEquals("Incorrect number of applications for the scope", 6,
      rmService.getApplications(request).getApplicationList().size());
  request = GetApplicationsRequest.newInstance(
      ApplicationsRequestScope.OWN);
  assertEquals("Incorrect number of applications for the scope", 3,
      rmService.getApplications(request).getApplicationList().size());
}
|
/**
 * Rewrites {@code expression} by delegating to the configured rewriter.
 *
 * @param expression the expression to rewrite
 * @param context    rewrite context passed through to the rewriter
 * @return the rewritten expression, cast to the caller's expected type
 */
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
  // Unchecked cast: presumably the rewriter returns the same expression
  // subtype it was given — TODO confirm this invariant holds for all visitors.
  return (T) rewriter.process(expression, context);
}
|
@Test
public void shouldRewriteSimpleCaseExpression() {
  // Given: each sub-expression of the CASE is mapped to a stubbed rewrite.
  final SimpleCaseExpression original = parseExpression(
      "CASE COL0 WHEN 1 THEN 'ONE' WHEN 2 THEN 'TWO' ELSE 'THREE' END");
  when(processor.apply(original.getOperand(), context)).thenReturn(expr1);
  when(processor.apply(original.getWhenClauses().get(0), context)).thenReturn(when1);
  when(processor.apply(original.getWhenClauses().get(1), context)).thenReturn(when2);
  when(processor.apply(original.getDefaultValue().get(), context)).thenReturn(expr2);
  // When:
  final Expression result = expressionRewriter.rewrite(original, context);
  // Then: operand, when-clauses and default value were all rewritten.
  final Expression expected = new SimpleCaseExpression(
      original.getLocation(),
      expr1,
      ImmutableList.of(when1, when2),
      Optional.of(expr2));
  assertThat(result, equalTo(expected));
}
|
/**
 * Opens the named classpath resource as a character reader decoded with the given charset.
 *
 * @param resource    resource path to open
 * @param charsetName name of the charset used to decode the stream
 * @return a reader over the resource content
 * @throws IOException if the resource cannot be opened, or the charset name is unsupported
 */
public static InputStreamReader getResourceAsReader(String resource, String charsetName) throws IOException {
    java.io.InputStream in = getResourceAsStream(resource);
    try {
        return new InputStreamReader(in, charsetName);
    } catch (java.io.UnsupportedEncodingException e) {
        // The reader was never constructed, so close the stream ourselves to
        // avoid leaking it when an invalid charset name is passed.
        try {
            in.close();
        } catch (IOException ignored) {
            // best effort; the charset failure is the informative error
        }
        throw e;
    }
}
|
@Test
void testGetResourceAsReader() throws IOException {
    // Opening a known test resource as UTF-8 must yield a non-null reader.
    try (Reader propsReader = ResourceUtils.getResourceAsReader("resource_utils_test.properties", "UTF-8")) {
        assertNotNull(propsReader, "reader for existing resource");
    }
}
|
/**
 * Translates outbound {@code Http2*Frame} messages into calls on the HTTP/2 encoder.
 * Non-{@code Http2Frame} messages pass through unchanged; unsupported frame types are
 * released and rejected.
 */
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
    if (msg instanceof Http2DataFrame) {
        Http2DataFrame dataFrame = (Http2DataFrame) msg;
        encoder().writeData(ctx, dataFrame.stream().id(), dataFrame.content(),
                dataFrame.padding(), dataFrame.isEndStream(), promise);
    } else if (msg instanceof Http2HeadersFrame) {
        writeHeadersFrame(ctx, (Http2HeadersFrame) msg, promise);
    } else if (msg instanceof Http2WindowUpdateFrame) {
        Http2WindowUpdateFrame frame = (Http2WindowUpdateFrame) msg;
        Http2FrameStream frameStream = frame.stream();
        // It is legit to send a WINDOW_UPDATE frame for the connection stream. The parent channel doesn't attempt
        // to set the Http2FrameStream so we assume if it is null the WINDOW_UPDATE is for the connection stream.
        try {
            if (frameStream == null) {
                increaseInitialConnectionWindow(frame.windowSizeIncrement());
            } else {
                consumeBytes(frameStream.id(), frame.windowSizeIncrement());
            }
            promise.setSuccess();
        } catch (Throwable t) {
            // Flow-control bookkeeping failed; surface the cause via the promise.
            promise.setFailure(t);
        }
    } else if (msg instanceof Http2ResetFrame) {
        Http2ResetFrame rstFrame = (Http2ResetFrame) msg;
        int id = rstFrame.stream().id();
        // Only ever send a reset frame if stream may have existed before as otherwise we may send a RST on a
        // stream in an invalid state and cause a connection error.
        if (connection().streamMayHaveExisted(id)) {
            encoder().writeRstStream(ctx, rstFrame.stream().id(), rstFrame.errorCode(), promise);
        } else {
            // Release the frame before failing: it never reaches the encoder.
            ReferenceCountUtil.release(rstFrame);
            promise.setFailure(Http2Exception.streamError(
                    rstFrame.stream().id(), Http2Error.PROTOCOL_ERROR, "Stream never existed"));
        }
    } else if (msg instanceof Http2PingFrame) {
        Http2PingFrame frame = (Http2PingFrame) msg;
        encoder().writePing(ctx, frame.ack(), frame.content(), promise);
    } else if (msg instanceof Http2SettingsFrame) {
        encoder().writeSettings(ctx, ((Http2SettingsFrame) msg).settings(), promise);
    } else if (msg instanceof Http2SettingsAckFrame) {
        // In the event of manual SETTINGS ACK, it is assumed the encoder will apply the earliest received but not
        // yet ACKed settings.
        encoder().writeSettingsAck(ctx, promise);
    } else if (msg instanceof Http2GoAwayFrame) {
        writeGoAwayFrame(ctx, (Http2GoAwayFrame) msg, promise);
    } else if (msg instanceof Http2PushPromiseFrame) {
        Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg;
        writePushPromise(ctx, pushPromiseFrame, promise);
    } else if (msg instanceof Http2PriorityFrame) {
        Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg;
        encoder().writePriority(ctx, priorityFrame.stream().id(), priorityFrame.streamDependency(),
                priorityFrame.weight(), priorityFrame.exclusive(), promise);
    } else if (msg instanceof Http2UnknownFrame) {
        Http2UnknownFrame unknownFrame = (Http2UnknownFrame) msg;
        encoder().writeFrame(ctx, unknownFrame.frameType(), unknownFrame.stream().id(),
                unknownFrame.flags(), unknownFrame.content(), promise);
    } else if (!(msg instanceof Http2Frame)) {
        // Non-HTTP/2 messages are forwarded untouched down the pipeline.
        ctx.write(msg, promise);
    } else {
        // An Http2Frame subtype this codec does not handle: release and reject.
        ReferenceCountUtil.release(msg);
        throw new UnsupportedMessageTypeException(msg, SUPPORTED_MESSAGES);
    }
}
|
@Test
public void windowUpdateFrameDecrementsConsumedBytes() throws Exception {
    frameInboundWriter.writeInboundHeaders(3, request, 31, false);
    Http2Connection connection = frameCodec.connection();
    Http2Stream stream = connection.stream(3);
    assertNotNull(stream);
    // Deliver 100 bytes of DATA so the stream accrues unconsumed flow-control bytes.
    frameInboundWriter.writeInboundData(3, Unpooled.buffer(100).writeZero(100), 0, false);
    Http2HeadersFrame headersFrame = inboundHandler.readInbound();
    assertNotNull(headersFrame);
    assertNotNull(headersFrame.stream());
    int unconsumedBefore = connection.local().flowController().unconsumedBytes(stream);
    ChannelFuture writeFuture =
            channel.write(new DefaultHttp2WindowUpdateFrame(100).stream(headersFrame.stream()));
    int unconsumedAfter = connection.local().flowController().unconsumedBytes(stream);
    // Writing the WINDOW_UPDATE must consume exactly the requested number of bytes.
    assertEquals(100, unconsumedBefore - unconsumedAfter);
    assertTrue(writeFuture.isSuccess());
}
|
/**
 * Rounds {@code value} up to the next multiple of {@code alignment}.
 * <p>
 * {@code alignment} must be a positive power of two. The addition may wrap on
 * {@code int} overflow, e.g. aligning {@link Integer#MAX_VALUE} yields
 * {@link Integer#MIN_VALUE}.
 *
 * @param value     the value to align
 * @param alignment the power-of-two alignment boundary
 * @return the smallest multiple of {@code alignment} that is &gt;= {@code value}, modulo 2^32
 */
public static int align(final int value, final int alignment)
{
    // In two's complement -alignment == ~(alignment - 1), so this rounds up
    // then clears the low-order bits below the alignment boundary.
    return (value + alignment - 1) & ~(alignment - 1);
}
|
@Test
void shouldAlignValueToNextMultipleOfAlignment()
{
    final int alignment = CACHE_LINE_LENGTH;
    // Already-aligned values stay put; anything else rounds up to the next multiple.
    assertThat(align(0, alignment), is(0));
    assertThat(align(1, alignment), is(alignment));
    assertThat(align(alignment, alignment), is(alignment));
    assertThat(align(alignment + 1, alignment), is(alignment * 2));
    // Near Integer.MAX_VALUE the addition wraps: the largest exact multiple is
    // preserved, while MAX_VALUE itself overflows to MIN_VALUE.
    final int largestMultiple = MAX_VALUE - (MAX_VALUE % alignment);
    assertThat(align(largestMultiple, alignment), is(largestMultiple));
    assertThat(align(MAX_VALUE, alignment), is(MIN_VALUE));
}
|
/**
 * Computes the set of unload (and, when transfer is enabled, swap) decisions
 * needed to rebalance broker load. The method repeatedly picks the most loaded
 * broker, selects top bundles to move toward the least loaded broker until the
 * offload target throughput is met, and records each move in
 * {@code decisionCache}. Shedding is skipped (with a counted reason) when load
 * data is missing, the load std is within target, or the shedding condition has
 * not been hit often enough.
 *
 * @param recentlyUnloadedBundles bundles to skip because they moved recently
 * @param recentlyUnloadedBrokers brokers excluded from the load stats
 * @return the cached set of unload decisions (possibly empty)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Require the shedding condition to hold for several consecutive runs
        // before acting, to avoid reacting to transient spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Aim to move half of the load gap between the max and min broker,
            // converted to a throughput budget proportional to the max broker's load.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100,
                        targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
                            null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                        }
                        // BUG FIX: the swapped-in traffic must be accounted regardless of
                        // debug logging; previously this update only ran in debug mode,
                        // skewing the offload budget in production runs.
                        trafficMarkedToGain += minBrokerBundleSwapThroughput;
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB,
                            trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Feed the net offloaded load back into the stats so the next
                // iteration picks brokers based on the post-move state.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
|
@Test
public void testEmptyBrokerLoadData() {
    // With no broker load data in the context, shedding must be skipped
    // and recorded under the NoBrokers reason.
    var unloadCounter = new UnloadCounter();
    var shedder = new TransferShedder(unloadCounter);
    var decisions = shedder.findBundlesForUnloading(getContext(), Map.of(), Map.of());
    assertTrue(decisions.isEmpty());
    assertEquals(unloadCounter.getBreakdownCounters().get(Skip).get(NoBrokers).get(), 1);
}
|
/**
 * Sets the tag identifier and returns this instance for fluent chaining.
 *
 * @param id the identifier to assign (may be null)
 * @return this {@code Tag}
 */
public Tag id(Long id) {
    this.id = id;
    return this;
}
|
@Test
public void idTest() {
    // TODO: test id
    // NOTE(review): generated placeholder — should assert that id(Long) stores
    // the given value and returns the same instance for fluent chaining.
}
|
/**
 * Returns the azimuth (heading) of the edge-geometry segment closest to the given
 * point, or {@link Double#NaN} when no segment lies within {@code maxDistance}.
 *
 * @param edgeState   edge whose full way geometry is inspected
 * @param point       query location
 * @param maxDistance maximum allowed distance between point and segment;
 *                    presumably in meters, matching DistanceCalcEarth — TODO confirm
 * @return heading of the closest segment in degrees as produced by
 *         {@code AngleCalc.calcAzimuth}, or NaN if nothing is close enough
 */
static double getHeadingOfGeometryNearPoint(EdgeIteratorState edgeState, GHPoint point, double maxDistance) {
    final DistanceCalc calcDist = DistanceCalcEarth.DIST_EARTH;
    double closestDistance = Double.POSITIVE_INFINITY;
    PointList points = edgeState.fetchWayGeometry(FetchMode.ALL);
    int closestPoint = -1;
    // Iterate over segments (i-1, i) of the way geometry.
    for (int i = 1; i < points.size(); i++) {
        double fromLat = points.getLat(i - 1), fromLon = points.getLon(i - 1);
        double toLat = points.getLat(i), toLon = points.getLon(i);
        // the 'distance' between the point and an edge segment is either the vertical distance to the segment or
        // the distance to the closer one of the two endpoints. here we save one call to calcDist per segment,
        // because each endpoint appears in two segments (except the first and last).
        double distance = calcDist.validEdgeDistance(point.lat, point.lon, fromLat, fromLon, toLat, toLon)
                ? calcDist.calcDenormalizedDist(calcDist.calcNormalizedEdgeDistance(point.lat, point.lon, fromLat, fromLon, toLat, toLon))
                : calcDist.calcDist(fromLat, fromLon, point.lat, point.lon);
        // The very last geometry point would otherwise never be considered as an endpoint.
        if (i == points.size() - 1)
            distance = Math.min(distance, calcDist.calcDist(toLat, toLon, point.lat, point.lon));
        if (distance > maxDistance)
            continue;
        // Strict '<' keeps the first of equally distant segments.
        if (distance < closestDistance) {
            closestDistance = distance;
            closestPoint = i;
        }
    }
    if (closestPoint < 0)
        return Double.NaN;
    double fromLat = points.getLat(closestPoint - 1), fromLon = points.getLon(closestPoint - 1);
    double toLat = points.getLat(closestPoint), toLon = points.getLon(closestPoint);
    return AngleCalc.ANGLE_CALC.calcAzimuth(fromLat, fromLon, toLat, toLon);
}
|
@Test
public void getHeading() {
    // point lies near the single straight edge 0->1; the expected azimuth 131.2°
    // is the bearing of that edge near the point
    GHPoint point = new GHPoint(55.67093, 12.577294);
    BooleanEncodedValue carAccessEnc = new SimpleBooleanEncodedValue("car_access", true);
    DecimalEncodedValue carSpeedEnc = new DecimalEncodedValueImpl("car_speed", 5, 5, false);
    EncodingManager em = EncodingManager.start().add(carAccessEnc).add(carSpeedEnc).build();
    BaseGraph g = new BaseGraph.Builder(em).create();
    EdgeIteratorState edge = g.edge(0, 1);
    g.getNodeAccess().setNode(0, 55.671044, 12.5771583);
    g.getNodeAccess().setNode(1, 55.6704136, 12.5784324);
    // GHUtility.setSpeed(50, 0, carAccessEnc, carSpeedEnc, edge.getFlags());
    assertEquals(131.2, HeadingEdgeFilter.getHeadingOfGeometryNearPoint(edge, point, 20), .1);
}
|
@Override
public Version get() {
    // Resolve the application version from the build properties; when they are
    // unavailable (or carry no version) fall back to DEFAULT_VERSION.
    var buildProps = buildProperties.getIfUnique();
    if (buildProps == null) {
        return Version.valueOf(DEFAULT_VERSION);
    }
    return Version.valueOf(Objects.toString(buildProps.getVersion(), DEFAULT_VERSION));
}
|
@Test
void getWhenBuildPropertiesAndVersionNotEmpty() {
    // plain release version is passed through verbatim
    Properties properties = new Properties();
    properties.put("version", "2.0.0");
    BuildProperties buildProperties = new BuildProperties(properties);
    when(buildPropertiesProvider.getIfUnique()).thenReturn(buildProperties);
    Version version = systemVersionSupplier.get();
    assertThat(version.toString()).isEqualTo("2.0.0");
    // a SNAPSHOT qualifier must survive parsing and be exposed as the pre-release part
    properties.put("version", "2.0.0-SNAPSHOT");
    buildProperties = new BuildProperties(properties);
    when(buildPropertiesProvider.getIfUnique()).thenReturn(buildProperties);
    version = systemVersionSupplier.get();
    assertThat(version.toString()).isEqualTo("2.0.0-SNAPSHOT");
    assertThat(version.getPreReleaseVersion()).isEqualTo("SNAPSHOT");
}
|
/**
 * {@inheritDoc}
 */
@Override
public float floatValue()
{
    // explicit narrowing conversion of the stored integral value
    return (float) value;
}
|
/**
 * Checks the float conversion over a spread of sample integers.
 */
@Override
@Test
void testFloatValue()
{
    int sample = -1000;
    while (sample < 3000)
    {
        assertEquals((float) sample, COSInteger.get(sample).floatValue());
        sample += 200;
    }
}
|
/**
 * @return the field this query sorts by
 */
public String getSort() {
    return sort;
}
|
@Test
public void default_sort_is_by_name() {
    // when no sort is configured explicitly, "name" is the default
    assertThat(underTest.getSort()).isEqualTo("name");
}
|
/**
 * Entry point of the handler-map builder chain; the class argument only pins the
 * key type {@code K} for inference and is otherwise unused.
 *
 * @param type the base class of the keys (used for type inference only)
 * @return a fresh zero-argument builder
 */
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
    return new Builder0<>();
}
|
@Test
public void shouldWorkWithSuppliers2() {
    // Given: a handler registered via a Supplier rather than a direct instance
    handlerMap2 = HandlerMaps.forClass(BaseType.class).withArgTypes(String.class, Integer.class)
        .put(LeafTypeA.class, () -> handler2_1)
        .build();
    // When: the handler is looked up and invoked
    handlerMap2.get(LeafTypeA.class).handle("A", 2, LEAF_A);
    // Then: the supplied handler instance received the call
    verify(handler2_1).handle("A", 2, LEAF_A);
}
|
/**
 * Finds all extensions of the given extension point, across the classpath and
 * every plugin, sorted by their "ordinal" property.
 */
@Override
public <T> List<ExtensionWrapper<T>> find(Class<T> type) {
    log.debug("Finding extensions of extension point '{}'", type.getName());
    List<ExtensionWrapper<T>> extensions = new ArrayList<>();
    // gather extensions contributed by the classpath (pluginId == null) and by each plugin
    for (String pluginId : getEntries().keySet()) {
        extensions.addAll(find(type, pluginId));
    }
    if (extensions.isEmpty()) {
        log.debug("No extensions found for extension point '{}'", type.getName());
    } else {
        log.debug("Found {} extensions for extension point '{}'", extensions.size(), type.getName());
    }
    // order by the "ordinal" property (natural ordering of ExtensionWrapper)
    Collections.sort(extensions);
    return extensions;
}
|
@Test
public void testFindFailType() {
    // a finder whose storages are both empty must yield no extensions
    ExtensionFinder instance = new AbstractExtensionFinder(pluginManager) {
        @Override
        public Map<String, Set<String>> readPluginsStorages() {
            return Collections.emptyMap();
        }
        @Override
        public Map<String, Set<String>> readClasspathStorages() {
            return Collections.emptyMap();
        }
    };
    List<ExtensionWrapper<TestExtension>> list = instance.find(TestExtension.class);
    assertEquals(0, list.size());
}
|
/**
 * Returns the first cluster id recorded in any log directory's meta properties,
 * or empty when none of them carries one.
 */
public Optional<String> clusterId() {
    return logDirProps.values().stream()
        .map(MetaProperties::clusterId)
        .filter(Optional::isPresent)
        .findFirst()
        .orElseGet(Optional::empty);
}
|
@Test
public void testClusterIdForEmpty() {
    // no log directories at all -> no cluster id
    assertEquals(Optional.empty(), EMPTY.clusterId());
}
|
/**
 * Adapts a {@link Callable} into a named {@code Task}.
 *
 * @deprecated retained for binary compatibility; prefer the lambda-based overload
 */
@Deprecated
public static <T> Task<T> callable(final String name, final Callable<? extends T> callable) {
    // wrap in a lambda to adapt the wildcard Callable to the expected functional type
    return Task.callable(name, () -> {
        return callable.call();
    });
}
|
@Test
public void testTimeoutTaskWithError() throws InterruptedException {
    // the wrapped task fails immediately, well before the 2s timeout
    final Exception error = new Exception();
    final Task<String> task = Task.callable("task", () -> {
        throw error;
    } );
    final Task<String> timeoutTask = task.withTimeout(2000, TimeUnit.MILLISECONDS);
    getEngine().run(timeoutTask);
    // We should complete with an error almost immediately
    assertTrue(timeoutTask.await(100, TimeUnit.MILLISECONDS));
    assertTrue(timeoutTask.isFailed());
    // the original error must be propagated, not a timeout exception
    assertEquals(error, timeoutTask.getError());
}
|
/**
 * Variance of the F distribution:
 * 2*nu2^2*(nu1 + nu2 - 2) / (nu1*(nu2 - 2)^2*(nu2 - 4)), defined for nu2 &gt; 4.
 */
@Override
public double variance() {
    final double numerator = 2.0 * nu2 * nu2 * (nu1 + nu2 - 2);
    final double denominator = nu1 * (nu2 - 2) * (nu2 - 2) * (nu2 - 4);
    return numerator / denominator;
}
|
@Test
public void testVariance() {
    System.out.println("variance");
    FDistribution instance = new FDistribution(10, 20);
    // rand() only exercises sampling; variance() is a closed-form property of (10, 20)
    instance.rand();
    assertEquals(0.4320988, instance.variance(), 1E-7);
}
|
/**
 * Creates a mutable {@link HashSet} containing the given values.
 *
 * @param values the elements of the new set; must not be {@code null}
 *               (an empty varargs call yields an empty set)
 * @return a new mutable set holding the values
 */
@SafeVarargs
public static <T> Set<T> newSets(T... values) {
    // The previous guard only ran the null-check when the array was null or empty,
    // which obscured the intent; checking unconditionally is equivalent and clear.
    Assert.notNull(values, "values not is null.");
    return new HashSet<>(Arrays.asList(values));
}
|
@Test
public void newSets() {
    // An explicit empty array must produce an empty set (and must not trip the null guard).
    final Object[] values = {};
    // use the diamond operator instead of the raw HashSet type
    Assert.assertEquals(new HashSet<>(), CollectionKit.newSets(values));
}
|
/**
 * Builds the registry node path for a global rule's versions,
 * e.g. {@code /rules/<ruleName>/versions}.
 *
 * @param ruleName name of the global rule
 * @return versions node path
 */
public static String getGlobalRuleVersionsNode(final String ruleName) {
    return getGlobalRuleRootNode() + "/" + ruleName + "/" + VERSIONS;
}
|
@Test
void assertGetGlobalRuleVersionsNode() {
    // path layout: /rules/<ruleName>/versions
    assertThat(GlobalNode.getGlobalRuleVersionsNode("transaction"), is("/rules/transaction/versions"));
}
|
/**
 * Drains messages from the queue until a poison pill arrives or the thread is
 * interrupted. Each regular message's sender and body are logged.
 */
public void consume() {
    while (true) {
        try {
            var msg = queue.take();
            if (Message.POISON_PILL.equals(msg)) {
                LOGGER.info("Consumer {} receive request to terminate.", name);
                break;
            }
            var sender = msg.getHeader(Headers.SENDER);
            var body = msg.getBody();
            LOGGER.info("Message [{}] from [{}] received by [{}]", body, sender, name);
        } catch (InterruptedException e) {
            // restore the interrupt flag so callers can observe it, then exit
            Thread.currentThread().interrupt();
            LOGGER.error("Exception caught.", e);
            return;
        }
    }
}
|
@Test
void testConsume() throws Exception {
    // messages after the poison pill must never be consumed
    final var messages = List.of(
        createMessage("you", "Hello!"),
        createMessage("me", "Hi!"),
        Message.POISON_PILL,
        createMessage("late_for_the_party", "Hello? Anyone here?")
    );
    final var queue = new SimpleMessageQueue(messages.size());
    for (final var message : messages) {
        queue.put(message);
    }
    new Consumer("NSA", queue).consume();
    // both pre-pill messages were logged, and the termination notice was emitted
    assertTrue(appender.logContains("Message [Hello!] from [you] received by [NSA]"));
    assertTrue(appender.logContains("Message [Hi!] from [me] received by [NSA]"));
    assertTrue(appender.logContains("Consumer NSA receive request to terminate."));
}
|
/**
 * Maps a JCA algorithm string to a cryptographic asset node.
 * Delegation order: the JCA-specific mappers get the first chance; only if none
 * matches is the built-in switch consulted. Unrecognized names still yield a
 * non-empty result: an {@code Algorithm} node tagged as {@code Unknown}.
 *
 * @param str the algorithm name to parse; may be null
 * @param detectionLocation where the algorithm string was detected
 * @return the mapped node, or empty only when {@code str} is null
 */
@Nonnull
@Override
public Optional<? extends INode> parse(
        @Nullable final String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    // specific mappers win over the generic fallback switch below
    for (IMapper mapper : jcaSpecificAlgorithmMappers) {
        Optional<? extends INode> asset = mapper.parse(str, detectionLocation);
        if (asset.isPresent()) {
            return asset;
        }
    }
    // case-insensitive match on the trimmed name
    return switch (str.toUpperCase().trim()) {
        case "PBE", "PBES2" -> Optional.of(new PasswordBasedEncryption(detectionLocation));
        case "DH", "DIFFIEHELLMAN" -> Optional.of(new DH(detectionLocation));
        case "RSA" -> Optional.of(new RSA(detectionLocation));
        case "EC" ->
            Optional.of(new Algorithm(str, PublicKeyEncryption.class, detectionLocation));
        default -> {
            // unknown algorithm: keep the raw name but mark the node as Unknown
            final Algorithm algorithm = new Algorithm(str, Unknown.class, detectionLocation);
            algorithm.put(new Unknown(detectionLocation));
            yield Optional.of(algorithm);
        }
    };
}
|
@Test
void pbe() {
    DetectionLocation testDetectionLocation =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaAlgorithmMapper jcaAlgorithmMapper = new JcaAlgorithmMapper();
    // a full PBE scheme name should be recognized as password-based encryption
    Optional<? extends INode> assetOptional =
        jcaAlgorithmMapper.parse("PBEWithHmacSHA256AndAES", testDetectionLocation);
    assertThat(assetOptional).isPresent();
    assertThat(assetOptional.get().is(PasswordBasedEncryption.class)).isTrue();
}
|
/**
 * Serializes this node into the Thrift plan representation as a FILE_SCAN_NODE
 * bound to the destination tuple, with pipeline load enabled.
 */
@Override
protected void toThrift(TPlanNode planNode) {
    final TFileScanNode fileScanNode = new TFileScanNode(desc.getId().asInt());
    fileScanNode.setEnable_pipeline_load(true);
    planNode.setNode_type(TPlanNodeType.FILE_SCAN_NODE);
    planNode.setFile_scan_node(fileScanNode);
}
|
@Test
public void testSetColumnOfDecimal() {
    Analyzer analyzer = new Analyzer(globalStateMgr, connectContext);
    DescriptorTable descTbl = analyzer.getDescTbl();
    List<Column> columns = getDecimalSchema();
    TupleDescriptor dstDesc = descTbl.createTupleDescriptor("DstTableDesc");
    // materialize one slot per schema column, mirroring its nullability
    for (Column column : columns) {
        SlotDescriptor slot = descTbl.addSlotDescriptor(dstDesc);
        slot.setColumn(column);
        slot.setIsMaterialized(true);
        if (column.isAllowNull()) {
            slot.setIsNullable(true);
        } else {
            slot.setIsNullable(false);
        }
    }
    TDescriptorTable tableDesc = descTbl.toThrift();
    // the third slot is expected to be the decimal column; verify its thrift scalar type
    TSlotDescriptor slotDesc = tableDesc.getSlotDescriptors().get(2);
    TTypeNode typeNode = slotDesc.slotType.getTypes().get(0);
    Assert.assertTrue(typeNode.isSetScalar_type());
    Assert.assertEquals(typeNode.scalar_type.type, TPrimitiveType.DECIMAL128);
    Assert.assertEquals(typeNode.scalar_type.precision, 38);
    Assert.assertEquals(typeNode.scalar_type.scale, 9);
}
|
/**
 * Finds the typed SPI service without extra configuration.
 *
 * @param serviceInterface the SPI interface to look up
 * @param type the type key identifying the implementation
 * @return the matching service, if any
 */
public static <T extends TypedSPI> Optional<T> findService(final Class<T> serviceInterface, final Object type) {
    // delegate to the full overload with an empty property set
    final Properties emptyProps = new Properties();
    return findService(serviceInterface, type, emptyProps);
}
|
@Test
void assertFindServiceWithoutProperties() {
    // lookup is case-insensitive on the type key ("TYPED.FIXTURE")
    assertTrue(TypedSPILoader.findService(TypedSPIFixture.class, "TYPED.FIXTURE").isPresent());
}
|
/**
 * FEEL "meets" built-in: true iff range1's high end and range2's low end are the
 * same point and both boundaries are closed (i.e. range1 ends exactly where range2
 * begins with no gap and no overlap).
 *
 * @param range1 the left range; must not be null
 * @param range2 the right range; must not be null
 * @return true/false result, or an InvalidParametersEvent error on null or
 *         non-comparable endpoints
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "range1" ) Range range1, @ParameterName( "range2" ) Range range2) {
    if ( range1 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be null"));
    }
    if ( range2 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range2", "cannot be null"));
    }
    try {
        // both touching boundaries must be CLOSED and the endpoints must coincide
        boolean result = range1.getHighBoundary() == Range.RangeBoundary.CLOSED &&
                         range2.getLowBoundary() == Range.RangeBoundary.CLOSED &&
                         range1.getHighEndPoint().compareTo(range2.getLowEndPoint()) == 0;
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be compared to range2"));
    }
}
|
@Test
void invokeParamRangeAndRange() {
    // identical ranges do not "meet" (they overlap)
    FunctionTestUtil.assertResult( meetsFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
                                   Boolean.FALSE );
    // overlapping ranges do not meet
    FunctionTestUtil.assertResult( meetsFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "c", "k", Range.RangeBoundary.CLOSED ) ),
                                   Boolean.FALSE );
    // [a..f] meets [f..k]: closed boundaries touching at "f"
    FunctionTestUtil.assertResult( meetsFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ) ),
                                   Boolean.TRUE );
    // an open low boundary on the second range prevents meeting
    FunctionTestUtil.assertResult( meetsFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.OPEN, "g", "k", Range.RangeBoundary.CLOSED ) ),
                                   Boolean.FALSE );
    // "meets" is not symmetric: [f..k] does not meet [a..f]
    FunctionTestUtil.assertResult( meetsFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
                                   Boolean.FALSE );
}
|
/**
 * Two records are equal when their value bytes are element-wise equal and their
 * record contexts are equal. Exact class match is required (no subclass equality).
 */
@Override
public boolean equals(final Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final ContextualRecord other = (ContextualRecord) o;
    return Objects.equals(recordContext, other.recordContext)
        && Arrays.equals(value, other.value);
}
|
@Test
public void testEquals() {
    // distinct but element-wise-equal byte arrays and equal contexts -> equal records
    final byte[] value1 = {1, 2, 3};
    final byte[] value2 = {1, 2, 3};
    final long timestamp = Time.SYSTEM.milliseconds();
    final ProcessorRecordContext context1 = new ProcessorRecordContext(timestamp, 12345L, 0, "test-topic", new RecordHeaders());
    final ProcessorRecordContext context2 = new ProcessorRecordContext(timestamp, 12345L, 0, "test-topic", new RecordHeaders());
    final ContextualRecord record1 = new ContextualRecord(value1, context1);
    final ContextualRecord record2 = new ContextualRecord(value2, context2);
    assertEquals(record1, record2);
}
|
/**
 * Reports whether this listener is interested in the given entity: true when the
 * entity is an instance of any of the registered security config classes.
 */
@Override
public boolean shouldCareAbout(Object entity) {
    final Class<?> entityClass = entity.getClass();
    return securityConfigClasses.stream().anyMatch(clazz -> clazz.isAssignableFrom(entityClass));
}
|
@Test
public void shouldCareAboutPluginRoleConfigChange() {
    // an anonymous listener with no overrides still inherits the class filter
    SecurityConfigChangeListener securityConfigChangeListener = new SecurityConfigChangeListener() {
        @Override
        public void onEntityConfigChange(Object entity) {
        }
    };
    assertThat(securityConfigChangeListener.shouldCareAbout(new PluginRoleConfig()), is(true));
}
|
/**
 * Processes a SAML response and assembles a diagnostic status object: collected
 * errors (processing exception, library errors, last error reason, then mapping
 * errors only if nothing failed earlier), available/mapped attributes, signature
 * and encryption flags, warnings (only when error-free), and a final
 * "success"/"error" status.
 *
 * @param samlResponse the raw (base64) SAML response, used for encryption detection
 * @param auth the OneLogin Auth object wrapping the response
 * @param samlSettings the configured SAML settings
 * @return the populated authentication status, never null
 */
public static SamlAuthenticationStatus getSamlAuthenticationStatus(String samlResponse, Auth auth, SamlSettings samlSettings) {
    SamlAuthenticationStatus samlAuthenticationStatus = new SamlAuthenticationStatus();
    try {
        auth.processResponse();
    } catch (Exception e) {
        // processing failure is recorded as an error, not rethrown
        samlAuthenticationStatus.getErrors().add(e.getMessage());
    }
    samlAuthenticationStatus.getErrors().addAll(auth.getErrors().stream().filter(Objects::nonNull).toList());
    if (auth.getLastErrorReason() != null) {
        samlAuthenticationStatus.getErrors().add(auth.getLastErrorReason());
    }
    // attribute-mapping validation only runs when processing itself succeeded
    if (samlAuthenticationStatus.getErrors().isEmpty()) {
        samlAuthenticationStatus.getErrors().addAll(generateMappingErrors(auth, samlSettings));
    }
    samlAuthenticationStatus.setAvailableAttributes(auth.getAttributes());
    samlAuthenticationStatus.setMappedAttributes(getAttributesMapping(auth, samlSettings));
    samlAuthenticationStatus.setSignatureEnabled(isSignatureEnabled(auth, samlSettings));
    samlAuthenticationStatus.setEncryptionEnabled(isEncryptionEnabled(auth, samlResponse));
    // warnings are suppressed entirely when there are errors
    samlAuthenticationStatus.setWarnings(samlAuthenticationStatus.getErrors().isEmpty() ? generateWarnings(auth, samlSettings) : new ArrayList<>());
    samlAuthenticationStatus.setStatus(samlAuthenticationStatus.getErrors().isEmpty() ? "success" : "error");
    return samlAuthenticationStatus;
}
|
@Test
public void authentication_has_warnings_when_the_private_key_is_invalid_but_auth_completes() {
    setSettings();
    getResponseAttributes().forEach((key, value) -> when(auth.getAttribute(key)).thenReturn(value));
    samlAuthenticationStatus = getSamlAuthenticationStatus(BASE64_SAML_RESPONSE, auth, new SamlSettings(settings.asConfig()));
    // authentication still succeeds, but the invalid key surfaces as a warning
    assertEquals("success", samlAuthenticationStatus.getStatus());
    assertTrue(samlAuthenticationStatus.getErrors().isEmpty());
    assertFalse(samlAuthenticationStatus.getWarnings().isEmpty());
    // String.format with no format arguments was redundant; assert the literal directly
    assertTrue(samlAuthenticationStatus.getWarnings()
        .contains("Error in parsing service provider private key, please make sure that it is in PKCS 8 format."));
}
|
/**
 * Decides whether the predicate allows dropping a whole row group based solely on
 * its column statistics.
 *
 * @param pred the filter predicate; must not be null
 * @param columns the row group's column chunk metadata; must not be null
 * @return true when statistics prove no row can match
 */
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
    Objects.requireNonNull(pred, "pred cannot be null");
    Objects.requireNonNull(columns, "columns cannot be null");
    final StatisticsFilter visitor = new StatisticsFilter(columns);
    return pred.accept(visitor);
}
|
@Test
public void testContainsEqNonNull() {
    // values outside the stats range [10, 100] can be dropped; values inside cannot
    assertTrue(canDrop(contains(eq(intColumn, 9)), columnMetas));
    assertFalse(canDrop(contains(eq(intColumn, 10)), columnMetas));
    assertFalse(canDrop(contains(eq(intColumn, 100)), columnMetas));
    assertTrue(canDrop(contains(eq(intColumn, 101)), columnMetas));
    // drop columns of all nulls when looking for non-null value
    assertTrue(canDrop(contains(eq(intColumn, 0)), nullColumnMetas));
    // without min/max stats we cannot prove anything, so don't drop
    assertFalse(canDrop(contains(eq(intColumn, 50)), missingMinMaxColumnMetas));
}
|
/**
 * Stops a beta (gray) release of a configuration: removes the beta config record,
 * writes a trace entry, and broadcasts a config-change event.
 *
 * @param httpServletRequest the request, used for remote-ip and app-name auditing
 * @param dataId the configuration data id
 * @param group the configuration group
 * @param tenant the tenant namespace; defaults to empty
 * @return success with {@code true}, or a 500 failure if removal throws
 */
@DeleteMapping(params = "beta=true")
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
public RestResult<Boolean> stopBeta(HttpServletRequest httpServletRequest,
        @RequestParam(value = "dataId") String dataId, @RequestParam(value = "group") String group,
        @RequestParam(value = "tenant", required = false, defaultValue = StringUtils.EMPTY) String tenant) {
    String remoteIp = getRemoteIp(httpServletRequest);
    String requestIpApp = RequestUtil.getAppName(httpServletRequest);
    try {
        configInfoBetaPersistService.removeConfigInfo4Beta(dataId, group, tenant);
    } catch (Throwable e) {
        LOGGER.error("remove beta data error", e);
        return RestResultUtils.failed(500, false, "remove beta data error");
    }
    // trace and change-event publication happen only after a successful removal
    ConfigTraceService.logPersistenceEvent(dataId, group, tenant, requestIpApp, System.currentTimeMillis(),
            remoteIp, ConfigTraceService.PERSISTENCE_EVENT_BETA, ConfigTraceService.PERSISTENCE_TYPE_REMOVE, null);
    ConfigChangePublisher.notifyConfigChange(
            new ConfigDataChangeEvent(true, dataId, group, tenant, System.currentTimeMillis()));
    return RestResultUtils.success("stop beta ok", true);
}
|
@Test
void testStopBeta() throws Exception {
    // DELETE with beta=true routes to stopBeta; expect a 200 code and data=true
    MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.delete(Constants.CONFIG_CONTROLLER_PATH).param("beta", "true")
            .param("dataId", "test").param("group", "test").param("tenant", "");
    String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
    String code = JacksonUtils.toObj(actualValue).get("code").toString();
    String data = JacksonUtils.toObj(actualValue).get("data").toString();
    assertEquals("200", code);
    assertEquals("true", data);
}
|
/**
 * Creates a fresh {@link PipelineOptions} proxy implementing the given interface
 * with default values.
 *
 * @param klass the options interface to instantiate
 * @return a new options instance of the requested type
 */
public static <T extends PipelineOptions> T as(Class<T> klass) {
    return new Builder().as(klass);
}
|
@Test
public void testNotAllGettersAnnotatedWithDefault() throws Exception {
    // Initial construction is valid.
    GetterWithDefault options = PipelineOptionsFactory.as(GetterWithDefault.class);
    // the validation failure is deferred until the conversion below
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(
        "Expected getter for property [object] to be marked with @Default on all ["
            + "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$GetterWithDefault, "
            + "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MissingSetter], "
            + "found only on [org.apache.beam.sdk.options."
            + "PipelineOptionsFactoryTest$GetterWithDefault]");
    // When we attempt to convert, we should error at this moment.
    options.as(CombinedObject.class);
}
|
/**
 * Sets the maximum number of permits, blocking until the async operation completes.
 *
 * @param permits the new maximum number of permits
 */
@Override
public void setPermits(int permits) {
    // delegate to the async variant and wait for its result
    get(setPermitsAsync(permits));
}
|
@Test
public void testSetPermits() throws InterruptedException {
    RPermitExpirableSemaphore s = redisson.getPermitExpirableSemaphore("test");
    // setPermits establishes the initial maximum
    s.setPermits(10);
    assertThat(s.getPermits()).isEqualTo(10);
    assertThat(s.availablePermits()).isEqualTo(10);
    assertThat(s.acquiredPermits()).isEqualTo(0);
    // attempts to set available permits fail
    assertThat(s.trySetPermits(15)).isFalse();
    assertThat(s.getPermits()).isEqualTo(10);
    assertThat(s.availablePermits()).isEqualTo(10);
    assertThat(s.acquiredPermits()).isEqualTo(0);
    // attempts to set max permits succeeds
    s.setPermits(15);
    assertThat(s.getPermits()).isEqualTo(15);
    assertThat(s.availablePermits()).isEqualTo(15);
    assertThat(s.acquiredPermits()).isEqualTo(0);
    // setting to existing value succeeds
    s.setPermits(15);
    assertThat(s.getPermits()).isEqualTo(15);
    assertThat(s.availablePermits()).isEqualTo(15);
    assertThat(s.acquiredPermits()).isEqualTo(0);
    // decreasing max permits succeeds
    s.setPermits(5);
    assertThat(s.getPermits()).isEqualTo(5);
    assertThat(s.availablePermits()).isEqualTo(5);
    assertThat(s.acquiredPermits()).isEqualTo(0);
    // changing the max after acquiring permits succeeds
    String acquire1 = s.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
    assertThat(acquire1).isNotNull();
    String acquire2 = s.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
    assertThat(acquire2).isNotNull();
    String acquire3 = s.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
    assertThat(acquire3).isNotNull();
    assertThat(s.getPermits()).isEqualTo(5);
    assertThat(s.availablePermits()).isEqualTo(2);
    assertThat(s.acquiredPermits()).isEqualTo(3);
    // decreasing the max to the number of claimed permits is allowed
    s.setPermits(3);
    assertThat(s.getPermits()).isEqualTo(3);
    assertThat(s.availablePermits()).isEqualTo(0);
    assertThat(s.acquiredPermits()).isEqualTo(3);
    // decreasing the max to below the number of claimed permits is allowed
    // and results in a negative number of available permits
    s.setPermits(2);
    assertThat(s.getPermits()).isEqualTo(2);
    assertThat(s.availablePermits()).isEqualTo(-1);
    assertThat(s.acquiredPermits()).isEqualTo(3);
}
|
/**
 * @return the Kafka version string reported by {@code AppInfoParser}
 */
@Override
public String version() {
    return AppInfoParser.getVersion();
}
|
@Test
public void testTimestampConverterVersionRetrievedFromAppInfoParser() {
    // both key and value transforms must report the AppInfoParser version, and agree
    final String expectedVersion = AppInfoParser.getVersion();
    assertEquals(expectedVersion, xformKey.version());
    assertEquals(expectedVersion, xformValue.version());
    assertEquals(xformKey.version(), xformValue.version());
}
|
/**
 * @return this command's name constant
 */
@Override
public String getCommandName() {
    return COMMAND_NAME;
}
|
/**
 * Verifies that when the primary shell command fails, its registered backup
 * (alternative) command runs, the overall exit code is 0, and only the backup's
 * output ends up in the collected file. The command maps are swapped in via
 * reflection on the superclass fields mCommands / mCommandsAlt.
 */
@Test
public void backupCmdExecuted()
        throws IOException, AlluxioException, NoSuchFieldException, IllegalAccessException {
    CollectAlluxioInfoCommand cmd = new CollectAlluxioInfoCommand(FileSystemContext.create());
    // Write to temp dir
    File targetDir = InfoCollectorTestUtils.createTemporaryDirectory();
    CommandLine mockCommandLine = mock(CommandLine.class);
    String[] mockArgs = new String[]{cmd.getCommandName(), targetDir.getAbsolutePath()};
    when(mockCommandLine.getArgs()).thenReturn(mockArgs);
    when(mockCommandLine.getOptionValue(ArgumentMatchers.eq("output-dir"), ArgumentMatchers.eq("")))
            .thenReturn(targetDir.getAbsolutePath());
    // Replace commands to execute
    Field f = cmd.getClass().getSuperclass().getDeclaredField("mCommands");
    f.setAccessible(true);
    // the primary command exits non-zero so the fallback path is taken
    CollectAlluxioInfoCommand.AlluxioCommand mockCommandFail =
            mock(CollectAlluxioInfoCommand.AlluxioCommand.class);
    when(mockCommandFail.runWithOutput()).thenReturn(
            new CommandReturn(255, "command failed"));
    Map<String, ShellCommand> mockCommandMap = new HashMap<>();
    mockCommandMap.put("mockCommand", mockCommandFail);
    f.set(cmd, mockCommandMap);
    // Replace better command to execute
    Field cb = cmd.getClass().getSuperclass().getDeclaredField("mCommandsAlt");
    cb.setAccessible(true);
    ShellCommand mockCommandBackup = mock(ShellCommand.class);
    when(mockCommandBackup.runWithOutput()).thenReturn(
            new CommandReturn(0, "backup command executed"));
    Map<String, ShellCommand> mockBetterMap = new HashMap<>();
    mockBetterMap.put("mockCommand", mockCommandBackup);
    cb.set(cmd, mockBetterMap);
    // The backup command worked so exit code is 0
    int ret = cmd.run(mockCommandLine);
    assertEquals(0, ret);
    // Verify the 1st option command failed, then backup executed
    verify(mockCommandFail).runWithOutput();
    verify(mockCommandBackup).runWithOutput();
    // Files will be copied to sub-dir of target dir
    File subDir = new File(Paths.get(targetDir.getAbsolutePath(), cmd.getCommandName()).toString());
    assertEquals(new String[]{"collectAlluxioInfo.txt"}, subDir.list());
    // Verify only the better version command output is found
    String fileContent = new String(Files.readAllBytes(subDir.listFiles()[0].toPath()));
    assertTrue(fileContent.contains("backup command executed"));
}
|
/**
 * Builds the effective workflow parameter map for a run by merging, in precedence
 * order: system defaults and workflow-definition params (fresh runs only), the
 * previous instance's params minus reserved ones (restarts only), the request's
 * run params, and finally any user-provided restart params. Placeholders are
 * cleaned up before conversion.
 *
 * @param instance the workflow instance carrying the runtime workflow (and, for
 *                 restarts, the previous params)
 * @param request the run request (fresh-run flag, run params, initiator)
 * @return the merged, cleaned parameter map
 */
public Map<String, Parameter> generateMergedWorkflowParams(
    WorkflowInstance instance, RunRequest request) {
  Workflow workflow = instance.getRuntimeWorkflow();
  Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
  Map<String, ParamDefinition> defaultWorkflowParams =
      defaultParamManager.getDefaultWorkflowParams();
  // merge workflow params for start
  if (request.isFreshRun()) {
    // merge default workflow params
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        defaultWorkflowParams,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request));
    // merge defined workflow params
    if (workflow.getParams() != null) {
      ParamsMergeHelper.mergeParams(
          allParamDefs,
          workflow.getParams(),
          ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request));
    }
  }
  // merge workflow params from previous instance for restart
  if (!request.isFreshRun() && instance.getParams() != null) {
    Map<String, ParamDefinition> previousParamDefs =
        instance.getParams().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition()));
    // remove reserved params, which should be injected again by the system.
    for (String paramName : Constants.RESERVED_PARAM_NAMES) {
      previousParamDefs.remove(paramName);
    }
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        previousParamDefs,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false));
  }
  // merge run params
  if (request.getRunParams() != null) {
    // the source depends on who initiated the run and whether it is a fresh run
    ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        request.getRunParams(),
        ParamsMergeHelper.MergeContext.workflowCreate(source, request));
  }
  // merge user provided restart run params
  getUserRestartParam(request)
      .ifPresent(
          userRestartParams -> {
            ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
            ParamsMergeHelper.mergeParams(
                allParamDefs,
                userRestartParams,
                ParamsMergeHelper.MergeContext.workflowCreate(source, request));
          });
  // cleanup any placeholder params and convert to params
  return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
}
|
@Test
public void testCalculateTimezonesWithTriggersAndTimeInitiator() throws IOException {
    WorkflowDefinition definition =
        loadObject(
            "fixtures/parameters/sample-wf-with-time-triggers.json", WorkflowDefinition.class);
    workflow = definition.getWorkflow();
    workflowInstance.setRuntimeWorkflow(workflow);
    paramsManager = new ParamsManager(defaultsManager);
    Step step = Mockito.mock(Step.class);
    when(step.getType()).thenReturn(StepType.TITUS);
    RunProperties runProperties = new RunProperties();
    runProperties.setOwner(User.builder().name("demo").build());
    workflowInstance.setRunProperties(runProperties);
    // initiator carries its own timezone, distinct from the trigger timezones
    TimeInitiator initiator = new TimeInitiator();
    initiator.setTimezone("Asia/Tokyo");
    RunRequest request =
        RunRequest.builder()
            .initiator(initiator)
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .build();
    Map<String, Parameter> workflowParams =
        paramsManager.generateMergedWorkflowParams(workflowInstance, request);
    paramExtensionRepo.reset(
        Collections.emptyMap(),
        Collections.emptyMap(),
        InstanceWrapper.from(workflowInstance, request));
    paramEvaluator.evaluateWorkflowParameters(workflowParams, workflow.getId());
    paramExtensionRepo.clear();
    // if triggers and initiator both, use first trigger timezone for default tz, and initiator for
    // the workflow tz
    Assert.assertEquals("Asia/Tokyo", workflowParams.get("WORKFLOW_CRON_TIMEZONE").asString());
    Assert.assertEquals("US/Pacific", workflowParams.get("DSL_DEFAULT_TZ").asString());
    Assert.assertEquals("demo", workflowParams.get("owner").asString());
}
|
/**
 * Writes the EOF packet payload. The wire order is fixed by the MySQL protocol:
 * 1-byte header, 2-byte warning count, 2-byte status flags.
 */
@Override
protected void write(final MySQLPacketPayload payload) {
    payload.writeInt1(HEADER);
    payload.writeInt2(warnings);
    payload.writeInt2(statusFlags);
}
|
@Test
void assertWrite() {
    new MySQLEofPacket(MySQLStatusFlag.SERVER_STATUS_AUTOCOMMIT.getValue()).write(payload);
    // protocol order: header byte, warnings (default 0), then status flags
    verify(payload).writeInt1(MySQLEofPacket.HEADER);
    verify(payload).writeInt2(0);
    verify(payload).writeInt2(MySQLStatusFlag.SERVER_STATUS_AUTOCOMMIT.getValue());
}
|
/**
 * Adds a record node to this tree. All records in a tree must share one object
 * type; the first added node fixes that type and any mismatch is rejected.
 *
 * @param node the node to add
 * @return the same node, for chaining
 * @throws IllegalArgumentException when the node's object type differs from the tree's
 */
SObjectNode addNode(final SObjectNode node) {
    final String incomingType = node.getObjectType();
    if (objectType != null && !objectType.equals(incomingType)) {
        throw new IllegalArgumentException(
            "SObjectTree can hold only records of the same type, previously given: " + objectType
                + ", and now trying to add: "
                + incomingType);
    }
    objectType = incomingType;
    records.add(node);
    return node;
}
|
@Test
public void shouldSetReferences() {
    final SObjectTree tree = new SObjectTree();
    final SObjectNode account1 = new SObjectNode(tree, simpleAccount);
    account1.addChild("Contacts", smith);
    account1.addChild("Contacts", evans);
    tree.addNode(account1);
    final SObjectNode account2 = new SObjectNode(tree, simpleAccount2);
    tree.addNode(account2);
    // reference ids are assigned in insertion order: parent, its children, then siblings
    final SObjectNode simpleAccountFromTree = tree.records.get(0);
    assertEquals("ref1", simpleAccountFromTree.getObject().getAttributes().getReferenceId());
    final Iterator<SObjectNode> simpleAccountNodes = simpleAccountFromTree.getChildNodes().iterator();
    assertEquals("ref2", simpleAccountNodes.next().getObject().getAttributes().getReferenceId());
    assertEquals("ref3", simpleAccountNodes.next().getObject().getAttributes().getReferenceId());
    assertEquals("ref4", account2.getObject().getAttributes().getReferenceId());
}
|
/**
 * Creates a scheduled job: validates the cron expression and handler-name
 * uniqueness, persists the job in INIT status, registers it with Quartz, then
 * flips the status to NORMAL.
 *
 * @param createReqVO the job creation request
 * @return the new job's id
 * @throws SchedulerException when Quartz registration fails (transaction rolls back)
 */
@Override
@Transactional(rollbackFor = Exception.class)
public Long createJob(JobSaveReqVO createReqVO) throws SchedulerException {
    validateCronExpression(createReqVO.getCronExpression());
    // validate uniqueness of the handler name
    if (jobMapper.selectByHandlerName(createReqVO.getHandlerName()) != null) {
        throw exception(JOB_HANDLER_EXISTS);
    }
    // insert the job record in INIT status
    JobDO job = BeanUtils.toBean(createReqVO, JobDO.class);
    job.setStatus(JobStatusEnum.INIT.getStatus());
    fillJobMonitorTimeoutEmpty(job);
    jobMapper.insert(job);
    // register the job with Quartz
    schedulerManager.addJob(job.getId(), job.getHandlerName(), job.getHandlerParam(), job.getCronExpression(),
            createReqVO.getRetryCount(), createReqVO.getRetryInterval());
    // update the status to NORMAL now that scheduling succeeded
    JobDO updateObj = JobDO.builder().id(job.getId()).status(JobStatusEnum.NORMAL.getStatus()).build();
    jobMapper.updateById(updateObj);
    // return the new id
    return job.getId();
}
|
@Test
public void testCreateJob_success() throws SchedulerException {
    // prepare the request with an explicit cron expression
    JobSaveReqVO reqVO = randomPojo(JobSaveReqVO.class, o -> o.setCronExpression("0 0/1 * * * ? *"))
            .setId(null);
    // invoke
    Long jobId = jobService.createJob(reqVO);
    // assert the id was generated
    assertNotNull(jobId);
    // verify the persisted record matches the request and ended up in NORMAL status
    JobDO job = jobMapper.selectById(jobId);
    assertPojoEquals(reqVO, job, "id");
    assertEquals(JobStatusEnum.NORMAL.getStatus(), job.getStatus());
    // verify the Quartz registration call
    verify(schedulerManager).addJob(eq(job.getId()), eq(job.getHandlerName()), eq(job.getHandlerParam()),
            eq(job.getCronExpression()), eq(reqVO.getRetryCount()), eq(reqVO.getRetryInterval()));
}
|
/**
 * Swaps repository tuples back into a YAML rule configuration. Types without a
 * {@code RepositoryTupleEntity} annotation are not swappable; leaf entities use
 * the annotation-driven path, others are reconstructed field by field.
 *
 * @param repositoryTuples the tuples to swap from
 * @param toBeSwappedType the target YAML rule configuration class
 * @return the swapped configuration, or empty when the type is not annotated
 */
public Optional<YamlRuleConfiguration> swapToYamlRuleConfiguration(final Collection<RepositoryTuple> repositoryTuples, final Class<? extends YamlRuleConfiguration> toBeSwappedType) {
    RepositoryTupleEntity tupleEntity = toBeSwappedType.getAnnotation(RepositoryTupleEntity.class);
    if (null == tupleEntity) {
        return Optional.empty();
    }
    if (tupleEntity.leaf()) {
        return swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, tupleEntity);
    }
    return swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, getFields(toBeSwappedType));
}
|
@Test
void assertSwapToYamlRuleConfigurationWithoutGlobalLeafYamlRuleConfiguration() {
    // a tuple whose key does not match the leaf entity yields no configuration
    assertFalse(new RepositoryTupleSwapperEngine().swapToYamlRuleConfiguration(Collections.singleton(new RepositoryTuple("invalid", "")), GlobalLeafYamlRuleConfiguration.class).isPresent());
}
|
/**
 * Resolves a class by name, first as given, then prefixed by each candidate
 * package in order.
 *
 * @param packages candidate package prefixes to try; may be null or empty
 * @param className the simple or fully-qualified class name
 * @return the resolved class
 * @throws IllegalStateException when no candidate resolves (cause is the original
 *         ClassNotFoundException for the unprefixed name)
 */
public static Class<?> forName(String[] packages, String className) {
    try {
        return classForName(className);
    } catch (ClassNotFoundException e) {
        if (packages != null) {
            for (String pkg : packages) {
                try {
                    return classForName(pkg + "." + className);
                } catch (ClassNotFoundException ignored) {
                    // keep trying the remaining packages
                }
            }
        }
        throw new IllegalStateException(e.getMessage(), e);
    }
}
|
@Test
void testForName() {
    // resolves the simple class name via the candidate package prefix;
    // the test only checks that no exception is thrown
    ClassUtils.forName(new String[] {"org.apache.dubbo.common.compiler.support"}, "HelloServiceImpl0");
}
|
/**
 * Returns the issues sorted per the query's sort field and direction, or the
 * original list unchanged when either is unset.
 */
public List<IssueDto> sort() {
    final String sortField = query.sort();
    final Boolean ascending = query.asc();
    if (sortField == null || ascending == null) {
        return issues;
    }
    return getIssueProcessor(sortField).sort(issues, ascending);
}
|
@Test
public void should_sort_by_update_date() {
    // three issues with distinct update dates, inserted out of order
    Date date = new Date();
    Date date1 = DateUtils.addDays(date, -3);
    Date date2 = DateUtils.addDays(date, -2);
    Date date3 = DateUtils.addDays(date, -1);
    IssueDto issue1 = new IssueDto().setKee("A").setIssueUpdateDate(date1);
    IssueDto issue2 = new IssueDto().setKee("B").setIssueUpdateDate(date3);
    IssueDto issue3 = new IssueDto().setKee("C").setIssueUpdateDate(date2);
    List<IssueDto> dtoList = newArrayList(issue1, issue2, issue3);
    // descending sort on update date -> newest first
    IssueQuery query = IssueQuery.builder().sort(IssueQuery.SORT_BY_UPDATE_DATE).asc(false).build();
    IssuesFinderSort issuesFinderSort = new IssuesFinderSort(dtoList, query);
    List<IssueDto> result = newArrayList(issuesFinderSort.sort());
    assertThat(result).hasSize(3);
    assertThat(result.get(0).getIssueUpdateDate()).isEqualTo(date3);
    assertThat(result.get(1).getIssueUpdateDate()).isEqualTo(date2);
    assertThat(result.get(2).getIssueUpdateDate()).isEqualTo(date1);
}
|
@Override
public RemoteData.Builder serialize() {
    // NOTE: field order must mirror the corresponding deserialize():
    // longs are value then time bucket; strings are entity id then service id.
    final RemoteData.Builder remoteBuilder = RemoteData.newBuilder();
    remoteBuilder.addDataLongs(getValue());
    remoteBuilder.addDataLongs(getTimeBucket());
    remoteBuilder.addDataStrings(getEntityId());
    remoteBuilder.addDataStrings(getServiceId());
    return remoteBuilder;
}
|
@Test
public void testSerialize() {
    // Round-trip: serialize one function and deserialize into another;
    // all transported fields must survive unchanged.
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), SMALL_VALUE);
    MinFunction function2 = Mockito.spy(MinFunction.class);
    function2.deserialize(function.serialize().build());
    assertThat(function2.getEntityId()).isEqualTo(function.getEntityId());
    assertThat(function2.getTimeBucket()).isEqualTo(function.getTimeBucket());
    assertThat(function2.getServiceId()).isEqualTo(function.getServiceId());
    assertThat(function2.getValue()).isEqualTo(function.getValue());
}
|
/**
 * Copies the contents of this file to the given target path, wrapping any
 * I/O failure (including open/close of the target) with a message that
 * identifies both endpoints.
 */
public void copyTo(FilePath target) throws IOException, InterruptedException {
    try (OutputStream out = target.write()) {
        copyTo(out);
    } catch (IOException e) {
        // Add source/target context before propagating.
        throw new IOException("Failed to copy " + this + " to " + target, e);
    }
}
|
@Test public void copyTo() throws Exception {
    // Copying to a discarding stream must still leave the source file intact on disk.
    File tmp = temp.newFile();
    FilePath f = new FilePath(channels.french, tmp.getPath());
    try (OutputStream out = OutputStream.nullOutputStream()) {
        f.copyTo(out);
    }
    assertTrue("target does not exist", tmp.exists());
    assertTrue("could not delete target " + tmp.getPath(), tmp.delete());
}
|
@Override
public Set<DiscreteResource> values() {
    // Breaks immutability by returning the internal field directly, but this
    // is intentional: the class is transient, so callers won't retain it.
    return values;
}
|
@Test
public void testValues() {
    // values() must expose exactly the resources the instance was built with.
    DiscreteResource res1 = Resources.discrete(DeviceId.deviceId("a")).resource();
    DiscreteResource res2 = Resources.discrete(DeviceId.deviceId("b")).resource();
    DiscreteResources sut = GenericDiscreteResources.of(ImmutableSet.of(res1, res2));
    assertThat(sut.values(), is(ImmutableSet.of(res1, res2)));
}
|
/**
 * Returns whether this filter covers the given one: same pipeline, same
 * stage, and an event that subsumes the other filter's event.
 */
public boolean include(NotificationFilter filter) {
    if (!pipelineName.equals(filter.pipelineName)) {
        return false;
    }
    if (!stageName.equals(filter.stageName)) {
        return false;
    }
    return event.include(filter.event);
}
|
@Test
void filterWithAllEventShouldIncludeOthers() {
    // A filter on StageEvent.All must include a filter for any specific event.
    assertThat(new NotificationFilter("cruise", "dev", StageEvent.All, false).include(
        new NotificationFilter("cruise", "dev", StageEvent.Fixed, false))).isTrue();
}
|
/**
 * Creates an upload (file-drop) share for the given path via the Nextcloud
 * OCS sharing API, optionally protected by a passphrase prompted from the
 * user. A second PUT request fixes the permissions, because the POST that
 * creates the share ignores the permissions parameter.
 */
@Override
public DescriptiveUrl toUploadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException {
    final Host bookmark = session.getHost();
    final StringBuilder request = new StringBuilder(String.format("https://%s%s/apps/files_sharing/api/v1/shares?path=%s&shareType=%d&permissions=%d",
        bookmark.getHostname(), new NextcloudHomeFeature(bookmark).find(NextcloudHomeFeature.Context.ocs).getAbsolute(),
        URIEncoder.encode(PathRelativizer.relativize(NextcloudHomeFeature.Context.files.home(bookmark).find().getAbsolute(), file.getAbsolute())),
        Sharee.world.equals(sharee) ? SHARE_TYPE_PUBLIC_LINK : SHARE_TYPE_USER,
        SHARE_PERMISSIONS_CREATE
    ));
    // Optional passphrase; anonymous login options mean the user may decline.
    final Credentials password = callback.prompt(bookmark,
        LocaleFactory.localizedString("Passphrase", "Cryptomator"),
        MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()),
        new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk()));
    if(password.isPasswordAuthentication()) {
        request.append(String.format("&password=%s", URIEncoder.encode(password.getPassword())));
    }
    final HttpPost resource = new HttpPost(request.toString());
    resource.setHeader("OCS-APIRequest", "true");
    resource.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
    try {
        return session.getClient().execute(resource, new OcsUploadShareResponseHandler() {
            @Override
            public DescriptiveUrl handleEntity(final HttpEntity entity) throws IOException {
                final XmlMapper mapper = new XmlMapper();
                final Share value = mapper.readValue(entity.getContent(), Share.class);
                // Additional request, because permissions are ignored in POST
                final StringBuilder request = new StringBuilder(String.format("https://%s/ocs/v1.php/apps/files_sharing/api/v1/shares/%s?permissions=%d",
                    bookmark.getHostname(),
                    value.data.id,
                    SHARE_PERMISSIONS_CREATE
                ));
                final HttpPut put = new HttpPut(request.toString());
                put.setHeader("OCS-APIRequest", "true");
                put.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
                session.getClient().execute(put, new VoidResponseHandler());
                return super.handleEntity(entity);
            }
        });
    }
    catch(HttpResponseException e) {
        // Map HTTP status failures (e.g. password policy rejection) to domain errors.
        throw new DefaultHttpResponseExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
|
@Test
public void testToUploadUrlPasswordTooShort() throws Exception {
    // Server-side password policy: a 5-character passphrase must be rejected
    // with AccessDeniedException carrying the server's explanation.
    final Path home = new NextcloudHomeFeature(session.getHost()).find();
    final Path folder = new DAVDirectoryFeature(session, new NextcloudAttributesFinderFeature(session)).mkdir(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    try {
        new NextcloudShareFeature(session).toUploadUrl(folder, Share.Sharee.world, null, new DisabledPasswordCallback() {
            @Override
            public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
                return new Credentials(null, new AlphanumericRandomStringService(5).random());
            }
        });
        fail();
    }
    catch(AccessDeniedException e) {
        assertEquals("Password needs to be at least 10 characters long. Please contact your web hosting service provider for assistance.", e.getDetail());
    }
    // Cleanup regardless of assertion outcome above.
    new DAVDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
/** Covariant override so fluent chains on MapSettings keep the subtype. */
@Override
public MapSettings setProperty(String key, String value) {
    return (MapSettings) super.setProperty(key, value);
}
|
@Test
public void all_values_should_be_trimmed_set_property() {
    // Leading/trailing whitespace must be stripped when a property is set.
    Settings settings = new MapSettings();
    settings.setProperty("foo", "  FOO ");
    assertThat(settings.getString("foo")).isEqualTo("FOO");
}
|
@Override
public Messages process(Messages messages) {
    // Time the whole pipeline run; try-with-resources stops the timer.
    try (Timer.Context ignored = executionTime.time()) {
        // Take a consistent snapshot of the pipeline state for this batch.
        final State latestState = stateUpdater.getLatestState();
        if (latestState.enableRuleMetrics()) {
            return process(messages, new RuleMetricsListener(metricRegistry), latestState);
        }
        return process(messages, new NoopInterpreterListener(), latestState);
    }
}
|
@Test
public void testMatchAllContinuesIfAllRulesMatched() {
    // given: a pipeline whose stage 0 ("match all") passes, so stage 1 must run
    final RuleService ruleService = mock(MongoDbRuleService.class);
    when(ruleService.loadAll()).thenReturn(ImmutableList.of(RULE_TRUE, RULE_FALSE, RULE_ADD_FOOBAR));
    final PipelineService pipelineService = mock(MongoDbPipelineService.class);
    when(pipelineService.loadAll()).thenReturn(Collections.singleton(
            PipelineDao.create("p1", "title", "description",
                    "pipeline \"pipeline\"\n" +
                            "stage 0 match all\n" +
                            "    rule \"true\";\n" +
                            "stage 1 match either\n" +
                            "    rule \"add_foobar\";\n" +
                            "end\n",
                    Tools.nowUTC(),
                    null)
    ));
    final Map<String, Function<?>> functions = ImmutableMap.of(SetField.NAME, new SetField());
    final PipelineInterpreter interpreter = createPipelineInterpreter(ruleService, pipelineService, functions);
    // when
    final Messages processed = interpreter.process(messageInDefaultStream("message", "test"));
    // then: stage 1's rule ran and set the "foobar" field
    final List<Message> messages = ImmutableList.copyOf(processed);
    assertThat(messages).hasSize(1);
    final Message actualMessage = messages.get(0);
    assertThat(actualMessage.getFieldAs(String.class, "foobar")).isEqualTo("covfefe");
}
|
/**
 * Converts the source object (e.g. a Map or another bean) to an instance of
 * {@code clazz}, delegating to the three-argument overload with default
 * copy options.
 */
public static <T> T toBean(Object source, Class<T> clazz) {
    return toBean(source, clazz, null);
}
|
@Test
public void mapToBeanWithAliasTest() {
    // Map keys using @Alias names must map onto the aliased bean properties.
    final Map<String, Object> map = MapUtil.newHashMap();
    map.put("aliasSubName", "sub名字");
    map.put("slow", true);
    map.put("is_booleana", "1");
    map.put("is_booleanb", true);
    final SubPersonWithAlias subPersonWithAlias = BeanUtil.toBean(map, SubPersonWithAlias.class);
    assertEquals("sub名字", subPersonWithAlias.getSubName());
    //https://gitee.com/dromara/hutool/issues/I6H0XF
    assertFalse(subPersonWithAlias.isBooleana());
    assertNull(subPersonWithAlias.getBooleanb());
}
|
/** Returns the agent this invoker operates on. */
public Agent agent()
{
    return agent;
}
|
@Test
void shouldReturnAgent()
{
    // The accessor must return the agent supplied at construction.
    assertThat(invoker.agent(), is(mockAgent));
}
|
/**
 * Lists all instances of a service together with their health flag.
 * The key is either {@code namespaceId<connector>serviceName} or a bare
 * service name, in which case the default namespace is assumed. The
 * response JSON carries one {@code "ip_healthy"} string per instance
 * under the {@code "ips"} array.
 */
@RequestMapping("/statuses/{key}")
public ObjectNode listWithHealthStatus(@PathVariable String key) throws NacosException {
    final String namespaceId;
    final String serviceName;
    if (key.contains(UtilsAndCommons.NAMESPACE_SERVICE_CONNECTOR)) {
        // Split once and reuse both segments.
        String[] segments = key.split(UtilsAndCommons.NAMESPACE_SERVICE_CONNECTOR);
        namespaceId = segments[0];
        serviceName = segments[1];
    } else {
        namespaceId = Constants.DEFAULT_NAMESPACE_ID;
        serviceName = key;
    }
    NamingUtils.checkServiceNameFormat(serviceName);
    List<? extends Instance> instances = instanceServiceV2.listAllInstances(namespaceId, serviceName);
    ObjectNode result = JacksonUtils.createEmptyJsonNode();
    ArrayNode statuses = JacksonUtils.createEmptyArrayNode();
    for (Instance instance : instances) {
        statuses.add(instance.toInetAddr() + "_" + instance.isHealthy());
    }
    result.replace("ips", statuses);
    return result;
}
|
@Test
void listWithHealthStatus() throws Exception {
    // Smoke test: the endpoint must answer (non-null body) even for an empty key.
    MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(
                    UtilsAndCommons.DEFAULT_NACOS_NAMING_CONTEXT_V2 + UtilsAndCommons.NACOS_NAMING_INSTANCE_CONTEXT + "/statuses")
            .param("key", "");
    String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
    assertNotNull(actualValue);
}
|
/**
 * Compares two logical schemas, returning a human-readable description of
 * the first mismatch found, or empty when they are compatible. Key columns
 * are checked first and any key mismatch short-circuits the value check.
 */
@VisibleForTesting
static Optional<String> checkSchemas(
    final LogicalSchema schema,
    final LogicalSchema other
) {
    final Optional<String> keyMismatch = checkSchemas(schema.key(), other.key(), "key ")
        .map(msg -> "Key columns must be identical. " + msg);
    return keyMismatch.isPresent()
        ? keyMismatch
        : checkSchemas(schema.columns(), other.columns(), "");
}
|
@Test
public void shouldEnforceNoReorderingKey() {
    // Given: two schemas with identical key columns in different order
    final LogicalSchema someSchema = LogicalSchema.builder()
        .keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER)
        .keyColumn(ColumnName.of("k1"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("f0"), SqlTypes.STRING)
        .build();
    final LogicalSchema otherSchema = LogicalSchema.builder()
        .keyColumn(ColumnName.of("k1"), SqlTypes.BIGINT)
        .keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER)
        .valueColumn(ColumnName.of("f0"), SqlTypes.STRING)
        .build();
    // When:
    final Optional<String> s = StructuredDataSource.checkSchemas(someSchema, otherSchema);
    // Then: reordering alone is rejected with a key-columns message
    assertThat(s.isPresent(), is(true));
    assertThat(s.get(), containsString("The following key columns are changed, missing or reordered: [`k0` INTEGER KEY, `k1` BIGINT KEY]"));
}
|
/**
 * Fetches connections using the proxy's default transaction type, resolved
 * from the global TransactionRule at call time.
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionSize, final ConnectionMode connectionMode) throws SQLException {
    return getConnections(databaseName, dataSourceName, connectionSize, connectionMode,
            ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData().getSingleRule(TransactionRule.class).getDefaultType());
}
|
@Test
void assertGetConnectionsSucceed() throws SQLException {
    // Requesting 5 connections in MEMORY_STRICTLY mode must yield exactly 5.
    List<Connection> actual = new JDBCBackendDataSource().getConnections("schema", String.format(DATA_SOURCE_PATTERN, 1), 5, ConnectionMode.MEMORY_STRICTLY);
    assertThat(actual.size(), is(5));
}
|
/**
 * Reads an integer from the backing byte array in the requested format.
 * Unsigned formats return the raw magnitude; signed formats sign-extend
 * from the format's bit width. LE/BE suffixes select byte order.
 *
 * @param formatType one of the FORMAT_* int constants
 * @param offset     index of the first byte of the value
 * @return the decoded value, or {@code null} when the value would run past
 *         the end of the data or the format is unknown
 */
@Nullable
public Integer getIntValue(@IntFormat final int formatType,
                           @IntRange(from = 0) final int offset) {
    // Bounds check: the whole value must fit inside the data.
    if ((offset + getTypeLen(formatType)) > size()) return null;
    return switch (formatType) {
        case FORMAT_UINT8 -> unsignedByteToInt(mValue[offset]);
        case FORMAT_UINT16_LE -> unsignedBytesToInt(mValue[offset], mValue[offset + 1]);
        case FORMAT_UINT16_BE -> unsignedBytesToInt(mValue[offset + 1], mValue[offset]);
        case FORMAT_UINT24_LE -> unsignedBytesToInt(
                mValue[offset],
                mValue[offset + 1],
                mValue[offset + 2],
                (byte) 0
        );
        case FORMAT_UINT24_BE -> unsignedBytesToInt(
                mValue[offset + 2],
                mValue[offset + 1],
                mValue[offset],
                (byte) 0
        );
        case FORMAT_UINT32_LE -> unsignedBytesToInt(
                mValue[offset],
                mValue[offset + 1],
                mValue[offset + 2],
                mValue[offset + 3]
        );
        case FORMAT_UINT32_BE -> unsignedBytesToInt(
                mValue[offset + 3],
                mValue[offset + 2],
                mValue[offset + 1],
                mValue[offset]
        );
        case FORMAT_SINT8 -> unsignedToSigned(unsignedByteToInt(mValue[offset]), 8);
        case FORMAT_SINT16_LE -> unsignedToSigned(unsignedBytesToInt(mValue[offset],
                mValue[offset + 1]), 16);
        case FORMAT_SINT16_BE -> unsignedToSigned(unsignedBytesToInt(mValue[offset + 1],
                mValue[offset]), 16);
        case FORMAT_SINT24_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset],
                mValue[offset + 1],
                mValue[offset + 2],
                (byte) 0
        ), 24);
        // FIX: read bytes in the same order as FORMAT_UINT24_BE (LSB first
        // argument, zero pad as the high byte). The previous code passed
        // (byte) 0 as the LOW byte, which left the 24-bit value shifted left
        // by 8 bits before the 24-bit sign extension.
        case FORMAT_SINT24_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 2],
                mValue[offset + 1],
                mValue[offset],
                (byte) 0
        ), 24);
        case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset],
                mValue[offset + 1],
                mValue[offset + 2],
                mValue[offset + 3]
        ), 32);
        case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 3],
                mValue[offset + 2],
                mValue[offset + 1],
                mValue[offset]
        ), 32);
        default -> null;
    };
}
|
@Test
public void getValue_UINT16_BE() {
    // Big-endian: 0x67D0 == 26576 (0x67 is the high byte).
    final Data data = new Data(new byte[] { 0x67, (byte) 0xD0 });
    final int value = data.getIntValue(Data.FORMAT_UINT16_BE, 0);
    assertEquals(26576, value);
}
|
/**
 * Replaces emoji aliases (e.g. {@code :smile:}) in the string with their
 * Unicode emoji characters, delegating to {@link EmojiParser}.
 */
public static String toUnicode(String str) {
    return EmojiParser.parseToUnicode(str);
}
|
@Test
public void toUnicodeTest() {
    // The :smile: alias must be replaced with the 😄 emoji character.
    String emoji = EmojiUtil.toUnicode(":smile:");
    assertEquals("😄", emoji);
}
|
/**
 * Registers the SentinelProtectInterceptor constructor for reflective
 * invocation so it survives AOT/native-image processing.
 */
@Override
public void registerHints(RuntimeHints hints, ClassLoader classLoader) {
    final Constructor<SentinelProtectInterceptor> targetConstructor;
    try {
        targetConstructor = SentinelProtectInterceptor.class
                .getConstructor(SentinelRestTemplate.class, RestTemplate.class);
    }
    catch (NoSuchMethodException e) {
        // The constructor is part of this project; absence is a build error.
        throw new RuntimeException(e);
    }
    hints.reflection().registerConstructor(targetConstructor, ExecutableMode.INVOKE);
}
|
@Test
public void shouldRegisterHints() {
    // The hints registrar must register the two-arg constructor for reflection.
    Constructor<SentinelProtectInterceptor> constructor;
    try {
        constructor = SentinelProtectInterceptor.class.getConstructor(SentinelRestTemplate.class, RestTemplate.class);
    }
    catch (NoSuchMethodException e) {
        throw new RuntimeException(e);
    }
    RuntimeHints hints = new RuntimeHints();
    new SentinelProtectInterceptorHints().registerHints(hints, getClass().getClassLoader());
    assertThat(RuntimeHintsPredicates.reflection().onConstructor(constructor)).accepts(hints);
}
|
/**
 * Replaces the vendor-extensions map of this link parameter and returns
 * {@code this} for fluent chaining.
 */
public LinkParameter extensions(java.util.Map<String, Object> extensions) {
    this.extensions = extensions;
    return this;
}
|
@Test
public void testExtensions() {
    // The fluent setter must return the same instance.
    LinkParameter linkParameter = new LinkParameter();
    HashMap<String, Object> hashMap = new HashMap<>();
    // FIX: use distinct keys. The previous version put the key "x-" three
    // times, so the map collapsed to a single entry and the test exercised
    // less than intended.
    hashMap.put("x-foo", "foo");
    hashMap.put("x-bar", "bar");
    hashMap.put("x-baz", "baz");
    Assert.assertEquals(linkParameter.extensions(hashMap), linkParameter);
}
|
/**
 * Handles the "subscribe" control action: registers a subscription taken
 * either from a {@link DynamicRouterControlMessage} body or from the message
 * headers, then replies with the created filter id via the message body.
 */
@InvokeOnHeader(CONTROL_ACTION_SUBSCRIBE)
public void performSubscribe(final Message message, AsyncCallback callback) {
    final String filterId = message.getBody() instanceof DynamicRouterControlMessage
            ? subscribeFromMessage(dynamicRouterControlService, message, false)
            : subscribeFromHeaders(dynamicRouterControlService, message, false);
    message.setBody(filterId);
    // Signal synchronous completion.
    callback.done(false);
}
|
@Test
void performSubscribeActionWithPredicateInBody() {
    // given: subscription parameters in headers and a Predicate instance as body
    String subscribeChannel = "testChannel";
    Map<String, Object> headers = Map.of(
            CONTROL_ACTION_HEADER, CONTROL_ACTION_SUBSCRIBE,
            CONTROL_SUBSCRIBE_CHANNEL, subscribeChannel,
            CONTROL_SUBSCRIPTION_ID, "testId",
            CONTROL_DESTINATION_URI, "mock://test",
            CONTROL_PRIORITY, 10);
    Language language = context.resolveLanguage("simple");
    Predicate predicate = language.createPredicate("true");
    when(message.getBody()).thenReturn(predicate);
    when(message.getHeaders()).thenReturn(headers);
    Mockito.doNothing().when(callback).done(false);
    // when
    producer.performSubscribe(message, callback);
    // then: the predicate instance is passed through to the control service
    Mockito.verify(controlService, Mockito.times(1))
            .subscribeWithPredicateInstance(
                    subscribeChannel, "testId", "mock://test", 10, predicate, false);
}
|
/**
 * Stops the timer previously started for this exchange and removes its
 * context property; logs a warning when no running timer is found.
 */
void handleStop(Exchange exchange, String metricsName) {
    final String propertyName = getPropertyName(metricsName);
    final Timer.Context timerContext = getTimerContextFromExchange(exchange, propertyName);
    if (timerContext == null) {
        LOG.warn("Timer \"{}\" not found", metricsName);
        return;
    }
    timerContext.stop();
    exchange.removeProperty(propertyName);
}
|
@Test
public void testHandleStopContextNotFound() {
    // With no timer context stored, only the lookup happens; nothing is
    // stopped or removed from the exchange.
    when(exchange.getProperty(PROPERTY_NAME, Timer.Context.class)).thenReturn(null);
    producer.handleStop(exchange, METRICS_NAME);
    inOrder.verify(exchange, times(1)).getProperty(PROPERTY_NAME, Timer.Context.class);
    inOrder.verifyNoMoreInteractions();
}
|
/**
 * Creates a new Hadoop {@link Job}, seeded from the given serialized
 * configuration when one is provided.
 *
 * @param conf serialized configuration captured on the master, or {@code null}
 * @return a fresh Job instance
 * @throws IOException if job creation fails
 */
public static Job newJob(@Nullable SerializableConfiguration conf) throws IOException {
    if (conf == null) {
        return Job.getInstance();
    } else {
        // Avoid reading configuration on worker threads: start from an empty
        // Configuration and copy only the entries captured on the master.
        Job job = Job.getInstance(new Configuration(false));
        for (Map.Entry<String, String> entry : conf.get()) {
            job.getConfiguration().set(entry.getKey(), entry.getValue());
        }
        return job;
    }
}
|
@Test
public void testCreateNewJob() throws Exception {
    // Both the null-config and seeded-config paths must produce a Job.
    Job jobFromNull = SerializableConfiguration.newJob(null);
    assertNotNull(jobFromNull);
    Job job = SerializableConfiguration.newJob(DEFAULT_SERIALIZABLE_CONF);
    assertNotNull(job);
}
|
/** Returns a fresh metrics view backed by this rate limiter's state. */
@Override
public Metrics getMetrics() {
    return new AtomicRateLimiterMetrics();
}
|
@Test
public void permissionsInFirstCycle() throws Exception {
    // Just before the first cycle ends, the full per-cycle quota must still
    // be available.
    setup(Duration.ZERO);
    setTimeOnNanos(CYCLE_IN_NANOS - 10);
    RateLimiter.Metrics metrics = rateLimiter.getMetrics();
    int availablePermissions = metrics.getAvailablePermissions();
    then(availablePermissions).isEqualTo(PERMISSIONS_RER_CYCLE);
}
|
/** Returns whether a secure (TLS) connection is configured. */
public boolean isSecured() {
    return secured;
}
|
@Test
public void testDefaultSecured() {
    // Secure connections must be off by default.
    assertFalse(jt400Configuration.isSecured());
}
|
/** Releases the held lock when this component stops. */
@Override
public void stop() {
    lock.unlock();
}
|
@Test
public void unLockWithNoLock() {
    // Stopping (and thus unlocking) without holding the lock must not throw.
    lock.stop();
}
|
/**
 * Converts this summary to its protobuf representation, copying the
 * per-status summaries and the recent-activity, recent-failure and
 * longest-running job lists.
 *
 * @throws IOException if converting a contained JobInfo fails
 */
public alluxio.grpc.JobServiceSummary toProto() throws IOException {
    final alluxio.grpc.JobServiceSummary.Builder builder =
        alluxio.grpc.JobServiceSummary.newBuilder();
    for (StatusSummary summary : mSummaryPerStatus) {
        builder.addSummaryPerStatus(summary.toProto());
    }
    for (JobInfo recent : mRecentActivities) {
        builder.addRecentActivities(recent.toProto());
    }
    for (JobInfo failure : mRecentFailures) {
        builder.addRecentFailures(failure.toProto());
    }
    for (JobInfo running : mLongestRunning) {
        builder.addLongestRunning(running.toProto());
    }
    return builder.build();
}
|
@Test
public void toProto() throws Exception {
    // Round-trip: proto produced from the summary must reconstruct an equal summary.
    final JobServiceSummary summary = new JobServiceSummary(mSummary.toProto());
    assertEquals(mSummary, summary);
}
|
/**
 * Dispatches lifecycle data to the matching event handler; other data
 * types are ignored.
 */
@Override
public void accept(final DataType data) {
    if (data instanceof StartingData starting) {
        handleEvent(starting);
    } else if (data instanceof StoppingData stopping) {
        handleEvent(stopping);
    }
}
|
@Test
void statusRecordsTheStartTime() {
    //given: a StartingData event carrying a fixed timestamp
    final var startTime = LocalDateTime.of(2017, Month.APRIL, 1, 19, 9);
    final var startingData = new StartingData(startTime);
    final var statusMember = new StatusMember(1);
    //when
    statusMember.accept(startingData);
    //then: the member reports that timestamp as its start time
    assertEquals(startTime, statusMember.getStarted());
}
|
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    wrapped().putAll(entries);
    for (final KeyValue<Bytes, byte[]> entry : entries) {
        final byte[] valueAndTimestamp = entry.value;
        // Log the raw value with its embedded timestamp; for tombstones
        // (null value) fall back to the processor context's current timestamp.
        log(entry.key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp));
    }
}
|
@Test
public void shouldWriteAllKeyValueToInnerStoreOnPutAll() {
    // putAll must forward every pair to the wrapped (root) store.
    store.putAll(Arrays.asList(KeyValue.pair(hi, rawThere),
                               KeyValue.pair(hello, rawWorld)));
    assertThat(root.get(hi), equalTo(rawThere));
    assertThat(root.get(hello), equalTo(rawWorld));
}
|
/** Returns the release id (GAV coordinates) of this POM model. */
@Override
public ReleaseId getReleaseId() {
    return releaseId;
}
|
@Test
public void getReleaseId() {
    // The release id must reflect the artifact coordinates of the mocked session.
    final MavenSession mavenSession = mockMavenSession(false);
    final ProjectPomModel pomModel = new ProjectPomModel(mavenSession);
    final ReleaseId releaseId = pomModel.getReleaseId();
    assertReleaseId(releaseId, ARTIFACT_ID);
}
|
/**
 * Creates a formatter for the given number format template string.
 */
public static Formatter forNumbers(@Nonnull String format) {
    return new NumberFormat(format);
}
|
@Test
public void testFillMode() {
    // FM (fill mode) suppresses padding; B blanks zero values.
    check(0, " 0", "-0", "9");
    check(0, "0", "-0", "FM9");
    check(0, " 0 ", "<0>", "B9");
    check(0, "0", "<0>", "FMB9");
    // Roman-numeral group keeps its fixed width unless FM is used.
    Formatter f = forNumbers("999 (RN)");
    check(0, f, "   0 ()              ");
    check(988, f, " 988 (CMLXXXVIII)    ");
}
|
/**
 * Resolves a SoundCloud URL to its numeric id using the widget API.
 * Handles on.soundcloud.com short links (by following the redirect and
 * stripping tracking query parameters), trailing slashes, and m./www.
 * prefixes before querying the resolver endpoint.
 *
 * @param urlString a SoundCloud track/user/playlist URL
 * @return the numeric id as a string
 * @throws ParsingException on redirect, JSON or client-id failures
 */
public static String resolveIdWithWidgetApi(final String urlString) throws IOException,
        ParsingException {
    String fixedUrl = urlString;

    // if URL is an on.soundcloud link, do a request to resolve the redirect
    if (ON_URL_PATTERN.matcher(fixedUrl).find()) {
        try {
            fixedUrl = NewPipe.getDownloader().head(fixedUrl).latestUrl();

            // remove tracking params which are in the query string
            fixedUrl = fixedUrl.split("\\?")[0];
        } catch (final ExtractionException e) {
            throw new ParsingException("Could not follow on.soundcloud.com redirect", e);
        }
    }

    // Remove the tailing slash from URLs due to issues with the SoundCloud API
    if (fixedUrl.charAt(fixedUrl.length() - 1) == '/') {
        fixedUrl = fixedUrl.substring(0, fixedUrl.length() - 1);
    }
    // Make URL lower case and remove m. and www. if it exists.
    // Without doing this, the widget API does not recognize the URL.
    fixedUrl = Utils.removeMAndWWWFromUrl(fixedUrl.toLowerCase());

    final URL url;
    try {
        url = Utils.stringToURL(fixedUrl);
    } catch (final MalformedURLException e) {
        throw new IllegalArgumentException("The given URL is not valid");
    }

    try {
        final String widgetUrl = "https://api-widget.soundcloud.com/resolve?url="
                + Utils.encodeUrlUtf8(url.toString())
                + "&format=json&client_id=" + SoundcloudParsingHelper.clientId();

        final String response = NewPipe.getDownloader().get(widgetUrl,
                SoundCloud.getLocalization()).responseBody();
        final JsonObject o = JsonParser.object().from(response);
        return String.valueOf(JsonUtils.getValue(o, "id"));
    } catch (final JsonParserException e) {
        throw new ParsingException("Could not parse JSON response", e);
    } catch (final ExtractionException e) {
        throw new ParsingException(
                "Could not resolve id with embedded player. ClientId not extracted", e);
    }
}
|
@Test
void resolveIdWithWidgetApiTest() throws Exception {
    // Covers canonical URLs and on.soundcloud.com short links (with and
    // without tracking parameters).
    assertEquals("26057743", SoundcloudParsingHelper.resolveIdWithWidgetApi("https://soundcloud.com/trapcity"));
    assertEquals("16069159", SoundcloudParsingHelper.resolveIdWithWidgetApi("https://soundcloud.com/nocopyrightsounds"));
    assertEquals("26057743", SoundcloudParsingHelper.resolveIdWithWidgetApi("https://on.soundcloud.com/Rr2JyfFcYwbawpw49"));
    assertEquals("1818813498", SoundcloudParsingHelper.resolveIdWithWidgetApi("https://on.soundcloud.com/a8QmYdMnmxnsSTEp9"));
    assertEquals("1468401502", SoundcloudParsingHelper.resolveIdWithWidgetApi("https://on.soundcloud.com/rdt7e"));
}
|
/**
 * Decodes a hex-encoded string into its byte array.
 *
 * @param hexEncodedBinary hex string; must have an even number of digits
 * @return the decoded bytes (two hex digits per byte)
 * @throws IllegalArgumentException on an odd-length string or a non-hex digit
 */
public static byte[] hexStringToByteArray(String hexEncodedBinary) {
    final int length = hexEncodedBinary.length();
    if (length % 2 != 0) {
        throw new IllegalArgumentException(
                "Hex-encoded binary string contains an uneven no. of digits");
    }
    final byte[] decoded = new byte[length / 2];
    for (int pos = 0; pos < length; pos += 2) {
        final char highChar = hexEncodedBinary.charAt(pos);
        final char lowChar = hexEncodedBinary.charAt(pos + 1);
        final int highNibble = Character.digit(highChar, 16);
        final int lowNibble = Character.digit(lowChar, 16);
        if (highNibble == -1 || lowNibble == -1) {
            throw new IllegalArgumentException(
                    "Hex-encoded binary string contains an invalid hex digit in '" + highChar + lowChar + "'");
        }
        decoded[pos / 2] = (byte) ((highNibble << 4) | lowNibble);
    }
    return decoded;
}
|
@Test
public void testLoopBack() throws Exception {
    // Round-trip: decode then re-encode must reproduce the original hex string.
    assertEquals("0f107f8081ff", JOrphanUtils.baToHexString(BinaryTCPClientImpl.hexStringToByteArray("0f107f8081ff")));
}
|
/**
 * Stores the rendered key/value pair in the namespace KV store, attaching
 * TTL metadata and honoring the overwrite flag.
 */
@Override
public VoidOutput run(RunContext runContext) throws Exception {
    String renderedNamespace = runContext.render(this.namespace);
    String renderedKey = runContext.render(this.key);
    // renderTyped preserves the value's native type (map, number, ...) instead of a string.
    Object renderedValue = runContext.renderTyped(this.value);
    KVStore kvStore = runContext.namespaceKv(renderedNamespace);
    kvStore.put(renderedKey, new KVValueAndMetadata(new KVMetadata(ttl), renderedValue), this.overwrite);
    return null;
}
|
@Test
void shouldSetKVGivenTTL() throws Exception {
    // Given: a Set task with a 5-minute TTL and templated key/value
    Set set = Set.builder()
        .id(Set.class.getSimpleName())
        .type(Set.class.getName())
        .key("{{ inputs.key }}")
        .value("{{ inputs.value }}")
        .ttl(Duration.ofMinutes(5))
        .build();

    var value = Map.of("date", Instant.now().truncatedTo(ChronoUnit.MILLIS), "int", 1, "string", "string");
    final RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, set, Map.of(
        "key", TEST_KEY,
        "value", value
    ));

    // When
    set.run(runContext);

    // Then: value stored intact, expiration roughly now + 5 minutes (4..6 min window)
    final KVStore kv = runContext.namespaceKv(runContext.flowInfo().namespace());
    assertThat(kv.getValue(TEST_KEY), is(Optional.of(new KVValue(value))));
    Instant expirationDate = kv.get(TEST_KEY).get().expirationDate();
    assertThat(expirationDate.isAfter(Instant.now().plus(Duration.ofMinutes(4))) && expirationDate.isBefore(Instant.now().plus(Duration.ofMinutes(6))), is(true));
}
|
/** Converts the data table to the given type without transposing it. */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
|
@Test
void convert_to_primitive__single_cell() {
    // A one-cell table must convert directly to the target scalar type.
    DataTable table = parse("| 3 |");
    assertEquals(Integer.valueOf(3), converter.convert(table, Integer.class));
}
|
/**
 * Commits a previously claimed record, making it visible to consumers.
 * Publication happens by flipping the sign of the claimed (negative)
 * length field with an ordered write.
 *
 * @param index index returned by the corresponding claim
 */
public void commit(final int index)
{
    final int recordIndex = computeRecordIndex(index);
    final AtomicBuffer buffer = this.buffer;
    final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);

    // Ordered write guarantees prior payload writes are visible before the
    // length turns positive and consumers start reading the record.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
}
|
@Test
void commitPublishesMessageByInvertingTheLengthValue()
{
    // A claimed record carries a negative length (-19); commit must publish
    // it by writing back the positive length with an ordered store.
    final int index = 32;
    final int recordIndex = index - HEADER_LENGTH;
    when(buffer.getInt(lengthOffset(recordIndex))).thenReturn(-19);

    ringBuffer.commit(index);

    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).getInt(lengthOffset(recordIndex));
    inOrder.verify(buffer).putIntOrdered(lengthOffset(recordIndex), 19);
    inOrder.verifyNoMoreInteractions();
}
|
/**
 * Downloads the page for the given request, using a proxy when a provider
 * is configured. Returns a failed Page (never throws) on I/O errors; the
 * response entity is always consumed so the connection returns to the pool.
 */
@Override
public Page download(Request request, Task task) {
    if (task == null || task.getSite() == null) {
        throw new NullPointerException("task or site can not be null");
    }
    CloseableHttpResponse httpResponse = null;
    CloseableHttpClient httpClient = getHttpClient(task.getSite());
    Proxy proxy = proxyProvider != null ? proxyProvider.getProxy(request, task) : null;
    HttpClientRequestContext requestContext = httpUriRequestConverter.convert(request, task.getSite(), proxy);
    // Pre-build a failure result so the finally block can report it to the proxy provider.
    Page page = Page.fail(request);
    try {
        httpResponse = httpClient.execute(requestContext.getHttpUriRequest(), requestContext.getHttpClientContext());
        // Per-request charset overrides the site-wide default.
        page = handleResponse(request, request.getCharset() != null ? request.getCharset() : task.getSite().getCharset(), httpResponse, task);
        onSuccess(page, task);
        return page;
    } catch (IOException e) {
        onError(page, task, e);
        return page;
    } finally {
        if (httpResponse != null) {
            //ensure the connection is released back to pool
            EntityUtils.consumeQuietly(httpResponse.getEntity());
        }
        if (proxyProvider != null && proxy != null) {
            proxyProvider.returnProxy(proxy, page, task);
        }
    }
}
|
@Test
public void test_disableCookieManagement() throws Exception {
    // With cookie management disabled, the per-request cookie must NOT be
    // sent; the stub only answers "ok" when the cookie header is absent/different.
    HttpServer server = httpServer(13423);
    server.get(not(eq(cookie("cookie"), "cookie-webmagic"))).response("ok");
    Runner.running(server, new Runnable() {
        @Override
        public void run() throws Exception {
            HttpClientDownloader httpClientDownloader = new HttpClientDownloader();
            Request request = new Request();
            request.setUrl("http://127.0.0.1:13423");
            request.addCookie("cookie","cookie-webmagic");
            Page page = httpClientDownloader.download(request, Site.me().setDisableCookieManagement(true).toTask());
            assertThat(page.getRawText()).isEqualTo("ok");
        }
    });
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.