| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * REST endpoint returning a single timeline entity identified by its encoded UID.
 * Responds with JSON; throws BadRequestException for an undecodable UID and
 * NotFoundException when no entity matches.
 */
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("uid") String uId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd) {
String url = req.getRequestURI() +
(req.getQueryString() == null ? "" :
QUERY_STRING_SEP + req.getQueryString());
UserGroupInformation callerUGI =
TimelineReaderWebServicesUtils.getUser(req);
LOG.info("Received URL {} from user {}",
url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
long startTime = Time.monotonicNow();
boolean succeeded = false;
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
// Decode the opaque UID into a full reader context (cluster/app/type/id).
TimelineReaderContext context =
TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
// Enforce read ACLs after the fetch; may throw for unauthorized callers.
checkAccessForGenericEntity(entity, callerUGI);
succeeded = true;
} catch (Exception e) {
// Translates parse/validation failures into a 400 with a hint about
// which query parameters are the usual culprits.
handleException(e, url, startTime, "Either metricslimit or metricstime"
+ " start/end");
} finally {
long latency = Time.monotonicNow() - startTime;
// NOTE(review): this single-entity endpoint records into the
// "GetEntities" latency metric — confirm whether a dedicated
// single-entity metric was intended.
METRICS.addGetEntitiesLatency(latency, succeeded);
LOG.info("Processed URL {} (Took {} ms.)", url, latency);
}
if (entity == null) {
LOG.info("Processed URL {} but entity not found (Took {} ms.)",
url, (Time.monotonicNow() - startTime));
// Fix: add the missing space before "is not found" so the message does
// not render as "uid: Xis not found".
throw new NotFoundException("Timeline entity with uid: " + uId +
" is not found");
}
return entity;
}
|
// Verifies that an entities query whose filters match no stored data returns
// an empty JSON set (not an error and not null).
@Test
void testGetEntitiesNoMatch() throws Exception {
Client client = createClient();
try {
// Every filter below references values absent from the fixture data.
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
"timeline/clusters/cluster1/apps/app1/entities/app?" +
"metricfilters=metric7%20ge%200&isrelatedto=type1:tid1_1:tid1_2," +
"type2:tid2_1%60&relatesto=flow:flow1&eventfilters=event_2,event_4" +
"&infofilters=info2%20eq%203.5&createdtimestart=1425016502030&" +
"createdtimeend=1425016502060");
ClientResponse resp = getResponse(client, uri);
Set<TimelineEntity> entities =
resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
// Content type must still be JSON/UTF-8 and the result an empty set.
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(0, entities.size());
} finally {
client.destroy();
}
}
|
/**
 * Writes a snapshot of the pending-service-message tracker to the publication:
 * first a single PendingMessageTracker record (claimed via tryClaim and
 * retried with the idle strategy until the claim succeeds), then each pending
 * message in the tracker's buffer.
 */
void snapshot(final PendingServiceMessageTracker tracker, final ErrorHandler errorHandler)
{
final int length = MessageHeaderEncoder.ENCODED_LENGTH + PendingMessageTrackerEncoder.BLOCK_LENGTH;
// May repair/advance the session id before encoding; errors are reported
// through the supplied handler.
final long nextServiceSessionId = correctNextServiceSessionId(tracker, errorHandler);
idleStrategy.reset();
while (true)
{
// tryClaim returns a positive position on success; non-positive results
// (back pressure, admin action, ...) are handled by checkResultAndIdle.
final long result = publication.tryClaim(length, bufferClaim);
if (result > 0)
{
pendingMessageTrackerEncoder
.wrapAndApplyHeader(bufferClaim.buffer(), bufferClaim.offset(), messageHeaderEncoder)
.nextServiceSessionId(nextServiceSessionId)
.logServiceSessionId(tracker.logServiceSessionId())
.pendingMessageCapacity(tracker.pendingMessages().size())
.serviceId(tracker.serviceId());
bufferClaim.commit();
break;
}
checkResultAndIdle(result);
}
// Emit every pending message; Integer.MAX_VALUE = no per-call limit.
tracker.pendingMessages().forEach(this, Integer.MAX_VALUE);
}
|
// Verifies the snapshot encodes the corrected session ids after the follower
// has swept (missed) messages: log id advances by one, next id by two.
@Test
void snapshotPendingServiceMessageTrackerWithServiceMessagesMissedByFollower()
{
final int serviceId = 6;
final PendingServiceMessageTracker pendingServiceMessageTracker = new PendingServiceMessageTracker(
serviceId, mock(Counter.class), mock(LogPublisher.class), mock(ClusterClock.class));
final AtomicBuffer headerMessageBuffer = new UnsafeBuffer(new byte[1024]);
final long expectedLogServiceSessionId = pendingServiceMessageTracker.logServiceSessionId() + 1;
final long expectedNextServiceSessionId = expectedLogServiceSessionId + 1;
// Stub tryClaim to always succeed, wrapping the local buffer so the test
// can decode what the snapshot wrote.
when(publication.tryClaim(anyInt(), any())).thenAnswer(
(invocation) ->
{
final int length = invocation.getArgument(0, Integer.class);
final BufferClaim bufferClaim = invocation.getArgument(1, BufferClaim.class);
bufferClaim.wrap(headerMessageBuffer, 0, length + 32);
return (long)length;
});
pendingServiceMessageTracker.sweepFollowerMessages(expectedLogServiceSessionId);
snapshotTaker.snapshot(pendingServiceMessageTracker, mock(ErrorHandler.class));
pendingMessageTrackerDecoder.wrapAndApplyHeader(headerMessageBuffer, HEADER_LENGTH, messageHeaderDecoder);
assertEquals(expectedNextServiceSessionId, pendingMessageTrackerDecoder.nextServiceSessionId());
assertEquals(expectedLogServiceSessionId, pendingMessageTrackerDecoder.logServiceSessionId());
}
|
/**
 * Removes {@code className} from the pre-commit subscriber set of the given
 * port/event type; if that leaves the event type with no subscribers and the
 * port is pending removal, the port itself is removed.
 */
@Override
public void unsubscribePreCommit(String portId, Type eventType,
InstancePortAdminService service, String className) {
store.computeIfPresent(portId, (k, v) -> {
// NOTE(review): returning null from computeIfPresent removes the ENTIRE
// portId mapping when className is null/empty — confirm this is intended
// rather than a no-op for an invalid argument.
if (className == null || className.isEmpty()) {
return null;
}
Objects.requireNonNull(v).computeIfPresent(eventType, (i, j) -> {
Objects.requireNonNull(j).remove(className);
return j;
});
return v;
});
// The last unsubscriber for this event type triggers the deferred removal.
if (subscriberCountByEventType(portId, eventType) == 0) {
InstancePort instPort = service.instancePort(portId);
if (instPort != null && instPort.state() == REMOVE_PENDING) {
service.removeInstancePort(portId);
}
}
}
|
// Unsubscribing one class per port should empty port 1's subscriber sets
// while leaving port 2 with one pre-remove subscriber (two total).
@Test
public void testUnsubscribePreCommit() {
    sampleSubscribe();
    final InstancePortAdminService adminService = new TestInstancePortAdminService();

    // Drop one subscriber from each port for the pre-remove event.
    target.unsubscribePreCommit(PORT_ID_1, OPENSTACK_PORT_PRE_REMOVE, adminService, CLASS_NAME_1);
    target.unsubscribePreCommit(PORT_ID_2, OPENSTACK_PORT_PRE_REMOVE, adminService, CLASS_NAME_2);

    // Port 1 is fully unsubscribed; port 2 still has subscribers left.
    assertEquals(0, target.subscriberCountByEventType(PORT_ID_1, OPENSTACK_PORT_PRE_REMOVE));
    assertEquals(1, target.subscriberCountByEventType(PORT_ID_2, OPENSTACK_PORT_PRE_REMOVE));
    assertEquals(0, target.subscriberCount(PORT_ID_1));
    assertEquals(2, target.subscriberCount(PORT_ID_2));
}
|
/** Drops all planned movement tasks and remaining-work bookkeeping. */
public void clear() {
    // Remaining-work counters first, then the per-broker task indexes; the
    // collections are independent so the order is irrelevant.
    _remainingLeadershipMovements.clear();
    _remainingInterBrokerReplicaMovements.clear();
    _remainingIntraBrokerReplicaMovements.clear();
    _intraPartMoveTasksByBrokerId.clear();
    _interPartMoveTasksByBrokerId.clear();
}
|
// After clear(), a planner loaded with proposals must report no remaining
// leadership or inter-broker replica movements.
@Test
public void testClear() {
    final List<ExecutionProposal> proposals = new ArrayList<>();
    proposals.add(_leaderMovement1);
    proposals.add(_partitionMovement0);

    final ExecutionTaskPlanner planner =
        new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));

    // Build a cluster containing exactly the partitions the proposals touch.
    final Set<PartitionInfo> partitionSet = new HashSet<>();
    partitionSet.add(generatePartitionInfo(_leaderMovement1, false));
    partitionSet.add(generatePartitionInfo(_partitionMovement0, false));
    final Cluster cluster =
        new Cluster(null, _expectedNodes, partitionSet, Collections.emptySet(), Collections.emptySet());
    final StrategyOptions options = new StrategyOptions.Builder(cluster).build();

    planner.addExecutionProposals(proposals, options, null);
    assertEquals(2, planner.remainingLeadershipMovements().size());
    assertEquals(2, planner.remainingInterBrokerReplicaMovements().size());

    planner.clear();
    assertEquals(0, planner.remainingLeadershipMovements().size());
    assertEquals(0, planner.remainingInterBrokerReplicaMovements().size());
}
|
/**
 * Two rectangles are equal when all four edges match bit-for-bit
 * (doubleToLongBits, so NaN == NaN and +0.0 != -0.0 — the equals contract).
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof Rectangle)) {
        return false;
    }
    Rectangle that = (Rectangle) obj;
    return Double.doubleToLongBits(this.left) == Double.doubleToLongBits(that.left)
            && Double.doubleToLongBits(this.top) == Double.doubleToLongBits(that.top)
            && Double.doubleToLongBits(this.right) == Double.doubleToLongBits(that.right)
            && Double.doubleToLongBits(this.bottom) == Double.doubleToLongBits(that.bottom);
}
|
// Exercises equals() against an identical rectangle, four rectangles each
// differing in exactly one edge, an unrelated type, and null.
@Test
public void equalsTest() {
Rectangle rectangle1 = create(1, 2, 3, 4);
Rectangle rectangle2 = create(1, 2, 3, 4);
Rectangle rectangle3 = create(3, 2, 3, 4);
Rectangle rectangle4 = create(1, 4, 3, 4);
Rectangle rectangle5 = create(1, 2, 1, 4);
Rectangle rectangle6 = create(1, 2, 3, 2);
TestUtils.equalsTest(rectangle1, rectangle2);
TestUtils.notEqualsTest(rectangle1, rectangle3);
TestUtils.notEqualsTest(rectangle1, rectangle4);
TestUtils.notEqualsTest(rectangle1, rectangle5);
TestUtils.notEqualsTest(rectangle1, rectangle6);
TestUtils.notEqualsTest(rectangle1, new Object());
TestUtils.notEqualsTest(rectangle1, null);
}
|
/** Returns the file path associated with this exception, or null if none was set. */
public String getFilepath() {
return filepath;
}
|
// The (message, cause) constructor must surface both texts in getMessage(),
// keep the cause, and leave the optional filepath unset.
@Test
public void testConstructorMessageAndThrowable() {
    final Throwable rootCause = new RuntimeException( causeExceptionMessage );
    try {
        throw new KettleFileNotFoundException( errorMessage, rootCause );
    } catch ( KettleFileNotFoundException thrown ) {
        assertTrue( thrown.getMessage().contains( errorMessage ) );
        assertTrue( thrown.getMessage().contains( causeExceptionMessage ) );
        assertEquals( rootCause, thrown.getCause() );
        // No filepath was supplied, so the getter must return null.
        assertEquals( null, thrown.getFilepath() );
    }
}
|
/**
 * Builds a unique-ish test id: the UpperCamel prefix hyphenated plus a
 * 17-digit millisecond UTC timestamp, e.g. "my-test-20240101123045678".
 */
static String createTestId(String prefix) {
    // "testWithUpperCase" -> "test-with-upper-case"
    final String hyphenated =
        CaseFormat.UPPER_CAMEL.converterTo(CaseFormat.LOWER_HYPHEN).convert(prefix);
    // yyyyMMddHHmmssSSS in UTC, always 17 digits.
    final String stamp =
        DateTimeFormatter.ofPattern("yyyyMMddHHmmssSSS")
            .withZone(ZoneId.of("UTC"))
            .format(Instant.now());
    return hyphenated + "-" + stamp;
}
|
// Camel-case prefixes must be hyphenated and suffixed with a 17-digit timestamp.
@Test
public void testCreateTopicNameWithUppercase() {
assertThat(createTestId("testWithUpperCase")).matches("test-with-upper-case-\\d{17}");
}
|
/** Returns this tag's key as stored (presumably normalized at construction — see tests). */
public final String key() {
return key;
}
|
// The Tag constructor should strip surrounding whitespace from the key.
@Test void trimsKey() {
assertThat(new Tag<Object>(" x-foo ") {
@Override protected String parseValue(Object input, TraceContext context) {
return null;
}
}.key()).isEqualTo("x-foo");
}
|
/**
 * Deletes a key from the target namespace's KV store after verifying the
 * current flow is allowed to touch that namespace. Returns whether a value
 * was actually removed; throws when errorOnMissing is set and nothing matched.
 */
@Override
public Output run(RunContext runContext) throws Exception {
String renderedNamespace = runContext.render(this.namespace);
FlowService flowService = ((DefaultRunContext) runContext).getApplicationContext().getBean(FlowService.class);
// NOTE(review): tenantId is passed as both the "from" and "to" tenant —
// confirm that cross-tenant access is intentionally disallowed here.
flowService.checkAllowedNamespace(runContext.tenantId(), renderedNamespace, runContext.tenantId(), runContext.flowInfo().namespace());
String renderedKey = runContext.render(this.key);
boolean deleted = runContext.namespaceKv(renderedNamespace).delete(renderedKey);
if (this.errorOnMissing && !deleted) {
throw new NoSuchElementException("No value found for key '" + renderedKey + "' in namespace '" + renderedNamespace + "' and `errorOnMissing` is set to true");
}
return Output.builder().deleted(deleted).build();
}
|
// Deleting a key that exists must report deleted=true; namespace and key are
// provided as templated inputs to exercise rendering.
@Test
void shouldOutputTrueGivenExistingKey() throws Exception {
// Given
String namespaceId = "io.kestra." + IdUtils.create();
RunContext runContext = this.runContextFactory.of(Map.of(
"flow", Map.of("namespace", namespaceId),
"inputs", Map.of(
"key", TEST_KV_KEY,
"namespace", namespaceId
)
));
Delete delete = Delete.builder()
.id(Delete.class.getSimpleName())
.type(Delete.class.getName())
.namespace("{{ inputs.namespace }}")
.key("{{ inputs.key }}")
.build();
// Seed the store so there is something to delete.
final KVStore kv = runContext.namespaceKv(namespaceId);
kv.put(TEST_KV_KEY, new KVValueAndMetadata(null, "value"));
// When
Delete.Output run = delete.run(runContext);
// Then
assertThat(run.isDeleted(), is(true));
}
|
/**
 * Compresses {@code data} per this container's compression type, encrypts the
 * payload with {@code keys}, and stores the resulting blob and its CRC.
 *
 * Blob layout: [compression byte][length int][payload][revision short, if revision != -1].
 * For BZ2/GZ the payload is prefixed with the 4-byte uncompressed length,
 * which is why {@code length} excludes those 4 bytes.
 */
public void compress(byte[] data, int[] keys) throws IOException
{
OutputStream stream = new OutputStream();
byte[] compressedData;
int length;
switch (compression)
{
case CompressionType.NONE:
compressedData = data;
length = compressedData.length;
break;
case CompressionType.BZ2:
// Prepend the uncompressed size; "length" counts only the compressed bytes.
compressedData = concat(Ints.toByteArray(data.length), BZip2.compress(data));
length = compressedData.length - 4;
break;
case CompressionType.GZ:
compressedData = concat(Ints.toByteArray(data.length), GZip.compress(data));
length = compressedData.length - 4;
break;
default:
throw new RuntimeException("Unknown compression type");
}
// encrypt() semantics are not visible here — presumably a no-op for null keys; TODO confirm.
compressedData = encrypt(compressedData, compressedData.length, keys);
stream.writeByte(compression);
stream.writeInt(length);
stream.writeBytes(compressedData);
if (revision != -1)
{
stream.writeShort(revision);
}
this.data = stream.flip();
Crc32 crc32 = new Crc32();
// The CRC excludes the trailing 2-byte revision when present.
crc32.update(this.data, 0, this.data.length - (revision != -1 ? 2 : 0));
this.crc = crc32.getHash();
}
|
// Round-trip: compress random bytes with GZ + keys, decompress, expect the
// original data back (revision -1 means no trailing revision short).
@Test
public void testCompress() throws IOException
{
int[] keys = new int[]
{
4, 8, 15, 16
};
// Fixed seed keeps the test deterministic.
Random random = new Random(42L);
byte[] data = new byte[1024];
random.nextBytes(data);
Container container = new Container(GZ, -1);
container.compress(data, keys);
byte[] compressedData = container.data;
container = Container.decompress(compressedData, keys);
assertArrayEquals(data, container.data);
}
|
/**
 * Reflectively invokes getContainersFromPreviousAttempts() on the response if
 * the method was resolved; returns its containers, or an empty list when the
 * method is absent, the response is null, the call fails, or the result is
 * null/empty.
 */
@VisibleForTesting
List<Container> getContainersFromPreviousAttemptsUnsafe(final Object response) {
    // Guard clauses: nothing to invoke, or nothing to invoke it on.
    if (!getContainersFromPreviousAttemptsMethod.isPresent() || response == null) {
        return Collections.emptyList();
    }
    try {
        @SuppressWarnings("unchecked")
        final List<Container> containers =
                (List<Container>)
                        getContainersFromPreviousAttemptsMethod.get().invoke(response);
        if (containers != null && !containers.isEmpty()) {
            return containers;
        }
    } catch (Exception e) {
        // Reflection failures are logged, never propagated ("unsafe" by design).
        logger.error("Error invoking 'getContainersFromPreviousAttempts()'", e);
    }
    return Collections.emptyList();
}
|
// When the response type exposes getContainersFromPreviousAttempts(), the
// reflector must call it and return its (non-empty) container list.
@Test
void testCallsGetContainersFromPreviousAttemptsMethodIfPresent() {
final RegisterApplicationMasterResponseReflector
registerApplicationMasterResponseReflector =
new RegisterApplicationMasterResponseReflector(LOG, HasMethod.class);
final List<Container> containersFromPreviousAttemptsUnsafe =
registerApplicationMasterResponseReflector.getContainersFromPreviousAttemptsUnsafe(
new HasMethod());
assertThat(containersFromPreviousAttemptsUnsafe).hasSize(1);
}
|
/** This register service handles the HTTP RPC type. */
@Override
public String rpcType() {
return RpcTypeEnum.HTTP.getName();
}
|
// The divide register service must advertise the HTTP RPC type.
@Test
public void testRpcType() {
assertEquals(RpcTypeEnum.HTTP.getName(), shenyuClientRegisterDivideService.rpcType());
}
|
/**
 * Returns true when {@code filePath} names the console-log artifact
 * (case-insensitive). A null argument yields false rather than an NPE,
 * because the comparison is invoked on the known console path.
 */
public static boolean isConsoleOutput(String filePath) {
    final String consoleLogPath = getConsoleOutputFolderAndFileName();
    return consoleLogPath.equalsIgnoreCase(filePath);
}
|
// An arbitrary artifact name must not be classified as the console log.
@Test
public void shouldNotIdentifyAnyOtherArtifactAsConsoleLog() {
assertThat(isConsoleOutput("artifact"), is(false));
}
|
/**
 * Routes the invocation through the mesh rule cache: for every application
 * with matching route destinations, a subset of invokers is selected and
 * OR-ed into the result. Falls back to the full invoker list when no rules
 * exist or when routing would leave nothing (empty protection).
 */
@Override
protected BitList<Invoker<T>> doRoute(
        BitList<Invoker<T>> invokers,
        URL url,
        Invocation invocation,
        boolean needToPrintMessage,
        Holder<RouterSnapshotNode<T>> nodeHolder,
        Holder<String> messageHolder)
        throws RpcException {
    // Snapshot the cache once so the whole routing pass sees a single,
    // consistent rule version even if the field is replaced concurrently.
    MeshRuleCache<T> ruleCache = this.meshRuleCache;
    if (!ruleCache.containsRule()) {
        if (needToPrintMessage) {
            messageHolder.set("MeshRuleCache has not been built. Skip route.");
        }
        return invokers;
    }
    BitList<Invoker<T>> result = new BitList<>(invokers.getOriginList(), true, invokers.getTailList());
    StringBuilder stringBuilder = needToPrintMessage ? new StringBuilder() : null;
    // loop each application
    for (String appName : ruleCache.getAppList()) {
        // find destination by invocation
        List<DubboRouteDestination> routeDestination =
                getDubboRouteDestination(ruleCache.getVsDestinationGroup(appName), invocation);
        if (routeDestination != null) {
            // aggregate target invokers
            String subset = randomSelectDestination(ruleCache, appName, routeDestination, invokers);
            if (subset != null) {
                // Fix: read subsets from the local snapshot (ruleCache), not the
                // mutable field, so one pass never mixes two cache versions.
                BitList<Invoker<T>> destination = ruleCache.getSubsetInvokers(appName, subset);
                result = result.or(destination);
                if (stringBuilder != null) {
                    stringBuilder
                            .append("Match App: ")
                            .append(appName)
                            .append(" Subset: ")
                            .append(subset)
                            .append(' ');
                }
            }
        }
    }
    // empty protection: an all-filtering rule set must not black-hole traffic
    if (result.isEmpty()) {
        if (needToPrintMessage) {
            messageHolder.set("Empty protection after routed.");
        }
        return invokers;
    }
    if (needToPrintMessage) {
        messageHolder.set(stringBuilder.toString());
    }
    return invokers.and(result);
}
|
// With no mesh rules loaded, routing must pass invokers through unchanged and
// (when tracing) record the "cache not built" skip message.
@Test
void testRoute1() {
StandardMeshRuleRouter<Object> meshRuleRouter = new StandardMeshRuleRouter<>(url);
BitList<Invoker<Object>> invokers =
new BitList<>(Arrays.asList(createInvoker(""), createInvoker("unknown"), createInvoker("app1")));
assertEquals(invokers, meshRuleRouter.route(invokers.clone(), null, null, false, null));
Holder<String> message = new Holder<>();
meshRuleRouter.doRoute(invokers.clone(), null, null, true, null, message);
assertEquals("MeshRuleCache has not been built. Skip route.", message.get());
}
|
/**
 * Exports the pipeline rule identified by the descriptor as a content-pack
 * entity; an unknown rule id yields Optional.empty() (logged at debug).
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final ModelId modelId = entityDescriptor.id();
    try {
        // Load the persisted rule, then convert it to its exportable form.
        final RuleDao dao = ruleService.load(modelId.id());
        final Entity exported = exportNativeEntity(dao, entityDescriptorIds);
        return Optional.of(exported);
    } catch (NotFoundException e) {
        // Missing rules are expected during bulk exports — debug, not error.
        LOG.debug("Couldn't find pipeline rule {}", entityDescriptor, e);
        return Optional.empty();
    }
}
|
// Exporting a known pipeline rule must yield an EntityV1 whose id, type, and
// converted payload (title/description/source) match the fixture.
@Test
@MongoDBFixtures("PipelineRuleFacadeTest.json")
public void collectEntity() {
final EntityDescriptor descriptor = EntityDescriptor.create("5adf25034b900a0fdb4e5338", ModelTypes.PIPELINE_RULE_V1);
final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
final Optional<Entity> collectedEntity = facade.exportEntity(descriptor, entityDescriptorIds);
assertThat(collectedEntity)
.isPresent()
.containsInstanceOf(EntityV1.class);
final EntityV1 entity = (EntityV1) collectedEntity.orElseThrow(AssertionError::new);
assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
assertThat(entity.type()).isEqualTo(ModelTypes.PIPELINE_RULE_V1);
// The entity payload must round-trip back into a PipelineRuleEntity.
final PipelineRuleEntity pipelineRuleEntity = objectMapper.convertValue(entity.data(), PipelineRuleEntity.class);
assertThat(pipelineRuleEntity.title()).isEqualTo(ValueReference.of("debug"));
assertThat(pipelineRuleEntity.description()).isEqualTo(ValueReference.of("Debug"));
assertThat(pipelineRuleEntity.source().asString(Collections.emptyMap())).startsWith("rule \"debug\"\n");
}
|
/**
 * Returns the single trigger for the given job definition, or empty if none
 * exists.
 *
 * We are currently expecting only one trigger per job definition; this will
 * most probably change once scheduler usage is extended.
 * TODO: Don't throw when there is more than one trigger per job definition —
 * that needs a label system to distinguish automatically created triggers
 * (e.g. by event definition) from manually created ones.
 *
 * @throws IllegalStateException when more than one trigger exists
 */
public Optional<JobTriggerDto> getOneForJob(String jobDefinitionId) {
    final List<JobTriggerDto> triggers = getAllForJob(jobDefinitionId);
    switch (triggers.size()) {
        case 0:
            return Optional.empty();
        case 1:
            return Optional.of(triggers.get(0));
        default:
            throw new IllegalStateException("More than one trigger for job definition <" + jobDefinitionId + ">");
    }
}
|
// Covers all getOneForJob() paths: invalid ids (null/blank), a single match,
// no match, and the more-than-one-trigger IllegalStateException.
@Test
@MongoDBFixtures("job-triggers.json")
public void getForJob() {
assertThatCode(() -> dbJobTriggerService.getOneForJob(null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("jobDefinitionId");
assertThatCode(() -> dbJobTriggerService.getOneForJob(""))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("jobDefinitionId");
assertThat(dbJobTriggerService.getOneForJob("54e3deadbeefdeadbeefaff4")).isPresent()
.hasValueSatisfying(trigger -> {
assertThat(trigger.id()).isEqualTo("54e3deadbeefdeadbeef0002");
assertThat(trigger.jobDefinitionId()).isEqualTo("54e3deadbeefdeadbeefaff4");
});
assertThat(dbJobTriggerService.getOneForJob("doesntexist")).isEmpty();
// We expect a ISE when there is more than one trigger for a single job definition
assertThatCode(() -> dbJobTriggerService.getOneForJob("54e3deadbeefdeadbeefaff3"))
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("54e3deadbeefdeadbeefaff3");
}
|
/**
 * Returns a typed collection handle without entity-specific wiring — a thin
 * delegate to {@code getCollection}.
 */
public <T> MongoCollection<T> nonEntityCollection(String collectionName, Class<T> valueType) {
return getCollection(collectionName, valueType);
}
|
// @MongoIgnore should prevent a property from being written to Mongo. But if it's returned from Mongo,
// e.g. because it was calculated by an aggregation, it should be populated in the returned object.
@Test
void testMongoIgnore() {
// Write path: the ignored field must not be persisted.
final MongoCollection<IgnoreTest> collection = collections.nonEntityCollection("ignoreTest", IgnoreTest.class);
collection.insertOne(new IgnoreTest("I should be present", "I should be gone"));
assertThat(collection.find().first()).isEqualTo(new IgnoreTest("I should be present", null));
// Read path: a value already in Mongo (inserted as a raw Document) must
// still be mapped onto the ignored field.
final MongoCollection<Document> rawCollection = collections.nonEntityCollection("alsoIgnoreTest", Document.class);
rawCollection.insertOne(new Document(Map.of(
"ignore_me_not", "I should be present",
"ignore_me", "I sneaked in")));
final MongoCollection<IgnoreTest> collection2 = collections.nonEntityCollection("alsoIgnoreTest", IgnoreTest.class);
assertThat(collection2.find().first()).isEqualTo(new IgnoreTest("I should be present", "I sneaked in"));
}
|
/**
 * Filters the given metadata in place; implementations may add, modify, or
 * remove fields.
 *
 * @throws TikaException if filtering fails
 */
public abstract void filter(Metadata metadata) throws TikaException;
|
// The default filter must be a pass-through: every field survives untouched.
@Test
public void testDefault() throws Exception {
    final Metadata meta = new Metadata();
    meta.set("title", "title");
    meta.set("author", "author");

    final MetadataFilter passThrough = new DefaultMetadataFilter();
    passThrough.filter(meta);

    // Both fields remain, with their original values.
    assertEquals(2, meta.names().length);
    assertEquals("title", meta.get("title"));
    assertEquals("author", meta.get("author"));
}
|
/**
 * Maps a MySQL command packet type to its executor. Each branch casts the
 * generic packet to its concrete type (hence the DataFlowIssue suppression);
 * unrecognized commands get the unsupported-command executor rather than an
 * exception.
 */
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException {
// Log the SQL text only for packets that carry one.
if (commandPacket instanceof SQLReceivedPacket) {
log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
} else {
log.debug("Execute packet type: {}", commandPacketType);
}
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitExecutor();
case COM_INIT_DB:
return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
case COM_FIELD_LIST:
return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
case COM_QUERY:
return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
case COM_PING:
return new MySQLComPingExecutor(connectionSession);
case COM_STMT_PREPARE:
return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
case COM_STMT_EXECUTE:
return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
case COM_STMT_RESET:
return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
case COM_STMT_CLOSE:
return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
case COM_SET_OPTION:
return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionExecutor(connectionSession);
default:
return new MySQLUnsupportedCommandExecutor(commandPacketType);
}
}
|
// COM_STMT_RESET must be dispatched to MySQLComStmtResetExecutor.
@Test
void assertNewInstanceWithComStmtReset() throws SQLException {
assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_STMT_RESET,
mock(MySQLComStmtResetPacket.class), connectionSession), instanceOf(MySQLComStmtResetExecutor.class));
}
|
/** Convenience overload using the default thread multiplier. */
public static int getSuitableThreadCount() {
return getSuitableThreadCount(THREAD_MULTIPLER);
}
|
// NOTE(review): the expected values 4 and 8 appear to assume a fixed base
// (e.g. CPU count mocked or pinned) — verify this test is not hardware-dependent.
@Test
void testGetSuitableThreadCount() {
assertEquals(4, ThreadUtils.getSuitableThreadCount());
assertEquals(8, ThreadUtils.getSuitableThreadCount(3));
}
|
/** Package-private accessor for the id-to-name topic map. */
Map<Uuid, String> topicNames() {
return topicNames;
}
|
// A snapshot built from an empty topic-id map must expose an empty
// id-to-name map rather than null.
@Test
public void testEmptyTopicNamesCacheBuiltFromTopicIds() {
Map<String, Uuid> topicIds = new HashMap<>();
MetadataSnapshot cache = new MetadataSnapshot("clusterId",
Collections.singletonMap(6, new Node(6, "localhost", 2077)),
Collections.emptyList(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
null,
topicIds);
assertEquals(Collections.emptyMap(), cache.topicNames());
}
|
/** Converts this mutable split state back into an immutable {@code MySqlSplit}. */
public abstract MySqlSplit toMySqlSplit();
|
// Setting a high watermark on the mutable state must be reflected when the
// state is converted back into a (value-equal) snapshot split.
@Test
public void testRecordSnapshotSplitState() {
final MySqlSnapshotSplit split =
new MySqlSnapshotSplit(
TableId.parse("test_db.test_table"),
"test_db.test_table-1",
new RowType(
Collections.singletonList(
new RowType.RowField("id", new BigIntType()))),
new Object[] {100L},
new Object[] {999L},
null,
new HashMap<>());
final MySqlSnapshotSplitState mySqlSplitState = new MySqlSnapshotSplitState(split);
mySqlSplitState.setHighWatermark(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000002", 78L));
// Expected split is identical except for the now-set high watermark.
final MySqlSnapshotSplit expected =
new MySqlSnapshotSplit(
TableId.parse("test_db.test_table"),
"test_db.test_table-1",
new RowType(
Collections.singletonList(
new RowType.RowField("id", new BigIntType()))),
new Object[] {100L},
new Object[] {999L},
BinlogOffset.ofBinlogFilePosition("mysql-bin.000002", 78L),
new HashMap<>());
assertEquals(expected, mySqlSplitState.toMySqlSplit());
}
|
/**
 * Builds a JSON-to-Row transform for the given schema that routes unparsable
 * lines to an error output instead of failing the pipeline.
 */
public static JsonToRowWithErrFn withExceptionReporting(Schema rowSchema) {
return JsonToRowWithErrFn.forSchema(rowSchema);
}
|
// With ACCEPT_MISSING_OR_NULL, JSON lacking nullable fields must parse into
// rows with nulls and produce no dead-letter output.
@Test
@Category(NeedsRunner.class)
public void testParsesRowsDeadLetterWithMissingFieldsNoErrors() throws Exception {
PCollection<String> jsonPersons =
pipeline.apply("jsonPersons", Create.of(JSON_PERSON_WITH_IMPLICIT_NULLS));
ParseResult results =
jsonPersons.apply(
JsonToRow.withExceptionReporting(PERSON_SCHEMA_WITH_NULLABLE_FIELD)
.withNullBehavior(NullBehavior.ACCEPT_MISSING_OR_NULL));
PCollection<Row> personRows = results.getResults();
PCollection<Row> errors = results.getFailedToParseLines();
PAssert.that(personRows).containsInAnyOrder(PERSON_ROWS_WITH_NULLS);
PAssert.that(errors).empty();
pipeline.run();
}
|
/**
 * Creates the given topics, returning one future per requested name.
 * Unrepresentable names fail immediately with InvalidTopicException;
 * duplicate names share a single future; only valid, deduplicated topics are
 * sent to the broker (no request at all when none remain).
 */
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
// Fail fast client-side; this name never reaches the broker.
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
// First occurrence wins; later duplicates reuse the same future.
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
// Defensive copy so callers cannot mutate the internal map.
return new CreateTopicsResult(new HashMap<>(topicFutures));
}
|
// With retryOnQuotaViolation enabled, a throttled topic is retried until the
// API timeout elapses, after which its future fails with
// ThrottlingQuotaExceededException (throttle time reset to 0); the other
// topics resolve on the first response.
@Test
public void testCreateTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeOut() throws Exception {
long defaultApiTimeout = 60000;
MockTime time = new MockTime();
try (AdminClientUnitTestEnv env = mockClientEnv(time,
AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, String.valueOf(defaultApiTimeout))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// First response: topic1 succeeds, topic2 throttled, topic3 already exists.
env.kafkaClient().prepareResponse(
expectCreateTopicsRequestWithTopics("topic1", "topic2", "topic3"),
prepareCreateTopicsResponse(1000,
creatableTopicResult("topic1", Errors.NONE),
creatableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
creatableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
// Retry carries only the throttled topic and is throttled again.
env.kafkaClient().prepareResponse(
expectCreateTopicsRequestWithTopics("topic2"),
prepareCreateTopicsResponse(1000,
creatableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
CreateTopicsResult result = env.adminClient().createTopics(
asList(
new NewTopic("topic1", 1, (short) 1),
new NewTopic("topic2", 1, (short) 1),
new NewTopic("topic3", 1, (short) 1)),
new CreateTopicsOptions().retryOnQuotaViolation(true));
// Wait until the prepared attempts have consumed
TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
"Failed awaiting CreateTopics requests");
// Wait until the next request is sent out
TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
"Failed awaiting next CreateTopics request");
// Advance time past the default api timeout to time out the inflight request
time.sleep(defaultApiTimeout + 1);
assertNull(result.values().get("topic1").get());
ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"),
ThrottlingQuotaExceededException.class);
assertEquals(0, e.throttleTimeMs());
TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class);
}
}
|
/**
 * Returns a future over the values of one multimap entry, identified by the
 * encoded key within the tagged multimap. The result is a paging iterable so
 * large value lists are fetched incrementally.
 */
public <T> Future<Iterable<T>> multimapFetchSingleEntryFuture(
ByteString encodedKey, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(Kind.MULTIMAP_SINGLE_ENTRY, encodedTag, stateFamily)
.toBuilder()
.setMultimapKey(encodedKey)
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
}
|
// The fetch must be lazy (no Windmill call until the future is consumed),
// issue exactly the expected single-entry request, and decode both values.
@Test
public void testReadMultimapSingleEntry() throws Exception {
Future<Iterable<Integer>> future =
underTest.multimapFetchSingleEntryFuture(
STATE_MULTIMAP_KEY_1, STATE_KEY_1, STATE_FAMILY, INT_CODER);
// Laziness: creating the future must not touch Windmill yet.
Mockito.verifyNoMoreInteractions(mockWindmill);
Windmill.KeyedGetDataRequest.Builder expectedRequest =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addMultimapsToFetch(
Windmill.TagMultimapFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchEntryNamesOnly(false)
.addEntriesToFetch(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES)
.build()));
Windmill.KeyedGetDataResponse.Builder response =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagMultimaps(
Windmill.TagMultimapFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.addAllValues(Arrays.asList(intData(5), intData(6)))));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
.thenReturn(response.build());
Iterable<Integer> results = future.get();
// Exactly one request, matching the expected shape, and no further calls.
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
assertThat(results, Matchers.containsInAnyOrder(5, 6));
Mockito.verifyNoMoreInteractions(mockWindmill);
assertNoReader(future);
}
|
/** Pretty-prints JSON using the default tab separator for indentation. */
public static String prettyJSON(String json) {
return prettyJSON(json, TAB_SEPARATOR);
}
|
// An empty array nested in an object must still be expanded onto its own lines.
@Test
public void testRenderArrayInObject() throws Exception {
assertEquals("{\n" + TAB + "\"foo\": [\n" + TAB + "]\n}", prettyJSON("{\"foo\":[]}"));
}
|
/** Converts the source object to a bean of the given class using default copy options. */
public static <T> T toBean(Object source, Class<T> clazz) {
return toBean(source, clazz, null);
}
|
// Converting a map to a bean must populate matching fields by name.
@Test
public void mapToBeanTest2() {
final HashMap<String, Object> map = MapUtil.newHashMap();
map.put("name", "Joe");
map.put("age", 12);
// A class with a non-default (parameterized) constructor can still be instantiated successfully.
final Person2 person = BeanUtil.toBean(map, Person2.class, CopyOptions.create());
assertEquals("Joe", person.name);
assertEquals(12, person.age);
}
|
/**
 * Returns the fraction of physical memory currently in use, as
 * 1 - free/total. Note: divides by the reported total without a zero check —
 * relies on the bean manager returning a positive total.
 */
public static float getMem() {
    final double free = OperatingSystemBeanManager.getFreePhysicalMem();
    final double total = OperatingSystemBeanManager.getTotalPhysicalMem();
    return (float) (1 - free / total);
}
|
// With mocked free/total memory, getMem() must return 1 - free/total.
// NOTE(review): assertEquals on a float vs. a double without a delta relies
// on exact representation — consider an epsilon overload.
@Test
public void testGetMem() {
systemBeanManagerMocked.when(() -> OperatingSystemBeanManager.getFreePhysicalMem()).thenReturn(123L);
systemBeanManagerMocked.when(() -> OperatingSystemBeanManager.getTotalPhysicalMem()).thenReturn(2048L);
assertEquals(EnvUtil.getMem(), 1 - ((double) 123L / (double) 2048L));
systemBeanManagerMocked.when(() -> OperatingSystemBeanManager.getFreePhysicalMem()).thenReturn(0L);
assertEquals(EnvUtil.getMem(), 1 - ((double) 0L / (double) 2048L));
}
|
/**
 * Reads a big-endian short from the underlying input.
 * Only the two known blob input kinds are supported.
 *
 * @return the next short value
 * @throws IOException if the underlying input fails
 * @throws UnsupportedOperationException for any other input type
 */
public final short readShort() throws IOException {
    if (input instanceof DataInputStream) {
        return ((DataInputStream) input).readShort();
    }
    if (input instanceof RandomAccessFile) {
        return ((RandomAccessFile) input).readShort();
    }
    throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
|
@Test
public void testReadShort() throws IOException {
    // Both the on-heap stream-backed input and the shared-memory buffer-backed
    // input must decode the same two leading shorts from the blob.
    HollowBlobInput onHeap = HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob);
    assertEquals(1, onHeap.readShort());
    assertEquals(1, onHeap.readShort());
    HollowBlobInput sharedMemory = HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob);
    assertEquals(1, sharedMemory.readShort());
    assertEquals(1, sharedMemory.readShort());
}
|
@Override
public boolean test(Pair<Point, Point> pair) {
    // Both constraints must hold; vertical is checked first, short-circuiting
    // the horizontal check exactly as the conjunction would.
    if (!testVertical(pair)) {
        return false;
    }
    return testHorizontal(pair);
}
|
@Test
public void testVertSeparation_allowMissingData() {
    Point p1 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1000.0)).build();
    Point p2 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1000.0)).build();
    Point p3 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1500.0)).build();
    Point p4 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).build();
    double MAX_HORIZ_SEPARATION_IN_FT = 1000;
    double MAX_VERT_SEPARATION = 500;
    // allowMissingData = true: points lacking altitude must pass the filter.
    CylindricalFilter filter = new CylindricalFilter(MAX_HORIZ_SEPARATION_IN_FT, MAX_VERT_SEPARATION, true);
    assertTrue(filter.test(Pair.of(p1, p1)), "A point is in the same cylinder with itself");
    assertTrue(filter.test(Pair.of(p1, p2)), "Identical points share the same cylinder");
    assertTrue(filter.test(Pair.of(p1, p3)), "These points are 500ft apart");
    assertTrue(filter.test(Pair.of(p1, p4)), "These points have no altitude data");
}
|
/**
 * Runs aggregate analysis over the final projection of a grouped query.
 *
 * @param analysis        the immutable query analysis; must contain a GROUP BY
 * @param finalProjection the select expressions of the final projection
 * @return the aggregate analysis result
 * @throws IllegalArgumentException when the query has no GROUP BY clause
 */
public AggregateAnalysisResult analyze(
    final ImmutableAnalysis analysis,
    final List<SelectExpression> finalProjection
) {
  final boolean hasGroupBy = analysis.getGroupBy().isPresent();
  if (!hasGroupBy) {
    throw new IllegalArgumentException("Not an aggregate query");
  }
  final AggAnalyzer analyzer = new AggAnalyzer(analysis, functionRegistry);
  analyzer.process(finalProjection);
  return analyzer.result();
}
|
@Test
public void shouldNotThrowOnNonAggregateFunctionIfAllParamsAreInGroupBy() {
    // Given: a non-aggregate function whose arguments include a GROUP BY column.
    final Expression nonGroupByArg = mock(Expression.class);
    final FunctionCall ucase = new FunctionCall(
        FunctionName.of("UCASE"),
        ImmutableList.of(GROUP_BY_1, nonGroupByArg)
    );
    givenSelectExpression(ucase);
    // When:
    analyzer.analyze(analysis, selects);
    // Then: did not throw.
}
|
/**
 * Scans a crash-report log against every known {@link Rule} pattern and
 * collects one {@link Result} per rule whose pattern matches.
 *
 * @param log the full crash-report text
 * @return the set of matched results; empty when no rule matches
 */
public static Set<Result> anaylze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}

/**
 * Correctly-spelled alias for {@link #anaylze(String)}.
 * Prefer this name in new code; the misspelled method is kept for
 * backward compatibility with existing callers.
 */
public static Set<Result> analyze(String log) {
    return anaylze(log);
}
|
@Test
public void loaderExceptionModCrash2() throws IOException {
    // The Forge loading-crash rule must identify the offending mod from the log.
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/crash-report/loader_exception_mod_crash2.txt")),
            CrashReportAnalyzer.Rule.LOADING_CRASHED_FORGE);
    // The rule's named capture groups carry the crashing mod's name and id.
    Matcher matcher = result.getMatcher();
    assertEquals("Inventory Sort", matcher.group("name"));
    assertEquals("invsort", matcher.group("id"));
}
|
/**
 * Encodes the bytes in {@code [start, end)} as a hex string.
 *
 * @param bytes the source array; must not be null
 * @param start inclusive start index
 * @param end   exclusive end index
 * @return hex string of length {@code 2 * (end - start)}
 * @throws IllegalArgumentException if {@code bytes} is null or the range is invalid
 */
public static String byteToHexString(final byte[] bytes, final int start, final int end) {
    if (bytes == null) {
        throw new IllegalArgumentException("bytes == null");
    }
    // Fail fast on a bad range instead of surfacing an obscure
    // NegativeArraySizeException or ArrayIndexOutOfBoundsException below.
    if (start < 0 || end > bytes.length || start > end) {
        throw new IllegalArgumentException(
            "invalid range: start=" + start + ", end=" + end + ", length=" + bytes.length);
    }
    int length = end - start;
    char[] out = new char[length * 2];
    for (int i = start, j = 0; i < end; i++) {
        out[j++] = HEX_CHARS[(0xF0 & bytes[i]) >>> 4]; // high nibble
        out[j++] = HEX_CHARS[0x0F & bytes[i]];         // low nibble
    }
    return new String(out);
}
|
@Test
void testHexArrayToString() {
    // Negative bytes must be masked correctly: -97 == 0x9f, 74 == 0x4a.
    byte[] input = new byte[] {1, -97, 49, 74};
    String encoded = StringUtils.byteToHexString(input);
    assertThat(encoded).isEqualTo("019f314a");
}
|
/**
 * Writes the event to the output stream using the configured charset.
 * With no format configured the event's default string form is used;
 * otherwise the format pattern is interpolated against the event.
 */
@Override
public void encode(Event event, OutputStream output) throws IOException {
    final String rendered;
    if (format == null) {
        rendered = event.toString();
    } else {
        rendered = StringInterpolation.evaluate(event, format);
    }
    output.write(rendered.getBytes(charset));
}
|
@Test
public void testEncodeWithCharset() throws IOException {
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    // U+2019 RIGHT SINGLE QUOTATION MARK, given as its UTF-8 byte sequence.
    byte[] utf8Bytes = {(byte) 0xE2, (byte) 0x80, (byte) 0x99};
    String rightSingleQuote = new String(utf8Bytes, Charset.forName("UTF-8"));
    // encode with cp-1252
    Map<String, Object> config = new HashMap<>();
    config.put("charset", "cp1252");
    config.put("format", "%{message}");
    config.put("delimiter", "");
    Plain encoder = new Plain(new ConfigurationImpl(config), new TestContext());
    Event event = new Event(Collections.singletonMap("message", rightSingleQuote));
    encoder.encode(event, sink);
    // In cp1252 the same character is the single byte 0x92.
    byte[] expected = {(byte) 0x92};
    Assert.assertArrayEquals(expected, sink.toByteArray());
}
|
/**
 * Sets the in-memory storage format for this ringbuffer.
 *
 * @param inMemoryFormat the format to use; must not be null and must not be NATIVE
 * @return this config, for fluent chaining
 */
public RingbufferConfig setInMemoryFormat(InMemoryFormat inMemoryFormat) {
    // Null is rejected first, then the unsupported NATIVE format.
    checkNotNull(inMemoryFormat, "inMemoryFormat can't be null");
    checkFalse(inMemoryFormat == NATIVE, "InMemoryFormat " + NATIVE + " is not supported");
    this.inMemoryFormat = inMemoryFormat;
    return this;
}
|
@Test(expected = NullPointerException.class)
public void setInMemoryFormat_whenNull() {
    // A null format must be rejected with a NullPointerException.
    new RingbufferConfig(NAME).setInMemoryFormat(null);
}
|
/**
 * Returns the Bot API method name for this request (the PATH constant).
 */
@Override
public String getMethod() {
    return PATH;
}
|
@Test
public void testSetMyCommandsWithMoreThan100Commands() {
    SetMyCommands setMyCommands = SetMyCommands
            .builder()
            .languageCode("en")
            .scope(BotCommandScopeDefault.builder().build())
            .build();
    // Build 102 commands: two more than the API's limit of 100.
    List<BotCommand> tooManyCommands = new ArrayList<>();
    for (int i = 0; i < 102; i++) {
        tooManyCommands.add(BotCommand.builder().command("test").description("Test Description").build());
    }
    setMyCommands.setCommands(tooManyCommands);
    assertEquals("setMyCommands", setMyCommands.getMethod());
    // Validation must reject the oversized command list with the exact message.
    Throwable thrown = assertThrows(TelegramApiValidationException.class, setMyCommands::validate);
    assertEquals("No more than 100 commands are allowed", thrown.getMessage());
}
|
/**
 * Key-less {@link ValueJoiner} variant of outer join: adapts the joiner to
 * the key-aware form and delegates to the primary overload.
 */
@Override
public <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
                                         final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                         final JoinWindows windows) {
    return outerJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerOnOuterJoinWithStreamJoined() {
    // A typed null keeps overload resolution on the ValueJoiner variant.
    final ValueJoiner<? super String, ? super String, ?> nullJoiner = null;
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.outerJoin(
            testStream,
            nullJoiner,
            JoinWindows.of(ofMillis(10)),
            StreamJoined.as("name")));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
/**
 * Reads a single byte, advancing the stream position.
 * Returns -1 once end of file has been observed; subsequent calls keep
 * returning -1 without touching the file again.
 */
@Override
public synchronized int read() throws IOException {
    checkNotClosed();
    if (finished) {
        return -1;
    }
    file.readLock().lock();
    try {
        // pos may advance past size(); the file simply reports EOF there.
        final int result = file.read(pos++);
        if (result == -1) {
            finished = true;
        } else {
            file.setLastAccessTime(fileSystemState.now());
        }
        return result;
    } finally {
        file.readLock().unlock();
    }
}
|
@Test
public void testRead_partialArray_sliceLarger() throws IOException {
    // Asking for 10 bytes when only 8 remain returns 8 and leaves the
    // rest of the buffer untouched.
    JimfsInputStream in = newInputStream(1, 2, 3, 4, 5, 6, 7, 8);
    byte[] buffer = new byte[12];
    int read = in.read(buffer, 0, 10);
    assertThat(read).isEqualTo(8);
    assertArrayEquals(bytes(1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0), buffer);
    assertEmpty(in);
}
|
/**
 * Instantiates a bean of the given class and fills it with the supplied
 * property values, using the default "empty" error message supplier.
 *
 * @param className   canonical name of the bean class
 * @param params      property paths mapped to the values to set
 * @param classLoader loader used to resolve the class
 * @return the populated bean
 */
public static <T> T fillBean(String className, Map<List<String>, Object> params, ClassLoader classLoader) {
    final T populated = fillBean(errorEmptyMessage(), className, params, classLoader);
    return populated;
}
|
@Test
public void fillBeanTestWithInitialInstanceTest() {
    Dispute dispute = new Dispute();
    Map<List<String>, Object> paramsToSet = Map.of(List.of("creator", "firstName"), FIRST_NAME, List.of("creator", "age"), AGE);
    Object result = ScenarioBeanUtil.fillBean(of(dispute), Dispute.class.getCanonicalName(), paramsToSet, classLoader);
    assertThat(result).isInstanceOf(Dispute.class);
    // Filling a supplied instance must mutate that very instance, not a copy.
    assertThat(result).isSameAs(dispute);
    // AssertJ convention: assertThat(actual).isEqualTo(expected).
    assertThat(dispute.getCreator().getFirstName()).isEqualTo(FIRST_NAME);
    assertThat(dispute.getCreator().getAge()).isEqualTo(AGE);
}
|
/**
 * Creates a {@link JsonAsserter} for the given JSON document.
 * The document is parsed once up front; all subsequent assertions run
 * against the parsed representation.
 *
 * @param json the JSON document to assert on
 * @return a new asserter bound to the parsed document
 */
public static JsonAsserter with(String json) {
    return new JsonAsserterImpl(JsonPath.parse(json).json());
}
|
@Test
public void list_content_can_be_asserted_with_nested_matcher() throws Exception {
    // Entry-level matchers nested inside hasItems are applied across all books.
    with(JSON).assertThat("$..book[*]", hasItems(hasEntry("author", "Nigel Rees"), hasEntry("author", "Evelyn Waugh")));
}
|
/**
 * Finds widget bundles visible to the tenant: the tenant's own bundles plus
 * system-wide bundles (stored under the NULL_UUID tenant).
 */
@Override
public PageData<WidgetsBundle> findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
    final List<UUID> tenantIds = Arrays.asList(widgetsBundleFilter.getTenantId().getId(), NULL_UUID);
    return findTenantWidgetsBundlesByTenantIds(tenantIds, widgetsBundleFilter, pageLink);
}
|
@Test
public void testFindAllWidgetsBundlesByTenantIdFullSearchScadaFirst() {
    UUID tenantId1 = Uuids.timeBased();
    UUID tenantId2 = Uuids.timeBased();
    // 10 rounds x (5 + 2 + 3 + 3 + 2 + 1) bundles = 160 total.
    for (int i = 0; i < 10; i++) {
        createWidgetBundles(5, tenantId1, "WB1_" + i + "_");
        createWidgetBundles(2, tenantId1, "WB1_SCADA_" + i + "_", true);
        createWidgetBundles(3, tenantId2, "WB2_" + i + "_");
        createWidgetBundles(3, tenantId2, "WB2_SCADA_" + i + "_", true);
        createSystemWidgetBundles(2, "WB_SYS_" + i + "_");
        createSystemWidgetBundles(1, "WB_SYS_SCADA_" + i + "_", true);
    }
    widgetsBundles = widgetsBundleDao.find(TenantId.SYS_TENANT_ID).stream().sorted(Comparator.comparing(WidgetsBundle::getTitle)).collect(Collectors.toList());
    assertEquals(160, widgetsBundles.size());
    PageLink pageLink = new PageLink(50, 0, "WB", new SortOrder("title"));
    PageData<WidgetsBundle> widgetsBundles1 =
            widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(
                    WidgetsBundleFilter.builder().tenantId(TenantId.fromUUID(tenantId1)).fullSearch(true).scadaFirst(true).build(), pageLink);
    // Tenant 1 sees 20 own SCADA + 10 system SCADA bundles before any non-SCADA ones.
    for (int i = 0; i < 30; i++) {
        var widgetsBundle = widgetsBundles1.getData().get(i);
        assertTrue(widgetsBundle.isScada());
    }
    for (int i = 30; i < 50; i++) {
        var widgetsBundle = widgetsBundles1.getData().get(i);
        assertFalse(widgetsBundle.isScada());
    }
    pageLink = new PageLink(50, 0, "WB", new SortOrder("title"));
    PageData<WidgetsBundle> widgetsBundles2 =
            widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(
                    WidgetsBundleFilter.builder().tenantId(TenantId.fromUUID(tenantId2)).fullSearch(true).scadaFirst(true).build(), pageLink);
    // Tenant 2 sees 30 own SCADA + 10 system SCADA bundles first.
    for (int i = 0; i < 40; i++) {
        var widgetsBundle = widgetsBundles2.getData().get(i);
        assertTrue(widgetsBundle.isScada());
    }
    for (int i = 40; i < 50; i++) {
        var widgetsBundle = widgetsBundles2.getData().get(i);
        assertFalse(widgetsBundle.isScada());
    }
}
|
/**
 * Chooses the datanode storages that will host the replicas of a new block.
 *
 * @param bm            block manager used for datanode lookup and target choice
 * @param src           path of the file the block belongs to
 * @param excludedNodes datanodes that must not be chosen (may be null)
 * @param favoredNodes  preferred datanode hostnames (may be null)
 * @param flags         add-block flags; IGNORE_CLIENT_LOCALITY disables
 *                      client-local placement
 * @param r             validated add-block parameters (client machine, number
 *                      of targets, block size, storage policy, ...)
 * @return the chosen target storages
 * @throws IOException if target selection fails
 */
static DatanodeStorageInfo[] chooseTargetForNewBlock(
    BlockManager bm, String src, DatanodeInfo[] excludedNodes,
    String[] favoredNodes, EnumSet<AddBlockFlag> flags,
    ValidateAddBlockResult r) throws IOException {
  Node clientNode = null;
  boolean ignoreClientLocality = (flags != null
      && flags.contains(AddBlockFlag.IGNORE_CLIENT_LOCALITY));
  // If client locality is ignored, clientNode remains 'null' to indicate
  // that locality must play no part in choosing the targets.
  if (!ignoreClientLocality) {
    clientNode = bm.getDatanodeManager().getDatanodeByHost(r.clientMachine);
    if (clientNode == null) {
      // The client machine is not itself a datanode; resolve it in the topology.
      clientNode = getClientNode(bm, r.clientMachine);
    }
  }
  Set<Node> excludedNodesSet =
      (excludedNodes == null) ? new HashSet<>()
          : new HashSet<>(Arrays.asList(excludedNodes));
  List<String> favoredNodesList =
      (favoredNodes == null) ? Collections.emptyList()
          : Arrays.asList(favoredNodes);
  // choose targets for the new block to be allocated.
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                  excludedNodesSet, r.blockSize,
                                  favoredNodesList, r.storagePolicyID,
                                  r.blockType, r.ecPolicy, flags);
}
|
@Test
@SuppressWarnings("unchecked")
public void testIgnoreClientLocality() throws IOException {
  // Request 3 targets for a 1KB block, with no favored nodes or EC policy.
  ValidateAddBlockResult addBlockResult =
      new ValidateAddBlockResult(1024L, 3, (byte) 0x01, null, null, null);
  EnumSet<AddBlockFlag> addBlockFlags =
      EnumSet.of(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
  BlockManager bmMock = mock(BlockManager.class);
  // Captures the clientNode argument handed to chooseTarget4NewBlock.
  ArgumentCaptor<Node> nodeCaptor = ArgumentCaptor.forClass(Node.class);
  when(bmMock.chooseTarget4NewBlock(anyString(), anyInt(), any(), anySet(),
      anyLong(), anyList(), anyByte(), any(), any(), any())).thenReturn(null);
  FSDirWriteFileOp.chooseTargetForNewBlock(bmMock, "localhost", null, null,
      addBlockFlags, addBlockResult);
  // There should be no other interactions with the block manager when the
  // IGNORE_CLIENT_LOCALITY is passed in because there is no need to discover
  // the local node requesting the new block
  verify(bmMock, times(1)).chooseTarget4NewBlock(anyString(), anyInt(),
      nodeCaptor.capture(), anySet(), anyLong(), anyList(), anyByte(), any(),
      any(), any());
  verifyNoMoreInteractions(bmMock);
  assertNull(
      "Source node was assigned a value. Expected 'null' value because "
          + "chooseTarget was flagged to ignore source node locality",
      nodeCaptor.getValue());
}
|
/**
 * Returns the projection-pushdown opportunities collected by this visitor,
 * materialized as an immutable snapshot of the internal builder.
 */
Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>>
    getPushdownOpportunities() {
  final Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>>
      snapshot = pushdownOpportunities.build();
  return snapshot;
}
|
@Test
public void testFieldAccessAllFields_returnsNoPushdown() {
    // When a consumer needs every field, projection pushdown offers no benefit,
    // so the visitor must report no opportunities.
    Pipeline pipeline = Pipeline.create();
    PCollection<Row> source = pipeline.apply(new SimpleSource());
    ProjectionProducerVisitor visitor =
        new ProjectionProducerVisitor(ImmutableMap.of(source, FieldAccessDescriptor.withAllFields()));
    pipeline.traverseTopologically(visitor);
    Assert.assertTrue(visitor.getPushdownOpportunities().isEmpty());
}
|
/**
 * Creates a fresh schema type builder with an empty completion target and a
 * clean name context.
 */
public static TypeBuilder<Schema> builder() {
    final SchemaCompletion completion = new SchemaCompletion();
    final NameContext names = new NameContext();
    return new TypeBuilder<>(completion, names);
}
|
@Test
void testFloat() {
    // A float schema built via the builder API — even with an extra property —
    // must equal the expected primitive float schema.
    Schema simple = SchemaBuilder.builder().floatType();
    Schema expected = primitive(Schema.Type.FLOAT, simple);
    Schema actual = SchemaBuilder.builder().floatBuilder().prop("p", "v").endFloat();
    assertEquals(expected, actual);
}
|
/**
 * Verifies an ES256 JWS signature against the keys of the given JWK set.
 * The signing key is selected by the key ID ("kid") from the JWS header.
 *
 * @param jwks the key set to look the signing key up in
 * @param jws  the signed object to verify
 * @return true if a matching key exists and the signature verifies;
 *         false when the key set is empty or the key ID is unknown
 * @throws UnsupportedOperationException if the JWS uses any algorithm other
 *         than ES256
 */
@SuppressWarnings("java:S2583")
public static boolean verify(@NonNull JWKSet jwks, @NonNull JWSObject jws) {
  // Defensive runtime check despite the @NonNull annotation
  // (hence the suppressed S2583 "condition always false" warning above).
  if (jwks == null) {
    throw new IllegalArgumentException("no JWKS provided to verify JWS");
  }
  if (jwks.getKeys() == null || jwks.getKeys().isEmpty()) {
    return false;
  }
  var header = jws.getHeader();
  if (!JWSAlgorithm.ES256.equals(header.getAlgorithm())) {
    throw new UnsupportedOperationException(
        "only supports ES256, found: " + header.getAlgorithm());
  }
  // Unknown key ID is treated as "cannot verify", not as an error.
  var key = jwks.getKeyByKeyId(header.getKeyID());
  if (key == null) {
    return false;
  }
  try {
    var processor = new DefaultJWSVerifierFactory();
    var verifier = processor.createJWSVerifier(jws.getHeader(), key.toECKey().toPublicKey());
    return jws.verify(verifier);
  } catch (JOSEException e) {
    throw FederationExceptions.badSignature(e);
  }
}
|
@Test
void verify() throws ParseException {
    // Round-trip: sign, serialize, re-parse, then verify against the JWKS.
    var serialized = toJws(ECKEY, "hello world?").serialize();
    var parsed = JWSObject.parse(serialized);
    assertTrue(JwsVerifier.verify(JWKS, parsed));
}
|
/**
 * Decides whether this filter should gzip the response body.
 * Returns true (and installs a gzipper into the context) only when the
 * filter is enabled, the response carries an uncompressed, gzippable body
 * of suitable size, and the client accepts gzip.
 */
@Override
public boolean shouldFilter(HttpResponseMessage response) {
    if (!ENABLED.get() || !response.hasBody() || response.getContext().isInBrownoutMode()) {
        return false;
    }
    // A gzipper already installed by an earlier decision means: keep filtering.
    if (response.getContext().get(CommonContextKeys.GZIPPER) != null) {
        return true;
    }
    // A flag on SessionContext can be set to override normal mechanism of checking if client accepts gzip.
    final HttpRequestInfo request = response.getInboundRequest();
    final Boolean overrideIsGzipRequested =
        (Boolean) response.getContext().get(CommonContextKeys.OVERRIDE_GZIP_REQUESTED);
    final boolean isGzipRequested = (overrideIsGzipRequested == null)
        ? HttpUtils.acceptsGzip(request.getHeaders())
        : overrideIsGzipRequested;
    // Check the headers to see if response is already gzipped.
    final Headers respHeaders = response.getHeaders();
    boolean isResponseCompressed = HttpUtils.isCompressed(respHeaders);
    // Decide what to do: gzip only content that is gzippable, requested,
    // not yet compressed, and of an appropriate size.
    final boolean shouldGzip = isGzippableContentType(response)
        && isGzipRequested
        && !isResponseCompressed
        && isRightSizeForGzip(response);
    if (shouldGzip) {
        response.getContext().set(CommonContextKeys.GZIPPER, getGzipper());
    }
    return shouldGzip;
}
|
@Test
void prepareResponseBody_alreadyZipped() throws Exception {
    // The client accepts gzip, but the response is already compressed,
    // so the filter must decline to run.
    originalRequestHeaders.set("Accept-Encoding", "gzip,deflate");
    byte[] body = "blah".getBytes();
    response.getHeaders().set("Content-Type", "application/json");
    response.getHeaders().set("Content-Encoding", "gzip");
    response.getHeaders().set("Content-Length", Integer.toString(body.length));
    response.setHasBody(true);
    assertFalse(filter.shouldFilter(response));
}
|
/**
 * Maps the requested file format to its newline sequence.
 * Unknown or null formats fall back to the platform line separator.
 *
 * @param fformat "DOS", "UNIX" (case-insensitive), or null/anything else
 * @return the newline string for the format
 */
public String getNewLine( String fformat ) {
  // Constant-first equalsIgnoreCase is null-safe, matching the original
  // behavior of falling through when no format is given.
  if ( "DOS".equalsIgnoreCase( fformat ) ) {
    return "\r\n";
  }
  if ( "UNIX".equalsIgnoreCase( fformat ) ) {
    return "\n";
  }
  return System.getProperty( "line.separator" );
}
|
@Test
public void testGetNewline() throws Exception {
    XMLOutputMeta meta = new XMLOutputMeta();
    // DOS uses CRLF, UNIX uses LF, and null falls back to the platform default.
    assertEquals( "\r\n", meta.getNewLine( "DOS" ) );
    assertEquals( "\n", meta.getNewLine( "UNIX" ) );
    assertEquals( System.getProperty( "line.separator" ), meta.getNewLine( null ) );
}
|
/**
 * Validates a function-config update against the existing config and returns
 * the merged result.
 *
 * Immutable properties (tenant, namespace, name, input topics and their
 * regex-ness, output serde/schema, processing guarantees, ordering, runtime,
 * auto-ack, subscription name) must not differ from the existing config;
 * mutable properties from {@code newConfig} overwrite the corresponding
 * values on the returned copy of {@code existingConfig}.
 *
 * NOTE(review): this method also mutates {@code newConfig} in place — it
 * normalizes inputs/topicsPattern/customSerde/customSchema declarations into
 * {@code newConfig.inputSpecs}. Callers should not rely on newConfig being
 * unchanged; confirm before depending on it.
 *
 * @param existingConfig the currently deployed config
 * @param newConfig      the requested update
 * @return a merged copy of existingConfig with the permitted updates applied
 * @throws IllegalArgumentException if an immutable property differs
 */
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
    // Start from a full copy of the existing config; updates are applied to it.
    FunctionConfig mergedConfig = existingConfig.toBuilder().build();
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getJar())) {
        mergedConfig.setJar(newConfig.getJar());
    }
    // Normalize both sides to non-null input-spec maps before comparing them.
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    // Fold the alternative input declarations (plain topic list, topics
    // pattern, custom serde, custom schema) into newConfig's inputSpecs.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getCustomSerdeInputs() != null) {
        newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getCustomSchemaInputs() != null) {
        newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Input topics are immutable: every updated topic must already exist with
    // the same regex-ness; only its consumer config may change.
    if (!newConfig.getInputSpecs().isEmpty()) {
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            mergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    // Output serde/schema may be set only if identical to the existing value.
    if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
            .equals(existingConfig.getOutputSerdeClassName())) {
        throw new IllegalArgumentException("Output Serde mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
            .equals(existingConfig.getOutputSchemaType())) {
        throw new IllegalArgumentException("Output Schema mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Semantics-affecting settings are immutable once deployed.
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (!StringUtils.isEmpty(newConfig.getOutput())) {
        mergedConfig.setOutput(newConfig.getOutput());
    }
    if (newConfig.getUserConfig() != null) {
        mergedConfig.setUserConfig(newConfig.getUserConfig());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
        throw new IllegalArgumentException("Runtime cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getMaxMessageRetries() != null) {
        mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
    }
    if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
        mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
    }
    if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
            .equals(existingConfig.getSubName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getWindowConfig() != null) {
        mergedConfig.setWindowConfig(newConfig.getWindowConfig());
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
|
@Test
public void testMergeEqual() {
    // Merging a config with an identical copy must be a no-op.
    FunctionConfig existing = createFunctionConfig();
    FunctionConfig update = createFunctionConfig();
    FunctionConfig merged = FunctionConfigUtils.validateUpdate(existing, update);
    Gson gson = new Gson();
    assertEquals(gson.toJson(existing), gson.toJson(merged));
}
|
/**
 * Records one reply: the first fatal reply latches a failure and stops all
 * further accounting; successful replies accumulate latency statistics.
 */
@Override
public void handleReply(Reply reply) {
    // Once a failure has been latched, ignore every subsequent reply.
    if (failure.get() != null) {
        return;
    }
    final boolean fatal = containsFatalErrors(reply.getErrors());
    if (fatal) {
        // compareAndSet keeps only the first failure if several race here.
        failure.compareAndSet(null, new IOException(formatErrors(reply)));
        return;
    }
    final long now = System.currentTimeMillis();
    // The send timestamp was stashed in the message context.
    final long latency = now - (long) reply.getContext();
    numReplies.incrementAndGet();
    accumulateReplies(now, latency);
}
|
@Test
public void requireThatJson2VespaFeederWorks() throws Throwable {
    // Stateless echo handler, shared by both feeds: answers every message
    // immediately with its own reply (de-duplicates the two identical
    // anonymous classes this test used to declare).
    MessageHandler echoHandler = new MessageHandler() {
        @Override
        public void handleMessage(Message msg) {
            Reply reply = ((DocumentMessage) msg).createReply();
            reply.swapState(msg);
            reply.popHandler().handleReply(reply);
        }
    };
    ByteArrayOutputStream dump = new ByteArrayOutputStream();
    // Feed three JSON operations while dumping them in Vespa format.
    assertFeed(new FeederParams().setDumpStream(dump).setDumpFormat(FeederParams.DumpFormat.VESPA),
               "[" +
               " { \"put\": \"id:simple:simple::0\", \"fields\": { \"my_str\":\"foo\"}}," +
               " { \"update\": \"id:simple:simple::1\", \"fields\": { \"my_str\": { \"assign\":\"bar\"}}}," +
               " { \"remove\": \"id:simple:simple::2\", \"condition\":\"true\"}" +
               "]",
               echoHandler,
               "",
               "(.+\n)+" +
               "\\s*\\d+,\\s*3,.+\n");
    // The Vespa-format dump of this input has a fixed, known size.
    assertEquals(187, dump.size());
    // Re-feed the dumped bytes to prove the dump round-trips.
    assertFeed(new ByteArrayInputStream(dump.toByteArray()),
               echoHandler,
               "",
               "(.+\n)+" +
               "\\s*\\d+,\\s*3,.+\n");
}
|
/**
 * Returns true when the path should be filtered out (skipped).
 * With no patterns configured nothing is filtered; otherwise a path passes
 * only when some include pattern matches it and no exclude pattern does.
 */
@Override
public boolean filterPath(Path filePath) {
    if (getIncludeMatchers().isEmpty() && getExcludeMatchers().isEmpty()) {
        return false;
    }
    // compensate for the fact that Flink paths are slashed
    String raw = filePath.getPath();
    if (filePath.hasWindowsDrive()) {
        raw = raw.substring(1);
    }
    final java.nio.file.Path candidate = Paths.get(raw);
    for (PathMatcher includeMatcher : getIncludeMatchers()) {
        if (includeMatcher.matches(candidate)) {
            // Included; still filtered out if an exclude pattern also matches.
            return shouldExclude(candidate);
        }
    }
    // Not matched by any include pattern: filter it out.
    return true;
}
|
@Test
void testIncludeFileWithCharacterRangeMatcher() {
    // The glob range [a-d] accepts exactly a.txt through d.txt under dir/.
    GlobFilePathFilter matcher =
            new GlobFilePathFilter(
                    Collections.singletonList("dir/[a-d].txt"), Collections.emptyList());
    for (String accepted : new String[] {"dir/a.txt", "dir/b.txt", "dir/c.txt", "dir/d.txt"}) {
        assertThat(matcher.filterPath(new Path(accepted))).isFalse();
    }
    assertThat(matcher.filterPath(new Path("dir/z.txt"))).isTrue();
}
|
/**
 * Builds runtime options from the {@code @CucumberOptions} annotations found
 * on the given class and its superclasses.
 *
 * @param clazz the annotated runner class
 * @return a builder populated with all parsed options, plus default feature
 *         path and glue when none were specified anywhere in the hierarchy
 */
public RuntimeOptionsBuilder parse(Class<?> clazz) {
    RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();
    // Walk up the class hierarchy so inherited @CucumberOptions also apply.
    for (Class<?> classWithOptions = clazz; hasSuperClass(
            classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
        CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);
        if (options != null) {
            // Each add* helper folds one option group into the builder.
            addDryRun(options, args);
            addMonochrome(options, args);
            addTags(classWithOptions, options, args);
            addPlugins(options, args);
            addPublish(options, args);
            addName(options, args);
            addSnippets(options, args);
            addGlue(options, args);
            addFeatures(options, args);
            addObjectFactory(options, args);
            addUuidGenerator(options, args);
        }
    }
    // Fallbacks derived from the class itself when nothing was configured.
    addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
    addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
    return args;
}
|
@Test
void create_with_multiple_names() {
    // Both names from @CucumberOptions must survive parsing, in declaration order.
    RuntimeOptions options = parser().parse(MultipleNames.class).build();
    List<Pattern> nameFilters = options.getNameFilters();
    assertThat(nameFilters.size(), is(equalTo(2)));
    Iterator<Pattern> patterns = nameFilters.iterator();
    assertAll(
        () -> assertThat(getRegexpPattern(patterns.next()), is(equalTo("name1"))),
        () -> assertThat(getRegexpPattern(patterns.next()), is(equalTo("name2"))));
}
|
/**
 * Combines the given operators into a compound AND predicate.
 *
 * @param nodes the operands to conjoin
 * @return the compound AND operator over all operands
 */
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
    final ScalarOperator conjunction =
            createCompound(CompoundPredicateOperator.CompoundType.AND, nodes);
    return conjunction;
}
|
@Test
public void compoundAnd2() {
    // Four operands must form a balanced AND tree: (1 AND 2) AND (3 AND 4).
    ScalarOperator tree = Utils.compoundAnd(ConstantOperator.createInt(1),
            ConstantOperator.createInt(2),
            ConstantOperator.createInt(3),
            ConstantOperator.createInt(4));
    CompoundPredicateOperator root = (CompoundPredicateOperator) tree;
    CompoundPredicateOperator left = (CompoundPredicateOperator) tree.getChild(0);
    CompoundPredicateOperator right = (CompoundPredicateOperator) tree.getChild(1);
    assertEquals(CompoundPredicateOperator.CompoundType.AND, root.getCompoundType());
    assertEquals(CompoundPredicateOperator.CompoundType.AND, left.getCompoundType());
    assertEquals(CompoundPredicateOperator.CompoundType.AND, right.getCompoundType());
    assertEquals(1, ((ConstantOperator) left.getChild(0)).getInt());
    assertEquals(2, ((ConstantOperator) left.getChild(1)).getInt());
    assertEquals(3, ((ConstantOperator) right.getChild(0)).getInt());
    assertEquals(4, ((ConstantOperator) right.getChild(1)).getInt());
}
|
/**
 * Resolves the aggregation strategy for a dynamic-router recipient list.
 * Resolution order: the explicitly configured strategy bean, then a registry
 * lookup by the configured name (failing loudly when a name is configured
 * but cannot be resolved), then a no-op fallback. The chosen strategy is
 * made context-aware and, when configured, wrapped to share the unit of work.
 */
static AggregationStrategy createAggregationStrategy(CamelContext camelContext, DynamicRouterConfiguration cfg) {
    AggregationStrategy strategy = Optional.ofNullable(cfg.getAggregationStrategyBean())
            .or(() -> Optional.ofNullable(cfg.getAggregationStrategy())
                    .map(ref -> lookupByNameAndType(camelContext, ref, Object.class)
                            .map(aggStr -> convertAggregationStrategy.apply(aggStr, cfg))
                            .orElseThrow(() -> new IllegalArgumentException(
                                    "Cannot find AggregationStrategy in Registry with name: " +
                                            cfg.getAggregationStrategy()))))
            .orElse(new NoopAggregationStrategy());
    CamelContextAware.trySetCamelContext(strategy, camelContext);
    return cfg.isShareUnitOfWork() ? new ShareUnitOfWorkAggregationStrategy(strategy) : strategy;
}
|
@Test
void testCreateAggregationStrategyNoOp() {
    // With neither a strategy bean nor a registry reference configured,
    // the helper must fall back to the no-op strategy.
    when(mockConfig.getAggregationStrategyBean()).thenReturn(null);
    when(mockConfig.getAggregationStrategy()).thenReturn(null);
    AggregationStrategy result =
            DynamicRouterRecipientListHelper.createAggregationStrategy(camelContext, mockConfig);
    Assertions.assertInstanceOf(DynamicRouterRecipientListHelper.NoopAggregationStrategy.class, result);
}
|
/**
 * Registers the default thread pools for metrics sampling: server-side and
 * consumer-shared executors from the data store, plus the framework shared
 * executor. Server pools using AbortPolicyWithReport additionally get a
 * rejection-count listener attached.
 */
public void registryDefaultSampleThreadPoolExecutor() {
    ApplicationModel applicationModel = collector.getApplicationModel();
    if (applicationModel == null) {
        return;
    }
    try {
        // Lazily resolve the framework executor repository from the bean factory.
        if (this.frameworkExecutorRepository == null) {
            this.frameworkExecutorRepository =
                collector.getApplicationModel().getBeanFactory().getBean(FrameworkExecutorRepository.class);
        }
    } catch (Exception ex) {
        // Best-effort: sampling continues without the shared executor.
        logger.warn(
            COMMON_METRICS_COLLECTOR_EXCEPTION,
            "",
            "",
            "ThreadPoolMetricsSampler! frameworkExecutorRepository non-init");
    }
    if (this.dataStore == null) {
        this.dataStore = collector
            .getApplicationModel()
            .getExtensionLoader(DataStore.class)
            .getDefaultExtension();
    }
    if (dataStore != null) {
        dataStore.addListener(this);
        // Register server-side thread pools under the server prefix.
        Map<String, Object> executors = dataStore.get(EXECUTOR_SERVICE_COMPONENT_KEY);
        for (Map.Entry<String, Object> entry : executors.entrySet()) {
            ExecutorService executor = (ExecutorService) entry.getValue();
            if (executor instanceof ThreadPoolExecutor) {
                this.addExecutors(SERVER_THREAD_POOL_PREFIX + entry.getKey(), executor);
            }
        }
        // Register consumer-shared thread pools under the client prefix.
        executors = dataStore.get(CONSUMER_SHARED_EXECUTOR_SERVICE_COMPONENT_KEY);
        for (Map.Entry<String, Object> entry : executors.entrySet()) {
            ExecutorService executor = (ExecutorService) entry.getValue();
            if (executor instanceof ThreadPoolExecutor) {
                this.addExecutors(CLIENT_THREAD_POOL_PREFIX + entry.getKey(), executor);
            }
        }
        // Attach a rejection-count listener to every server pool whose
        // rejected-execution handler supports exhaustion events.
        ThreadRejectMetricsCountSampler threadRejectMetricsCountSampler =
            new ThreadRejectMetricsCountSampler(collector);
        this.sampleThreadPoolExecutor.entrySet().stream()
            .filter(entry -> entry.getKey().startsWith(SERVER_THREAD_POOL_NAME))
            .forEach(entry -> {
                if (entry.getValue().getRejectedExecutionHandler() instanceof AbortPolicyWithReport) {
                    MetricThreadPoolExhaustedListener metricThreadPoolExhaustedListener =
                        new MetricThreadPoolExhaustedListener(
                            entry.getKey(), threadRejectMetricsCountSampler);
                    ((AbortPolicyWithReport) entry.getValue().getRejectedExecutionHandler())
                        .addThreadPoolExhaustedEventListener(metricThreadPoolExhaustedListener);
                }
            });
    }
    if (this.frameworkExecutorRepository != null) {
        this.addExecutors("sharedExecutor", frameworkExecutorRepository.getSharedExecutor());
    }
}
|
@Test
public void testRegistryDefaultSampleThreadPoolExecutor() throws NoSuchFieldException, IllegalAccessException {
    // One server pool and one client pool exposed via the mocked data store,
    // plus a mocked framework shared executor: three pools total.
    Map<String, Object> serverExecutors = new HashMap<>();
    Map<String, Object> clientExecutors = new HashMap<>();
    ExecutorService serverExecutor = Executors.newFixedThreadPool(5);
    ExecutorService clientExecutor = Executors.newFixedThreadPool(5);
    serverExecutors.put("server1", serverExecutor);
    clientExecutors.put("client1", clientExecutor);
    when(dataStore.get(EXECUTOR_SERVICE_COMPONENT_KEY)).thenReturn(serverExecutors);
    when(dataStore.get(CONSUMER_SHARED_EXECUTOR_SERVICE_COMPONENT_KEY)).thenReturn(clientExecutors);
    when(frameworkExecutorRepository.getSharedExecutor()).thenReturn(Executors.newFixedThreadPool(5));
    sampler2.registryDefaultSampleThreadPoolExecutor();
    // Inspect the private registration map via reflection.
    Field f = ThreadPoolMetricsSampler.class.getDeclaredField("sampleThreadPoolExecutor");
    f.setAccessible(true);
    Map<String, ThreadPoolExecutor> executors = (Map<String, ThreadPoolExecutor>) f.get(sampler2);
    Assertions.assertEquals(3, executors.size());
    Assertions.assertTrue(executors.containsKey("DubboServerHandler-server1"));
    Assertions.assertTrue(executors.containsKey("DubboClientHandler-client1"));
    Assertions.assertTrue(executors.containsKey("sharedExecutor"));
    serverExecutor.shutdown();
    clientExecutor.shutdown();
}
|
/**
 * Executes the given SQL statement and returns its interpreter outcome.
 *
 * @param sql the SQL text to run
 * @param contextInterpreter the interpreter context (not used here; execution is
 *     fully delegated to {@link #executeSql})
 * @return the result of executing the SQL
 */
@Override
public InterpreterResult interpret(String sql, InterpreterContext contextInterpreter) {
    logger.info("Run SQL command '{}'", sql);
    return executeSql(sql);
}
|
/**
 * Verifies that interpreting a syntactically invalid SQL statement yields an ERROR
 * result code rather than throwing.
 */
@Test
void badSqlSyntaxFails() {
    InterpreterResult ret = bqInterpreter.interpret(constants.getWrong(), context);
    assertEquals(InterpreterResult.Code.ERROR, ret.code());
}
|
/**
 * Parses the old and new YAML configuration contents and returns the set of changed
 * items between them.
 *
 * @param oldContent previous YAML content; blank content is treated as an empty map
 * @param newContent new YAML content; blank content is treated as an empty map
 * @param type configuration type (unused here; kept for the parser interface)
 * @return changed items keyed by flattened property path
 */
@Override
public Map<String, ConfigChangeItem> doParse(String oldContent, String newContent, String type) {
    Map<String, Object> oldMap = Collections.emptyMap();
    Map<String, Object> newMap = Collections.emptyMap();
    try {
        // SafeConstructor prevents instantiating arbitrary Java types from the YAML input.
        Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions()));
        if (StringUtils.isNotBlank(oldContent)) {
            // yaml.load returns null for content that contains no document (e.g. only
            // comments), which is not blank; guard so getFlattenedMap never sees null.
            Map<String, Object> loadedOld = yaml.load(oldContent);
            if (loadedOld != null) {
                oldMap = getFlattenedMap(loadedOld);
            }
        }
        if (StringUtils.isNotBlank(newContent)) {
            Map<String, Object> loadedNew = yaml.load(newContent);
            if (loadedNew != null) {
                newMap = getFlattenedMap(loadedNew);
            }
        }
    } catch (MarkedYAMLException e) {
        handleYamlException(e);
    }
    return filterChangeData(oldMap, newMap);
}
|
/**
 * Verifies that parsing identical old and new YAML documents (a nested map with a
 * string, a list, and a number) produces no change items.
 */
@Test
void testComplexYaml() throws IOException {
    /*
     * map:
     *   key1: "string"
     *   key2:
     *     - item1
     *     - item2
     *     - item3
     *   key3: 123
     */
    String s = "map:\n" + "  key1: \"string\"\n" + "  key2:\n" + "    - item1\n" + "    - item2\n" + "    - item3\n"
            + "  key3: 123 \n";
    // Same content on both sides: the diff must be empty.
    Map<String, ConfigChangeItem> map = parser.doParse(s, s, type);
    assertEquals(0, map.size());
}
|
/**
 * Marks this message to be delivered silently (sets the disableNotification flag).
 */
public void disableNotification() {
    this.disableNotification = true;
}
|
/**
 * Verifies equals/hashCode for SendMessage: a numeric chat id (1L) and its string form
 * ("1") build equal messages, while toggling disableNotification breaks equality —
 * and hashCode is consistent with equals in both cases.
 */
@Test
void comparison() {
    SendMessage sm1 = SendMessage
            .builder()
            .chatId(1L)
            .text("Hello World")
            .build();
    SendMessage sm2 = SendMessage
            .builder()
            .chatId("1")
            .text("Hello World")
            .build();
    SendMessage disabledNotification = SendMessage
            .builder()
            .chatId("1")
            .text("Hello World")
            .disableNotification(true)
            .build();
    assertEquals(sm1, sm2);
    assertNotEquals(sm1, disabledNotification);
    assertEquals(sm1.hashCode(), sm2.hashCode());
    assertNotEquals(sm1.hashCode(), disabledNotification.hashCode());
}
|
/**
 * Returns the collected required columns.
 * Exposing the internal reference is safe here because requiredColumns is an
 * ImmutableSet (hence the SpotBugs suppression).
 */
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "requiredColumns is ImmutableSet")
public Collection<? extends ColumnReferenceExp> get() {
    return requiredColumns;
}
|
/**
 * Verifies that removing a column from the builder excludes it from the built set
 * while leaving the remaining columns intact.
 */
@Test
public void shouldRemove() {
    // Given:
    builder.addAll(ImmutableSet.of(COL0_REF, COL1_REF, COL2_REF));
    // When:
    builder.remove(COL1_REF);
    // Then:
    assertThat(builder.build().get(), is(ImmutableSet.of(COL0_REF, COL2_REF)));
}
|
/**
 * Determines whether the broker with the given pod id can be rolled, delegating to
 * canRollBroker with the previously gathered topic descriptions.
 *
 * @param podId the broker/pod id to check
 * @return a future completing with true when the broker is safe to roll
 */
Future<Boolean> canRoll(int podId) {
    LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId);
    return canRollBroker(descriptions, podId);
}
|
/**
 * Verifies canRoll when topics have no leader (the .leader(...) calls are deliberately
 * commented out): broker 0 must not be rollable because topic B partition 0 would drop
 * below min.insync.replicas, while the other brokers remain rollable.
 */
@Test
public void testNoLeader(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    // no leader assigned on purpose
                    //.leader(0)
                    .isr(1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    // no leader assigned on purpose
                    //.leader(1)
                    .isr(0)
                .endPartition()
            .endTopic()
            .addBroker(3);
    KafkaAvailability kafkaSorted = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac());
    // One checkpoint flag per broker so the async assertions all complete the test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaSorted.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            if (brokerId == 0) {
                assertFalse(canRoll,
                        "broker " + brokerId + " should not be rollable, because B/0 would be below min isr");
            } else {
                assertTrue(canRoll,
                        "broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
            }
            a.flag();
        })));
    }
}
|
/**
 * Reflectively sets the named field on the given object to a new value, walking up the
 * class hierarchy (via traverseClassHierarchy) until a class declaring the field is found.
 *
 * @param object instance whose field is set
 * @param fieldName name of the (possibly private/inherited) field
 * @param fieldNewValue value to assign
 * @throws RuntimeException wrapping any reflective failure (e.g. NoSuchFieldException,
 *     IllegalAccessException) — the original exception is preserved as the cause
 */
public static void setField(
    final Object object, final String fieldName, final Object fieldNewValue) {
  try {
    traverseClassHierarchy(
        object.getClass(),
        NoSuchFieldException.class,
        (InsideTraversal<Void>)
            traversalClass -> {
              Field field = traversalClass.getDeclaredField(fieldName);
              // setAccessible allows writing private fields.
              field.setAccessible(true);
              field.set(object, fieldNewValue);
              return null;
            });
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
|
/**
 * Verifies that ReflectionHelpers.setField overwrites an existing field value
 * on an instance of a subclass.
 */
@Test
public void setFieldReflectively_setsPrivateFields() {
  ExampleDescendant example = new ExampleDescendant();
  example.overridden = 5;
  ReflectionHelpers.setField(example, "overridden", 10);
  assertThat(example.overridden).isEqualTo(10);
}
|
/**
 * Builds a Tars object name of the form "service@tcp -h host -p port" from an
 * upstream "host:port" address.
 *
 * @param upstreamUrl upstream address in "host:port" form
 * @param serviceName the service name to prefix
 * @return the formatted object name
 */
public static String getObjectName(final String upstreamUrl, final String serviceName) {
    final String[] hostAndPort = upstreamUrl.split(":");
    return String.format("%s@tcp -h %s -p %s", serviceName, hostAndPort[0], hostAndPort[1]);
}
|
/**
 * Verifies that getObjectName formats "host:port" into the Tars object-name syntax.
 */
@Test
public void testGetObjectName() {
    final String result = PrxInfoUtil.getObjectName("127.0.0.1:8080", "serviceName");
    assertEquals("serviceName@tcp -h 127.0.0.1 -p 8080", result);
}
|
/**
 * Reports whether the given return type is one of the supported reactive types
 * (i.e. some supported type is assignable from it).
 */
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
    // Plain loop form; equivalent to rxSupportedTypes.stream().anyMatch(...).
    for (Class supportedType : rxSupportedTypes) {
        if (supportedType.isAssignableFrom(returnType)) {
            return true;
        }
    }
    return false;
}
|
/**
 * Verifies that the RxJava2 rate-limiter aspect recognizes Flowable and Single
 * as handleable return types.
 */
@Test
public void testCheckTypes() {
    assertThat(rxJava2RateLimiterAspectExt.canHandleReturnType(Flowable.class)).isTrue();
    assertThat(rxJava2RateLimiterAspectExt.canHandleReturnType(Single.class)).isTrue();
}
|
/**
 * Computes the broadcast address of the subnet formed by the given IPv4 address
 * and CIDR prefix length.
 *
 * @param ipAddr IPv4 address, e.g. "192.168.10.35"
 * @param prefixLength CIDR prefix length, e.g. 24
 * @return the broadcast address of that subnet
 */
public static String getBroadcastAddr(String ipAddr, int prefixLength) {
    final String cidrNotation = ipAddr + "/" + prefixLength;
    return new SubnetUtils(cidrNotation).getInfo().getBroadcastAddress();
}
|
/**
 * Verifies broadcast-address computation for several prefix lengths, including the
 * /32 degenerate case where the broadcast address equals the host address.
 * Note: assertEquals takes (expected, actual); the original had the arguments
 * reversed, which produces misleading failure messages.
 */
@Test
public void testGetBroadcastAddr() {
    String ipAddr = "192.168.10.35";
    int prefix1 = 24;
    String broadcast1 = getBroadcastAddr(ipAddr, prefix1);
    assertEquals("192.168.10.255", broadcast1);
    int prefix2 = 28;
    String broadcast2 = getBroadcastAddr(ipAddr, prefix2);
    assertEquals("192.168.10.47", broadcast2);
    int prefix3 = 32;
    String broadcast3 = getBroadcastAddr(ipAddr, prefix3);
    assertEquals("192.168.10.35", broadcast3);
    int prefix4 = 16;
    String broadcast4 = getBroadcastAddr(ipAddr, prefix4);
    assertEquals("192.168.255.255", broadcast4);
}
|
/**
 * Copies all bytes from in to out using the given buffer size, optionally closing
 * both streams afterwards.
 *
 * Close handling: when close is true, the streams are first closed explicitly so
 * that a close() failure propagates to the caller; each reference is nulled after a
 * successful close so the finally block's closeStream calls only act on streams
 * that were not yet closed (closeStream presumably swallows secondary errors —
 * TODO confirm against its implementation).
 *
 * @param in source stream
 * @param out destination stream
 * @param buffSize copy buffer size in bytes
 * @param close whether to close both streams when done (even on failure)
 * @throws IOException if the copy or an explicit close fails
 */
public static void copyBytes(InputStream in, OutputStream out,
                             int buffSize, boolean close)
  throws IOException {
  try {
    copyBytes(in, out, buffSize);
    if(close) {
      out.close();
      out = null;
      in.close();
      in = null;
    }
  } finally {
    if(close) {
      closeStream(out);
      closeStream(in);
    }
  }
}
|
/**
 * Verifies that when the output stream's close() throws a RuntimeException,
 * copyBytes still propagates the exception and close() was attempted at least once.
 */
@Test
public void testCopyBytesShouldCloseInputSteamWhenOutputStreamCloseThrowsRunTimeException()
    throws Exception {
  InputStream inputStream = Mockito.mock(InputStream.class);
  OutputStream outputStream = Mockito.mock(OutputStream.class);
  // EOF immediately so the copy loop does no work before closing.
  Mockito.doReturn(-1).when(inputStream).read(new byte[1]);
  Mockito.doThrow(new RuntimeException()).when(outputStream).close();
  try {
    IOUtils.copyBytes(inputStream, outputStream, 1, true);
    fail("Didn't throw exception");
  } catch (RuntimeException e) {
  }
  Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
|
/**
 * Populates the given semantic properties from string-encoded forwarded/non-forwarded/
 * read-set field declarations. Convenience overload that delegates with the trailing
 * flag set to false (see the full overload for its meaning).
 *
 * @param result properties object to fill in place
 * @param forwarded forwarded-field declarations (may be null)
 * @param nonForwarded non-forwarded-field declarations (may be null)
 * @param readSet read-field declarations (may be null)
 * @param inType input type information
 * @param outType output type information
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
/**
 * Verifies that forwarded-field declarations tolerate surrounding whitespace:
 * " f2 ; f3 ; f0 " still maps fields 0, 2 and 3 onto themselves.
 */
@Test
void testForwardedNoArrowSpaces() {
    String[] forwardedFields = {"  f2  ;   f3  ;  f0   "};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, fiveIntTupleType, fiveIntTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
}
|
/**
 * Builds a bytecode expression computing {@code left < right} for primitive
 * int/long/float/double operands.
 *
 * For int, no comparison instruction is needed: IF_ICMPGE compares and jumps on the
 * "no match" (>=) case directly. For long/float/double, a compare instruction first
 * pushes -1/0/1, then IFGE jumps on >= 0. FCMPG/DCMPG push 1 when either operand is
 * NaN, so the IFGE "no match" jump is taken and NaN comparisons evaluate to false,
 * matching Java's {@code <} semantics (per the JVM specification).
 *
 * @throws IllegalArgumentException for non-numeric or unsupported operand types
 */
static BytecodeExpression lessThan(BytecodeExpression left, BytecodeExpression right)
{
    checkArgumentTypes(left, right);

    OpCode comparisonInstruction;
    OpCode noMatchJumpInstruction;

    Class<?> type = left.getType().getPrimitiveType();
    if (type == int.class) {
        comparisonInstruction = null;
        noMatchJumpInstruction = IF_ICMPGE;
    }
    else if (type == long.class) {
        comparisonInstruction = LCMP;
        noMatchJumpInstruction = IFGE;
    }
    else if (type == float.class) {
        comparisonInstruction = FCMPG;
        noMatchJumpInstruction = IFGE;
    }
    else if (type == double.class) {
        comparisonInstruction = DCMPG;
        noMatchJumpInstruction = IFGE;
    }
    else {
        throw new IllegalArgumentException("Less than does not support " + type);
    }

    return new ComparisonBytecodeExpression("<", comparisonInstruction, noMatchJumpInstruction, left, right);
}
|
/**
 * Exercises lessThan over all four supported primitive types, including NaN operands
 * for float and double. Each expected value is the Java {@code <} operator applied to
 * the same constants, so generated bytecode must agree with Java semantics exactly.
 */
@Test
public void testLessThan()
        throws Exception
{
    assertBytecodeExpression(lessThan(constantInt(3), constantInt(7)), 3 < 7, "(3 < 7)");
    assertBytecodeExpression(lessThan(constantInt(7), constantInt(3)), 7 < 3, "(7 < 3)");
    assertBytecodeExpression(lessThan(constantInt(7), constantInt(7)), 7 < 7, "(7 < 7)");

    assertBytecodeExpression(lessThan(constantLong(3L), constantLong(7L)), 3L < 7L, "(3L < 7L)");
    assertBytecodeExpression(lessThan(constantLong(7L), constantLong(3L)), 7L < 3L, "(7L < 3L)");
    assertBytecodeExpression(lessThan(constantLong(7L), constantLong(7L)), 7L < 7L, "(7L < 7L)");

    assertBytecodeExpression(lessThan(constantFloat(3.3f), constantFloat(7.7f)), 3.3f < 7.7f, "(3.3f < 7.7f)");
    assertBytecodeExpression(lessThan(constantFloat(7.7f), constantFloat(3.3f)), 7.7f < 3.3f, "(7.7f < 3.3f)");
    assertBytecodeExpression(lessThan(constantFloat(7.7f), constantFloat(7.7f)), 7.7f < 7.7f, "(7.7f < 7.7f)");
    // NaN comparisons are always false in Java; the bytecode must match.
    assertBytecodeExpression(lessThan(constantFloat(Float.NaN), constantFloat(7.7f)), Float.NaN < 7.7f, "(NaNf < 7.7f)");
    assertBytecodeExpression(lessThan(constantFloat(7.7f), constantFloat(Float.NaN)), 7.7f < Float.NaN, "(7.7f < NaNf)");

    assertBytecodeExpression(lessThan(constantDouble(3.3), constantDouble(7.7)), 3.3 < 7.7, "(3.3 < 7.7)");
    assertBytecodeExpression(lessThan(constantDouble(7.7), constantDouble(3.3)), 7.7 < 3.3, "(7.7 < 3.3)");
    assertBytecodeExpression(lessThan(constantDouble(7.7), constantDouble(7.7)), 7.7 < 7.7, "(7.7 < 7.7)");
    assertBytecodeExpression(lessThan(constantDouble(Double.NaN), constantDouble(7.7)), Double.NaN < 7.7, "(NaN < 7.7)");
    assertBytecodeExpression(lessThan(constantDouble(7.7), constantDouble(Double.NaN)), 7.7 < Double.NaN, "(7.7 < NaN)");
}
|
/**
 * Returns the value at the given column from the underlying merged result set
 * as a plain Object.
 *
 * @param columnIndex column index (JDBC convention, presumably 1-based — matches
 *     how the tests call it; confirm against the merge result set contract)
 * @throws SQLException propagated from the merged result set
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
|
/**
 * Verifies that getObject with an explicit type delegates to the merged result set
 * for both the primitive int and the boxed Integer type tokens.
 */
@Test
void assertGetObjectWithInteger() throws SQLException {
    int result = 0;
    when(mergeResultSet.getValue(1, int.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, int.class), is(result));
    when(mergeResultSet.getValue(1, Integer.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, Integer.class), is(result));
}
|
/**
 * Returns the SAS token associated with this endpoint configuration.
 */
public FilesToken getToken() {
    return token;
}
|
/**
 * Verifies that a SAS token copy-pasted into the endpoint URI survives endpoint
 * resolution byte-for-byte, i.e. getToken().toURIQuery() reproduces the original
 * query string including percent-encoded characters in the signature.
 */
@Test
void sasTokenForCopyPastedURIShouldBePreserved() {
    var plainToken
            = "sv=2022-11-02&ss=f&srt=sco&sp=rwdlc&se=2023-05-28T22:50:04Z&st=2023-05-24T14:50:04Z&spr=https&sig=gj%2BUKSiCWSHmcubvGhyJhatkP8hkbXkrmV%2B%2BZme%2BCxI%3D";
    // context while resolving endpoint calls SAS setters with decoded values
    // by observation Camel decoded sig=gj UKSiCWSHmcubvGhyJhatkP8hkbXkrmV Zme CxI=
    // using URISupport sig=gj+UKSiCWSHmcubvGhyJhatkP8hkbXkrmV++Zme+CxI=
    // leads to "Signature size is invalid" response from server
    // likely need to post-process replacing + by %2B
    // Camel also sorted params before calling setters
    var endpoint = context.getEndpoint(
            "azure-files://account/share?" + plainToken, FilesEndpoint.class);
    assertEquals(
            plainToken,
            endpoint.getToken().toURIQuery());
}
|
/**
 * Gets (or creates, if still live) the segment for the given id, then cleans up
 * segments that have expired at the current stream time.
 *
 * @param segmentId segment identifier
 * @param context processor context used by segment creation
 * @param streamTime current stream time driving both liveness and expiry
 * @return the live segment, or whatever the superclass returns for a non-live id
 */
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId,
                                                final ProcessorContext context,
                                                final long streamTime) {
    final KeyValueSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    // Expired segments are reaped on every access so stale data does not accumulate.
    cleanupExpiredSegments(streamTime);
    return segment;
}
|
/**
 * Verifies that creating segments for ids beyond the current stream time (future
 * events) grows the segment set without rolling out any existing segments.
 */
@Test
public void futureEventsShouldNotCauseSegmentRoll() {
    // Advance stream time and create segments 0..4; after each step all segments
    // from 0 up to the new one must still be present.
    long streamTime = 0;
    for (int segmentId = 0; segmentId <= 4; segmentId++) {
        streamTime = updateStreamTimeAndCreateSegment(segmentId);
        verifyCorrectSegments(0, segmentId + 1);
    }
    // Segments 5 and 6 are "future" relative to the frozen stream time.
    segments.getOrCreateSegmentIfLive(5, context, streamTime);
    verifyCorrectSegments(0, 6);
    segments.getOrCreateSegmentIfLive(6, context, streamTime);
    verifyCorrectSegments(0, 7);
}
|
/**
 * Computes the set of unload (shedding) decisions for this cycle: repeatedly picks the
 * most-loaded broker, selects bundles from its top-bundles load data to unload (and,
 * in transfer mode, possibly swaps bundles from the least-loaded broker back), until
 * the load statistics no longer justify further unloading.
 *
 * Bug fix: the accounting line {@code trafficMarkedToGain += minBrokerBundleSwapThroughput}
 * was previously inside an {@code if (debugMode)} branch, so swap traffic was only
 * counted when debug logging was enabled — shedding decisions silently depended on a
 * logging flag. The accounting now always runs.
 *
 * @param context load manager context providing broker registry and load data stores
 * @param recentlyUnloadedBundles bundles unloaded recently (excluded from unloading)
 * @param recentlyUnloadedBrokers brokers unloaded recently (fed into the load stats)
 * @return the cached set of decisions for this run (also kept in {@code decisionCache})
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);

        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);

        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }

        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;

        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Shedding only triggers after the condition has held for a configured number
        // of consecutive runs, to avoid reacting to transient spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }

        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }

        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }

            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Aim to offload half of the weighted-load gap between max and min broker,
            // translated into throughput proportionally to the max broker's traffic.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;

            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100,
                        targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }

            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;

            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }

            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
                            null;

            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }

            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }

                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }

                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                        }
                        // BUGFIX: gained-traffic accounting must run regardless of debug
                        // logging; it was previously inside the debugMode branch, making
                        // shedding decisions depend on whether debug logging was enabled.
                        trafficMarkedToGain += minBrokerBundleSwapThroughput;
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;

                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB,
                            trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }

        } // while end

        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }

        if (decisionCache.isEmpty()) {
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }

    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
|
/**
 * Verifies shedding when some brokers report load above 100%: the two overloaded
 * brokers each shed a bundle to an underloaded broker, and both the counter and the
 * internal LoadStats reflect the expected averages (counter std is pre-offload,
 * stats.std() is post-offload — hence the different values).
 */
@Test
public void testLoadMoreThan100() throws IllegalAccessException {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContext();
    var brokerLoadDataStore = ctx.brokerLoadDataStore();
    // CPU loads of 200% and 1000% mark broker4 and broker5 as overloaded.
    brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 200, "broker4:8080"));
    brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 1000, "broker5:8080"));
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")),
            Success, Overloaded));
    expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")),
            Success, Overloaded));
    assertEquals(res, expected);
    assertEquals(counter.getLoadAvg(), 2.4240000000000004);
    assertEquals(counter.getLoadStd(), 3.8633332758124816);
    // Inspect the private stats field to check the post-offload statistics.
    var stats = (TransferShedder.LoadStats)
            FieldUtils.readDeclaredField(transferShedder, "stats", true);
    assertEquals(stats.avg(), 2.4240000000000004);
    assertEquals(stats.std(), 2.781643776903451);
}
|
/**
 * Acquires the lock, blocking until it is available (a leaseTime of -1 means no
 * explicit lease timeout).
 *
 * @throws IllegalStateException if the waiting thread is interrupted; the interrupt
 *     status is restored and the InterruptedException is preserved as the cause
 */
@Override
public void lock() {
    try {
        lock(-1, null, false);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption,
        // and keep the original exception as the cause (previously both were lost).
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    }
}
|
/**
 * Verifies remainTimeToLive across lock states: close to the explicit lease while
 * held with a 1h lease, -2 once unlocked (key absent), and close to the watchdog
 * timeout when locked without an explicit lease.
 */
@Test
public void testRemainTimeToLive() {
    RLock lock = redisson.getLock("test-lock:1");
    lock.lock(1, TimeUnit.HOURS);
    // Small slack (10 ms) tolerates the round trip between lock() and the TTL read.
    assertThat(lock.remainTimeToLive()).isBetween(TimeUnit.HOURS.toMillis(1) - 10, TimeUnit.HOURS.toMillis(1));
    lock.unlock();
    assertThat(lock.remainTimeToLive()).isEqualTo(-2);
    lock.lock();
    assertThat(lock.remainTimeToLive()).isBetween(redisson.getConfig().getLockWatchdogTimeout() - 10, redisson.getConfig().getLockWatchdogTimeout());
}
|
/**
 * Returns the size estimate for the given restriction: for a bounded range the
 * tracked byte count, and for an unbounded range (upper bound Long.MAX_VALUE) the
 * remaining work reported by a freshly created tracker's progress.
 */
@GetSize
public double getSize(
    @Element SubscriptionPartition subscriptionPartition,
    @Restriction OffsetByteRange restriction) {
  if (restriction.getRange().getTo() != Long.MAX_VALUE) {
    return restriction.getByteCount();
  }
  return newTracker(subscriptionPartition, restriction).getProgress().getWorkRemaining();
}
|
/**
 * Verifies that for an unbounded restriction getSize delegates to the tracker's
 * progress, returning its work-remaining value (compared with a float tolerance).
 */
@Test
public void getProgressUnboundedRangeDelegates() {
  Progress progress = Progress.from(0, 0.2);
  when(tracker.getProgress()).thenReturn(progress);

  assertTrue(
      DoubleMath.fuzzyEquals(
          progress.getWorkRemaining(), sdf.getSize(PARTITION, RESTRICTION), .0001));
  verify(tracker).getProgress();
}
|
/**
 * Returns a new, empty builder for this type.
 */
public static Builder builder() {
  return new Builder();
}
|
/**
 * Verifies JSON round-trip serialization of ListTablesResponse for both a populated
 * identifier list and an empty one (next-page-token null in both cases).
 */
@Test
public void testRoundTripSerDe() throws JsonProcessingException {
  String fullJson =
      "{\"identifiers\":[{\"namespace\":[\"accounting\",\"tax\"],\"name\":\"paid\"}],\"next-page-token\":null}";
  assertRoundTripSerializesEquallyFrom(
      fullJson, ListTablesResponse.builder().addAll(IDENTIFIERS).build());

  String emptyIdentifiers = "{\"identifiers\":[],\"next-page-token\":null}";
  assertRoundTripSerializesEquallyFrom(emptyIdentifiers, ListTablesResponse.builder().build());
}
|
/**
 * Finds all keys with the given prefix.
 *
 * The volatile index reference is read before the query and re-checked afterwards;
 * if another thread swapped in a new index concurrently, the query is retried against
 * the fresh index so the returned iterator never reflects a superseded snapshot.
 */
public HollowOrdinalIterator findKeysWithPrefix(String prefix) {
    TST current;
    HollowOrdinalIterator it;
    do {
        current = prefixIndexVolatile;
        it = current.findKeysWithPrefix(prefix);
    } while (current != this.prefixIndexVolatile);
    return it;
}
|
/**
 * Verifies tokenized prefix search over indexed movie names: counts of matches for
 * several prefixes, including the empty prefix (all entries) and a prefix with a
 * trailing whitespace (no matches, since names are tokenized on whitespace).
 * Uses assertEquals so failures report expected vs. actual sizes, unlike the
 * original assertTrue(size == n) form.
 */
@Test
public void testFindKeysWithPrefix() throws Exception {
    for (Movie movie : getSimpleList()) {
        objectMapper.add(movie);
    }
    StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
    HollowTokenizedPrefixIndex tokenizedPrefixIndex = new HollowTokenizedPrefixIndex(readStateEngine, "SimpleMovie", "name.value", false);

    Set<Integer> ordinals = toSet(tokenizedPrefixIndex.findKeysWithPrefix("th"));
    Assert.assertEquals(3, ordinals.size());

    ordinals = toSet(tokenizedPrefixIndex.findKeysWithPrefix("matrix"));
    Assert.assertEquals(3, ordinals.size());

    ordinals = toSet(tokenizedPrefixIndex.findKeysWithPrefix("re"));
    Assert.assertEquals(2, ordinals.size());

    ordinals = toSet(tokenizedPrefixIndex.findKeysWithPrefix("the "));// note the whitespace in findKeysWithPrefix string.
    // expected result ordinals size is 0, since entire movie is not indexed. movie name is split by whitespace.
    Assert.assertEquals(0, ordinals.size());

    ordinals = toSet(tokenizedPrefixIndex.findKeysWithPrefix(""));
    Assert.assertEquals(6, ordinals.size());
}
|
/**
 * Creates a StartTime that is offset from the reference point by the given duration
 * (option RELATIVE; no absolute time is set).
 *
 * @param relativeStart the relative offset
 * @return a relative StartTime
 */
public static final StartTime relative(Duration relativeStart) {
    return new StartTime(StartTimeOption.RELATIVE, relativeStart, null);
}
|
/**
 * Verifies that a relative StopTime carries the RELATIVE option and the given
 * duration, with no absolute time set.
 */
@Test
public void testStopRelative() {
    StopTime st = StopTime.relative(Duration.ofMinutes(20));
    assertEquals(StopTimeOption.RELATIVE, st.option());
    assertEquals(20 * 60, st.relativeTime().getSeconds());
    assertNull(st.absoluteTime());
}
|
/**
 * Fails if the subject string does not contain the given sequence.
 * A null subject produces an assertion failure with a dedicated message (rather
 * than throwing NPE); a null argument is a caller error and throws via checkNotNull.
 *
 * @param string the sequence the subject is expected to contain (must not be null)
 */
public void contains(@Nullable CharSequence string) {
  checkNotNull(string);
  if (actual == null) {
    failWithActual("expected a string that contains", string);
  } else if (!actual.contains(string)) {
    failWithActual("expected to contain", string);
  }
}
|
/**
 * Smoke test: a string subject contains one of its own characters.
 */
@Test
public void stringContains() {
  assertThat("abc").contains("c");
}
|
/**
 * Attempts to extract an IOException from the given throwable's cause chain.
 * Returns null when there is nothing to extract (null input, or the innermost
 * cause is neither an HTTP-channel failure nor an IOException).
 *
 * @param path path associated with the failure (used in the wrapped message)
 * @param thrown the outer exception to inspect (may be null)
 * @param message additional context for the extracted exception
 * @return an IOException preserving the outer stack trace, or null
 */
public static IOException maybeExtractIOException(
    String path,
    Throwable thrown,
    String message) {

  if (thrown == null) {
    return null;
  }

  // walk down the chain of exceptions to find the innermost.
  Throwable cause = getInnermostThrowable(thrown.getCause(), thrown);

  // see if this is an http channel exception
  HttpChannelEOFException channelException =
      maybeExtractChannelException(path, message, cause);
  if (channelException != null) {
    return channelException;
  }

  // not a channel exception, not an IOE.
  if (!(cause instanceof IOException)) {
    return null;
  }

  // the cause can be extracted to an IOE.
  // rather than just return it, we try to preserve the stack trace
  // of the outer exception.
  // as a new instance is created through reflection, the
  // class of the returned instance will be that of the innermost,
  // unless no suitable constructor is available.
  final IOException ioe = (IOException) cause;

  return wrapWithInnerIOE(path, message, thrown, ioe);
}
|
/**
 * Verifies that an UnknownHostException nested under two SDK exceptions is extracted
 * as an UnknownHostException whose cause is the original outer exception, so the
 * full stack trace chain is preserved.
 */
@Test
public void testUnknownHostExceptionExtraction() throws Throwable {
  final SdkClientException thrown = sdkException("top",
      sdkException("middle",
          new UnknownHostException("bottom")));
  final IOException ioe = intercept(UnknownHostException.class, "top",
      () -> {
        throw maybeExtractIOException("", thrown, "");
      });

  // the wrapped exception is the top level one: no stack traces have
  // been lost
  if (ioe.getCause() != thrown) {
    throw new AssertionError("Cause of " + ioe + " is not " + thrown, thrown);
  }
}
|
/**
 * Returns the checkpoint manager owned by this job.
 */
public CheckpointManager getCheckpointManager() {
    return checkpointManager;
}
|
/**
 * Verifies that when the checkpoint coordinator reports an aggregate-commit error,
 * the job master flags itself for restore.
 */
@Test
public void testCommitFailedWillRestore() throws Exception {
    long jobId = instance.getFlakeIdGenerator(Constant.SEATUNNEL_ID_GENERATOR_NAME).newId();
    JobMaster jobMaster = newJobInstanceWithRunningState(jobId);
    // Simulate an aggregate-commit failure on the checkpoint coordinator.
    jobMaster
            .getCheckpointManager()
            .getCheckpointCoordinator(1)
            .handleCoordinatorError(
                    "commit failed",
                    new RuntimeException(),
                    CheckpointCloseReason.AGGREGATE_COMMIT_ERROR);
    Assertions.assertTrue(jobMaster.isNeedRestore());
}
|
/**
 * Retrieves service-catalog metadata for the given SAML request and maps it onto the
 * request, using the authentication-specific mapping when applicable.
 *
 * @param samlRequest the request to enrich with catalog metadata
 * @throws DienstencatalogusException if the catalog lookup fails
 */
public void resolveDcMetadata(SamlRequest samlRequest) throws DienstencatalogusException {
    final DcMetadataResponse metadata = dienstencatalogusClient.retrieveMetadataFromDc(samlRequest);
    if (samlRequest instanceof AuthenticationRequest authenticationRequest) {
        dcMetadataResponseMapper.dcMetadataToAuthenticationRequest(authenticationRequest, metadata, samlRequest.getServiceEntityId());
    } else {
        dcMetadataResponseMapper.dcMetadataToSamlRequest(samlRequest, metadata);
    }
}
|
/**
 * Verifies that resolving metadata without a minimum reliability level fails with the
 * expected message. The original version silently passed when no exception was thrown;
 * the no-throw path now fails the test explicitly.
 */
@Test
public void resolveDcMinimumAuthLevelNotFoundTest() throws DienstencatalogusException {
    DcMetadataResponse dcMetadataResponse = dcClientStubGetMetadata(stubsCaMetadataFile, null, 1L);
    dcMetadataResponse.setMinimumReliabilityLevel(null);
    when(dienstencatalogusClientMock.retrieveMetadataFromDc(any(SamlRequest.class))).thenReturn(dcMetadataResponse);
    SamlRequest request = new AuthenticationRequest();
    request.setConnectionEntityId(CONNECTION_ENTITY_ID);
    request.setServiceEntityId(SERVICE_ENTITY_ID);
    try {
        dcMetadataService.resolveDcMetadata(request);
        // Not caught below: AssertionError is not a DienstencatalogusException.
        throw new AssertionError("Expected DienstencatalogusException was not thrown");
    } catch (DienstencatalogusException e) {
        assertEquals("Metadata from dc minimum reliability level not set", e.getMessage());
    }
}
|
/**
 * Starts the consumer threads, allocating buffers first. Idempotent: subsequent
 * calls return without effect once running.
 *
 * The running flag is re-checked after acquiring the lock (double-checked pattern):
 * the original code only checked before locking, so two racing callers could both
 * pass the check, and the second would re-allocate buffers and start the threads a
 * second time once the first released the lock.
 *
 * @param channels the channels to consume from (threads were bound at construction)
 */
@Override
public void begin(Channels channels) {
    if (running) {
        return;
    }
    lock.lock();
    try {
        // Re-check under the lock to guarantee single initialization.
        if (running) {
            return;
        }
        this.allocateBuffer2Thread();
        for (ConsumerThread consumerThread : consumerThreads) {
            consumerThread.start();
        }
        running = true;
    } finally {
        lock.unlock();
    }
}
|
@Test
public void testBeginConsumeDriver() {
    // Two partitions of 100 slots each, blocking producers when full.
    Channels<SampleData> channels =
        new Channels<>(2, 100, new SimpleRollingPartitioner<SampleData>(), BufferStrategy.BLOCKING);
    ConsumeDriver<SampleData> pool =
        new ConsumeDriver<>("default", channels, new SampleConsumer(), 2, 20);
    pool.begin(channels);
    // begin() must have spawned exactly two live consumer threads.
    ConsumerThread[] threads = Whitebox.getInternalState(pool, "consumerThreads");
    Assertions.assertEquals(2, threads.length);
    for (ConsumerThread thread : threads) {
        Assertions.assertTrue(thread.isAlive());
    }
}
|
/**
 * Pushes every flow YAML file found under {@code directory} to the server
 * for the configured namespace, optionally deleting server-side flows that
 * are absent locally (the {@code delete} flag).
 *
 * @return 0 on success, 1 when local validation or the server rejects the flows
 */
@SuppressWarnings("deprecation")
@Override
public Integer call() throws Exception {
    super.call();
    try (var files = Files.walk(directory)) {
        // Read every regular file with a valid flow extension, expanding
        // include directives relative to the file's parent directory.
        List<String> flows = files
            .filter(Files::isRegularFile)
            .filter(YamlFlowParser::isValidExtension)
            .map(path -> {
                try {
                    return IncludeHelperExpander.expand(Files.readString(path, Charset.defaultCharset()), path.getParent());
                } catch (IOException e) {
                    // Streams cannot propagate checked exceptions; rethrow unchecked.
                    throw new RuntimeException(e);
                }
            })
            .toList();
        String body = "";
        if (flows.isEmpty()) {
            stdOut("No flow found on '{}'", directory.toFile().getAbsolutePath());
        } else {
            // Join all flows into a single multi-document YAML payload.
            body = String.join("\n---\n", flows);
        }
        try(DefaultHttpClient client = client()) {
            MutableHttpRequest<String> request = HttpRequest
                .POST(apiUri("/flows/") + namespace + "?delete=" + delete, body).contentType(MediaType.APPLICATION_YAML);
            List<UpdateResult> updated = client.toBlocking().retrieve(
                this.requestOptions(request),
                Argument.listOf(UpdateResult.class)
            );
            stdOut(updated.size() + " flow(s) for namespace '" + namespace + "' successfully updated !");
            updated.forEach(flow -> stdOut("- " + flow.getNamespace() + "." + flow.getId()));
        } catch (HttpClientResponseException e){
            // Server-side rejection (e.g. flow validation failure).
            FlowValidateCommand.handleHttpException(e, "flow");
            return 1;
        }
    } catch (ConstraintViolationException e) {
        // Local constraint violation while parsing/expanding the flows.
        FlowValidateCommand.handleException(e, "flow");
        return 1;
    }
    return 0;
}
|
/**
 * End-to-end check of namespace update with {@code --delete}: pushing a
 * smaller directory after a larger one deletes the now-missing flows.
 */
@Test
void runWithDelete() {
    URL directory = FlowNamespaceUpdateCommandTest.class.getClassLoader().getResource("flows");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Capture stdout to assert on the CLI summary line.
    // NOTE(review): System.out is not restored afterwards — consider a
    // try/finally if other tests in this class need the real stdout.
    System.setOut(new PrintStream(out));
    URL subDirectory = FlowNamespaceUpdateCommandTest.class.getClassLoader().getResource("flows/flowsSubFolder");
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        // First run: push all flows from the main fixture directory.
        String[] args = {
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "--delete",
            "io.kestra.cli",
            directory.getPath(),
        };
        PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        assertThat(out.toString(), containsString("3 flow(s)"));
        out.reset();
        // Second run: only the subfolder's flows remain, so flows missing
        // from it are deleted and the remaining one is updated.
        args = new String[]{
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "--delete",
            "io.kestra.cli",
            subDirectory.getPath(),
        };
        PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        // 2 delete + 1 update
        assertThat(out.toString(), containsString("3 flow(s)"));
    }
}
|
/**
 * Executes the feature: delegates wholesale to a custom processor when one
 * is installed; otherwise runs the before-feature hook, processes each
 * scenario, and always fires the after-feature callback.
 */
@Override
public void run() {
    if (processor != null) {
        // Custom processor replaces the whole feature lifecycle.
        processor.execute();
        return;
    }
    if (beforeHook()) {
        scenarios.forEachRemaining(this::processScenario);
    } else {
        logger.info("before-feature hook returned [false], aborting: {}", this);
    }
    afterFeature();
}
|
/**
 * Runs tags.feature and verifies the tag names/values parsed from the
 * feature file are exposed to the script alongside the config source and
 * karate-base function.
 */
@Test
void testTags() {
    run("tags.feature");
    match(fr.result.getVariables(), "{ configSource: 'normal', functionFromKarateBase: '#notnull', tagNames: ['two=foo,bar', 'one'], tagValues: { one: [], two: ['foo', 'bar'] } }");
}
|
/**
 * Wraps the given table definition in a {@link BigQueryTable}, deriving the
 * conversion options (e.g. read method) from the table's properties.
 */
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    return new BigQueryTable(
        table, getConversionOptions(table.getProperties()));
}
|
@Test
public void testSelectDirectReadMethod() {
    // Table property JSON selecting the BigQuery storage (direct read) API.
    String properties =
        "{" + METHOD_PROPERTY + ": \"" + Method.DIRECT_READ.toString() + "\" }";
    Table table = fakeTableWithProperties("hello", properties);
    BigQueryTable sqlTable = (BigQueryTable) provider.buildBeamSqlTable(table);
    assertEquals(Method.DIRECT_READ, sqlTable.method);
}
|
/**
 * Returns the language id stored in this header.
 * <p>The underscore in the name mirrors the underlying field name.</p>
 */
public long getLang_id() {
    return lang_id;
}
|
/** The header must expose the language id expected by the fixture. */
@Test
public void testGetLang_id() {
    assertEquals(TestParameters.VP_LANGUAGE_ID, chmItspHeader.getLang_id());
}
|
/**
 * Not supported by this adapter.
 *
 * @throws MethodNotAvailableException always
 */
@Override
@MethodNotAvailable
public CompletionStage<Boolean> putIfAbsentAsync(K key, V value) {
    throw new MethodNotAvailableException();
}
|
/** The adapter must report putIfAbsentAsync as unavailable. */
@Test(expected = MethodNotAvailableException.class)
public void testPutIfAbsentAsync() {
    adapter.putIfAbsentAsync(23, "newValue");
}
|
/**
 * Builds a table UDAF that averages Integer columns using a
 * {@code STRUCT<SUM integer, COUNT bigint>} accumulator.
 */
@UdafFactory(description = "Compute average of column with type Integer.",
    aggregateSchema = "STRUCT<SUM integer, COUNT bigint>")
public static TableUdaf<Integer, Struct, Double> averageInt() {
  return getAverageImplementation(
      0,
      STRUCT_INT,
      (sum, newValue) -> sum.getInt32(SUM) + newValue,
      // Widen to double BEFORE dividing: int/long division truncates, which
      // would turn e.g. an average of 1.5 into 1.0.
      (sum, count) -> ((double) sum.getInt32(SUM)) / count,
      (sum1, sum2) -> sum1.getInt32(SUM) + sum2.getInt32(SUM),
      (sum, valueToUndo) -> sum.getInt32(SUM) - valueToUndo);
}
|
/** Averaging a run of zeroes must yield exactly 0.0. */
@Test
public void shouldAverageZeroes() {
  final TableUdaf<Integer, Struct, Double> udaf = AverageUdaf.averageInt();
  Struct agg = udaf.initialize();
  final int[] values = new int[] {0, 0, 0};
  for (final int thisValue : values) {
    agg = udaf.aggregate(thisValue, agg);
  }
  final double avg = udaf.map(agg);
  // assertThat takes (actual, matcher): actual value first so a failure
  // reads "Expected: 0.0, but: was <avg>".
  assertThat(avg, equalTo(0.0));
}
|
/**
 * Wraps {@code function} so that every {@code apply} call is recorded by
 * the given observation.
 */
static <T, R> Function<T, R> decorateFunction(Observation observation, Function<T, R> function) {
    return input -> observation.observe(() -> function.apply(input));
}
|
/**
 * Decorating a stubbed service call must pass the argument through, return
 * the stubbed value, and start/stop the observation without errors.
 */
@Test
public void shouldDecorateFunctionAndReturnWithSuccess() throws Throwable {
    given(helloWorldService.returnHelloWorldWithName("Tom")).willReturn("Hello world Tom");
    Function<String, String> function = Observations
        .decorateFunction(observation, helloWorldService::returnHelloWorldWithName);
    String result = function.apply("Tom");
    assertThat(result).isEqualTo("Hello world Tom");
    assertThatObservationWasStartedAndFinishedWithoutErrors();
    // The underlying service must have been invoked exactly once with "Tom".
    then(helloWorldService).should().returnHelloWorldWithName("Tom");
}
|
/**
 * Drops the table at the given location.
 * <p>Delegates to {@code dropTable(location, true)} — the second argument is
 * presumably a purge flag; confirm against the two-arg overload.</p>
 */
public boolean dropTable(String location) {
    return dropTable(location, true);
}
|
@Test
public void testDropTable() {
    final String location = tableDir.toURI().toString();
    TABLES.create(SCHEMA, location);
    TABLES.dropTable(location);
    // Loading a dropped table must fail with NoSuchTableException.
    assertThatThrownBy(() -> TABLES.load(location))
        .isInstanceOf(NoSuchTableException.class)
        .hasMessageStartingWith("Table does not exist");
}
|
/**
 * Deletes the given files, batching node ids per parent folder. Regular
 * nodes and already-trashed (duplicate) versions use different endpoints.
 * A 400 on the batch request falls back to deleting files one by one.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final Map<Path, List<Long>> regular = new HashMap<>();
    final Map<Path, List<Long>> trashed = new HashMap<>();
    for(Path file : files.keySet()) {
        // Duplicates are versions already in the trash; they need the
        // deleted-nodes endpoint below.
        final Map<Path, List<Long>> set = file.attributes().isDuplicate() ? trashed : regular;
        set.computeIfAbsent(file.getParent(), parent -> new ArrayList<>())
            .add(Long.parseLong(nodeid.getVersionId(file)));
        callback.delete(file);
        // Invalidate the cached node id for the deleted file
        nodeid.cache(file, null);
    }
    for(List<Long> nodes : regular.values()) {
        try {
            new NodesApi(session.getClient()).removeNodes(new DeleteNodesRequest().nodeIds(nodes), StringUtils.EMPTY);
        }
        catch(ApiException e) {
            switch(e.getCode()) {
                case 400:
                    // Batch delete rejected; fall back to per-file deletion.
                    log.warn(String.format("Ignore failure %s", e));
                    new SDSDeleteFeature(session, nodeid).delete(files, prompt, callback);
                    break;
                default:
                    throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, files.keySet().iterator().next());
            }
        }
    }
    for(List<Long> nodes : trashed.values()) {
        try {
            new NodesApi(session.getClient()).removeDeletedNodes(new DeleteDeletedNodesRequest().deletedNodeIds(nodes), StringUtils.EMPTY);
        }
        catch(ApiException e) {
            throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, files.keySet().iterator().next());
        }
    }
}
|
/**
 * Deleting a room must recursively remove nested folders and files: after
 * the delete, resolving node ids for the children must fail.
 */
@Test
public void testDeleteRecursively() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    // room > folder > file hierarchy with random names.
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path folder = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    final Path file = new SDSTouchFeature(session, nodeid).touch(
        new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new SDSFindFeature(session, nodeid).find(file));
    assertNotNull(nodeid.getVersionId(file));
    // Delete the top-level room only; children must disappear with it.
    new SDSBatchDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
    // Clear cached version ids so lookups hit the server again.
    file.attributes().setVersionId(null);
    folder.attributes().setVersionId(null);
    try {
        nodeid.getVersionId(file);
        fail();
    }
    catch(NotfoundException e) {
        // expected: file gone after recursive delete
    }
    try {
        nodeid.getVersionId(folder);
        fail();
    }
    catch(NotfoundException e) {
        // expected: folder gone after recursive delete
    }
}
|
/**
 * Default implementation: reports that nothing is a descendant.
 * Implementations backed by a real filesystem are expected to override this.
 */
@Restricted(NoExternalUse.class)
public boolean isDescendant(String childRelativePath) throws IOException {
    return false;
}
|
/** The abstract base default must report no descendants at all. */
@Test
public void testIsDescendant_AbstractBase() throws Exception {
    VirtualFile root = new VirtualFileMinimalImplementation();
    assertFalse(root.isDescendant("anything"));
}
|
/**
 * Returns a builder pre-populated with every attribute of the given node,
 * so callers can override selected fields before building a copy.
 */
public static Builder from(KubevirtNode node) {
    final Builder builder = new Builder()
            .hostname(node.hostname())
            .clusterName(node.clusterName())
            .type(node.type())
            .intgBridge(node.intgBridge())
            .tunBridge(node.tunBridge())
            .managementIp(node.managementIp())
            .dataIp(node.dataIp())
            .state(node.state())
            .phyIntfs(node.phyIntfs())
            .gatewayBridgeName(node.gatewayBridgeName());
    return builder;
}
|
/** Round-tripping a node through from()/build() must yield an equal node. */
@Test
public void testFrom() {
    KubevirtNode updatedNode = DefaultKubevirtNode.from(refNode).build();
    // assertEquals(expected, actual): the reference node is the expectation.
    assertEquals(refNode, updatedNode);
}
|
/**
 * Increments the broker-level PUT message counter for this cluster by one.
 */
public void incBrokerPutNums() {
    this.statsTable.get(Stats.BROKER_PUT_NUMS).getAndCreateStatsItem(this.clusterName).getValue().add(1);
}
|
/** A single increment must be visible through the cluster stats item. */
@Test
public void testIncBrokerPutNums() {
    brokerStatsManager.incBrokerPutNums();
    assertThat(brokerStatsManager.getStatsItem(BROKER_PUT_NUMS, CLUSTER_NAME).getValue().doubleValue()).isEqualTo(1L);
}
|
/** Exposes the bootstrap completion future so tests can await it. */
@VisibleForTesting
CompletableFuture<Acknowledge> getBootstrapCompletionFuture() {
    return bootstrapCompletionFuture;
}
|
/**
 * When the application's jobs all finish successfully, the bootstrap must
 * shut the cluster down with status SUCCEEDED.
 */
@Test
void testClusterShutdownWhenApplicationSucceeds() throws Exception {
    // we're "listening" on this to be completed to verify that the cluster
    // is being shut down from the ApplicationDispatcherBootstrap
    final CompletableFuture<ApplicationStatus> externalShutdownFuture =
            new CompletableFuture<>();
    final TestingDispatcherGateway.Builder dispatcherBuilder =
            finishedJobGatewayBuilder()
                    .setClusterShutdownFunction(
                            status -> {
                                externalShutdownFuture.complete(status);
                                return CompletableFuture.completedFuture(Acknowledge.get());
                            });
    // 3 = number of jobs submitted by the bootstrapped application.
    ApplicationDispatcherBootstrap bootstrap =
            createApplicationDispatcherBootstrap(
                    3, dispatcherBuilder.build(), scheduledExecutor);
    final CompletableFuture<Acknowledge> completionFuture =
            bootstrap.getBootstrapCompletionFuture();
    // wait until the bootstrap "thinks" it's done
    completionFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
    // verify that the dispatcher is actually being shut down
    assertThat(externalShutdownFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS))
            .isEqualTo(ApplicationStatus.SUCCEEDED);
}
|
/**
 * Retrieves the current project id from the GCP metadata server.
 */
String currentProject() {
    return callGet(endpoint + "/computeMetadata/v1/project/project-id");
}
|
/**
 * The metadata API must return the project id served by the stubbed
 * metadata endpoint (which requires the Google metadata-flavor header).
 */
@Test
public void currentProject() {
    // given: stub the metadata server endpoint for the project id
    stubFor(get(urlEqualTo("/computeMetadata/v1/project/project-id"))
        .withHeader("Metadata-Flavor", equalTo("Google"))
        .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(PROJECT)));
    // when
    String result = gcpMetadataApi.currentProject();
    // then
    assertEquals(PROJECT, result);
}
|
/**
 * Returns the default QoS options: a builder with no overrides applied.
 */
public static RpcQosOptions defaultOptions() {
    return newBuilder().build();
}
|
/** Building the default options must not fail or return null. */
@Test
public void defaultOptionsBuildSuccessfully() {
    assertNotNull(RpcQosOptions.defaultOptions());
}
|
/**
 * Creates an empty file, first computing the checksum of a zero-byte body
 * so the upload request carries the expected checksum.
 */
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
    return super.touch(file, status.withChecksum(write.checksum(file, status).compute(new NullInputStream(0L), status)));
}
|
/**
 * Touching a file through a virtual-host style endpoint must create it
 * without a version id, make it findable with consistent attributes, and
 * allow deleting it again.
 */
@Test
public void testTouchVirtualHost() throws Exception {
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final S3TouchFeature feature = new S3TouchFeature(virtualhost, acl);
    final String filename = new AsciiRandomStringService().random();
    assertTrue(feature.isSupported(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), filename));
    final Path test = feature.touch(new Path(filename, EnumSet.of(Path.Type.file)), new TransferStatus());
    // No versioning expected on the virtual-host bucket.
    assertNull(test.attributes().getVersionId());
    assertTrue(new S3FindFeature(virtualhost, acl).find(test));
    // Attributes must agree between the S3-specific and default finders.
    assertEquals(test.attributes(), new S3AttributesFinderFeature(virtualhost, acl).find(test));
    assertEquals(test.attributes(), new DefaultAttributesFinderFeature(virtualhost).find(test));
    new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new S3FindFeature(virtualhost, acl).find(test));
}
|
/**
 * Notifies every registered post-analysis task that report processing has
 * ended, passing SUCCESS when all steps ran and FAILED otherwise.
 */
@Override
public void finished(boolean allStepsExecuted) {
    if (postProjectAnalysisTasks.length == 0) {
        // No registered extensions: skip building the analysis snapshot.
        return;
    }
    ProjectAnalysisImpl analysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
    for (PostProjectAnalysisTask task : postProjectAnalysisTasks) {
        executeTask(analysis, task);
    }
}
|
/**
 * LogStatistics.add must NPE with a descriptive message for a null key or
 * a null value, regardless of whether all steps executed.
 */
@Test
@UseDataProvider("booleanValues")
public void logStatistics_add_fails_when_NPE_if_key_or_value_is_null(boolean allStepsExecuted) {
    underTest.finished(allStepsExecuted);
    // Capture the context handed to the task to reach its LogStatistics.
    verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
    PostProjectAnalysisTask.LogStatistics logStatistics = taskContextCaptor.getValue().getLogStatistics();
    assertThat(catchThrowable(() -> logStatistics.add(null, "foo")))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Statistic has null key");
    // Null key takes precedence even when the value is also null.
    assertThat(catchThrowable(() -> logStatistics.add(null, null)))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Statistic has null key");
    assertThat(catchThrowable(() -> logStatistics.add("bar", null)))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Statistic with key [bar] has null value");
}
|
/**
 * Returns all registered OpenStack nodes encoded as a JSON array.
 */
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response nodes() {
    log.trace(String.format(MESSAGE_NODE, QUERY));
    for (OpenstackNode node : osNodeService.nodes()) {
        osJsonNodes.add(codec(OpenstackNode.class).encode(node, this));
    }
    return ok(root).build();
}
|
/**
 * The REST endpoint must serialize a single registered node into a JSON
 * object with a one-element "nodes" array.
 */
@Test
public void testGetNodesPopulatedArray() {
    // Mock the service to return exactly one node.
    expect(mockOpenstackNodeService.nodes()).
        andReturn(ImmutableSet.of(openstackNode)).anyTimes();
    replay(mockOpenstackNodeService);
    final WebTarget wt = target();
    final String response = wt.path(PATH).request().get(String.class);
    final JsonObject result = Json.parse(response).asObject();
    assertThat(result, notNullValue());
    // Top-level object has only the "nodes" key.
    assertThat(result.names(), hasSize(1));
    assertThat(result.names().get(0), is("nodes"));
    final JsonArray jsonNodes = result.get("nodes").asArray();
    assertThat(jsonNodes, notNullValue());
    assertThat(jsonNodes.size(), is(1));
    assertThat(jsonNodes, hasNode(openstackNode));
    verify(mockOpenstackNodeService);
}
|
/**
 * Coerces the given value to the required type; returns the value unchanged
 * when either the type or the value is null.
 */
public static Object coerceParameter(Type requiredType, Object valueToCoerce) {
    if (requiredType == null || valueToCoerce == null) {
        return valueToCoerce;
    }
    return actualCoerceParameter(requiredType, valueToCoerce);
}
|
/**
 * Coercing a LocalDate to DATE_TIME must produce a ZonedDateTime at
 * midnight UTC on the same date.
 */
@Test
void coerceParameterDateToDateTimeConverted() {
    Object value = LocalDate.now();
    Object retrieved = CoerceUtil.coerceParameter(BuiltInType.DATE_TIME, value);
    assertNotNull(retrieved);
    assertTrue(retrieved instanceof ZonedDateTime);
    ZonedDateTime zdtRetrieved = (ZonedDateTime) retrieved;
    // Date part preserved, time-of-day zeroed, offset fixed to UTC.
    assertEquals(value, zdtRetrieved.toLocalDate());
    assertEquals(ZoneOffset.UTC, zdtRetrieved.getOffset());
    assertEquals(0, zdtRetrieved.getHour());
    assertEquals(0, zdtRetrieved.getMinute());
    assertEquals(0, zdtRetrieved.getSecond());
}
|
/**
 * Creates a consumer view for the given subpartition, registering it with
 * both the in-memory and on-disk data managers.
 *
 * @throws PartitionNotFoundException when the spilled data file is not
 *         readable — the data cannot be regenerated, so the partition must
 *         be reported lost for failover to re-run upstream
 */
@Override
protected ResultSubpartitionView createSubpartitionView(
        int subpartitionId, BufferAvailabilityListener availabilityListener)
        throws IOException {
    checkState(!isReleased(), "ResultPartition already released.");
    // If data file is not readable, throw PartitionNotFoundException to mark this result
    // partition failed. Otherwise, the partition data is not regenerated, so failover can not
    // recover the job.
    if (!Files.isReadable(dataFilePath)) {
        throw new PartitionNotFoundException(getPartitionId());
    }
    // if broadcastOptimize is enabled, map every subpartitionId to the special broadcast
    // subpartition.
    subpartitionId = isBroadcastOnly ? BROADCAST_SUBPARTITION : subpartitionId;
    HsSubpartitionConsumer subpartitionConsumer =
            new HsSubpartitionConsumer(availabilityListener);
    HsConsumerId lastConsumerId = lastConsumerIds[subpartitionId];
    // May reject the new consumer when multiple consumers are disallowed.
    checkMultipleConsumerIsAllowed(lastConsumerId, hybridShuffleConfiguration);
    // assign a unique id for each consumer, now it is guaranteed by the value that is one
    // higher than the last consumerId's id field.
    HsConsumerId consumerId = HsConsumerId.newId(lastConsumerId);
    lastConsumerIds[subpartitionId] = consumerId;
    // Register with disk first, then memory, then wire both views into the
    // consumer before returning it.
    HsDataView diskDataView =
            fileDataManager.registerNewConsumer(
                    subpartitionId, consumerId, subpartitionConsumer);
    HsDataView memoryDataView =
            checkNotNull(memoryDataManager)
                    .registerNewConsumer(subpartitionId, consumerId, subpartitionConsumer);
    subpartitionConsumer.setDiskDataView(diskDataView);
    subpartitionConsumer.setMemoryDataView(memoryDataView);
    return subpartitionConsumer;
}
|
/**
 * Creating a subpartition view after the spilled data files were deleted
 * must fail with PartitionNotFoundException so failover re-runs upstream.
 */
@Test
void testCreateSubpartitionViewLostData() throws Exception {
    final int numBuffers = 10;
    BufferPool bufferPool = globalPool.createBufferPool(numBuffers, numBuffers);
    HsResultPartition resultPartition = createHsResultPartition(2, bufferPool);
    // Simulate data loss by wiping the partition's spill directory.
    IOUtils.deleteFilesRecursively(tempDataPath);
    assertThatThrownBy(
                    () ->
                            resultPartition.createSubpartitionView(
                                    new ResultSubpartitionIndexSet(0),
                                    new NoOpBufferAvailablityListener()))
            .isInstanceOf(PartitionNotFoundException.class);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.