| focal_method | test_case |
|---|---|
public static String getMetricName(String name) {
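// Cluster-level metric names are already fully qualified; return them unchanged.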
if (name.startsWith(CLUSTER)) {
return name;
}
switch (CommonUtils.PROCESS_TYPE.get()) {
case CLIENT:
return getClientMetricName(name);
case MASTER:
return getMasterMetricName(name);
case PROXY:
return getProxyMetricName(name);
case WORKER:
return getWorkerMetricName(name);
case JOB_MASTER:
return getJobMasterMetricName(name);
case JOB_WORKER:
return getJobWorkerMetricName(name);
case PLUGIN:
return getPluginMetricName(name);
case SECURITY:
return getSecurityMetricName(name);
default:
throw new IllegalStateException("Unknown process type");
}
}
|
@Test
public void getMetricNameTest() {
assertEquals("Cluster.counter", MetricsSystem.getMetricName("Cluster.counter"));
assertEquals("Master.timer", MetricsSystem.getMetricName("Master.timer"));
String workerGaugeName = "Worker.gauge";
assertEquals(workerGaugeName, MetricsSystem.getMetricName(workerGaugeName));
assertTrue(MetricsSystem.getMetricName(workerGaugeName).startsWith(workerGaugeName));
String clientCounterName = "Client.counter";
assertEquals(clientCounterName, MetricsSystem.getMetricName(clientCounterName));
assertTrue(MetricsSystem.getMetricName(clientCounterName).startsWith(clientCounterName));
}
|
@Override
public TransformResultMetadata getResultMetadata() {
return BOOLEAN_SV_NO_DICTIONARY_METADATA;
}
|
@Test
public void testLogicalOperatorNullLiteral() {
ExpressionContext intEqualsExpr =
RequestContextUtils.getExpression(String.format("EQUALS(%s, null)", INT_SV_COLUMN));
ExpressionContext longEqualsExpr =
RequestContextUtils.getExpression(String.format("EQUALS(%s, null)", LONG_SV_COLUMN));
String functionName = getFunctionName();
ExpressionContext expression = ExpressionContext.forFunction(
new FunctionContext(FunctionContext.Type.TRANSFORM, functionName,
Arrays.asList(intEqualsExpr, longEqualsExpr)));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertEquals(transformFunction.getName(), functionName);
TransformResultMetadata resultMetadata = transformFunction.getResultMetadata();
assertEquals(resultMetadata.getDataType(), FieldSpec.DataType.BOOLEAN);
assertTrue(resultMetadata.isSingleValue());
assertFalse(resultMetadata.hasDictionary());
boolean[] expectedValues = new boolean[NUM_ROWS];
RoaringBitmap nullBitmap = new RoaringBitmap();
nullBitmap.add(0L, NUM_ROWS);
testTransformFunctionWithNull(transformFunction, expectedValues, nullBitmap);
}
|
public static Optional<UserAgent> getUserAgent() {
return Optional.ofNullable(USER_AGENT_CONTEXT_KEY.get());
}
|
@Test
void getUserAgent() throws UnrecognizedUserAgentException {
when(clientConnectionManager.getUserAgent(any()))
.thenReturn(Optional.empty());
assertFalse(getRequestAttributes().hasUserAgent());
final UserAgent userAgent = UserAgentUtil.parseUserAgentString("Signal-Desktop/1.2.3 Linux");
when(clientConnectionManager.getUserAgent(any()))
.thenReturn(Optional.of(userAgent));
final GetRequestAttributesResponse response = getRequestAttributes();
assertTrue(response.hasUserAgent());
assertEquals("DESKTOP", response.getUserAgent().getPlatform());
assertEquals("1.2.3", response.getUserAgent().getVersion());
assertEquals("Linux", response.getUserAgent().getAdditionalSpecifiers());
}
|
public static Future<Void> maybeUpdateMetadataVersion(
Reconciliation reconciliation,
Vertx vertx,
TlsPemIdentity coTlsPemIdentity,
AdminClientProvider adminClientProvider,
String desiredMetadataVersion,
KafkaStatus status
) {
String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace());
Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
Promise<Void> updatePromise = Promise.promise();
maybeUpdateMetadataVersion(reconciliation, vertx, kafkaAdmin, desiredMetadataVersion, status)
.onComplete(res -> {
// Close the Admin client and return the original result
LOGGER.debugCr(reconciliation, "Closing the Kafka Admin API connection");
kafkaAdmin.close();
updatePromise.handle(res);
});
return updatePromise.future();
}
|
@Test
public void testNoMetadataVersionChange(VertxTestContext context) {
// Mock the Admin client
Admin mockAdminClient = mock(Admin.class);
// Mock describing the current metadata version
mockDescribeVersion(mockAdminClient);
// Mock the Admin client provider
AdminClientProvider mockAdminClientProvider = mockAdminClientProvider(mockAdminClient);
// Dummy KafkaStatus to check the values from
KafkaStatus status = new KafkaStatus();
Checkpoint checkpoint = context.checkpoint();
KRaftMetadataManager.maybeUpdateMetadataVersion(Reconciliation.DUMMY_RECONCILIATION, vertx, DUMMY_IDENTITY, mockAdminClientProvider, "3.6-IV1", status)
.onComplete(context.succeeding(s -> {
assertThat(status.getKafkaMetadataVersion(), is("3.6-IV1"));
verify(mockAdminClient, never()).updateFeatures(any(), any());
verify(mockAdminClient, times(1)).describeFeatures();
checkpoint.flag();
}));
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Node node = (Node) o;
return Objects.equals(getControl(), node.getControl()) && Objects.equals(getTransaction(), node.getTransaction());
}
|
@Test
public void testContains() {
NamingServerNode node1 = new NamingServerNode();
node1.setControl(new Node.Endpoint("111.11.11.1", 123));
node1.setTransaction(new Node.Endpoint("111.11.11.1", 124));
Node node2 = new Node();
node2.setControl(new Node.Endpoint("111.11.11.1", 123));
node2.setTransaction(new Node.Endpoint("111.11.11.1", 124));
NamingServerNode node3 = new NamingServerNode();
node3.setControl(new Node.Endpoint("111.11.11.1", 123));
node3.setTransaction(new Node.Endpoint("111.11.11.1", 124));
Assertions.assertFalse(node1.equals(node2));
Assertions.assertTrue(node1.equals(node3));
}
|
CoordinatorResult<Void, CoordinatorRecord> prepareRebalance(
ClassicGroup group,
String reason
) {
// If any members are awaiting sync, cancel their request and have them rejoin.
if (group.isInState(COMPLETING_REBALANCE)) {
resetAndPropagateAssignmentWithError(group, Errors.REBALANCE_IN_PROGRESS);
}
// If a sync expiration is pending, cancel it.
removeSyncExpiration(group);
boolean isInitialRebalance = group.isInState(EMPTY);
if (isInitialRebalance) {
// The group is new. Provide more time for the members to join.
int delayMs = classicGroupInitialRebalanceDelayMs;
int remainingMs = Math.max(group.rebalanceTimeoutMs() - classicGroupInitialRebalanceDelayMs, 0);
timer.schedule(
classicGroupJoinKey(group.groupId()),
delayMs,
TimeUnit.MILLISECONDS,
false,
() -> tryCompleteInitialRebalanceElseSchedule(group.groupId(), delayMs, remainingMs)
);
}
group.transitionTo(PREPARING_REBALANCE);
log.info("Preparing to rebalance group {} in state {} with old generation {} (reason: {}).",
group.groupId(), group.currentState(), group.generationId(), reason);
return isInitialRebalance ? EMPTY_RESULT : maybeCompleteJoinElseSchedule(group);
}
|
@Test
public void testCompleteJoinPhaseNoMembersRejoinedExtendsJoinPhase() throws Exception {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
ClassicGroup group = context.createClassicGroup("group-id");
JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
.withGroupId("group-id")
.withGroupInstanceId("first-instance-id")
.withMemberId(UNKNOWN_MEMBER_ID)
.withDefaultProtocolTypeAndProtocols()
.withSessionTimeoutMs(30000)
.withRebalanceTimeoutMs(10000)
.build();
// First member joins group and completes join phase.
JoinGroupResponseData firstMemberResponse = context.joinClassicGroupAndCompleteJoin(request, true, true);
assertEquals(Errors.NONE.code(), firstMemberResponse.errorCode());
String firstMemberId = firstMemberResponse.memberId();
// Second member joins and group goes into rebalancing state.
GroupMetadataManagerTestContext.JoinResult secondMemberJoinResult = context.sendClassicGroupJoin(
request.setGroupInstanceId("second-instance-id")
);
assertTrue(secondMemberJoinResult.records.isEmpty());
assertFalse(secondMemberJoinResult.joinFuture.isDone());
// First static member rejoins and completes join phase.
GroupMetadataManagerTestContext.JoinResult firstMemberJoinResult = context.sendClassicGroupJoin(
request.setMemberId(firstMemberId).setGroupInstanceId("first-instance-id"));
assertTrue(firstMemberJoinResult.records.isEmpty());
assertTrue(firstMemberJoinResult.joinFuture.isDone());
assertTrue(secondMemberJoinResult.joinFuture.isDone());
assertEquals(Errors.NONE.code(), firstMemberJoinResult.joinFuture.get().errorCode());
assertEquals(Errors.NONE.code(), secondMemberJoinResult.joinFuture.get().errorCode());
assertEquals(2, group.numMembers());
assertEquals(2, group.generationId());
String secondMemberId = secondMemberJoinResult.joinFuture.get().memberId();
// Trigger a rebalance. No members rejoined.
context.groupMetadataManager.prepareRebalance(group, "trigger rebalance");
assertEquals(2, group.numMembers());
assertTrue(group.isInState(PREPARING_REBALANCE));
assertEquals(0, group.numAwaitingJoinResponse());
// Advance clock by rebalance timeout to complete join phase. As long as both members have not
// rejoined, we extend the join phase.
GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(10000));
assertEquals(10000, context.timer.timeout("join-group-id").deadlineMs - context.time.milliseconds());
GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(10000));
assertEquals(10000, context.timer.timeout("join-group-id").deadlineMs - context.time.milliseconds());
assertTrue(group.isInState(PREPARING_REBALANCE));
assertEquals(2, group.numMembers());
assertEquals(2, group.generationId());
// Let first and second member rejoin. This should complete the join phase.
firstMemberJoinResult = context.sendClassicGroupJoin(
request
.setMemberId(firstMemberId)
.setGroupInstanceId("first-instance-id")
);
assertTrue(firstMemberJoinResult.records.isEmpty());
assertFalse(firstMemberJoinResult.joinFuture.isDone());
assertTrue(group.isInState(PREPARING_REBALANCE));
assertEquals(2, group.numMembers());
assertEquals(2, group.generationId());
secondMemberJoinResult = context.sendClassicGroupJoin(
request
.setMemberId(secondMemberId)
.setGroupInstanceId("second-instance-id")
);
assertTrue(secondMemberJoinResult.records.isEmpty());
assertTrue(firstMemberJoinResult.joinFuture.isDone());
assertTrue(secondMemberJoinResult.joinFuture.isDone());
assertEquals(Errors.NONE.code(), firstMemberJoinResult.joinFuture.get().errorCode());
assertEquals(Errors.NONE.code(), secondMemberJoinResult.joinFuture.get().errorCode());
assertTrue(group.isInState(COMPLETING_REBALANCE));
assertEquals(2, group.numMembers());
assertEquals(3, group.generationId());
}
|
public static <T> Write<T> write(String jdbcUrl, String table) {
return new AutoValue_ClickHouseIO_Write.Builder<T>()
.jdbcUrl(jdbcUrl)
.table(table)
.properties(new Properties())
.maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
.initialBackoff(DEFAULT_INITIAL_BACKOFF)
.maxRetries(DEFAULT_MAX_RETRIES)
.maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
.build()
.withInsertDeduplicate(true)
.withInsertDistributedSync(true);
}
|
@Test
public void testArrayOfArrayOfInt64() throws Exception {
Schema schema =
Schema.of(Schema.Field.of("f0", FieldType.array(FieldType.array(FieldType.INT64))));
Row row1 =
Row.withSchema(schema)
.addValue(
Arrays.asList(Arrays.asList(1L, 2L), Arrays.asList(2L, 3L), Arrays.asList(3L, 4L)))
.build();
executeSql("CREATE TABLE test_array_of_array_of_int64 (f0 Array(Array(Int64))) ENGINE=Log");
pipeline
.apply(Create.of(row1).withRowSchema(schema))
.apply(write("test_array_of_array_of_int64"));
pipeline.run().waitUntilFinish();
long sum0 =
executeQueryAsLong(
"SELECT SUM(arraySum(arrayMap(x -> arraySum(x), f0))) "
+ "FROM test_array_of_array_of_int64");
assertEquals(15L, sum0);
}
|
public long betweenYear(boolean isReset) {
final Calendar beginCal = DateUtil.calendar(begin);
final Calendar endCal = DateUtil.calendar(end);
int result = endCal.get(Calendar.YEAR) - beginCal.get(Calendar.YEAR);
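// When isReset is false, the raw year difference is adjusted below depending on whether a full year has actually elapsed.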
if (false == isReset) {
final int beginMonthBase0 = beginCal.get(Calendar.MONTH);
final int endMonthBase0 = endCal.get(Calendar.MONTH);
if (beginMonthBase0 < endMonthBase0) {
return result;
} else if (beginMonthBase0 > endMonthBase0) {
return result - 1;
} else if (Calendar.FEBRUARY == beginMonthBase0
&& CalendarUtil.isLastDayOfMonth(beginCal)
&& CalendarUtil.isLastDayOfMonth(endCal)) {
// Handle the leap-year February case:
// both dates fall on the last day of February, so treat the months as equal by setting both to the 1st
beginCal.set(Calendar.DAY_OF_MONTH, 1);
endCal.set(Calendar.DAY_OF_MONTH, 1);
}
endCal.set(Calendar.YEAR, beginCal.get(Calendar.YEAR));
long between = endCal.getTimeInMillis() - beginCal.getTimeInMillis();
if (between < 0) {
return result - 1;
}
}
return result;
}
|
@Test
public void issueI97U3JTest() {
String dateStr1 = "2024-02-29 23:59:59";
Date sdate = DateUtil.parse(dateStr1);
String dateStr2 = "2023-03-01 00:00:00";
Date edate = DateUtil.parse(dateStr2);
long result = DateUtil.betweenYear(sdate, edate, false);
assertEquals(0, result);
}
|
@Override
public IndexRange get(String index) throws NotFoundException {
final DBQuery.Query query = DBQuery.and(
DBQuery.notExists("start"),
DBQuery.is(IndexRange.FIELD_INDEX_NAME, index));
final MongoIndexRange indexRange = collection.findOne(query);
if (indexRange == null) {
throw new NotFoundException("Index range for index <" + index + "> not found.");
}
return indexRange;
}
|
@Test
@MongoDBFixtures("MongoIndexRangeServiceTest.json")
public void getReturnsExistingIndexRange() throws Exception {
IndexRange indexRange = indexRangeService.get("graylog_1");
assertThat(indexRange.indexName()).isEqualTo("graylog_1");
assertThat(indexRange.begin()).isEqualTo(new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC));
assertThat(indexRange.end()).isEqualTo(new DateTime(2015, 1, 2, 0, 0, DateTimeZone.UTC));
assertThat(indexRange.calculatedAt()).isEqualTo(new DateTime(2015, 1, 2, 0, 0, DateTimeZone.UTC));
assertThat(indexRange.calculationDuration()).isEqualTo(23);
}
|
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
if (msg instanceof Http2DataFrame) {
Http2DataFrame dataFrame = (Http2DataFrame) msg;
encoder().writeData(ctx, dataFrame.stream().id(), dataFrame.content(),
dataFrame.padding(), dataFrame.isEndStream(), promise);
} else if (msg instanceof Http2HeadersFrame) {
writeHeadersFrame(ctx, (Http2HeadersFrame) msg, promise);
} else if (msg instanceof Http2WindowUpdateFrame) {
Http2WindowUpdateFrame frame = (Http2WindowUpdateFrame) msg;
Http2FrameStream frameStream = frame.stream();
// It is legit to send a WINDOW_UPDATE frame for the connection stream. The parent channel doesn't attempt
// to set the Http2FrameStream so we assume if it is null the WINDOW_UPDATE is for the connection stream.
try {
if (frameStream == null) {
increaseInitialConnectionWindow(frame.windowSizeIncrement());
} else {
consumeBytes(frameStream.id(), frame.windowSizeIncrement());
}
promise.setSuccess();
} catch (Throwable t) {
promise.setFailure(t);
}
} else if (msg instanceof Http2ResetFrame) {
Http2ResetFrame rstFrame = (Http2ResetFrame) msg;
int id = rstFrame.stream().id();
// Only ever send a reset frame if stream may have existed before as otherwise we may send a RST on a
// stream in an invalid state and cause a connection error.
if (connection().streamMayHaveExisted(id)) {
encoder().writeRstStream(ctx, rstFrame.stream().id(), rstFrame.errorCode(), promise);
} else {
ReferenceCountUtil.release(rstFrame);
promise.setFailure(Http2Exception.streamError(
rstFrame.stream().id(), Http2Error.PROTOCOL_ERROR, "Stream never existed"));
}
} else if (msg instanceof Http2PingFrame) {
Http2PingFrame frame = (Http2PingFrame) msg;
encoder().writePing(ctx, frame.ack(), frame.content(), promise);
} else if (msg instanceof Http2SettingsFrame) {
encoder().writeSettings(ctx, ((Http2SettingsFrame) msg).settings(), promise);
} else if (msg instanceof Http2SettingsAckFrame) {
// In the event of manual SETTINGS ACK, it is assumed the encoder will apply the earliest received but not
// yet ACKed settings.
encoder().writeSettingsAck(ctx, promise);
} else if (msg instanceof Http2GoAwayFrame) {
writeGoAwayFrame(ctx, (Http2GoAwayFrame) msg, promise);
} else if (msg instanceof Http2PushPromiseFrame) {
Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg;
writePushPromise(ctx, pushPromiseFrame, promise);
} else if (msg instanceof Http2PriorityFrame) {
Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg;
encoder().writePriority(ctx, priorityFrame.stream().id(), priorityFrame.streamDependency(),
priorityFrame.weight(), priorityFrame.exclusive(), promise);
} else if (msg instanceof Http2UnknownFrame) {
Http2UnknownFrame unknownFrame = (Http2UnknownFrame) msg;
encoder().writeFrame(ctx, unknownFrame.frameType(), unknownFrame.stream().id(),
unknownFrame.flags(), unknownFrame.content(), promise);
} else if (!(msg instanceof Http2Frame)) {
ctx.write(msg, promise);
} else {
ReferenceCountUtil.release(msg);
throw new UnsupportedMessageTypeException(msg, SUPPORTED_MESSAGES);
}
}
|
@Test
public void sendSettingsFrame() {
Http2Settings settings = new Http2Settings();
channel.write(new DefaultHttp2SettingsFrame(settings));
verify(frameWriter).writeSettings(eqFrameCodecCtx(), same(settings), any(ChannelPromise.class));
}
|
public boolean validatePatternIfPresent(Retention retention, TableUri tableUri, String schema) {
if (retention.getColumnPattern() != null) {
if (retention.getColumnPattern().getColumnName() != null
&& !columnExists(
getSchemaFromSchemaJson(schema), retention.getColumnPattern().getColumnName())) {
return false;
}
return isPatternValid(retention.getColumnPattern().getPattern(), tableUri);
}
return true;
}
|
@Test
void testValidatePatternNegative() {
RetentionColumnPattern malformedPattern =
RetentionColumnPattern.builder().pattern("random_pattern").columnName("aa").build();
Retention testRetention =
Retention.builder()
.columnPattern(malformedPattern)
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.build();
Assertions.assertFalse(
validator.validatePatternIfPresent(
testRetention, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
}
|
public static <T> T xmlToBean(String xml, Class<T> c) {
return xmlToBean(StrUtil.getReader(xml), c);
}
|
@Test
public void xmlToBeanTest() {
final SchoolVo schoolVo = JAXBUtil.xmlToBean(xmlStr, SchoolVo.class);
assertNotNull(schoolVo);
assertEquals("西安市第一中学", schoolVo.getSchoolName());
assertEquals("西安市雁塔区长安堡一号", schoolVo.getSchoolAddress());
assertEquals("101教室", schoolVo.getRoom().getRoomName());
assertEquals("101", schoolVo.getRoom().getRoomNo());
}
|
@Nullable
@SuppressWarnings("unchecked")
public <E> E get(@Nonnull Tag<E> tag) {
Object got = map.getOrDefault(tag, NONE);
if (got == NONE) {
throw new IllegalArgumentException("No value associated with " + tag);
}
return (E) got;
}
|
@Test(expected = IllegalArgumentException.class)
public void when_getNonexistent_then_exception() {
ibt.get(tag1());
}
|
public static VerificationMode never() {
return times(0);
}
|
@Test
public void should_monitor_server_behavior() throws Exception {
final MocoMonitor monitor = mock(MocoMonitor.class);
final HttpServer server = httpServer(port(), monitor);
server.get(by(uri("/foo"))).response("bar");
running(server, () -> assertThat(helper.get(remoteUrl("/foo")), is("bar")));
verify(monitor).onMessageArrived(any(HttpRequest.class));
verify(monitor).onMessageLeave(any(HttpResponse.class));
verify(monitor, Mockito.never()).onException(any(Exception.class));
}
|
protected Destination createDestination(String destName) throws JMSException {
String simpleName = getSimpleName(destName);
byte destinationType = getDestinationType(destName);
if (destinationType == ActiveMQDestination.QUEUE_TYPE) {
LOG.info("Creating queue: {}", destName);
return getSession().createQueue(simpleName);
} else if (destinationType == ActiveMQDestination.TOPIC_TYPE) {
LOG.info("Creating topic: {}", destName);
return getSession().createTopic(simpleName);
} else {
return createTemporaryDestination(destName);
}
}
|
@Test
public void testCreateDestination_tempTopic() throws JMSException {
assertDestinationType(TEMP_TOPIC_TYPE,
asAmqDest(jmsClient.createDestination("temp-topic://dest")));
}
|
public void heartbeat(String fileOffset) throws IOException {
logProgress(fileOffset, true);
}
|
@Test
public void testHeartbeat() throws Exception {
Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
fs.create(file1).close();
// acquire lock on file1
FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
assertNotNull(lock1);
assertTrue(fs.exists(lock1.getLockFile()));
ArrayList<String> lines = readTextFile(lock1.getLockFile());
assertEquals(1, lines.size(), "heartbeats appear to be missing");
// heartbeat upon it
lock1.heartbeat("1");
lock1.heartbeat("2");
lock1.heartbeat("3");
lines = readTextFile(lock1.getLockFile());
assertEquals(4, lines.size(), "heartbeats appear to be missing");
lock1.heartbeat("4");
lock1.heartbeat("5");
lock1.heartbeat("6");
lines = readTextFile(lock1.getLockFile());
assertEquals(7, lines.size(), "heartbeats appear to be missing");
lock1.release();
lines = readTextFile(lock1.getLockFile());
assertNull(lines);
assertFalse(fs.exists(lock1.getLockFile()));
}
|
public ProtocolBuilder iothreads(Integer iothreads) {
this.iothreads = iothreads;
return getThis();
}
|
@Test
void iothreads() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.iothreads(25);
Assertions.assertEquals(25, builder.build().getIothreads());
}
|
public static InternalRequestSignature fromHeaders(Crypto crypto, byte[] requestBody, HttpHeaders headers) {
if (headers == null) {
return null;
}
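// Both the signature algorithm and signature headers must be present; otherwise the request is treated as unsigned.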
String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER);
String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER);
if (signatureAlgorithm == null || encodedSignature == null) {
return null;
}
Mac mac;
try {
mac = crypto.mac(signatureAlgorithm);
} catch (NoSuchAlgorithmException e) {
throw new BadRequestException(e.getMessage());
}
byte[] decodedSignature;
try {
decodedSignature = Base64.getDecoder().decode(encodedSignature);
} catch (IllegalArgumentException e) {
throw new BadRequestException(e.getMessage());
}
return new InternalRequestSignature(
requestBody,
mac,
decodedSignature
);
}
|
@Test
public void fromHeadersShouldReturnNullIfSignatureAlgorithmHeaderMissing() {
assertNull(InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, null)));
}
|
@Override
public <T> T clone(T object) {
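// Strings are immutable and safe to share; other objects are deep-copied via Java serialization or JSON as appropriate.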
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
}
|
@Test
public void should_clone_serializable_object() {
Object original = new SerializableObject("value");
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
}
|
@Override
public ExplodedPlugin explode(PluginInfo pluginInfo) {
File tempDir = new File(fs.getTempDir(), TEMP_RELATIVE_PATH);
File toDir = new File(tempDir, pluginInfo.getKey());
try {
org.sonar.core.util.FileUtils.cleanDirectory(toDir);
File jarSource = pluginInfo.getNonNullJarFile();
File jarTarget = new File(toDir, jarSource.getName());
FileUtils.copyFile(jarSource, jarTarget);
ZipUtils.unzip(jarSource, toDir, newLibFilter());
return explodeFromUnzippedDir(pluginInfo, jarTarget, toDir);
} catch (Exception e) {
throw new IllegalStateException(String.format(
"Fail to unzip plugin [%s] %s to %s", pluginInfo.getKey(), pluginInfo.getNonNullJarFile().getAbsolutePath(), toDir.getAbsolutePath()), e);
}
}
|
@Test
public void explode_is_reentrant() throws Exception {
PluginInfo info = PluginInfo.create(plugin1Jar());
ExplodedPlugin exploded1 = underTest.explode(info);
long dirSize1 = sizeOfDirectory(exploded1.getMain().getParentFile());
ExplodedPlugin exploded2 = underTest.explode(info);
long dirSize2 = sizeOfDirectory(exploded2.getMain().getParentFile());
assertThat(exploded2.getMain().getCanonicalPath()).isEqualTo(exploded1.getMain().getCanonicalPath());
assertThat(dirSize1).isEqualTo(dirSize2);
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the TIMESTAMP value."
+ " Single quotes in the timestamp format can be escaped with '',"
+ " for example: 'yyyy-MM-dd''T''HH:mm:ssX'.")
public Timestamp parseTimestamp(
@UdfParameter(
description = "The string representation of a date.") final String formattedTimestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return parseTimestamp(formattedTimestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void shouldThrowIfFormatInvalid() {
// When:
final KsqlFunctionException e = assertThrows(
KsqlFunctionException.class,
() -> udf.parseTimestamp("2021-12-01 12:10:11.123", "invalid")
);
// Then:
assertThat(e.getMessage(), containsString("Unknown pattern letter: i"));
}
|
public OptExpression next() {
// For a logical scan to physical scan rewrite, we only need to match once
if (isPatternWithoutChildren && groupExpressionIndex.get(0) > 0) {
return null;
}
OptExpression expression;
do {
this.groupTraceKey = 0;
// Match with the next groupExpression of the last group node
int lastNode = this.groupExpressionIndex.size() - 1;
int lastNodeIndex = this.groupExpressionIndex.get(lastNode);
this.groupExpressionIndex.set(lastNode, lastNodeIndex + 1);
expression = match(pattern, groupExpression);
} while (expression == null && this.groupExpressionIndex.size() != 1);
nextIdx++;
return expression;
}
|
@Test
public void testBinderMultiDepth2Repeat2() {
OptExpression expr1 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_JOIN, 0),
OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 1)),
OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 2)));
OptExpression expr2 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 3));
OptExpression expr3 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 4));
Memo memo = new Memo();
GroupExpression ge = memo.init(expr1);
memo.copyIn(ge.inputAt(0), expr2);
memo.copyIn(ge.inputAt(1), expr3);
Pattern pattern = Pattern.create(OperatorType.LOGICAL_JOIN)
.addChildren(Pattern.create(OperatorType.LOGICAL_OLAP_SCAN))
.addChildren(Pattern.create(OperatorType.PATTERN_MULTI_LEAF));
Binder binder = new Binder(pattern, ge);
OptExpression result;
result = binder.next();
assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType());
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(0).getOp().getOpType());
assertEquals(1, ((MockOperator) result.inputAt(0).getOp()).getValue());
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType());
assertEquals(2, ((MockOperator) result.inputAt(1).getOp()).getValue());
result = binder.next();
assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType());
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(0).getOp().getOpType());
assertEquals(3, ((MockOperator) result.inputAt(0).getOp()).getValue());
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType());
assertEquals(2, ((MockOperator) result.inputAt(1).getOp()).getValue());
assertNull(binder.next());
}
|
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
if (userSession.hasSession() && userSession.isLoggedIn() && userSession.shouldResetPassword()) {
redirectTo(response, request.getContextPath() + RESET_PASSWORD_PATH);
}
chain.doFilter(request, response);
}
|
@Test
public void redirect_if_request_uri_ends_with_slash() throws Exception {
when(request.getRequestURI()).thenReturn("/projects/");
when(request.getContextPath()).thenReturn("/sonarqube");
underTest.doFilter(request, response, chain);
verify(response).sendRedirect("/sonarqube/account/reset_password");
}
|
public Collection<ServerPluginInfo> loadPlugins() {
Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
failIfContains(bundledPluginsByKey, bundled,
plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()), bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
bundledPluginsByKey.put(bundled.getKey(), bundled);
}
Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo external : getExternalPluginsMetadata()) {
failIfContains(bundledPluginsByKey, external,
plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
failIfContains(externalPluginsByKey, external,
plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.", external.getName(), external.getKey(),
getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
externalPluginsByKey.put(external.getKey(), external);
}
for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
failIfContains(bundledPluginsByKey, downloaded,
plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. Move or delete plugin from %s directory",
plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));
ServerPluginInfo installedPlugin;
if (externalPluginsByKey.containsKey(downloaded.getKey())) {
deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
} else {
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
}
externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
}
Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
plugins.putAll(externalPluginsByKey);
plugins.putAll(bundledPluginsByKey);
PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
return plugins.values();
}
|
@Test
public void plugin_is_ignored_if_required_plugin_is_too_old_at_startup() throws Exception {
copyTestPluginTo("test-base-plugin", fs.getInstalledExternalPluginsDir());
copyTestPluginTo("test-requirenew-plugin", fs.getInstalledExternalPluginsDir());
// the plugin "requirenew" is not installed as it requires base 0.2+ to be installed.
assertThat(underTest.loadPlugins()).extracting(PluginInfo::getKey).containsOnly("testbase");
assertThat(logs.logs()).contains("Plugin Test Require New Plugin [testrequire] is ignored because the version 0.2 of required plugin [testbase] is not installed");
}
|
@Override
String simpleTypeName() {
if (isRoot()) {
return "Root";
}
return lastComponent().getClass().getSimpleName();
}
|
@Test
public void testSimpleTypeNameOfRoot() {
assertThat(ResourceId.ROOT.simpleTypeName(), is("Root"));
}
|
static void urlEncode(String str, StringBuilder sb) {
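// Minimal percent-encoding: only '+' and '%' are escaped; every other character passes through unchanged.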
for (int idx = 0; idx < str.length(); ++idx) {
char c = str.charAt(idx);
if ('+' == c) {
sb.append("%2B");
} else if ('%' == c) {
sb.append("%25");
} else {
sb.append(c);
}
}
}
|
@Test
void testUrlEncodeByPercent() {
// Arrange
final StringBuilder sb = new StringBuilder("??????");
// Act
GroupKey.urlEncode("%", sb);
// Assert side effects
assertNotNull(sb);
assertEquals("??????%25", sb.toString());
}
|
public String process(final Expression expression) {
return formatExpression(expression);
}
|
@Test
public void shouldGenerateCorrectCodeForIntervalUnit() {
// Given:
final IntervalUnit intervalUnit = new IntervalUnit(TimeUnit.DAYS);
// When:
final String java = sqlToJavaVisitor.process(intervalUnit);
// Then:
assertThat(java, containsString("TimeUnit.DAYS"));
}
|
@Override
public PayloadSerializer getSerializer(Schema schema, Map<String, Object> tableParams) {
Class<? extends TBase> thriftClass = getMessageClass(tableParams);
TProtocolFactory protocolFactory = getProtocolFactory(tableParams);
inferAndVerifySchema(thriftClass, schema);
return getPayloadSerializer(schema, protocolFactory, thriftClass);
}
|
@Test
public void deserialize() throws Exception {
Row row =
provider
.getSerializer(
SHUFFLED_SCHEMA,
ImmutableMap.of(
"thriftClass", TestThriftMessage.class.getName(),
"thriftProtocolFactoryClass", TCompactProtocol.Factory.class.getName()))
.deserialize(new TSerializer(new TCompactProtocol.Factory()).serialize(MESSAGE));
assertEquals(ROW, row);
}
|
public static Ip4Address valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new Ip4Address(bytes);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfIncorrectString() {
Ip4Address ipAddress;
String fromString = "NoSuchIpAddress";
ipAddress = Ip4Address.valueOf(fromString);
}
|
public static void close(final Collection<DataSource> dataSources) {
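// Close every closeable data source, collecting exceptions so that one failure does not prevent closing the rest.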
Collection<Exception> causes = new LinkedList<>();
for (DataSource each : dataSources) {
if (each instanceof AutoCloseable) {
try {
((AutoCloseable) each).close();
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
causes.add(ex);
}
}
}
if (!causes.isEmpty()) {
throwException(causes);
}
}
|
@Test
void assertClose() throws Exception {
DataSource dataSource0 = mock(DataSource.class, withSettings().extraInterfaces(AutoCloseable.class));
DataSource dataSource1 = mock(DataSource.class, withSettings().extraInterfaces(AutoCloseable.class));
doThrow(new SQLException("test")).when((AutoCloseable) dataSource1).close();
DataSource dataSource2 = mock(DataSource.class, withSettings().extraInterfaces(AutoCloseable.class));
assertThrows(SQLWrapperException.class, () -> DataSourcesCloser.close(Arrays.asList(dataSource0, dataSource1, dataSource2)));
verify((AutoCloseable) dataSource0, times(1)).close();
verify((AutoCloseable) dataSource1, times(1)).close();
verify((AutoCloseable) dataSource2, times(1)).close();
}
|
@Override
public Enumeration<String> getKeys() {
if ( parent == null ) {
return Collections.enumeration( contents.keySet() );
}
Set<String> keySet = newHashSet( contents.keySet() );
keySet.addAll( Collections.list( parent.getKeys() ) );
return Collections.enumeration( keySet );
}
|
@Test
public void aggregateBundleWithNoSourceBundlesContainsNoKeys() {
ResourceBundle aggregateBundle = new AggregateResourceBundle( Collections.<ResourceBundle>emptyList() );
assertTrue( getAsSet( aggregateBundle.getKeys() ).isEmpty() );
}
|
public static String removeLeadingSlashes(String path) {
return SLASH_PREFIX_PATTERN.matcher(path).replaceFirst("");
}
|
@Test
public void removeLeadingSlashes_whenNoLeadingSlashes_returnsOriginal() {
assertThat(removeLeadingSlashes("a/b/c/")).isEqualTo("a/b/c/");
}
|
@SuppressLint("HardwareIds")
public static String getIdentifier(Context context) {
try {
if (!isAndroidIDEnabled) {
SALog.i(TAG, "SensorsData getAndroidID is disabled");
return "";
}
if (SAPropertyManager.getInstance().isLimitKey(LimitKey.ANDROID_ID)) {
return SAPropertyManager.getInstance().getLimitValue(LimitKey.ANDROID_ID);
}
if (TextUtils.isEmpty(androidID)) {
SALog.i(TAG, "SensorsData getIdentifier");
androidID = Settings.Secure.getString(context.getContentResolver(), Settings.Secure.ANDROID_ID);
if (!isValidAndroidId(androidID)) {
androidID = "";
}
}
} catch (Exception e) {
SALog.printStackTrace(e);
}
return androidID;
}
|
@Test
public void getIdentifier() {
String androidID = SensorsDataUtils.getIdentifier(mApplication);
System.out.println("androidID = " + androidID);
}
|
@Override
public void onWorkflowFinalized(Workflow workflow) {
WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
String reason = workflow.getReasonForIncompletion();
LOG.info(
"Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
summary.getIdentity(),
workflow.getWorkflowId(),
workflow.getStatus(),
reason);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"onWorkflowFinalized",
MetricConstants.STATUS_TAG,
workflow.getStatus().name());
if (reason != null
&& workflow.getStatus() == Workflow.WorkflowStatus.FAILED
&& reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
LOG.info(
"Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId());
return; // special case doing nothing
}
WorkflowInstance.Status instanceStatus =
instanceDao.getWorkflowInstanceStatus(
summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
if (instanceStatus == null
|| (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
LOG.info(
"Workflow {} with execution_id [{}] does not exist or already "
+ "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId(),
instanceStatus,
workflow.getStatus());
return;
}
Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
// cancel internally failed tasks
realTaskMap.values().stream()
.filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
.forEach(task -> maestroTask.cancel(workflow, task, null));
WorkflowRuntimeOverview overview =
TaskHelper.computeOverview(
objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
try {
validateAndUpdateOverview(overview, summary);
switch (workflow.getStatus()) {
case TERMINATED: // stopped due to stop request
if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
} else {
update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
}
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
Optional<Task.Status> done =
TaskHelper.checkProgress(realTaskMap, summary, overview, true);
switch (done.orElse(Task.Status.IN_PROGRESS)) {
/**
* This is a special status to indicate that the workflow has succeeded. Check {@link
* TaskHelper#checkProgress} for more details.
*/
case FAILED_WITH_TERMINAL_ERROR:
WorkflowInstance.Status nextStatus =
AggregatedViewHelper.deriveAggregatedStatus(
instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
if (!nextStatus.isTerminal()) {
throw new MaestroInternalError(
"Invalid status: [%s], expecting a terminal one", nextStatus);
}
update(workflow, nextStatus, summary, overview);
break;
case FAILED:
case CANCELED: // due to step failure
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
// all other status are invalid
default:
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"invalidStatusOnWorkflowFinalized");
throw new MaestroInternalError(
"Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
}
break;
}
} catch (MaestroInternalError | IllegalArgumentException e) {
// non-retryable error and still fail the instance
LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"nonRetryableErrorOnWorkflowFinalized");
update(
workflow,
WorkflowInstance.Status.FAILED,
summary,
overview,
Details.create(
e.getMessage(), "onWorkflowFinalized is failed with non-retryable error."));
}
}
|
@Test
public void testPublishErrorOnWorkflowFinalized() {
when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TERMINATED);
when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
.thenReturn(WorkflowInstance.Status.IN_PROGRESS);
doCallRealMethod().when(publisher).publishOrThrow(any(), any());
doCallRealMethod().when(publisher).publishOrThrow(any(), anyLong(), any());
when(publisher.publish(any(), anyLong()))
.thenReturn(Optional.of(Details.create("test errors")));
AssertHelper.assertThrows(
"maestro event publish failure and will retry",
MaestroRetryableError.class,
"Failed to publish maestro job event",
() -> statusListener.onWorkflowFinalized(workflow));
Assert.assertEquals(
1L,
metricRepo
.getCounter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
MaestroWorkflowStatusListener.class,
"type",
"onWorkflowFinalized",
"status",
"TERMINATED")
.count());
}
|
public GpuAllocation assignGpus(Container container)
throws ResourceHandlerException {
GpuAllocation allocation = internalAssignGpus(container);
// Wait up to waitPeriodForResource for GPUs that have not yet
// been released to become available.
int timeWaiting = 0;
while (allocation == null) {
if (timeWaiting >= waitPeriodForResource) {
break;
}
// Sleep for 1 sec so that GPU devices being released by other
// containers have a chance to become free.
try {
LOG.info("Container : " + container.getContainerId()
+ " is waiting for free GPU devices.");
Thread.sleep(WAIT_MS_PER_LOOP);
timeWaiting += WAIT_MS_PER_LOOP;
allocation = internalAssignGpus(container);
} catch (InterruptedException e) {
// On any interrupt, break the loop and continue execution.
Thread.currentThread().interrupt();
LOG.warn("Interrupted while waiting for available GPU");
break;
}
}
if (allocation == null) {
String message = "Could not get valid GPU device for container '" +
container.getContainerId()
+ "' as some other containers might not be releasing GPUs.";
LOG.warn(message);
throw new ResourceHandlerException(message);
}
return allocation;
}
|
@Test
public void testRequestMoreThanAvailableGpu()
throws ResourceHandlerException {
addGpus(new GpuDevice(1, 1));
Container container = createMockContainer(2, 5L);
exception.expect(ResourceHandlerException.class);
exception.expectMessage("Failed to find enough GPUs");
testSubject.assignGpus(container);
}
|
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
throws IOException {
FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
List<HasMetadata> accompanyingResources = new ArrayList<>();
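// Decorators are applied in order; each may adjust the pod spec and contribute accompanying Kubernetes resources.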
final List<KubernetesStepDecorator> stepDecorators =
new ArrayList<>(
Arrays.asList(
new InitJobManagerDecorator(kubernetesJobManagerParameters),
new EnvSecretsDecorator(kubernetesJobManagerParameters),
new MountSecretsDecorator(kubernetesJobManagerParameters),
new CmdJobManagerDecorator(kubernetesJobManagerParameters),
new InternalServiceDecorator(kubernetesJobManagerParameters),
new ExternalServiceDecorator(kubernetesJobManagerParameters)));
Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
}
if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
}
stepDecorators.addAll(
Arrays.asList(
new FlinkConfMountDecorator(kubernetesJobManagerParameters),
new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
for (KubernetesStepDecorator stepDecorator : stepDecorators) {
flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
}
final Deployment deployment =
createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
}
|
@Test
void testHadoopConfConfigMap() throws IOException {
setHadoopConfDirEnv();
generateHadoopConfFileItems();
kubernetesJobManagerSpecification =
KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
flinkPod, kubernetesJobManagerParameters);
final ConfigMap resultConfigMap =
(ConfigMap)
getConfigMapList(
HadoopConfMountDecorator.getHadoopConfConfigMapName(
CLUSTER_ID))
.get(0);
assertThat(resultConfigMap.getMetadata().getLabels()).hasSize(2);
final Map<String, String> resultDatas = resultConfigMap.getData();
assertThat(resultDatas).hasSize(2);
assertThat(resultDatas.get("core-site.xml")).isEqualTo("some data");
assertThat(resultDatas.get("hdfs-site.xml")).isEqualTo("some data");
}
|
public Exception getException() {
if (exception != null) return exception;
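// Otherwise reconstruct the exception (and its optional cause) reflectively from the recorded type and message strings.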
try {
final Class<? extends Exception> exceptionClass = ReflectionUtils.toClass(getExceptionType());
if (getExceptionCauseType() != null) {
final Class<? extends Exception> exceptionCauseClass = ReflectionUtils.toClass(getExceptionCauseType());
final Exception exceptionCause = getExceptionCauseMessage() != null ? ReflectionUtils.newInstanceCE(exceptionCauseClass, getExceptionCauseMessage()) : ReflectionUtils.newInstanceCE(exceptionCauseClass);
exceptionCause.setStackTrace(new StackTraceElement[]{});
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage(), exceptionCause) : ReflectionUtils.newInstanceCE(exceptionClass, exceptionCause);
} else {
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage()) : ReflectionUtils.newInstanceCE(exceptionClass);
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException("Could not reconstruct exception for class " + getExceptionType() + " and message " + getExceptionMessage(), e);
}
}
|
@Test
void getExceptionWithMessageAndNestedExceptionWithMessage() {
final FailedState failedState = new FailedState("JobRunr message", new CustomException("custom exception message", new CustomException("other exception")));
assertThat(failedState.getException())
.isInstanceOf(CustomException.class)
.hasMessage("custom exception message")
.hasCauseInstanceOf(CustomException.class)
.hasRootCauseMessage("other exception");
}
|
public static DLPReidentifyText.Builder newBuilder() {
return new AutoValue_DLPReidentifyText.Builder();
}
|
@Test
public void throwsExceptionWhenDelimiterIsSetAndHeadersAreNot() {
assertThrows(
"Column headers should be supplied when delimiter is present.",
IllegalArgumentException.class,
() ->
DLPReidentifyText.newBuilder()
.setProjectId(PROJECT_ID)
.setBatchSizeBytes(BATCH_SIZE_SMALL)
.setReidentifyTemplateName(TEMPLATE_NAME)
.setColumnDelimiter(DELIMITER)
.build());
}
|
@Override
public DescriptiveUrlBag toUrl(final Path file) {
final DescriptiveUrlBag list = new DescriptiveUrlBag();
for(String scheme : host.getProtocol().getSchemes()) {
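// Only custom schemes that are not part of the standard Scheme enum get a provider URL.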
if(Arrays.stream(Scheme.values()).noneMatch(s -> s.name().equals(scheme))) {
list.add(new DescriptiveUrl(URI.create(String.format("%s://%s%s",
scheme, new PunycodeConverter().convert(host.getHostname()), PathNormalizer.normalize(file.getAbsolute()))),
DescriptiveUrl.Type.provider,
MessageFormat.format(LocaleFactory.localizedString("{0} URL"), StringUtils.capitalize(scheme))));
}
}
return list;
}
|
@Test
public void testCustomSchemes() {
Host host = new Host(new TestProtocol() {
public String[] getSchemes() {
return new String[]{"c1", "c2"};
}
}, "localhost");
Path path = new Path("/file", EnumSet.of(Path.Type.file));
final DescriptiveUrlBag list = new CustomSchemeUrlProvider(host).toUrl(path).filter(DescriptiveUrl.Type.provider);
assertEquals(2, list.size());
assertTrue(list.contains(new DescriptiveUrl(URI.create("c1://localhost/file"))));
assertTrue(list.contains(new DescriptiveUrl(URI.create("c2://localhost/file"))));
}
|
@Transactional(readOnly = true)
public void existsGeneralSignUpUser(String phone) {
readGeneralSignUpUser(phone);
}
|
@DisplayName("정상적인 비밀번호 찾기 인증요청일 경우 SuccessResponse.noContent()를 반환한다.")
@Test
void findPasswordVerification() {
// given
String phone = "010-1234-5678";
User user = UserFixture.GENERAL_USER.toUser();
given(userService.readUserByPhone(phone)).willReturn(Optional.of(user));
// when
authFindService.existsGeneralSignUpUser(phone);
}
|
public String getJobCompletionRequestBody(String elasticAgentId, JobIdentifier jobIdentifier, Map<String, String> elasticProfileConfiguration, Map<String, String> clusterProfileConfiguration) {
JsonObject jsonObject = new JsonObject();
jsonObject.addProperty("elastic_agent_id", elasticAgentId);
jsonObject.add("job_identifier", jobIdentifierJson(jobIdentifier));
jsonObject.add("elastic_agent_profile_properties", mapToJsonObject(elasticProfileConfiguration));
jsonObject.add("cluster_profile_properties", mapToJsonObject(clusterProfileConfiguration));
return FORCED_EXPOSE_GSON.toJson(jsonObject);
}
|
@Test
public void shouldJSONizeJobCompletionRequestBody() throws Exception {
HashMap<String, String> elasticProfileConfiguration = new HashMap<>();
elasticProfileConfiguration.put("property_name", "property_value");
HashMap<String, String> clusterProfileConfiguration = new HashMap<>();
clusterProfileConfiguration.put("property_name", "property_value");
String actual = new ElasticAgentExtensionConverterV5().getJobCompletionRequestBody("ea1", jobIdentifier, elasticProfileConfiguration, clusterProfileConfiguration);
String expected = """
{ "elastic_agent_id":"ea1", "elastic_agent_profile_properties":{ "property_name":"property_value" }, "cluster_profile_properties":{ "property_name":"property_value" }, "job_identifier": {
"pipeline_name": "test-pipeline",
"pipeline_counter": 1,
"pipeline_label": "Test Pipeline",
"stage_name": "test-stage",
"stage_counter": "1",
"job_name": "test-job",
"job_id": 100
}
}""";
assertThatJson(expected).isEqualTo(actual);
}
|
public static TraceContextOrSamplingFlags create(TraceContext context) {
return new TraceContextOrSamplingFlags(1, context, emptyList());
}
|
@Test void equalsAndHashCode_samplingFlags() {
equalsAndHashCode(
() -> TraceContextOrSamplingFlags.create(SAMPLED),
() -> TraceContextOrSamplingFlags.create(NOT_SAMPLED),
() -> TraceContextOrSamplingFlags.create(context)
);
}
|
public abstract int status(HttpServletResponse response);
|
@Test void servlet25_status() {
assertThat(servlet25.status(new HttpServletResponseImpl()))
.isEqualTo(200);
}
|
public boolean schemaEquals(Object obj) {
return equals(obj) && Arrays.equals(fieldNames, ((RowTypeInfo) obj).fieldNames);
}
|
@Test
void testSchemaEquals() {
final RowTypeInfo row1 =
new RowTypeInfo(
new TypeInformation[] {
BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO
},
new String[] {"field1", "field2"});
final RowTypeInfo row2 =
new RowTypeInfo(
new TypeInformation[] {
BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO
},
new String[] {"field1", "field2"});
assertThat(row1.schemaEquals(row2)).isTrue();
final RowTypeInfo other1 =
new RowTypeInfo(
new TypeInformation[] {
BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO
},
new String[] {"otherField", "field2"});
final RowTypeInfo other2 =
new RowTypeInfo(
new TypeInformation[] {
BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO
},
new String[] {"field1", "field2"});
assertThat(row1.schemaEquals(other1)).isFalse();
assertThat(row1.schemaEquals(other2)).isFalse();
}
|
@ApiOperation(value = "Get the list of function buttons")
@GetMapping
public ApiResult<List<BaseAction>> list(){
return ApiResult.success(baseActionService.list());
}
|
@Test
@DisplayName("操作列表")
void list() {
}
|
public static SqlMap of(final SqlType keyType, final SqlType valueType) {
return new SqlMap(keyType, valueType);
}
|
@Test
public void shouldReturnBaseType() {
assertThat(SqlMap.of(SOME_TYPE, OTHER_TYPE).baseType(), is(SqlBaseType.MAP));
}
|
public String decompress(String compressorName, String compressedString) throws IOException {
Checks.notNull(compressedString, "compressedString cannot be null");
Compressor compressor = getCompressor(compressorName);
return new String(compressor.decompress(base64Decode(compressedString)), DEFAULT_ENCODING);
}
|
@Test
public void testDecompress() throws IOException {
assertEquals("aaaaaaa", stringCodec.decompress("gzip", "H4sIAAAAAAAAAEtMBAMAdCCLWwcAAAA="));
}
|
AwsCredentials credentials() {
if (!StringUtil.isNullOrEmptyAfterTrim(awsConfig.getAccessKey())) {
return AwsCredentials.builder()
.setAccessKey(awsConfig.getAccessKey())
.setSecretKey(awsConfig.getSecretKey())
.build();
}
if (!StringUtil.isNullOrEmptyAfterTrim(ec2IamRole)) {
return fetchCredentialsFromEc2();
}
if (environment.isRunningOnEcs()) {
return fetchCredentialsFromEcs();
}
throw new NoCredentialsException();
}
|
@Test
public void credentialsAccessKey() {
// given
AwsConfig awsConfig = AwsConfig.builder()
.setAccessKey(ACCESS_KEY)
.setSecretKey(SECRET_KEY)
.build();
AwsCredentialsProvider credentialsProvider = new AwsCredentialsProvider(awsConfig, awsMetadataApi, environment);
// when
AwsCredentials credentials = credentialsProvider.credentials();
// then
assertEquals(ACCESS_KEY, credentials.getAccessKey());
assertEquals(SECRET_KEY, credentials.getSecretKey());
assertNull(credentials.getToken());
}
|
static SegmentStatus getSegmentStatus(Path path) {
try (RecordIOReader ioReader = new RecordIOReader(path)) {
boolean moreEvents = true;
SegmentStatus segmentStatus = SegmentStatus.EMPTY;
while (moreEvents) {
// If all events in the segment can be read, then assume that this is a valid segment
moreEvents = (ioReader.readEvent() != null);
if (moreEvents) segmentStatus = SegmentStatus.VALID;
}
return segmentStatus;
} catch (IOException | IllegalStateException e) {
logger.warn("Error reading segment file {}", path, e);
return SegmentStatus.INVALID;
}
}
|
@Test
public void testEmptySegment() throws Exception {
try (RecordIOWriter writer = new RecordIOWriter(file)) {
// Do nothing. Creating a new writer is the same behaviour as starting and closing
// This line avoids a compiler warning.
writer.toString();
}
assertThat(RecordIOReader.getSegmentStatus(file), is(RecordIOReader.SegmentStatus.EMPTY));
}
|
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
checkArgument(!StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.PATH)),
"Option [path] should not be empty.");
setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
sanityCheck(conf, schema);
setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
setupSortOptions(conf, context.getConfiguration());
return new HoodieTableSink(conf, schema);
}
|
@Test
void testSetupCleaningOptionsForSink() {
// definition with simple primary key and partition path
ResolvedSchema schema1 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
// set a new clean retained-commits value that is less than the min archive commits
this.conf.setString(FlinkOptions.CLEAN_RETAIN_COMMITS.key(), "11");
final MockContext sinkContext1 = MockContext.getInstance(this.conf, schema1, "f2");
final HoodieTableSink tableSink1 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext1);
final Configuration conf1 = tableSink1.getConf();
assertThat(conf1.getInteger(FlinkOptions.ARCHIVE_MIN_COMMITS), is(FlinkOptions.ARCHIVE_MIN_COMMITS.defaultValue()));
assertThat(conf1.getInteger(FlinkOptions.ARCHIVE_MAX_COMMITS), is(FlinkOptions.ARCHIVE_MAX_COMMITS.defaultValue()));
// set a new clean retained-commits value that is greater than the min archive commits
final int retainCommits = FlinkOptions.ARCHIVE_MIN_COMMITS.defaultValue() + 5;
this.conf.setInteger(FlinkOptions.CLEAN_RETAIN_COMMITS.key(), retainCommits);
final MockContext sinkContext2 = MockContext.getInstance(this.conf, schema1, "f2");
final HoodieTableSink tableSink2 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext2);
final Configuration conf2 = tableSink2.getConf();
assertThat(conf2.getInteger(FlinkOptions.ARCHIVE_MIN_COMMITS), is(retainCommits + 10));
assertThat(conf2.getInteger(FlinkOptions.ARCHIVE_MAX_COMMITS), is(retainCommits + 20));
}
|
public static String parsingEndpointRule(String endpointUrl) {
// Even if a value is entered in the configuration file, the one in the system env is given priority.
if (endpointUrl == null || !PATTERN.matcher(endpointUrl).find()) {
// skip retrieval from system properties and read directly from the system env
String endpointUrlSource = NacosClientProperties.PROTOTYPE.getProperty(
PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_ENDPOINT_URL);
if (StringUtils.isNotBlank(endpointUrlSource)) {
endpointUrl = endpointUrlSource;
}
return StringUtils.isNotBlank(endpointUrl) ? endpointUrl : "";
}
endpointUrl = endpointUrl.substring(endpointUrl.indexOf("${") + 2, endpointUrl.lastIndexOf("}"));
int defStartOf = endpointUrl.indexOf(":");
String defaultEndpointUrl = null;
if (defStartOf != -1) {
defaultEndpointUrl = endpointUrl.substring(defStartOf + 1);
endpointUrl = endpointUrl.substring(0, defStartOf);
}
String endpointUrlSource = TemplateUtils.stringBlankAndThenExecute(
NacosClientProperties.PROTOTYPE.getProperty(endpointUrl),
() -> NacosClientProperties.PROTOTYPE.getProperty(
PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_ENDPOINT_URL));
if (StringUtils.isBlank(endpointUrlSource)) {
if (StringUtils.isNotBlank(defaultEndpointUrl)) {
endpointUrl = defaultEndpointUrl;
}
} else {
endpointUrl = endpointUrlSource;
}
return StringUtils.isNotBlank(endpointUrl) ? endpointUrl : "";
}
|
@Test
void testParsingEndpointRule() {
String url = "${test:www.example.com}";
String actual = ParamUtil.parsingEndpointRule(url);
assertEquals("www.example.com", actual);
}
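The test covers only the default-value branch. The following standalone sketch (a hypothetical helper, not part of the Nacos API) isolates the ${name:default} split that parsingEndpointRule performs before consulting properties:
static String[] splitPlaceholder(String raw) {
    // assumes raw already matched the ${...} placeholder pattern
    String body = raw.substring(raw.indexOf("${") + 2, raw.lastIndexOf("}"));
    int sep = body.indexOf(':');
    return sep == -1
            ? new String[] {body, null}
            : new String[] {body.substring(0, sep), body.substring(sep + 1)};
}
// splitPlaceholder("${test:www.example.com}") -> {"test", "www.example.com"}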
|
@Override
public void pluginJarAdded(BundleOrPluginFileDetails bundleOrPluginFileDetails) {
final GoPluginBundleDescriptor bundleDescriptor = goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails);
try {
LOGGER.info("Plugin load starting: {}", bundleOrPluginFileDetails.file());
validateIfExternalPluginRemovingBundledPlugin(bundleDescriptor);
validatePluginCompatibilityWithCurrentOS(bundleDescriptor);
validatePluginCompatibilityWithGoCD(bundleDescriptor);
addPlugin(bundleOrPluginFileDetails, bundleDescriptor);
} finally {
LOGGER.info("Plugin load finished: {}", bundleOrPluginFileDetails.file());
}
}
|
@Test
void shouldOverwriteAFileCalledGoPluginActivatorInLibWithOurOwnGoPluginActivatorEvenIfItExists() throws Exception {
File pluginJarFile = new File(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
File expectedBundleDirectory = new File(bundleDir, PLUGIN_JAR_FILE_NAME);
File activatorFileLocation = new File(expectedBundleDirectory, "lib/go-plugin-activator.jar");
FileUtils.writeStringToFile(activatorFileLocation, "SOME-DATA", UTF_8);
copyPluginToTheDirectory(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
String pluginJarFileLocation = pluginJarFile.getAbsolutePath();
GoPluginBundleDescriptor descriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder()
.id("testplugin.descriptorValidator")
.bundleLocation(expectedBundleDirectory)
.pluginJarFileLocation(pluginJarFileLocation)
.isBundledPlugin(true)
.build());
when(goPluginBundleDescriptorBuilder.build(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir))).thenReturn(descriptor);
doNothing().when(registry).loadPlugin(descriptor);
listener.pluginJarAdded(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir));
assertThat(new File(expectedBundleDirectory, "lib/go-plugin-activator.jar")).exists();
assertThat(FileUtils.readFileToString(activatorFileLocation, UTF_8)).isNotEqualTo("SOME-DATA");
}
|
@NonNull
public SelectSectoralIdpStep start(@NonNull Session session) {
return new SelectSectoralIdpStepImpl(
selfIssuer,
federationMasterClient,
openIdClient,
relyingPartyKeySupplier,
session.callbackUri(),
session.nonce(),
session.codeChallengeS256(),
session.state(),
session.scopes());
}
|
@Test
void start() {
var self = URI.create("https://fachdienst.example.com");
var fedmasterClient = mock(FederationMasterClient.class);
var openIdClient = mock(OpenIdClient.class);
var keySupplier = mock(KeySupplier.class);
var flow = new AuthenticationFlow(self, fedmasterClient, openIdClient, keySupplier);
var step = flow.start(new Session(null, null, null, null, List.of()));
assertNotNull(step);
}
|
@SuppressWarnings("deprecation")
static Object[] buildArgs(final Object[] positionalArguments,
final ResourceMethodDescriptor resourceMethod,
final ServerResourceContext context,
final DynamicRecordTemplate template,
final ResourceMethodConfig resourceMethodConfig)
{
List<Parameter<?>> parameters = resourceMethod.getParameters();
Object[] arguments = Arrays.copyOf(positionalArguments, parameters.size());
fixUpComplexKeySingletonArraysInArguments(arguments);
boolean attachmentsDesired = false;
for (int i = positionalArguments.length; i < parameters.size(); ++i)
{
Parameter<?> param = parameters.get(i);
try
{
if (param.getParamType() == Parameter.ParamType.KEY || param.getParamType() == Parameter.ParamType.ASSOC_KEY_PARAM)
{
Object value = context.getPathKeys().get(param.getName());
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.CALLBACK)
{
continue;
}
else if (param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT_PARAM || param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT)
{
continue; // don't know what to fill in yet
}
else if (param.getParamType() == Parameter.ParamType.HEADER)
{
HeaderParam headerParam = param.getAnnotations().get(HeaderParam.class);
String value = context.getRequestHeaders().get(headerParam.value());
arguments[i] = value;
continue;
}
//Since we have multiple different types of MaskTrees that can be passed into resource methods,
//we must evaluate based on the param type (annotation used)
else if (param.getParamType() == Parameter.ParamType.PROJECTION || param.getParamType() == Parameter.ParamType.PROJECTION_PARAM)
{
arguments[i] = context.getProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.METADATA_PROJECTION_PARAM)
{
arguments[i] = context.getMetadataProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.PAGING_PROJECTION_PARAM)
{
arguments[i] = context.getPagingProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.CONTEXT || param.getParamType() == Parameter.ParamType.PAGING_CONTEXT_PARAM)
{
PagingContext ctx = RestUtils.getPagingContext(context, (PagingContext) param.getDefaultValue());
arguments[i] = ctx;
continue;
}
else if (param.getParamType() == Parameter.ParamType.PATH_KEYS || param.getParamType() == Parameter.ParamType.PATH_KEYS_PARAM)
{
arguments[i] = context.getPathKeys();
continue;
}
else if (param.getParamType() == Parameter.ParamType.PATH_KEY_PARAM) {
Object value = context.getPathKeys().get(param.getName());
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT || param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT_PARAM)
{
arguments[i] = context;
continue;
}
else if (param.getParamType() == Parameter.ParamType.VALIDATOR_PARAM)
{
RestLiDataValidator validator = new RestLiDataValidator(resourceMethod.getResourceModel().getResourceClass().getAnnotations(),
resourceMethod.getResourceModel().getValueClass(), resourceMethod.getMethodType());
arguments[i] = validator;
continue;
}
else if (param.getParamType() == Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM)
{
arguments[i] = context.getRequestAttachmentReader();
attachmentsDesired = true;
continue;
}
else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_WRITER_PARAM)
{
// The OutputStream is passed to the resource implementation in a synchronous call. Upon return of the
// resource method, all the bytes would have been written to the OutputStream. The EntityStream would have
// contained all the bytes by the time data is requested. The ownership of the OutputStream is passed to
// the ByteArrayOutputStreamWriter, which is responsible for closing the OutputStream if necessary.
ByteArrayOutputStream out = new ByteArrayOutputStream();
context.setResponseEntityStream(EntityStreams.newEntityStream(new ByteArrayOutputStreamWriter(out)));
arguments[i] = new UnstructuredDataWriter(out, context);
continue;
}
else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_REACTIVE_READER_PARAM)
{
arguments[i] = new UnstructuredDataReactiveReader(context.getRequestEntityStream(), context.getRawRequest().getHeader(RestConstants.HEADER_CONTENT_TYPE));
continue;
}
else if (param.getParamType() == Parameter.ParamType.POST)
{
// handle action parameters
if (template != null)
{
DataMap data = template.data();
if (data.containsKey(param.getName()))
{
arguments[i] = template.getValue(param);
continue;
}
}
}
else if (param.getParamType() == Parameter.ParamType.QUERY)
{
Object value;
if (DataTemplate.class.isAssignableFrom(param.getType()))
{
value = buildDataTemplateArgument(context.getStructuredParameter(param.getName()), param,
resourceMethodConfig.shouldValidateQueryParams());
}
else
{
value = buildRegularArgument(context, param, resourceMethodConfig.shouldValidateQueryParams());
}
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.BATCH || param.getParamType() == Parameter.ParamType.RESOURCE_KEY)
{
// should not reach this routine, since these parameters are handled by passing in positionalArguments
throw new RoutingException("Parameter '" + param.getName() + "' should be passed in as a positional argument",
HttpStatus.S_400_BAD_REQUEST.getCode());
}
else
{
// unknown param type
throw new RoutingException(
"Parameter '" + param.getName() + "' has an unknown parameter type '" + param.getParamType().name() + "'",
HttpStatus.S_400_BAD_REQUEST.getCode());
}
}
catch (TemplateRuntimeException e)
{
throw new RoutingException("Parameter '" + param.getName() + "' is invalid", HttpStatus.S_400_BAD_REQUEST.getCode());
}
try
{
// Handle null-valued parameters that were not provided in the resource context or entity body:
// check whether the parameter is optional
if (param.isOptional() && param.hasDefaultValue())
{
arguments[i] = param.getDefaultValue();
}
else if (param.isOptional() && !param.getType().isPrimitive())
{
// an optional primitive parameter must have a default value or be provided, since it cannot be null
arguments[i] = null;
}
else
{
throw new RoutingException("Parameter '" + param.getName() + "' is required", HttpStatus.S_400_BAD_REQUEST.getCode());
}
}
catch (ResourceConfigException e)
{
// Parameter default value format exception should result in server error code 500.
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Parameter '" + param.getName() + "' default value is invalid", e);
}
}
//Verify that if the resource method did not expect attachments but attachments were present, we drain all
//incoming attachments and send back a bad request. We must take precaution here, since simply ignoring the request
//attachments is not correct behavior. Ignoring other request-level constructs such as headers or query parameters
//that were not needed is safe, but not request attachments.
if (!attachmentsDesired && context.getRequestAttachmentReader() != null)
{
throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
"Resource method endpoint invoked does not accept any request attachments.");
}
return arguments;
}
|
@Test
public void testBuildArgsHappyPath()
{
//test integer association key integer
String param1Key = "param1";
Parameter<Integer> param1 = new Parameter<>(param1Key, Integer.class, DataTemplateUtil.getSchema(Integer.class),
false, null, Parameter.ParamType.ASSOC_KEY_PARAM, false, AnnotationSet.EMPTY);
Integer param1Value = 123;
//test regular string argument
String param2Key = "param2";
Parameter<String> param2 = new Parameter<>(param2Key, String.class, DataTemplateUtil.getSchema(String.class),
true, null, Parameter.ParamType.QUERY, true, AnnotationSet.EMPTY);
String param2Value = "param2Value";
//test data template argument array with more than one element
String param3Key = "param3";
Parameter<StringArray> param3 = new Parameter<>(param3Key, StringArray.class, DataTemplateUtil.getSchema(StringArray.class),
true, null, Parameter.ParamType.QUERY, true, AnnotationSet.EMPTY);
DataList param3Value = new DataList(Arrays.asList("param3a", "param3b"));
StringArray param3Final = new StringArray(param3Value);
//test data template argument array with only one element
String param4Key = "param4";
Parameter<StringArray> param4 = new Parameter<>(param4Key, StringArray.class, DataTemplateUtil.getSchema(StringArray.class),
true, null, Parameter.ParamType.QUERY, true, AnnotationSet.EMPTY);
String param4Value = "param4Value";
StringArray param4Final = new StringArray(param4Value);
// test record template
String param5Key = "param5";
Parameter<TestRecord> param5 = new Parameter<>(param5Key, TestRecord.class, DataTemplateUtil.getSchema(TestRecord.class),
true, null, Parameter.ParamType.QUERY, true, AnnotationSet.EMPTY);
DataMap param5Value = new DataMap();
param5Value.put("doubleField", "5.5");
param5Value.put("floatField", "5");
param5Value.put("intField", "5");
param5Value.put("longField", "5");
TestRecord param5Final = new TestRecord();
param5Final.setDoubleField(5.5);
param5Final.setFloatField(5F);
param5Final.setIntField(5);
param5Final.setLongField(5);
// test record template array
String param6Key = "param6";
Parameter<TestRecordArray> param6 = new Parameter<>(param6Key, TestRecordArray.class, DataTemplateUtil.getSchema(TestRecordArray.class),
true, null, Parameter.ParamType.QUERY, true, AnnotationSet.EMPTY);
DataList param6Value = new DataList();
DataMap testRecordDataMap1 = new DataMap();
testRecordDataMap1.put("doubleField", "6.6");
testRecordDataMap1.put("floatField", "6");
testRecordDataMap1.put("intField", "6");
testRecordDataMap1.put("longField", "6");
DataMap testRecordDataMap2 = new DataMap();
testRecordDataMap2.put("doubleField", "66.6");
testRecordDataMap2.put("floatField", "66");
testRecordDataMap2.put("intField", "66");
testRecordDataMap2.put("longField", "66");
param6Value.add(testRecordDataMap1);
param6Value.add(testRecordDataMap2);
TestRecordArray param6Final = new TestRecordArray();
TestRecord testRecord1 = new TestRecord();
testRecord1.setDoubleField(6.6);
testRecord1.setFloatField(6);
testRecord1.setIntField(6);
testRecord1.setLongField(6);
TestRecord testRecord2 = new TestRecord();
testRecord2.setDoubleField(66.6);
testRecord2.setFloatField(66);
testRecord2.setIntField(66);
testRecord2.setLongField(66);
param6Final.add(testRecord1);
param6Final.add(testRecord2);
List<Parameter<?>> parameters = new ArrayList<>();
parameters.add(param1);
parameters.add(param2);
parameters.add(param3);
parameters.add(param4);
parameters.add(param5);
parameters.add(param6);
Object[] positionalArguments = new Object[0];
Capture<String> param1Capture = EasyMock.newCapture();
Capture<String> param2Capture = EasyMock.newCapture();
Capture<String> param3Capture = EasyMock.newCapture();
Capture<String> param4Capture = EasyMock.newCapture();
Capture<String> param5Capture = EasyMock.newCapture();
Capture<String> param6Capture = EasyMock.newCapture();
ServerResourceContext mockResourceContext = EasyMock.createMock(ServerResourceContext.class);
EasyMock.expect(mockResourceContext.getRequestAttachmentReader()).andReturn(null);
MutablePathKeys mockPathKeys = EasyMock.createMock(MutablePathKeys.class);
ResourceMethodDescriptor mockResourceMethodDescriptor = getMockResourceMethod(parameters);
ResourceMethodConfig mockResourceMethodConfig = EasyMock.createMock(ResourceMethodConfig.class);
EasyMock.expect(mockResourceMethodConfig.shouldValidateResourceKeys()).andReturn(true).times(5);
EasyMock.expect(mockResourceMethodConfig.shouldValidateQueryParams()).andReturn(false).times(5);
EasyMock.replay(mockResourceMethodConfig);
//easy mock for processing param1
EasyMock.expect(mockPathKeys.get(EasyMock.capture(param1Capture))).andReturn(param1Value);
EasyMock.expect(mockResourceContext.getPathKeys()).andReturn(mockPathKeys);
//easy mock for processing param2
EasyMock.expect(mockResourceContext.hasParameter(EasyMock.capture(param2Capture))).andReturn(true);
EasyMock.expect(mockResourceContext.getParameter(EasyMock.capture(param2Capture))).andReturn(param2Value);
//easy mock for processing param3
EasyMock.expect(mockResourceContext.getStructuredParameter(EasyMock.capture(param3Capture))).andReturn(param3Value);
//easy mock for processing param4
EasyMock.expect(mockResourceContext.getStructuredParameter(EasyMock.capture(param4Capture))).andReturn(param4Value);
//easy mock for processing param5
EasyMock.expect(mockResourceContext.getStructuredParameter(EasyMock.capture(param5Capture))).andReturn(param5Value);
//easy mock for processing param6
EasyMock.expect(mockResourceContext.getStructuredParameter(EasyMock.capture(param6Capture))).andReturn(param6Value);
EasyMock.replay(mockResourceContext, mockPathKeys);
Object[] results = ArgumentBuilder.buildArgs(positionalArguments, mockResourceMethodDescriptor, mockResourceContext, null, mockResourceMethodConfig);
EasyMock.verify(mockPathKeys, mockResourceContext);
Assert.assertEquals(param1Capture.getValue(), param1Key);
Assert.assertEquals(param2Capture.getValue(), param2Key);
Assert.assertEquals(param3Capture.getValue(), param3Key);
Assert.assertEquals(param4Capture.getValue(), param4Key);
Assert.assertEquals(param5Capture.getValue(), param5Key);
Assert.assertEquals(param6Capture.getValue(), param6Key);
Assert.assertEquals(results[0], param1Value);
Assert.assertEquals(results[1], param2Value);
Assert.assertEquals(results[2], param3Final);
Assert.assertEquals(results[3], param4Final);
Assert.assertEquals(results[4], param5Final);
Assert.assertEquals(results[5], param6Final);
}
|
@Override
public int hashCode() {
return Objects.hash(
partitionToken,
commitTimestamp,
serverTransactionId,
isLastRecordInTransactionInPartition,
recordSequence,
tableName,
rowType,
mods,
modType,
valueCaptureType,
numberOfRecordsInTransaction,
numberOfPartitionsInTransaction,
transactionTag,
isSystemTransaction);
}
|
@Test
public void testMetadataShouldNotInterfereInEquality() {
final DataChangeRecord record1 =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeMicroseconds(1L),
"serverTransactionId",
true,
"recordSequence",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("type1"), true, 1L),
new ColumnType("column2", new TypeCode("type2"), false, 2L)),
Collections.singletonList(
new Mod(
"{\"column1\": \"value1\"}",
"{\"column2\": \"oldValue2\"}",
"{\"column2\": \"newValue2\"}")),
ModType.UPDATE,
ValueCaptureType.OLD_AND_NEW_VALUES,
10L,
2L,
"transactionTag",
true,
mock(ChangeStreamRecordMetadata.class));
final DataChangeRecord record2 =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeMicroseconds(1L),
"serverTransactionId",
true,
"recordSequence",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("type1"), true, 1L),
new ColumnType("column2", new TypeCode("type2"), false, 2L)),
Collections.singletonList(
new Mod(
"{\"column1\": \"value1\"}",
"{\"column2\": \"oldValue2\"}",
"{\"column2\": \"newValue2\"}")),
ModType.UPDATE,
ValueCaptureType.OLD_AND_NEW_VALUES,
10L,
2L,
"transactionTag",
true,
mock(ChangeStreamRecordMetadata.class));
assertEquals(record1, record2);
assertEquals(record1.hashCode(), record2.hashCode());
}
|
public JobMetaDataParameterObject processJobMultipart(JobMultiPartParameterObject parameterObject)
throws IOException, NoSuchAlgorithmException {
// Change the timestamp at the beginning to avoid expiration
changeLastUpdatedTime();
validateReceivedParameters(parameterObject);
validateReceivedPartNumbersAreExpected(parameterObject);
validatePartChecksum(parameterObject);
// Parts numbers are good. Save them
currentPart = parameterObject.getCurrentPartNumber();
totalPart = parameterObject.getTotalPartNumber();
Path jarPath = jobMetaDataParameterObject.getJarPath();
// Append data to file
try (OutputStream outputStream = Files.newOutputStream(jarPath, StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {
outputStream.write(parameterObject.getPartData(), 0, parameterObject.getPartSize());
}
if (LOGGER.isInfoEnabled()) {
String message = String.format("Session : %s jarPath: %s PartNumber: %d/%d Total file size : %d bytes",
parameterObject.getSessionId(), jarPath, currentPart, totalPart, Files.size(jarPath));
LOGGER.info(message);
}
JobMetaDataParameterObject result = null;
// If parts are complete
if (currentPart == totalPart) {
validateJarChecksum();
result = jobMetaDataParameterObject;
}
return result;
}
|
@Test
public void testInvalidSHA256() {
byte[] partData = new byte[]{1};
JobMultiPartParameterObject jobMultiPartParameterObject = new JobMultiPartParameterObject();
jobMultiPartParameterObject.setSessionId(UUID.randomUUID());
jobMultiPartParameterObject.setCurrentPartNumber(1);
jobMultiPartParameterObject.setTotalPartNumber(1);
jobMultiPartParameterObject.setPartData(partData);
jobMultiPartParameterObject.setPartSize(partData.length);
jobMultiPartParameterObject.setSha256Hex(null);
Assert.assertThrows(JetException.class, () -> jobUploadStatus.processJobMultipart(jobMultiPartParameterObject));
}
|
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
|
@Test
public void testInvalidCreateTimeNonCompressedV2() {
long now = System.currentTimeMillis();
MemoryRecords records = createRecords(
RecordBatch.MAGIC_VALUE_V2,
now - 1001L,
Compression.NONE
);
assertThrows(RecordValidationException.class, () ->
new LogValidator(
records,
new TopicPartition("topic", 0),
time,
CompressionType.NONE,
Compression.NONE,
false,
RecordBatch.MAGIC_VALUE_V2,
TimestampType.CREATE_TIME,
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
);
}
|
@Override
public void start() throws Exception {
LOG.info("Process starting.");
mRunning = true;
mJournalSystem.start();
startMasterComponents(false);
mServices.forEach(SimpleService::start);
// Perform the initial catchup before joining leader election,
// to avoid potential delay if this master is selected as leader
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_CATCHUP_PROTECT_ENABLED)) {
LOG.info("Waiting for journals to catch up.");
mJournalSystem.waitForCatchup();
}
LOG.info("Starting leader selector.");
mLeaderSelector.start(getRpcAddress());
while (!Thread.interrupted()) {
if (!mRunning) {
LOG.info("master process is not running. Breaking out");
break;
}
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_CATCHUP_PROTECT_ENABLED)) {
LOG.info("Waiting for journals to catch up.");
mJournalSystem.waitForCatchup();
}
LOG.info("Started in stand-by mode.");
mLeaderSelector.waitForState(NodeState.PRIMARY);
mLastGainPrimacyTime = CommonUtils.getCurrentMs();
if (!mRunning) {
break;
}
try {
if (!promote()) {
continue;
}
mServices.forEach(SimpleService::promote);
LOG.info("Primary started");
} catch (Throwable t) {
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_BACKUP_WHEN_CORRUPTED)) {
takeEmergencyBackup();
}
throw t;
}
mLeaderSelector.waitForState(NodeState.STANDBY);
mLastLosePrimacyTime = CommonUtils.getCurrentMs();
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_EXIT_ON_DEMOTION)) {
stop();
} else {
if (!mRunning) {
break;
}
// Dump important information asynchronously
ExecutorService es = null;
List<Future<Void>> dumpFutures = new ArrayList<>();
try {
es = Executors.newFixedThreadPool(
2, ThreadFactoryUtils.build("info-dumper-%d", true));
dumpFutures.addAll(ProcessUtils.dumpInformationOnFailover(es));
} catch (Throwable t) {
LOG.warn("Failed to dump metrics and jstacks before demotion", t);
}
// Shut down services like RPC, WebServer, Journal and all master components
LOG.info("Losing the leadership.");
mServices.forEach(SimpleService::demote);
demote();
// Block until information dump is done and close resources
for (Future<Void> f : dumpFutures) {
try {
f.get();
} catch (InterruptedException | ExecutionException e) {
LOG.warn("Failed to dump metrics and jstacks before demotion", e);
}
}
if (es != null) {
es.shutdownNow();
}
}
}
}
|
@Test
public void startStopPrimary() throws Exception {
AlluxioMasterProcess master = new AlluxioMasterProcess(new NoopJournalSystem(),
new UfsJournalSingleMasterPrimarySelector());
master.registerService(
RpcServerService.Factory.create(master.getRpcBindAddress(), master, master.getRegistry()));
master.registerService(WebServerService.Factory.create(master.getWebBindAddress(), master));
master.registerService(MetricsService.Factory.create());
Thread t = new Thread(() -> {
try {
master.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
});
t.start();
master.waitForReady(10_000);
startStopTest(master);
t.interrupt();
t.join();
}
|
@Override
public MetadataNode child(String name) {
if (name.equals(ClusterImageBrokersNode.NAME)) {
return new ClusterImageBrokersNode(image);
} else if (name.equals(ClusterImageControllersNode.NAME)) {
return new ClusterImageControllersNode(image);
} else {
return null;
}
}
|
@Test
public void testUnknownChild() {
assertNull(NODE.child("unknown"));
}
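A short companion: both known child names should resolve to non-null nodes, mirroring the two branches of child(String) and reusing the same NODE fixture:
assertNotNull(NODE.child(ClusterImageBrokersNode.NAME));
assertNotNull(NODE.child(ClusterImageControllersNode.NAME));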
|
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedKeyValueStore(), partition);
final ValueAndTimestamp<GenericRow> row = store.get(key);
if (row == null) {
return KsMaterializedQueryResult.rowIterator(Collections.emptyIterator());
} else {
return KsMaterializedQueryResult.rowIterator(ImmutableList.of(Row.of(
stateStore.schema(), key, row.value(), row.timestamp())).iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldReturnValuesFullTableScan() {
// Given:
when(tableStore.all()).thenReturn(keyValueIterator);
when(keyValueIterator.hasNext()).thenReturn(true, true, false);
when(keyValueIterator.next())
.thenReturn(KEY_VALUE1)
.thenReturn(KEY_VALUE2);
// When:
final Iterator<Row> rowIterator = table.get(PARTITION).rowIterator;
// Then:
assertThat(rowIterator.hasNext(), is(true));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY, ROW1, TIME1)));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY2, ROW2, TIME2)));
assertThat(rowIterator.hasNext(), is(false));
}
|
static void setTableInputInformation(
TableInput.Builder tableInputBuilder, TableMetadata metadata) {
setTableInputInformation(tableInputBuilder, metadata, null);
}
|
@Test
public void testSetTableInputInformationWithExistingTable() {
// Actual TableInput
TableInput.Builder actualTableInputBuilder = TableInput.builder();
Schema schema =
new Schema(
Types.NestedField.required(1, "x", Types.StringType.get()),
Types.NestedField.required(2, "y", Types.StringType.get(), "new comment"),
Types.NestedField.required(3, "z", Types.StringType.get(), "new comment"));
PartitionSpec partitionSpec =
PartitionSpec.builderFor(schema).identity("x").withSpecId(1000).build();
TableMetadata tableMetadata =
TableMetadata.newTableMetadata(schema, partitionSpec, "s3://test", tableLocationProperties);
// Existing Table
Table existingGlueTable =
Table.builder()
.storageDescriptor(
StorageDescriptor.builder()
.columns(
ImmutableList.of(
Column.builder().name("x").comment("existing comment").build(),
Column.builder().name("y").comment("existing comment").build()))
.build())
.build();
IcebergToGlueConverter.setTableInputInformation(
actualTableInputBuilder, tableMetadata, existingGlueTable);
TableInput actualTableInput = actualTableInputBuilder.build();
// Expected TableInput
TableInput expectedTableInput =
TableInput.builder()
.storageDescriptor(
StorageDescriptor.builder()
.location("s3://test")
.additionalLocations(Sets.newHashSet(tableLocationProperties.values()))
.columns(
ImmutableList.of(
Column.builder()
.name("x")
.type("string")
.comment("existing comment")
.parameters(
ImmutableMap.of(
IcebergToGlueConverter.ICEBERG_FIELD_ID, "1",
IcebergToGlueConverter.ICEBERG_FIELD_OPTIONAL, "false",
IcebergToGlueConverter.ICEBERG_FIELD_CURRENT, "true"))
.build(),
Column.builder()
.name("y")
.type("string")
.comment("new comment")
.parameters(
ImmutableMap.of(
IcebergToGlueConverter.ICEBERG_FIELD_ID, "2",
IcebergToGlueConverter.ICEBERG_FIELD_OPTIONAL, "false",
IcebergToGlueConverter.ICEBERG_FIELD_CURRENT, "true"))
.build(),
Column.builder()
.name("z")
.type("string")
.comment("new comment")
.parameters(
ImmutableMap.of(
IcebergToGlueConverter.ICEBERG_FIELD_ID, "3",
IcebergToGlueConverter.ICEBERG_FIELD_OPTIONAL, "false",
IcebergToGlueConverter.ICEBERG_FIELD_CURRENT, "true"))
.build()))
.build())
.build();
assertThat(actualTableInput.storageDescriptor().columns())
.as("Columns should match")
.isEqualTo(expectedTableInput.storageDescriptor().columns());
}
|
@GET
@Path("/health")
@Operation(summary = "Health check endpoint to verify worker readiness and liveness")
public Response healthCheck() throws Throwable {
WorkerStatus workerStatus;
int statusCode;
try {
FutureCallback<Void> cb = new FutureCallback<>();
herder.healthCheck(cb);
long timeoutNs = TimeUnit.MILLISECONDS.toNanos(requestTimeout.healthCheckTimeoutMs());
long deadlineNs = timeoutNs + time.nanoseconds();
time.waitForFuture(cb, deadlineNs);
statusCode = Response.Status.OK.getStatusCode();
workerStatus = WorkerStatus.healthy();
} catch (TimeoutException e) {
String statusDetails = e instanceof StagedTimeoutException
? ((StagedTimeoutException) e).stage().summarize()
: null;
if (!herder.isReady()) {
statusCode = Response.Status.SERVICE_UNAVAILABLE.getStatusCode();
workerStatus = WorkerStatus.starting(statusDetails);
} else {
statusCode = Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
workerStatus = WorkerStatus.unhealthy(statusDetails);
}
} catch (ExecutionException e) {
throw e.getCause();
}
return Response.status(statusCode).entity(workerStatus).build();
}
|
@Test
public void testHealthCheckUnhealthy() throws Throwable {
expectHealthCheck(new TimeoutException());
when(herder.isReady()).thenReturn(true);
Response response = rootResource.healthCheck();
assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
WorkerStatus expectedStatus = WorkerStatus.unhealthy(null);
WorkerStatus actualStatus = workerStatus(response);
assertEquals(expectedStatus, actualStatus);
}
|
boolean isWriteEnclosureForValueMetaInterface( ValueMetaInterface v ) {
return ( isWriteEnclosed( v ) )
|| isEnclosureFixDisabledAndContainsSeparatorOrEnclosure( v.getName().getBytes() );
}
|
@Test
public void testWriteEnclosedForValueMetaInterfaceWithEnclosureFixDisabled() {
TextFileOutputData data = new TextFileOutputData();
data.binaryEnclosure = new byte[1];
data.writer = new ByteArrayOutputStream();
TextFileOutputMeta meta = getTextFileOutputMeta();
meta.setEnclosureForced(false);
meta.setEnclosureFixDisabled(true);
TextFileOutput textFileOutput = getTextFileOutput(data, meta);
ValueMetaBase valueMetaInterface = getValueMetaInterface();
assertFalse(textFileOutput.isWriteEnclosureForValueMetaInterface(valueMetaInterface));
}
|
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
}
|
@Test
public void parseMicroMessengerTest() {
final String uaString = "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Mobile/15A372 MicroMessenger/7.0.17(0x17001127) NetType/WIFI Language/zh_CN";
final UserAgent ua = UserAgentUtil.parse(uaString);
assertEquals("MicroMessenger", ua.getBrowser().toString());
assertEquals("7.0.17", ua.getVersion());
assertEquals("Webkit", ua.getEngine().toString());
assertEquals("604.1.38", ua.getEngineVersion());
assertEquals("iPhone", ua.getOs().toString());
assertEquals("11_0", ua.getOsVersion());
assertEquals("iPhone", ua.getPlatform().toString());
assertTrue(ua.isMobile());
}
|
@Override
public Iterator<RawUnionValue> call(Iterator<WindowedValue<InputT>> inputs) throws Exception {
SparkPipelineOptions options = pipelineOptions.get().as(SparkPipelineOptions.class);
// Register standard file systems.
FileSystems.setDefaultPipelineOptions(options);
// Do not call processElements if there are no inputs
// Otherwise, this may cause validation errors (e.g. ParDoTest)
if (!inputs.hasNext()) {
return Collections.emptyIterator();
}
try (ExecutableStageContext stageContext = contextFactory.get(jobInfo)) {
ExecutableStage executableStage = ExecutableStage.fromPayload(stagePayload);
try (StageBundleFactory stageBundleFactory =
stageContext.getStageBundleFactory(executableStage)) {
ConcurrentLinkedQueue<RawUnionValue> collector = new ConcurrentLinkedQueue<>();
StateRequestHandler stateRequestHandler =
getStateRequestHandler(
executableStage, stageBundleFactory.getProcessBundleDescriptor());
if (executableStage.getTimers().size() == 0) {
ReceiverFactory receiverFactory = new ReceiverFactory(collector, outputMap);
processElements(stateRequestHandler, receiverFactory, null, stageBundleFactory, inputs);
return collector.iterator();
}
// Used with batch, where we know that all the data is available for this key. We can't use the
// timer manager from the context because it doesn't exist, so we create one and advance
// time to the end after processing all elements.
final InMemoryTimerInternals timerInternals = new InMemoryTimerInternals();
timerInternals.advanceProcessingTime(Instant.now());
timerInternals.advanceSynchronizedProcessingTime(Instant.now());
ReceiverFactory receiverFactory = new ReceiverFactory(collector, outputMap);
TimerReceiverFactory timerReceiverFactory =
new TimerReceiverFactory(
stageBundleFactory,
(Timer<?> timer, TimerInternals.TimerData timerData) -> {
currentTimerKey = timer.getUserKey();
if (timer.getClearBit()) {
timerInternals.deleteTimer(timerData);
} else {
timerInternals.setTimer(timerData);
}
},
windowCoder);
// Process inputs.
processElements(
stateRequestHandler, receiverFactory, timerReceiverFactory, stageBundleFactory, inputs);
// Finish any pending windows by advancing the input watermark to infinity.
timerInternals.advanceInputWatermark(BoundedWindow.TIMESTAMP_MAX_VALUE);
// Finally, advance the processing time to infinity to fire any timers.
timerInternals.advanceProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
timerInternals.advanceSynchronizedProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
// Now we fire the timers and process elements generated by timers (which may themselves
// be timers)
while (timerInternals.hasPendingTimers()) {
try (RemoteBundle bundle =
stageBundleFactory.getBundle(
receiverFactory,
timerReceiverFactory,
stateRequestHandler,
getBundleProgressHandler())) {
PipelineTranslatorUtils.fireEligibleTimers(
timerInternals, bundle.getTimerReceivers(), currentTimerKey);
}
}
return collector.iterator();
}
}
}
|
@Test
public void testNoCallOnEmptyInputIterator() throws Exception {
SparkExecutableStageFunction<Integer, ?> function = getFunction(Collections.emptyMap());
function.call(Collections.emptyIterator());
verifyZeroInteractions(stageBundleFactory);
}
|
@Override
public CompletableFuture<Void> write(
TieredStoragePartitionId partitionId, List<SubpartitionBufferContext> buffersToWrite) {
List<CompletableFuture<Void>> completableFutures = new ArrayList<>();
buffersToWrite.forEach(
subpartitionBuffers -> {
int subpartitionId = subpartitionBuffers.getSubpartitionId();
List<SegmentBufferContext> segmentBufferContexts =
subpartitionBuffers.getSegmentBufferContexts();
segmentBufferContexts.forEach(
segmentBufferContext -> {
CompletableFuture<Void> flushSuccessNotifier =
new CompletableFuture<>();
ioExecutor.execute(
() ->
flushOrFinishSegment(
partitionId,
subpartitionId,
segmentBufferContext,
flushSuccessNotifier));
completableFutures.add(flushSuccessNotifier);
});
});
return FutureUtils.waitForAll(completableFutures);
}
|
@Test
void testWrite() throws IOException {
TieredStoragePartitionId partitionId =
TieredStorageIdMappingUtils.convertId(new ResultPartitionID());
int numSubpartitions = 5;
int numSegments = 10;
int numBuffersPerSegment = 10;
int bufferSizeBytes = 3;
Path tieredStorageDir = Path.fromLocalFile(tempFolder.toFile());
SegmentPartitionFileWriter partitionFileWriter =
new SegmentPartitionFileWriter(tieredStorageDir.getPath(), numSubpartitions);
// Prepare the buffers to be written
List<PartitionFileWriter.SubpartitionBufferContext> subpartitionBuffers =
generateBuffersToWrite(
numSubpartitions, numSegments, numBuffersPerSegment, bufferSizeBytes);
// Write the file
partitionFileWriter.write(partitionId, subpartitionBuffers);
partitionFileWriter.release();
// Check the written files
checkWrittenSegmentFiles(
partitionId,
numSubpartitions,
numSegments,
numBuffersPerSegment,
bufferSizeBytes,
tieredStorageDir);
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void neoforgeForestOptiFineIncompatible() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/mod/neoforgeforest_optifine_incompatibility.txt")),
CrashReportAnalyzer.Rule.NEOFORGE_FOREST_OPTIFINE);
}
|
@Override
public final void close() throws Exception {
try {
doClose();
} finally {
doFinally();
}
}
|
@Test
void testClose() throws Exception {
configuration.close();
}
|
public static void checkRowIDPartitionComponent(List<HiveColumnHandle> columns, Optional<byte[]> rowIdPartitionComponent)
{
boolean supplyRowIDs = columns.stream().anyMatch(column -> HiveColumnHandle.isRowIdColumnHandle(column));
if (supplyRowIDs) {
checkArgument(rowIdPartitionComponent.isPresent(), "rowIDPartitionComponent required when supplying row IDs");
}
}
|
@Test
public void testCheckRowIDPartitionComponent_rowID()
{
HiveColumnHandle handle = HiveColumnHandle.rowIdColumnHandle();
List<HiveColumnHandle> columns = ImmutableList.of(handle);
checkRowIDPartitionComponent(columns, Optional.of(new byte[0]));
}
|
protected GelfMessage toGELFMessage(final Message message) {
final DateTime timestamp;
final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
if (fieldTimeStamp instanceof DateTime) {
timestamp = (DateTime) fieldTimeStamp;
} else {
timestamp = Tools.nowUTC();
}
final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
final String forwarder = GelfOutput.class.getCanonicalName();
final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
.timestamp(timestamp.getMillis() / 1000.0d)
.additionalField("_forwarder", forwarder)
.additionalFields(message.getFields());
if (messageLevel != null) {
builder.level(messageLevel);
}
if (fullMessage != null) {
builder.fullMessage(fullMessage);
}
return builder.build();
}
|
@Test
public void testToGELFMessageWithInvalidNumericStringLevel() throws Exception {
final GelfTransport transport = mock(GelfTransport.class);
final GelfOutput gelfOutput = new GelfOutput(transport);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Message message = messageFactory.createMessage("Test", "Source", now);
message.addField("level", "-1");
final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
assertEquals(GelfMessageLevel.ALERT, gelfMessage.getLevel());
}
|
@Override
protected ConfigData<AppAuthData> fromJson(final JsonObject data) {
return GsonUtils.getGson().fromJson(data, new TypeToken<ConfigData<AppAuthData>>() {
}.getType());
}
|
@Test
public void testFromJson() {
ConfigData<AppAuthData> appAuthDataConfigData = new ConfigData<>();
AppAuthData appAuthData = new AppAuthData();
appAuthDataConfigData.setData(Collections.singletonList(appAuthData));
JsonObject jsonObject = GsonUtils.getGson().fromJson(GsonUtils.getGson().toJson(appAuthDataConfigData), JsonObject.class);
assertThat(mockAppAuthDataRefresh.fromJson(jsonObject), is(appAuthDataConfigData));
}
|
@Override
public ResourceCounter calculateRequiredSlots(
Iterable<JobInformation.VertexInformation> vertices) {
int numTotalRequiredSlots = 0;
for (SlotSharingGroupMetaInfo slotSharingGroupMetaInfo :
SlotSharingGroupMetaInfo.from(vertices).values()) {
numTotalRequiredSlots += slotSharingGroupMetaInfo.getMaxUpperBound();
}
return ResourceCounter.withResource(ResourceProfile.UNKNOWN, numTotalRequiredSlots);
}
|
@Test
void testCalculateRequiredSlots() {
final SlotSharingSlotAllocator slotAllocator =
SlotSharingSlotAllocator.createSlotSharingSlotAllocator(
TEST_RESERVE_SLOT_FUNCTION,
TEST_FREE_SLOT_FUNCTION,
TEST_IS_SLOT_FREE_FUNCTION);
final ResourceCounter resourceCounter =
slotAllocator.calculateRequiredSlots(Arrays.asList(vertex1, vertex2, vertex3));
assertThat(resourceCounter.getResources()).contains(ResourceProfile.UNKNOWN);
assertThat(resourceCounter.getResourceCount(ResourceProfile.UNKNOWN))
.isEqualTo(
Math.max(vertex1.getParallelism(), vertex2.getParallelism())
+ vertex3.getParallelism());
}
|
public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
}
|
@Test
void testDoInject() {
injector.doInject(null, null, null);
}
|
@Udf
public Long trunc(@UdfParameter final Long val) {
return val;
}
|
@Test
public void shouldHandleNullDecimalPlaces() {
assertThat(udf.trunc(1.75d, null), is(nullValue()));
assertThat(udf.trunc(new BigDecimal("1.75"), null), is(nullValue()));
}
|
@GetMapping("/detail")
@RequiresPermissions("system:authen:editResourceDetails")
public ShenyuAdminResult detail(@RequestParam("id")
@Existed(message = "app key not existed",
provider = AppAuthMapper.class) final String id) {
return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, appAuthService.findById(id));
}
|
@Test
public void testDetail() throws Exception {
given(this.appAuthService.findById("0001")).willReturn(appAuthVO);
this.mockMvc.perform(MockMvcRequestBuilders.get("/appAuth/detail")
.param("id", "0001"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS)))
.andExpect(jsonPath("$.data.id", is(appAuthVO.getId())))
.andReturn();
}
|
public static GtidSet fixRestoredGtidSet(GtidSet serverGtidSet, GtidSet restoredGtidSet) {
Map<String, GtidSet.UUIDSet> newSet = new HashMap<>();
serverGtidSet.getUUIDSets().forEach(uuidSet -> newSet.put(uuidSet.getUUID(), uuidSet));
for (GtidSet.UUIDSet uuidSet : restoredGtidSet.getUUIDSets()) {
GtidSet.UUIDSet serverUuidSet = newSet.get(uuidSet.getUUID());
if (serverUuidSet != null) {
long restoredIntervalEnd = getIntervalEnd(uuidSet);
List<com.github.shyiko.mysql.binlog.GtidSet.Interval> newIntervals =
new ArrayList<>();
for (GtidSet.Interval serverInterval : serverUuidSet.getIntervals()) {
if (serverInterval.getEnd() <= restoredIntervalEnd) {
newIntervals.add(
new com.github.shyiko.mysql.binlog.GtidSet.Interval(
serverInterval.getStart(), serverInterval.getEnd()));
} else if (serverInterval.getStart() <= restoredIntervalEnd
&& serverInterval.getEnd() > restoredIntervalEnd) {
newIntervals.add(
new com.github.shyiko.mysql.binlog.GtidSet.Interval(
serverInterval.getStart(), restoredIntervalEnd));
}
}
newSet.put(
uuidSet.getUUID(),
new GtidSet.UUIDSet(
new com.github.shyiko.mysql.binlog.GtidSet.UUIDSet(
uuidSet.getUUID(), newIntervals)));
} else {
newSet.put(uuidSet.getUUID(), uuidSet);
}
}
return new GtidSet(newSet);
}
|
@Test
void testFixingRestoredGtidSet() {
GtidSet serverGtidSet = new GtidSet("A:1-100");
GtidSet restoredGtidSet = new GtidSet("A:30-100");
assertThat(fixRestoredGtidSet(serverGtidSet, restoredGtidSet).toString())
.isEqualTo("A:1-100");
serverGtidSet = new GtidSet("A:1-100");
restoredGtidSet = new GtidSet("A:30-50");
assertThat(fixRestoredGtidSet(serverGtidSet, restoredGtidSet).toString())
.isEqualTo("A:1-50");
serverGtidSet = new GtidSet("A:1-100:102-200,B:20-200");
restoredGtidSet = new GtidSet("A:106-150");
assertThat(fixRestoredGtidSet(serverGtidSet, restoredGtidSet).toString())
.isEqualTo("A:1-100:102-150,B:20-200");
serverGtidSet = new GtidSet("A:1-100:102-200,B:20-200");
restoredGtidSet = new GtidSet("A:106-150,C:1-100");
assertThat(fixRestoredGtidSet(serverGtidSet, restoredGtidSet).toString())
.isEqualTo("A:1-100:102-150,B:20-200,C:1-100");
serverGtidSet = new GtidSet("A:1-100:102-200,B:20-200");
restoredGtidSet = new GtidSet("A:106-150:152-200,C:1-100");
assertThat(fixRestoredGtidSet(serverGtidSet, restoredGtidSet).toString())
.isEqualTo("A:1-100:102-200,B:20-200,C:1-100");
}
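A restatement of the third case above with the trimming rule spelled out, runnable as-is:
GtidSet server = new GtidSet("A:1-100:102-200,B:20-200");
GtidSet restored = new GtidSet("A:106-150");
// A's restored interval ends at 150: 1-100 is kept whole, 102-200 is trimmed to 102-150;
// B is untouched because the restored set never mentions it
assertThat(fixRestoredGtidSet(server, restored).toString())
        .isEqualTo("A:1-100:102-150,B:20-200");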
|
public static Schema mergeWideningNullable(Schema schema1, Schema schema2) {
if (schema1.getFieldCount() != schema2.getFieldCount()) {
throw new IllegalArgumentException(
"Cannot merge schemas with different numbers of fields. "
+ "schema1: "
+ schema1
+ " schema2: "
+ schema2);
}
Schema.Builder builder = Schema.builder();
for (int i = 0; i < schema1.getFieldCount(); ++i) {
String name = schema1.getField(i).getName();
builder.addField(
name, widenNullableTypes(schema1.getField(i).getType(), schema2.getField(i).getType()));
}
return builder.build();
}
|
@Test
public void testWidenArray() {
Schema schema1 = Schema.builder().addArrayField("field1", FieldType.INT32).build();
Schema schema2 =
Schema.builder().addArrayField("field1", FieldType.INT32.withNullable(true)).build();
Schema expected =
Schema.builder().addArrayField("field1", FieldType.INT32.withNullable(true)).build();
assertEquals(expected, SchemaUtils.mergeWideningNullable(schema1, schema2));
}
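A companion sketch for plain (non-array) fields, using builder helpers that exist on Beam's Schema.Builder:
Schema s1 = Schema.builder().addInt32Field("f").build();
Schema s2 = Schema.builder().addNullableField("f", FieldType.INT32).build();
// widening a non-nullable INT32 against a nullable one yields the nullable schema
assertEquals(s2, SchemaUtils.mergeWideningNullable(s1, s2));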
|
@Override
public Mono<MatchResult> matches(ServerWebExchange exchange) {
return isWebSocketUpgrade(exchange.getRequest().getHeaders()) ? match() : notMatch();
}
|
@Test
void shouldNotMatchIfNotWebSocketProtocol() {
var httpRequest = MockServerHttpRequest.get("")
.header(HttpHeaders.CONNECTION, HttpHeaders.UPGRADE)
.header(HttpHeaders.UPGRADE, "not-a-websocket")
.build();
var wsExchange = MockServerWebExchange.from(httpRequest);
var wsMatcher = new WebSocketServerWebExchangeMatcher();
StepVerifier.create(wsMatcher.matches(wsExchange))
.consumeNextWith(result -> assertFalse(result.isMatch()))
.verifyComplete();
}
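The matching direction, sketched with the same mock-request helpers; it assumes isWebSocketUpgrade recognizes the standard Connection: Upgrade / Upgrade: websocket header pair and that assertTrue is statically imported like assertFalse above:
var upgradeRequest = MockServerHttpRequest.get("")
        .header(HttpHeaders.CONNECTION, HttpHeaders.UPGRADE)
        .header(HttpHeaders.UPGRADE, "websocket")
        .build();
StepVerifier.create(new WebSocketServerWebExchangeMatcher()
                .matches(MockServerWebExchange.from(upgradeRequest)))
        .consumeNextWith(result -> assertTrue(result.isMatch()))
        .verifyComplete();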
|
public Component buildProject(ScannerReport.Component project, String scmBasePath) {
this.rootComponent = project;
this.scmBasePath = trimToNull(scmBasePath);
Node root = createProjectHierarchy(project);
return buildComponent(root, "", "");
}
|
@Test
void project_description_is_loaded_from_report_if_present_and_on_main_branch() {
String reportDescription = randomAlphabetic(5);
ScannerReport.Component reportProject = newBuilder()
.setType(PROJECT)
.setDescription(reportDescription)
.build();
Component root = newUnderTest(SOME_PROJECT_ATTRIBUTES, true).buildProject(reportProject, NO_SCM_BASE_PATH);
assertThat(root.getDescription()).isEqualTo(reportDescription);
}
|
@Override
public boolean accept(final Path file) {
if(list.find(new SimplePathPredicate(file)) != null) {
return true;
}
for(Path f : list) {
if(f.isChild(file)) {
return true;
}
}
if(log.isDebugEnabled()) {
log.debug(String.format("Filter %s", file));
}
return false;
}
|
@Test
public void testAcceptDirectoryChildrenOnly() {
final RecursiveSearchFilter f = new RecursiveSearchFilter(new AttributedList<>(Arrays.asList(
new Path("/d/f", EnumSet.of(Path.Type.file)))
));
assertTrue(f.accept(new Path("/d", EnumSet.of(Path.Type.directory))));
assertTrue(f.accept(new Path("/d/f", EnumSet.of(Path.Type.file))));
assertFalse(f.accept(new Path("/d/f2", EnumSet.of(Path.Type.file))));
assertFalse(f.accept(new Path("/a", EnumSet.of(Path.Type.file))));
}
|
public final void tag(I input, ScopedSpan span) {
if (input == null) throw new NullPointerException("input == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
tag(span, input, span.context());
}
|
@Test void tag_customizer_withContext_ignoredErrorParsing() {
when(parseValue.apply(input, context)).thenThrow(new Error());
tag.tag(input, context, customizer);
verify(parseValue).apply(input, context);
verifyNoMoreInteractions(parseValue); // doesn't parse twice
verifyNoMoreInteractions(customizer);
}
|
@Override
public void validate(final String methodName, final Class<?>[] parameterTypes, final Object[] arguments) throws Exception {
List<Class<?>> groups = new ArrayList<>();
Class<?> methodClass = methodClass(methodName);
if (Objects.nonNull(methodClass)) {
groups.add(methodClass);
}
Set<ConstraintViolation<?>> violations = new HashSet<>();
Method method = clazz.getMethod(methodName, parameterTypes);
Class<?>[] methodClasses;
if (method.isAnnotationPresent(MethodValidated.class)) {
methodClasses = method.getAnnotation(MethodValidated.class).value();
groups.addAll(Arrays.asList(methodClasses));
}
// add the default groups at the front
groups.add(0, Default.class);
groups.add(1, clazz);
// convert list to array
Class<?>[] classGroups = new Class<?>[groups.size()];
classGroups = groups.toArray(classGroups);
Object parameterBean = getMethodParameterBean(clazz, method, arguments);
if (parameterBean != null) {
violations.addAll(validator.validate(parameterBean, classGroups));
}
for (Object arg : arguments) {
validate(violations, arg, classGroups);
}
if (!violations.isEmpty()) {
LOG.error("Failed to validate service: {}, method: {}, cause: {}", clazz.getName(), methodName, violations);
StringBuilder validateError = new StringBuilder();
violations.forEach(each -> validateError.append(each.getMessage()).append(","));
throw new ValidationException(validateError.substring(0, validateError.length() - 1));
}
}
|
@Test
public void testItWithCollectionArg() throws Exception {
apacheDubboClientValidatorUnderTest
.validate(
"methodFour",
new Class<?>[]{List.class},
new Object[]{Collections.singletonList("parameter")});
}
|
@Override
public Num calculate(BarSeries series, Position position) {
return position.hasProfit() ? series.one() : series.zero();
}
|
@Test
public void calculateWithTwoLongPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
assertNumEquals(2, getCriterion().calculate(series, tradingRecord));
}
|
@Override
public String getTypeAsString() {
String type;
if (!value.isEmpty() && StructType.class.isAssignableFrom(value.get(0).getClass())) {
type = value.get(0).getTypeAsString();
} else {
type = AbiTypes.getTypeAString(getComponentType());
}
return type + "[" + value.size() + "]";
}
|
@Test
public void testEmptyStaticArray() {
final StaticArray<Address> array =
new StaticArray0<>(Address.class, Collections.emptyList());
assertEquals(Address.TYPE_NAME + "[0]", array.getTypeAsString());
}
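A companion sketch for a non-empty array, assuming web3j's generated StaticArray2 wrapper with its (Class, varargs) constructor; the element count should land in the type string:
StaticArray<Address> two = new StaticArray2<>(Address.class,
        new Address("0x0000000000000000000000000000000000000001"),
        new Address("0x0000000000000000000000000000000000000002"));
assertEquals(Address.TYPE_NAME + "[2]", two.getTypeAsString());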
|
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor2(
TwoInputStreamOperator<?, T, ?> operator) {
boolean canOmitSetKeyContext;
if (operator instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) operator, 1);
} else {
canOmitSetKeyContext =
operator instanceof KeyContextHandler
&& !((KeyContextHandler) operator).hasKeyContext2();
}
if (canOmitSetKeyContext) {
return operator::processElement2;
} else if (operator instanceof AsyncStateProcessing
&& ((AsyncStateProcessing) operator).isAsyncStateProcessingEnabled()) {
return ((AsyncStateProcessing) operator).getRecordProcessor(2);
} else {
return record -> {
operator.setKeyContextElement2(record);
operator.processElement2(record);
};
}
}
|
@Test
void testGetRecordProcessor2() throws Exception {
TestOperator operator1 = new TestOperator();
TestOperator operator2 = new TestKeyContextHandlerOperator(true, true);
TestOperator operator3 = new TestKeyContextHandlerOperator(true, false);
RecordProcessorUtils.getRecordProcessor2(operator1).accept(new StreamRecord<>("test"));
assertThat(operator1.setKeyContextElement2Called).isTrue();
assertThat(operator1.processElement2Called).isTrue();
RecordProcessorUtils.getRecordProcessor2(operator2).accept(new StreamRecord<>("test"));
assertThat(operator2.setKeyContextElement2Called).isTrue();
assertThat(operator2.processElement2Called).isTrue();
RecordProcessorUtils.getRecordProcessor2(operator3).accept(new StreamRecord<>("test"));
assertThat(operator3.setKeyContextElement2Called).isFalse();
assertThat(operator3.processElement2Called).isTrue();
}
|
public void wrap(final byte[] buffer)
{
capacity = buffer.length;
addressOffset = ARRAY_BASE_OFFSET;
byteBuffer = null;
wrapAdjustment = 0;
if (buffer != byteArray)
{
byteArray = buffer;
}
}
|
@Test
void shouldNotWrapInValidRange()
{
final UnsafeBuffer buffer = new UnsafeBuffer(new byte[8]);
final UnsafeBuffer slice = new UnsafeBuffer();
assertThrows(IllegalArgumentException.class, () -> slice.wrap(buffer, -1, 0));
assertThrows(IllegalArgumentException.class, () -> slice.wrap(buffer, 0, -1));
assertThrows(IllegalArgumentException.class, () -> slice.wrap(buffer, 8, 1));
assertThrows(IllegalArgumentException.class, () -> slice.wrap(buffer, 7, 3));
}
|
public static Expression convert(Filter[] filters) {
Expression expression = Expressions.alwaysTrue();
for (Filter filter : filters) {
Expression converted = convert(filter);
Preconditions.checkArgument(
converted != null, "Cannot convert filter to Iceberg: %s", filter);
expression = Expressions.and(expression, converted);
}
return expression;
}
|
@Test
public void testDateFilterConversion() {
LocalDate localDate = LocalDate.parse("2018-10-18");
Date date = Date.valueOf(localDate);
long epochDay = localDate.toEpochDay();
Expression localDateExpression = SparkFilters.convert(GreaterThan.apply("x", localDate));
Expression dateExpression = SparkFilters.convert(GreaterThan.apply("x", date));
Expression rawExpression = Expressions.greaterThan("x", epochDay);
Assert.assertEquals(
"Generated localdate expression should be correct",
rawExpression.toString(),
localDateExpression.toString());
Assert.assertEquals(
"Generated date expression should be correct",
rawExpression.toString(),
dateExpression.toString());
}
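For the array overload above, a small sketch of how multiple filters fold into one conjunction seeded with alwaysTrue():
Filter[] filters = {GreaterThan.apply("x", 5), GreaterThan.apply("y", 10)};
Expression combined = SparkFilters.convert(filters);
// combined renders roughly as (true and (x > 5) and (y > 10))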
|
@Override
protected void processOptions(LinkedList<String> args) {
CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE,
OPTION_EXCLUDE_SNAPSHOT,
OPTION_ECPOLICY, OPTION_SNAPSHOT_COUNT);
cf.addOptionWithValue(OPTION_TYPE);
cf.parse(args);
if (args.isEmpty()) { // default path is the current working directory
args.add(".");
}
showQuotas = cf.getOpt(OPTION_QUOTA);
humanReadable = cf.getOpt(OPTION_HUMAN);
showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE);
excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
showSnapshot = cf.getOpt(OPTION_SNAPSHOT_COUNT);
if (showQuotas || showQuotasAndUsageOnly) {
String types = cf.getOptValue(OPTION_TYPE);
if (null != types) {
showQuotabyType = true;
storageTypes = getAndCheckStorageTypes(types);
} else {
showQuotabyType = false;
}
if (excludeSnapshots) {
out.println(OPTION_QUOTA + " or " + OPTION_QUOTA_AND_USAGE + " option "
+ "is given, the -x option is ignored.");
excludeSnapshots = false;
}
}
if (cf.getOpt(OPTION_HEADER)) {
StringBuilder headString = new StringBuilder();
if (showQuotabyType) {
headString.append(QuotaUsage.getStorageTypeHeader(storageTypes));
} else {
if (showQuotasAndUsageOnly) {
headString.append(QuotaUsage.getHeader());
} else {
headString.append(ContentSummary.getHeader(showQuotas));
}
}
if (displayECPolicy) {
headString.append(ContentSummary.getErasureCodingPolicyHeader());
}
if (showSnapshot) {
headString.append(ContentSummary.getSnapshotHeader());
}
headString.append("PATHNAME");
out.println(headString.toString());
}
}
|
@Test
public void processPathWithQuotasByQTVH() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-t");
options.add("-v");
options.add("-h");
options.add("dummy");
count.processOptions(options);
String withStorageTypeHeader =
// <----14----> <-------18------->
" SSD_QUOTA REM_SSD_QUOTA " +
" DISK_QUOTA REM_DISK_QUOTA " +
" ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
"PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
" NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
"PATHNAME";
verify(out).println(withStorageTypeHeader);
verifyNoMoreInteractions(out);
}
|
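The same options can be exercised end to end through FsShell; a minimal sketch (the path is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public final class CountExample {
    public static void main(String[] args) throws Exception {
        // Equivalent to: hadoop fs -count -q -h -v /user/data
        int exit = ToolRunner.run(new FsShell(new Configuration()),
                new String[]{"-count", "-q", "-h", "-v", "/user/data"});
        System.exit(exit);
    }
}

|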
public static StackTraceElement[] extract(Throwable t,
String fqnOfInvokingClass, final int maxDepth,
List<String> frameworkPackageList) {
if (t == null) {
return null;
}
StackTraceElement[] steArray = t.getStackTrace();
StackTraceElement[] callerDataArray;
int found = LINE_NA;
for (int i = 0; i < steArray.length; i++) {
if (isInFrameworkSpace(steArray[i].getClassName(),
fqnOfInvokingClass, frameworkPackageList)) {
// the caller is assumed to be the next stack frame, hence the +1.
found = i + 1;
} else {
if (found != LINE_NA) {
break;
}
}
}
// we failed to extract caller data
if (found == LINE_NA) {
return EMPTY_CALLER_DATA_ARRAY;
}
int availableDepth = steArray.length - found;
int desiredDepth = maxDepth < (availableDepth) ? maxDepth : availableDepth;
callerDataArray = new StackTraceElement[desiredDepth];
for (int i = 0; i < desiredDepth; i++) {
callerDataArray[i] = steArray[found + i];
}
return callerDataArray;
}
|
@Test
public void testBasic() {
Throwable t = new Throwable();
StackTraceElement[] steArray = t.getStackTrace();
StackTraceElement[] cda = CallerData.extract(t, CallerDataTest.class.getName(), 50, null);
assertNotNull(cda);
assertTrue(cda.length > 0);
assertEquals(steArray.length - 1, cda.length);
}
|
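A small sketch of calling the extractor directly (logback-classic; the Facade class is a hypothetical stand-in for a logging wrapper whose frames must be skipped):

import ch.qos.logback.classic.spi.CallerData;
import java.util.Collections;

final class Facade {
    static StackTraceElement[] callerOf() {
        return CallerData.extract(new Throwable(), Facade.class.getName(),
                8, Collections.<String>emptyList());
    }
}

public final class CallerDataExample {
    public static void main(String[] args) {
        // The first reported frame is this main method, because Facade's
        // own frame is treated as "framework space" and skipped.
        for (StackTraceElement ste : Facade.callerOf()) {
            System.out.println(ste);
        }
    }
}

|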
@Override
public Long createAiVideoTemplate(AiVideoTemplateCreateReqVO createReqVO) {
// insert
AiVideoTemplateDO aiVideoTemplate = AiVideoTemplateConvert.INSTANCE.convert(createReqVO);
aiVideoTemplateMapper.insert(aiVideoTemplate);
// return
return aiVideoTemplate.getId();
}
|
@Test
public void testCreateAiVideoTemplate_success() {
// prepare parameters
AiVideoTemplateCreateReqVO reqVO = randomPojo(AiVideoTemplateCreateReqVO.class);
// invoke
Long aiVideoTemplateId = aiVideoTemplateService.createAiVideoTemplate(reqVO);
// assert
assertNotNull(aiVideoTemplateId);
// verify the persisted record's attributes are correct
AiVideoTemplateDO aiVideoTemplate = aiVideoTemplateMapper.selectById(aiVideoTemplateId);
assertPojoEquals(reqVO, aiVideoTemplate);
}
|
@Override
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
Map<String, Subscription> subscriptions) {
Map<String, List<TopicPartition>> assignment = new HashMap<>();
List<MemberInfo> memberInfoList = new ArrayList<>();
for (Map.Entry<String, Subscription> memberSubscription : subscriptions.entrySet()) {
assignment.put(memberSubscription.getKey(), new ArrayList<>());
memberInfoList.add(new MemberInfo(memberSubscription.getKey(),
memberSubscription.getValue().groupInstanceId()));
}
CircularIterator<MemberInfo> assigner = new CircularIterator<>(Utils.sorted(memberInfoList));
for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
final String topic = partition.topic();
while (!subscriptions.get(assigner.peek().memberId).topics().contains(topic))
assigner.next();
assignment.get(assigner.next().memberId).add(partition);
}
return assignment;
}
|
@Test
public void testOneConsumerMultipleTopics() {
Map<String, Integer> partitionsPerTopic = setupPartitionsPerTopicWithTwoTopics(1, 2);
Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))));
assertEquals(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId));
}
|
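A usage sketch of the assignor, assuming a kafka-clients version whose AbstractPartitionAssignor#assign matches the signature above:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
import org.apache.kafka.clients.consumer.RoundRobinAssignor;
import org.apache.kafka.common.TopicPartition;

public final class RoundRobinExample {
    public static void main(String[] args) {
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("orders", 3);

        Map<String, Subscription> subscriptions = new HashMap<>();
        subscriptions.put("consumer-a", new Subscription(Arrays.asList("orders")));
        subscriptions.put("consumer-b", new Subscription(Arrays.asList("orders")));

        Map<String, List<TopicPartition>> assignment =
                new RoundRobinAssignor().assign(partitionsPerTopic, subscriptions);
        // Partitions alternate across sorted members: a -> [0, 2], b -> [1].
        assignment.forEach((member, parts) -> System.out.println(member + " -> " + parts));
    }
}

|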
@Override
public List<String> getServices() {
return polarisServiceDiscovery.getServices();
}
|
@Test
public void testGetServices() {
when(polarisServiceDiscovery.getServices()).thenReturn(singletonList(SERVICE_PROVIDER));
List<String> services = client.getServices();
assertThat(services).contains(SERVICE_PROVIDER).size().isEqualTo(1);
}
|
@Override
public SmsTemplateDO getSmsTemplate(Long id) {
return smsTemplateMapper.selectById(id);
}
|
@Test
public void testGetSmsTemplate() {
// mock data
SmsTemplateDO dbSmsTemplate = randomSmsTemplateDO();
smsTemplateMapper.insert(dbSmsTemplate); // @Sql: insert an existing record first
// prepare parameters
Long id = dbSmsTemplate.getId();
// invoke
SmsTemplateDO smsTemplate = smsTemplateService.getSmsTemplate(id);
// verify
assertPojoEquals(dbSmsTemplate, smsTemplate);
}
|
@Override
protected String convertToString(final Integer value) {
return value.toString();
}
|
@Test
void testConvertToString() throws Exception {
assertThat(subtaskIndexPathParameter.convertToString(Integer.MAX_VALUE))
.isEqualTo("2147483647");
}
|
public synchronized void addVulnerableSoftwareIdentifier(Identifier identifier) {
this.vulnerableSoftwareIdentifiers.add(identifier);
}
|
@Test
public void testAddVulnerableSoftwareIdentifier() throws Exception {
CpeBuilder builder = new CpeBuilder();
Cpe cpe = builder.part(Part.APPLICATION).vendor("apache").product("struts").version("2.1.2").build();
CpeIdentifier id = new CpeIdentifier(cpe, Confidence.HIGHEST);
cpe = builder.part(Part.APPLICATION).vendor("apache").product("struts").version("2.1.2").build();
CpeIdentifier expResult = new CpeIdentifier(cpe, Confidence.HIGHEST);
Dependency instance = new Dependency();
instance.addVulnerableSoftwareIdentifier(id);
assertEquals(1, instance.getVulnerableSoftwareIdentifiers().size());
assertTrue("Identifier doesn't contain expected result.", instance.getVulnerableSoftwareIdentifiers().contains(expResult));
}
|
public final void containsNoneOf(
@Nullable Object firstExcluded,
@Nullable Object secondExcluded,
@Nullable Object @Nullable ... restOfExcluded) {
containsNoneIn(accumulate(firstExcluded, secondExcluded, restOfExcluded));
}
|
@Test
public void iterableContainsNoneOf() {
assertThat(asList(1, 2, 3)).containsNoneOf(4, 5, 6);
}
|
public static ExtensibleLoadManagerImpl get(LoadManager loadManager) {
if (!(loadManager instanceof ExtensibleLoadManagerWrapper loadManagerWrapper)) {
throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'.");
}
return loadManagerWrapper.get();
}
|
@Test
public void testLookupOptions() throws Exception {
Pair<TopicName, NamespaceBundle> topicAndBundle =
getBundleIsNotOwnByChangeEventTopic("test-lookup-options");
TopicName topicName = topicAndBundle.getLeft();
NamespaceBundle bundle = topicAndBundle.getRight();
admin.topics().createPartitionedTopic(topicName.toString(), 1);
// Test LookupOptions.readOnly = true when the bundle is not owned by any broker.
Optional<URL> webServiceUrlReadOnlyTrue = pulsar1.getNamespaceService()
.getWebServiceUrl(bundle, LookupOptions.builder().readOnly(true).requestHttps(false).build());
assertTrue(webServiceUrlReadOnlyTrue.isEmpty());
// Test LookupOptions.readOnly = false and the bundle is assigned to some broker.
Optional<URL> webServiceUrlReadOnlyFalse = pulsar1.getNamespaceService()
.getWebServiceUrl(bundle, LookupOptions.builder().readOnly(false).requestHttps(false).build());
assertTrue(webServiceUrlReadOnlyFalse.isPresent());
// Test LookupOptions.requestHttps = true
Optional<URL> webServiceUrlHttps = pulsar2.getNamespaceService()
.getWebServiceUrl(bundle, LookupOptions.builder().requestHttps(true).build());
assertTrue(webServiceUrlHttps.isPresent());
assertTrue(webServiceUrlHttps.get().toString().startsWith("https"));
// TODO: Support LookupOptions.loadTopicsInBundle = true
// Test LookupOptions.advertisedListenerName = internal, but the broker does not have an internal listener.
try {
pulsar2.getNamespaceService()
.getWebServiceUrl(bundle, LookupOptions.builder().advertisedListenerName("internal").build());
fail();
} catch (Exception e) {
assertTrue(e.getMessage().contains("the broker do not have internal listener"));
}
}
|
@Override
public boolean shouldSample() {
// This load might race with the store below, causing multiple threads to get a sample
// since the new timestamp has not been written yet, but it is extremely unlikely and
// the consequences are not severe since this is a probabilistic sampler that does not
// provide hard lower or upper bounds.
long lastSampledAt = lastSampledAtNanoTime.get(); // TODO getPlain? No transitive visibility requirements
long now = nanoClock.nanoTimeNow();
double secsSinceLastSample = (now - lastSampledAt) / 1_000_000_000.0;
// As the time elapsed since last sample increases, so does the probability of a new sample
// being selected.
double sampleProb = Math.min(secsSinceLastSample * desiredSamplesPerSec, 1.0);
if (randomSupplier.get().nextDouble() < sampleProb) {
lastSampledAtNanoTime.set(now); // TODO setPlain? No transitive visibility requirements
return true;
} else {
return false;
}
}
|
@Test
void zero_desired_sample_rate_returns_false() {
var clock = MockUtils.mockedClockReturning(ms2ns(10_000));
var rng = MockUtils.mockedRandomReturning(0.99999999); // [0, 1)
var sampler = new ProbabilisticSampleRate(clock, () -> rng, 0.0);
assertFalse(sampler.shouldSample());
}
|
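A self-contained sketch of the same time-decayed sampling idea (a hypothetical class, not the library's API):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

public final class RateTargetedSampler {
    private final AtomicLong lastSampledAtNanos = new AtomicLong();
    private final double desiredSamplesPerSec;

    public RateTargetedSampler(double desiredSamplesPerSec) {
        this.desiredSamplesPerSec = desiredSamplesPerSec;
    }

    public boolean shouldSample() {
        long now = System.nanoTime();
        double secsSinceLast = (now - lastSampledAtNanos.get()) / 1_000_000_000.0;
        // Probability grows linearly with elapsed time and is capped at 1.0,
        // so roughly desiredSamplesPerSec samples are taken per second.
        double p = Math.min(secsSinceLast * desiredSamplesPerSec, 1.0);
        if (ThreadLocalRandom.current().nextDouble() < p) {
            lastSampledAtNanos.set(now); // benign race, as noted above
            return true;
        }
        return false;
    }
}

|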
@Override
public ResultSet executeQuery(String sql)
throws SQLException {
validateState();
try {
if (!DriverUtils.queryContainsLimitStatement(sql)) {
sql += " " + LIMIT_STATEMENT + " " + _maxRows;
}
String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
ResultSetGroup resultSetGroup = _session.execute(enabledSql);
if (resultSetGroup.getResultSetCount() == 0) {
_resultSet = PinotResultSet.empty();
return _resultSet;
}
_resultSet = new PinotResultSet(resultSetGroup.getResultSet(0));
return _resultSet;
} catch (PinotClientException e) {
throw new SQLException(String.format("Failed to execute query : %s", sql), e);
}
}
|
@Test
public void testSetDisableNullHandling()
throws Exception {
Properties props = new Properties();
props.put(QueryOptionKey.ENABLE_NULL_HANDLING, "false");
PinotConnection pinotConnection =
new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
Statement statement = pinotConnection.createStatement();
Assert.assertNotNull(statement);
statement.executeQuery(BASIC_TEST_QUERY);
String expectedSql =
DriverUtils.createSetQueryOptionString(QueryOptionKey.ENABLE_NULL_HANDLING, false) + BASIC_TEST_QUERY;
Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, expectedSql.length()), expectedSql);
}
|
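A hedged usage sketch through the standard JDBC entry point (assumes the Pinot JDBC driver on the classpath; host, port, and table are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public final class PinotQueryExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:pinot://localhost:9000");
             Statement stmt = conn.createStatement();
             // A LIMIT clause is appended automatically when the query has none.
             ResultSet rs = stmt.executeQuery("SELECT playerName FROM baseballStats")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}

|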
@Override
public void logAfterExecution(StatementContext context) {
log(context);
}
|
@Test
public void logsExecutionTime() {
final MetricRegistry mockRegistry = mock(MetricRegistry.class);
final StatementNameStrategy mockNameStrategy = mock(StatementNameStrategy.class);
final InstrumentedSqlLogger logger = new InstrumentedSqlLogger(mockRegistry, mockNameStrategy);
final StatementContext mockContext = mock(StatementContext.class);
final Timer mockTimer = mock(Timer.class);
final String statementName = "my-fake-name";
final long fakeElapsed = 1234L;
when(mockNameStrategy.getStatementName(mockContext)).thenReturn(statementName);
when(mockRegistry.timer(statementName)).thenReturn(mockTimer);
when(mockContext.getElapsedTime(ChronoUnit.NANOS)).thenReturn(fakeElapsed);
logger.logAfterExecution(mockContext);
verify(mockTimer).update(fakeElapsed, TimeUnit.NANOSECONDS);
}
|
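A short wiring sketch for Jdbi 3, assuming the dropwizard metrics-jdbi3 module (the H2 URL is a placeholder):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jdbi3.InstrumentedSqlLogger;
import org.jdbi.v3.core.Jdbi;

public final class JdbiMetricsExample {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Jdbi jdbi = Jdbi.create("jdbc:h2:mem:example");
        // Each statement's elapsed time is recorded in a Timer named by the
        // configured StatementNameStrategy (the default one here).
        jdbi.setSqlLogger(new InstrumentedSqlLogger(registry));
    }
}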