| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
/**
 * Authenticates a client by validating the Athenz RoleToken supplied in the
 * authentication data (either command data or the ZPE token HTTP header).
 *
 * Returns the token's principal on success; throws AuthenticationException on
 * any failure, after recording a failure metric with the most specific
 * ErrorCode reached before the failure.
 */
@Override
public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
    SocketAddress clientAddress;
    String roleToken;
    // Tracks the most specific failure reason; read by the catch block below
    // so the failure metric reflects why authentication failed.
    ErrorCode errorCode = ErrorCode.UNKNOWN;
    try {
        // A peer address is required for logging/diagnostics of the client.
        if (authData.hasDataFromPeer()) {
            clientAddress = authData.getPeerAddress();
        } else {
            errorCode = ErrorCode.NO_CLIENT;
            throw new AuthenticationException("Authentication data source does not have a client address");
        }
        // The token may arrive via the binary protocol command or, failing
        // that, via the ZPE token HTTP header.
        if (authData.hasDataFromCommand()) {
            roleToken = authData.getCommandData();
        } else if (authData.hasDataFromHttp()) {
            roleToken = authData.getHttpHeader(AuthZpeClient.ZPE_TOKEN_HDR);
        } else {
            errorCode = ErrorCode.NO_TOKEN;
            throw new AuthenticationException("Authentication data source does not have a role token");
        }
        if (roleToken == null) {
            errorCode = ErrorCode.NO_TOKEN;
            throw new AuthenticationException("Athenz token is null, can't authenticate");
        }
        if (roleToken.isEmpty()) {
            errorCode = ErrorCode.NO_TOKEN;
            throw new AuthenticationException("Athenz RoleToken is empty, Server is Using Athenz Authentication");
        }
        if (log.isDebugEnabled()) {
            log.debug("Athenz RoleToken : [{}] received from Client: {}", roleToken, clientAddress);
        }
        RoleToken token = new RoleToken(roleToken);
        // Only tokens issued for one of the configured domains are accepted.
        if (!domainNameList.contains(token.getDomain())) {
            errorCode = ErrorCode.DOMAIN_MISMATCH;
            throw new AuthenticationException(
                    String.format("Athenz RoleToken Domain mismatch, Expected: %s, Found: %s",
                            domainNameList.toString(), token.getDomain()));
        }
        // Synchronize for non-thread safe static calls inside athenz library
        synchronized (this) {
            PublicKey ztsPublicKey = AuthZpeClient.getZtsPublicKey(token.getKeyId());
            if (ztsPublicKey == null) {
                errorCode = ErrorCode.NO_PUBLIC_KEY;
                throw new AuthenticationException("Unable to retrieve ZTS Public Key");
            }
            // Validate the token signature against the ZTS public key, allowing
            // the configured clock-skew offset.
            if (token.validate(ztsPublicKey, allowedOffset, false, null)) {
                log.debug("Athenz Role Token : {}, Authenticated for Client: {}", roleToken, clientAddress);
                AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
                return token.getPrincipal();
            } else {
                errorCode = ErrorCode.INVALID_TOKEN;
                throw new AuthenticationException(
                        String.format("Athenz Role Token Not Authenticated from Client: %s", clientAddress));
            }
        }
    } catch (AuthenticationException exception) {
        // Record the failure with the error code reached above, then rethrow.
        incrementFailureMetric(errorCode);
        throw exception;
    }
}
|
@Test
public void testAuthenticateSignedToken() throws Exception {
    // Build a role token for domain "test_provider" with a single role and
    // principal "test_app".
    List<String> roles = new ArrayList<String>() {
        {
            add("test_role");
        }
    };
    RoleToken token = new RoleToken.Builder("Z1", "test_provider", roles).principal("test_app").build();
    // Sign the token with the test ZTS private key so validation succeeds.
    String privateKey = new String(Files.readAllBytes(Paths.get("./src/test/resources/zts_private.pem")));
    token.sign(privateKey);
    AuthenticationDataSource authData = new AuthenticationDataCommand(token.getSignedToken(),
            new InetSocketAddress("localhost", 0), null);
    // A validly signed token must authenticate and yield its principal.
    assertEquals(provider.authenticate(authData), "test_app");
}
|
/**
 * Computes how many containers of the required size fit into the available
 * resources. Memory is always a limiting dimension; virtual cores are only
 * considered when CPU scheduling is enabled in {@code resourceTypes}.
 */
public static int computeAvailableContainers(Resource available,
    Resource required, EnumSet<SchedulerResourceTypes> resourceTypes) {
  int byMemory = calculateRatioOrMaxValue(
      available.getMemorySize(), required.getMemorySize());
  if (!resourceTypes.contains(SchedulerResourceTypes.CPU)) {
    return byMemory;
  }
  int byCpu = calculateRatioOrMaxValue(
      available.getVirtualCores(), required.getVirtualCores());
  return Math.min(byMemory, byCpu);
}
|
@Test
public void testComputeAvailableContainers() throws Exception {
    // 81920 MB / 40 vcores available; 1024 MB / 2 vcores required
    // => 80 containers by memory, 20 by CPU.
    Resource clusterAvailableResources = Resource.newInstance(81920, 40);
    Resource nonZeroResource = Resource.newInstance(1024, 2);
    int expectedNumberOfContainersForMemory = 80;
    int expectedNumberOfContainersForCPU = 20;
    verifyDifferentResourceTypes(clusterAvailableResources, nonZeroResource,
        expectedNumberOfContainersForMemory,
        expectedNumberOfContainersForCPU);
    // Zero required memory: memory ratio saturates to Integer.MAX_VALUE.
    Resource zeroMemoryResource = Resource.newInstance(0,
        nonZeroResource.getVirtualCores());
    verifyDifferentResourceTypes(clusterAvailableResources, zeroMemoryResource,
        Integer.MAX_VALUE,
        expectedNumberOfContainersForCPU);
    // Zero required CPU: memory alone limits the count in both modes.
    Resource zeroCpuResource = Resource.newInstance(
        nonZeroResource.getMemorySize(), 0);
    verifyDifferentResourceTypes(clusterAvailableResources, zeroCpuResource,
        expectedNumberOfContainersForMemory,
        expectedNumberOfContainersForMemory);
}
|
/**
 * Combines the supplied injector functions into one. Duplicates and NOOP
 * entries are discarded (insertion order preserved). If nothing remains,
 * the {@code existing} function is returned unchanged; a single survivor is
 * returned as-is; multiple survivors are wrapped in a composite.
 *
 * @throws NullPointerException if {@code update} or any element is null
 */
static InjectorFunction injectorFunction(InjectorFunction existing, InjectorFunction... update) {
  if (update == null) throw new NullPointerException("injectorFunctions == null");
  LinkedHashSet<InjectorFunction> distinct =
      new LinkedHashSet<InjectorFunction>(Arrays.asList(update));
  if (distinct.contains(null)) {
    throw new NullPointerException("injectorFunction == null");
  }
  distinct.remove(InjectorFunction.NOOP);
  switch (distinct.size()) {
    case 0:
      return existing;
    case 1:
      return distinct.iterator().next();
    default:
      return new CompositeInjectorFunction(distinct.toArray(new InjectorFunction[0]));
  }
}
|
@Test void injectorFunction_single() {
    // With exactly one non-NOOP update, the update itself is returned and
    // the existing function is ignored.
    InjectorFunction existing = mock(InjectorFunction.class);
    assertThat(injectorFunction(existing, two))
        .isSameAs(two);
}
|
/**
 * Parses an Apple ICNS icon file: verifies the "icns" magic, walks the
 * sub-icon table to collect icon and icon-mask entries, and publishes
 * counts/details as metadata. Emits an empty XHTML document.
 *
 * @throws TikaException on a bad signature or negative length
 * @throws TikaMemoryLimitException when the declared size exceeds the cap
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
        ParseContext context) throws IOException, SAXException, TikaException {
    byte[] header = new byte[4];
    IOUtils.read(stream, header, 0, 4); // Extract magic byte
    if (header[0] == (byte) 'i' && header[1] == (byte) 'c' && header[2] == (byte) 'n' &&
            header[3] == (byte) 's') {
        // Good, signature found
    } else {
        throw new TikaException("ICNS magic signature invalid");
    }
    IOUtils.read(stream, header, 0, 4); //Extract image size/length of bytes in file
    int image_length = java.nio.ByteBuffer.wrap(header).getInt();
    image_length -= 8;//for the bytes read so far
    if (image_length > MAX_IMAGE_LENGTH_BYTES) {
        throw new TikaMemoryLimitException(image_length, MAX_IMAGE_LENGTH_BYTES);
    } else if (image_length < 0) {
        throw new TikaException("image length must be >= 0");
    }
    byte[] full_file = new byte[image_length];
    IOUtils.readFully(stream, full_file);
    ArrayList<ICNSType> icons = new ArrayList<>();
    ArrayList<ICNSType> icon_masks = new ArrayList<>();
    byte[] tempByteArray = new byte[4];
    // Each entry is an 8-byte header (4-byte OSType + 4-byte big-endian
    // length, length includes the header) followed by the icon data.
    // The loop bound guarantees offset+7 stays inside full_file.
    for (int offset = 0; offset < image_length - 8; ) {
        //Read the ResType/OSTYpe identifier for sub-icon
        tempByteArray[0] = full_file[offset];
        tempByteArray[1] = full_file[offset + 1];
        tempByteArray[2] = full_file[offset + 2];
        tempByteArray[3] = full_file[offset + 3];
        ICNSType icnstype = findIconType(tempByteArray);
        if (icnstype == null) {
            //exit out of loop
            //No more icons left
            break;
        } else if (icnstype.hasMask() == true) {
            icon_masks.add(findIconType(tempByteArray));
        } else {
            icons.add(findIconType(tempByteArray));
        }
        //Read the sub-icon length
        tempByteArray[0] = full_file[offset + 4];
        tempByteArray[1] = full_file[offset + 5];
        tempByteArray[2] = full_file[offset + 6];
        tempByteArray[3] = full_file[offset + 7];
        int icon_length = java.nio.ByteBuffer.wrap(tempByteArray).getInt();
        // Guard against zero/negative lengths that would loop forever.
        if (icon_length <= 0) {
            break;
        }
        offset = offset + icon_length;
    }
    StringBuilder icon_details = new StringBuilder();
    StringBuilder iconmask_details = new StringBuilder();
    String bitsPerPixel;
    String dimensions;
    for (ICNSType icon : icons) {
        // 0 bpp marks compressed (JPEG 2000 / PNG) payloads; "@2X" marks
        // retina variants.
        bitsPerPixel = (icon.getBitsPerPixel() != 0) ? icon.getBitsPerPixel() + " bpp" :
                "JPEG 2000 or PNG format";
        dimensions = (!icon.hasRetinaDisplay()) ? (icon.getHeight() + "x" + icon.getWidth()) :
                (icon.getHeight() + "x" + icon.getWidth() + "@2X");
        icon_details.append(", ").append(dimensions).append(" (").append(bitsPerPixel).append(")");
    }
    for (ICNSType icon : icon_masks) {
        iconmask_details
                .append(", ")
                .append(icon.getHeight())
                .append("x")
                .append(icon.getWidth())
                .append(" (")
                .append(icon.getBitsPerPixel())
                .append(" bpp")
                .append(")");
    }
    metadata.set(Metadata.CONTENT_TYPE, ICNS_MIME_TYPE);
    // substring(2) strips the leading ", " separator from the first entry.
    if (!icon_details.toString().equals("")) {
        metadata.set("Icon count", String.valueOf(icons.size()));
        icon_details = new StringBuilder(icon_details.substring(2));
        metadata.set("Icon details", icon_details.toString());
    }
    if (!iconmask_details.toString().equals("")) {
        metadata.set("Masked icon count", String.valueOf(icon_masks.size()));
        iconmask_details = new StringBuilder(iconmask_details.substring(2));
        metadata.set("Masked icon details", iconmask_details.toString());
    }
    // No textual content: emit an empty, well-formed XHTML document.
    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    xhtml.endDocument();
}
|
@Test
public void testICNS() throws Exception {
    // NOTE(review): this test performs no assertions after parsing — it only
    // pre-populates metadata and checks the parser does not throw. Also the
    // keys set here ("Icons count"/"Icons details") do not match the keys the
    // parser writes ("Icon count"/"Icon details"); confirm intent and add
    // post-parse assertions.
    Metadata metadata = new Metadata();
    metadata.set(Metadata.CONTENT_TYPE, "image/icns");
    metadata.set("Icons count", "2");
    metadata.set("Icons details", "16x16 (24 bpp), 32x32 (24 bpp)");
    metadata.set("Masked icon count", "2");
    metadata.set("Masked icon details", "16x16 (8 bpp), 32x32 (8 bpp)");
    try (InputStream stream = getClass().getResourceAsStream("/test-documents/testICNS.icns")) {
        parser.parse(stream, new DefaultHandler(), metadata, new ParseContext());
    }
}
|
/**
 * Runs the duplication formula over the component tree: builds a
 * formula-executing visitor and crawls from the tree root.
 */
@Override
public void execute(ComputationStep.Context context) {
    new PathAwareCrawler<>(
            FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
                    .buildFor(List.of(duplicationFormula)))
            .visit(treeRootHolder.getRoot());
}
|
@Test
public void compute_no_new_duplicated_lines_density_when_no_lines() {
    // With no line data in the repository, the step must not produce any
    // new-duplicated-lines-density measure.
    underTest.execute(new TestComputationStepContext());
    assertNoRawMeasures(NEW_DUPLICATED_LINES_DENSITY_KEY);
}
|
/**
 * Sanitizes a key format for the new key schema. When a format change is
 * permitted, the format is first adjusted for the column count and then for
 * type compatibility; in all cases the wrapping is sanitized based on
 * whether the new key has exactly one column.
 */
public static KeyFormat sanitizeKeyFormat(
    final KeyFormat keyFormat,
    final List<SqlType> newKeyColumnSqlTypes,
    final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
  KeyFormat sanitized = keyFormat;
  if (allowKeyFormatChangeToSupportNewKeySchema) {
    sanitized = sanitizeKeyFormatForMultipleColumns(
        keyFormat, newKeyColumnSqlTypes.size());
    sanitized = sanitizeKeyFormatForTypeCompatibility(
        sanitized, newKeyColumnSqlTypes);
  }
  final boolean singleColumn = newKeyColumnSqlTypes.size() == 1;
  return sanitizeKeyFormatWrapping(sanitized, singleColumn);
}
|
@Test
public void shouldNotConvertFormatWhenSanitizingWithSingleColumnAndSupportedPrimitiveType() {
    // Given: a non-windowed KAFKA key format with no serde features
    final KeyFormat format = KeyFormat.nonWindowed(
        FormatInfo.of(KafkaFormat.NAME),
        SerdeFeatures.of());
    // When: sanitizing with a single supported primitive key column
    final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, SINGLE_SQL_TYPE, true);
    // Then: format and features are left untouched
    assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(KafkaFormat.NAME)));
    assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of()));
}
|
/**
 * Delegates to {@link Utils#isEmpty(String)}.
 *
 * @param val the string to check
 * @return the result of {@code Utils.isEmpty(val)}
 * @deprecated use {@link Utils#isEmpty(String)} directly; this is a
 *     compatibility shim that only forwards the call.
 */
@Deprecated
public static boolean isEmpty( String val ) {
  return Utils.isEmpty( val );
}
|
@Test
public void testReleaseType() {
    // Every release type must carry a non-empty message.
    for ( Const.ReleaseType type : Const.ReleaseType.values() ) {
        assertFalse( type.getMessage().isEmpty() );
    }
}
|
/**
 * Converts table-to-data-node mappings into job data node lines, one line
 * per data source, sorted by data source name so the ordering is stable
 * across invocations.
 */
public static List<JobDataNodeLine> convertDataNodesToLines(final Map<String, List<DataNode>> actualDataNodes) {
    // Sort by dataSourceName, make sure data node lines have the same ordering
    return groupDataSourceDataNodesMapByDataSourceName(actualDataNodes).entrySet().stream()
            .sorted(Entry.comparingByKey())
            .map(entry -> new JobDataNodeLine(getJobDataNodeEntries(entry.getValue())))
            .collect(Collectors.toList());
}
|
@Test
void assertConvertDataNodesToLinesWithMultipleDataSource() {
    // Nodes span two data sources; conversion must yield one line per data
    // source, ordered by data source name (ds_0 before ds_1).
    List<DataNode> dataNodes = Arrays.asList(new DataNode("ds_0", "t_order_0"), new DataNode("ds_0", "t_order_2"), new DataNode("ds_1", "t_order_1"), new DataNode("ds_1", "t_order_3"));
    List<JobDataNodeLine> jobDataNodeLines = JobDataNodeLineConvertUtils.convertDataNodesToLines(Collections.singletonMap("t_order", dataNodes));
    assertThat(jobDataNodeLines.size(), is(2));
    // ds_0 line keeps its tables in original order.
    JobDataNodeEntry jobDataNodeEntry = jobDataNodeLines.get(0).getEntries().iterator().next();
    assertThat(jobDataNodeEntry.getDataNodes().stream().map(DataNode::getTableName).collect(Collectors.toList()), is(Arrays.asList("t_order_0", "t_order_2")));
    // ds_1 line likewise.
    jobDataNodeEntry = jobDataNodeLines.get(1).getEntries().iterator().next();
    assertThat(jobDataNodeEntry.getDataNodes().stream().map(DataNode::getTableName).collect(Collectors.toList()), is(Arrays.asList("t_order_1", "t_order_3")));
}
|
/**
 * Writes the object to the underlying stream, flushes immediately, and then
 * applies the leak-prevention policy (presumably a periodic stream reset —
 * TODO confirm against preventMemoryLeak()).
 *
 * @throws IOException if serialization or flushing fails
 */
@Override
public void write(Object object) throws IOException {
    objectOutputStream.writeObject(object);
    objectOutputStream.flush();
    preventMemoryLeak();
}
|
@Test
public void flushesAfterWrite() throws IOException {
    // given
    ObjectWriter objectWriter = new AutoFlushingObjectWriter(objectOutputStream, 2);
    String object = "foo";
    // when
    objectWriter.write(object);
    // then: the write must be followed by a flush, in that order.
    // NOTE: writeObjectOverride is verified because the mocked
    // ObjectOutputStream subclass routes writeObject through it.
    InOrder inOrder = inOrder(objectOutputStream);
    inOrder.verify(objectOutputStream).writeObjectOverride(object);
    inOrder.verify(objectOutputStream).flush();
}
|
/**
 * Generates the next snowflake-style unique id. The id packs, from high to
 * low bits: timestamp delta since the epoch, datacenter id, worker id, and
 * a per-millisecond sequence. Synchronized because it mutates
 * lastTimestamp/sequence.
 *
 * @throws RuntimeException if the system clock moved backwards
 */
private synchronized long nextId() {
    long timestamp = timeGen();
    // A backwards clock would reuse timestamps and break uniqueness.
    if (timestamp < lastTimestamp) {
        throw new RuntimeException(String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
    }
    if (lastTimestamp == timestamp) {
        // Same millisecond: bump the sequence; on wrap-around, spin until
        // the next millisecond.
        sequence = (sequence + 1) & SEQUENCE_MASK;
        if (sequence == 0) {
            timestamp = tilNextMillis(lastTimestamp);
        }
    } else {
        // New millisecond: restart the sequence.
        sequence = 0L;
    }
    lastTimestamp = timestamp;
    // Compose the id from its bit fields.
    return ((timestamp - idepoch) << TIMESTAMP_LEFT_SHIFT)
            | (datacenterId << DATACENTER_ID_SHIFT)
            | (workerId << WORKER_ID_SHIFT) | sequence;
}
|
@Test
public void testNextId() throws Exception {
    // NOTE(review): this test only verifies nextId() does not throw when
    // lastTimestamp equals the current time; it asserts nothing about the
    // generated id. Consider asserting the returned value is positive/unique.
    UUIDUtils uuidUtils = UUIDUtils.getInstance();
    Class<?> uUIDUtilsClass = uuidUtils.getClass();
    // Force lastTimestamp to "now" via reflection to exercise the
    // same-millisecond branch.
    Field field = uUIDUtilsClass.getDeclaredField("lastTimestamp");
    field.setAccessible(true);
    field.set(uuidUtils, System.currentTimeMillis());
    Method method = uUIDUtilsClass.getDeclaredMethod("nextId");
    method.setAccessible(true);
    method.invoke(UUIDUtils.getInstance());
}
|
/**
 * Reads four bytes as an int and reinterprets the IEEE-754 bit pattern as
 * a float.
 *
 * @throws EOFException if the underlying stream is exhausted
 */
@Override
public float readFloat() throws EOFException {
    int bits = readInt();
    return Float.intBitsToFloat(bits);
}
|
@Test
public void testReadFloatByteOrder() throws Exception {
    // Reading little-endian must match manually decoding the little-endian
    // int from the fixture and converting its bit pattern to a float.
    double readFloat = in.readFloat(LITTLE_ENDIAN);
    int intB = Bits.readIntL(INIT_DATA, 0);
    double aFloat = Float.intBitsToFloat(intB);
    assertEquals(aFloat, readFloat, 0);
}
|
/**
 * Static factory: creates a RetryOperator backed by the given Retry.
 *
 * @param retry the retry policy to apply
 * @param <T> the type of the upstream elements
 * @return a new RetryOperator
 */
public static <T> RetryOperator<T> of(Retry retry) {
    return new RetryOperator<>(retry);
}
|
@Test
public void returnOnErrorUsingMono() {
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    RetryOperator<String> retryOperator = RetryOperator.of(retry);
    // The service always fails, so each subscription exhausts its retries.
    given(helloWorldService.returnHelloWorld())
        .willThrow(new HelloWorldException());
    StepVerifier.create(Mono.fromCallable(helloWorldService::returnHelloWorld)
        .transformDeferred(retryOperator))
        .expectSubscription()
        .expectError(HelloWorldException.class)
        .verify(Duration.ofSeconds(1));
    // Second subscription retries independently.
    StepVerifier.create(Mono.fromCallable(helloWorldService::returnHelloWorld)
        .transformDeferred(retryOperator))
        .expectSubscription()
        .expectError(HelloWorldException.class)
        .verify(Duration.ofSeconds(1));
    // 2 subscriptions x 3 attempts each = 6 service calls.
    then(helloWorldService).should(times(6)).returnHelloWorld();
    // Metrics count failed calls per subscription, not per attempt.
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(2);
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}
|
/**
 * Creates the DDL command for the given statement by dispatching on the
 * statement's concrete class via the FACTORIES map.
 *
 * @throws KsqlException if no factory is registered for the statement type
 */
@Override
public DdlCommand create(
    final String sqlExpression,
    final DdlStatement ddlStatement,
    final SessionConfig config
) {
    return FACTORIES
        // Fallback factory throws a descriptive error listing all supported
        // statement types.
        .getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
            throw new KsqlException(
                "Unable to find ddl command factory for statement:"
                    + statement.getClass()
                    + " valid statements:"
                    + FACTORIES.keySet()
            );
        })
        .handle(
            this,
            new CallInfo(sqlExpression, config),
            ddlStatement);
}
|
@Test
public void shouldCreateCommandForDropTable() {
    // Given: a DROP TABLE statement
    final DropTable ddlStatement = new DropTable(TABLE_NAME, true, true);
    // When: dispatched through the command factories
    final DdlCommand result = commandFactories
        .create(sqlExpression, ddlStatement, SessionConfig.of(ksqlConfig, emptyMap()));
    // Then: the drop-source factory handled it and produced its command.
    assertThat(result, is(dropSourceCommand));
    verify(dropSourceFactory).create(ddlStatement);
}
|
/**
 * Static factory for a resource-manager builder.
 *
 * @param testId identifier used to label resources created for this test
 * @return a new Builder for the given test id
 */
public static Builder builder(String testId) {
    return new Builder(testId);
}
|
@Test
public void testCreateResourceManagerBuilderReturnsSplunkResourceManager() {
    // Building with static-container settings must still produce a
    // SplunkResourceManager instance.
    assertThat(
        SplunkResourceManager.builder(TEST_ID)
            .setHecPort(DEFAULT_SPLUNK_HEC_INTERNAL_PORT)
            .setSplunkdPort(DEFAULT_SPLUNKD_INTERNAL_PORT)
            .setHost(HOST)
            .useStaticContainer()
            .build())
        .isInstanceOf(SplunkResourceManager.class);
}
|
/**
 * Validates that {@code parentId} may serve as the parent of menu
 * {@code childId}. A null or root parent is always allowed. Otherwise the
 * parent must not be the child itself, must exist, and must be of type DIR
 * or MENU.
 */
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
    if (parentId == null || ID_ROOT.equals(parentId)) {
        return;
    }
    // A menu cannot be its own parent.
    if (parentId.equals(childId)) {
        throw exception(MENU_PARENT_ERROR);
    }
    MenuDO menu = menuMapper.selectById(parentId);
    // The parent menu must exist.
    if (menu == null) {
        throw exception(MENU_PARENT_NOT_EXISTS);
    }
    // The parent must be a directory (DIR) or a menu (MENU) type.
    if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
            && !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
        throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
    }
}
|
@Test
public void testValidateParentMenu_parentTypeError() {
    // Mock data: insert a BUTTON-typed menu to act as the parent.
    MenuDO menuDO = buildMenuDO(MenuTypeEnum.BUTTON, "parent", 0L);
    menuMapper.insert(menuDO);
    // Prepare the parameter.
    Long parentId = menuDO.getId();
    // Call and assert the expected service exception: a BUTTON cannot be a
    // parent menu.
    assertServiceException(() -> menuService.validateParentMenu(parentId, null),
        MENU_PARENT_NOT_DIR_OR_MENU);
}
|
/**
 * Creates a SequenceFile writer whose concrete type matches the requested
 * (or configured default) compression kind.
 *
 * @param conf configuration used to resolve the default compression type
 * @param opts writer options; a compression option is prepended when absent
 * @return a Writer, RecordCompressWriter, or BlockCompressWriter
 * @throws IOException if the writer cannot be created
 */
public static Writer createWriter(Configuration conf, Writer.Option... opts
    ) throws IOException {
    Writer.CompressionOption compressionOption =
        Options.getOption(Writer.CompressionOption.class, opts);
    CompressionType kind;
    if (compressionOption != null) {
        kind = compressionOption.getValue();
    } else {
        // No explicit option: use the configured default and record it in the
        // options so downstream constructors see a consistent view.
        kind = getDefaultCompressionType(conf);
        opts = Options.prependOptions(opts, Writer.compression(kind));
    }
    switch (kind) {
    // Intentional fall-through: any unknown kind is treated as NONE.
    default:
    case NONE:
        return new Writer(conf, opts);
    case RECORD:
        return new RecordCompressWriter(conf, opts);
    case BLOCK:
        return new BlockCompressWriter(conf, opts);
    }
}
|
@Test
public void testSerializationUsingWritableNameAlias() throws IOException {
    Configuration config = new Configuration();
    config.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, SimpleSerializer.class.getName());
    Path path = new Path(System.getProperty("test.build.data", "."),
        "SerializationUsingWritableNameAlias");
    // write with the original serializable class
    SequenceFile.Writer writer = SequenceFile.createWriter(
        config,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(SimpleSerializable.class),
        SequenceFile.Writer.valueClass(SimpleSerializable.class));
    int max = 10;
    try {
        SimpleSerializable val = new SimpleSerializable();
        val.setId(-1);
        // Keys carry ids 0..max-1 so read order can be verified below.
        for (int i = 0; i < max; i++) {
            SimpleSerializable key = new SimpleSerializable();
            key.setId(i);
            writer.append(key, val);
        }
    } finally {
        writer.close();
    }
    // override name so it gets forced to the new serializable
    WritableName.setName(AnotherSimpleSerializable.class, SimpleSerializable.class.getName());
    // read and expect our new serializable, and all the correct values read
    SequenceFile.Reader reader = new SequenceFile.Reader(
        config,
        SequenceFile.Reader.file(path));
    AnotherSimpleSerializable key = new AnotherSimpleSerializable();
    int count = 0;
    while (true) {
        key = (AnotherSimpleSerializable) reader.next(key);
        if (key == null) {
            // make sure we exhausted all the ints we wrote
            assertEquals(count, max);
            break;
        }
        // Ids must come back in write order.
        assertEquals(count++, key.getId());
    }
}
|
/**
 * Builds one default Tars upstream per registered URI, collected into a
 * CopyOnWriteArrayList so the returned list is safe for concurrent readers.
 */
private List<TarsUpstream> buildTarsUpstreamList(final List<URIRegisterDTO> uriList) {
    List<TarsUpstream> upstreams = new CopyOnWriteArrayList<>();
    for (URIRegisterDTO dto : uriList) {
        upstreams.add(CommonUpstreamUtils.buildDefaultTarsUpstream(dto.getHost(), dto.getPort()));
    }
    return upstreams;
}
|
@Test
public void testBuildTarsUpstreamList() {
    // Two registered Tars URIs should produce two upstream entries.
    List<URIRegisterDTO> list = new ArrayList<>();
    list.add(URIRegisterDTO.builder().appName("test1").rpcType(RpcTypeEnum.TARS.getName()).host("localhost").port(8090).build());
    list.add(URIRegisterDTO.builder().appName("test2").rpcType(RpcTypeEnum.TARS.getName()).host("localhost").port(8091).build());
    try {
        // The method under test is private, so invoke it reflectively.
        Method testMethod = shenyuClientRegisterTarsService.getClass().getDeclaredMethod("buildTarsUpstreamList", List.class);
        testMethod.setAccessible(true);
        List<TarsUpstream> result = (List<TarsUpstream>) testMethod.invoke(shenyuClientRegisterTarsService, list);
        assertEquals(result.size(), 2);
    } catch (Exception e) {
        throw new ShenyuException(e.getCause());
    }
}
|
/**
 * Deep-clones the given object, choosing the cheapest safe strategy:
 * immutable Strings are returned as-is; collections/maps holding
 * non-Serializable elements are round-tripped through JSON with an explicit
 * parametric type (so generics survive); JsonNodes use their own deepCopy;
 * Serializable objects use Java serialization; everything else falls back
 * to a plain JSON round-trip.
 */
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        // Strings are immutable — no copy needed.
        return object;
    } else if (object instanceof Collection) {
        // Inspect one element to decide whether Java serialization can work.
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            // Either a non-serializable key or value forces the JSON path.
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        return (T) ((JsonNode) object).deepCopy();
    }
    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            //it is possible that object itself implements java.io.Serializable, but underlying structure does not
            //in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }
    return jsonClone(object);
}
|
@Test
public void should_clone_map_of_non_serializable_value() {
    // A map with a non-Serializable value must still be cloned (via the
    // JSON path), producing an equal but distinct instance.
    Map<String, NonSerializableObject> original = new HashMap<>();
    original.put("key", new NonSerializableObject("value"));
    Object cloned = serializer.clone(original);
    assertEquals(original, cloned);
    assertNotSame(original, cloned);
}
|
/**
 * Creates a Getter that extracts the given field, delegating type/element
 * resolution to the generic factory with the field's declared type and a
 * FieldGetter constructor.
 *
 * @throws Exception if the generic getter construction fails
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
    return newGetter(object, parent, modifier, field.getType(), field::get,
            (t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
|
@Test
public void newFieldGetter_whenExtractingFromNonEmpty_Array_nullFirst_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
        throws Exception {
    // The first inner element is null; the getter must skip it and infer the
    // element type (Integer) from the non-null inner object's attributes.
    OuterObject object = new OuterObject("name", null, new InnerObject("inner", 0, 1, 2, 3));
    Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersArrayField, "[any]");
    Getter innerObjectNameGetter = GetterFactory.newFieldGetter(object, parentGetter, innerAttributesArrayField, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
|
/**
 * Determines whether a heartbeat (or leave-group heartbeat) should be sent
 * now, returning the request(s) to send plus the time until the next poll.
 *
 * Order matters: coordinator/skip checks first, then poll-timer expiry
 * (which triggers a proactive leave), then the regular interval check.
 */
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
    // No coordinator known, or membership state says heartbeats are not
    // needed: nothing to send.
    if (!coordinatorRequestManager.coordinator().isPresent() ||
            membershipManager.shouldSkipHeartbeat()) {
        membershipManager.onHeartbeatRequestSkipped();
        return NetworkClientDelegate.PollResult.EMPTY;
    }
    pollTimer.update(currentTimeMs);
    // Poll timeout expired: the app stopped calling poll(), so proactively
    // leave the group (unless a leave is already underway).
    if (pollTimer.isExpired() && !membershipManager.isLeavingGroup()) {
        logger.warn("Consumer poll timeout has expired. This means the time between " +
                "subsequent calls to poll() was longer than the configured max.poll.interval.ms, " +
                "which typically implies that the poll loop is spending too much time processing " +
                "messages. You can address this either by increasing max.poll.interval.ms or by " +
                "reducing the maximum size of batches returned in poll() with max.poll.records.");
        membershipManager.transitionToSendingLeaveGroup(true);
        NetworkClientDelegate.UnsentRequest leaveHeartbeat = makeHeartbeatRequest(currentTimeMs, true);
        // We can ignore the leave response because we can join before or after receiving the response.
        heartbeatRequestState.reset();
        heartbeatState.reset();
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(leaveHeartbeat));
    }
    // Case 1: The member is leaving
    boolean heartbeatNow = membershipManager.state() == MemberState.LEAVING ||
            // Case 2: The member state indicates it should send a heartbeat without waiting for the interval, and there is no heartbeat request currently in-flight
            (membershipManager.shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight());
    // Neither the interval nor an urgent condition permits sending yet.
    if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) {
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
    }
    NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false);
    return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
}
|
@Test
public void testSuccessfulHeartbeatTiming() {
    // Before the interval expires, poll() must not emit a heartbeat.
    NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(0, result.unsentRequests.size(),
        "No heartbeat should be sent while interval has not expired");
    assertEquals(heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()), result.timeUntilNextPollMs);
    assertNextHeartbeatTiming(DEFAULT_HEARTBEAT_INTERVAL_MS);
    // After advancing past the interval, exactly one heartbeat is emitted.
    result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(1, result.unsentRequests.size(), "A heartbeat should be sent when interval expires");
    NetworkClientDelegate.UnsentRequest inflightReq = result.unsentRequests.get(0);
    assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS,
        heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()),
        "Heartbeat timer was not reset to the interval when the heartbeat request was sent.");
    // Part-way through the next interval: still no new heartbeat, and the
    // remaining time is reported correctly.
    long partOfInterval = DEFAULT_HEARTBEAT_INTERVAL_MS / 3;
    time.sleep(partOfInterval);
    result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(0, result.unsentRequests.size(),
        "No heartbeat should be sent while only part of the interval has passed");
    assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS - partOfInterval,
        heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()),
        "Time to next interval was not properly updated.");
    // Completing the in-flight response must not disturb the schedule.
    inflightReq.handler().onComplete(createHeartbeatResponse(inflightReq, Errors.NONE));
    assertNextHeartbeatTiming(DEFAULT_HEARTBEAT_INTERVAL_MS - partOfInterval);
}
|
/**
 * Unwraps a protobuf ServiceException thrown by the RPC layer and rethrows
 * the most specific underlying exception: the remote exception's real class
 * when it is a YarnException/IOException/RuntimeException subtype, the
 * RemoteException itself otherwise, or the raw cause / a wrapping
 * IOException for local failures. Never returns normally.
 */
public static Void unwrapAndThrowException(ServiceException se)
        throws IOException, YarnException {
    Throwable cause = se.getCause();
    if (cause == null) {
        // SE generated by the RPC layer itself.
        throw new IOException(se);
    } else {
        if (cause instanceof RemoteException) {
            RemoteException re = (RemoteException) cause;
            Class<?> realClass = null;
            try {
                realClass = Class.forName(re.getClassName());
            } catch (ClassNotFoundException cnf) {
                // Assume this to be a new exception type added to YARN. This isn't
                // absolutely correct since the RPC layer could add an exception as
                // well.
                throw instantiateYarnException(YarnException.class, re);
            }
            // Re-instantiate the remote exception as its concrete local type
            // where the type hierarchy allows it.
            if (YarnException.class.isAssignableFrom(realClass)) {
                throw instantiateYarnException(
                        realClass.asSubclass(YarnException.class), re);
            } else if (IOException.class.isAssignableFrom(realClass)) {
                throw instantiateIOException(realClass.asSubclass(IOException.class),
                        re);
            } else if (RuntimeException.class.isAssignableFrom(realClass)) {
                throw instantiateRuntimeException(
                        realClass.asSubclass(RuntimeException.class), re);
            } else {
                throw re;
            }
            // RemoteException contains useful information as against the
            // java.lang.reflect exceptions.
        } else if (cause instanceof IOException) {
            // RPC Client exception.
            throw (IOException) cause;
        } else if (cause instanceof RuntimeException) {
            // RPC RuntimeException
            throw (RuntimeException) cause;
        } else {
            // Should not be generated.
            throw new IOException(se);
        }
    }
}
|
@Test
void testRPCRuntimeExceptionUnwrapping() {
    // A RuntimeException cause must be rethrown as-is (same type, same
    // message) rather than wrapped.
    String message = "RPCRuntimeExceptionUnwrapping";
    RuntimeException re = new NullPointerException(message);
    ServiceException se = new ServiceException(re);
    Throwable t = null;
    try {
        RPCUtil.unwrapAndThrowException(se);
    } catch (Throwable thrown) {
        t = thrown;
    }
    assertTrue(NullPointerException.class.isInstance(t));
    assertTrue(t.getMessage().contains(message));
}
|
/**
 * Returns the requested checksum (MD5, SHA1, or SHA256) of the given file
 * as a hex string. All three digests are computed in a single pass over the
 * file and cached per File, so subsequent calls for any algorithm are free.
 *
 * @param algorithm the checksum algorithm name (case-insensitive)
 * @param file the file to checksum
 * @return the hex-encoded checksum
 * @throws NoSuchAlgorithmException if the algorithm is not MD5/SHA1/SHA256
 * @throws IOException if the file cannot be read
 */
public static String getChecksum(String algorithm, File file) throws NoSuchAlgorithmException, IOException {
    FileChecksums fileChecksums = CHECKSUM_CACHE.get(file);
    if (fileChecksums == null) {
        try (InputStream stream = Files.newInputStream(file.toPath())) {
            final MessageDigest md5Digest = getMessageDigest(MD5);
            final MessageDigest sha1Digest = getMessageDigest(SHA1);
            final MessageDigest sha256Digest = getMessageDigest(SHA256);
            final byte[] buffer = new byte[BUFFER_SIZE];
            int read = stream.read(buffer, 0, BUFFER_SIZE);
            while (read > -1) {
                // update all checksums together instead of reading the file multiple times
                md5Digest.update(buffer, 0, read);
                sha1Digest.update(buffer, 0, read);
                sha256Digest.update(buffer, 0, read);
                read = stream.read(buffer, 0, BUFFER_SIZE);
            }
            fileChecksums = new FileChecksums(
                    getHex(md5Digest.digest()),
                    getHex(sha1Digest.digest()),
                    getHex(sha256Digest.digest())
            );
            CHECKSUM_CACHE.put(file, fileChecksums);
        }
    }
    // Use Locale.ROOT: the default-locale toUpperCase() is locale-sensitive
    // (e.g. Turkish dotted/dotless I) and could mis-map algorithm names.
    switch (algorithm.toUpperCase(java.util.Locale.ROOT)) {
        case MD5:
            return fileChecksums.md5;
        case SHA1:
            return fileChecksums.sha1;
        case SHA256:
            return fileChecksums.sha256;
        default:
            throw new NoSuchAlgorithmException(algorithm);
    }
}
|
@Test
public void testGetChecksum_String_byteArr() {
    // SHA1 of a fixed byte array must match the known hex digest.
    String algorithm = "SHA1";
    byte[] bytes = {-16, -111, 92, 95, 70, -72, -49, -94, -125, -27, -83, 103, -96, -101, 55, -109};
    String expResult = "89268a389a97f0bfba13d3ff2370d8ad436e36f6";
    String result = Checksum.getChecksum(algorithm, bytes);
    assertEquals(expResult, result);
}
|
/**
 * Validates an update of an existing source config against the new config
 * and returns a merged copy. Identity fields (tenant, namespace, name),
 * processing guarantees, and regular-vs-batch nature are immutable and
 * raise IllegalArgumentException when changed; all other non-empty/non-null
 * fields of the new config overwrite the existing ones.
 */
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
    // Start from a copy so the existing config is never mutated.
    SourceConfig mergedConfig = clone(existingConfig);
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    // String fields: only non-empty values override.
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getTopicName())) {
        mergedConfig.setTopicName(newConfig.getTopicName());
    }
    if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
        mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
        mergedConfig.setSchemaType(newConfig.getSchemaType());
    }
    // Object fields: only non-null values override.
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Processing guarantees may be restated but never changed.
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    // Resources are merged field-by-field rather than replaced wholesale.
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    // A source cannot switch between regular and batch modes.
    if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
        throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
    }
    if (newConfig.getBatchSourceConfig() != null) {
        validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
        mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
|
@Test
public void testBatchConfigMergeEqual() {
    // Merging two identical batch-source configs must yield a config equal
    // to the original (compared via JSON to cover all fields).
    SourceConfig sourceConfig = createSourceConfigWithBatch();
    SourceConfig newSourceConfig = createSourceConfigWithBatch();
    SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
    assertEquals(
        new Gson().toJson(sourceConfig),
        new Gson().toJson(mergedConfig)
    );
}
|
/**
 * Reads the symbols recorded for the given component.
 * Lazily initializes the underlying report reader before delegating to it.
 *
 * @param componentRef report reference of the component to read
 * @return an iterator over the component's recorded symbols
 */
@Override
public CloseableIterator<ScannerReport.Symbol> readComponentSymbols(int componentRef) {
    ensureInitialized();
    return delegate.readComponentSymbols(componentRef);
}
|
// Two reads of the same component must return distinct iterator instances:
// symbol results are re-read from the report, not cached.
@Test
public void readComponentSymbols_it_not_cached() {
    writer.writeComponentSymbols(COMPONENT_REF, of(SYMBOL));
    assertThat(underTest.readComponentSymbols(COMPONENT_REF)).isNotSameAs(underTest.readComponentSymbols(COMPONENT_REF));
}
|
/**
 * Returns the StarRocks representation of a Paimon table, converting and
 * caching it on first access.
 *
 * @param dbName  database name in the Paimon catalog
 * @param tblName table name in the Paimon catalog
 * @return the converted table, or {@code null} if it does not exist in Paimon
 */
@Override
public Table getTable(String dbName, String tblName) {
    Identifier identifier = new Identifier(dbName, tblName);
    // Serve from the local cache when this table was already converted.
    if (tables.containsKey(identifier)) {
        return tables.get(identifier);
    }
    org.apache.paimon.table.Table paimonNativeTable;
    try {
        paimonNativeTable = this.paimonNativeCatalog.getTable(identifier);
    } catch (Catalog.TableNotExistException e) {
        // Missing tables are reported as null rather than propagated.
        LOG.error("Paimon table {}.{} does not exist.", dbName, tblName, e);
        return null;
    }
    // Convert the Paimon row type into StarRocks columns. Note every column is
    // created as nullable (third ctor arg), regardless of the Paimon type's
    // own nullability.
    List<DataField> fields = paimonNativeTable.rowType().getFields();
    ArrayList<Column> fullSchema = new ArrayList<>(fields.size());
    for (DataField field : fields) {
        String fieldName = field.name();
        DataType type = field.type();
        Type fieldType = ColumnTypeConverter.fromPaimonType(type);
        Column column = new Column(fieldName, fieldType, true, field.description());
        fullSchema.add(column);
    }
    long createTime = this.getTableCreateTime(dbName, tblName);
    // Paimon table comments are optional; default to empty string.
    String comment = "";
    if (paimonNativeTable.comment().isPresent()) {
        comment = paimonNativeTable.comment().get();
    }
    PaimonTable table = new PaimonTable(this.catalogName, dbName, tblName, fullSchema, paimonNativeTable, createTime);
    table.setComment(comment);
    // Cache for subsequent lookups of the same identifier.
    tables.put(identifier, table);
    return table;
}
|
// Builds a PaimonTable through the metadata layer from a mocked native Paimon
// table and verifies schema conversion, partition keys, location and UUID.
@Test
public void testGetTable(@Mocked FileStoreTable paimonNativeTable) throws Catalog.TableNotExistException {
    List<DataField> fields = new ArrayList<>();
    fields.add(new DataField(1, "col2", new IntType(true)));
    fields.add(new DataField(2, "col3", new DoubleType(false)));
    // Pin the table create time to 0 so the UUID assertion below is deterministic.
    new MockUp<PaimonMetadata>() {
        @Mock
        public long getTableCreateTime(String dbName, String tblName) {
            return 0L;
        }
    };
    new Expectations() {
        {
            paimonNativeCatalog.getTable((Identifier) any);
            result = paimonNativeTable;
            paimonNativeTable.rowType().getFields();
            result = fields;
            paimonNativeTable.partitionKeys();
            result = new ArrayList<>(Collections.singleton("col1"));
            paimonNativeTable.location().toString();
            result = "hdfs://127.0.0.1:10000/paimon";
        }
    };
    com.starrocks.catalog.Table table = metadata.getTable("db1", "tbl1");
    PaimonTable paimonTable = (PaimonTable) table;
    Assert.assertEquals("db1", paimonTable.getDbName());
    Assert.assertEquals("tbl1", paimonTable.getTableName());
    Assert.assertEquals(Lists.newArrayList("col1"), paimonTable.getPartitionColumnNames());
    Assert.assertEquals("hdfs://127.0.0.1:10000/paimon", paimonTable.getTableLocation());
    Assert.assertEquals(ScalarType.INT, paimonTable.getBaseSchema().get(0).getType());
    Assert.assertTrue(paimonTable.getBaseSchema().get(0).isAllowNull());
    Assert.assertEquals(ScalarType.DOUBLE, paimonTable.getBaseSchema().get(1).getType());
    // Columns are exposed as nullable even for the non-null DoubleType above.
    Assert.assertTrue(paimonTable.getBaseSchema().get(1).isAllowNull());
    Assert.assertEquals("paimon_catalog", paimonTable.getCatalogName());
    Assert.assertEquals("paimon_catalog.db1.tbl1.0", paimonTable.getUUID());
}
|
/**
 * Handles a streaming-query HTTP request.
 * Rejects the call until the server is configured, records request activity,
 * parses the statement, waits for the command topic to catch up to the
 * request's command sequence number, then dispatches the statement.
 *
 * @param securityContext        caller's security context
 * @param request                the KSQL request (statement + optional sequence number)
 * @param connectionClosedFuture completed when the client connection closes
 * @param isInternalRequest      whether the request originated from another node
 * @param metricsCallbackHolder  holder for per-request metrics callbacks
 * @param context                Vert.x context for the request
 * @return the endpoint response for the streamed query
 */
public EndpointResponse streamQuery(
    final KsqlSecurityContext securityContext,
    final KsqlRequest request,
    final CompletableFuture<Void> connectionClosedFuture,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context
) {
    throwIfNotConfigured();
    activenessRegistrar.updateLastRequestTime();
    final PreparedStatement<?> statement = parseStatement(request);
    // Block until this node has consumed the command topic up to the
    // sequence number the request was issued against.
    CommandStoreUtil.httpWaitForCommandSequenceNumber(
        commandQueue, request, commandQueueCatchupTimeout);
    return handleStatement(securityContext, request, statement, connectionClosedFuture,
        isInternalRequest, metricsCallbackHolder, context);
}
|
// A request carrying command sequence number 3 must make the endpoint wait
// for the command queue to consume past that offset before handling.
@Test
public void shouldWaitIfCommandSequenceNumberSpecified() throws Exception {
    // When:
    testResource.streamQuery(
        securityContext,
        new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), Collections.emptyMap(), 3L),
        new CompletableFuture<>(),
        Optional.empty(),
        new MetricsCallbackHolder(),
        context
    );
    // Then:
    verify(commandQueue).ensureConsumedPast(eq(3L), any());
}
|
/**
 * Applies the given keyboard theme resources to this view.
 * Stores colors/icons for later drawing, shows or hides the media-insertion
 * button based on the supported media types, and installs the keyboard
 * drawable as the view background.
 */
public void setThemeValues(
    @NonNull KeyboardTheme keyboardTheme,
    float tabTextSize,
    ColorStateList tabTextColor,
    Drawable closeKeyboardIcon,
    Drawable backspaceIcon,
    Drawable settingsIcon,
    Drawable keyboardDrawable,
    Drawable mediaInsertionDrawable,
    Drawable deleteRecentlyUsedDrawable,
    int bottomPadding,
    Set<MediaType> supportedMediaTypes) {
  mKeyboardTheme = keyboardTheme;
  mTabTitleTextSize = tabTextSize;
  mTabTitleTextColor = tabTextColor;
  mCloseKeyboardIcon = closeKeyboardIcon;
  mBackspaceIcon = backspaceIcon;
  mSettingsIcon = settingsIcon;
  mMediaInsertionDrawable = mediaInsertionDrawable;
  mDeleteRecentlyUsedDrawable = deleteRecentlyUsedDrawable;
  mBottomPadding = bottomPadding;
  // Hide the media-insertion entry point entirely when no media type is supported.
  findViewById(R.id.quick_keys_popup_quick_keys_insert_media)
      .setVisibility(supportedMediaTypes.isEmpty() ? View.GONE : VISIBLE);
  // keyboardDrawable is not stored in a field; it is consumed here as the
  // background. setBackgroundDrawable is the pre-JELLY_BEAN deprecated API.
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
    setBackground(keyboardDrawable);
  } else {
    setBackgroundDrawable(keyboardDrawable);
  }
}
|
// The media-insertion button must be VISIBLE when at least one media type is
// supported and GONE when the supported set is empty.
@Test
public void testShowMediaIcon() throws Exception {
    Context context = getApplicationContext();
    // First pass: one supported media type -> button visible.
    mUnderTest.setThemeValues(
        mKeyboardTheme,
        10f,
        new ColorStateList(new int[][] {{0}}, new int[] {Color.WHITE}),
        context.getDrawable(R.drawable.ic_cancel),
        context.getDrawable(R.drawable.sym_keyboard_delete_light),
        context.getDrawable(R.drawable.ic_action_settings),
        context.getDrawable(R.drawable.dark_background),
        context.getDrawable(R.drawable.ic_media_insertion),
        context.getDrawable(R.drawable.ic_delete_forever_dark),
        10,
        Collections.singleton(MediaType.Image));
    Assert.assertEquals(
        View.VISIBLE,
        mUnderTest.findViewById(R.id.quick_keys_popup_quick_keys_insert_media).getVisibility());
    // Second pass: no supported media types -> button gone.
    mUnderTest.setThemeValues(
        mKeyboardTheme,
        10f,
        new ColorStateList(new int[][] {{0}}, new int[] {Color.WHITE}),
        context.getDrawable(R.drawable.ic_cancel),
        context.getDrawable(R.drawable.sym_keyboard_delete_light),
        context.getDrawable(R.drawable.ic_action_settings),
        context.getDrawable(R.drawable.dark_background),
        context.getDrawable(R.drawable.ic_media_insertion),
        context.getDrawable(R.drawable.ic_delete_forever_dark),
        10,
        Collections.emptySet());
    Assert.assertEquals(
        View.GONE,
        mUnderTest.findViewById(R.id.quick_keys_popup_quick_keys_insert_media).getVisibility());
}
|
@Udf(description = "Returns the hyperbolic cosine of an INT value")
public Double cosh(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the hyperbolic cosine of."
    ) final Integer value
) {
  // Widen to Double (null stays null) and delegate to the DOUBLE overload.
  final Double widened = (value == null) ? null : value.doubleValue();
  return cosh(widened);
}
|
// cosh is an even function, so large negative inputs yield large positive
// results; the last two asserts exercise the INT and BIGINT overloads.
@Test
public void shouldHandleLessThanNegative2Pi() {
    assertThat(udf.cosh(-9.1), closeTo(4477.646407574158, 0.000000000000001));
    assertThat(udf.cosh(-6.3), closeTo(272.286873215353, 0.000000000000001));
    assertThat(udf.cosh(-7), closeTo(548.317035155212, 0.000000000000001));
    assertThat(udf.cosh(-7L), closeTo(548.317035155212, 0.000000000000001));
}
|
/**
 * Parses the given command-line arguments.
 * Varargs convenience overload that delegates to the {@code List}-based parser.
 *
 * @param args raw argument strings
 * @return a builder reflecting the parsed options
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
|
// Passing --i18n with a language code should print the localized keyword
// table; spot-check the first row (the "feature" keyword synonyms).
@Test
void prints_supported_keywords() {
    parser.parse("--i18n", "en");
    assertThat(output(), startsWith(" | feature | \"Feature\", \"Business Need\", \"Ability\" |"));
}
|
/**
 * Registers the RocksDB "estimated memory of table readers" gauge metric.
 *
 * @param streamsMetrics the Streams metrics registry to add the metric to
 * @param metricContext  identifies the RocksDB instance the metric belongs to
 * @param valueProvider  gauge supplying the current estimate in bytes
 */
public static void addEstimateTableReadersMemMetric(final StreamsMetricsImpl streamsMetrics,
                                                    final RocksDBMetricContext metricContext,
                                                    final Gauge<BigInteger> valueProvider) {
    addMutableMetric(
        streamsMetrics,
        metricContext,
        valueProvider,
        ESTIMATED_MEMORY_OF_TABLE_READERS,
        ESTIMATED_MEMORY_OF_TABLE_READERS_DESCRIPTION
    );
}
|
// Verifies that the metric is registered under the expected name and
// description via the shared mutable-metric verification helper.
@Test
public void shouldAddEstimateTableReadersMemMetric() {
    final String name = "estimate-table-readers-mem";
    final String description =
        "Estimated memory in bytes used for reading SST tables, excluding memory used in block cache";
    runAndVerifyMutableMetric(
        name,
        description,
        () -> RocksDBMetrics.addEstimateTableReadersMemMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
    );
}
|
/**
 * Builds the physical stream for this output node: first materializes the
 * upstream plan, then sinks its result into this node's output topic.
 *
 * @param buildContext plan build context
 * @return the stream written into the output topic
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    final SchemaKStream<?> upstream = getSource().buildStream(buildContext);
    final QueryContext.Stacker stacker = buildContext.buildNodeContext(getId().toString());
    return upstream.into(ksqlTopic, stacker, getTimestampColumn());
}
|
// Building the output node's stream should forward to SchemaKStream.into with
// this node's topic and a query context rooted at the node id ("0"), and
// return the sink stream unchanged.
@Test
public void shouldCallInto() {
    // When:
    final SchemaKStream<?> result = outputNode.buildStream(planBuildContext);
    // Then:
    verify(sourceStream).into(
        eq(ksqlTopic),
        stackerCaptor.capture(),
        eq(outputNode.getTimestampColumn())
    );
    assertThat(
        stackerCaptor.getValue().getQueryContext().getContext(),
        equalTo(ImmutableList.of("0"))
    );
    assertThat(result, sameInstance(sinkStream));
}
|
/**
 * Creates the KSQL client used for internal (node-to-node) requests.
 *
 * @param clientProps          internal client configuration properties
 * @param socketAddressFactory factory producing socket addresses from (port, host)
 * @param vertx                shared Vert.x instance
 * @return a client configured for both HTTP/1.1 and HTTP/2
 */
public static KsqlClient createInternalClient(
    final Map<String, String> clientProps,
    final BiFunction<Integer, String, SocketAddress> socketAddressFactory,
    final Vertx vertx) {
    final String internalClientAuth = clientProps.get(
        KsqlRestConfig.KSQL_INTERNAL_SSL_CLIENT_AUTHENTICATION_CONFIG);
    // Only verify the server host when client authentication is explicitly
    // configured to something other than NONE.
    final boolean verifyHost = !Strings.isNullOrEmpty(internalClientAuth)
        && !KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_NONE.equals(internalClientAuth);
    return new KsqlClient(
        Optional.empty(),
        new LocalProperties(ImmutableMap.of()),
        httpOptionsFactory(clientProps, verifyHost, InternalKsqlClientFactory::createClientOptions),
        httpOptionsFactory(clientProps, verifyHost,
            InternalKsqlClientFactory::createClientOptionsHttp2),
        socketAddressFactory,
        vertx
    );
}
|
// Smoke test: the factory should produce a non-null client from empty properties.
@Test
public void shouldCreateClient() {
    // When:
    final KsqlClient created = InternalKsqlClientFactory.createInternalClient(
        ImmutableMap.of(),
        SocketAddress::inetSocketAddress,
        vertx);
    // Then:
    assertThat(created, notNullValue());
}
|
/**
 * Fits an RBF network on the given data with the default (non-normalized)
 * output layer.
 *
 * @param x   training samples
 * @param y   response values
 * @param rbf radial basis functions, one per hidden unit
 * @return the fitted network
 */
public static <T> RBFNetwork<T> fit(T[] x, double[] y, RBF<T>[] rbf) {
    return fit(x, y, rbf, false);
}
|
// Regression benchmark on the bank32nh dataset: 10-fold cross validation of
// an RBF network; the RMSE is pinned via a fixed random seed.
@Test
public void testBank32nh() {
    System.out.println("bank32nh");
    MathEx.setSeed(19650218); // to get repeatable results.
    double[][] x = MathEx.clone(Bank32nh.x);
    MathEx.standardize(x);
    RegressionValidations<RBFNetwork<double[]>> result = CrossValidation.regression(10, x, Bank32nh.y,
        (xi, yi) -> RBFNetwork.fit(xi, yi, RBF.fit(xi, 20, 5.0)));
    System.out.println(result);
    assertEquals(0.0869, result.avg.rmse, 1E-4);
}
|
/**
 * Authenticates a user, first against the built-in user store and then,
 * on failure, against LDAP.
 * When LDAP login succeeds but no local shadow account exists, one is created
 * with a fixed placeholder password (real credentials stay in LDAP).
 *
 * @param username    login name; lowercased when case-insensitive mode is on
 * @param rawPassword plaintext password to verify
 * @return the authenticated user with a freshly issued JWT token
 * @throws AccessException if the username is blank or both auth paths fail
 */
@Override
public NacosUser authenticate(String username, String rawPassword) throws AccessException {
    if (StringUtils.isBlank(username)) {
        throw new AccessException("user not found!");
    }
    if (!caseSensitive) {
        username = username.toLowerCase();
    }
    // First try the internal user database; on failure fall through to LDAP.
    try {
        return super.authenticate(username, rawPassword);
    } catch (AccessException | UsernameNotFoundException ignored) {
        if (Loggers.AUTH.isWarnEnabled()) {
            Loggers.AUTH.warn("try login with LDAP, user: {}", username);
        }
    }
    UserDetails userDetails;
    try {
        if (!ldapLogin(username, rawPassword)) {
            throw new AccessException("LDAP login failed.");
        }
        userDetails = userDetailsService.loadUserByUsername(AuthConstants.LDAP_PREFIX + username);
    } catch (UsernameNotFoundException exception) {
        // LDAP accepted the credentials but no local record exists yet:
        // auto-provision the prefixed shadow user.
        String ldapUsername = AuthConstants.LDAP_PREFIX + username;
        userDetailsService.createUser(ldapUsername, AuthConstants.LDAP_DEFAULT_ENCODED_PASSWORD);
        User user = new User();
        user.setUsername(ldapUsername);
        user.setPassword(AuthConstants.LDAP_DEFAULT_ENCODED_PASSWORD);
        userDetails = new NacosUserDetails(user);
    } catch (Exception e) {
        // Any other LDAP failure (including the AccessException above) is
        // reported to the caller as a generic not-found.
        Loggers.AUTH.error("[LDAP-LOGIN] failed", e);
        throw new AccessException("user not found");
    }
    return new NacosUser(userDetails.getUsername(), jwtTokenManager.createToken(userDetails.getUsername()));
}
|
// When the user-details service resolves the (LDAP-prefixed) user, the
// authenticated NacosUser should carry that user's name.
@Test
void testLdapAuthenticate() throws AccessException {
    NacosUserDetails nacosUserDetails = new NacosUserDetails(user);
    when(userDetailsService.loadUserByUsername(anyString())).thenReturn(nacosUserDetails);
    NacosUser authenticate = ldapAuthenticationManager.authenticate("nacos", "test");
    assertEquals(user.getUsername(), authenticate.getUserName());
}
|
/**
 * Creates the aggregation unit implementation for the given aggregation type.
 *
 * @param type       aggregation type
 * @param isDistinct whether the aggregation is over DISTINCT values
 * @return the matching aggregation unit
 * @throws UnsupportedSQLOperationException if the type has no implementation
 */
public static AggregationUnit create(final AggregationType type, final boolean isDistinct) {
    final AggregationUnit result;
    switch (type) {
        case MAX:
            result = new ComparableAggregationUnit(false);
            break;
        case MIN:
            result = new ComparableAggregationUnit(true);
            break;
        case SUM:
            result = isDistinct ? new DistinctSumAggregationUnit() : new AccumulationAggregationUnit();
            break;
        case COUNT:
            result = isDistinct ? new DistinctCountAggregationUnit() : new AccumulationAggregationUnit();
            break;
        case AVG:
            result = isDistinct ? new DistinctAverageAggregationUnit() : new AverageAggregationUnit();
            break;
        case BIT_XOR:
            result = new BitXorAggregationUnit();
            break;
        default:
            throw new UnsupportedSQLOperationException(type.name());
    }
    return result;
}
|
// Both MAX and MIN map onto the comparison-based aggregation unit.
@Test
void assertCreateComparableAggregationUnit() {
    assertThat(AggregationUnitFactory.create(AggregationType.MAX, false), instanceOf(ComparableAggregationUnit.class));
    assertThat(AggregationUnitFactory.create(AggregationType.MIN, false), instanceOf(ComparableAggregationUnit.class));
}
|
/**
 * Checks whether the contract deployed at {@code contractAddress} matches the
 * binary embedded in this wrapper. The on-chain code is compared after
 * stripping any Solidity metadata-hash suffix; because the wrapper binary may
 * contain multiple contracts, only containment of the deployed code is checked.
 *
 * @return {@code true} if the deployed code is non-empty and contained in the
 *         wrapper binary; {@code false} if the code could not be fetched or differs
 * @throws IOException                   on node communication errors
 * @throws UnsupportedOperationException if the wrapper has no binary or no address
 */
public boolean isValid() throws IOException {
    if (contractBinary.equals(BIN_NOT_PROVIDED)) {
        throw new UnsupportedOperationException(
                "Contract binary not present in contract wrapper, "
                        + "please generate your wrapper using -abiFile=<file>");
    }
    if (contractAddress.isEmpty()) {
        // Fixed message: this guard is about the missing *address* (the binary
        // check above already passed), not the binary.
        throw new UnsupportedOperationException(
                "Contract address not present, you will need to regenerate your smart "
                        + "contract wrapper with web3j v2.2.0+");
    }
    EthGetCode ethGetCode =
            transactionManager.getCode(contractAddress, DefaultBlockParameterName.LATEST);
    if (ethGetCode.hasError()) {
        return false;
    }
    String code = cleanHexPrefix(ethGetCode.getCode());
    // Strip the compiler's metadata hash so it does not break the comparison.
    int metadataIndex = -1;
    for (String metadataIndicator : METADATA_HASH_INDICATORS) {
        metadataIndex = code.indexOf(metadataIndicator);
        if (metadataIndex != -1) {
            code = code.substring(0, metadataIndex);
            break;
        }
    }
    // There may be multiple contracts in the Solidity bytecode, hence we only check for a
    // match with a subset
    return !code.isEmpty() && contractBinary.contains(code);
}
|
// With on-chain code identical to the wrapper binary, isValid() must pass.
@Test
public void testIsValid() throws Exception {
    prepareEthGetCode(TEST_CONTRACT_BINARY);
    Contract contract = deployContract(createTransactionReceipt());
    assertTrue(contract.isValid());
}
|
/**
 * Revises an actual-table index back to its logic form for a sharded table.
 * The logic index name is derived using the first actual data node's table
 * name; columns and uniqueness are carried over unchanged.
 *
 * @return the revised index metadata, or empty when the sharding table has no
 *         actual data nodes
 */
@Override
public Optional<IndexMetaData> revise(final String tableName, final IndexMetaData originalMetaData, final ShardingRule rule) {
    if (shardingTable.getActualDataNodes().isEmpty()) {
        return Optional.empty();
    }
    IndexMetaData result = new IndexMetaData(IndexMetaDataUtils.getLogicIndexName(originalMetaData.getName(), shardingTable.getActualDataNodes().iterator().next().getTableName()));
    result.getColumns().addAll(originalMetaData.getColumns());
    result.setUnique(originalMetaData.isUnique());
    return Optional.of(result);
}
|
// Revising an index of an actual table should keep the logic index name,
// preserve columns and the (non-)unique flag.
@Test
void assertRevise() {
    shardingRule = createShardingRule();
    ShardingTable shardingTable = mock(ShardingTable.class);
    when(shardingTable.getActualDataNodes()).thenReturn(Arrays.asList(new DataNode("SCHEMA_NAME", "TABLE_NAME_0"), new DataNode("SCHEMA_NAME", "TABLE_NAME_1")));
    shardingIndexReviser = new ShardingIndexReviser(shardingTable);
    IndexMetaData originalMetaData = new IndexMetaData("TEST_INDEX");
    originalMetaData.getColumns().add("TEST_COLUMN");
    originalMetaData.setUnique(false);
    Optional<IndexMetaData> revisedMetaData = shardingIndexReviser.revise("TABLE_NAME_0", originalMetaData, shardingRule);
    assertTrue(revisedMetaData.isPresent());
    assertThat(revisedMetaData.get().getName(), is("TEST_INDEX"));
    assertThat(revisedMetaData.get().getColumns().size(), is(1));
    assertFalse(revisedMetaData.get().isUnique());
}
|
/**
 * Equality: reference identity short-circuits; otherwise the other object must
 * be an {@code Acl}, satisfy the superclass equality, and carry an equal
 * canned-ACL marker.
 */
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    return o instanceof Acl
            && super.equals(o)
            && Objects.equals(canned, ((Acl) o).canned);
}
|
// Equality must depend on the full user/role entry set: same entries are
// equal; differing user, role, or entry count makes ACLs unequal.
@Test
public void testEquals() {
    assertEquals(
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-1"))),
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-1"))));
    assertNotEquals(
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-1"))),
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-2"), new Acl.Role("r-1"))));
    assertNotEquals(
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-1"))),
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-2"))));
    assertNotEquals(
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-1")), new Acl.UserAndRole(new Acl.CanonicalUser("i-2"), new Acl.Role("r-1"))),
        new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("i-1"), new Acl.Role("r-2"))));
}
|
/**
 * Returns the URI scheme this filesystem handles, as configured in the
 * injected options.
 */
@Override
protected String getScheme() {
    return config.getScheme();
}
|
// An S3FileSystem built from S3Options must report the "s3" scheme.
@Test
public void testGetSchemeWithS3Options() {
    final S3FileSystem fileSystem = new S3FileSystem(s3Options());
    assertEquals("s3", fileSystem.getScheme());
}
|
/**
 * Determines whether two replicas may be swapped between their brokers.
 * A swap is allowed when the replicas have the same role (both leaders or both
 * followers) AND either (a) their brokers are distinct but in the same mapped
 * rack, or (b) neither destination rack already hosts any replica of the
 * partition being moved into it (rack-awareness preserved).
 */
boolean canSwap(Replica r1, Replica r2, ClusterModel clusterModel) {
    String mappedRackIdOfR1 = mappedRackIdOf(r1.broker());
    String mappedRackIdOfR2 = mappedRackIdOf(r2.broker());
    // NOTE(review): broker comparison uses reference identity — assumes the
    // cluster model hands out canonical Broker instances; confirm in ClusterModel.
    boolean inSameRack = r1.broker() != r2.broker() && mappedRackIdOfR1.equals(mappedRackIdOfR2);
    // Each replica's destination rack must not already contain a replica of
    // that replica's partition.
    boolean rackAware =
        clusterModel.partition(r1.topicPartition()).partitionRacks().stream()
            .map(this::mappedRackIdOf)
            .noneMatch(mappedRackIdOfR2::equals)
        && clusterModel.partition(r2.topicPartition()).partitionRacks().stream()
            .map(this::mappedRackIdOf)
            .noneMatch(mappedRackIdOfR1::equals);
    boolean sameRole = r1.isLeader() == r2.isLeader();
    return (inSameRack || rackAware) && sameRole;
}
|
// Exercises every branch of canSwap: same rack, role mismatch, same broker,
// rack-awareness violation, and a legal cross-rack swap — in both directions.
@Test
public void testCanSwap() {
    KafkaAssignerDiskUsageDistributionGoal goal = new KafkaAssignerDiskUsageDistributionGoal();
    ClusterModel clusterModel = createClusterModel();
    Replica r1 = clusterModel.broker(0).replica(T0P0);
    Replica r2 = clusterModel.broker(1).replica(T2P0);
    assertTrue("Replicas in the same rack should be good to swap", goal.canSwap(r1, r2, clusterModel));
    assertTrue("Replicas in the same rack should be good to swap", goal.canSwap(r2, r1, clusterModel));
    r2 = clusterModel.broker(1).replica(T1P0);
    assertFalse("Should not be able to swap replica with different roles.", goal.canSwap(r1, r2, clusterModel));
    assertFalse("Should not be able to swap replica with different roles.", goal.canSwap(r2, r1, clusterModel));
    r2 = clusterModel.broker(2).replica(T2P1);
    assertFalse("Should not be able to put two replicas in the same broker", goal.canSwap(r1, r2, clusterModel));
    assertFalse("Should not be able to put two replicas in the same broker", goal.canSwap(r2, r1, clusterModel));
    r2 = clusterModel.broker(3).replica(T2P2);
    assertFalse("Should not be able to put two replicas in the same rack", goal.canSwap(r1, r2, clusterModel));
    assertFalse("Should not be able to put two replicas in the same rack", goal.canSwap(r2, r1, clusterModel));
    r1 = clusterModel.broker(3).replica(T0P2);
    r2 = clusterModel.broker(4).replica(T1P2);
    assertTrue("Should be able to swap", goal.canSwap(r1, r2, clusterModel));
    assertTrue("Should be able to swap", goal.canSwap(r2, r1, clusterModel));
}
|
/**
 * Compares two URLs for equality, ignoring host case, using normalized path
 * segments, and treating a trailing "?" as an empty query. Falls back to plain
 * string comparison when either URL cannot be parsed into host components.
 */
public static boolean urlEquals(String string1, String string2) {
    final Uri first = Uri.parse(string1);
    final Uri second = Uri.parse(string2);
    final boolean unparseable = first == null || second == null
            || first.getHost() == null || second.getHost() == null;
    if (unparseable) {
        return string1.equals(string2); // Unable to parse url properly
    }
    final String host1 = first.getHost().toLowerCase(Locale.ROOT);
    final String host2 = second.getHost().toLowerCase(Locale.ROOT);
    if (!host1.equals(host2)) {
        return false;
    }
    final List<String> segments1 = normalizePathSegments(first.getPathSegments());
    final List<String> segments2 = normalizePathSegments(second.getPathSegments());
    if (!segments1.equals(segments2)) {
        return false;
    }
    if (TextUtils.isEmpty(first.getQuery())) {
        return TextUtils.isEmpty(second.getQuery());
    }
    return first.getQuery().equals(second.getQuery());
}
|
// URLs that should be considered equal: trailing/duplicate slashes, scheme
// differences (http vs https), percent-encoding, empty queries, host and path
// case differences, and non-parseable custom-scheme strings that match exactly.
@Test
public void testUrlEqualsSame() {
    assertTrue(UrlChecker.urlEquals("https://www.example.com/test", "https://www.example.com/test"));
    assertTrue(UrlChecker.urlEquals("https://www.example.com/test", "https://www.example.com/test/"));
    assertTrue(UrlChecker.urlEquals("https://www.example.com/test", "https://www.example.com//test"));
    assertTrue(UrlChecker.urlEquals("https://www.example.com", "https://www.example.com/"));
    assertTrue(UrlChecker.urlEquals("https://www.example.com", "http://www.example.com"));
    assertTrue(UrlChecker.urlEquals("http://www.example.com/", "https://www.example.com/"));
    assertTrue(UrlChecker.urlEquals("https://www.example.com/?id=42", "https://www.example.com/?id=42"));
    assertTrue(UrlChecker.urlEquals("https://example.com/podcast%20test", "https://example.com/podcast test"));
    assertTrue(UrlChecker.urlEquals("https://example.com/?a=podcast%20test", "https://example.com/?a=podcast test"));
    assertTrue(UrlChecker.urlEquals("https://example.com/?", "https://example.com/"));
    assertTrue(UrlChecker.urlEquals("https://example.com/?", "https://example.com"));
    assertTrue(UrlChecker.urlEquals("https://Example.com", "https://example.com"));
    assertTrue(UrlChecker.urlEquals("https://example.com/test", "https://example.com/Test"));
    assertTrue(UrlChecker.urlEquals("antennapod_local:abc", "antennapod_local:abc"));
}
|
/**
 * Writes job split files for the given new-API input splits.
 * Convenience overload that converts the list to an array and delegates.
 *
 * @param jobSubmitDir directory the split files are written to
 * @param conf         job configuration
 * @param fs           filesystem to write through
 * @param splits       the input splits to persist
 */
@SuppressWarnings("unchecked")
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
    Configuration conf, FileSystem fs, List<InputSplit> splits)
    throws IOException, InterruptedException {
  // Zero-length-array idiom: toArray allocates the correctly sized array itself.
  T[] array = (T[]) splits.toArray(new InputSplit[0]);
  createSplitFiles(jobSubmitDir, conf, fs, array);
}
|
// A split carrying 5 block locations must be truncated to the configured
// MAX_BLOCK_LOCATIONS_KEY limit (4) when written and read back.
@Test
public void testMaxBlockLocationsNewSplits() throws Exception {
    TEST_DIR.mkdirs();
    try {
        Configuration conf = new Configuration();
        conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
        Path submitDir = new Path(TEST_DIR.getAbsolutePath());
        FileSystem fs = FileSystem.getLocal(conf);
        FileSplit split = new FileSplit(new Path("/some/path"), 0, 1,
            new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
        JobSplitWriter.createSplitFiles(submitDir, conf, fs,
            new FileSplit[] { split });
        JobSplit.TaskSplitMetaInfo[] infos =
            SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
                submitDir);
        assertEquals("unexpected number of splits", 1, infos.length);
        assertEquals("unexpected number of split locations",
            4, infos[0].getLocations().length);
    } finally {
        // Always clean the scratch directory, even on assertion failure.
        FileUtil.fullyDelete(TEST_DIR);
    }
}
|
/**
 * Resolves the fallback method(s) matching the original method's signature and
 * wraps them for invocation. Lookups are memoized in a cache keyed by method
 * metadata (name, parameter types, return type, target class).
 *
 * @param fallbackMethodName name of the fallback method to resolve
 * @param originalMethod     the intercepted method whose signature must match
 * @param args               arguments of the current invocation
 * @param original           the target instance declaring the fallback
 * @param proxy              the proxy the invocation came through
 * @return the resolved fallback method wrapper
 * @throws NoSuchMethodException if no method with a compatible signature exists
 */
public static FallbackMethod create(String fallbackMethodName, Method originalMethod,
    Object[] args, Object original, Object proxy) throws NoSuchMethodException {
    MethodMeta methodMeta = new MethodMeta(
        fallbackMethodName,
        originalMethod.getParameterTypes(),
        originalMethod.getReturnType(),
        original.getClass());
    Map<Class<?>, Method> methods = FALLBACK_METHODS_CACHE
        .computeIfAbsent(methodMeta, FallbackMethod::extractMethods);
    if (!methods.isEmpty()) {
        return new FallbackMethod(methods, originalMethod.getReturnType(), args, original, proxy);
    } else {
        // Spell out the expected signature (including the trailing Throwable
        // parameter) so the user can fix their fallback declaration.
        throw new NoSuchMethodException(String.format("%s %s.%s(%s,%s)",
            methodMeta.returnType, methodMeta.targetClass, methodMeta.fallbackMethodName,
            StringUtils.arrayToDelimitedString(methodMeta.params, ","), Throwable.class));
    }
}
|
// Two fallback candidates covering the same exception type must be rejected
// with an IllegalStateException at resolution time.
// NOTE(review): the method name mentions NoSuchMethodException but the body
// asserts IllegalStateException — consider renaming for clarity.
@Test
public void mismatchReturnType_shouldThrowNoSuchMethodException() throws Throwable {
    FallbackMethodTest target = new FallbackMethodTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);
    assertThatThrownBy(() -> FallbackMethod
        .create("duplicateException", testMethod, new Object[]{"test"}, target, target))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage(
            "You have more that one fallback method that cover the same exception type java.lang.IllegalArgumentException");
}
|
/**
 * Creates the {@link RetryRegistry} bean: builds the registry from the
 * configured retry properties, attaches the event consumers, and eagerly
 * instantiates one Retry per configured instance.
 */
@Bean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
    EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
    RegistryEventConsumer<Retry> retryRegistryEventConsumer,
    @Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
    RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties,
        retryRegistryEventConsumer, compositeRetryCustomizer);
    registerEventConsumer(retryRegistry, retryEventConsumerRegistry,
        retryConfigurationProperties);
    // Pre-create each configured retry so its events are captured from startup.
    retryConfigurationProperties.getInstances()
        .forEach((name, properties) ->
            retryRegistry.retry(name, retryConfigurationProperties
                .createRetryConfig(name, compositeRetryCustomizer)));
    return retryRegistry;
}
|
// Verifies base-config inheritance: instances extending "default" or a shared
// config inherit its settings but can override individual values, and unknown
// backends fall back to the registry default.
@Test
public void testCreateRetryRegistryWithSharedConfigs() {
    InstanceProperties defaultProperties = new InstanceProperties();
    defaultProperties.setMaxAttempts(3);
    defaultProperties.setWaitDuration(Duration.ofMillis(100L));
    InstanceProperties sharedProperties = new InstanceProperties();
    sharedProperties.setMaxAttempts(2);
    sharedProperties.setWaitDuration(Duration.ofMillis(100L));
    InstanceProperties backendWithDefaultConfig = new InstanceProperties();
    backendWithDefaultConfig.setBaseConfig("default");
    backendWithDefaultConfig.setWaitDuration(Duration.ofMillis(200L));
    InstanceProperties backendWithSharedConfig = new InstanceProperties();
    backendWithSharedConfig.setBaseConfig("sharedConfig");
    backendWithSharedConfig.setWaitDuration(Duration.ofMillis(300L));
    RetryConfigurationProperties retryConfigurationProperties = new RetryConfigurationProperties();
    retryConfigurationProperties.getConfigs().put("default", defaultProperties);
    retryConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
    retryConfigurationProperties.getInstances()
        .put("backendWithDefaultConfig", backendWithDefaultConfig);
    retryConfigurationProperties.getInstances()
        .put("backendWithSharedConfig", backendWithSharedConfig);
    RetryConfiguration retryConfiguration = new RetryConfiguration();
    DefaultEventConsumerRegistry<RetryEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    RetryRegistry retryRegistry = retryConfiguration
        .retryRegistry(retryConfigurationProperties, eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()), compositeRetryCustomizerTest());
    assertThat(retryRegistry.getAllRetries().size()).isEqualTo(2);
    // Should get default config and overwrite max attempt and wait time
    Retry retry1 = retryRegistry.retry("backendWithDefaultConfig");
    assertThat(retry1).isNotNull();
    assertThat(retry1.getRetryConfig().getMaxAttempts()).isEqualTo(3);
    assertThat(retry1.getRetryConfig().getIntervalBiFunction().apply(1, null)).isEqualTo(200L);
    // Should get shared config and overwrite wait time
    Retry retry2 = retryRegistry.retry("backendWithSharedConfig");
    assertThat(retry2).isNotNull();
    assertThat(retry2.getRetryConfig().getMaxAttempts()).isEqualTo(2);
    assertThat(retry2.getRetryConfig().getIntervalBiFunction().apply(1, null)).isEqualTo(300L);
    // Unknown backend should get default config of Registry
    Retry retry3 = retryRegistry.retry("unknownBackend");
    assertThat(retry3).isNotNull();
    assertThat(retry3.getRetryConfig().getMaxAttempts()).isEqualTo(3);
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
|
/**
 * Runs the command synchronously on the calling thread.
 *
 * @param command the task to run
 * @throws RejectedExecutionException if this executor has been shut down
 */
@Override
public void execute(@Nonnull Runnable command) {
    throwRejectedExecutionExceptionIfShutdown();
    command.run();
}
|
// execute() on a shut-down executor must throw RejectedExecutionException
// (shared scenario driven by the testRejectedExecutionException helper).
@Test
void testRejectedExecute() {
    testRejectedExecutionException(testInstance -> testInstance.execute(() -> {}));
}
|
/**
 * Returns how many buffers the given owner may hold without being reclaimable:
 * the pool size minus what every OTHER owner is using or is guaranteed
 * (whichever is larger per owner).
 *
 * @param owner the owner asking for its non-reclaimable budget
 * @return the maximum number of non-reclaimable buffers for this owner
 */
@Override
public int getMaxNonReclaimableBuffers(Object owner) {
    checkIsInitialized();
    int numBuffersUsedOrReservedForOtherOwners = 0;
    for (Map.Entry<Object, TieredStorageMemorySpec> memorySpecEntry :
            tieredMemorySpecs.entrySet()) {
        Object userOwner = memorySpecEntry.getKey();
        TieredStorageMemorySpec storageMemorySpec = memorySpecEntry.getValue();
        if (!userOwner.equals(owner)) {
            // Reserve the larger of "guaranteed" and "actually requested" for
            // each other owner so neither budget is under-counted.
            int numGuaranteed = storageMemorySpec.getNumGuaranteedBuffers();
            int numRequested = numOwnerRequestedBuffer(userOwner);
            numBuffersUsedOrReservedForOtherOwners += Math.max(numGuaranteed, numRequested);
        }
    }
    // Note that a sudden reduction in the size of the buffer pool may result in non-reclaimable
    // buffer memory occupying the guaranteed buffers of other users. However, this occurrence
    // is limited to the memory tier, which is only utilized when downstream registration is in
    // effect. Furthermore, the buffers within the memory tier can be recycled quickly enough,
    // thereby minimizing the impact on the guaranteed buffers of other tiers.
    return bufferPool.getNumBuffers() - numBuffersUsedOrReservedForOtherOwners;
}
|
// With a single owner, the non-reclaimable budget equals the whole pool, and
// (budget - requested) tracks the remaining availability as buffers are taken.
@Test
void testGetMaxNonReclaimableBuffers() throws IOException {
    int numBuffers = 10;
    int numExclusive = 5;
    TieredStorageMemoryManagerImpl storageMemoryManager =
        createStorageMemoryManager(
            numBuffers,
            Collections.singletonList(new TieredStorageMemorySpec(this, numExclusive)));
    List<BufferBuilder> requestedBuffers = new ArrayList<>();
    for (int i = 1; i <= numBuffers; i++) {
        requestedBuffers.add(storageMemoryManager.requestBufferBlocking(this));
        // No other owners exist, so the budget stays at the full pool size.
        assertThat(storageMemoryManager.getMaxNonReclaimableBuffers(this))
            .isEqualTo(numBuffers);
        int numExpectedAvailable = numBuffers - i;
        assertThat(
            storageMemoryManager.getMaxNonReclaimableBuffers(this)
                - storageMemoryManager.numOwnerRequestedBuffer(this))
            .isEqualTo(numExpectedAvailable);
    }
    // Return everything so the manager can be released cleanly.
    requestedBuffers.forEach(TieredStorageMemoryManagerImplTest::recycleBufferBuilder);
    storageMemoryManager.release();
}
|
/**
 * Parses the given string as a number of bytes, e.g. {@code "1024"} or
 * {@code "16 mb"}. A missing unit defaults to bytes (multiplier 1).
 *
 * @param text the text to parse; must be a non-negative integer optionally
 *             followed by a memory unit
 * @return the number of bytes represented
 * @throws NullPointerException     if {@code text} is null
 * @throws IllegalArgumentException if the text is blank, has no leading
 *                                  number, or the value overflows a long
 */
public static long parseBytes(String text) throws IllegalArgumentException {
    Objects.requireNonNull(text, "text cannot be null");
    final String trimmed = text.trim();
    if (trimmed.isEmpty()) {
        throw new IllegalArgumentException("argument is an empty- or whitespace-only string");
    }
    final int len = trimmed.length();
    int pos = 0;
    char current;
    // Consume the leading run of ASCII digits; everything after is the unit.
    while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
        pos++;
    }
    final String number = trimmed.substring(0, pos);
    final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US);
    if (number.isEmpty()) {
        throw new NumberFormatException("text does not start with a number");
    }
    final long value;
    try {
        value = Long.parseLong(number); // this throws a NumberFormatException on overflow
    } catch (NumberFormatException e) {
        // Fixed typo in the message ("re represented" -> "represented").
        throw new IllegalArgumentException(
                "The value '"
                        + number
                        + "' cannot be represented as 64bit number (numeric overflow).");
    }
    final long multiplier = parseUnit(unit).map(MemoryUnit::getMultiplier).orElse(1L);
    final long result = value * multiplier;
    // check for overflow
    if (result / multiplier != value) {
        throw new IllegalArgumentException(
                "The value '"
                        + text
                        + "' cannot be represented as 64bit number of bytes (numeric overflow).");
    }
    return result;
}
|
// Rejected inputs: null (NPE), blank strings, unit-only text, unknown units,
// multiple numbers, and negative numbers (the '-' never matches a digit, so
// the parser sees text that does not start with a number).
@Test
void testParseInvalid() {
    // null
    assertThatThrownBy(() -> MemorySize.parseBytes(null))
        .isInstanceOf(NullPointerException.class);
    // empty
    assertThatThrownBy(() -> MemorySize.parseBytes(""))
        .isInstanceOf(IllegalArgumentException.class);
    // blank
    assertThatThrownBy(() -> MemorySize.parseBytes(" "))
        .isInstanceOf(IllegalArgumentException.class);
    // no number
    assertThatThrownBy(() -> MemorySize.parseBytes("foobar or fubar or foo bazz"))
        .isInstanceOf(IllegalArgumentException.class);
    // wrong unit
    assertThatThrownBy(() -> MemorySize.parseBytes("16 gjah"))
        .isInstanceOf(IllegalArgumentException.class);
    // multiple numbers
    assertThatThrownBy(() -> MemorySize.parseBytes("16 16 17 18 bytes"))
        .isInstanceOf(IllegalArgumentException.class);
    // negative number
    assertThatThrownBy(() -> MemorySize.parseBytes("-100 bytes"))
        .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Executes the given contract function as a transaction that transfers no
 * ether (wei value of zero).
 *
 * @param function the contract function to invoke
 * @return the mined transaction receipt
 * @throws IOException          on node communication errors
 * @throws TransactionException if the transaction fails or times out
 */
protected TransactionReceipt executeTransaction(Function function)
    throws IOException, TransactionException {
    return executeTransaction(function, BigInteger.ZERO);
}
|
// A contract built with a StaticGasProvider must pass the provider's fixed
// gas price (TEN) and gas limit (ONE) through to the transaction manager.
@Test
public void testStaticGasProvider() throws IOException, TransactionException {
    StaticGasProvider gasProvider = new StaticGasProvider(BigInteger.TEN, BigInteger.ONE);
    TransactionManager txManager = mock(TransactionManager.class);
    when(txManager.executeTransaction(
            any(BigInteger.class),
            any(BigInteger.class),
            anyString(),
            anyString(),
            any(BigInteger.class),
            anyBoolean()))
        .thenReturn(new TransactionReceipt());
    contract = new TestContract(ADDRESS, web3j, txManager, gasProvider);
    Function func =
        new Function(
            "test",
            Collections.<Type>emptyList(),
            Collections.<TypeReference<?>>emptyList());
    contract.executeTransaction(func);
    verify(txManager)
        .executeTransaction(
            eq(BigInteger.TEN),
            eq(BigInteger.ONE),
            anyString(),
            anyString(),
            any(BigInteger.class),
            anyBoolean());
}
|
/**
 * Posts the given Slack message as JSON to the configured webhook URL.
 *
 * @param message the message to serialize and send
 * @param webhookUrl the Slack incoming-webhook endpoint
 * @throws TemporaryEventNotificationException on I/O failures (retryable)
 * @throws PermanentEventNotificationException on a non-2xx HTTP response
 * @throws JsonProcessingException if the message cannot be serialized
 */
public void send(SlackMessage message, String webhookUrl) throws TemporaryEventNotificationException,
        PermanentEventNotificationException, JsonProcessingException {
    // Serialize once and reuse for both the request body and the debug log.
    final String payload = objectMapper.writeValueAsString(message);
    final Request request = new Request.Builder()
            .url(webhookUrl)
            .post(RequestBody.create(MediaType.parse(APPLICATION_JSON), payload))
            .build();
    // Bug fix: the log statement previously passed an empty string instead of
    // the actual payload, making the debug output useless.
    LOG.debug("Posting to webhook url <{}> the payload is <{}>",
            webhookUrl,
            payload);
    try (final Response r = httpClient.newCall(request).execute()) {
        if (!r.isSuccessful()) {
            //ideally this should not happen and the user is expected to fill the
            //right configuration , while setting up a notification
            throw new PermanentEventNotificationException(
                    "Expected successful HTTP response [2xx] but got [" + r.code() + "]. " + webhookUrl);
        }
    } catch (IOException e) {
        // Network-level failures may be transient, so signal that a retry is allowed.
        throw new TemporaryEventNotificationException("Unable to send the slack Message. " + e.getMessage());
    }
}
|
// Happy-path check: a 200 response is accepted, and the request is a POST
// whose body is exactly the JSON serialization of the message.
@Test
public void sendsHttpRequestAsExpected_whenInputIsGood() throws Exception {
    server.enqueue(new MockResponse().setResponseCode(200));
    SlackClient slackClient = new SlackClient(httpClient, objectMapper);
    slackClient.send(getMessage(), server.url("/").toString());
    final RecordedRequest recordedRequest = server.takeRequest();
    assertThat(recordedRequest.getMethod()).isEqualTo("POST");
    assertThat(recordedRequest.getBody()).isNotNull();
    assertThat(recordedRequest.getBody().readUtf8()).isEqualTo(objectMapper.writeValueAsString(getMessage()));
}
|
/**
 * Converts an XML document string into a Map, using a fresh {@link HashMap}
 * as the result container.
 *
 * @param xmlStr the XML document text
 * @return map keyed by element name; element text becomes the value
 */
public static Map<String, Object> xmlToMap(String xmlStr) {
    return xmlToMap(xmlStr, new HashMap<>());
}
|
// Converts a small XML document and verifies each top-level element becomes
// a map entry, with a nested element mapped to a nested Map.
@Test
public void xmlToMapTest() {
    final String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>"//
            + "<returnsms>"//
            + "<returnstatus>Success</returnstatus>"//
            + "<message>ok</message>"//
            + "<remainpoint>1490</remainpoint>"//
            + "<taskID>885</taskID>"//
            + "<successCounts>1</successCounts>"//
            + "<newNode><sub>subText</sub></newNode>"//
            + "</returnsms>";
    final Map<String, Object> map = XmlUtil.xmlToMap(xml);
    // Six direct children of the root element.
    assertEquals(6, map.size());
    assertEquals("Success", map.get("returnstatus"));
    assertEquals("ok", map.get("message"));
    assertEquals("1490", map.get("remainpoint"));
    assertEquals("885", map.get("taskID"));
    assertEquals("1", map.get("successCounts"));
    // A nested element is represented as a nested Map.
    assertEquals("subText", ((Map<?, ?>) map.get("newNode")).get("sub"));
}
|
/**
 * Returns the smallest range enclosing both this range and {@code other}:
 * the lesser of the two low bounds paired with the greater of the two high
 * bounds. The ranges need not overlap. Type compatibility is validated first.
 */
public Range span(Range other)
{
    checkTypeCompatibility(other);
    Marker lowMarker = Marker.min(low, other.getLow());
    Marker highMarker = Marker.max(high, other.getHigh());
    return new Range(lowMarker, highMarker);
}
|
// Covers overlapping, disjoint, nested, and unbounded range combinations,
// checking the span is always the convex hull of the two inputs.
@Test
public void testSpan()
{
    assertEquals(Range.greaterThan(BIGINT, 1L).span(Range.lessThanOrEqual(BIGINT, 2L)), Range.all(BIGINT));
    assertEquals(Range.greaterThan(BIGINT, 2L).span(Range.lessThanOrEqual(BIGINT, 0L)), Range.all(BIGINT));
    assertEquals(Range.range(BIGINT, 1L, true, 3L, false).span(Range.equal(BIGINT, 2L)), Range.range(BIGINT, 1L, true, 3L, false));
    assertEquals(Range.range(BIGINT, 1L, true, 3L, false).span(Range.range(BIGINT, 2L, false, 10L, false)), Range.range(BIGINT, 1L, true, 10L, false));
    assertEquals(Range.greaterThan(BIGINT, 1L).span(Range.equal(BIGINT, 0L)), Range.greaterThanOrEqual(BIGINT, 0L));
    assertEquals(Range.greaterThan(BIGINT, 1L).span(Range.greaterThanOrEqual(BIGINT, 10L)), Range.greaterThan(BIGINT, 1L));
    assertEquals(Range.lessThan(BIGINT, 1L).span(Range.lessThanOrEqual(BIGINT, 1L)), Range.lessThanOrEqual(BIGINT, 1L));
    assertEquals(Range.all(BIGINT).span(Range.lessThanOrEqual(BIGINT, 1L)), Range.all(BIGINT));
}
|
/**
 * Delegates credential validation to the given protocol, which decides
 * whether these credentials are acceptable under the supplied login options.
 */
public boolean validate(final Protocol protocol, final LoginOptions options) {
    return protocol.validate(this, options);
}
|
// An empty password must still validate for the FTP test protocol.
// NOTE(review): the method name reads oddly — it exercises an EMPTY password.
@Test
public void testLoginWithoutEmptyPass() {
    Credentials credentials = new Credentials("guest", "");
    assertTrue(credentials.validate(new TestProtocol(Scheme.ftp), new LoginOptions()));
}
|
/**
 * Whether this handle's invisibility period has elapsed, i.e. the current
 * wall-clock time has reached or passed {@code nextVisibleTime}.
 */
public boolean isExpired() {
    return nextVisibleTime <= System.currentTimeMillis();
}
|
// Builds a handle whose visibility timestamp lies one second in the past and
// verifies it reports itself as expired.
@Test
public void testIsExpired() {
    long startOffset = 1000L;
    long retrieveTime = System.currentTimeMillis();
    long invisibleTime = 1000L;
    int reviveQueueId = 1;
    String topicType = "NORMAL";
    String brokerName = "BrokerA";
    int queueId = 2;
    long offset = 2000L;
    long commitLogOffset = 3000L;
    // A next-visible time in the past guarantees isExpired() is true.
    long pastTime = System.currentTimeMillis() - 1000L;
    ReceiptHandle receiptHandle = new ReceiptHandle(startOffset, retrieveTime, invisibleTime, pastTime, reviveQueueId, topicType, brokerName, queueId, offset, commitLogOffset, "");
    boolean isExpired = receiptHandle.isExpired();
    assertTrue(isExpired);
}
|
@Override
public void execute(final ConnectionSession connectionSession) {
    // Prefer the database named in "SHOW TABLES FROM <db>"; otherwise fall
    // back to the session's currently selected database.
    String databaseName = sqlStatement.getFromDatabase().map(schema -> schema.getDatabase().getIdentifier().getValue()).orElseGet(connectionSession::getUsedDatabaseName);
    queryResultMetaData = createQueryResultMetaData(databaseName);
    mergedResult = new TransparentMergedResult(getQueryResult(databaseName));
}
|
@Test
void assertShowTablesExecutorWithoutFilter() throws SQLException {
    ShowTablesExecutor executor = new ShowTablesExecutor(new MySQLShowTablesStatement(), TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    Map<String, ShardingSphereDatabase> databases = getDatabases();
    ContextManager contextManager = mockContextManager(databases);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    executor.execute(mockConnectionSession());
    // SHOW TABLES yields a single-column result.
    assertThat(executor.getQueryResultMetaData().getColumnCount(), is(1));
    // Walk the merged result row by row and compare against the expected
    // table names, in order.
    String[] expectedTableNames = {"T_TEST", "t_account", "t_account_bak", "t_account_detail"};
    for (String expectedTableName : expectedTableNames) {
        executor.getMergedResult().next();
        assertThat(executor.getMergedResult().getValue(1, Object.class), is(expectedTableName));
    }
    // No further rows after the expected four tables.
    assertFalse(executor.getMergedResult().next());
}
|
/**
 * Builds the default configuration for this rule node: server-scope
 * attributes, no device notification, no attributes-updated notification,
 * and updates applied only when a value actually changes.
 */
@Override
public TbMsgAttributesNodeConfiguration defaultConfiguration() {
    TbMsgAttributesNodeConfiguration configuration = new TbMsgAttributesNodeConfiguration();
    configuration.setScope(DataConstants.SERVER_SCOPE);
    configuration.setNotifyDevice(false);
    configuration.setSendAttributesUpdatedNotification(false);
    // Since version 1. For an existing rule nodes for version 0. See the TbNode implementation
    configuration.setUpdateAttributesOnlyOnValueChange(true);
    return configuration;
}
|
// Since config version 1, the default must enable update-only-on-value-change.
@Test
void testDefaultConfig_givenUpdateAttributesOnlyOnValueChange_thenTrue_sinceVersion1() {
    assertThat(new TbMsgAttributesNodeConfiguration().defaultConfiguration().isUpdateAttributesOnlyOnValueChange()).isTrue();
}
|
/**
 * Convenience constructor that supplies a freshly created {@link Timer} for
 * scheduling expiration work.
 *
 * @param recommendedLength recommended initial capacity of the underlying store
 * @param sizeLimit maximum number of entries; see the primary constructor
 * @param creationExpirationPeriod entry lifetime from creation time
 * @param accessExpirationPeriod entry lifetime from last access time
 */
public LightWeightCache(final int recommendedLength,
        final int sizeLimit,
        final long creationExpirationPeriod,
        final long accessExpirationPeriod) {
    this(recommendedLength, sizeLimit,
            creationExpirationPeriod, accessExpirationPeriod, new Timer());
}
|
// Exercises the cache with randomized expiration periods and size limits.
// NOTE(review): `ran` is presumably a shared Random instance — confirm it is
// seeded deterministically if reproducibility matters.
@Test
public void testLightWeightCache() {
    // test randomized creation expiration with zero access expiration
    {
        final long creationExpiration = ran.nextInt(1024) + 1;
        check(1, creationExpiration, 0L, 1 << 10, 65537);
        check(17, creationExpiration, 0L, 1 << 16, 17);
        check(255, creationExpiration, 0L, 1 << 16, 65537);
    }
    // test randomized creation/access expiration periods
    for(int i = 0; i < 3; i++) {
        final long creationExpiration = ran.nextInt(1024) + 1;
        final long accessExpiration = ran.nextInt(1024) + 1;
        check(1, creationExpiration, accessExpiration, 1 << 10, 65537);
        check(17, creationExpiration, accessExpiration, 1 << 16, 17);
        check(255, creationExpiration, accessExpiration, 1 << 16, 65537);
    }
    // test size limit
    final int dataSize = 1 << 16;
    for(int i = 0; i < 10; i++) {
        final int modulus = ran.nextInt(1024) + 1;
        final int sizeLimit = ran.nextInt(modulus) + 1;
        checkSizeLimit(sizeLimit, dataSize, modulus);
    }
}
|
/**
 * TINYINT modulus operator. Operands arrive widened to {@code long}; the
 * result of {@code left % right} always fits back into a TINYINT.
 *
 * @throws PrestoException with DIVISION_BY_ZERO when {@code right} is zero
 *     (the JVM's ArithmeticException is translated and kept as the cause)
 */
@ScalarOperator(MODULUS)
@SqlType(StandardTypes.TINYINT)
public static long modulus(@SqlType(StandardTypes.TINYINT) long left, @SqlType(StandardTypes.TINYINT) long right)
{
    try {
        return left % right;
    }
    catch (ArithmeticException e) {
        throw new PrestoException(DIVISION_BY_ZERO, e);
    }
}
|
// Checks equal operands, both operand orders, and the division-by-zero error.
@Test
public void testModulus()
{
    assertFunction("TINYINT'37' % TINYINT'37'", TINYINT, (byte) 0);
    assertFunction("TINYINT'37' % TINYINT'17'", TINYINT, (byte) (37 % 17));
    assertFunction("TINYINT'17' % TINYINT'37'", TINYINT, (byte) (17 % 37));
    assertFunction("TINYINT'17' % TINYINT'17'", TINYINT, (byte) 0);
    assertInvalidFunction("TINYINT'17' % TINYINT'0'", DIVISION_BY_ZERO);
}
|
/**
 * Resolves the bundle that owns the given topic via the factory's bundle
 * assignment strategy. The topic must belong to this namespace.
 *
 * @throws IllegalArgumentException if the topic's namespace differs from this one
 */
public NamespaceBundle findBundle(TopicName topicName) {
    checkArgument(nsname.equals(topicName.getNamespaceObject()));
    return factory.getTopicBundleAssignmentStrategy().findBundle(topicName, this);
}
|
// Verifies bundle lookup for an in-namespace topic, rejection of a topic from
// another namespace, and failure when the topic's hash falls outside the
// configured partition ranges.
@Test
public void testFindBundle() throws Exception {
    SortedSet<Long> partitions = new TreeSet<>();
    partitions.add(0L);
    partitions.add(0x40000000L);
    partitions.add(0xa0000000L);
    partitions.add(0xb0000000L);
    partitions.add(0xc0000000L);
    partitions.add(0xffffffffL);
    NamespaceBundles bundles = new NamespaceBundles(NamespaceName.get("pulsar/global/ns1"),
            factory, Optional.empty(), partitions);
    TopicName topicName = TopicName.get("persistent://pulsar/global/ns1/topic-1");
    // The resolved bundle must contain the topic's hash.
    NamespaceBundle bundle = bundles.findBundle(topicName);
    assertTrue(bundle.includes(topicName));
    topicName = TopicName.get("persistent://pulsar/use/ns2/topic-2");
    try {
        bundles.findBundle(topicName);
        fail("Should have failed due to mismatched namespace name");
    } catch (IllegalArgumentException iae) {
        // OK, expected
    }
    Long hashKey = factory.getLongHashCode(topicName.toString());
    // The following code guarantees that we have at least two ranges after the hashKey till the end
    SortedSet<Long> tailSet = partitions.tailSet(hashKey);
    tailSet.add(hashKey);
    // Now, remove the first range to ensure the hashKey is not included in <code>newPar</code>
    Iterator<Long> iter = tailSet.iterator();
    iter.next();
    SortedSet<Long> newPar = tailSet.tailSet(iter.next());
    try {
        bundles = new NamespaceBundles(topicName.getNamespaceObject(), factory, Optional.empty(), newPar);
        bundles.findBundle(topicName);
        fail("Should have failed due to out-of-range");
    } catch (IndexOutOfBoundsException iae) {
        // OK, expected
    }
}
|
@Override
public void onCopied(Item src, Item item) {
    // bug 5056825 - Display name field should be cleared when you copy a job within the same folder.
    // Guard clause: only act on AbstractItems copied within the same parent folder.
    if (!(item instanceof AbstractItem) || src.getParent() != item.getParent()) {
        return;
    }
    AbstractItem copy = (AbstractItem) item;
    try {
        copy.setDisplayName(null);
    } catch (IOException ioe) {
        LOGGER.log(Level.WARNING, String.format("onCopied():Exception while trying to clear the displayName for Item.name:%s", item.getName()), ioe);
    }
}
|
// Copying within the same folder (both StubJobs share a parent) must reset
// the destination's display name so getDisplayName() falls back to the name.
@Test
public void testOnCopied() throws Exception {
    DisplayNameListener listener = new DisplayNameListener();
    StubJob src = new StubJob();
    src.doSetName("src");
    StubJob dest = new StubJob();
    dest.doSetName("dest");
    dest.setDisplayName("this should be cleared");
    // make sure the displayname and the name are different at this point
    assertNotEquals(dest.getName(), dest.getDisplayName());
    listener.onCopied(src, dest);
    // make sure the displayname is equals to the name as it should be null
    assertEquals(dest.getName(), dest.getDisplayName());
}
|
/**
 * Creates a view of the given cache scoped to the supplied key prefix. Only
 * SubCache instances are supported; any other cache type (including null)
 * is rejected.
 *
 * @throws IllegalArgumentException if {@code cache} is not a SubCache
 */
public static <K, V> Cache<K, V> subCache(
    Cache<?, ?> cache, Object keyPrefix, Object... additionalKeyPrefix) {
  if (cache instanceof SubCache) {
    // Cast once rather than repeating it for every field access.
    SubCache<?, ?> parent = (SubCache<?, ?>) cache;
    return new SubCache<>(
        parent.cache,
        parent.keyPrefix.subKey(keyPrefix, additionalKeyPrefix),
        parent.maxWeightInBytes,
        parent.weightInBytes);
  }
  throw new IllegalArgumentException(
      String.format(
          "An unsupported type of cache was passed in. Received %s.",
          cache == null ? "null" : cache.getClass()));
}
|
// A prefixed sub-view of an eternal cache must satisfy the shared cache contract.
@Test
public void testSubCache() throws Exception {
    testCache(Caches.subCache(Caches.eternal(), "prefix"));
}
|
/**
 * Reads the value stored at {@code key} from etcd.
 *
 * @param key the key to look up
 * @return the value decoded as UTF-8, or {@code null} if the key is absent
 *     or the lookup failed
 */
public String get(final String key) {
    List<KeyValue> keyValues = null;
    try {
        keyValues = client.getKVClient().get(bytesOf(key)).get().getKvs();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe the
        // interruption instead of it being silently swallowed.
        Thread.currentThread().interrupt();
        LOG.error(e.getMessage(), e);
    } catch (ExecutionException e) {
        LOG.error(e.getMessage(), e);
    }
    if (CollectionUtils.isEmpty(keyValues)) {
        return null;
    }
    return keyValues.iterator().next().getValue().toString(UTF_8);
}
|
// The pre-seeded key must round-trip to the expected value.
@Test
public void testGet() {
    String result = etcdClient.get(GET_KEY);
    assertEquals(VALUE, result);
}
|
/**
 * Removes the first {@code toIndex} elements from this segment list.
 *
 * @param toIndex number of elements (logical, head-relative) to drop
 */
public void removeFromFirst(final int toIndex) {
    // Translate the logical index into a physical one that accounts for
    // elements already trimmed from the head of the first segment.
    int alignedIndex = toIndex + this.firstOffset;
    int toSegmentIndex = alignedIndex >> SEGMENT_SHIFT;
    int toIndexInSeg = alignedIndex & (SEGMENT_SIZE - 1);
    // Drop any whole segments that lie entirely before the target index.
    if (toSegmentIndex > 0) {
        this.segments.removeRange(0, toSegmentIndex);
        this.size -= ((toSegmentIndex << SEGMENT_SHIFT) - this.firstOffset);
    }
    Segment<T> firstSeg = this.getFirst();
    if (firstSeg != null) {
        // Trim the remaining prefix inside the (possibly new) first segment.
        this.size -= firstSeg.removeFromFirst(toIndexInSeg);
        this.firstOffset = firstSeg.offset;
        if (firstSeg.isEmpty()) {
            // Fully drained: recycle the segment and reset the head offset.
            RecycleUtil.recycle(this.segments.pollFirst());
            this.firstOffset = 0;
        }
    } else {
        // No segments remain; the list is empty.
        this.firstOffset = this.size = 0;
    }
}
|
// Removes a segment-boundary-sized prefix, then a smaller prefix, then the
// remainder, verifying sizes, surviving element values, and final emptiness.
@Test
public void testRemoveFromFirst() {
    fillList();
    int len = SegmentList.SEGMENT_SIZE - 1;
    this.list.removeFromFirst(len);
    assertEquals(1000 - len, this.list.size());
    for (int i = 0; i < 1000 - len; i++) {
        assertEquals(i + len, (int) this.list.get(i));
    }
    this.list.removeFromFirst(100);
    assertEquals(1000 - len - 100, this.list.size());
    for (int i = 0; i < 1000 - len - 100; i++) {
        assertEquals(i + len + 100, (int) this.list.get(i));
    }
    // Removing everything that is left must leave a fully empty list.
    this.list.removeFromFirst(1000 - len - 100);
    assertTrue(this.list.isEmpty());
    assertEquals(0, this.list.segmentSize());
    assertNull(this.list.peekFirst());
    assertNull(this.list.peekLast());
}
|
/**
 * Records a REMOVE_PUBLICATION_CLEANUP event into the event ring buffer.
 * If buffer space cannot be claimed the event is silently dropped.
 *
 * @param channel URI of the publication's channel
 * @param sessionId session id of the removed publication
 * @param streamId stream id of the removed publication
 */
public void logPublicationRemoval(final String channel, final int sessionId, final int streamId)
{
    // Payload: sessionId + streamId + channel length prefix, then channel chars.
    final int length = SIZE_OF_INT * 3 + channel.length();
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(toEventCodeId(REMOVE_PUBLICATION_CLEANUP), encodedLength);
    // tryClaim returns a positive index only on success; otherwise the event is dropped.
    if (index > 0)
    {
        try
        {
            final UnsafeBuffer buffer = (UnsafeBuffer)ringBuffer.buffer();
            encodePublicationRemoval(buffer, index, captureLength, length, channel, sessionId, streamId);
        }
        finally
        {
            // Always commit so the claimed slot is released even if encoding throws.
            ringBuffer.commit(index);
        }
    }
}
|
// Encodes a removal event at an unaligned tail position and verifies the
// header plus the little-endian sessionId, streamId, and channel fields.
@Test
void logPublicationRemoval()
{
    final int recordOffset = align(1111, ALIGNMENT);
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset);
    final String uri = "uri";
    final int sessionId = 42;
    final int streamId = 19;
    final int captureLength = uri.length() + SIZE_OF_INT * 3;
    logger.logPublicationRemoval(uri, sessionId, streamId);
    verifyLogHeader(
        logBuffer, recordOffset, toEventCodeId(REMOVE_PUBLICATION_CLEANUP), captureLength, captureLength);
    assertEquals(sessionId, logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
    assertEquals(streamId,
        logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT), LITTLE_ENDIAN));
    assertEquals(uri,
        logBuffer.getStringAscii(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT * 2),
            LITTLE_ENDIAN));
}
|
/** Returns the logical schema of this stream/table node. */
@Override
public LogicalSchema getSchema() {
    return schema;
}
|
// A multi-argument aggregate over a windowed, filtered query must produce a
// schema with the grouping key plus a generated aggregate column.
@Test
public void shouldBuildCorrectMultiArgAggregateSchema() {
    // When:
    final SchemaKStream<?> stream = buildQuery("SELECT col0, multi_arg(col0, col1, 20) FROM test1 "
        + "window TUMBLING (size 2 second) "
        + "WHERE col0 > 100 GROUP BY col0 EMIT CHANGES;");
    // Then:
    assertThat(stream.getSchema(), is(LogicalSchema.builder()
        .keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("KSQL_COL_0"), SqlTypes.BIGINT)
        .build()
    ));
}
|
/**
 * Reduces this grouped stream using the configured key/value serdes for the
 * backing state store materialization.
 */
@Override
public KTable<K, V> reduce(final Reducer<V> reducer) {
    return reduce(reducer, Materialized.with(keySerde, valueSerde));
}
|
// A null reducer must be rejected eagerly with an NPE.
@Test
public void shouldNotHaveNullReducerOnReduce() {
    assertThrows(NullPointerException.class, () -> groupedStream.reduce(null));
}
|
/**
 * Schedules and executes movements of active tasks toward more caught-up
 * clients. For each active task whose current owner is not caught up while
 * some other client is, the task is moved (preferring a caught-up standby
 * swap, then any caught-up client, then the most caught-up client) and a
 * warm-up replica may be placed on the originally intended owner.
 *
 * @param tasksToCaughtUpClients tasks mapped to the clients caught up on them
 * @param tasksToClientByLag tasks mapped to clients ordered by ascending lag
 * @param clientStates mutable per-client assignment state
 * @param warmups output map collecting assigned warm-up replicas per client
 * @param remainingWarmupReplicas budget of warm-up replicas still available
 * @return the number of task movements that were scheduled
 * @throws IllegalStateException if a scheduled movement cannot be satisfied
 */
static int assignActiveTaskMovements(final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients,
        final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag,
        final Map<ProcessId, ClientState> clientStates,
        final Map<ProcessId, Set<TaskId>> warmups,
        final AtomicInteger remainingWarmupReplicas) {
    final BiFunction<ProcessId, TaskId, Boolean> caughtUpPredicate =
        (client, task) -> taskIsCaughtUpOnClient(task, client, tasksToCaughtUpClients);
    // Candidate clients ordered by current task load, constrained to caught-up ones.
    final ConstrainedPrioritySet caughtUpClientsByTaskLoad = new ConstrainedPrioritySet(
        caughtUpPredicate,
        client -> clientStates.get(client).assignedTaskLoad()
    );
    // Process movements with the fewest caught-up destinations first (ties by task id).
    final Queue<TaskMovement> taskMovements = new PriorityQueue<>(
        Comparator.comparing(TaskMovement::numCaughtUpClients).thenComparing(TaskMovement::task)
    );
    for (final Map.Entry<ProcessId, ClientState> clientStateEntry : clientStates.entrySet()) {
        final ProcessId client = clientStateEntry.getKey();
        final ClientState state = clientStateEntry.getValue();
        for (final TaskId task : state.activeTasks()) {
            // if the desired client is not caught up, and there is another client that _is_ more caught up, then
            // we schedule a movement, so we can move the active task to a more caught-up client. We'll try to
            // assign a warm-up to the desired client so that we can move it later on.
            if (taskIsNotCaughtUpOnClientAndOtherMoreCaughtUpClientsExist(task, client, clientStates, tasksToCaughtUpClients, tasksToClientByLag)) {
                taskMovements.add(new TaskMovement(task, client, tasksToCaughtUpClients.get(task)));
            }
        }
        caughtUpClientsByTaskLoad.offer(client);
    }
    final int movementsNeeded = taskMovements.size();
    while (!taskMovements.isEmpty()) {
        final TaskMovement movement = taskMovements.poll();
        // Attempt to find a caught up standby, otherwise find any caught up client, failing that use the most
        // caught up client.
        final boolean moved = tryToSwapStandbyAndActiveOnCaughtUpClient(clientStates, caughtUpClientsByTaskLoad, movement) ||
            tryToMoveActiveToCaughtUpClientAndTryToWarmUp(clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement) ||
            tryToMoveActiveToMostCaughtUpClient(tasksToClientByLag, clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement);
        if (!moved) {
            throw new IllegalStateException("Tried to move task to more caught-up client as scheduled before but none exist");
        }
    }
    return movementsNeeded;
}
|
// Three clients each own an active task they are worst-caught-up on; the
// algorithm must rotate the actives to each client's least-lagging task and
// place warm-up standbys for the original intended owners.
@Test
public void shouldMoveTasksToMostCaughtUpClientsAndAssignWarmupReplicasInTheirPlace() {
    final int maxWarmupReplicas = Integer.MAX_VALUE;
    final Map<TaskId, Long> client1Lags = mkMap(mkEntry(TASK_0_0, 10000L), mkEntry(TASK_0_1, 20000L), mkEntry(TASK_0_2, 30000L));
    final Map<TaskId, Long> client2Lags = mkMap(mkEntry(TASK_0_2, 10000L), mkEntry(TASK_0_0, 20000L), mkEntry(TASK_0_1, 30000L));
    final Map<TaskId, Long> client3Lags = mkMap(mkEntry(TASK_0_1, 10000L), mkEntry(TASK_0_2, 20000L), mkEntry(TASK_0_0, 30000L));
    final ClientState client1 = getClientStateWithLags(mkSet(TASK_0_0), client1Lags);
    final ClientState client2 = getClientStateWithLags(mkSet(TASK_0_1), client2Lags);
    final ClientState client3 = getClientStateWithLags(mkSet(TASK_0_2), client3Lags);
    // To test when the task is already a standby on the most caught up node
    client3.assignStandby(TASK_0_1);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(client1, client2, client3);
    // No client is fully caught up on any task.
    final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients = mkMap(
        mkEntry(TASK_0_0, mkSortedSet()),
        mkEntry(TASK_0_1, mkSortedSet()),
        mkEntry(TASK_0_2, mkSortedSet())
    );
    final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag = mkMap(
        mkEntry(TASK_0_0, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_0_1, mkOrderedSet(PID_3, PID_1, PID_2)),
        mkEntry(TASK_0_2, mkOrderedSet(PID_2, PID_3, PID_1))
    );
    assertThat(
        "should have assigned movements",
        assignActiveTaskMovements(
            tasksToCaughtUpClients,
            tasksToClientByLag,
            clientStates,
            new TreeMap<>(),
            new AtomicInteger(maxWarmupReplicas)
        ),
        is(2)
    );
    // The active tasks have changed to the ones that each client is most caught up on
    assertThat(client1, hasProperty("activeTasks", ClientState::activeTasks, mkSet(TASK_0_0)));
    assertThat(client2, hasProperty("activeTasks", ClientState::activeTasks, mkSet(TASK_0_2)));
    assertThat(client3, hasProperty("activeTasks", ClientState::activeTasks, mkSet(TASK_0_1)));
    // we assigned warmups to migrate to the input active assignment
    assertThat(client1, hasProperty("standbyTasks", ClientState::standbyTasks, mkSet()));
    assertThat(client2, hasProperty("standbyTasks", ClientState::standbyTasks, mkSet(TASK_0_1)));
    assertThat(client3, hasProperty("standbyTasks", ClientState::standbyTasks, mkSet(TASK_0_2)));
}
|
/**
 * Builds Vert.x JKS key-store options from Kafka-style SSL properties.
 * Returns {@code Optional.empty()} when no key-store location is configured.
 * When a non-empty alias is supplied, the store is loaded and only that
 * entry is used; otherwise Vert.x is pointed directly at the store file.
 */
public static Optional<JksOptions> buildJksKeyStoreOptions(
    final Map<String, String> props,
    final Optional<String> alias
) {
  final String location = getKeyStoreLocation(props);
  final String keyStorePassword = getKeyStorePassword(props);
  final String keyPassword = getKeyPassword(props);
  // No location configured means no key store at all.
  if (Strings.isNullOrEmpty(location)) {
    return Optional.empty();
  }
  final JksOptions jksOptions = alias.filter(a -> !a.isEmpty())
      .map(a -> buildJksOptions(
          loadJksKeyStore(location, keyStorePassword, keyPassword, a),
          keyStorePassword))
      .orElseGet(() -> buildJksOptions(location, keyStorePassword));
  return Optional.of(jksOptions);
}
|
// Without an alias, the builder should carry the configured path and
// password straight through to the JksOptions.
@Test
public void shouldBuildKeyStoreJksOptionsWithPathAndPassword() {
    // When
    final Optional<JksOptions> jksOptions = VertxSslOptionsFactory.buildJksKeyStoreOptions(
        ImmutableMap.of(
            SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG,
            "path",
            SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG,
            "password"
        ),
        Optional.empty()
    );
    // Then
    assertThat(jksOptions.get().getPath(), is("path"));
    assertThat(jksOptions.get().getPassword(), is("password"));
}
|
/**
 * Opens the browser on the extra-options help page of the currently
 * configured database. Shows an informational message when no help URL is
 * available, or when the browser fails to launch.
 */
public void getOptionHelp() {
    String message = null;
    DatabaseMeta database = new DatabaseMeta();
    // Populate the meta object from the current dialog state.
    getInfo( database );
    String url = database.getExtraOptionsHelpText();
    if ( ( url == null ) || ( url.trim().length() == 0 ) ) {
        message = Messages.getString( "DataHandler.USER_NO_HELP_AVAILABLE" );
        showMessage( message, false );
        return;
    }
    Status status = launch.openURL( url );
    if ( status.equals( Status.Failed ) ) {
        message = Messages.getString( "DataHandler.USER_UNABLE_TO_LAUNCH_BROWSER", url );
        showMessage( message, false );
    }
}
|
// With only mocked combo selections and no real database behind them,
// getOptionHelp() is expected to blow up with a RuntimeException.
@Test( expected = RuntimeException.class )
public void testGetOptionHelpNoDatabase() throws Exception {
    when( accessBox.getSelectedItem() ).thenReturn( "JNDI" );
    when( connectionBox.getSelectedItem() ).thenReturn( "MyDB" );
    dataHandler.getOptionHelp();
}
|
/**
 * Lists pod endpoints in the configured namespace via the Kubernetes API,
 * enriched with public addresses. Known REST failures are delegated to
 * {@code handleKnownException}.
 */
List<Endpoint> endpoints() {
    try {
        String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
        return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
    } catch (RestClientException e) {
        return handleKnownException(e);
    }
}
|
// Stubs the pods/endpoints/services/nodes API and verifies that pod-private
// addresses are resolved alongside public NodePort addresses matched by name.
@Test
public void endpointsByNamespaceWithMultipleNodePortPublicIpMatchByName() throws JsonProcessingException {
    // given
    stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
    stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), endpointsListResponse());
    stub(String.format("/api/v1/namespaces/%s/services/service-0", NAMESPACE), service(servicePort(0, 0, 0)));
    stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
        service(servicePort(32123, 5701, 31916)));
    stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
        service(servicePort(32124, 5701, 31917)));
    stub("/api/v1/nodes/node-name-1", node("node-name-1", "10.240.0.21", "35.232.226.200"));
    stub("/api/v1/nodes/node-name-2", node("node-name-2", "10.240.0.22", "35.232.226.201"));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-0", NAMESPACE),
        pod("hazelcast-0", NAMESPACE, "node-name-1", 5701));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-1", NAMESPACE),
        pod("hazelcast-1", NAMESPACE, "node-name-2", 5701));
    // when
    List<Endpoint> result = kubernetesClient.endpoints();
    // then
    assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("192.168.0.25", 5701), ready("172.17.0.5", 5702));
    assertThat(formatPublic(result)).containsExactlyInAnyOrder(ready("35.232.226.200", 31916), ready("35.232.226.201", 31917));
}
|
/**
 * Returns an array of exactly {@code size} elements derived from {@code src}.
 * When growing, all source elements are kept and nulls pad the end
 * ({@code ended == true}) or the front ({@code ended == false}). When
 * shrinking, the tail ({@code ended == true}) or the head
 * ({@code ended == false}) of the source survives. When the size already
 * matches, the source array itself is returned.
 */
@SuppressWarnings("unchecked")
public static <T> T[] realloc(Class<T> klass, T[] src, int size, boolean ended)
{
    // Same length: nothing to do, hand back the original array.
    if (size == src.length) {
        return src;
    }
    final T[] dest = (T[]) Array.newInstance(klass, size);
    if (size > src.length) {
        // Growing: 'ended' picks whether the null padding goes after (true)
        // or before (false) the copied elements.
        final int destOffset = ended ? 0 : size - src.length;
        System.arraycopy(src, 0, dest, destOffset, src.length);
    }
    else {
        // Shrinking: 'ended' picks whether the tail (true) or the head
        // (false) of the source is preserved.
        final int srcOffset = ended ? src.length - size : 0;
        System.arraycopy(src, srcOffset, dest, 0, size);
    }
    return dest;
}
|
// Covers same-size (identity), growth with end/front padding, and shrink
// keeping head or tail, matching realloc's 'ended' semantics.
@Test
public void testRealloc()
{
    Integer[] src = new Integer[] { 1, 3, 5 };
    // Same size: the original array instance is returned unchanged.
    Integer[] dest = Utils.realloc(Integer.class, src, 3, true);
    assertThat(src.length, is(3));
    assertThat(src, is(dest));
    // Grow with ended=true: elements stay at the front, nulls pad the end.
    dest = Utils.realloc(Integer.class, src, 5, true);
    assertThat(dest.length, is(5));
    assertThat(dest[0], is(1));
    assertThat(dest[1], is(3));
    assertThat(dest[2], is(5));
    assertThat(dest[4], nullValue());
    // Grow with ended=false: nulls pad the front, elements shift to the end.
    dest = Utils.realloc(Integer.class, src, 6, false);
    assertThat(dest.length, is(6));
    assertThat(dest[0], nullValue());
    assertThat(dest[1], nullValue());
    assertThat(dest[2], nullValue());
    assertThat(dest[3], is(1));
    assertThat(dest[4], is(3));
    assertThat(dest[5], is(5));
    src = new Integer[] { 1, 3, 5, 7, 9, 11 };
    // Shrink with ended=false: the head of the source survives.
    dest = Utils.realloc(Integer.class, src, 4, false);
    assertThat(dest.length, is(4));
    assertThat(dest[0], is(1));
    assertThat(dest[1], is(3));
    assertThat(dest[2], is(5));
    assertThat(dest[3], is(7));
    // Shrink with ended=true: the tail of the source survives.
    dest = Utils.realloc(Integer.class, src, 3, true);
    assertThat(dest.length, is(3));
    assertThat(dest[0], is(7));
    assertThat(dest[1], is(9));
    assertThat(dest[2], is(11));
}
|
/** Returns the write-lock view of this read-write lock, keyed by this lock's name. */
@Override
public RLock writeLock() {
    return new RedissonWriteLock(commandExecutor, getName());
}
|
// A write lock taken with a 2-second lease must auto-expire, letting a second
// writer acquire it within roughly that window.
@Test
public void testExpireWrite() throws InterruptedException {
    RReadWriteLock lock = redisson.getReadWriteLock("lock");
    lock.writeLock().lock(2, TimeUnit.SECONDS);
    final long startTime = System.currentTimeMillis();
    Thread contender = new Thread(() -> {
        RReadWriteLock sameLock = redisson.getReadWriteLock("lock");
        sameLock.writeLock().lock();
        long spendTime = System.currentTimeMillis() - startTime;
        // Acquisition must happen as soon as the lease expires (~2s, small slack).
        Assertions.assertTrue(spendTime < 2050);
        sameLock.writeLock().unlock();
    });
    contender.start();
    contender.join();
    lock.writeLock().unlock();
}
|
/**
 * Builds the DDL command for creating a table from the query's output node.
 * When the sink key is windowed AND an EMIT strategy was supplied, the window
 * info is rebuilt to carry the requested output refinement.
 */
public CreateTableCommand createTableCommand(
    final KsqlStructuredDataOutputNode outputNode,
    final Optional<RefinementInfo> emitStrategy
) {
  Optional<WindowInfo> windowInfo =
      outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();
  if (windowInfo.isPresent() && emitStrategy.isPresent()) {
    final WindowInfo info = windowInfo.get();
    // Recreate the window info so it includes the output refinement.
    windowInfo = Optional.of(WindowInfo.of(
        info.getType(),
        info.getSize(),
        Optional.of(emitStrategy.get().getOutputRefinement())
    ));
  }
  return new CreateTableCommand(
      outputNode.getSinkName().get(),
      outputNode.getSchema(),
      outputNode.getTimestampColumn(),
      outputNode.getKsqlTopic().getKafkaTopicName(),
      Formats.from(outputNode.getKsqlTopic()),
      windowInfo,
      Optional.of(outputNode.getOrReplace()),
      Optional.of(false)
  );
}
|
// Creating a table whose name already exists must fail with a clear message.
@Test
public void shouldThrowIfTableExists() {
    //Given
    final CreateTable ddlStatement = new CreateTable(TABLE_NAME,
        TableElements.of(
            tableElement("COL1", new Type(BIGINT), PRIMARY_KEY_CONSTRAINT),
            tableElement("COL2", new Type(SqlTypes.STRING))),
        false, false, withProperties, false);
    // When:
    final Exception e = assertThrows(
        KsqlException.class, () -> createSourceFactory
            .createTableCommand(ddlStatement, ksqlConfig));
    // Then:
    assertThat(e.getMessage(),
        containsString("Cannot add table 'table_bob': A table with the same name already exists"));
}
|
/** Advances to the next row by delegating to the underlying merged result. */
@Override
public boolean next() throws SQLException {
    return mergedResult.next();
}
|
// With an (unstubbed) empty delegate, next() must report no further rows.
@Test
void assertNext() throws SQLException {
    assertFalse(new MaskMergedResult(mock(MaskRule.class), mock(SelectStatementContext.class), mergedResult).next());
}
|
/**
 * Opens a stream for the given downloadable item: from the job's temp store
 * when the item was staged there, otherwise over HTTP from its fetchable URL.
 * The reported byte count is the HTTP content length, clamped to zero when
 * the server does not report one (negative content length).
 */
public InputStreamWrapper getInputStreamForItem(UUID jobId, DownloadableItem item)
        throws IOException {
    String fetchableUrl = item.getFetchableUrl();
    if (item.isInTempStore()) {
        return jobStore.getStream(jobId, fetchableUrl);
    }
    HttpURLConnection conn = getConnection(fetchableUrl);
    return new InputStreamWrapper(
        conn.getInputStream(), Math.max(conn.getContentLengthLong(), 0));
}
|
// Items flagged as in the temp store must be served from the job store,
// preserving the reported byte count, and never hit the network.
@Test
public void getInputStreamFromTempStore() throws Exception {
    long expectedBytes = 323;
    when(jobStore.getStream(any(), anyString())).thenReturn(
        new InputStreamWrapper(null, expectedBytes));
    boolean inTempStore = true;
    String fetchableUrl = "https://example.com";
    DownloadableItem item = new PhotoModel("title", fetchableUrl, "description", "jpeg",
        "123", "album", inTempStore);
    UUID jobId = UUID.randomUUID();
    InputStreamWrapper streamWrapper = connectionProvider.getInputStreamForItem(
        jobId, item);
    Truth.assertThat(streamWrapper.getBytes()).isEqualTo(expectedBytes);
    verify(jobStore).getStream(eq(jobId), eq(fetchableUrl));
}
|
/**
 * Annotates the field with a bean-validation {@code @Size} constraint when
 * the schema declares {@code minLength}/{@code maxLength}, JSR-303
 * annotations are enabled, and the field's type supports it. The jakarta or
 * javax annotation variant is chosen from the generation config. Returns the
 * field unchanged otherwise.
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
    if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
            && (node.has("minLength") || node.has("maxLength"))
            && isApplicableType(field)) {
        final Class<? extends Annotation> sizeClass
                = ruleFactory.getGenerationConfig().isUseJakartaValidation()
                ? Size.class
                : javax.validation.constraints.Size.class;
        JAnnotationUse annotation = field.annotate(sizeClass);
        if (node.has("minLength")) {
            annotation.param("min", node.get("minLength").asInt());
        }
        if (node.has("maxLength")) {
            annotation.param("max", node.get("maxLength").asInt());
        }
    }
    return field;
}
|
// A schema with only maxLength should yield @Size(max=...) and no "min"
// parameter — and only when the field type is applicable (parameterized).
@Test
public void testMaxLength() {
    when(config.isIncludeJsr303Annotations()).thenReturn(true);
    final int maxValue = new Random().nextInt();
    when(subNode.asInt()).thenReturn(maxValue);
    when(node.get("maxLength")).thenReturn(subNode);
    when(fieldVar.annotate(sizeClass)).thenReturn(annotation);
    when(node.has("maxLength")).thenReturn(true);
    when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());
    JFieldVar result = rule.apply("node", node, null, fieldVar, null);
    assertSame(fieldVar, result);
    verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(sizeClass);
    verify(annotation, times(isApplicable ? 1 : 0)).param("max", maxValue);
    verify(annotation, never()).param(eq("min"), anyInt());
}
|
/**
 * Handles removal of a job graph, but only while this process is RUNNING;
 * in any other state the notification is ignored.
 */
@Override
public void onRemovedJobGraph(JobID jobId) {
    runIfStateIs(State.RUNNING, () -> handleRemovedJobGraph(jobId));
}
|
// A failing job-graph removal inside the dispatcher service must escalate
// to the fatal error handler with the original exception as cause.
@Test
void onRemovedJobGraph_failingRemovalCall_failsFatally() throws Exception {
    final FlinkException testException = new FlinkException("Test exception");
    final TestingDispatcherGatewayService testingDispatcherService =
            TestingDispatcherGatewayService.newBuilder()
                    .setOnRemovedJobGraphFunction(
                            jobID -> FutureUtils.completedExceptionally(testException))
                    .build();
    dispatcherServiceFactory =
            createFactoryBasedOnGenericSupplier(() -> testingDispatcherService);
    try (final SessionDispatcherLeaderProcess dispatcherLeaderProcess =
            createDispatcherLeaderProcess()) {
        dispatcherLeaderProcess.start();
        // wait for the dispatcher process to be created
        dispatcherLeaderProcess.getDispatcherGateway().get();
        // now notify the dispatcher service
        dispatcherLeaderProcess.onRemovedJobGraph(JOB_GRAPH.getJobID());
        final Throwable fatalError = fatalErrorHandler.getErrorFuture().join();
        assertThat(fatalError).hasCause(testException);
        fatalErrorHandler.clearError();
    }
}
|
public Optional<ScimGroupDto> findByScimUuid(DbSession dbSession, String scimGroupUuid) {
    // The mapper yields null when no group matches; wrap it so callers get an Optional.
    final ScimGroupDto dto = mapper(dbSession).findByScimUuid(scimGroupUuid);
    return Optional.ofNullable(dto);
}
|
@Test
void findByScimUuid_whenScimUuidNotFound_shouldReturnEmptyOptional() {
    // Looking up a scim uuid that was never inserted must yield an empty Optional.
    assertThat(scimGroupDao.findByScimUuid(db.getSession(), "unknownId"))
            .as("lookup of unknown scim uuid")
            .isEmpty();
}
|
@Override
public PollResult poll(long currentTimeMs) {
    // Delegate to the shared internal poll loop: build fetch requests for all
    // fetchable partitions and register the success/failure response handlers.
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
|
@Test
public void testEmptyControlBatch() {
    // Verifies that a READ_COMMITTED fetch tolerates an empty control batch at the
    // head of the log and still delivers the subsequent committed records.
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 1;
    // Empty control batch should not cause an exception
    DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.MAGIC_VALUE_V2, 1L,
            (short) 0, -1, 0, 0,
            RecordBatch.NO_PARTITION_LEADER_EPOCH, TimestampType.CREATE_TIME, time.milliseconds(),
            true, true);
    // Two transactional records followed by a commit marker for producer id 1.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    // The outgoing request must carry the READ_COMMITTED isolation level.
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
        return true;
    }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    // Both committed records are visible; the empty control batch contributes none.
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(fetchedRecords.get(tp0).size(), 2);
}
|
@Around("dataPermissionCut()")
public Object around(final ProceedingJoinPoint point) {
    // CHECKSTYLE:OFF
    try {
        // Proceed with arguments rewritten to include the data-permission filter.
        return point.proceed(getFilterSQLData(point));
    } catch (Throwable cause) {
        // Surface any failure from the advised method as a ShenyuException.
        throw new ShenyuException(cause);
    }
    // CHECKSTYLE:ON
}
|
@Test
public void testAround() {
    // A bare mocked join point makes the advice fail, which must be wrapped
    // into a ShenyuException.
    ProceedingJoinPoint point = mock(ProceedingJoinPoint.class);
    boolean shenyuExceptionThrown = false;
    try {
        dataPermissionAspect.around(point);
    } catch (ShenyuException expected) {
        shenyuExceptionThrown = true;
    }
    assertTrue(shenyuExceptionThrown);
}
|
/**
 * Copies bytes from {@code f} into {@code buf} using {@code temp} as a staging array.
 * Keeps reading while each read fills the staging array completely, then copies the
 * final partial chunk. Returns the number of bytes transferred, or -1 if the stream
 * was at EOF and nothing was transferred.
 */
static int readDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
    int totalBytesRead = 0;
    int bytesRead;
    while (true) {
        // Never request more than the buffer can still hold.
        int nextReadLength = Math.min(buf.remaining(), temp.length);
        bytesRead = f.read(temp, 0, nextReadLength);
        if (bytesRead != temp.length) {
            // Short read (or EOF, or buffer nearly full): leave the loop.
            break;
        }
        buf.put(temp);
        totalBytesRead += bytesRead;
    }
    if (bytesRead < 0) {
        // EOF: report -1 only when no bytes at all were transferred.
        return totalBytesRead == 0 ? -1 : totalBytesRead;
    }
    // Copy the trailing partial chunk (possibly zero bytes).
    buf.put(temp, 0, bytesRead);
    return totalBytesRead + bytesRead;
}
|
@Test
public void testDirectLimit() throws Exception {
    // Reads must respect the buffer's limit (8), not its capacity (20).
    // MockInputStream(7) — presumably yields at most 7 bytes per read; confirm in its source.
    ByteBuffer readBuffer = ByteBuffer.allocate(20);
    readBuffer.limit(8);
    MockInputStream stream = new MockInputStream(7);
    int len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(7, len);
    Assert.assertEquals(7, readBuffer.position());
    Assert.assertEquals(8, readBuffer.limit());
    // Second read can only fill the single byte left before the limit.
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(1, len);
    Assert.assertEquals(8, readBuffer.position());
    Assert.assertEquals(8, readBuffer.limit());
    // Buffer full: a further read transfers nothing.
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(0, len);
    readBuffer.flip();
    Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 8), readBuffer);
}
|
public boolean usable() {
    // Guard clause: without the proc file there is nothing to collect from.
    if (!path.toFile().exists()) {
        LOG.debug("Disabling IO metrics collection because {} does not exist.", path);
        return false;
    }
    // The file exists; usability depends on whether its values can be refreshed now.
    return updateValues(time.milliseconds());
}
|
@Test
public void testUnableToReadNonexistentProcFile() throws IOException {
    TestDirectory testDirectory = new TestDirectory();
    Time mockTime = new MockTime(0L, 100L, 1000L);
    LinuxIoMetricsCollector collector =
            new LinuxIoMetricsCollector(testDirectory.baseDir.getAbsolutePath(), mockTime);
    // The proc file was never written, so the collector must report itself unusable.
    assertFalse(collector.usable());
}
|
public void writeEndpointCertificateMetadata(ApplicationId application, EndpointCertificateMetadata endpointCertificateMetadata) {
    try {
        // Serialize the metadata to JSON via Slime and persist it at the application's node.
        Slime slime = new Slime();
        EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata, slime.setObject());
        byte[] json = SlimeUtils.toJsonBytes(slime);
        curator.set(endpointCertificateMetadataPathOf(application), json);
    } catch (Exception e) {
        throw new RuntimeException("Could not write endpoint certificate metadata for " + application, e);
    }
}
|
@Test
public void can_write_object_format() {
    var metadata = new EndpointCertificateMetadata("key-name", "cert-name", 1, digicert);
    endpointCertificateMetadataStore.writeEndpointCertificateMetadata(applicationId, metadata);
    // The stored bytes must be the exact JSON object serialization of the metadata.
    var storedJson = new String(curator.getData(endpointCertificateMetadataPath).orElseThrow());
    assertEquals("{\"keyName\":\"key-name\",\"certName\":\"cert-name\",\"version\":1,\"issuer\":\"digicert\"}",
                 storedJson);
}
|
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    // A fully-null record converts to a null payload.
    if (schema == null && value == null) {
        return null;
    }
    // With schemas enabled the value is wrapped in a schema+payload envelope.
    final JsonNode jsonValue;
    if (config.schemasEnabled()) {
        jsonValue = convertToJsonWithEnvelope(schema, value);
    } else {
        jsonValue = convertToJsonWithoutEnvelope(schema, value);
    }
    try {
        return serializer.serialize(topic, jsonValue);
    } catch (SerializationException e) {
        throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
    }
}
|
@Test
public void bytesToJson() throws IOException {
    byte[] serialized = converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, "test-string".getBytes());
    JsonNode converted = parse(serialized);
    validateEnvelope(converted);
    // Schema field must describe a mandatory bytes type.
    assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    // Payload round-trips to the original bytes.
    ByteBuffer expectedPayload = ByteBuffer.wrap("test-string".getBytes());
    ByteBuffer actualPayload = ByteBuffer.wrap(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).binaryValue());
    assertEquals(expectedPayload, actualPayload);
}
|
@Override
public boolean isRegistered(JobID jobId) {
    // A job counts as registered iff a runner entry exists for its id.
    return jobManagerRunners.containsKey(jobId);
}
|
@Test
void testIsRegistered() {
    final JobID jobId = new JobID();
    final TestingJobManagerRunner runner =
            TestingJobManagerRunner.newBuilder().setJobId(jobId).build();
    testInstance.register(runner);
    // Once registered, the registry must report the job id as known.
    assertThat(testInstance.isRegistered(jobId)).isTrue();
}
|
/**
 * Expands an Eddystone-URL compressed byte array back into its URL string.
 * The low nibble of the first byte selects the scheme prefix; subsequent bytes
 * are either registered top-level-domain expansions or literal ASCII characters.
 */
public static String uncompress(byte[] compressedURL) {
    // StringBuilder instead of StringBuffer: the builder is method-local, so
    // StringBuffer's synchronization would be pure overhead.
    StringBuilder url = new StringBuilder();
    switch (compressedURL[0] & 0x0f) {
        case EDDYSTONE_URL_PROTOCOL_HTTP_WWW:
            url.append(URL_PROTOCOL_HTTP_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW:
            url.append(URL_PROTOCOL_HTTPS_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTP:
            url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS:
            url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH);
            break;
        default:
            // Unknown scheme code: emit no prefix and continue with the body.
            break;
    }
    byte lastByte = -1;
    for (int i = 1; i < compressedURL.length; i++) {
        byte b = compressedURL[i];
        // Two consecutive zero bytes terminate the encoded data — presumably
        // trailing padding; a single 0x00 is a valid TLD code.
        if (lastByte == 0 && b == 0) {
            break;
        }
        lastByte = b;
        String tld = topLevelDomainForByte(b);
        if (tld != null) {
            url.append(tld);
        } else {
            url.append((char) b);
        }
    }
    return url.toString();
}
|
@Test
public void testUncompressHttpsURLWithTrailingSlash() {
    // 0x01 encodes the "https://www." prefix; the trailing 0x00 expands to ".com/".
    byte[] compressed = {0x01, 'r', 'a', 'd', 'i', 'u', 's', 'n', 'e', 't', 'w', 'o', 'r', 'k', 's', 0x00};
    String expanded = UrlBeaconUrlCompressor.uncompress(compressed);
    assertEquals("https://www.radiusnetworks.com/", expanded);
}
|
@RequiresApi(Build.VERSION_CODES.R)
@Override
public boolean onInlineSuggestionsResponse(@NonNull InlineSuggestionsResponse response) {
    // Compute emptiness once and reuse it for both the guard and the return value
    // (the original checked size() > 0 and then recomputed !isEmpty()).
    final List<InlineSuggestion> inlineSuggestions = response.getInlineSuggestions();
    final boolean hasSuggestions = !inlineSuggestions.isEmpty();
    if (hasSuggestions) {
        // Publish the suggestions on the action strip and make the strip visible.
        mInlineSuggestionAction.onNewSuggestions(inlineSuggestions);
        getInputViewContainer().addStripAction(mInlineSuggestionAction, true);
        getInputViewContainer().setActionsStripVisibility(true);
    }
    return hasSuggestions;
}
|
@Test
public void testActionStripAddedForAi() {
    simulateOnStartInputFlow();
    // Deliver an inline-suggestions response carrying an InlineContentView.
    mAnySoftKeyboardUnderTest.onInlineSuggestionsResponse(
            mockResponse(new String[] {"aiai", "newFeature"}, Mockito.mock(InlineContentView.class)));
    ImageView icon =
            mAnySoftKeyboardUnderTest
                    .getInputViewContainer()
                    .findViewById(R.id.inline_suggestions_strip_icon);
    // The strip icon must be the AI variant of the inline-suggestions drawable.
    Assert.assertEquals(
            R.drawable.ic_inline_suggestions_ai,
            Shadows.shadowOf(icon.getDrawable()).getCreatedFromResId());
}
|
@Override
public URL getLocalArtifactUrl(DependencyJar dependency) {
    // Pure pass-through: resolution is handled entirely by the delegate resolver.
    return delegate.getLocalArtifactUrl(dependency);
}
|
@Test
public void whenRobolectricDepsPropertiesPropertyAndOfflineProperty() throws Exception {
    // With robolectric.offline=true and an explicit deps.properties mapping, artifact
    // resolution must come from the local file named in the mapping (no network).
    Path depsPath =
        tempDirectory.createFile(
            "deps.properties", "org.robolectric\\:android-all\\:" + VERSION + ": file-123.jar");
    Path jarPath = tempDirectory.createFile("file-123.jar", "...");
    properties.setProperty("robolectric-deps.properties", depsPath.toString());
    properties.setProperty("robolectric.offline", "true");
    DependencyResolver resolver = new LegacyDependencyResolver(properties, mockClassLoader);
    URL jarUrl = resolver.getLocalArtifactUrl(DEPENDENCY_COORDS);
    // The resolved URL points at the jar file created above.
    assertThat(Fs.fromUrl(jarUrl)).isEqualTo(jarPath);
}
|
@Override
public void updateDiyTemplate(DiyTemplateUpdateReqVO updateReqVO) {
    // Ensure the template exists before touching it.
    validateDiyTemplateExists(updateReqVO.getId());
    // Ensure the (possibly changed) name is not taken by another template.
    validateNameUnique(updateReqVO.getId(), updateReqVO.getName());
    // Map the request onto the persistence object and apply the update.
    DiyTemplateDO entity = DiyTemplateConvert.INSTANCE.convert(updateReqVO);
    diyTemplateMapper.updateById(entity);
}
|
@Test
public void testUpdateDiyTemplate_notExists() {
    // Build a request whose id does not correspond to any stored template.
    DiyTemplateUpdateReqVO reqVO = randomPojo(DiyTemplateUpdateReqVO.class);
    // Updating a missing template must fail with DIY_TEMPLATE_NOT_EXISTS.
    assertServiceException(() -> diyTemplateService.updateDiyTemplate(reqVO), DIY_TEMPLATE_NOT_EXISTS);
}
|
// Returns an id to the pool: locate its bucket by integer division, hand the id
// back, and opportunistically drop a fully-replenished bucket when another bucket
// still has plenty of usable ids (memory-saving heuristic).
void pushId(int id) {
    int bucketIdx = id / BUCKET_SIZE;
    if (bucketIdx >= idBuckets.length) {
        // An id beyond the addressable bucket range was never produced by this space.
        throw new IllegalArgumentException("id too large: " + id);
    }
    DnsQueryIdRange bucket = idBuckets[bucketIdx];
    // The bucket must still exist — ids are only pushed back for buckets we handed out of.
    assert bucket != null;
    bucket.pushId(id);
    if (bucket.usableIds() == bucket.maxUsableIds()) {
        // All ids are usable in this bucket. Let's check if there are other buckets left that have still
        // some space left and if so drop this bucket.
        for (int idx = 0; idx < idBuckets.length; idx++) {
            if (idx != bucketIdx) {
                DnsQueryIdRange otherBucket = idBuckets[idx];
                if (otherBucket != null && otherBucket.usableIds() > BUCKET_DROP_THRESHOLD) {
                    // Drop bucket on the floor to reduce memory usage, there is another bucket left we can
                    // use that still has enough ids to use.
                    idBuckets[bucketIdx] = null;
                    return;
                }
            }
        }
    }
}
|
@Test
public void testOverflow() {
    final DnsQueryIdSpace ids = new DnsQueryIdSpace();
    // Pushing back an id that was never produced by the space must be rejected.
    assertThrows(IllegalStateException.class, () -> ids.pushId(1));
}
|
/**
 * Maps a SeaTunnel column definition back onto the corresponding SQL Server type,
 * clamping out-of-range decimal precision/scale and time/timestamp scales and
 * logging a warning whenever a value had to be adjusted.
 *
 * Fix: the TIME branch's warning previously reported {@code MAX_SCALE} while the
 * value is actually clamped to {@code MAX_TIME_SCALE} (the TIMESTAMP branch
 * correctly reports {@code MAX_TIMESTAMP_SCALE}); the logged limit now matches
 * the clamp.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(SQLSERVER_BIT);
            builder.dataType(SQLSERVER_BIT);
            break;
        case TINYINT:
            builder.columnType(SQLSERVER_TINYINT);
            builder.dataType(SQLSERVER_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(SQLSERVER_SMALLINT);
            builder.dataType(SQLSERVER_SMALLINT);
            break;
        case INT:
            builder.columnType(SQLSERVER_INT);
            builder.dataType(SQLSERVER_INT);
            break;
        case BIGINT:
            builder.columnType(SQLSERVER_BIGINT);
            builder.dataType(SQLSERVER_BIGINT);
            break;
        case FLOAT:
            builder.columnType(SQLSERVER_REAL);
            builder.dataType(SQLSERVER_REAL);
            break;
        case DOUBLE:
            builder.columnType(SQLSERVER_FLOAT);
            builder.dataType(SQLSERVER_FLOAT);
            break;
        case DECIMAL:
            // Clamp precision into (0, MAX_PRECISION] and scale into [0, MAX_SCALE],
            // shrinking the scale proportionally when precision is reduced.
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", SQLSERVER_DECIMAL, precision, scale));
            builder.dataType(SQLSERVER_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case STRING:
            // Unknown or oversized lengths fall back to NVARCHAR(MAX).
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(MAX_NVARCHAR);
                builder.dataType(MAX_NVARCHAR);
            } else if (column.getColumnLength() <= MAX_NVARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", SQLSERVER_NVARCHAR, column.getColumnLength()));
                builder.dataType(SQLSERVER_NVARCHAR);
                builder.length(column.getColumnLength());
            } else {
                builder.columnType(MAX_NVARCHAR);
                builder.dataType(MAX_NVARCHAR);
                builder.length(column.getColumnLength());
            }
            break;
        case BYTES:
            // Unknown or oversized lengths fall back to VARBINARY(MAX).
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(MAX_VARBINARY);
                builder.dataType(SQLSERVER_VARBINARY);
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", SQLSERVER_VARBINARY, column.getColumnLength()));
                builder.dataType(SQLSERVER_VARBINARY);
                builder.length(column.getColumnLength());
            } else {
                builder.columnType(MAX_VARBINARY);
                builder.dataType(SQLSERVER_VARBINARY);
                builder.length(column.getColumnLength());
            }
            break;
        case DATE:
            builder.columnType(SQLSERVER_DATE);
            builder.dataType(SQLSERVER_DATE);
            break;
        case TIME:
            if (column.getScale() != null && column.getScale() > 0) {
                int timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    // Fixed: report the actual clamp limit (MAX_TIME_SCALE), not MAX_SCALE.
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", SQLSERVER_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(SQLSERVER_TIME);
            }
            builder.dataType(SQLSERVER_TIME);
            break;
        case TIMESTAMP:
            if (column.getScale() != null && column.getScale() > 0) {
                int timestampScale = column.getScale();
                if (timestampScale > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(
                        String.format("%s(%s)", SQLSERVER_DATETIME2, timestampScale));
                builder.scale(timestampScale);
            } else {
                builder.columnType(SQLSERVER_DATETIME2);
            }
            builder.dataType(SQLSERVER_DATETIME2);
            break;
        default:
            // Types with no SQL Server equivalent (e.g. MAP) are rejected explicitly.
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.SQLSERVER,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
|
@Test
public void testReconvertUnsupported() {
    // A MAP column has no SQL Server equivalent, so reconvert must raise the
    // connector's SeaTunnelRuntimeException (any other outcome fails the test).
    // assertThrows replaces the manual try/fail/catch pattern with the same semantics.
    Column column =
            PhysicalColumn.of(
                    "test",
                    new MapType<>(BasicType.STRING_TYPE, BasicType.STRING_TYPE),
                    (Long) null,
                    true,
                    null,
                    null);
    Assertions.assertThrows(
            SeaTunnelRuntimeException.class,
            () -> SqlServerTypeConverter.INSTANCE.reconvert(column));
}
|
@Override
public Map<K, V> getCachedMap() {
    // Exposes the local near-cache view's backing map directly (no defensive copy).
    return localCacheView.getCachedMap();
}
|
@Test
public void testReplaceOldValueFail() {
    RLocalCachedMap<SimpleKey, SimpleValue> map = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
    Map<SimpleKey, SimpleValue> cache = map.getCachedMap();

    map.put(new SimpleKey("1"), new SimpleValue("2"));

    // replace() with a non-matching expected value must leave the entry untouched.
    boolean replaced = map.replace(new SimpleKey("1"), new SimpleValue("43"), new SimpleValue("31"));
    Assertions.assertFalse(replaced);

    SimpleValue current = map.get(new SimpleKey("1"));
    Assertions.assertEquals("2", current.getValue());
    // The local cache still holds exactly the one original entry.
    assertThat(cache.size()).isEqualTo(1);
}
|
public String convert(ILoggingEvent event) {
    // Syslog PRI value = facility + severity derived from the event level.
    int pri = facility + LevelToSyslogSeverity.convert(event);
    // Emit "<PRI>TIMESTAMP HOSTNAME " — the header prefix of an RFC 3164 message.
    StringBuilder sb = new StringBuilder()
            .append('<')
            .append(pri)
            .append('>')
            .append(computeTimeStampString(event.getTimeStamp()))
            .append(' ')
            .append(localHostName)
            .append(' ');
    return sb.toString();
}
|
@Test
public void datesLessThanTen() {
    // RFC 3164, section 4.1.2:
    // If the day of the month is less than 10, then it MUST be represented as
    // a space and then the number. For example, the 7th day of August would be
    // represented as "Aug 7", with two spaces between the "g" and the "7".
    LoggingEvent le = createLoggingEvent();
    calendar.set(2012, Calendar.AUGUST, 7, 13, 15, 0);
    le.setTimeStamp(calendar.getTimeInMillis());
    String expectedHeader = "<191>Aug 7 13:15:00 " + HOSTNAME + " ";
    assertEquals(expectedHeader, converter.convert(le));
}
|
@Override
public int hashCode() {
    // Combine the superclass hash with the urn's using the conventional 31 multiplier.
    return 31 * super.hashCode() + urn.hashCode();
}
|
@Test
public void hashcode_sameURN() {
    // Two keys built from the same scheduler name and task URN must hash identically.
    ScheduledTaskHandler handler = ScheduledTaskHandlerImpl.of(1, "MyScheduler", "MyTask");
    String taskUrn = handler.toUrn();
    ScheduledExecutorWaitNotifyKey first = new ScheduledExecutorWaitNotifyKey("myScheduler", taskUrn);
    ScheduledExecutorWaitNotifyKey second = new ScheduledExecutorWaitNotifyKey("myScheduler", taskUrn);
    assertEquals(first.hashCode(), second.hashCode());
}
|
@Override
public Set<MappedFieldTypeDTO> fieldTypesByStreamIds(Collection<String> streamIds, TimeRange timeRange) {
    // Resolve the index sets backing the streams and the index names active in the range.
    final Set<String> indexSets = streamService.indexSetIdsByIds(streamIds);
    final Set<String> indexNames = this.indexLookup.indexNamesForStreamsInTimeRange(ImmutableSet.copyOf(streamIds), timeRange);
    // Collect field types from matching indices; when stream-aware types are
    // maintained, keep only fields that occur in at least one requested stream.
    final Set<FieldTypeDTO> fieldTypeDTOs = this.indexFieldTypesService.findForIndexSets(indexSets).stream()
            .filter(types -> indexNames.contains(types.indexName()))
            .flatMap(types -> types.fields().stream())
            .filter(dto -> !streamAwareFieldTypes || !Collections.disjoint(dto.streams(), streamIds))
            .collect(Collectors.toSet());
    return mergeCompoundFieldTypes(fieldTypeDTOs.stream().map(this::mapPhysicalFieldType));
}
|
@Test
public void testDifferenceBetweenStreamAwareAndUnawareFieldTypeRetrieval() {
    // Compares field-type retrieval with stream-aware filtering ON vs OFF:
    // the aware variant must drop fields not present in the requested stream.
    final Configuration withStreamAwarenessOn = spy(new Configuration());
    doReturn(true).when(withStreamAwarenessOn).maintainsStreamAwareFieldTypes();
    MappedFieldTypesServiceImpl streamAwareMappedFieldTypesService = new MappedFieldTypesServiceImpl(withStreamAwarenessOn, streamService, indexFieldTypesService, new FieldTypeMapper(), indexLookup, fieldUnitObtainer);
    // Two indices: testIndex carries stream1-only fields; testIndex2 mixes streams.
    final List<IndexFieldTypesDTO> fieldTypes = ImmutableList.of(
            createIndexTypes(
                    "deadbeef",
                    "testIndex",
                    FieldTypeDTO.builder().fieldName("field1").physicalType("keyword").streams(Set.of("stream1")).build(),
                    FieldTypeDTO.builder().fieldName("field3").physicalType("keyword").streams(Set.of("stream1")).build()
            ),
            createIndexTypes(
                    "affeaffe",
                    "testIndex2",
                    FieldTypeDTO.builder().fieldName("field1").physicalType("keyword").streams(Set.of("stream1", "stream2")).build(),
                    FieldTypeDTO.builder().fieldName("field2").physicalType("keyword").streams(Set.of("stream2")).build(),
                    FieldTypeDTO.builder().fieldName("field4").physicalType("keyword").streams(Set.of("stream1")).build()
            )
    );
    when(indexFieldTypesService.findForIndexSets(Collections.singleton("indexSetId"))).thenReturn(fieldTypes);
    when(indexLookup.indexNamesForStreamsInTimeRange(Collections.singleton("stream1"), RelativeRange.allTime())).thenReturn(ImmutableSet.of("testIndex", "testIndex2"));
    when(indexLookup.indexNamesForStreamsInTimeRange(Collections.singleton("stream2"), RelativeRange.allTime())).thenReturn(ImmutableSet.of("testIndex2"));
    Set<MappedFieldTypeDTO> result = this.mappedFieldTypesService.fieldTypesByStreamIds(Collections.singleton("stream1"), RelativeRange.allTime());
    //All fields are expected
    assertThat(result).containsExactlyInAnyOrder(
            MappedFieldTypeDTO.create("field1", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field2", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field3", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field4", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable")))
    );
    result = streamAwareMappedFieldTypesService.fieldTypesByStreamIds(Collections.singleton("stream1"), RelativeRange.allTime());
    //Stream-aware approach excludes field2, as it is present in stream2 only
    assertThat(result).containsExactlyInAnyOrder(
            MappedFieldTypeDTO.create("field1", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field3", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field4", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable")))
    );
    result = this.mappedFieldTypesService.fieldTypesByStreamIds(Collections.singleton("stream2"), RelativeRange.allTime());
    //Field3 is excludes, as it is present only in "testIndex", ignored during indexLookup phase
    assertThat(result).containsExactlyInAnyOrder(
            MappedFieldTypeDTO.create("field1", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field2", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field4", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable")))
    );
    //Stream-aware approach excludes, additionally to field3, field4 as well, as it is present in stream1 only
    result = streamAwareMappedFieldTypesService.fieldTypesByStreamIds(Collections.singleton("stream2"), RelativeRange.allTime());
    assertThat(result).containsExactlyInAnyOrder(
            MappedFieldTypeDTO.create("field1", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable"))),
            MappedFieldTypeDTO.create("field2", FieldTypes.Type.createType("string", ImmutableSet.of("enumerable")))
    );
}
|
@Override
public long getLargestWorkerCount() {
    // Pure delegation to the async-execution monitoring component.
    return asyncExecutionMonitoring.getLargestWorkerCount();
}
|
@Test
public void getLargestWorkerCount_delegates_to_AsyncExecutionMonitoring() {
    // Stub the delegate, then check the value passes through and the delegate is invoked.
    when(asyncExecutionMonitoring.getLargestWorkerCount()).thenReturn(12);
    assertThat(underTest.getLargestWorkerCount()).isEqualTo(12);
    verify(asyncExecutionMonitoring).getLargestWorkerCount();
}
|
@Udf(description = "When transforming a map, "
    + "two functions must be provided. "
    + "For each map entry, the first function provided will "
    + "be applied to the key and the second one applied to the value. "
    + "Each function must have two arguments. "
    + "The two arguments for each function are in order: the key and then the value. "
    + "The transformed map is returned."
)
public <K,V,R,T> Map<R,T> transformMap(
    @UdfParameter(description = "The map") final Map<K, V> map,
    @UdfParameter(description = "The key lambda function") final BiFunction<K, V, R> biFunction1,
    @UdfParameter(description = "The value lambda function") final BiFunction<K, V, T> biFunction2
) {
    // A missing map or missing transform yields a null result.
    if (map == null || biFunction1 == null || biFunction2 == null) {
        return null;
    }
    // Both lambdas see the full (key, value) pair of every entry.
    return map.entrySet().stream()
        .collect(Collectors.toMap(
            e -> biFunction1.apply(e.getKey(), e.getValue()),
            e -> biFunction2.apply(e.getKey(), e.getValue())));
}
|
@Test
public void shouldReturnTransformedMap() {
    // An empty input map transforms to an empty map.
    final Map<Integer, Integer> map1 = new HashMap<>();
    assertThat(udf.transformMap(map1, biFunction1(), biFunction2()), is(Collections.emptyMap()));

    map1.put(3, 100);
    map1.put(1, -2);
    final Map<Integer, Integer> expectedInts = new HashMap<>();
    expectedInts.put(-97, 97);
    expectedInts.put(3, -3);
    assertThat(udf.transformMap(map1, biFunction1(), biFunction2()), is(expectedInts));

    final Map<String, String> map2 = new HashMap<>();
    assertThat(udf.transformMap(map2, biFunction3(), biFunction4()), is(Collections.emptyMap()));

    map2.put("123", "456789");
    map2.put("hello", "hi");
    final Map<String, Boolean> expectedStrings = new HashMap<>();
    expectedStrings.put("456789123", false);
    expectedStrings.put("hihello", true);
    assertThat(udf.transformMap(map2, biFunction3(), biFunction4()), is(expectedStrings));
}
|
public static int getGenderByIdCard(String idcard) {
    Assert.notBlank(idcard);
    final int len = idcard.length();
    // Only 15-digit (legacy) and 18-digit cards are valid.
    if (len != CHINA_ID_MIN_LENGTH && len != CHINA_ID_MAX_LENGTH) {
        throw new IllegalArgumentException("ID Card length must be 15 or 18");
    }
    // Normalize legacy 15-digit cards to the 18-digit format first.
    if (len == CHINA_ID_MIN_LENGTH) {
        idcard = convert15To18(idcard);
    }
    // The 17th character (index 16) encodes gender: odd digit -> 1, even digit -> 0.
    final char genderChar = Objects.requireNonNull(idcard).charAt(16);
    return (genderChar % 2 != 0) ? 1 : 0;
}
|
@Test
public void getGenderByIdCardTest() {
    // ID_18's gender digit is odd, so the expected gender code is 1.
    final int gender = IdcardUtil.getGenderByIdCard(ID_18);
    assertEquals(1, gender);
}
|
@Nonnull
public static <C> SourceBuilder<C>.TimestampedStream<Void> timestampedStream(
        @Nonnull String name,
        @Nonnull FunctionEx<? super Processor.Context, ? extends C> createFn
) {
    // Build the source first, then instantiate the inner TimestampedStream bound to it.
    final SourceBuilder<C> builder = new SourceBuilder<>(name, createFn);
    return builder.new TimestampedStream<>();
}
|
@Test
public void stream_socketSource_withTimestamps_andLateness() throws IOException {
    // End-to-end check: a custom timestamped socket source feeding a tumbling
    // window pipeline with an allowed lateness must produce one count-1 window
    // per item up to the lateness cutoff.
    // Given
    try (ServerSocket serverSocket = new ServerSocket(0)) {
        startServer(serverSocket);
        // When
        int localPort = serverSocket.getLocalPort();
        // Each line carries its timestamp after a fixed prefix.
        FunctionEx<String, Long> timestampFn = line -> Long.valueOf(line.substring(LINE_PREFIX.length()));
        int lateness = 10;
        long lastExpectedTs = itemCount - lateness;
        StreamSource<String> socketSource = SourceBuilder
                .timestampedStream("socket-source-with-timestamps", ctx -> socketReader(localPort))
                .<String>fillBufferFn((in, buf) -> {
                    String line = in.readLine();
                    if (line != null) {
                        long ts = timestampFn.apply(line);
                        buf.add(line, ts);
                        if (ts >= lastExpectedTs) {
                            System.out.println(line);
                        }
                    }
                })
                .destroyFn(BufferedReader::close)
                .build();
        // Then
        Pipeline p = Pipeline.create();
        p.readFrom(socketSource)
         .withNativeTimestamps(lateness)
         .window(tumbling(1))
         .aggregate(AggregateOperations.counting())
         .writeTo(sinkList());
        hz().getJet().newJob(p);
        // Items past the lateness cutoff may be dropped, hence the shorter range.
        List<WindowResult<Long>> expected = LongStream.range(1, itemCount - lateness)
                .mapToObj(i -> new WindowResult<>(i - 1, i, 1L))
                .collect(toList());
        assertTrueEventually(() -> assertEquals(expected, new ArrayList<>(sinkList)), 10);
    }
}
|
// Trivial accessor for the resource properties captured at construction time.
public ResourceProperties getResourceProperties()
{
    return _resourceProperties;
}
|
@Test
public void testResourceProperties()
{
    // Builds a Request from a full ResourceSpec and verifies that the derived
    // ResourceProperties mirror the spec's methods, key types and value type.
    Set<ResourceMethod> expectedSupportedMethods = new HashSet<>();
    expectedSupportedMethods.add(ResourceMethod.GET);
    expectedSupportedMethods.add(ResourceMethod.BATCH_PARTIAL_UPDATE);
    ResourceSpec expectedResourceSpec = new ResourceSpecImpl(
        expectedSupportedMethods,
        null,
        null,
        ComplexResourceKey.class,
        TestRecord.class,
        TestRecord.class,
        TestRecord.class,
        Collections.<String, Object>emptyMap());
    Map<String, Object> pathKeys = new HashMap<>();
    pathKeys.put("id", new ComplexResourceKey<>(new TestRecord(), new TestRecord()));
    Request<TestRecord> request = new Request<>(ResourceMethod.GET, null,
        Collections.<String, String>emptyMap(),
        Collections.<HttpCookie>emptyList(),
        new EntityResponseDecoder<>(TestRecord.class),
        expectedResourceSpec, Collections.<String, Object>emptyMap(),
        Collections.<String, Class<?>>emptyMap(), null, "testRecord",
        pathKeys, RestliRequestOptions.DEFAULT_OPTIONS, null);
    // The properties exposed by the request must equal those derived from the spec.
    ResourceProperties expectedResourceProperties =
        new ResourcePropertiesImpl(expectedResourceSpec.getSupportedMethods(),
                                   expectedResourceSpec.getKeyType(),
                                   expectedResourceSpec.getComplexKeyType(),
                                   expectedResourceSpec.getValueType(),
                                   expectedResourceSpec.getKeyParts());
    Assert.assertEquals(request.getResourceProperties(), expectedResourceProperties);
}
|
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final ModelId modelId = entityDescriptor.id();
    final Optional<EventDefinitionDto> eventDefinition = eventDefinitionService.get(modelId.id());
    // Nothing to export when the referenced event definition no longer exists.
    if (!eventDefinition.isPresent()) {
        LOG.debug("Couldn't find event definition {}", entityDescriptor);
        return Optional.empty();
    }
    final Entity exported = exportNativeEntity(eventDefinition.get(), entityDescriptorIds);
    return Optional.of(exported);
}
|
@Test
@MongoDBFixtures("EventDefinitionFacadeTest.json")
public void exportEntity() {
    // Exports the fixture's event definition and verifies every serialized field.
    final ModelId id = ModelId.of("5d4032513d2746703d1467f6");
    // A job definition must exist for the event definition to count as scheduled.
    when(jobDefinitionService.getByConfigField(eq("event_definition_id"), eq(id.id())))
            .thenReturn(Optional.of(mock(JobDefinitionDto.class)));
    final EntityDescriptor descriptor = EntityDescriptor.create(id, ModelTypes.EVENT_DEFINITION_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Optional<Entity> entity = facade.exportEntity(descriptor, entityDescriptorIds);
    assertThat(entity).isPresent();
    final EntityV1 entityV1 = (EntityV1) entity.get();
    final EventDefinitionEntity eventDefinitionEntity = objectMapper.convertValue(entityV1.data(),
            EventDefinitionEntity.class);
    assertThat(eventDefinitionEntity.title().asString()).isEqualTo("title");
    assertThat(eventDefinitionEntity.description().asString()).isEqualTo("description");
    assertThat(eventDefinitionEntity.remediationSteps().asString()).isEqualTo("remediation");
    assertThat(eventDefinitionEntity.config().type()).isEqualTo(AggregationEventProcessorConfigEntity.TYPE_NAME);
    assertThat(eventDefinitionEntity.isScheduled().asBoolean(ImmutableMap.of())).isTrue();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.