focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
public static <T> WindowedValue<T> of(
    T value, Instant timestamp, Collection<? extends BoundedWindow> windows, PaneInfo pane) {
  // Every WindowedValue must carry pane info and at least one window.
  checkArgument(pane != null, "WindowedValue requires PaneInfo, but it was null");
  checkArgument(windows.size() > 0, "WindowedValue requires windows, but there were none");
  // A single window gets the compact single-window representation; otherwise fall
  // through to the multi-window variant.
  return windows.size() == 1
      ? of(value, timestamp, windows.iterator().next(), pane)
      : new TimestampedValueInMultipleWindows<>(value, timestamp, windows, pane);
}
|
@Test
public void testExplodeWindowsInNoWindowsCrash() {
  // Creating a WindowedValue with an empty window collection must be rejected
  // (the factory requires at least one window).
  thrown.expect(IllegalArgumentException.class);
  WindowedValue.of("foo", Instant.now(), ImmutableList.of(), PaneInfo.NO_FIRING);
}
|
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) {
    // Partition id -1 is passed when the handler is addressed by UUID —
    // presumably a "no partition" sentinel; confirm against ScheduledTaskHandlerImpl.
    final int partitionId = -1;
    return new ScheduledTaskHandlerImpl(uuid, partitionId, schedulerName, taskName);
}
|
@Test(expected = IllegalArgumentException.class)
public void of_withWrongURN() {
    // A malformed handler URN must be rejected by the single-argument parse overload.
    ScheduledTaskHandler.of("iamwrong");
}
|
public static boolean evaluate(IncrementallyUpdatedFilterPredicate pred) {
  // Fail fast on null, then dispatch the predicate through the shared visitor.
  Objects.requireNonNull(pred, "pred cannot be null");
  return pred.accept(INSTANCE);
}
|
@Test
public void testShortCircuit() {
  // Sentinel inspector that fails loudly if it is ever visited; used below to
  // prove that Or/And evaluation short-circuits.
  ValueInspector neverCalled = new ValueInspector() {
    @Override
    public boolean accept(Visitor visitor) {
      throw new ShortCircuitException();
    }
  };
  // Sanity check: evaluating the sentinel directly does throw.
  try {
    evaluate(neverCalled);
    fail("this should throw");
  } catch (ShortCircuitException e) {
    // expected
  }
  // T || X should evaluate to true without inspecting X
  ValueInspector v = intIsEven();
  v.update(10);
  IncrementallyUpdatedFilterPredicate or = new Or(v, neverCalled);
  assertTrue(evaluate(or));
  v.reset();
  // F && X should evaluate to false without inspecting X
  v.update(11);
  IncrementallyUpdatedFilterPredicate and = new And(v, neverCalled);
  assertFalse(evaluate(and));
  v.reset();
}
|
static List<LocalObjectReference> imagePullSecrets(PodTemplate template, List<LocalObjectReference> defaultValue) {
    // Fall back to the default when no template is given or it declares no secrets.
    if (template == null || template.getImagePullSecrets() == null) {
        return defaultValue;
    }
    return template.getImagePullSecrets();
}
|
@Test
public void testImagePullSecrets() {
    List<LocalObjectReference> defaults = List.of(new LocalObjectReferenceBuilder().withName("default").build());
    List<LocalObjectReference> custom = List.of(new LocalObjectReferenceBuilder().withName("custom").build());
    // Null template and template without secrets both fall back to the defaults;
    // a template carrying secrets wins over the defaults.
    assertThat(WorkloadUtils.imagePullSecrets(null, defaults), is(defaults));
    assertThat(WorkloadUtils.imagePullSecrets(new PodTemplate(), defaults), is(defaults));
    assertThat(WorkloadUtils.imagePullSecrets(new PodTemplateBuilder().withImagePullSecrets(custom).build(), defaults), is(custom));
}
|
@Override
public boolean add(ResourceConfig resourceConfig) {
    // Reject duplicates and configs whose name is blank or missing.
    boolean rejected = this.contains(resourceConfig) || isBlank(resourceConfig.getName());
    if (rejected) {
        return false;
    }
    super.add(resourceConfig);
    return true;
}
|
@Test
public void shouldNotBeAbleToAddResourceWithWhiteSpaceAsName() {
    // A whitespace-only name counts as blank, so add() must silently refuse it.
    ResourceConfigs actual = new ResourceConfigs();
    actual.add(new ResourceConfig(" "));
    assertThat(actual.size(), is(0));
}
|
/**
 * Combines a collection of stages into a single stage that completes with all of their
 * results (in the input collection's iteration order) once every input has completed,
 * or completes exceptionally if any input fails.
 *
 * @param inputFutures the stages to wait on
 * @param <T> result type of each input stage
 * @return a stage yielding the collected results
 */
public static <T> CompletionStage<Collection<T>> flattenFutures(
    Collection<CompletionStage<T>> inputFutures) {
  // Convert each stage via toCompletableFuture() instead of
  // inputFutures.toArray(new CompletableFuture[0]): the latter throws
  // ArrayStoreException for CompletionStage implementations that are not
  // CompletableFuture subclasses, and stores minimal stages whose join()
  // throws UnsupportedOperationException.
  @SuppressWarnings("unchecked")
  CompletableFuture<T>[] futures =
      inputFutures.stream()
          .map(CompletionStage::toCompletableFuture)
          .toArray(CompletableFuture[]::new);
  // allOf completes when every input has completed; join() below is then non-blocking.
  return CompletableFuture.allOf(futures)
      .thenApply(
          ignored ->
              Stream.of(futures).map(CompletableFuture::join).collect(Collectors.toList()));
}
|
@Test
public void testFlattenFuturesForFailedFuture() {
  // One failed input must fail the flattened future with the original exception type.
  CompletionStage<Collection<String>> resultFuture =
      FutureUtils.flattenFutures(
          ImmutableList.of(
              CompletableFuture.completedFuture("hello"),
              createFailedFuture(new RuntimeException())));
  // handle() receives either (results, null) or (null, throwable); assert the latter.
  CompletionStage<Void> validationFuture =
      resultFuture.handle(
          (results, ex) -> {
            Assert.assertTrue(
                "Expected exception to be of RuntimeException", ex instanceof RuntimeException);
            return null;
          });
  // Block until the validation stage ran so the assertion is actually evaluated.
  validationFuture.toCompletableFuture().join();
}
|
@Override
public RouteContext route(final RouteContext routeContext, final BroadcastRule broadcastRule) {
    RouteContext result = new RouteContext();
    // Broadcast to every rule data source, but only those actually present on this instance.
    for (String dataSourceName : broadcastRule.getDataSourceNames()) {
        if (!resourceMetaData.getAllInstanceDataSourceNames().contains(dataSourceName)) {
            continue;
        }
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataSourceName, dataSourceName), Collections.emptyList()));
    }
    return result;
}
|
@Test
void assertRoute() {
    // Instance knows only ds_0, the rule declares ds_0 and ds_1 — the route must
    // contain exactly the intersection (ds_0).
    ResourceMetaData resourceMetaData = mock(ResourceMetaData.class);
    when(resourceMetaData.getAllInstanceDataSourceNames()).thenReturn(Collections.singleton("ds_0"));
    BroadcastInstanceBroadcastRoutingEngine engine = new BroadcastInstanceBroadcastRoutingEngine(resourceMetaData);
    BroadcastRule broadcastRule = mock(BroadcastRule.class);
    when(broadcastRule.getDataSourceNames()).thenReturn(Arrays.asList("ds_0", "ds_1"));
    RouteContext routeContext = engine.route(new RouteContext(), broadcastRule);
    assertThat(routeContext.getRouteUnits().size(), is(1));
    assertDataSourceRouteMapper(routeContext.getRouteUnits().iterator().next(), "ds_0");
}
|
/**
 * Computes the row-count flush threshold for the next consuming segment so that committed
 * segment sizes converge towards the stream config's desired segment size. Maintains an
 * exponentially-weighted rows-to-size ratio ({@code _latestSegmentRowsToSizeRatio}) learned
 * from previously committed segments.
 *
 * @param streamConfig stream config providing the desired segment size, autotune initial
 *        rows and time threshold
 * @param committingSegmentDescriptor size and stop reason of the committing segment
 * @param committingSegmentZKMetadata ZK metadata of the committing segment; {@code null}
 *        when this is the first segment of the partition
 * @param newSegmentName name of the new segment, used only for logging
 * @return number of rows the new segment should consume before flushing
 */
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
    @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
  long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
  if (desiredSegmentSizeBytes <= 0) {
    desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
  }
  // Acceptable size band around the desired size: [desired / 2, desired * 1.5].
  long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
  double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
  if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
    if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
      // Reuse the ratio learned from other partitions of this table.
      long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
      targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
          _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
      return (int) targetSegmentNumRows;
    } else {
      // No history at all: fall back to the configured autotune starting point.
      final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName,
          autotuneInitialRows);
      return autotuneInitialRows;
    }
  }
  final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
  // Reuse the previous threshold when the committing segment's size cannot inform a new one:
  // either the size is unknown (repair) or the segment was cut short by a force-commit.
  if (committingSegmentSizeBytes <= 0 // repair segment case
      || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
      committingSegmentDescriptor.getStopReason())) {
    String reason = committingSegmentSizeBytes <= 0 //
        ? "Committing segment size is not available" //
        : "Committing segment is due to force-commit";
    final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
        reason, newSegmentName, targetNumRows);
    return targetNumRows;
  }
  final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
  final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
  final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
      newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
      committingSegmentSizeBytes);
  // Blend the observed rows-per-byte ratio into the running estimate (weighted average).
  double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
  if (_latestSegmentRowsToSizeRatio > 0) {
    _latestSegmentRowsToSizeRatio =
        CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
  } else {
    _latestSegmentRowsToSizeRatio = currentRatio;
  }
  // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
  // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
  // to hit the row limit next time around.
  //
  // If the size of the committing segment is higher than the desired segment size, then the administrator has
  // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
  // limit.
  //
  // TODO: add feature to adjust time threshold as well
  // If we set the new threshold to numRowsConsumed, we might keep oscillating back and forth between doubling the
  // limit and the time threshold being hit. If we set the new threshold to
  // committingSegmentZKMetadata.getSizeThresholdToFlushSegment(), we might end up using a lot more memory than
  // required for the segment. So, until we add a feature to adjust the time threshold, we use a minor-bump
  // strategy: only slightly bump the threshold based on numRowsConsumed.
  if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
    final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
    long currentNumRows = numRowsConsumed;
    StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
    if (timeThresholdMillis < timeConsumed) {
      // The administrator has reduced the time threshold. Adjust the
      // number of rows to match the average consumption rate on the partition.
      currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
      logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
          .append(". ");
    }
    long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
    targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
    logStringBuilder.append("Setting segment size for {} as {}");
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(),
        newSegmentName, targetSegmentNumRows);
    return (int) targetSegmentNumRows;
  }
  // Row limit was hit: steer the next threshold based on where the committed size landed
  // relative to the acceptable size band.
  long targetSegmentNumRows;
  if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
    // Segment came out too small: grow the threshold by 50%.
    targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
  } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
    // Segment came out too large: halve the threshold.
    targetSegmentNumRows = numRowsConsumed / 2;
  } else {
    // Within band: derive the threshold from the desired size and the learned ratio.
    if (_latestSegmentRowsToSizeRatio > 0) {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
    } else {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
    }
  }
  targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "Committing segment size {}, current ratio {}, setting threshold for {} as {}",
      committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
  return (int) targetSegmentNumRows;
}
|
@Test
public void testUseLastSegmentsThresholdIfSegmentSizeMissing() {
  // Committing segment reports size 0 (the "repair" case), so the computer must
  // simply carry over the previous segment's flush threshold.
  long segmentSizeBytes = 0L;
  int segmentSizeThreshold = 5_000;
  SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer();
  StreamConfig streamConfig = mock(StreamConfig.class);
  when(streamConfig.getFlushThresholdTimeMillis()).thenReturn(123L);
  CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
  when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(segmentSizeBytes);
  SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class);
  when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(segmentSizeThreshold);
  int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, committingSegmentZKMetadata,
      "newSegmentName");
  assertEquals(threshold, segmentSizeThreshold);
}
|
@Override
public void addLogListener(@Nonnull Level level, @Nonnull LogListener logListener) {
    // Log listeners are not supported by this logging-service implementation.
    throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testAddLogListener() {
    // This service rejects listener registration outright.
    loggingService.addLogListener(Level.INFO, logListener);
}
|
/**
 * Entry point for a multiplexed REST request: validates the HTTP method, extracts and
 * optionally filters the individual requests, then runs them through a task plan whose
 * final step aggregates the individual responses into one RestResponse delivered via
 * {@code callback}.
 */
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback)
{
  // Multiplexed requests must be POSTs; anything else is a 405.
  if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod()))
  {
    _log.error("POST is expected, but " + request.getMethod() + " received");
    callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method"));
    return;
  }
  // Disable server-side latency instrumentation for multiplexed requests
  requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true);
  IndividualRequestMap individualRequests;
  try
  {
    individualRequests = extractIndividualRequests(request);
    // Optional filter may drop or rewrite individual requests before execution.
    if (_multiplexerSingletonFilter != null) {
      individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests);
    }
  }
  catch (RestException e)
  {
    // RestException already carries a proper HTTP status; forward it unchanged.
    _log.error("Invalid multiplexed request", e);
    callback.onError(e);
    return;
  }
  catch (Exception e)
  {
    // Any other failure while parsing the envelope is treated as a bad request (400).
    _log.error("Invalid multiplexed request", e);
    callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e));
    return;
  }
  // prepare the map of individual responses to be collected
  final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size());
  final Map<String, HttpCookie> responseCookies = new HashMap<>();
  // all tasks are Void and side effect based, that will be useful when we add streaming
  Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies);
  // Runs after every individual request has finished; merges responses and cookies.
  Task<Void> responseAggregationTask = Task.action("send aggregated response", () ->
  {
    RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies);
    callback.onSuccess(aggregatedResponse);
  }
  );
  _engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS);
}
|
@Test(dataProvider = "multiplexerConfigurations")
public void testHandleSequentialRequests(MultiplexerRunMode multiplexerRunMode) throws Exception
{
  SynchronousRequestHandler mockHandler = createMockHandler();
  MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(mockHandler, multiplexerRunMode);
  RequestContext requestContext = new RequestContext();
  // Request "1" is nested as a dependent of request "0", so they must run sequentially.
  IndividualRequest indRequest1 = fakeIndRequest(BAR_URL);
  IndividualRequest indRequest0 = fakeIndRequest(FOO_URL, ImmutableMap.of("1", indRequest1));
  RestRequest request = fakeMuxRestRequest(ImmutableMap.of("0", indRequest0));
  // set expectations
  expect(mockHandler.handleRequestSync(fakeIndRestRequest(FOO_URL), requestContext)).andReturn(fakeIndRestResponse(FOO_ENTITY));
  expect(mockHandler.handleRequestSync(fakeIndRestRequest(BAR_URL), requestContext)).andReturn(fakeIndRestResponse(BAR_ENTITY));
  // switch into replay mode
  replay(mockHandler);
  FutureCallback<RestResponse> callback = new FutureCallback<>();
  multiplexer.handleRequest(request, requestContext, callback);
  RestResponse muxRestResponse = callback.get();
  // The aggregated response must contain both individual responses keyed by index.
  RestResponse expectedMuxRestResponse = fakeMuxRestResponse(ImmutableMap.of(0, fakeIndResponse(FOO_JSON_BODY), 1, fakeIndResponse(BAR_JSON_BODY)));
  assertEquals(muxRestResponse, expectedMuxRestResponse);
  verify(mockHandler);
}
|
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    // Without an open position there is nothing to stop out.
    if (tradingRecord == null || tradingRecord.isClosed()) {
        return false;
    }
    Num entryPrice = tradingRecord.getCurrentPosition().getEntry().getNetPrice();
    Num currentPrice = this.referencePrice.getValue(index);
    Num threshold = this.stopLossThreshold.getValue(index);
    // Long positions stop out below entry - threshold; shorts above entry + threshold.
    if (tradingRecord.getCurrentPosition().getEntry().isBuy()) {
        return currentPrice.isLessThan(entryPrice.minus(threshold));
    }
    return currentPrice.isGreaterThan(entryPrice.plus(threshold));
}
|
@Test
public void testCustomReferencePrice() {
    // Build a flat series so the ATR-based threshold is predictable.
    ZonedDateTime initialEndDateTime = ZonedDateTime.now();
    for (int i = 0; i < 10; i++) {
        series.addBar(initialEndDateTime.plusDays(i), 100, 105, 95, 100);
    }
    // Use close price as the reference instead of the rule's default.
    ClosePriceIndicator customReference = new ClosePriceIndicator(series);
    AverageTrueRangeStopLossRule rule = new AverageTrueRangeStopLossRule(series, customReference, 5, 2);
    // Enter long position
    TradingRecord tradingRecord = new BaseTradingRecord();
    tradingRecord.enter(0, series.numOf(100), series.numOf(1));
    // Price drops below stop loss
    series.addBar(series.getLastBar().getEndTime().plusDays(1), 90, 90, 73, 73);
    assertTrue(rule.isSatisfied(series.getEndIndex(), tradingRecord));
}
|
@Description("Inverse of ChiSquared cdf given df parameter and probability")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseChiSquaredCdf(
        @SqlType(StandardTypes.DOUBLE) double df,
        @SqlType(StandardTypes.DOUBLE) double p)
{
    // Validate before constructing the distribution: p is a probability, df must be positive.
    checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "inverseChiSquaredCdf Function: p must be in the interval [0, 1]");
    checkCondition(df > 0, INVALID_FUNCTION_ARGUMENT, "inverseChiSquaredCdf Function: df must be greater than 0");
    // A null RNG is acceptable here: inverseCumulativeProbability performs no sampling.
    return new ChiSquaredDistribution(null, df, ChiSquaredDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY)
            .inverseCumulativeProbability(p);
}
|
@Test
public void testInverseChiSquaredCdf()
{
    // Known quantiles of the chi-squared distribution with df = 3.
    assertFunction("inverse_chi_squared_cdf(3, 0.0)", DOUBLE, 0.0);
    assertFunction("round(inverse_chi_squared_cdf(3, 0.99), 4)", DOUBLE, 11.3449);
    assertFunction("round(inverse_chi_squared_cdf(3, 0.3),2)", DOUBLE, 1.42);
    assertFunction("round(inverse_chi_squared_cdf(3, 0.95),2)", DOUBLE, 7.81);
    // Argument validation: df must be positive, p must lie in [0, 1].
    assertInvalidFunction("inverse_chi_squared_cdf(-3, 0.3)", "inverseChiSquaredCdf Function: df must be greater than 0");
    assertInvalidFunction("inverse_chi_squared_cdf(3, -0.1)", "inverseChiSquaredCdf Function: p must be in the interval [0, 1]");
    assertInvalidFunction("inverse_chi_squared_cdf(3, 1.1)", "inverseChiSquaredCdf Function: p must be in the interval [0, 1]");
}
|
public QueryMetadata parse(String queryString) {
    if (Strings.isNullOrEmpty(queryString)) {
        return QueryMetadata.empty();
    }
    // Collect, per parameter name, every (line, start, end) occurrence in the query.
    final Map<String, List<SubstringMultilinePosition>> positions = new LinkedHashMap<>();
    final String[] lines = queryString.split("\n");
    for (int lineIndex = 0; lineIndex < lines.length; lineIndex++) {
        final Matcher matcher = PLACEHOLDER_PATTERN.matcher(lines[lineIndex]);
        while (matcher.find()) {
            final String paramName = matcher.group(1);
            // Line numbers are reported 1-based.
            positions.computeIfAbsent(paramName, name -> new ArrayList<>())
                    .add(SubstringMultilinePosition.create(lineIndex + 1, matcher.start(), matcher.end()));
        }
    }
    final ImmutableSet<QueryParam> usedParams = positions.entrySet().stream()
            .map(entry -> QueryParam.create(entry.getKey(), entry.getValue()))
            .collect(ImmutableSet.toImmutableSet());
    return QueryMetadata.builder()
            .usedParameters(usedParams)
            .build();
}
|
@Test
void testCharacterSpaceOfParameterNames() {
    // Names containing spaces or punctuation are not valid placeholders.
    assertThat(parse("foo:$some parameter$")).isEmpty();
    assertThat(parse("foo:$some-parameter$")).isEmpty();
    assertThat(parse("foo:$some/parameter$")).isEmpty();
    // Digits are allowed, but not as the leading character.
    assertThat(parse("foo:$some42parameter$")).containsExactly("some42parameter");
    assertThat(parse("foo:$42parameter$")).isEmpty();
    assertThat(parse("foo:$parameter42$")).containsExactly("parameter42");
    assertThat(parse("foo:$someparameter$")).containsExactly("someparameter");
    // Underscores are allowed anywhere, including leading, trailing, or alone.
    assertThat(parse("foo:$some_parameter$")).containsExactly("some_parameter");
    assertThat(parse("foo:$_someparameter$")).containsExactly("_someparameter");
    assertThat(parse("foo:$_someparameter_$")).containsExactly("_someparameter_");
    assertThat(parse("foo:$_someparameter_$")).containsExactly("_someparameter_");
    assertThat(parse("foo:$_$")).containsExactly("_");
    // Single-character names: a letter works, a digit does not.
    assertThat(parse("foo:$s$")).containsExactly("s");
    assertThat(parse("foo:$9$")).isEmpty();
}
|
public static Integer parseInt(String value) {
    // Delegates to the radix-aware overload; ZERO_RADIX presumably makes the parser
    // infer the radix from the value's prefix (e.g. "0x...") — confirm against overload.
    return parseInt(value, ZERO_RADIX);
}
|
@Test
public void parseInt() {
    // Null, empty and blank inputs yield null rather than throwing.
    Assertions.assertNull(TbUtils.parseInt(null));
    Assertions.assertNull(TbUtils.parseInt(""));
    Assertions.assertNull(TbUtils.parseInt(" "));
    // Plain decimal values, including negative zero.
    Assertions.assertEquals((Integer) 0, TbUtils.parseInt("0"));
    Assertions.assertEquals((Integer) 0, TbUtils.parseInt("-0"));
    // Optional.of(x).get() forces Integer-vs-Integer comparison in assertEquals.
    Assertions.assertEquals(java.util.Optional.of(473).get(), TbUtils.parseInt("473"));
    // Hex handling: with and without 0x prefix, plus invalid hex strings.
    Assertions.assertEquals(java.util.Optional.of(-255).get(), TbUtils.parseInt("-0xFF"));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("-0xFF123"));
    Assertions.assertEquals(java.util.Optional.of(-255).get(), TbUtils.parseInt("-FF"));
    Assertions.assertEquals(java.util.Optional.of(255).get(), TbUtils.parseInt("FF"));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseInt("FFF"));
    Assertions.assertEquals(java.util.Optional.of(-2578).get(), TbUtils.parseInt("-0A12"));
    // Dedicated hex helpers, including endianness-aware variants.
    Assertions.assertEquals(java.util.Optional.of(-2578).get(), TbUtils.parseHexToInt("-0A12"));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseHexToInt("A12", false));
    Assertions.assertEquals(java.util.Optional.of(-14866).get(), TbUtils.parseBigEndianHexToInt("-3A12"));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseLittleEndianHexToInt("-A12"));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("0xFG"));
    // Binary parsing (MIN_RADIX), including 64-bit two's-complement strings.
    Assertions.assertEquals(java.util.Optional.of(102).get(), TbUtils.parseInt("1100110", MIN_RADIX));
    Assertions.assertEquals(java.util.Optional.of(-102).get(), TbUtils.parseInt("1111111111111111111111111111111111111111111111111111111110011010", MIN_RADIX));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("1100210", MIN_RADIX));
    Assertions.assertEquals(java.util.Optional.of(13158).get(), TbUtils.parseInt("11001101100110", MIN_RADIX));
    Assertions.assertEquals(java.util.Optional.of(-13158).get(), TbUtils.parseInt("1111111111111111111111111111111111111111111111111100110010011010", MIN_RADIX));
    // Octal and explicit-radix parsing with digit validation.
    Assertions.assertEquals(java.util.Optional.of(63).get(), TbUtils.parseInt("77", 8));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("18", 8));
    Assertions.assertEquals(java.util.Optional.of(-255).get(), TbUtils.parseInt("-FF", 16));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("FG", 16));
    // Int range boundaries: MAX/MIN parse, one beyond overflows.
    Assertions.assertEquals(java.util.Optional.of(Integer.MAX_VALUE).get(), TbUtils.parseInt(Integer.toString(Integer.MAX_VALUE), 10));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt(BigInteger.valueOf(Integer.MAX_VALUE).add(BigInteger.valueOf(1)).toString(10), 10));
    Assertions.assertEquals(java.util.Optional.of(Integer.MIN_VALUE).get(), TbUtils.parseInt(Integer.toString(Integer.MIN_VALUE), 10));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt(BigInteger.valueOf(Integer.MIN_VALUE).subtract(BigInteger.valueOf(1)).toString(10), 10));
    // Exotic radixes and radix-range validation.
    Assertions.assertEquals(java.util.Optional.of(506070563).get(), TbUtils.parseInt("KonaIn", 30));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("KonaIn", 10));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt(".456", 10));
    Assertions.assertThrows(NumberFormatException.class, () -> TbUtils.parseInt("4562.", 10));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseInt("KonaIn", MAX_RADIX + 1));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseInt("KonaIn", MIN_RADIX - 1));
    Assertions.assertThrows(IllegalArgumentException.class, () -> TbUtils.parseInt("KonaIn", 12));
}
|
public String parseToHTML() throws IOException, SAXException, TikaException {
    // ToXMLContentHandler serializes the parsed document as XHTML markup.
    final ContentHandler handler = new ToXMLContentHandler();
    final Metadata metadata = new Metadata();
    try (InputStream stream = ContentHandlerExample.class.getResourceAsStream("test.doc")) {
        new AutoDetectParser().parse(stream, handler, metadata);
        return handler.toString();
    }
}
|
@Test
public void testParseToHTML() throws IOException, SAXException, TikaException {
    String result = example
            .parseToHTML()
            .trim();
    // The XHTML output must contain the standard document skeleton plus
    // metadata extracted from the .doc file and the body text.
    assertContains("<html", result);
    assertContains("<head>", result);
    assertContains("<meta name=\"dc:creator\"", result);
    assertContains("<title>", result);
    assertContains("<body>", result);
    assertContains(">test", result);
}
|
@Override
public Map<String, String> getProperties() {
    final List<String> entries = this.list(PROPERTIES_KEY);
    if(entries.isEmpty()) {
        // No profile-specific properties; inherit from the parent protocol.
        return parent.getProperties();
    }
    // Entries are "key=value" strings; a bare key maps to the empty string.
    // Values run through the substitutor for placeholder expansion.
    return entries.stream().distinct().collect(Collectors.toMap(
            entry -> StringUtils.contains(entry, '=') ? StringUtils.substringBefore(entry, '=') : entry,
            entry -> StringUtils.contains(entry, '=') ? substitutor.replace(StringUtils.substringAfter(entry, '=')) : StringUtils.EMPTY));
}
|
@Test
public void testProperties() {
    // Only listForKey matters for this test: it supplies the profile's "properties"
    // list; the other deserializer methods are stubbed to inert defaults.
    final Profile profile = new Profile(new TestProtocol(), new Deserializer<String>() {
        @Override
        public String stringForKey(final String key) {
            return null;
        }
        @Override
        public String objectForKey(final String key) {
            return null;
        }
        @Override
        public <L> List<L> listForKey(final String key) {
            return (List<L>) Collections.singletonList("quota.notification.url=https://www.gmx.net/produkte/cloud/speicher-erweitern/?mc=03962659");
        }
        @Override
        public Map<String, String> mapForKey(final String key) {
            return null;
        }
        @Override
        public boolean booleanForKey(final String key) {
            return false;
        }
        @Override
        public List<String> keys() {
            return null;
        }
    });
    // The "key=value" entry must be split on '=' into a map entry.
    assertTrue(profile.getProperties().containsKey("quota.notification.url"));
    assertEquals("https://www.gmx.net/produkte/cloud/speicher-erweitern/?mc=03962659",
            profile.getProperties().get("quota.notification.url"));
}
|
public void validate(DataConnectionConfig dataConnectionConfig) {
    // Exactly one of the four client-configuration properties may be set.
    final int configuredCount = getNumberOfSetItems(dataConnectionConfig, CLIENT_XML_PATH, CLIENT_YML_PATH, CLIENT_XML,
            CLIENT_YML);
    if (configuredCount == 1) {
        return;
    }
    throw new HazelcastException("HazelcastDataConnection with name '" + dataConnectionConfig.getName()
            + "' could not be created, "
            + "provide either a file path with one of "
            + "\"client_xml_path\" or \"client_yml_path\" properties "
            + "or a string content with one of \"client_xml\" or \"client_yml\" properties "
            + "for the client configuration.");
}
|
@Test
public void testValidateBothStringsSet() {
    // Setting both XML and YAML string configs violates the "exactly one" rule.
    DataConnectionConfig dataConnectionConfig = new DataConnectionConfig();
    dataConnectionConfig.setProperty(HazelcastDataConnection.CLIENT_XML, "xml");
    dataConnectionConfig.setProperty(HazelcastDataConnection.CLIENT_YML, "yaml");
    HazelcastDataConnectionConfigValidator validator = new HazelcastDataConnectionConfigValidator();
    assertThatThrownBy(() -> validator.validate(dataConnectionConfig))
            .isInstanceOf(HazelcastException.class);
}
|
@Override
protected void encode(ChannelHandlerContext ctx, byte[] msg, List<Object> out) throws Exception {
    // Wrap the byte array without copying; the resulting buffer shares msg's memory.
    out.add(Unpooled.wrappedBuffer(msg));
}
|
@Test
public void testEncode() {
    // Random payload written outbound must come back as an equal wrapped buffer.
    byte[] b = new byte[2048];
    new Random().nextBytes(b);
    ch.writeOutbound(b);
    ByteBuf encoded = ch.readOutbound();
    assertThat(encoded, is(wrappedBuffer(b)));
    // Release the reference-counted buffer to avoid a leak report.
    encoded.release();
}
|
/**
 * Rotates an account's PNI identity key and per-device signed prekeys, then delivers the
 * supplied device-to-device sync messages to the account's linked devices.
 *
 * @throws MismatchedDevicesException if the device messages do not match the account's
 *         device set as required by validateDeviceMessages — confirm exact contract there
 * @throws StaleDevicesException if a message targets a stale device registration
 */
public Account updatePniKeys(final Account account,
    final IdentityKey pniIdentityKey,
    final Map<Byte, ECSignedPreKey> deviceSignedPreKeys,
    @Nullable final Map<Byte, KEMSignedPreKey> devicePqLastResortPreKeys,
    final List<IncomingMessage> deviceMessages,
    final Map<Byte, Integer> pniRegistrationIds) throws MismatchedDevicesException, StaleDevicesException {
  // Validate messages before mutating the account so a bad request changes nothing.
  validateDeviceMessages(account, deviceMessages);
  // Don't try to be smart about ignoring unnecessary retries. If we make literally no change we will skip the ddb
  // write anyway. Linked devices can handle some wasted extra key rotations.
  final Account updatedAccount = accountsManager.updatePniKeys(
      account, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, pniRegistrationIds);
  // Notify linked devices only after the key update has been persisted.
  sendDeviceMessages(updatedAccount, deviceMessages);
  return updatedAccount;
}
|
@Test
void updatePniKeysSetPrimaryDevicePrekeyPqAndSendMessages() throws Exception {
  // Account with a single linked device (id 2) besides the primary.
  final UUID aci = UUID.randomUUID();
  final UUID pni = UUID.randomUUID();
  final Account account = mock(Account.class);
  when(account.getUuid()).thenReturn(aci);
  when(account.getPhoneNumberIdentifier()).thenReturn(pni);
  final Device d2 = mock(Device.class);
  final byte deviceId2 = 2;
  when(d2.getId()).thenReturn(deviceId2);
  when(account.getDevice(deviceId2)).thenReturn(Optional.of(d2));
  when(account.getDevices()).thenReturn(List.of(d2));
  // Fresh PNI identity plus signed EC/KEM prekeys and registration ids per device.
  final ECKeyPair pniIdentityKeyPair = Curve.generateKeyPair();
  final IdentityKey pniIdentityKey = new IdentityKey(pniIdentityKeyPair.getPublicKey());
  final Map<Byte, ECSignedPreKey> prekeys = Map.of(Device.PRIMARY_ID,
      KeysHelper.signedECPreKey(1, pniIdentityKeyPair),
      deviceId2, KeysHelper.signedECPreKey(2, pniIdentityKeyPair));
  final Map<Byte, KEMSignedPreKey> pqPrekeys = Map.of((byte) 3, KeysHelper.signedKEMPreKey(3, pniIdentityKeyPair),
      (byte) 4, KeysHelper.signedKEMPreKey(4, pniIdentityKeyPair));
  final Map<Byte, Integer> registrationIds = Map.of(Device.PRIMARY_ID, 17, deviceId2, 19);
  // One sync message addressed to the linked device.
  final IncomingMessage msg = mock(IncomingMessage.class);
  when(msg.destinationDeviceId()).thenReturn(deviceId2);
  when(msg.content()).thenReturn(Base64.getEncoder().encodeToString(new byte[]{1}));
  changeNumberManager.updatePniKeys(account, pniIdentityKey, prekeys, pqPrekeys, List.of(msg), registrationIds);
  // Keys must be persisted and the message delivered to d2 with the account's own
  // ACI as both source and destination (self-addressed sync).
  verify(accountsManager).updatePniKeys(account, pniIdentityKey, prekeys, pqPrekeys, registrationIds);
  final ArgumentCaptor<MessageProtos.Envelope> envelopeCaptor = ArgumentCaptor.forClass(MessageProtos.Envelope.class);
  verify(messageSender).sendMessage(any(), eq(d2), envelopeCaptor.capture(), eq(false));
  final MessageProtos.Envelope envelope = envelopeCaptor.getValue();
  assertEquals(aci, UUID.fromString(envelope.getDestinationUuid()));
  assertEquals(aci, UUID.fromString(envelope.getSourceUuid()));
  assertEquals(Device.PRIMARY_ID, envelope.getSourceDevice());
  // A pure key rotation must not register a phone-number change.
  assertFalse(updatedPhoneNumberIdentifiersByAccount.containsKey(account));
}
|
public static void unitize1(double[] array) {
    // Scale in place so the L1 norm of the result is 1.
    // Note: a zero-norm input divides by zero and yields NaNs, matching the original.
    final double norm = norm1(array);
    int i = 0;
    while (i < array.length) {
        array[i] /= norm;
        i++;
    }
}
|
@Test
public void testUnitize1() {
    System.out.println("unitize1");
    double[] data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    // After unitizing, the L1 norm of the scaled vector must be 1.
    MathEx.unitize1(data);
    assertEquals(1, MathEx.norm1(data), 1E-7);
}
|
public static PluginOption parse(String pluginSpecification) {
    final Matcher matcher = PLUGIN_WITH_ARGUMENT_PATTERN.matcher(pluginSpecification);
    if (matcher.matches()) {
        // "name:argument" form — group(1) is the plugin name, group(2) its argument.
        Class<? extends Plugin> pluginClass = parsePluginName(pluginSpecification, matcher.group(1));
        return new PluginOption(pluginSpecification, pluginClass, matcher.group(2));
    }
    // Bare plugin name without an argument.
    return new PluginOption(pluginSpecification, parsePluginName(pluginSpecification, pluginSpecification), null);
}
|
@Test
void throws_for_unknown_plugins() {
    // An unresolvable plugin name must produce the full explanatory error message,
    // including the list of built-in plugin names and the specification format.
    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
        () -> PluginOption.parse("no-such-plugin"));
    assertThat(exception.getMessage(), is("The plugin specification 'no-such-plugin' has a problem:\n" +
        "\n" +
        "Could not load plugin class 'no-such-plugin'.\n" +
        "\n" +
        "Plugin specifications should have the format of PLUGIN[:[PATH|[URI [OPTIONS]]]\n" +
        "\n" +
        "Valid values for PLUGIN are: html, json, junit, message, pretty, progress, rerun, summary, teamcity, testng, timeline, unused, usage\n"
        +
        "\n" +
        "PLUGIN can also be a fully qualified class name, allowing registration of 3rd party plugins. The 3rd party plugin must implement io.cucumber.plugin.Plugin"));
}
|
/**
 * Configures the generated segment class's constructor: rewrites the super() invocation
 * with the generated class name, injects the model instantiation expression as the
 * "model" argument, and assigns the "weight" and "id" fields.
 *
 * @throws KiePMMLException if the constructor body has no explicit super() invocation
 */
static void setConstructor(final String segmentName,
                           final String generatedClassName,
                           final ConstructorDeclaration constructorDeclaration,
                           final String kiePMMLModelClass,
                           final boolean isInterpreted,
                           final double weight) {
    setConstructorSuperNameInvocation(generatedClassName, constructorDeclaration, segmentName);
    final BlockStmt body = constructorDeclaration.getBody();
    // The template constructor is expected to contain an explicit super(...) call.
    final ExplicitConstructorInvocationStmt superStatement =
            CommonCodegenUtils.getExplicitConstructorInvocationStmt(body)
                    .orElseThrow(() -> new KiePMMLException(String.format(MISSING_CONSTRUCTOR_IN_BODY, body)));
    // Interpreted vs. compiled models produce different instantiation expressions.
    final Expression instantiationExpression = getInstantiationExpression(kiePMMLModelClass, isInterpreted);
    String modelInstantiationString = instantiationExpression.toString();
    CommonCodegenUtils.setExplicitConstructorInvocationStmtArgument(superStatement, "model",
            modelInstantiationString);
    CommonCodegenUtils.setAssignExpressionValue(body, "weight", new DoubleLiteralExpr(weight));
    CommonCodegenUtils.setAssignExpressionValue(body, "id", new StringLiteralExpr(segmentName));
}
|
@Test
void setConstructorInterpreted() throws IOException {
    // Mutate the template's default constructor for an interpreted model...
    ConstructorDeclaration constructorDeclaration = MODEL_TEMPLATE.getDefaultConstructor().get();
    String segmentName = "SEGMENTNAME";
    String generatedClassName = "GENERATEDCLASSNAME";
    String kiePMMLModelClass = "KIEPMMLMODELCLASS";
    double weight = 12.22;
    KiePMMLSegmentFactory.setConstructor(segmentName,
                                         generatedClassName,
                                         constructorDeclaration,
                                         kiePMMLModelClass,
                                         true,
                                         weight);
    // ...then build the expressions we expect to find in the rewritten body.
    Map<Integer, Expression> superInvocationExpressionsMap = new HashMap<>();
    superInvocationExpressionsMap.put(0, new NameExpr(String.format("\"%s\"", segmentName)));
    ClassOrInterfaceType classOrInterfaceType = parseClassOrInterfaceType(kiePMMLModelClass);
    ObjectCreationExpr objectCreationExpr = new ObjectCreationExpr();
    objectCreationExpr.setType(classOrInterfaceType);
    superInvocationExpressionsMap.put(3, new NameExpr(objectCreationExpr.toString()));
    Map<String, Expression> assignExpressionMap = new HashMap<>();
    assignExpressionMap.put("weight", new DoubleLiteralExpr(weight));
    assignExpressionMap.put("id", new StringLiteralExpr(segmentName));
    // The resulting constructor body must match the golden file verbatim (as AST).
    String text = getFileContent(TEST_01_SOURCE);
    BlockStmt expected = JavaParserUtils.parseConstructorBlock(text);
    assertThat(JavaParserUtils.equalsNode(expected, constructorDeclaration.getBody())).isTrue();
}
|
/**
 * Converts a PMML {@code DiscretizeBin} into its KiePMML counterpart,
 * assigning a fresh random UUID as the instance name.
 */
static KiePMMLDiscretizeBin getKiePMMLDiscretizeBin(final DiscretizeBin discretizeBin) {
    // The bin value is optional in the PMML model; propagate null when absent.
    final Object rawBinValue = discretizeBin.getBinValue();
    final String convertedBinValue;
    if (rawBinValue != null) {
        convertedBinValue = rawBinValue.toString();
    } else {
        convertedBinValue = null;
    }
    final KiePMMLInterval convertedInterval =
            KiePMMLIntervalInstanceFactory.getKiePMMLInterval(discretizeBin.getInterval());
    return new KiePMMLDiscretizeBin(UUID.randomUUID().toString(),
                                    getKiePMMLExtensions(discretizeBin.getExtensions()),
                                    convertedBinValue,
                                    convertedInterval);
}
|
@Test
void getKiePMMLDiscretizeBin() {
    // Round-trip a randomly generated DiscretizeBin through the factory and
    // verify the converted instance field by field.
    final DiscretizeBin source = getRandomDiscretizeBin();
    final KiePMMLDiscretizeBin converted =
            KiePMMLDiscretizeBinInstanceFactory.getKiePMMLDiscretizeBin(source);
    commonVerifyKiePMMLDiscretizeBin(converted, source);
}
|
@Override
public void upgrade() {
    // Idempotence guard: the migration records completion and never re-runs.
    if (hasBeenRunSuccessfully()) {
        LOG.debug("Migration already completed.");
        return;
    }
    final Map<String, String> savedSearchToViewsMap = new HashMap<>();
    // NOTE(review): the map() lambda also mutates savedSearchToViewsMap as a
    // side effect; this relies on the stream being sequential.
    final Map<View, Search> newViews = this.savedSearchService.streamAll()
            .map(savedSearch -> {
                final Map.Entry<View, Search> newView = migrateSavedSearch(savedSearch);
                // Remember old saved-search id -> new view id for the completion record.
                savedSearchToViewsMap.put(savedSearch.id(), newView.getKey().id());
                return newView;
            })
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    // Persist every migrated view together with its backing search.
    newViews.forEach((view, search) -> {
        viewService.save(view);
        searchService.save(search);
    });
    // Mark the migration as done so subsequent runs short-circuit above.
    final MigrationCompleted migrationCompleted = MigrationCompleted.create(savedSearchToViewsMap);
    writeMigrationCompleted(migrationCompleted);
}
|
@Test
@MongoDBFixtures("sample_saved_search_with_stream.json")
public void migrateSavedSearchWithStreamId() throws Exception {
    this.migration.upgrade();
    final MigrationCompleted migrationCompleted = captureMigrationCompleted();
    // The single fixture saved search must map to exactly one new view id.
    assertThat(migrationCompleted.savedSearchIds())
            .containsExactly(new AbstractMap.SimpleEntry<>("5de660b7b2d44b5813c1d7f6", "000000020000000000000000"));
    // One view and one search should have been created, matching the golden files.
    assertViewServiceCreatedViews(1, resourceFile("sample_saved_search_with_stream-expected_views.json"));
    assertSearchServiceCreated(1, resourceFile("sample_saved_search_with_stream-expected_searches.json"));
}
|
/**
 * Estimates the pixel count of an image, inferring the missing dimension from
 * the given aspect ratio when only one of width/height is known.
 *
 * @param image           image whose dimensions may be partially unknown
 * @param widthOverHeight assumed aspect ratio (width divided by height)
 * @return estimated pixel count; 0 when both dimensions are unknown so all
 *         such images compare equal
 */
static double estimatePixelCount(final Image image, final double widthOverHeight) {
    if (image.getHeight() == HEIGHT_UNKNOWN) {
        if (image.getWidth() == WIDTH_UNKNOWN) {
            // images whose size is completely unknown will be in their own subgroups, so
            // any one of them will do, hence returning the same value for all of them
            return 0;
        } else {
            // height ~= width / widthOverHeight, so pixels ~= width^2 / widthOverHeight.
            // Promote to double before multiplying: int * int overflows for widths > 46340.
            return (double) image.getWidth() * image.getWidth() / widthOverHeight;
        }
    } else if (image.getWidth() == WIDTH_UNKNOWN) {
        // width ~= height * widthOverHeight, so pixels ~= height^2 * widthOverHeight.
        return (double) image.getHeight() * image.getHeight() * widthOverHeight;
    } else {
        // Both dimensions known: exact pixel count (computed in double to avoid overflow).
        return (double) image.getHeight() * image.getWidth();
    }
}
|
@Test
public void testEstimatePixelCountAllKnown() {
    // When both dimensions are known the ratio argument must be ignored:
    // the same image yields the same estimate regardless of widthOverHeight.
    assertEquals(20000.0, estimatePixelCount(img(100, 200), 1.0), 0.0);
    assertEquals(20000.0, estimatePixelCount(img(100, 200), 12.0), 0.0);
    assertEquals( 100.0, estimatePixelCount(img(100,   1), 12.0), 0.0);
    assertEquals( 100.0, estimatePixelCount(img(  1, 100), 0.5), 0.0);
}
|
/**
 * Resolves, for each given column, the table it belongs to.
 * <p>
 * With a single table in context every column trivially belongs to it;
 * otherwise columns are resolved in order from their owner prefix, from the
 * schema metadata (for owner-less columns), and finally from subqueries.
 *
 * @param columns column segments to resolve
 * @param schema  schema metadata used for owner-less columns
 * @return case-insensitive map from column expression to table name; columns
 *         that cannot be resolved are absent
 */
public Map<String, String> findTableNames(final Collection<ColumnSegment> columns, final ShardingSphereSchema schema) {
    if (1 == simpleTables.size()) {
        return findTableNameFromSingleTable(columns);
    }
    Map<String, String> result = new CaseInsensitiveMap<>();
    // First pass: columns qualified with an owner (e.g. alias.col) resolved from the SQL itself.
    Map<String, Collection<String>> ownerColumnNames = getOwnerColumnNames(columns);
    result.putAll(findTableNameFromSQL(ownerColumnNames));
    // Second pass: unqualified columns resolved against the schema metadata.
    Collection<String> noOwnerColumnNames = getNoOwnerColumnNames(columns);
    result.putAll(findTableNameFromMetaData(noOwnerColumnNames, schema));
    // Final pass: anything still unresolved may come from a subquery projection.
    result.putAll(findTableNameFromSubquery(columns, result));
    return result;
}
|
@Test
void assertFindTableNameWhenColumnSegmentOwnerAbsent() {
    SimpleTableSegment tableSegment1 = createTableSegment("table_1", "tbl_1");
    SimpleTableSegment tableSegment2 = createTableSegment("table_2", "tbl_2");
    // Column without an owner prefix and with an empty (mocked) schema:
    // with two candidate tables the column cannot be attributed to either.
    ColumnSegment columnSegment = createColumnSegment(null, "col");
    Map<String, String> actual = new TablesContext(Arrays.asList(tableSegment1, tableSegment2), TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), DefaultDatabase.LOGIC_NAME)
            .findTableNames(Collections.singletonList(columnSegment), mock(ShardingSphereSchema.class));
    assertTrue(actual.isEmpty());
}
|
/**
 * Annotates string-length constrained fields with a JSR-303 {@code @Size}
 * annotation carrying the schema's minLength/maxLength bounds.
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
    final boolean hasLengthBound = node.has("minLength") || node.has("maxLength");
    // Decorate only when JSR-303 generation is enabled, the schema constrains
    // the length, and the field's type supports @Size.
    if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
            && hasLengthBound
            && isApplicableType(field)) {
        // Pick the jakarta or javax flavour of the annotation per configuration.
        final Class<? extends Annotation> sizeClass;
        if (ruleFactory.getGenerationConfig().isUseJakartaValidation()) {
            sizeClass = Size.class;
        } else {
            sizeClass = javax.validation.constraints.Size.class;
        }
        final JAnnotationUse sizeAnnotation = field.annotate(sizeClass);
        if (node.has("minLength")) {
            sizeAnnotation.param("min", node.get("minLength").asInt());
        }
        if (node.has("maxLength")) {
            sizeAnnotation.param("max", node.get("maxLength").asInt());
        }
    }
    return field;
}
|
@Test
public void testNotUsed() {
    // JSR-303 generation is on, but the schema carries no length constraints...
    when(config.isIncludeJsr303Annotations()).thenReturn(true);
    when(node.has("minLength")).thenReturn(false);
    when(node.has("maxLength")).thenReturn(false);
    when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());
    JFieldVar result = rule.apply("node", node, null, fieldVar, null);
    // ...so the field is returned untouched and no @Size annotation is added.
    assertSame(fieldVar, result);
    verify(fieldVar, never()).annotate(sizeClass);
    verify(annotation, never()).param(anyString(), anyInt());
}
|
/**
 * Decodes an {@code ECKeyPair} from its flat binary form: PRIVATE_KEY_SIZE
 * bytes of private key followed by PUBLIC_KEY_SIZE bytes of public key.
 *
 * @param input serialized key material, exactly PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE bytes
 * @return the reconstructed key pair
 * @throws IllegalArgumentException if the input has the wrong length
 */
public static ECKeyPair deserialize(byte[] input) {
    if (input.length != PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE) {
        // IllegalArgumentException is a RuntimeException subclass, so existing
        // catch sites keep working while the failure is named precisely.
        throw new IllegalArgumentException(
                "Invalid input key size: expected " + (PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE)
                        + " bytes but got " + input.length);
    }
    BigInteger privateKey = Numeric.toBigInt(input, 0, PRIVATE_KEY_SIZE);
    BigInteger publicKey = Numeric.toBigInt(input, PRIVATE_KEY_SIZE, PUBLIC_KEY_SIZE);
    return new ECKeyPair(privateKey, publicKey);
}
|
@Test
public void testDeserializeECKey() {
    // Deserializing the canonical encoded bytes must reproduce the sample key pair.
    assertEquals(Keys.deserialize(ENCODED), (SampleKeys.KEY_PAIR));
}
|
/**
 * Returns the parsed definition tree, parsing lazily on first access.
 *
 * @return root node of the config definition tree
 * @throws CodegenRuntimeException if the definition cannot be read or parsed
 */
public InnerCNode getTree() throws CodegenRuntimeException {
    try {
        if (root == null) parse();
    } catch (DefParserException | IOException e) {
        // Separator added so the fixed prefix does not run into the cause text.
        throw new CodegenRuntimeException("Error parsing or reading config definition: " + e.getMessage(), e);
    }
    return root;
}
|
@Test
void testFileWithNamespaceInFilename() throws IOException {
    // Definition files may encode their namespace in the filename
    // (baz.bar.foo.def); parsing must still succeed and yield a stable md5.
    File defFile = new File(TEST_DIR + "baz.bar.foo.def");
    CNode root = new DefParser("test", new FileReader(defFile)).getTree();
    assertEquals("31a0f9bda0e5ff929762a29569575a7e", root.defMd5);
}
|
/**
 * JAX-RS request filter enforcing a valid JWT bearer token: requests without
 * a bearer header, or with a token that fails verification, are aborted.
 */
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    final Optional<String> header = getBearerHeader(requestContext);
    if (header.isEmpty()) {
        // No bearer token at all -> reject immediately.
        abortRequest(requestContext);
        return;
    }
    // Strip the "Bearer " scheme prefix to obtain the raw token.
    final String token = header.get().replaceFirst(AUTHENTICATION_SCHEME + " ", "");
    try {
        verifyToken(token);
    } catch (TokenVerificationException e) {
        LOG.error("Failed to verify auth token", e);
        abortRequest(requestContext);
    }
}
|
@Test
void verifyValidToken() throws IOException {
    // Token is signed with the same key the filter verifies against,
    // so the request must pass through without being aborted.
    final String key = "gTVfiF6A0pB70A3UP1EahpoR6LId9DdNadIkYNygK5Z8lpeJIpw9vN0jZ6fdsfeuV9KIg9gVLkCHIPj6FHW5Q9AvpOoGZO3h";
    final JwtTokenAuthFilter validator = new JwtTokenAuthFilter(key);
    final ContainerRequest mockedRequest = mockRequest("Bearer " + generateToken(key));
    validator.filter(mockedRequest);
    Mockito.verify(mockedRequest, never()).abortWith(Mockito.any());
}
|
/**
 * Asserts that the iterable under test contains the given element.
 * <p>
 * When it does not, but some actual element has the same {@code toString()}
 * as the expected one (a classic equals-vs-rendering confusion, e.g. 1 vs
 * 1L), the failure message additionally reports the runtime types involved.
 */
public final void contains(@Nullable Object element) {
    if (!Iterables.contains(checkNotNull(actual), element)) {
        List<@Nullable Object> elementList = newArrayList(element);
        if (hasMatchingToStringPair(actual, elementList)) {
            // Rich failure: point out the type mismatch hidden by identical toString()s.
            failWithoutActual(
                fact("expected to contain", element),
                fact("an instance of", objectToTypeName(element)),
                simpleFact("but did not"),
                fact(
                    "though it did contain",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual, /* itemsToCheck= */ elementList))),
                fullContents());
        } else {
            // Plain failure: just report the missing element and actual contents.
            failWithActual("expected to contain", element);
        }
    }
}
|
@Test
public void iterableContainsFailure() {
    // No toString() collision here, so the plain failure shape is expected.
    expectFailureWhenTestingThat(asList(1, 2, 3)).contains(5);
    assertFailureKeys("expected to contain", "but was");
    assertFailureValue("expected to contain", "5");
}
|
/**
 * Returns the legacy (pre-rename) column name for the given result-set column,
 * which for MySQL-family databases is the column label.
 *
 * @param dbMetaData database metadata handle; must not be null
 * @param rsMetaData result set metadata to read the label from; must not be null
 * @param index      1-based column index
 * @return the column label at {@code index}
 * @throws KettleDatabaseException if either metadata handle is null or the label lookup fails
 */
@Override public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
  if ( dbMetaData == null ) {
    throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
  }
  if ( rsMetaData == null ) {
    throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
  }
  try {
    return rsMetaData.getColumnLabel( index );
  } catch ( Exception e ) {
    // Wrap any JDBC failure, preserving the cause and its message.
    throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
  }
}
|
@Test( expected = KettleDatabaseException.class )
public void testGetLegacyColumnNameNullRSMetaDataException() throws Exception {
    // A null ResultSetMetaData must be rejected with KettleDatabaseException.
    new MariaDBDatabaseMeta().getLegacyColumnName( mock( DatabaseMetaData.class ), null, 1 );
}
|
/**
 * Returns the shard iterators to poll, keyed by shard id.
 * <p>
 * On the first call (or after the cache is emptied) the stream is described,
 * the shard tree is populated and fresh iterators are requested according to
 * the configured iterator type. On subsequent calls, iterators of shards that
 * have been split/merged are replaced by TRIM_HORIZON iterators of their
 * children so no records are skipped.
 */
Map<String, String> getShardIterators() {
    // Lazily resolve the stream ARN once.
    if (streamArn == null) {
        streamArn = getStreamArn();
    }
    // Either return cached ones or get new ones via GetShardIterator requests.
    if (currentShardIterators.isEmpty()) {
        DescribeStreamResponse streamDescriptionResult
                = getClient().describeStream(DescribeStreamRequest.builder().streamArn(streamArn).build());
        shardTree.populate(streamDescriptionResult.streamDescription().shards());
        StreamIteratorType streamIteratorType = getEndpoint().getConfiguration().getStreamIteratorType();
        currentShardIterators = getCurrentShardIterators(streamIteratorType);
    } else {
        Map<String, String> childShardIterators = new HashMap<>();
        for (Entry<String, String> currentShardIterator : currentShardIterators.entrySet()) {
            List<Shard> children = shardTree.getChildren(currentShardIterator.getKey());
            if (children.isEmpty()) { // This is still an active leaf shard, reuse it.
                childShardIterators.put(currentShardIterator.getKey(), currentShardIterator.getValue());
            } else {
                for (Shard child : children) { // Inactive shard, move down to its children.
                    // Start children at TRIM_HORIZON so no records are missed.
                    String shardIterator = getShardIterator(child.shardId(), ShardIteratorType.TRIM_HORIZON);
                    childShardIterators.put(child.shardId(), shardIterator);
                }
            }
        }
        currentShardIterators = childShardIterators;
    }
    LOG.trace("Shard Iterators are: {}", currentShardIterators);
    return currentShardIterators;
}
|
@Test
void shouldThrowIllegalArgumentExceptionIfNoStreamsAreReturned() throws Exception {
    // Client mock reports no streams for the table, so resolving the stream
    // ARN inside getShardIterators() must fail with IllegalArgumentException.
    AmazonDDBStreamlessClientMock dynamoDbStreamsClient = new AmazonDDBStreamlessClientMock();
    component.getConfiguration().setAmazonDynamoDbStreamsClient(dynamoDbStreamsClient);
    Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
    ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
    endpoint.doStart();
    assertThrows(IllegalArgumentException.class, () -> underTest.getShardIterators());
}
|
/**
 * Copies fair-scheduler queue weights onto the capacity-scheduler
 * configuration for all children of the given queue, and enables
 * auto-queue-creation v2 on the parent. Leaf queues are left untouched.
 */
@Override
public void convertWeightsForChildQueues(FSQueue queue,
    CapacitySchedulerConfiguration csConfig) {
    final List<FSQueue> children = queue.getChildQueues();
    final boolean isParentQueue = queue instanceof FSParentQueue || !children.isEmpty();
    if (!isParentQueue) {
        return;
    }
    final QueuePath queuePath = new QueuePath(queue.getName());
    // The root queue's own weight is converted as well.
    if (queue.getName().equals(ROOT_QUEUE)) {
        csConfig.setNonLabeledQueueWeight(queuePath, queue.getWeight());
    }
    for (FSQueue child : children) {
        csConfig.setNonLabeledQueueWeight(new QueuePath(child.getName()), child.getWeight());
    }
    csConfig.setAutoQueueCreationV2Enabled(queuePath, true);
}
|
@Test
public void testAutoCreateV2FlagOnParentWithoutChildren() {
    // A parent queue with no children must still get the auto-create v2 flag.
    FSQueue root = createParent(new ArrayList<>());
    converter.convertWeightsForChildQueues(root, csConfig);
    assertEquals("Number of properties", 21,
        csConfig.getPropsWithPrefix(PREFIX).size());
    assertTrue("root autocreate v2 enabled",
        csConfig.isAutoQueueCreationV2Enabled(ROOT));
}
|
/**
 * Runs count-based index retention: when the number of (non-reopened) indices
 * in the set exceeds {@code maxNumberOfIndices}, the surplus is handed to the
 * retention executor for the given action.
 *
 * @param indexSet           index set to examine
 * @param maxNumberOfIndices retention limit; null means "no strategy configured" and is a no-op
 * @param action             retention action to apply to surplus indices
 * @param actionName         human-readable action name for logging/auditing
 */
public void retain(IndexSet indexSet,
                   @Nullable Integer maxNumberOfIndices,
                   RetentionExecutor.RetentionAction action,
                   String actionName) {
    final Map<String, Set<String>> deflectorIndices = indexSet.getAllIndexAliases();
    // Reopened indices are excluded from the count (they are kept deliberately).
    final int indexCount = (int) deflectorIndices.keySet()
            .stream()
            .filter(indexName -> !indices.isReopened(indexName))
            .count();
    if (maxNumberOfIndices == null) {
        LOG.warn("No retention strategy configuration found, not running index retention!");
        return;
    }
    // Do we have more indices than the configured maximum?
    if (indexCount <= maxNumberOfIndices) {
        LOG.debug("Number of indices ({}) lower than limit ({}). Not performing any retention actions.",
                indexCount, maxNumberOfIndices);
        return;
    }
    // We have more indices than the configured maximum! Remove as many as needed.
    final int removeCount = indexCount - maxNumberOfIndices;
    final String msg = "Number of indices (" + indexCount + ") higher than limit (" + maxNumberOfIndices + "). " +
            "Running retention for " + removeCount + " indices.";
    LOG.info(msg);
    activityWriter.write(new Activity(msg, CountBasedRetentionExecutor.class));
    retentionExecutor.runRetention(indexSet, removeCount, action, actionName);
}
|
@Test
public void shouldRetainOldestIndices() {
    underTest.retain(indexSet, 4, action, "action");
    verify(action, times(1)).retain(retainedIndexName.capture(), eq(indexSet));
    // Ensure that the oldest indices come first
    assertThat(retainedIndexName.getAllValues().get(0)).containsExactly("test_1", "test_2");
    // One activity entry is written here plus one by the retention run.
    verify(activityWriter, times(2)).write(any(Activity.class));
}
|
/**
 * Persists an internal property. Null or empty values are stored via the
 * dedicated "empty" marker rather than as a literal value.
 */
@Override
public void write(String propertyKey, @Nullable String value) {
    checkPropertyKey(propertyKey);
    try (DbSession session = dbClient.openSession(false)) {
        final boolean emptyValue = value == null || value.isEmpty();
        if (emptyValue) {
            dbClient.internalPropertiesDao().saveAsEmpty(session, propertyKey);
        } else {
            dbClient.internalPropertiesDao().save(session, propertyKey, value);
        }
        session.commit();
    }
}
|
@Test
public void write_calls_dao_save_when_value_is_neither_null_nor_empty() {
    underTest.write(SOME_KEY, SOME_VALUE);
    // A real value goes through save() (not saveAsEmpty) and the session commits.
    verify(internalPropertiesDao).save(dbSession, SOME_KEY, SOME_VALUE);
    verify(dbSession).commit();
}
|
/**
 * Builds a resilience4j {@code Retry} processor for the given business name
 * from the current retry context and rule.
 *
 * @param businessName name used to register/look up the retry instance
 * @param rule         rule supplying attempt count, predicates and intervals
 * @return the configured retry, or empty when no retry is active in the context
 */
@Override
protected Optional<Retry> createProcessor(String businessName, RetryRule rule) {
    // The framework-specific retry strategy is carried in a thread-local context;
    // without one there is nothing to build.
    final io.sermant.flowcontrol.common.handler.retry.Retry retry = RetryContext.INSTANCE.getRetry();
    if (retry == null) {
        return Optional.empty();
    }
    final RetryConfig retryConfig = RetryConfig.custom()
            .maxAttempts(getMaxAttempts(retry, rule))
            .retryOnResult(retryPredicateCreator.createResultPredicate(retry, rule))
            .retryOnException(retryPredicateCreator.createExceptionPredicate(retry.retryExceptions()))
            .intervalFunction(getIntervalFunction(rule))
            .failAfterMaxAttempts(rule.isFailAfterMaxAttempts())
            .build();
    return Optional.of(RetryRegistry.of(retryConfig).retry(businessName));
}
|
@Test
public void test() {
    final RetryHandlerV2 retryHandlerV2 = new RetryHandlerV2();
    // Install a retry strategy in the thread-local context so the handler can build from it.
    final AlibabaDubboRetry alibabaDubboRetry = new AlibabaDubboRetry();
    RetryContext.INSTANCE.markRetry(alibabaDubboRetry);
    final Retry test = retryHandlerV2.createProcessor("test", new RetryRule()).get();
    Assert.assertNotNull(test);
    // Clean up the thread-local so other tests are unaffected.
    RetryContext.INSTANCE.remove();
}
|
/**
 * Returns an {@code AvroGenericCoder} for the given Avro schema.
 * Convenience delegation to {@link AvroGenericCoder#of}.
 */
public static AvroGenericCoder of(Schema schema) {
    return AvroGenericCoder.of(schema);
}
|
@Test
public void testAvroCoderNestedRecords() {
    // Nested Record: a record containing a sub-record of strings must still
    // be a deterministic coder.
    assertDeterministic(
        AvroCoder.of(
            SchemaBuilder.record("nestedRecord")
                .fields()
                .name("subRecord")
                .type()
                .record("subRecord")
                .fields()
                .name("innerField")
                .type()
                .stringType()
                .noDefault()
                .endRecord()
                .noDefault()
                .endRecord()));
}
|
/**
 * Creates a JAXB unmarshaller for the given class, applying the configured
 * validation schema and, when present, the configured event handler.
 *
 * @throws JAXBException if the context or unmarshaller cannot be created
 */
public Unmarshaller createUnmarshaller(Class<?> clazz) throws JAXBException {
    final Unmarshaller result = getContext(clazz).createUnmarshaller();
    result.setSchema(unmashallerSchema);
    // Only override the default event handler when one was configured.
    if (unmarshallerEventHandler != null) {
        result.setEventHandler(unmarshallerEventHandler);
    }
    return result;
}
|
@Test
void buildsUnmarshallerWithDefaultEventHandler() throws Exception {
    // Without an explicit handler the JAXB default handler must still be present.
    JAXBContextFactory factory =
        new JAXBContextFactory.Builder().build();
    Unmarshaller unmarshaller = factory.createUnmarshaller(Object.class);
    assertThat(unmarshaller.getEventHandler()).isNotNull();
}
|
/**
 * Builds the plugin request body for a cluster-profile change, delegating to
 * the builder matching the change status. The switch expression is exhaustive
 * over {@code ClusterProfilesChangedStatus}.
 *
 * @param status            what happened to the profile (created/updated/deleted)
 * @param oldClusterProfile previous properties; used for UPDATED and DELETED
 * @param newClusterProfile new properties; used for CREATED and UPDATED
 * @return JSON request body for the elastic agent plugin
 */
public String getClusterProfileChangedRequestBody(ClusterProfilesChangedStatus status, Map<String, String> oldClusterProfile, Map<String, String> newClusterProfile) {
    return switch (status) {
        case CREATED -> getClusterCreatedRequestBody(newClusterProfile);
        case UPDATED -> getClusterUpdatedRequestBody(oldClusterProfile, newClusterProfile);
        case DELETED -> getClusterDeletedRequestBody(oldClusterProfile);
    };
}
|
@Test
public void shouldGetClusterProfilesChangedRequestBodyWhenClusterProfileIsCreated() {
    // For CREATED there is no previous profile; only the new properties appear.
    ClusterProfilesChangedStatus status = ClusterProfilesChangedStatus.CREATED;
    Map<String, String> oldClusterProfile = null;
    Map<String, String> newClusterProfile = Map.of("key1", "key2");
    String json = new ElasticAgentExtensionConverterV5().getClusterProfileChangedRequestBody(status, oldClusterProfile, newClusterProfile);
    assertThatJson(json).isEqualTo("{" +
        " \"status\":\"created\"," +
        " \"cluster_profiles_properties\":{" +
        " \"key1\":\"key2\"" +
        " }" +
        "}");
}
|
/**
 * Returns the name of the OS user running the JVM, as reported by the
 * {@code user.name} system property.
 */
protected String getUserName() {
    return System.getProperty("user.name");
}
|
@Test
public void testGetUserName() throws Exception {
    // The authenticator must report exactly the JVM's user.name property.
    String expected = System.getProperty("user.name");
    Assert.assertEquals(expected, new PseudoAuthenticator().getUserName());
}
|
/**
 * Inserts a person into the in-memory store, silently skipping the insert
 * (with an info log) when a record with the same national id already exists.
 *
 * @param person record to insert
 */
@Override
public void insert(Person person) {
    // Uniqueness is enforced on getPersonNationalId() compared with ==.
    // NOTE(review): assumes the national id is a primitive and the intended
    // unique key — confirm against the Person class and its callers.
    Optional<Person> elem = personList.stream().filter(p -> p.getPersonNationalId() == person.getPersonNationalId()).findFirst();
    if (elem.isPresent()) {
        LOGGER.info("Record already exists.");
        return;
    }
    personList.add(person);
}
|
@Test
void testInsert(){
    // DataBase initialization.
    PersonDbSimulatorImplementation db = new PersonDbSimulatorImplementation();
    Assertions.assertEquals(0,db.size(),"Size of null database should be 0");
    // Dummy persons.
    Person person1 = new Person(1, "Thomas", 27304159);
    Person person2 = new Person(2, "John", 42273631);
    Person person3 = new Person(3, "Arthur", 27489171);
    db.insert(person1);
    db.insert(person2);
    db.insert(person3);
    // Test size after insertion.
    Assertions.assertEquals(3,db.size(),"Incorrect size for database.");
    Person person4 = new Person(4, "Finn", 20499078);
    Person person5 = new Person(5, "Michael", 40599078);
    db.insert(person4);
    db.insert(person5);
    // Test size after more insertions.
    Assertions.assertEquals(5,db.size(),"Incorrect size for database.");
    // Same key as person5; the duplicate must be rejected without error.
    Person person5duplicate = new Person(5,"Kevin",89589122);
    db.insert(person5duplicate);
    // Test size after attempt to insert record with duplicate key.
    Assertions.assertEquals(5,db.size(),"Incorrect size for data base");
}
|
/**
 * Converts Camel CloudEvent attribute headers into their HTTP binding form
 * (CloudEvents 1.0). Mandatory attributes missing from the message get
 * defaults (exchange id, spec version, default type/source, event time);
 * optional attributes (subject, data content type) are copied only when
 * present. Afterwards the internal Camel attribute headers are removed so
 * only the HTTP-style headers remain.
 */
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    final Map<String, Object> headers = message.getHeaders();
    CloudEvent cloudEvent = CloudEvents.v1_0;
    // Mandatory attributes: fall back to sensible defaults when unset.
    headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).http(),
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_ID, message.getExchange().getExchangeId()));
    headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_VERSION).http(),
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_VERSION, cloudEvent.version()));
    headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).http(),
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_TYPE, CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_TYPE));
    headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).http(),
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE, CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_SOURCE));
    // Optional attributes: only mapped when the message carries them.
    if (headers.containsKey(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT)) {
        headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT).http(),
                headers.get(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT));
    }
    if (headers.containsKey(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE)) {
        headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).http(),
                headers.get(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE));
    }
    headers.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TIME).http(),
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_TIME, cloudEvent.getEventTime(message.getExchange())));
    headers.putIfAbsent(Exchange.CONTENT_TYPE,
            headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, "application/json"));
    // Drop the internal Camel attribute headers now that they are mapped.
    cloudEvent.attributes().stream().map(CloudEvent.Attribute::id).forEach(headers::remove);
}
|
@Test
void shouldMapToHttpCloudEvent() throws Exception {
    // Prepare a message with Camel CloudEvent attribute headers and a body.
    Exchange exchange = new DefaultExchange(camelContext);
    exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT, "test1.txt");
    exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE, "org.apache.camel.event.test");
    exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE, "org.apache.camel.test");
    exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, "text/plain");
    exchange.getMessage().setBody(new ByteArrayInputStream("Test1".getBytes(StandardCharsets.UTF_8)));
    transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
    CloudEvent cloudEvent = CloudEvents.v1_0;
    assertTrue(exchange.getMessage().hasHeaders());
    // Every attribute must now be exposed under its HTTP binding header name.
    assertEquals(exchange.getExchangeId(),
            exchange.getMessage().getHeader(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).http()));
    assertEquals(cloudEvent.version(),
            exchange.getMessage().getHeader(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_VERSION).http()));
    assertEquals("org.apache.camel.event.test",
            exchange.getMessage().getHeader(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).http()));
    assertEquals("test1.txt",
            exchange.getMessage().getHeader(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT).http()));
    assertEquals("org.apache.camel.test",
            exchange.getMessage().getHeader(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).http()));
    assertTrue(exchange.getMessage().getHeaders()
            .containsKey(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TIME).http()));
    assertEquals("text/plain", exchange.getMessage().getHeader(Exchange.CONTENT_TYPE));
    assertEquals("Test1", exchange.getMessage().getBody(String.class));
    // The internal Camel attribute headers must have been removed.
    assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE));
    assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE));
    assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT));
}
|
/**
 * Looks up a config entry by its primary key.
 *
 * @param id primary key of the config_info row
 * @return the config info, or null when no row exists for the id
 * @throws CannotGetJdbcConnectionException when the database is unreachable
 */
@Override
public ConfigInfo findConfigInfo(long id) {
    try {
        // Build the SELECT via the dialect-aware mapper for the current datasource.
        ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO);
        return this.jt.queryForObject(configInfoMapper.select(
                Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content"),
                Collections.singletonList("id")), new Object[] {id}, CONFIG_INFO_ROW_MAPPER);
    } catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist, returns null.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        // Connectivity failures are fatal: log and propagate to the caller.
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
|
@Test
void testFindConfigInfoByIdGetConFail() {
    long id = 1234567890876L;
    // Simulate the datasource being unreachable for this query.
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {id}), eq(CONFIG_INFO_ROW_MAPPER)))
            .thenThrow(new CannotGetJdbcConnectionException("mocked exp"));
    try {
        externalConfigInfoPersistService.findConfigInfo(id);
        // Reaching this line means the expected exception was swallowed.
        assertTrue(false);
    } catch (Exception e) {
        // Connection failures must be rethrown unchanged, not mapped to null.
        assertTrue(e instanceof CannotGetJdbcConnectionException);
    }
}
|
/**
 * Handles an extension deletion event: when this watcher is still active and
 * the delete matcher accepts the extension, a reconcile request for it is
 * enqueued immediately.
 */
@Override
public void onDelete(Extension extension) {
    // Matcher is only consulted while the watcher is not disposed.
    final boolean relevant = !isDisposed() && matchers.onDeleteMatcher().match(extension);
    if (relevant) {
        // TODO filter the event
        queue.addImmediately(new Request(extension.getMetadata().getName()));
    }
}
|
@Test
void shouldDeleteExtensionWhenDeletePredicateAlwaysTrue() {
    // An always-true matcher means every deletion produces an immediate request.
    when(matchers.onDeleteMatcher()).thenReturn(getEmptyMatcher());
    watcher.onDelete(createFake("fake-name"));
    verify(matchers, times(1)).onDeleteMatcher();
    // The request must carry the extension's name and bypass the delayed add() path.
    verify(queue, times(1)).addImmediately(
        argThat(request -> request.name().equals("fake-name")));
    verify(queue, times(0)).add(any());
}
|
/**
 * Renders the given expression as generated Java source text.
 * Convenience entry point delegating to {@code formatExpression}.
 */
public String process(final Expression expression) {
    return formatExpression(expression);
}
|
@Test
public void shouldGenerateCorrectCodeForDateStringEQ() {
    // Given: a DATE column compared for equality against a date string literal.
    final ComparisonExpression compExp = new ComparisonExpression(
        Type.EQUAL,
        DATECOL,
        new StringLiteral("2021-06-23")
    );
    // When:
    final String java = sqlToJavaVisitor.process(compExp);
    // Then: the string is parsed to a Date and compared via compareTo.
    assertThat(java, containsString("(((java.sql.Date) arguments.get(\"COL13\")).compareTo(SqlTimeTypes.parseDate(\"2021-06-23\")) == 0)"));
}
|
@Override
public void onPinch() {
    // Intentionally a no-op: pinch gestures are ignored by this listener.
}
|
@Test
public void testOnPinch() {
    mUnderTest.onPinch();
    // The no-op must not touch the parent listener or the dismiss action.
    Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction);
}
|
/**
 * Incremental SPDY frame decoder. Consumes as many complete frames (and
 * partial DATA/header-block payloads) as the buffer currently holds,
 * dispatching each to the delegate. Decoder state ({@code state},
 * {@code streamId}, {@code flags}, {@code length}, {@code numSettings}) is
 * kept across calls so a frame split over multiple buffers resumes where it
 * left off; the method simply returns when more bytes are needed.
 *
 * @param buffer incoming bytes; consumed as far as complete units allow
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
            case READ_COMMON_HEADER:
                // Every frame starts with an 8-byte common header.
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset = buffer.readerIndex();
                int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);
                // High bit of the first byte distinguishes control from data frames.
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }
                flags = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);
                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;
            case READ_DATA_FRAME:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }
                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);
                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }
                // FIN is only reported on the last chunk of the frame.
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
                delegate.readDataFrame(streamId, last, data);
                break;
            case READ_SYN_STREAM_FRAME:
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                // Priority occupies the top 3 bits of the 9th byte.
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    // Remaining bytes of the frame are the compressed header block.
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;
            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;
            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;
            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;
                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;
            case READ_SETTING:
                // Settings entries are consumed one per loop iteration.
                if (numSettings == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }
                if (buffer.readableBytes() < 8) {
                    return;
                }
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;
                delegate.readSetting(id, value, persistValue, persisted);
                break;
            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;
            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;
            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;
            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;
            case READ_HEADER_BLOCK:
                // Compressed header block bytes are forwarded as they arrive.
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }
                if (!buffer.isReadable()) {
                    return;
                }
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;
                delegate.readHeaderBlock(headerBlock);
                break;
            case DISCARD_FRAME:
                // Skip the remainder of a frame we chose not to process.
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;
            case FRAME_ERROR:
                // After a framing error all further input is dropped.
                buffer.skipBytes(buffer.readableBytes());
                return;
            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
|
/**
 * Decodes a SPDY HEADERS frame whose compressed header block arrives in a
 * separate buffer, and verifies the delegate observes the frame start, the
 * header-block bytes and the header-block end, with both buffers fully drained.
 */
@Test
public void testSpdyHeadersFrameHeaderBlock() throws Exception {
    short type = 8; // SPDY HEADERS control-frame type
    byte flags = 0; // no FLAG_FIN -> readHeadersFrame(..., last=false)
    int length = 4; // first chunk carries only the 4-byte stream-ID field
    int headerBlockLength = 1024;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; // force positive, odd stream-ID

    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    // Declared frame length covers stream-ID plus the header block sent later.
    encodeControlFrameHeader(buf, type, flags, length + headerBlockLength);
    buf.writeInt(streamId);

    ByteBuf headerBlock = Unpooled.buffer(headerBlockLength);
    // Fill the header block with 1024 bytes (256 random ints) of payload.
    for (int i = 0; i < 256; i ++) {
        headerBlock.writeInt(RANDOM.nextInt());
    }

    // Feed the frame header first, then the header block as a second chunk.
    decoder.decode(buf);
    decoder.decode(headerBlock);
    verify(delegate).readHeadersFrame(streamId, false);
    verify(delegate).readHeaderBlock(headerBlock.slice(0, headerBlock.writerIndex()));
    verify(delegate).readHeaderBlockEnd();
    // The decoder must consume both buffers completely.
    assertFalse(buf.isReadable());
    assertFalse(headerBlock.isReadable());
    buf.release();
    headerBlock.release();
}
|
/**
 * Looks up a connected XMPP device by its identifier.
 * Returns {@code null} when no device with that id is registered.
 */
@Override
public XmppDevice getDevice(XmppDeviceId xmppDeviceId) {
    return connectedDevices.get(xmppDeviceId);
}
|
/**
 * Exercises the device registry: adds three devices (each firing a listener
 * callback), rejects a duplicate add, queries one device, then removes one
 * and verifies the removal callback fired.
 */
@Test
public void testAddRemoveConnectedDevice() {
    // test adding connected devices
    boolean add1 = agent.addConnectedDevice(jid1, device1);
    assertThat(add1, is(true));
    assertThat(testXmppDeviceListener.addedDevices, hasSize(1));
    boolean add2 = agent.addConnectedDevice(jid2, device2);
    assertThat(add2, is(true));
    assertThat(testXmppDeviceListener.addedDevices, hasSize(2));
    boolean add3 = agent.addConnectedDevice(jid3, device3);
    assertThat(add3, is(true));
    assertThat(testXmppDeviceListener.addedDevices, hasSize(3));
    assertThat(testXmppDeviceListener.addedDevices, hasItems(jid1, jid2, jid3));

    // Test adding a device twice - it should fail
    boolean addError1 = agent.addConnectedDevice(jid1, device1);
    assertThat(addError1, is(false));
    // duplicate add must not grow the registry
    assertThat(controller.connectedDevices.size(), is(3));

    // test querying the individual device
    XmppDevice queriedDevice = controller.getDevice(jid1);
    assertThat(queriedDevice, is(device1));

    // test removing device
    agent.removeConnectedDevice(jid3);
    assertThat(controller.connectedDevices.size(), is(2));

    // Make sure the listener delete callbacks fired
    assertThat(testXmppDeviceListener.removedDevices, hasSize(1));
    assertThat(testXmppDeviceListener.removedDevices, hasItems(jid3));
}
|
/**
 * Builds a documentation URL by appending the given suffix to the configured
 * base URL. A {@code null} suffix yields the base URL itself.
 */
@Override
public String getDocumentationLink(@Nullable String suffix) {
    // A null suffix means "link to the documentation root".
    final String path = (suffix == null) ? "" : suffix;
    return documentationBaseUrl + path;
}
|
/**
 * With the base-URL property overridden (and no trailing slash supplied) and
 * no suffix, the generated link is the override plus the version path segment.
 */
@Test
public void getDocumentationLink_suffixNotProvided_withPropertyOverride_missingSlash() {
    String propertyValue = "https://new-url.sonarqube.org";
    when(configuration.get(DOCUMENTATION_BASE_URL)).thenReturn(Optional.of(propertyValue));
    // Re-create the generator so it reads the overridden configuration value.
    documentationLinkGenerator = new DefaultDocumentationLinkGenerator(sonarQubeVersion, configuration);

    String generatedLink = documentationLinkGenerator.getDocumentationLink(null);

    assertThat(generatedLink).isEqualTo(propertyValue + "/100.1000");
}
|
/**
 * Static factory for a Redis {@link Write} transform with default settings:
 * a default connection configuration and the APPEND write method.
 */
public static Write write() {
    return new AutoValue_RedisIO_Write.Builder()
        .setConnectionConfiguration(RedisConnectionConfiguration.create())
        .setMethod(Write.Method.APPEND)
        .build();
}
|
/**
 * Writes a key with the SET method and a 10 s expiry, then verifies the value
 * was overwritten and the remaining TTL lies within the expected window.
 */
@Test
public void testWriteWithMethodSetWithExpiration() {
    String key = "testWriteWithMethodSet";
    client.set(key, "value");
    String newValue = "newValue";
    PCollection<KV<String, String>> write = p.apply(Create.of(KV.of(key, newValue)));
    write.apply(
        RedisIO.write()
            .withEndpoint(REDIS_HOST, port)
            .withMethod(Method.SET)
            .withExpireTime(10_000L));
    p.run();
    assertEquals(newValue, client.get(key));
    Long expireTime = client.pttl(key);
    // BUG FIX: the upper bound was 10_0000 (100 000 ms). The key is set to
    // expire in 10 000 ms, so the remaining TTL can never legitimately exceed
    // 10_000 — the old bound would mask a tenfold-too-long expiry.
    assertTrue(expireTime.toString(), 9_000 <= expireTime && expireTime <= 10_000);
    client.del(key);
}
|
/**
 * Returns the wall-clock timestamp, or {@code null} when this stamp was
 * constructed without one.
 */
public ZonedDateTime getDateTime() {
    return dateTime;
}
|
/**
 * getDateTime() is null for an uptime-only stamp and returns the parsed
 * ZonedDateTime when an ISO-style timestamp string is supplied, with or
 * without an accompanying uptime value.
 */
@Test
void getDateTime() {
    // Uptime-only constructor: no wall-clock time available.
    DateTimeStamp dateTimeStamp = new DateTimeStamp(.586);
    assertNull(dateTimeStamp.getDateTime());
    dateTimeStamp = new DateTimeStamp("2018-04-04T09:10:00.586-0100");
    assertEquals(ZonedDateTime.from(formatter.parse("2018-04-04T09:10:00.586-0100")), dateTimeStamp.getDateTime());
    // Supplying an uptime alongside the timestamp must not change the result.
    dateTimeStamp = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18);
    assertEquals(ZonedDateTime.from(formatter.parse("2018-04-04T09:10:00.586-0100")), dateTimeStamp.getDateTime());
}
|
/**
 * Reads a size-prefixed, possibly-null byte array from the buffer: a four-byte
 * length prefix followed by that many bytes (semantics of a negative prefix
 * are delegated to {@code getNullableArray}).
 */
public static byte[] getNullableSizePrefixedArray(final ByteBuffer buffer) {
    // Consume the length prefix, then hand off to the size-aware reader.
    final int prefixedSize = buffer.getInt();
    return getNullableArray(buffer, prefixedSize);
}
|
/**
 * A size prefix of -2 (any negative value other than the null sentinel) must
 * cause a NegativeArraySizeException rather than silently returning data.
 */
@Test
public void getNullableSizePrefixedArrayInvalid() {
    // -2
    byte[] input = {-1, -1, -1, -2};
    final ByteBuffer buffer = ByteBuffer.wrap(input);
    assertThrows(NegativeArraySizeException.class, () -> Utils.getNullableSizePrefixedArray(buffer));
}
|
/**
 * Evaluates the ensemble on a data frame, returning the running-average
 * prediction after each tree.
 *
 * @param data the input data frame; features are extracted via {@code formula}.
 * @return a [ntrees][n] matrix where entry (i, j) is the average prediction of
 *         the first i+1 trees for row j.
 */
public double[][] test(DataFrame data) {
    DataFrame x = formula.x(data);

    int n = x.nrow();
    int ntrees = models.length;
    double[][] prediction = new double[ntrees][n];
    for (int j = 0; j < n; j++) {
        Tuple xj = x.get(j);
        double base = 0;
        for (int i = 0; i < ntrees; i++) {
            // Accumulate tree outputs and record the running mean so callers
            // can inspect accuracy as a function of ensemble size.
            base = base + models[i].tree.predict(xj);
            prediction[i][j] = base / (i+1);
        }
    }

    return prediction;
}
|
/**
 * Regression benchmark on the 2dplanes dataset; 1.3581 is the expected error
 * passed to the shared test harness.
 */
@Test
public void test2DPlanes() {
    test("2dplanes", Planes.formula, Planes.data, 1.3581);
}
|
/**
 * Translates between generic and concrete invocations.
 *
 * <p>Two directions are handled: (1) a typed call targeting a generic service
 * implementation is rewritten into a {@code $invoke}/{@code $invokeAsync}
 * call with generalized arguments; (2) a generic call to a normal service has
 * its argument encoding validated against the declared serialization mode.
 * All other invocations pass through untouched.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    String generic = invoker.getUrl().getParameter(GENERIC_KEY);
    // calling a generic impl service
    if (isCallingGenericImpl(generic, invocation)) {
        RpcInvocation invocation2 = new RpcInvocation(invocation);

        /**
         * Mark this invocation as a generic impl call, this value will be removed automatically before passing on the wire.
         * See {@link RpcUtils#sieveUnnecessaryAttachments(Invocation)}
         */
        invocation2.put(GENERIC_IMPL_MARKER, true);
        String methodName = invocation2.getMethodName();
        Class<?>[] parameterTypes = invocation2.getParameterTypes();
        Object[] arguments = invocation2.getArguments();

        // Capture the declared parameter-type names so the remote side can
        // resolve the original method signature.
        String[] types = new String[parameterTypes.length];
        for (int i = 0; i < parameterTypes.length; i++) {
            types[i] = ReflectUtils.getName(parameterTypes[i]);
        }

        Object[] args;
        if (ProtocolUtils.isBeanGenericSerialization(generic)) {
            // bean mode: wrap each argument in a JavaBeanDescriptor
            args = new Object[arguments.length];
            for (int i = 0; i < arguments.length; i++) {
                args[i] = JavaBeanSerializeUtil.serialize(arguments[i], JavaBeanAccessor.METHOD);
            }
        } else {
            // default mode: generalize POJOs to maps/primitives
            args = PojoUtils.generalize(arguments);
        }

        // Future-returning methods are routed through the async generic entry.
        if (RpcUtils.isReturnTypeFuture(invocation)) {
            invocation2.setMethodName($INVOKE_ASYNC);
        } else {
            invocation2.setMethodName($INVOKE);
        }
        invocation2.setParameterTypes(GENERIC_PARAMETER_TYPES);
        invocation2.setParameterTypesDesc(GENERIC_PARAMETER_DESC);
        // Generic call convention: (method name, parameter type names, arguments).
        invocation2.setArguments(new Object[] {methodName, types, args});
        return invoker.invoke(invocation2);
    }
    // making a generic call to a normal service
    else if (isMakingGenericCall(generic, invocation)) {

        // third positional argument of $invoke is the actual argument array
        Object[] args = (Object[]) invocation.getArguments()[2];
        if (ProtocolUtils.isJavaGenericSerialization(generic)) {
            // nativejava mode requires every argument to be a byte[]
            for (Object arg : args) {
                if (byte[].class != arg.getClass()) {
                    error(generic, byte[].class.getName(), arg.getClass().getName());
                }
            }
        } else if (ProtocolUtils.isBeanGenericSerialization(generic)) {
            // bean mode requires JavaBeanDescriptor (or null) arguments
            for (Object arg : args) {
                if (arg != null && !(arg instanceof JavaBeanDescriptor)) {
                    error(
                        generic,
                        JavaBeanDescriptor.class.getName(),
                        arg.getClass().getName());
                }
            }
        }

        // Propagate the generic mode so downstream filters/codec see it.
        invocation.setAttachment(GENERIC_KEY, invoker.getUrl().getParameter(GENERIC_KEY));
    }
    return invoker.invoke(invocation);
}
|
/**
 * Round-trip through GenericImplFilter: a typed call against a generic=true
 * URL is invoked, the mocked invoker returns a generalized map, and
 * onResponse must realize it back into a typed Person.
 */
@Test
void testInvoke() throws Exception {

    RpcInvocation invocation = new RpcInvocation(
        "getPerson",
        "org.apache.dubbo.rpc.support.DemoService",
        "org.apache.dubbo.rpc.support.DemoService:dubbo",
        new Class[] {Person.class},
        new Object[] {new Person("dubbo", 10)});

    URL url = URL.valueOf("test://test:11/org.apache.dubbo.rpc.support.DemoService?"
        + "accesslog=true&group=dubbo&version=1.1&generic=true");

    Invoker invoker = Mockito.mock(Invoker.class);

    // The remote side of a generic call answers with a generalized map.
    Map<String, Object> person = new HashMap<String, Object>();
    person.put("name", "dubbo");
    person.put("age", 10);

    AppResponse mockRpcResult = new AppResponse(person);
    when(invoker.invoke(any(Invocation.class)))
        .thenReturn(AsyncRpcResult.newDefaultAsyncResult(mockRpcResult, invocation));
    when(invoker.getUrl()).thenReturn(url);
    when(invoker.getInterface()).thenReturn(DemoService.class);

    Result asyncResult = genericImplFilter.invoke(invoker, invocation);

    Result result = asyncResult.get();
    // onResponse is where the map is realized back into a Person instance.
    genericImplFilter.onResponse(result, invoker, invocation);
    Assertions.assertEquals(Person.class, result.getValue().getClass());
    Assertions.assertEquals(10, ((Person) result.getValue()).getAge());
}
|
/**
 * Returns the host name as produced by the configured host-name supplier.
 */
public String getHostname() {
    return hostNameSupplier.getHostName();
}
|
/**
 * A TaskManagerLocation built from an address whose FQDN is
 * worker2.cluster.mycompany.com must report the short host name "worker2".
 */
@Test
void testGetHostname0() {
    try {
        InetAddress address = mock(InetAddress.class);
        when(address.getCanonicalHostName()).thenReturn("worker2.cluster.mycompany.com");
        when(address.getHostName()).thenReturn("worker2.cluster.mycompany.com");
        when(address.getHostAddress()).thenReturn("127.0.0.1");

        final TaskManagerLocation info =
                new TaskManagerLocation(ResourceID.generate(), address, 19871);
        // FIX: AssertJ expects the actual value inside assertThat(...) and the
        // expected value in isEqualTo(...); the original call had them swapped,
        // which produces a misleading "expected/actual" message on failure.
        assertThat(info.getHostname()).isEqualTo("worker2");
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
|
/**
 * Authorizes an operation on a schema-registry subject by routing it through
 * the shared (possibly cached) access check.
 */
@Override
public void checkSubjectAccess(
    final KsqlSecurityContext securityContext,
    final String subjectName,
    final AclOperation operation
) {
    // Subject checks share the generic cache lookup; only the key differs.
    final CacheKey cacheKey =
        new CacheKey(securityContext, AuthObjectType.SUBJECT, subjectName, operation);
    checkAccess(cacheKey);
}
|
/**
 * Two identical subject-access checks one (faked) second apart must hit the
 * backend validator only once — the second request is served from the cache.
 */
@Test
public void shouldCheckCacheValidatorOnSecondSubjectAccessRequest() {
    // When
    cache.checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ);
    // Advance the fake clock by one second before the repeat request.
    when(fakeTicker.read()).thenReturn(ONE_SEC_IN_NS);
    cache.checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ);

    // Then
    verify(backendValidator, times(1))
        .checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ);
    verifyNoMoreInteractions(backendValidator);
}
|
/**
 * Fetches the key schema for a topic (or explicit schema id) by delegating to
 * the shared lookup with {@code isKey = true}.
 */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
    return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
}
|
/**
 * A non-"not found" RestClientException from the schema registry during a
 * key-schema-by-id lookup must surface as a KsqlException mentioning the topic.
 */
@Test
public void shouldThrowFromGetKeyWithIdSchemaOnOtherRestExceptions() throws Exception {
    // Given:
    when(srClient.getSchemaBySubjectAndId(any(), anyInt()))
        .thenThrow(new RestClientException("failure", 1, 1));

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> supplier.getKeySchema(Optional.of(TOPIC_NAME),
            Optional.of(42), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES))
    );

    // Then:
    assertThat(e.getMessage(), containsString("Schema registry fetch for topic "
        + "key request failed for topic: " + TOPIC_NAME));
}
|
/**
 * Returns our own entry when the requested name matches the configured one;
 * otherwise falls back to the wrapped base configuration, or {@code null}
 * when no base configuration is present.
 */
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
    if (entryName.equals(name)) {
        return entry;
    }
    if (baseConfig != null) {
        return baseConfig.getAppConfigurationEntry(name);
    }
    return null;
}
|
/**
 * A JaasConfiguration built for entry "foo" must return null for unknown
 * names and, for "foo", a single REQUIRED Krb5 login-module entry with the
 * expected keytab/principal options (module class depends on the JVM vendor).
 */
@Test
public void test() throws Exception {
    // IBM JVMs ship their own Krb5 login module under a different class name.
    String krb5LoginModuleName;
    if (System.getProperty("java.vendor").contains("IBM")) {
        krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
    } else {
        krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
    }
    JaasConfiguration jConf =
        new JaasConfiguration("foo", "foo/localhost",
            "/some/location/foo.keytab");
    // Unknown entry name -> null (no base configuration configured).
    AppConfigurationEntry[] entries = jConf.getAppConfigurationEntry("bar");
    Assert.assertNull(entries);
    entries = jConf.getAppConfigurationEntry("foo");
    Assert.assertEquals(1, entries.length);
    AppConfigurationEntry entry = entries[0];
    Assert.assertEquals(AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
        entry.getControlFlag());
    Assert.assertEquals(krb5LoginModuleName, entry.getLoginModuleName());
    // Verify the full option map: keytab-based login, no ticket cache.
    Map<String, ?> options = entry.getOptions();
    Assert.assertEquals("/some/location/foo.keytab", options.get("keyTab"));
    Assert.assertEquals("foo/localhost", options.get("principal"));
    Assert.assertEquals("true", options.get("useKeyTab"));
    Assert.assertEquals("true", options.get("storeKey"));
    Assert.assertEquals("false", options.get("useTicketCache"));
    Assert.assertEquals("true", options.get("refreshKrb5Config"));
    // No extra options beyond the six asserted above.
    Assert.assertEquals(6, options.size());
}
|
/**
 * Rewrites the nodes of a directed network in place using the given mapping
 * function, re-attaching each replaced node's incoming and outgoing edges to
 * its replacement.
 *
 * <p>Only directed networks without self loops are supported. Nodes mapped to
 * an equal value are left untouched. Edge objects are preserved; each is
 * removed and re-added between the new endpoints.
 *
 * @param network  the mutable network to rewrite
 * @param function maps each existing node to its replacement
 */
public static <N, E> void replaceDirectedNetworkNodes(
    MutableNetwork<N, E> network, Function<N, N> function) {
  checkArgument(network.isDirected(), "Only directed networks are supported, given %s", network);
  checkArgument(
      !network.allowsSelfLoops(),
      "Only networks without self loops are supported, given %s",
      network);

  // A map from the existing node to the replacement node
  Map<N, N> oldNodesToNewNodes = new HashMap<>(network.nodes().size());
  for (N currentNode : network.nodes()) {
    N newNode = function.apply(currentNode);
    // Skip updating the network if the old node is equivalent to the new node
    if (!currentNode.equals(newNode)) {
      oldNodesToNewNodes.put(currentNode, newNode);
    }
  }

  // For each replacement, connect up the existing predecessors and successors to the new node
  // and then remove the old node.
  for (Map.Entry<N, N> entry : oldNodesToNewNodes.entrySet()) {
    N oldNode = entry.getKey();
    N newNode = entry.getValue();
    network.addNode(newNode);
    // Copies are taken before mutating because removeEdge/addEdge would
    // otherwise invalidate the live views returned by the network.
    for (N predecessor : ImmutableSet.copyOf(network.predecessors(oldNode))) {
      for (E edge : ImmutableSet.copyOf(network.edgesConnecting(predecessor, oldNode))) {
        network.removeEdge(edge);
        network.addEdge(predecessor, newNode, edge);
      }
    }
    for (N successor : ImmutableSet.copyOf(network.successors(oldNode))) {
      for (E edge : ImmutableSet.copyOf(network.edgesConnecting(oldNode, successor))) {
        network.removeEdge(edge);
        network.addEdge(newNode, successor, edge);
      }
    }
    // All edges have been re-homed; the old node is now isolated.
    network.removeNode(oldNode);
  }
}
|
/**
 * Replacing nodes in an empty network is a no-op: the network stays empty and
 * the mapping function is never applied to anything.
 */
@Test
public void testNodeReplacementInEmptyNetwork() {
    MutableNetwork<String, String> network = createEmptyNetwork();

    Networks.replaceDirectedNetworkNodes(
        network,
        new Function<String, String>() {
            @Override
            public @Nullable String apply(@Nullable String input) {
                return input.toLowerCase();
            }
        });

    assertThat(network.nodes(), empty());
}
|
/**
 * Returns the list of message ids carried by this batch result.
 */
public List<Integer> getMsgIds() {
    return msgIds;
}
|
/**
 * A freshly constructed BatchResultMessage starts with an empty (non-null)
 * message-id list.
 */
@Test
void getMsgIds() {
    BatchResultMessage batchResultMessage = new BatchResultMessage();
    Assertions.assertTrue(batchResultMessage.getMsgIds().isEmpty());
}
|
/**
 * Broadcasts a rule-data change to websocket clients: the rules are wrapped
 * in a {@link WebsocketData} envelope tagged with the RULE group and the
 * event type, serialized to JSON, and sent via the collector.
 */
@Override
public void onRuleChanged(final List<RuleData> ruleDataList, final DataEventTypeEnum eventType) {
    final String json = GsonUtils.getInstance().toJson(
        new WebsocketData<>(ConfigGroupEnum.RULE.name(), eventType.name(), ruleDataList));
    WebsocketCollector.send(json, eventType);
}
|
/**
 * onRuleChanged must serialize the rule list into the expected websocket JSON
 * envelope and pass it to WebsocketCollector.send with the UPDATE event type
 * (verified via a static mock of the collector).
 */
@Test
public void testOnRuleChanged() {
    // Expected JSON payload: RULE group, UPDATE event, one waf rule with an
    // escaped handle and a single header condition.
    String message = "{\"groupType\":\"RULE\",\"eventType\":\"UPDATE\",\"data\":[{\"id\":\"1336350040008105984\","
        + "\"name\":\"test\",\"pluginName\":\"waf\",\"selectorId\":\"1336349806465064960\","
        + "\"matchMode\":1,\"sort\":1,\"enabled\":true,\"loged\":true,\"handle\":"
        + "\"{\\\\\\\"permission\\\\\\\":\\\\\\\"reject\\\\\\\",\\\\\\\"statusCode\\\\\\\":"
        + "\\\\\\\"503\\\\\\\"}\",\"conditionDataList\":[{\"paramType\":\"header\",\"operator\":"
        + "\"\\u003d\",\"paramName\":\"test\",\"paramValue\":\"a\"}]}]}";
    MockedStatic.Verification verification = () -> WebsocketCollector.send(message, DataEventTypeEnum.UPDATE);
    try (MockedStatic<WebsocketCollector> mockedStatic = mockStatic(WebsocketCollector.class)) {
        mockedStatic.when(verification).thenAnswer((Answer<Void>) invocation -> null);
        websocketDataChangedListener.onRuleChanged(ruleDataList, DataEventTypeEnum.UPDATE);
        // The listener must have invoked the collector with exactly this payload.
        mockedStatic.verify(verification);
    }
}
|
/**
 * Null-safe comparison: nulls sort before any non-null value, two nulls are
 * equal, and two non-null values are ordered by {@code nonNullCompare}.
 */
@Override
public int compare(Object o1, Object o2) {
    if (o1 == null) {
        // o1 == o2 when both null, otherwise o1 < o2
        return (o2 == null) ? 0 : -1;
    }
    if (o2 == null) {
        return 1; // o1 > o2
    }
    return nonNullCompare(o1, o2);
}
|
/**
 * Comparing null with null must report equality (result 0).
 */
@Test
public void twoNullArgs() {
    assertTrue("two nulls", cmp.compare(null, null) == 0);
}
|
/**
 * Returns the hedge-duration supplier configured for this instance.
 */
@Override
public HedgeDurationSupplier getDurationSupplier() {
    return durationSupplier;
}
|
/**
 * By default the hedge exposes a PreconfiguredDurationSupplier.
 */
@Test
public void shouldDefaultToPreconfiguredSupplier() {
    then(hedge.getDurationSupplier()).isOfAnyClassIn(PreconfiguredDurationSupplier.class);
}
|
/**
 * Routes a pull query across the hosts that own its partitions.
 *
 * <p>Locates every partition touched by the physical plan, fails fast with a
 * {@link MaterializationException} if any partition has no selectable host,
 * filters out non-routable hosts, then executes the routing rounds on the
 * coordinator executor, completing the returned future when they finish (or
 * exceptionally on any throwable).
 *
 * @return a future that completes when routing finishes or fails
 */
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final PullQueryWriteStream pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests
) {
  // Ask the materialization's locator which hosts own each key/partition.
  final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator()
      .locate(
          pullPhysicalPlan.getKeys(),
          routingOptions,
          routingFilterFactory,
          pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN
      );

  // Partitions for which no host passed the routing filters are fatal: the
  // query cannot produce complete results without them.
  final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
      .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
      .collect(Collectors.toMap(
          KsqlPartitionLocation::getPartition,
          loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));

  if (!emptyPartitions.isEmpty()) {
    final MaterializationException materializationException = new MaterializationException(
        "Unable to execute pull query. "
            + emptyPartitions.entrySet()
            .stream()
            .map(kv -> String.format(
                "Partition %s failed to find valid host. Hosts scanned: %s",
                kv.getKey(), kv.getValue()))
            .collect(Collectors.joining(", ", "[", "]")));
    LOG.debug(materializationException.getMessage());
    throw materializationException;
  }

  // at this point we should filter out the hosts that we should not route to
  final List<KsqlPartitionLocation> locations = allLocations
      .stream()
      .map(KsqlPartitionLocation::removeFilteredHosts)
      .collect(Collectors.toList());

  final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
  // Execute the routing rounds off-thread; propagate success or any failure
  // (including Errors) through the returned future.
  coordinatorExecutorService.submit(() -> {
    try {
      executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions,
          locations, pullQueryQueue, shouldCancelRequests);
      completableFuture.complete(null);
    } catch (Throwable t) {
      completableFuture.completeExceptionally(t);
    }
  });

  return completableFuture;
}
|
/**
 * When a forwarded request streams back an error row after the header, the
 * routing future must complete exceptionally with that error and no rows may
 * reach the pull-query queue.
 */
@Test
public void forwardingError_errorRow() {
    // Given:
    locate(location5);
    when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any()))
        .thenAnswer(i -> {
            Map<String, ?> requestProperties = i.getArgument(3);
            WriteStream<List<StreamedRow>> rowConsumer = i.getArgument(4);
            // The forwarded request must target partition 4 only.
            assertThat(requestProperties.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS),
                is ("4"));
            // Respond with a header followed by an error row.
            rowConsumer.write(
                ImmutableList.of(
                    StreamedRow.header(queryId, logicalSchema),
                    StreamedRow.error(new RuntimeException("Row Error!"), 500)));

            return RestResponse.successful(200, 2);
        }
    );

    // When:
    CompletableFuture<Void> future = haRouting.handlePullQuery(
        serviceContext, pullPhysicalPlan, statement, routingOptions,
        pullQueryQueue, disconnect);
    final Exception e = assertThrows(
        ExecutionException.class,
        future::get
    );

    // Then: no data rows were enqueued and the row error is surfaced as a
    // suppressed cause of the routing failure.
    assertThat(pullQueryQueue.size(), is(0));
    assertThat(Throwables.getRootCause(e).getSuppressed()[0].getMessage(), containsString("Row Error!"));
}
|
/**
 * Stores (or overwrites) the serialized public key for the given account and
 * device in DynamoDB.
 *
 * @return a future that completes when the put has been applied
 */
CompletableFuture<Void> setPublicKey(final UUID accountIdentifier, final byte deviceId, final ECPublicKey publicKey) {
    return dynamoDbAsyncClient.putItem(PutItemRequest.builder()
            .tableName(tableName)
            .item(Map.of(
                KEY_ACCOUNT_UUID, getPartitionKey(accountIdentifier),
                KEY_DEVICE_ID, getSortKey(deviceId),
                ATTR_PUBLIC_KEY, AttributeValues.fromByteArray(publicKey.serialize())))
            .build())
        // Discard the PutItemResponse; callers only need completion.
        .thenRun(Util.NOOP);
}
|
/**
 * A key absent before setPublicKey must be retrievable afterwards via
 * findPublicKey for the same account/device pair.
 */
@Test
void setPublicKey() {
    final UUID accountIdentifier = UUID.randomUUID();
    final byte deviceId = Device.PRIMARY_ID;
    final ECPublicKey publicKey = Curve.generateKeyPair().getPublicKey();

    // No key stored yet.
    assertEquals(Optional.empty(), clientPublicKeys.findPublicKey(accountIdentifier, deviceId).join());

    clientPublicKeys.setPublicKey(accountIdentifier, deviceId, publicKey).join();

    assertEquals(Optional.of(publicKey), clientPublicKeys.findPublicKey(accountIdentifier, deviceId).join());
}
|
/**
 * Sends a multi-frame message whose frames are described by a "picture"
 * format string, one character per argument:
 * <ul>
 *   <li>{@code i} — int, formatted as decimal text</li>
 *   <li>{@code 1}/{@code 2}/{@code 4} — unsigned 1-/2-/4-byte number as decimal text</li>
 *   <li>{@code 8} — long as decimal text</li>
 *   <li>{@code s} — String frame</li>
 *   <li>{@code b}/{@code c} — byte[] frame</li>
 *   <li>{@code f} — ZFrame</li>
 *   <li>{@code m} — ZMsg, spliced in frame by frame</li>
 *   <li>{@code z} — zero-sized frame; consumes no argument</li>
 * </ul>
 *
 * @throws ZMQException with EPROTO when the picture contains characters
 *         outside the accepted format
 * @return the result of sending the assembled message on the socket
 */
@Draft
public boolean sendPicture(Socket socket, String picture, Object... args)
{
    if (!FORMAT.matcher(picture).matches()) {
        throw new ZMQException(picture + " is not in expected format " + FORMAT.pattern(), ZError.EPROTO);
    }
    ZMsg msg = new ZMsg();
    for (int pictureIndex = 0, argIndex = 0; pictureIndex < picture.length(); pictureIndex++, argIndex++) {
        char pattern = picture.charAt(pictureIndex);
        switch (pattern) {
        case 'i': {
            msg.add(String.format(Locale.ENGLISH, "%d", (int) args[argIndex]));
            break;
        }
        case '1': {
            msg.add(String.format(Locale.ENGLISH, "%d", (0xff) & (int) args[argIndex]));
            break;
        }
        case '2': {
            msg.add(String.format(Locale.ENGLISH, "%d", (0xffff) & (int) args[argIndex]));
            break;
        }
        case '4': {
            // BUG FIX: the mask must be a long (0xffffffffL). Masking an int
            // with the int constant 0xffffffff is a no-op, so values with the
            // top bit set were formatted as negative numbers instead of their
            // unsigned 32-bit representation (inconsistent with '1' and '2').
            msg.add(String.format(Locale.ENGLISH, "%d", (0xffffffffL) & (int) args[argIndex]));
            break;
        }
        case '8': {
            msg.add(String.format(Locale.ENGLISH, "%d", (long) args[argIndex]));
            break;
        }
        case 's': {
            msg.add((String) args[argIndex]);
            break;
        }
        case 'b':
        case 'c': {
            msg.add((byte[]) args[argIndex]);
            break;
        }
        case 'f': {
            msg.add((ZFrame) args[argIndex]);
            break;
        }
        case 'm': {
            // Splice the supplied message's frames into the outgoing message.
            ZMsg msgParm = (ZMsg) args[argIndex];
            while (!msgParm.isEmpty()) {
                msg.add(msgParm.pop());
            }
            break;
        }
        case 'z': {
            // Zero frame: emits an empty frame and consumes no argument.
            msg.add((byte[]) null);
            argIndex--;
            break;
        }
        default:
            assert (false) : "invalid picture element '" + pattern + "'";
        }
    }
    return msg.send(socket, false);
}
|
/**
 * A picture string containing an unsupported character (here a space) must be
 * rejected with a ZMQException before any frame is built.
 */
@Test(expected = ZMQException.class)
public void testSendInvalidPictureFormat()
{
    String picture = " ";
    pic.sendPicture(null, picture, 255);
}
|
/**
 * Convenience overload: parses the "major.minor.patch" string and delegates
 * to the {@code VersionNumber} comparison.
 */
public boolean hasMajorMinorAndPatchVersionHigherOrEqualTo(String majorMinorAndPatchVersion) {
    return hasMajorMinorAndPatchVersionHigherOrEqualTo(new VersionNumber(majorMinorAndPatchVersion));
}
|
/**
 * Version comparison: 8.3.0 >= 8.0.1 holds; 5.8.0 and 8.0.0 are both below
 * 8.0.1 (the patch component participates in the comparison).
 */
@Test
void hasMajorMinorAndPatchVersionHigherOrEqualTo() {
    assertThat(v("8.3.0").hasMajorMinorAndPatchVersionHigherOrEqualTo("8.0.1")).isTrue();
    assertThat(v("5.8.0").hasMajorMinorAndPatchVersionHigherOrEqualTo("8.0.1")).isFalse();
    assertThat(v("8.0.0").hasMajorMinorAndPatchVersionHigherOrEqualTo("8.0.1")).isFalse();
}
|
/**
 * Pulls up to {@code nums} messages from the transactional op topic starting
 * at {@code offset}, using the transactional consumer group and a match-all
 * subscription.
 */
public PullResult getOpMessage(int queueId, long offset, int nums) {
    String group = TransactionalMessageUtil.buildConsumerGroup();
    String topic = TransactionalMessageUtil.buildOpTopic();
    // "*" subscribes to every tag on the op topic.
    SubscriptionData sub = new SubscriptionData(topic, "*");
    return getMessage(group, topic, queueId, offset, nums, sub);
}
|
/**
 * When the store reports NO_MESSAGE_IN_QUEUE, getOpMessage must translate the
 * result into a NO_NEW_MSG pull status.
 */
@Test
public void testGetOpMessage() {
    when(messageStore.getMessage(anyString(), anyString(), anyInt(), anyLong(), anyInt(), ArgumentMatchers.nullable(MessageFilter.class))).thenReturn(createGetMessageResult(GetMessageStatus.NO_MESSAGE_IN_QUEUE));
    PullResult result = transactionBridge.getOpMessage(0, 0, 1);
    assertThat(result.getPullStatus()).isEqualTo(PullStatus.NO_NEW_MSG);
}
|
/**
 * Evaluates this condition against a single row.
 *
 * <p>An atomic condition resolves its left/right field indexes from the row
 * metadata (caching them in instance fields), then applies the comparison
 * selected by {@code function}. A composite condition evaluates its first
 * child and then folds the remaining children in order using each child's
 * operator (AND/OR/AND NOT/OR NOT/XOR). In both cases the result is inverted
 * when the condition is negated.
 *
 * <p>NOTE(review): this method mutates instance state ({@code leftFieldnr},
 * {@code rightFieldnr}, {@code inList}, {@code rightString}) as caches, so it
 * is not safe for concurrent use from multiple threads.
 *
 * @param rowMeta metadata describing the row layout
 * @param r       the row values, indexed per {@code rowMeta}
 * @return the boolean outcome of the condition for this row
 * @throws RuntimeException wrapping any error raised while comparing values
 */
public boolean evaluate( RowMetaInterface rowMeta, Object[] r ) {
  // Start of evaluate
  boolean retval = false;

  // If we have 0 items in the list, evaluate the current condition
  // Otherwise, evaluate all sub-conditions
  //
  try {
    if ( isAtomic() ) {

      // TRUE short-circuits everything; negation is applied directly.
      if ( function == FUNC_TRUE ) {
        return !negate;
      }

      // Get fieldnrs left value
      //
      // Check out the fieldnrs if we don't have them...
      if ( leftValuename != null && leftValuename.length() > 0 ) {
        leftFieldnr = rowMeta.indexOfValue( leftValuename );
      }

      // Get fieldnrs right value
      //
      if ( rightValuename != null && rightValuename.length() > 0 ) {
        rightFieldnr = rowMeta.indexOfValue( rightValuename );
      }

      // Get fieldnrs left field
      ValueMetaInterface fieldMeta = null;
      Object field = null;
      if ( leftFieldnr >= 0 ) {
        fieldMeta = rowMeta.getValueMeta( leftFieldnr );
        field = r[ leftFieldnr ];
      } else {
        return false; // no fields to evaluate
      }

      // Get fieldnrs right exact
      // A constant right-hand value (rightExact) takes precedence; otherwise
      // fall back to the right-hand field from the row.
      ValueMetaInterface fieldMeta2 = rightExact != null ? rightExact.getValueMeta() : null;
      Object field2 = rightExact != null ? rightExact.getValueData() : null;
      if ( field2 == null && rightFieldnr >= 0 ) {
        fieldMeta2 = rowMeta.getValueMeta( rightFieldnr );
        field2 = r[ rightFieldnr ];
      }

      // Evaluate
      switch ( function ) {
        case FUNC_EQUAL:
          retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) == 0 );
          break;
        case FUNC_NOT_EQUAL:
          retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) != 0 );
          break;
        case FUNC_SMALLER:
          // Added this if/else to accommodate for CUST-270
          if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
            && fieldMeta.isNull( field ) ) {
            retval = false;
          } else {
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) < 0 );
          }
          break;
        case FUNC_SMALLER_EQUAL:
          // Added this if/else to accommodate for CUST-270
          if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
            && fieldMeta.isNull( field ) ) {
            retval = false;
          } else {
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) <= 0 );
          }
          break;
        case FUNC_LARGER:
          retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) > 0 );
          break;
        case FUNC_LARGER_EQUAL:
          retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) >= 0 );
          break;
        case FUNC_REGEXP:
          // Null on either side never matches a regular expression.
          if ( fieldMeta.isNull( field ) || field2 == null ) {
            retval = false;
          } else {
            retval =
              Pattern
                .matches( fieldMeta2.getCompatibleString( field2 ), fieldMeta.getCompatibleString( field ) );
          }
          break;
        case FUNC_NULL:
          retval = ( fieldMeta.isNull( field ) );
          break;
        case FUNC_NOT_NULL:
          retval = ( !fieldMeta.isNull( field ) );
          break;
        case FUNC_IN_LIST:
          // performance reason: create the array first or again when it is against a field and not a constant
          //
          if ( inList == null || rightFieldnr >= 0 ) {
            // Split on ';', strip backslash escapes, and sort so membership
            // can be tested with a binary search.
            inList = Const.splitString( fieldMeta2.getString( field2 ), ';', true );
            for ( int i = 0; i < inList.length; i++ ) {
              inList[i] = inList[i] == null ? null : inList[i].replace( "\\", "" );
            }
            Arrays.sort( inList );
          }
          String searchString = fieldMeta.getCompatibleString( field );
          int inIndex = -1;
          if ( searchString != null ) {
            inIndex = Arrays.binarySearch( inList, searchString );
          }
          retval = inIndex >= 0;
          break;
        case FUNC_CONTAINS:
          String fm2CompatibleContains = fieldMeta2.getCompatibleString( field2 );
          retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
            .filter( s -> s.contains( fm2CompatibleContains ) ).isPresent();
          break;
        case FUNC_STARTS_WITH:
          String fm2CompatibleStarts = fieldMeta2.getCompatibleString( field2 );
          retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
            .filter( s -> s.startsWith( fm2CompatibleStarts ) ).isPresent();
          break;
        case FUNC_ENDS_WITH:
          String string = fieldMeta.getCompatibleString( field );
          if ( !Utils.isEmpty( string ) ) {
            // rightString caches the right-hand value; note the actual
            // comparison below still re-reads field2's compatible string.
            if ( rightString == null && field2 != null ) {
              rightString = fieldMeta2.getCompatibleString( field2 );
            }
            if ( rightString != null ) {
              retval = string.endsWith( fieldMeta2.getCompatibleString( field2 ) );
            } else {
              retval = false;
            }
          } else {
            retval = false;
          }
          break;
        case FUNC_LIKE:
          // Converts to a regular expression
          // TODO: optimize the patterns and String replacements
          //
          if ( fieldMeta.isNull( field ) || field2 == null ) {
            retval = false;
          } else {
            // SQL wildcards: '%' -> '.*', '?' -> '.'
            String regex = fieldMeta2.getCompatibleString( field2 );
            regex = regex.replace( "%", ".*" );
            regex = regex.replace( "?", "." );
            retval = Pattern.matches( regex, fieldMeta.getCompatibleString( field ) );
          }
          break;
        default:
          break;
      }

      // Only NOT makes sense, the rest doesn't, so ignore!!!!
      // Optionally negate
      //
      if ( isNegated() ) {
        retval = !retval;
      }
    } else {
      // Composite : get first
      Condition cb0 = list.get( 0 );
      retval = cb0.evaluate( rowMeta, r );

      // Loop over the conditions listed below.
      //
      for ( int i = 1; i < list.size(); i++ ) {
        // Composite : #i
        // Get right hand condition
        Condition cb = list.get( i );

        // Evaluate the right hand side of the condition cb.evaluate() within
        // the switch statement
        // because the condition may be short-circuited due to the left hand
        // side (retval)
        switch ( cb.getOperator() ) {
          case Condition.OPERATOR_OR:
            retval = retval || cb.evaluate( rowMeta, r );
            break;
          case Condition.OPERATOR_AND:
            retval = retval && cb.evaluate( rowMeta, r );
            break;
          case Condition.OPERATOR_OR_NOT:
            retval = retval || ( !cb.evaluate( rowMeta, r ) );
            break;
          case Condition.OPERATOR_AND_NOT:
            retval = retval && ( !cb.evaluate( rowMeta, r ) );
            break;
          case Condition.OPERATOR_XOR:
            retval = retval ^ cb.evaluate( rowMeta, r );
            break;
          default:
            break;
        }
      }

      // Composite: optionally negate
      if ( isNegated() ) {
        retval = !retval;
      }
    }
  } catch ( Exception e ) {
    throw new RuntimeException( "Unexpected error evaluation condition [" + toString() + "]", e );
  }

  return retval;
}
|
/**
 * A negated FUNC_TRUE condition must evaluate to false regardless of row
 * contents (the TRUE function short-circuits and only negation applies).
 */
@Test
public void testNegatedTrueFuncEvaluatesAsFalse() throws Exception {
    String left = "test_filed";
    String right = "test_value";
    int func = Condition.FUNC_TRUE;
    boolean negate = true;

    Condition condition = new Condition( negate, left, func, right, null );
    assertFalse( condition.evaluate( new RowMeta(), new Object[]{ "test" } ) );
}
|
/**
 * Returns the bounding box of a geometry as a two-element array: the lower
 * left and upper right corner points. Returns SQL NULL for an empty envelope.
 */
@SqlNullable
@Description("Returns the lower left and upper right corners of bounding rectangular polygon of a Geometry")
@ScalarFunction("ST_EnvelopeAsPts")
@SqlType("array(" + GEOMETRY_TYPE_NAME + ")")
public static Block stEnvelopeAsPts(@SqlType(GEOMETRY_TYPE_NAME) Slice input)
{
    Envelope envelope = deserializeEnvelope(input);
    if (envelope.isEmpty()) {
        // Empty geometry has no envelope -> SQL NULL.
        return null;
    }
    BlockBuilder blockBuilder = GEOMETRY.createBlockBuilder(null, 2);
    // Corner order is fixed: (xmin, ymin) first, then (xmax, ymax).
    org.locationtech.jts.geom.Point lowerLeftCorner = createJtsPoint(envelope.getXMin(), envelope.getYMin());
    org.locationtech.jts.geom.Point upperRightCorner = createJtsPoint(envelope.getXMax(), envelope.getYMax());
    GEOMETRY.writeSlice(blockBuilder, serialize(lowerLeftCorner));
    GEOMETRY.writeSlice(blockBuilder, serialize(upperRightCorner));
    return blockBuilder.build();
}
|
/**
 * ST_EnvelopeAsPts corner cases: every geometry type yields its (min, max)
 * corner pair, an empty linestring yields NULL, and a point's envelope
 * degenerates to the point itself.
 */
@Test
public void testSTEnvelopeAsPts()
{
    assertEnvelopeAsPts("MULTIPOINT (1 2, 2 4, 3 6, 4 8)", new Point(1, 2), new Point(4, 8));
    // Empty geometry -> SQL NULL.
    assertFunction("ST_EnvelopeAsPts(ST_GeometryFromText('LINESTRING EMPTY'))", new ArrayType(GEOMETRY), null);
    assertEnvelopeAsPts("LINESTRING (1 1, 2 2, 1 3)", new Point(1, 1), new Point(2, 3));
    assertEnvelopeAsPts("LINESTRING (8 4, 5 7)", new Point(5, 4), new Point(8, 7));
    assertEnvelopeAsPts("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))", new Point(1, 1), new Point(5, 4));
    assertEnvelopeAsPts("POLYGON ((1 1, 4 1, 1 4, 1 1))", new Point(1, 1), new Point(4, 4));
    assertEnvelopeAsPts("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))", new Point(0, 0), new Point(3, 3));
    assertEnvelopeAsPts("GEOMETRYCOLLECTION (POINT (5 1), LINESTRING (3 4, 4 4))", new Point(3, 1), new Point(5, 4));
    // A point's envelope is the point repeated as both corners.
    assertEnvelopeAsPts("POINT (1 2)", new Point(1, 2), new Point(1, 2));
}
|
/**
 * Returns the value view of the underlying delegate map.
 */
@Override
public Collection<V> values() {
    return targetMap.values();
}
|
/**
 * values() of the case-insensitive map must return the stored values in
 * insertion order (the delegate is a LinkedHashMap).
 */
@Test
void values() {
    List<String> values = new ArrayList<>();
    values.add("Value");
    values.add("Value2");
    Assertions.assertArrayEquals(values.toArray(), lowerCaseLinkHashMap.values().toArray());
}
|
/**
 * Parses an HTTP query string into the request's parameter map.
 *
 * <p>Strips a trailing anchor ("#..."), splits on '&', skips empty segments
 * (consecutive '&' characters), and hands each non-empty "key=value" segment
 * to {@code parseSingleParam}. A null or empty query string is a no-op.
 */
protected static void parseParams(String queryString, CommandRequest request) {
    if (queryString == null || queryString.length() < 1) {
        return;
    }
    int offset = 0, pos = -1;

    // check anchor
    queryString = removeAnchor(queryString);

    while (true) {
        offset = pos + 1;
        pos = queryString.indexOf('&', offset);
        if (offset == pos) {
            // empty segment between consecutive '&' characters — skip it
            continue;
        }
        // pos == -1 means this is the final segment, running to end of string.
        parseSingleParam(queryString.substring(offset, pos == -1 ? queryString.length() : pos), request);
        if (pos < 0) {
            // reach the end
            break;
        }
    }
}
|
/**
 * parseParams edge cases: URL-decoded keys/values (including UTF-8 and '+'),
 * empty segments skipped, anchor stripped, and null/empty/degenerate query
 * strings yielding no parameters.
 */
@Test
public void parseParams() {
    CommandRequest request;

    // mixed: decoded values, empty segments, '+' as space, UTF-8, and a "#mark" anchor
    request = new CommandRequest();
    HttpEventTask.parseParams("a=1&&b&=3&&c=4&a_+1=3_3%20&%E7%9A%84=test%E7%9A%84#mark", request);
    assertEquals(4, request.getParameters().size());
    assertEquals("1", request.getParam("a"));
    assertNull(request.getParam("b"));
    assertEquals("4", request.getParam("c"));
    assertEquals("3_3 ", request.getParam("a_ 1"));
    assertEquals("test的", request.getParam("的"));

    // null and empty query strings produce no parameters
    request = new CommandRequest();
    HttpEventTask.parseParams(null, request);
    assertEquals(0, request.getParameters().size());

    request = new CommandRequest();
    HttpEventTask.parseParams("", request);
    assertEquals(0, request.getParameters().size());

    // only empty/keyless segments -> nothing recorded
    request = new CommandRequest();
    HttpEventTask.parseParams("&&b&=3&", request);
    assertEquals(0, request.getParameters().size());
}
|
/**
 * Returns {@code true} if the given collection is empty or contains only
 * {@code null} elements.
 *
 * @param collection the collection to inspect; must not itself be {@code null}
 * @return {@code true} for an empty collection or one whose elements are all {@code null}
 */
public static boolean isEmptyOrAllElementsNull(Collection<?> collection) {
        // allMatch is vacuously true for an empty collection — matching the
        // original loop's behavior — and short-circuits on the first non-null
        // element.
        return collection.stream().allMatch(element -> element == null);
    }
|
/**
 * Covers the empty, all-null, and mixed-content cases of
 * CollectionUtil.isEmptyOrAllElementsNull.
 */
@Test
void testIsEmptyOrAllElementsNull() {
        // empty and all-null collections count as "empty or all elements null"
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Collections.emptyList())).isTrue();
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Collections.singletonList(null)))
                .isTrue();
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Arrays.asList(null, null))).isTrue();
        // any non-null element — regardless of position — makes the result false
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Collections.singletonList("test")))
                .isFalse();
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Arrays.asList(null, "test"))).isFalse();
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Arrays.asList("test", null))).isFalse();
        assertThat(CollectionUtil.isEmptyOrAllElementsNull(Arrays.asList(null, "test", null)))
                .isFalse();
    }
|
/**
 * Periodic check that probes the running Elasticsearch version and raises
 * (or clears) an ES_VERSION_MISMATCH notification when it no longer matches
 * the version Graylog was started against. Skipped entirely when the version
 * is overridden manually.
 */
@Override
public void doRun() {
        if (versionOverride.isPresent()) {
            LOG.debug("Elasticsearch version is set manually. Not running check.");
            return;
        }
        // The probe may return empty (e.g. cluster unreachable); in that case
        // neither a fix nor a new notification is published this round.
        final Optional<SearchVersion> probedVersion = this.versionProbe.probe(this.elasticsearchHosts);
        probedVersion.ifPresent(version -> {
            if (compatible(this.initialElasticsearchVersion, version)) {
                // Versions are compatible (again): clear any previously raised notification.
                notificationService.fixed(Notification.Type.ES_VERSION_MISMATCH);
            } else {
                LOG.warn("Elasticsearch version currently running ({}) is incompatible with the one Graylog was started " +
                        "with ({}) - a restart is required!", version, initialElasticsearchVersion);
                final Notification notification = notificationService.buildNow()
                        .addType(Notification.Type.ES_VERSION_MISMATCH)
                        .addSeverity(Notification.Severity.URGENT)
                        .addDetail("initial_version", initialElasticsearchVersion.toString())
                        .addDetail("current_version", version.toString());
                // publishIfFirst avoids re-publishing the same notification on every run.
                notificationService.publishIfFirst(notification);
            }
        });
    }
|
/**
 * A probed major version (9.x) different from the initial one (8.x) must
 * raise the version-mismatch notification.
 */
@Test
void createsNotificationIfCurrentVersionIsIncompatibleWithInitialOne() {
        returnProbedVersion(Version.of(9, 2, 3));
        createPeriodical(SearchVersion.elasticsearch(8, 1, 2)).doRun();
        assertNotificationWasRaised();
    }
|
/**
 * Returns the current counter value for {@code key}, blocking until the
 * asynchronous map operation completes (or the wait fails, e.g. on timeout).
 */
@Override
public long get(K key) {
        // complete(...) synchronously waits on the async result.
        return complete(asyncCounterMap.get(key));
    }
|
/**
 * When the underlying async map completes with a timeout error, the blocking
 * wrapper must surface it as ConsistentMapException.Timeout.
 */
@Test(expected = ConsistentMapException.Timeout.class)
public void testTimeout() {
        AtomicCounterMapWithErrors<String> atomicCounterMap =
                new AtomicCounterMapWithErrors<>();
        atomicCounterMap.setErrorState(TestingCompletableFutures.ErrorState.TIMEOUT_EXCEPTION);
        DefaultAtomicCounterMap<String> map =
                new DefaultAtomicCounterMap<>(atomicCounterMap, 1000);
        // Expected to throw while waiting on the async get.
        map.get(KEY1);
    }
|
/**
 * Validates this answer: the inline query id must be non-empty, every result
 * must itself be valid, and the optional button (when present) must be valid.
 *
 * @throws TelegramApiValidationException if any check fails
 */
@Override
public void validate() throws TelegramApiValidationException {
        if (inlineQueryId.isEmpty()) {
            throw new TelegramApiValidationException("InlineQueryId can't be empty", this);
        }
        // Each result performs its own field-level validation.
        for (InlineQueryResult singleResult : results) {
            singleResult.validate();
        }
        // The button is optional; validate only when it is set.
        if (button != null) {
            button.validate();
        }
    }
|
/**
 * An empty inline query id must be rejected by validate(). The previous
 * version passed vacuously when no exception was thrown; this version fails
 * in that case because the captured message stays null and the final
 * assertEquals mismatches.
 */
@Test
void testInlineQueryIdCanNotBeEmpty() {
        answerInlineQuery.setInlineQueryId("");
        String validationMessage = null;
        try {
            answerInlineQuery.validate();
        } catch (TelegramApiValidationException e) {
            validationMessage = e.getMessage();
        }
        // Fails with "expected ... but was null" if validate() did not throw.
        assertEquals("InlineQueryId can't be empty", validationMessage);
    }
|
/**
 * Deserializes a coordinator record from its key and value buffers. The key
 * buffer starts with a version prefix that doubles as the record type; a null
 * value buffer yields a tombstone record (null value).
 *
 * @throws RuntimeException if a version prefix cannot be read, the record
 *         type is unknown, or a message body cannot be parsed
 */
@Override
public CoordinatorRecord deserialize(
        ByteBuffer keyBuffer,
        ByteBuffer valueBuffer
) throws RuntimeException {
        // The key's version prefix identifies which record type follows.
        final short recordType = readVersion(keyBuffer, "key");
        final ApiMessage keyMessage = apiMessageKeyFor(recordType);
        readMessage(keyMessage, keyBuffer, recordType, "key");
        // A null value buffer represents a tombstone for this key.
        if (valueBuffer == null) {
            return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null);
        }
        final ApiMessage valueMessage = apiMessageValueFor(recordType);
        final short valueVersion = readVersion(valueBuffer, "value");
        readMessage(valueMessage, valueBuffer, valueVersion, "value");
        return new CoordinatorRecord(
            new ApiMessageAndVersion(keyMessage, recordType),
            new ApiMessageAndVersion(valueMessage, valueVersion)
        );
    }
|
/**
 * A non-null but empty value buffer is not a tombstone: reading its version
 * prefix must fail with a descriptive RuntimeException.
 */
@Test
public void testDeserializeWithValueEmptyBuffer() {
        GroupCoordinatorRecordSerde serde = new GroupCoordinatorRecordSerde();
        ApiMessageAndVersion key = new ApiMessageAndVersion(
            new ConsumerGroupMetadataKey().setGroupId("foo"),
            (short) 3
        );
        ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
        // zero-capacity buffer: no room for even the version short
        ByteBuffer valueBuffer = ByteBuffer.allocate(0);
        RuntimeException ex =
            assertThrows(RuntimeException.class,
                () -> serde.deserialize(keyBuffer, valueBuffer));
        assertEquals("Could not read version from value's buffer.", ex.getMessage());
    }
|
/**
 * Reads the raw content bytes of the first PEM object found in the stream.
 *
 * @param keyStream input stream containing PEM-formatted data
 * @return the decoded PEM content, or {@code null} if no PEM object was found
 */
public static byte[] readPem(InputStream keyStream) {
        final PemObject pemObject = readPemObject(keyStream);
        // No PEM object in the stream maps to a null result.
        return pemObject == null ? null : pemObject.getContent();
    }
|
/**
 * Round-trips an SM2 sign/verify using key material read from local PEM
 * files. Disabled because it depends on files under d:/test/keys that only
 * exist on the original author's machine.
 */
@Test
@Disabled
public void readECPrivateKeyTest2() {
        // https://gitee.com/dromara/hutool/issues/I37Z75
        final byte[] d = PemUtil.readPem(FileUtil.getInputStream("d:/test/keys/priv.key"));
        final byte[] publicKey = PemUtil.readPem(FileUtil.getInputStream("d:/test/keys/pub.key"));
        final SM2 sm2 = new SM2(d, publicKey);
        sm2.usePlainEncoding();
        final String content = "我是Hanley.";
        // a signature produced with the private key must verify with the public key
        final byte[] sign = sm2.sign(StrUtil.utf8Bytes(content));
        final boolean verify = sm2.verify(StrUtil.utf8Bytes(content), sign);
        assertTrue(verify);
    }
|
/**
 * Returns the latitude extent of this bounding box,
 * i.e. {@code maxLatitude - minLatitude}.
 */
public double getLatitudeSpan() {
        return this.maxLatitude - this.minLatitude;
    }
|
/**
 * The latitude span must equal max minus min latitude exactly (delta 0).
 */
@Test
public void getLatitudeSpanTest() {
        BoundingBox boundingBox = new BoundingBox(MIN_LATITUDE, MIN_LONGITUDE, MAX_LATITUDE, MAX_LONGITUDE);
        Assert.assertEquals(MAX_LATITUDE - MIN_LATITUDE, boundingBox.getLatitudeSpan(), 0);
    }
|
/**
 * Deserializes an error payload into a SeaTunnelRuntimeException.
 * Convenience overload that delegates to the two-argument variant with no
 * second argument (null).
 */
public static SeaTunnelRuntimeException deserializeError(String payload) {
        return deserializeError(payload, null);
    }
|
/**
 * A payload that cannot be deserialized ("{}") must map to the
 * DESERIALIZE_FAILED error code with the payload echoed in the message.
 */
@Test
public void testError() {
        SeaTunnelRuntimeException error = GoogleSheetsError.deserializeError("{}");
        Assertions.assertEquals(
                GoogleSheetsErrorCode.DESERIALIZE_FAILED.getCode(),
                error.getSeaTunnelErrorCode().getCode());
        String expectedMsg =
                "ErrorCode:[GOOGLE-SHEETS-01], ErrorDescription:[Fail to deserialize Google Sheets '{}']";
        Assertions.assertEquals(expectedMsg, error.getMessage());
    }
|
/**
 * Returns the effective row count after pagination revision.
 * When the statement requires the maximum row count, Integer.MAX_VALUE is
 * returned; for a LIMIT-style value segment the offset is folded into the
 * count so rows skipped by the offset are still fetched from the data source.
 */
public long getRevisedRowCount(final SelectStatementContext selectStatementContext) {
        if (isMaxRowCount(selectStatementContext)) {
            return Integer.MAX_VALUE;
        }
        if (rowCountSegment instanceof LimitValueSegment) {
            return actualOffset + actualRowCount;
        }
        return actualRowCount;
    }
|
/**
 * Smoke test: revising the row count for a SQL92 SELECT must not throw.
 * NOTE(review): no assertion on the returned value — consider asserting the
 * expected count.
 */
@Test
void assertGetRevisedRowCountForSQL92() {
        getRevisedRowCount(new SQL92SelectStatement());
    }
|
/**
 * Acquires a unique task id for the current job by atomically creating a lock
 * file named after a random candidate id; keeps drawing new random ids until
 * the file creation succeeds (i.e. no other task holds that id).
 */
@Override
public TaskID acquireTaskIdLock(Configuration conf) {
        JobID jobId = HadoopFormats.getJobId(conf);
        int taskIdCandidate;
        do {
            // Draw a candidate and try to claim it via exclusive lock-file creation.
            taskIdCandidate = RANDOM_GEN.nextInt(Integer.MAX_VALUE);
        } while (!tryCreateFile(
                conf,
                new Path(
                    locksDir,
                    String.format(LOCKS_DIR_TASK_PATTERN, getJobJtIdentifier(conf), taskIdCandidate))));
        return HadoopFormats.createTaskID(jobId, taskIdCandidate);
    }
|
/**
 * Acquiring 100 task-id locks must create 100 distinct lock files in the job
 * folder (plus one .crc file per lock file on the local filesystem).
 */
@Test
public void testTaskIdLockAcquire() {
        int tasksCount = 100;
        for (int i = 0; i < tasksCount; i++) {
            TaskID taskID = tested.acquireTaskIdLock(configuration);
            assertTrue(isFileExists(getTaskIdPath(taskID)));
        }
        String jobFolderName = getFileInJobFolder("");
        File jobFolder = new File(jobFolderName);
        assertTrue(jobFolder.isDirectory());
        // we have to multiply by 2 because crc files exists
        assertEquals(tasksCount * 2, jobFolder.list().length);
    }
|
/**
 * Program entry point: runs a SimpleTask and logs a completion message when
 * the task's callback fires.
 */
public static void main(final String[] args) {
        final SimpleTask simpleTask = new SimpleTask();
        simpleTask.executeWith(() -> LOGGER.info("I'm done now."));
    }
|
/**
 * Smoke test: running the demo application end-to-end must not throw.
 */
@Test
void shouldExecuteApplicationWithoutException() {
        assertDoesNotThrow(() -> App.main(new String[]{}));
    }
|
/**
 * Creates a user via the remote API, discarding the HTTP response details.
 *
 * @param body the user to create
 * @throws RestClientException if the API call fails
 */
public void createUser(User body) throws RestClientException {
        createUserWithHttpInfo(body);
    }
|
/**
 * Generated stub: invokes createUser with a null body.
 * NOTE(review): no assertions yet — see the TODO below.
 */
@Test
public void createUserTest() {
        User body = null;
        api.createUser(body);
        // TODO: test validations
    }
|
/**
 * Returns true if this circle intersects the polyline described by the given
 * points. For a single point the plain center distance is checked; otherwise
 * each segment is tested: when the circle center projects onto the segment,
 * the perpendicular edge distance is used, else the distance to the segment's
 * start point (and, for the last segment only, also its end point).
 *
 * @throws IllegalArgumentException if the point list is empty
 */
@Override
public boolean intersects(PointList pointList) {
        // similar code to LocationIndexTree.checkAdjacent
        int len = pointList.size();
        if (len == 0)
            throw new IllegalArgumentException("PointList must not be empty");
        double tmpLat = pointList.getLat(0);
        double tmpLon = pointList.getLon(0);
        if (len == 1)
            // degenerate polyline: plain point-to-center distance check
            return calc.calcNormalizedDist(lat, lon, tmpLat, tmpLon) <= normedDist;
        for (int pointIndex = 1; pointIndex < len; pointIndex++) {
            double wayLat = pointList.getLat(pointIndex);
            double wayLon = pointList.getLon(pointIndex);
            if (calc.validEdgeDistance(lat, lon, tmpLat, tmpLon, wayLat, wayLon)) {
                // center projects inside the segment: use perpendicular distance
                if (calc.calcNormalizedEdgeDistance(lat, lon, tmpLat, tmpLon, wayLat, wayLon) <= normedDist)
                    return true;
            } else {
                // otherwise compare against the segment's start point, and on
                // the final segment also against its end point
                if (calc.calcNormalizedDist(lat, lon, tmpLat, tmpLon) <= normedDist
                        || pointIndex + 1 == len && calc.calcNormalizedDist(lat, lon, wayLat, wayLon) <= normedDist)
                    return true;
            }
            tmpLat = wayLat;
            tmpLon = wayLon;
        }
        return false;
    }
|
/**
 * Exercises circle/polyline intersection: segments passing through, near,
 * ending inside, and clearly outside the circle.
 */
@Test
public void testIntersectPointList() {
        Circle circle = new Circle(1.5, 0.3, DistanceCalcEarth.DIST_EARTH.calcDist(0, 0, 0, 0.7));
        PointList pointList = new PointList();
        pointList.add(5, 5);
        pointList.add(5, 0);
        // both segment endpoints far north of the circle: no intersection
        assertFalse(circle.intersects(pointList));
        pointList.add(-5, 0);
        // extending the line south makes it cross the circle
        assertTrue(circle.intersects(pointList));
        pointList = new PointList();
        pointList.add(5, 1);
        pointList.add(-1, 0);
        assertTrue(circle.intersects(pointList));
        pointList = new PointList();
        pointList.add(5, 0);
        pointList.add(-1, 3);
        assertFalse(circle.intersects(pointList));
        pointList = new PointList();
        pointList.add(5, 0);
        pointList.add(2, 0);
        // segment endpoint (2, 0) lies within the circle's radius
        assertTrue(circle.intersects(pointList));
        pointList = new PointList();
        pointList.add(1.5, -2);
        pointList.add(1.5, 2);
        // horizontal segment passes through the circle's latitude
        assertTrue(circle.intersects(pointList));
    }
|
/**
 * Deprecated variant of flatTransformValues that accepts a key-less
 * ValueTransformerSupplier; adapts it to the key-aware form and delegates.
 *
 * @deprecated use the non-deprecated overloads of this method instead
 * @throws NullPointerException if valueTransformerSupplier is null
 */
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
                                               final String... stateStoreNames) {
        Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
        // Wrap the key-less supplier so the shared key-aware implementation can be reused.
        return doFlatTransformValues(
            toValueTransformerWithKeySupplier(valueTransformerSupplier),
            NamedInternal.empty(),
            stateStoreNames);
    }
|
/**
 * Passing a null supplier must fail fast with an NPE carrying the documented
 * message, even when state store names are provided.
 */
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithStores() {
        final NullPointerException exception = assertThrows(
            NullPointerException.class,
            () -> testStream.flatTransformValues(
                (org.apache.kafka.streams.kstream.ValueTransformerSupplier<Object, Iterable<Object>>) null,
                "stateStore"));
        assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null"));
    }
|
/**
 * Reloads the proxy-user ("superuser") group configuration from a freshly
 * constructed server-side Configuration.
 */
public static void refreshSuperUserGroupsConfiguration() {
        //load server side configuration;
        refreshSuperUserGroupsConfiguration(new Configuration());
    }
|
/**
 * A null configuration prefix must be rejected with IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithNullPrefix() throws Exception {
        ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false),
            null);
    }
|
/**
 * Finishes bootstrapping an OpenStack node whose bridges were just created:
 * ensures the OVSDB connection exists, adds the uplink port for gateway
 * nodes, creates tunnel interfaces (VXLAN/GRE/GENEVE) when a data IP is
 * configured, provisions DPDK and physical interfaces, and attaches the VLAN
 * interface when configured. All failures are logged rather than propagated.
 */
@Override
public void processDeviceCreatedState(OpenstackNode osNode) {
        try {
            // (Re)establish the OVSDB connection first; processing resumes on a later event.
            if (!isOvsdbConnected(osNode, ovsdbPortNum, ovsdbController, deviceService)) {
                ovsdbController.connect(osNode.managementIp(), tpPort(ovsdbPortNum));
                return;
            }
            if (osNode.type() == GATEWAY) {
                addOrRemoveSystemInterface(osNode, INTEGRATION_BRIDGE,
                        osNode.uplinkPort(), deviceService, true);
            }
            // Tunnel interfaces are only relevant when a data-plane IP is configured.
            if (osNode.dataIp() != null &&
                    !isIntfEnabled(osNode, VXLAN_TUNNEL)) {
                createVxlanTunnelInterface(osNode);
            }
            if (osNode.dataIp() != null &&
                    !isIntfEnabled(osNode, GRE_TUNNEL)) {
                createGreTunnelInterface(osNode);
            }
            if (osNode.dataIp() != null &&
                    !isIntfEnabled(osNode, GENEVE_TUNNEL)) {
                createGeneveTunnelInterface(osNode);
            }
            if (osNode.dpdkConfig() != null && osNode.dpdkConfig().dpdkIntfs() != null) {
                osNode.dpdkConfig().dpdkIntfs().stream()
                        .filter(dpdkintf -> dpdkintf.deviceName().equals(TUNNEL_BRIDGE))
                        .forEach(dpdkintf -> addOrRemoveDpdkInterface(
                                osNode, dpdkintf, ovsdbPortNum, ovsdbController, true));
                osNode.dpdkConfig().dpdkIntfs().stream()
                        .filter(dpdkintf -> dpdkintf.deviceName().equals(INTEGRATION_BRIDGE))
                        .forEach(dpdkintf -> addOrRemoveDpdkInterface(
                                osNode, dpdkintf, ovsdbPortNum, ovsdbController, true));
            }
            // provision new physical interfaces on the given node
            // this includes creating physical bridge, attaching physical port
            // to physical bridge, adding patch ports to both physical bridge and br-int
            provisionPhysicalInterfaces(osNode);
            if (osNode.vlanIntf() != null &&
                    !isIntfEnabled(osNode, osNode.vlanIntf())) {
                addOrRemoveSystemInterface(osNode, INTEGRATION_BRIDGE,
                        osNode.vlanIntf(), deviceService, true);
            }
        } catch (Exception e) {
            // Pass the message for the placeholder and the exception as the final
            // argument so SLF4J logs the full stack trace; previously the Throwable
            // filled the placeholder and the stack trace was lost.
            log.error("Exception occurred because of {}", e.getMessage(), e);
        }
    }
|
/**
 * A gateway node in DEVICE_CREATED state with its OVSDB/integration devices
 * and uplink port present must transition to COMPLETE after processing.
 */
@Test
public void testGatewayNodeProcessDeviceCreatedState() {
        testNodeManager.createNode(GATEWAY_2);
        // register the devices and uplink port the handler expects to find
        TEST_DEVICE_SERVICE.devMap.put(GATEWAY_2_OVSDB_DEVICE.id(), GATEWAY_2_OVSDB_DEVICE);
        TEST_DEVICE_SERVICE.devMap.put(GATEWAY_2_INTG_DEVICE.id(), GATEWAY_2_INTG_DEVICE);
        TEST_DEVICE_SERVICE.portList.add(createPort(GATEWAY_2_INTG_DEVICE, GATEWAY_UPLINK_PORT));
        assertEquals(ERR_STATE_NOT_MATCH, DEVICE_CREATED,
                testNodeManager.node(GATEWAY_2_HOSTNAME).state());
        target.processDeviceCreatedState(GATEWAY_2);
        assertEquals(ERR_STATE_NOT_MATCH, COMPLETE,
                testNodeManager.node(GATEWAY_2_HOSTNAME).state());
    }
|
/**
 * Returns this plugin's registered name, as defined by
 * PluginEnum.CRYPTOR_RESPONSE.
 */
@Override
public String named() {
        return PluginEnum.CRYPTOR_RESPONSE.getName();
    }
|
/**
 * The plugin name must match the CRYPTOR_RESPONSE enum entry.
 */
@Test
public void namedTest() {
        final String result = cryptorResponsePlugin.named();
        assertEquals(PluginEnum.CRYPTOR_RESPONSE.getName(), result);
    }
|
/**
 * Checks whether a request's Origin value is acceptable: allowed when the
 * configured origins contain "*" or the exact source value, or when the
 * origin's host equals this machine's hostname or "localhost"
 * (host comparison is case-insensitive).
 *
 * @param sourceHost the Origin header value; may be null or empty
 * @param zConf configuration supplying the allowed-origins list
 * @throws UnknownHostException if the local hostname cannot be resolved
 * @throws URISyntaxException if sourceHost is not a parseable URI
 */
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf)
        throws UnknownHostException, URISyntaxException {
        String sourceUriHost = "";
        if (sourceHost != null && !sourceHost.isEmpty()) {
            sourceUriHost = new URI(sourceHost).getHost();
            // URI.getHost() may be null for opaque or host-less URIs.
            sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase();
        }
        // sourceUriHost is already lower-cased (or empty) at this point, so the
        // redundant second toLowerCase() that used to be here was removed.
        String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase();
        return zConf.getAllowedOrigins().contains("*")
                || currentHost.equals(sourceUriHost)
                || "localhost".equals(sourceUriHost)
                || zConf.getAllowedOrigins().contains(sourceHost);
    }
|
/**
 * With a "*" entry in the allowed-origins configuration, any origin must be
 * accepted.
 */
@Test
void isValidFromStar()
        throws URISyntaxException, UnknownHostException {
        assertTrue(CorsUtils.isValidOrigin("http://anyhost.com",
                ZeppelinConfiguration.load("zeppelin-site-star.xml")));
    }
|
/**
 * Parses a B3 single-format header value spanning the entire char sequence.
 * Delegates to the range-based overload over [0, length).
 *
 * @return the parsed trace context or sampling flags, or null when the input
 *         cannot be parsed (per the range-based overload's contract)
 */
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
        return parseB3SingleFormat(b3, 0, b3.length());
    }
|
/**
 * A bare "1" in B3 single format encodes the sampled flag with no trace ids.
 */
@Test void parseB3SingleFormat_sampled() {
        assertThat(parseB3SingleFormat("1"))
            .isEqualTo(TraceContextOrSamplingFlags.SAMPLED);
    }
|
/**
 * Handles a successful share-group heartbeat response: updates the member id
 * and epoch, refreshes telemetry labels, and applies any new partition
 * assignment. Responses are ignored while the member is leaving, has already
 * left, or is otherwise not in the group.
 *
 * @throws IllegalArgumentException if the response carries a non-NONE error
 *         code (error responses must be routed to the failure path instead)
 */
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
        if (response.errorCode() != Errors.NONE.code()) {
            String errorMessage = String.format(
                    "Unexpected error in Heartbeat response. Expected no error, but received: %s",
                    Errors.forCode(response.errorCode())
            );
            throw new IllegalArgumentException(errorMessage);
        }
        MemberState state = state();
        if (state == MemberState.LEAVING) {
            log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
                    "already leaving the group.", memberId, memberEpoch);
            return;
        }
        if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
            log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
                    "to leave the group and completed the leave operation. ", memberId, memberEpoch);
            return;
        }
        if (isNotInGroup()) {
            log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
                    " so it's not a member of the group. ", memberId, state);
            return;
        }
        // Update the group member id label in the client telemetry reporter if the member id has
        // changed. Initially the member id is empty, and it is updated when the member joins the
        // group. This is done here to avoid updating the label on every heartbeat response. Also
        // check if the member id is null, as the schema defines it as nullable.
        if (response.memberId() != null && !response.memberId().equals(memberId)) {
            clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
                    Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
        }
        this.memberId = response.memberId();
        updateMemberEpoch(response.memberEpoch());
        // A null assignment means "no change"; only a non-null assignment is applied.
        ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
        if (assignment != null) {
            if (!state.canHandleNewAssignment()) {
                // New assignment received but member is in a state where it cannot take new
                // assignments (ex. preparing to leave the group)
                log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                        assignment, state);
                return;
            }
            // Re-index the assignment as topicId -> sorted partition set before applying it.
            Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
            assignment.topicPartitions().forEach(topicPartition -> newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
            processAssignmentReceived(newAssignment);
        }
    }
|
/**
 * A heartbeat response that arrives while the member is LEAVING must be
 * ignored: state, epoch, member id and (empty) assignment stay untouched and
 * the leave future remains pending.
 */
@Test
public void testIgnoreHeartbeatWhenLeavingGroup() {
        ShareMembershipManager membershipManager = createMemberInStableState();
        mockLeaveGroup();
        CompletableFuture<Void> leaveResult = membershipManager.leaveGroup();
        // response carries a (to-be-ignored) new assignment
        membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(createAssignment(true)).data());
        assertEquals(MemberState.LEAVING, membershipManager.state());
        assertEquals(-1, membershipManager.memberEpoch());
        assertEquals(MEMBER_ID, membershipManager.memberId());
        assertTrue(membershipManager.currentAssignment().partitions.isEmpty());
        assertFalse(leaveResult.isDone(), "Leave group result should not complete until the " +
                "heartbeat request to leave is sent out.");
    }
|
/**
 * Injects the channel state writer used for checkpointing. May be called at
 * most once; a second call fails the checkState with "Already initialized".
 */
@Override
public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
        checkState(this.channelStateWriter == null, "Already initialized");
        this.channelStateWriter = checkNotNull(channelStateWriter);
    }
|
/**
 * Timeoutable checkpoint barriers must be consumed promptly with 0, 1 and 2
 * preceding data buffers.
 */
@TestTemplate
void testConsumeTimeoutableCheckpointBarrierQuickly() throws Exception {
        PipelinedSubpartition subpartition = createSubpartition();
        subpartition.setChannelStateWriter(ChannelStateWriter.NO_OP);
        // initial state: no futures, no queued buffers
        assertSubpartitionChannelStateFuturesAndQueuedBuffers(subpartition, null, true, 0, false);
        // test without data buffer
        testConsumeQuicklyWithNDataBuffers(0, subpartition, 5L);
        // test with data buffer
        testConsumeQuicklyWithNDataBuffers(1, subpartition, 6L);
        testConsumeQuicklyWithNDataBuffers(2, subpartition, 7L);
    }
|
/**
 * Filters out the read data sources that the rule currently marks as
 * disabled, preserving the order of the remaining ones.
 */
@Override
public List<String> filter(final ReadwriteSplittingDataSourceGroupRule rule, final List<String> toBeFilteredReadDataSources) {
        final List<String> enabledDataSources = new LinkedList<>(toBeFilteredReadDataSources);
        // Drop every candidate that appears in the rule's disabled set.
        enabledDataSources.removeIf(rule.getDisabledDataSourceNames()::contains);
        return enabledDataSources;
    }
|
/**
 * Disabling read_ds_0 must leave only read_ds_1 after filtering.
 */
@Test
void assertGetEnabledReplicaDataSources() {
        rule.disableDataSource("read_ds_0");
        assertThat(new DisabledReadDataSourcesFilter().filter(rule, Arrays.asList("read_ds_0", "read_ds_1")), is(Collections.singletonList("read_ds_1")));
    }
|
/**
 * Factory for a table UDAF that sums lists of ints into a single running int.
 * Null input lists leave the aggregate unchanged; undo subtracts a list's sum
 * so the aggregate stays correct for table updates.
 */
@UdafFactory(description = "sum int values in a list into a single int")
public static TableUdaf<List<Integer>, Integer, Integer> sumIntList() {
        return new TableUdaf<List<Integer>, Integer, Integer>() {
            // The aggregation starts from zero.
            @Override
            public Integer initialize() {
                return 0;
            }
            // Adds the sum of the incoming list; a null list is a no-op.
            @Override
            public Integer aggregate(final List<Integer> valueToAdd, final Integer aggregateValue) {
                if (valueToAdd == null) {
                    return aggregateValue;
                }
                return aggregateValue + sumList(valueToAdd);
            }
            // Partial aggregates combine by simple addition.
            @Override
            public Integer merge(final Integer aggOne, final Integer aggTwo) {
                return aggOne + aggTwo;
            }
            // The output value is the aggregate itself (identity mapping).
            @Override
            public Integer map(final Integer agg) {
                return agg;
            }
            // Reverses a previous aggregate() call; a null list is a no-op.
            @Override
            public Integer undo(final List<Integer> valueToUndo, final Integer aggregateValue) {
                if (valueToUndo == null) {
                    return aggregateValue;
                }
                return aggregateValue - sumList(valueToUndo);
            }
            // Sums one list's elements, starting from the initial value.
            private int sumList(final List<Integer> list) {
                return sum(list, initialize(), Integer::sum);
            }
        };
    }
|
/**
 * Null elements inside the input list must be skipped: [1, 1, null, 1] sums
 * to 3.
 */
@Test
public void shouldIgnoreNull() {
        final TableUdaf<List<Integer>, Integer, Integer> udaf = ListSumUdaf.sumIntList();
        final Integer[] values = new Integer[] {1, 1, null, 1};
        final List<Integer> list = Arrays.asList(values);
        final Integer sum = udaf.aggregate(list, 0);
        assertThat(3, equalTo(sum));
    }
|
/**
 * Compiles an XLS decision table into DRL using the given template, reading
 * table data starting at the specified row and column.
 * Convenience overload that fixes the input type to XLS.
 */
public String compile(final String xls,
                      final String template,
                      int startRow,
                      int startCol) {
        return compile( xls,
                template,
                InputType.XLS,
                startRow,
                startCol );
    }
|
/**
 * Compiling a multi-sheet decision table loaded from the classpath must
 * produce DRL containing the expected rules and imports.
 */
@Test
public void testLoadFromClassPath() {
        final String drl = converter.compile("/data/MultiSheetDST.drl.xls",
                "/templates/test_template1.drl",
                11,
                2);
        assertThat(drl).isNotNull();
        // one rule per sheet row is expected in the generated DRL
        assertThat(drl.indexOf("rule \"How cool is Shaun 12\"") > 0).isTrue();
        assertThat(drl.indexOf("rule \"How cool is Kumar 11\"") > 0).isTrue();
        assertThat(drl).contains("import example.model.User;");
        assertThat(drl).contains("import example.model.Car;");
    }
|
/**
 * Executes the given SQL query and returns each result row as a
 * column-name → value map.
 *
 * @param sql the SQL query to run
 * @return one map per result row, in result-set order
 * @throws JDBCResourceManagerException if connecting or executing fails
 */
@Override
@SuppressWarnings("nullness")
public synchronized List<Map<String, Object>> runSQLQuery(String sql) {
        // Close the connection, statement, and result set via try-with-resources;
        // the previous version never closed the Connection and leaked it on every
        // call. The fully-qualified Connection name avoids needing a new import.
        try (java.sql.Connection connection = driver.getConnection(getUri(), username, password);
                Statement stmt = connection.createStatement();
                ResultSet resultSet = stmt.executeQuery(sql)) {
            List<Map<String, Object>> result = new ArrayList<>();
            while (resultSet.next()) {
                Map<String, Object> row = new HashMap<>();
                ResultSetMetaData metadata = resultSet.getMetaData();
                // Columns list in table metadata is 1-indexed
                for (int i = 1; i <= metadata.getColumnCount(); i++) {
                    row.put(metadata.getColumnName(i), resultSet.getObject(i));
                }
                result.add(row);
            }
            return result;
        } catch (Exception e) {
            throw new JDBCResourceManagerException("Failed to execute SQL statement: " + sql, e);
        }
    }
|
/**
 * A SQLException raised by the JDBC statement must be wrapped in
 * JDBCResourceManagerException.
 */
@Test
public void testRunSQLStatementShouldThrowErrorWhenJDBCFailsToExecuteSQL() throws SQLException {
        when(container.getHost()).thenReturn(HOST);
        when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
        // make the mocked statement fail on any query
        Statement statement = driver.getConnection(any(), any(), any()).createStatement();
        doThrow(SQLException.class).when(statement).executeQuery(anyString());
        assertThrows(
            JDBCResourceManagerException.class, () -> testManager.runSQLQuery("SQL statement"));
    }
|
/**
 * Builds (and caches) a query string from the multi-valued parameter map,
 * optionally URL-encoding keys and values with the given charset. Returns
 * null for a null/empty map; on later calls the cached string is returned
 * without rebuilding.
 *
 * @throws ServletException if the encode charset is not supported
 */
protected String generateQueryString(MultiValuedTreeMap<String, String> parameters, boolean encode, String encodeCharset)
        throws ServletException {
        if (parameters == null || parameters.isEmpty()) {
            return null;
        }
        // Return the memoized result from a previous call, if any.
        if (queryString != null) {
            return queryString;
        }
        StringBuilder queryStringBuilder = new StringBuilder();
        try {
            for (String key : parameters.keySet()) {
                for (String val : parameters.get(key)) {
                    // Every pair is prefixed with '&'; the leading one is stripped below.
                    queryStringBuilder.append("&");
                    if (encode) {
                        queryStringBuilder.append(URLEncoder.encode(key, encodeCharset));
                    } else {
                        queryStringBuilder.append(key);
                    }
                    queryStringBuilder.append("=");
                    // A null value yields "key=" with an empty value part.
                    if (val != null) {
                        if (encode) {
                            queryStringBuilder.append(URLEncoder.encode(val, encodeCharset));
                        } else {
                            queryStringBuilder.append(val);
                        }
                    }
                }
            }
        } catch (UnsupportedEncodingException e) {
            throw new ServletException("Invalid charset passed for query string encoding", e);
        }
        queryString = queryStringBuilder.toString();
        queryString = queryString.substring(1); // remove the first & - faster to do it here than adding logic in the Lambda
        return queryString;
    }
|
/**
 * Multi-valued parameters must all appear in the generated query string,
 * with values URL-encoded and pairs joined by '&'.
 */
@Test
void queryStringWithMultipleValues_generateQueryString_validQuery() {
        AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(multipleParams, mockContext, null, config);
        String parsedString = null;
        try {
            parsedString = request.generateQueryString(request.getAwsProxyRequest().getMultiValueQueryStringParameters(), true, config.getUriEncoding());
        } catch (ServletException e) {
            e.printStackTrace();
            fail("Could not generate query string");
        }
        assertTrue(parsedString.contains("one=two"));
        assertTrue(parsedString.contains("one=three"));
        // {"name":"faisal"} percent-encoded
        assertTrue(parsedString.contains("json=%7B%22name%22%3A%22faisal%22%7D"));
        // NOTE(review): indexOf("&") < length() is implied by contains("&"); the
        // effective checks are "contains '&'" and "'&' is not the first character".
        assertTrue(parsedString.contains("&") && parsedString.indexOf("&") > 0 && parsedString.indexOf("&") < parsedString.length());
    }
|
/**
 * Deserializes a SARIF 2.1.0 report from the given path. Unsupported SARIF
 * versions, JSON syntax/mapping errors, and I/O failures are all rethrown as
 * IllegalStateException with a descriptive message.
 */
@Override
public SarifSchema210 deserialize(Path reportPath) {
        try {
            return mapper
                // keep the offending source location in parse error messages
                .enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION)
                .addHandler(new DeserializationProblemHandler() {
                    // Intercept failures to instantiate the Version enum so an
                    // unsupported SARIF version produces a dedicated error.
                    @Override
                    public Object handleInstantiationProblem(DeserializationContext ctxt, Class<?> instClass, Object argument, Throwable t) throws IOException {
                        if (!instClass.equals(SarifSchema210.Version.class)) {
                            return NOT_HANDLED;
                        }
                        throw new UnsupportedSarifVersionException(format(UNSUPPORTED_VERSION_MESSAGE_TEMPLATE, argument), t);
                    }
                })
                .readValue(reportPath.toFile(), SarifSchema210.class);
        } catch (UnsupportedSarifVersionException e) {
            throw new IllegalStateException(e.getMessage(), e);
        } catch (JsonMappingException | JsonParseException e) {
            throw new IllegalStateException(format(SARIF_JSON_SYNTAX_ERROR, reportPath), e);
        } catch (IOException e) {
            throw new IllegalStateException(format(SARIF_REPORT_ERROR, reportPath), e);
        }
    }
|
/**
 * A SARIF file with invalid JSON syntax must fail with IllegalStateException
 * carrying the syntax-error message that names the report path.
 */
@Test
public void deserialize_shouldFail_whenJsonSyntaxIsIncorrect() throws URISyntaxException {
        URL sarifResource = requireNonNull(getClass().getResource("invalid-json-syntax.json"));
        Path sarif = Paths.get(sarifResource.toURI());
        assertThatThrownBy(() -> serializer.deserialize(sarif))
            .isInstanceOf(IllegalStateException.class)
            .hasMessage(format("Failed to read SARIF report at '%s': invalid JSON syntax or file is not UTF-8 encoded", sarif));
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.