focal_method | test_case |
|---|---|
@Override
public void updateNode(OpenstackNode osNode) {
checkNotNull(osNode, ERR_NULL_NODE);
OpenstackNode updatedNode;
OpenstackNode existingNode = osNodeStore.node(osNode.hostname());
checkNotNull(existingNode, ERR_NULL_NODE);
DeviceId existDeviceId = existingNode.intgBridge();
if (vlanIntfChanged(existingNode, osNode) ||
physicalIntfChanged(existingNode, osNode) ||
dpdkIntfChanged(existingNode, osNode)) {
removeNode(osNode.hostname());
// Wait one second for the ovsdb client to completely finish the removal job.
try {
TimeUnit.MILLISECONDS.sleep(1000);
} catch (InterruptedException e) {
log.error("Interrupted while waiting for ovsdb client removal", e);
Thread.currentThread().interrupt();
}
if (!intfsRemovedFromExistNode(existingNode)) {
log.error("Updated node failed because intfs of existingNode {} " +
"are not removed properly", existingNode.toString());
return;
}
createNode(osNode);
return;
}
if (osNode.intgBridge() == null && osNode.type() != CONTROLLER) {
updatedNode = osNode.updateIntbridge(existDeviceId);
} else {
updatedNode = osNode;
}
checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()),
NOT_DUPLICATED_MSG, updatedNode.intgBridge());
osNodeStore.updateNode(updatedNode);
log.info(String.format(MSG_NODE, osNode.hostname(), MSG_UPDATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testUpdateNodeWithDuplicateIntgBridge() {
target.updateNode(COMPUTE_2_DUP_INT);
}
|
public final int getFlags() {
return flags;
}
|
@Test
public void getFlagsOutputZero() {
// Arrange
final LogHeader objectUnderTest = new LogHeader(0);
// Act
final int actual = objectUnderTest.getFlags();
// Assert result
Assert.assertEquals(0, actual);
}
|
public TransformWatermarks getWatermarks(ExecutableT executable) {
return transformToWatermarks.get(executable);
}
|
@Test
public void getWatermarkForUntouchedTransform() {
TransformWatermarks watermarks = manager.getWatermarks(graph.getProducer(impulse));
assertThat(watermarks.getInputWatermark(), equalTo(BoundedWindow.TIMESTAMP_MIN_VALUE));
assertThat(watermarks.getOutputWatermark(), equalTo(BoundedWindow.TIMESTAMP_MIN_VALUE));
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// Double-checked locking: re-read the cache under the lock before building a new proxy.
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testDisplayDataArrayValue() throws Exception {
ArrayOptions options = PipelineOptionsFactory.as(ArrayOptions.class);
options.setDeepArray(new String[][] {new String[] {"a", "b"}, new String[] {"c"}});
options.setDeepPrimitiveArray(new int[][] {new int[] {1, 2}, new int[] {3}});
DisplayData data = DisplayData.from(options);
assertThat(data, hasDisplayItem("deepArray", "[[a, b], [c]]"));
assertThat(data, hasDisplayItem("deepPrimitiveArray", "[[1, 2], [3]]"));
ArrayOptions deserializedOptions = serializeDeserialize(ArrayOptions.class, options);
DisplayData deserializedData = DisplayData.from(deserializedOptions);
assertThat(deserializedData, hasDisplayItem("deepPrimitiveArray", "[[1, 2], [3]]"));
}
|
public AbstractRequestBuilder<K, V, R> removeParam(String key)
{
_queryParams.remove(key);
_queryParamClasses.remove(key);
return this;
}
|
@Test
public void testRemoveParam()
{
final AbstractRequestBuilder<?, ?, ?> builder = new DummyAbstractRequestBuilder();
builder.addParam("a", "b");
Assert.assertEquals(builder.getParam("a"), Arrays.asList("b"));
builder.removeParam("a");
Assert.assertFalse(builder.hasParam("a"));
}
|
@GET
@Path("{id}")
@Timed
@ApiOperation(value = "Get index set")
@ApiResponses(value = {
@ApiResponse(code = 403, message = "Unauthorized"),
@ApiResponse(code = 404, message = "Index set not found"),
})
public IndexSetSummary get(@ApiParam(name = "id", required = true)
@PathParam("id") String id) {
checkPermission(RestPermissions.INDEXSETS_READ, id);
final IndexSetConfig defaultIndexSet = indexSetService.getDefault();
return indexSetService.get(id)
.map(config -> IndexSetSummary.fromIndexSetConfig(config, config.equals(defaultIndexSet)))
.orElseThrow(() -> new NotFoundException("Couldn't load index set with ID <" + id + ">"));
}
|
@Test
public void getDenied() {
notPermitted();
expectedException.expect(ForbiddenException.class);
expectedException.expectMessage("Not authorized to access resource id <id>");
try {
indexSetsResource.get("id");
} finally {
verifyNoMoreInteractions(indexSetService);
}
}
|
public static int levenshtein(String a, String b) {
// switch parameters to use the shorter one as b to save space.
if (a.length() < b.length()) {
String swap = a;
a = b;
b = swap;
}
int[][] d = new int[2][b.length() + 1];
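// Rolling two-row table: d[0] holds the previous row and d[1] the current row,
// so only O(min(|a|, |b|)) space is needed instead of the full matrix.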
for (int j = 0; j <= b.length(); j++) {
d[0][j] = j;
}
for (int i = 1; i <= a.length(); i++) {
d[1][0] = i;
for (int j = 1; j <= b.length(); j++) {
int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
d[1][j] = MathEx.min(
d[0][j] + 1, // deletion
d[1][j - 1] + 1, // insertion
d[0][j - 1] + cost); // substitution
}
int[] swap = d[0];
d[0] = d[1];
d[1] = swap;
}
return d[0][b.length()];
}
|
@Test
public void testPlainLevenshteinSpeedTest() {
System.out.println("Levenshtein speed test");
for (int i = 0; i < 100; i++) {
EditDistance.levenshtein(H1N1, H1N5);
}
}
|
public static String getDataSourceUnitVersionsNode(final String databaseName, final String dataSourceName) {
return String.join("/", getDataSourceUnitsNode(databaseName), dataSourceName, VERSIONS);
}
|
@Test
void assertGetDataSourceUnitVersionsNode() {
assertThat(DataSourceMetaDataNode.getDataSourceUnitVersionsNode("foo_db", "foo_ds"), is("/metadata/foo_db/data_sources/units/foo_ds/versions"));
}
|
@Override
protected void rename(
List<LocalResourceId> srcResourceIds,
List<LocalResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
checkArgument(
srcResourceIds.size() == destResourceIds.size(),
"Number of source files %s must equal number of destination files %s",
srcResourceIds.size(),
destResourceIds.size());
int numFiles = srcResourceIds.size();
for (int i = 0; i < numFiles; i++) {
LocalResourceId src = srcResourceIds.get(i);
LocalResourceId dst = destResourceIds.get(i);
LOG.debug("Renaming {} to {}", src, dst);
File parent = dst.getCurrentDirectory().getPath().toFile();
if (!parent.exists()) {
checkArgument(
parent.mkdirs() || parent.exists(),
"Unable to make output directory %s in order to move into file %s",
parent,
dst.getPath());
}
// Rename the source file, replacing the existing destination.
Files.move(
src.getPath(),
dst.getPath(),
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.ATOMIC_MOVE);
}
}
|
@Test
public void testMoveWithNonExistingSrcFile() throws Exception {
Path existingSrc = temporaryFolder.newFile().toPath();
Path nonExistingSrc = temporaryFolder.getRoot().toPath().resolve("non-existent-file.txt");
Path destPath1 = temporaryFolder.getRoot().toPath().resolve("nonexistentdir").resolve("dest1");
Path destPath2 = destPath1.resolveSibling("dest2");
createFileWithContent(existingSrc, "content1");
thrown.expect(NoSuchFileException.class);
localFileSystem.rename(
toLocalResourceIds(ImmutableList.of(existingSrc, nonExistingSrc), false /* isDirectory */),
toLocalResourceIds(ImmutableList.of(destPath1, destPath2), false /* isDirectory */));
}
|
@Override
public ObjectNode encode(Group group, CodecContext context) {
checkNotNull(group, "Group cannot be null");
ObjectNode result = context.mapper().createObjectNode()
// a Group id should be an unsigned integer
.put(ID, Integer.toUnsignedLong(group.id().id()))
.put(STATE, group.state().toString())
.put(LIFE, group.life())
.put(PACKETS, group.packets())
.put(BYTES, group.bytes())
.put(REFERENCE_COUNT, group.referenceCount())
.put(TYPE, group.type().toString())
.put(DEVICE_ID, group.deviceId().toString());
if (group.appId() != null) {
result.put(APP_ID, group.appId().name());
}
if (group.appCookie() != null) {
result.put(APP_COOKIE, group.appCookie().toString());
}
if (group.givenGroupId() != null) {
// a given Group id should be an unsigned integer
result.put(GIVEN_GROUP_ID, Integer.toUnsignedLong(group.givenGroupId()));
}
ArrayNode buckets = context.mapper().createArrayNode();
group.buckets().buckets().forEach(bucket -> {
ObjectNode bucketJson = context.codec(GroupBucket.class).encode(bucket, context);
buckets.add(bucketJson);
});
result.set(BUCKETS, buckets);
return result;
}
|
@Test
public void codecEncodeTest() {
GroupBucket bucket1 = DefaultGroupBucket.createAllGroupBucket(DefaultTrafficTreatment.emptyTreatment());
GroupBucket bucket2 = DefaultGroupBucket.createAllGroupBucket(DefaultTrafficTreatment.emptyTreatment());
GroupBucket bucket3 = DefaultGroupBucket.createIndirectGroupBucket(DefaultTrafficTreatment.emptyTreatment());
GroupBuckets allBuckets = new GroupBuckets(ImmutableList.of(bucket1, bucket2));
GroupBuckets indirectBuckets = new GroupBuckets(ImmutableList.of(bucket3));
DefaultGroup group = new DefaultGroup(
new GroupId(1),
NetTestTools.did("d1"),
ALL,
allBuckets);
DefaultGroup group1 = new DefaultGroup(
new GroupId(2),
NetTestTools.did("d2"),
INDIRECT,
indirectBuckets);
MockCodecContext context = new MockCodecContext();
GroupCodec codec = new GroupCodec();
ObjectNode groupJson = codec.encode(group, context);
ObjectNode groupJsonIndirect = codec.encode(group1, context);
assertThat(groupJson, matchesGroup(group));
assertThat(groupJsonIndirect, matchesGroup(group1));
}
|
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
NoSuchPaddingException, InvalidKeySpecException,
InvalidAlgorithmParameterException,
KeyException, IOException {
return toPrivateKey(keyFile, keyPassword, true);
}
|
@Test
public void testPkcs1Des3EncryptedRsa() throws Exception {
PrivateKey key = SslContext.toPrivateKey(new File(getClass().getResource("rsa_pkcs1_des3_encrypted.key")
.getFile()), "example");
assertNotNull(key);
}
|
public Optional<DoFn.ProcessContinuation> run(
PartitionRecord partitionRecord,
ChangeStreamRecord record,
RestrictionTracker<StreamProgress, StreamProgress> tracker,
DoFn.OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator) {
if (record instanceof Heartbeat) {
Heartbeat heartbeat = (Heartbeat) record;
final Instant watermark = toJodaTime(heartbeat.getEstimatedLowWatermark());
// Heartbeats are filtered downstream, so the key doesn't really matter; the most
// logical key for a heartbeat is the partition it corresponds to.
ByteString heartbeatKey =
Range.ByteStringRange.serializeToByteString(partitionRecord.getPartition());
KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(heartbeatKey, heartbeat);
throughputEstimator.update(Instant.now(), outputRecord);
StreamProgress streamProgress =
new StreamProgress(
heartbeat.getChangeStreamContinuationToken(),
watermark,
throughputEstimator.get(),
Instant.now(),
true);
watermarkEstimator.setWatermark(watermark);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See {@link
// org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker}
// for more information regarding runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
metrics.incHeartbeatCount();
// We output heartbeats so that they are factored into throughput and can be used to
// autoscale. These will be filtered in a downstream step and never returned to users. This is
// to prevent the autoscaler from scaling down when we have large tables with no throughput but
// still need enough workers to keep up with heartbeats.
// We output elements with a timestamp of 0 to prevent reliance on event time. This
// limits the ability to window on the commit time of any data changes. It is still possible to
// window on processing time.
receiver.outputWithTimestamp(outputRecord, Instant.EPOCH);
} else if (record instanceof CloseStream) {
CloseStream closeStream = (CloseStream) record;
StreamProgress streamProgress = new StreamProgress(closeStream);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See {@link
// org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker}
// for more information regarding runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
metrics.incClosestreamCount();
return Optional.of(DoFn.ProcessContinuation.resume());
} else if (record instanceof ChangeStreamMutation) {
ChangeStreamMutation changeStreamMutation = (ChangeStreamMutation) record;
final Instant watermark = toJodaTime(changeStreamMutation.getEstimatedLowWatermark());
watermarkEstimator.setWatermark(watermark);
// Build a new StreamProgress with the continuation token to be claimed.
ChangeStreamContinuationToken changeStreamContinuationToken =
ChangeStreamContinuationToken.create(
Range.ByteStringRange.create(
partitionRecord.getPartition().getStart(),
partitionRecord.getPartition().getEnd()),
changeStreamMutation.getToken());
KV<ByteString, ChangeStreamRecord> outputRecord =
KV.of(changeStreamMutation.getRowKey(), changeStreamMutation);
throughputEstimator.update(Instant.now(), outputRecord);
StreamProgress streamProgress =
new StreamProgress(
changeStreamContinuationToken,
watermark,
throughputEstimator.get(),
Instant.now(),
false);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See ReadChangeStreamPartitionProgressTracker for more information regarding
// runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.GARBAGE_COLLECTION) {
metrics.incChangeStreamMutationGcCounter();
} else if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.USER) {
metrics.incChangeStreamMutationUserCounter();
}
Instant delay = toJodaTime(changeStreamMutation.getCommitTimestamp());
metrics.updateProcessingDelayFromCommitTimestamp(
Instant.now().getMillis() - delay.getMillis());
// We output elements with a timestamp of 0 to prevent reliance on event time. This
// limits the ability to window on the commit time of any data changes. It is still possible to
// window on processing time.
receiver.outputWithTimestamp(outputRecord, Instant.EPOCH);
} else {
LOG.warn(
"RCSP {}: Invalid response type", formatByteStringRange(partitionRecord.getPartition()));
}
return Optional.empty();
}
|
@Test
public void testChangeStreamMutationGc() {
ByteStringRange partition = ByteStringRange.create("", "");
when(partitionRecord.getPartition()).thenReturn(partition);
final Instant commitTimestamp = Instant.ofEpochMilli(1_000L);
final Instant lowWatermark = Instant.ofEpochMilli(500L);
ChangeStreamContinuationToken changeStreamContinuationToken =
ChangeStreamContinuationToken.create(ByteStringRange.create("", ""), "1234");
ChangeStreamMutation changeStreamMutation = Mockito.mock(ChangeStreamMutation.class);
Mockito.when(changeStreamMutation.getCommitTimestamp())
.thenReturn(toThreetenInstant(commitTimestamp));
Mockito.when(changeStreamMutation.getToken()).thenReturn("1234");
Mockito.when(changeStreamMutation.getEstimatedLowWatermark())
.thenReturn(toThreetenInstant(lowWatermark));
Mockito.when(changeStreamMutation.getType())
.thenReturn(ChangeStreamMutation.MutationType.GARBAGE_COLLECTION);
KV<ByteString, ChangeStreamRecord> record =
KV.of(changeStreamMutation.getRowKey(), changeStreamMutation);
final Optional<DoFn.ProcessContinuation> result =
action.run(
partitionRecord,
changeStreamMutation,
tracker,
receiver,
watermarkEstimator,
throughputEstimator);
assertFalse(result.isPresent());
verify(metrics).incChangeStreamMutationGcCounter();
verify(metrics, never()).incChangeStreamMutationUserCounter();
StreamProgress streamProgress =
new StreamProgress(
changeStreamContinuationToken,
lowWatermark,
BigDecimal.valueOf(1000),
Instant.now(),
false);
verify(tracker).tryClaim(streamProgressArgumentCaptor.capture());
assertEquals(
streamProgress.getCurrentToken(),
streamProgressArgumentCaptor.getValue().getCurrentToken());
assertEquals(
streamProgress.getThroughputEstimate(),
streamProgressArgumentCaptor.getValue().getThroughputEstimate());
assertEquals(
streamProgress.getEstimatedLowWatermark(),
streamProgressArgumentCaptor.getValue().getEstimatedLowWatermark());
assertEquals(
streamProgress.isHeartbeat(), streamProgressArgumentCaptor.getValue().isHeartbeat());
verify(receiver).outputWithTimestamp(eq(record), eq(Instant.EPOCH));
verify(watermarkEstimator).setWatermark(eq(lowWatermark));
verify(throughputEstimator).update(any(), eq(record));
}
|
public String value() {
return value;
}
|
@Test
public void testConstruction() {
assertThat(subject3.value(), is("Message 3"));
MessageSubject serializerObject = new MessageSubject();
assertThat(serializerObject.value(), is(""));
}
|
public static AuthorizationsCollector parse(File file) throws ParseException {
if (file == null) {
LOG.warn("parsing a null file, so fallback on default configuration!");
return AuthorizationsCollector.emptyImmutableCollector();
}
if (!file.exists()) {
LOG.warn(
String.format(
"parsing non-existing file %s, so fallback on default configuration!",
file.getAbsolutePath()));
return AuthorizationsCollector.emptyImmutableCollector();
}
try (Reader reader = Files.newBufferedReader(file.toPath(), UTF_8)) {
return parse(reader);
} catch (IOException fex) {
LOG.warn(
String.format(
"unable to read file %s, so fallback on default configuration!",
file.getAbsolutePath()),
fex);
return AuthorizationsCollector.emptyImmutableCollector();
}
}
|
@Test
public void testParseValidEndLineComment() throws ParseException {
Reader conf = new StringReader("topic /weather/italy/anemometer #simple comment");
AuthorizationsCollector authorizations = ACLFileParser.parse(conf);
// Verify
assertTrue(authorizations.canRead(new Topic("/weather/italy/anemometer"), "", ""));
assertTrue(authorizations.canWrite(new Topic("/weather/italy/anemometer"), "", ""));
}
|
@PublicAPI(usage = ACCESS)
public JavaCodeUnit getCodeUnitWithParameterTypes(String name, Class<?>... parameters) {
return getCodeUnitWithParameterTypes(name, ImmutableList.copyOf(parameters));
}
|
@Test
public void getCodeUnitWithParameterTypes() {
JavaClass clazz = importClasses(ChildWithFieldAndMethod.class).get(ChildWithFieldAndMethod.class);
assertIllegalArgumentException("childMethod", () -> clazz.getCodeUnitWithParameterTypes("childMethod"));
assertIllegalArgumentException("childMethod", () -> clazz.getCodeUnitWithParameterTypes("childMethod", Object.class));
assertIllegalArgumentException("wrong", () -> clazz.getCodeUnitWithParameterTypes("wrong", String.class));
assertThatCodeUnit(clazz.getCodeUnitWithParameterTypes("childMethod", String.class))
.matchesMethod(ChildWithFieldAndMethod.class, "childMethod", String.class);
assertThatCodeUnit(clazz.getCodeUnitWithParameterTypeNames("childMethod", String.class.getName()))
.matchesMethod(ChildWithFieldAndMethod.class, "childMethod", String.class);
assertThatCodeUnit(clazz.getCodeUnitWithParameterTypes(CONSTRUCTOR_NAME, Object.class))
.matchesConstructor(ChildWithFieldAndMethod.class, Object.class);
assertThatCodeUnit(clazz.getCodeUnitWithParameterTypeNames(CONSTRUCTOR_NAME, Object.class.getName()))
.matchesConstructor(ChildWithFieldAndMethod.class, Object.class);
}
|
@Override
public ByteBuf readBytes(int length) {
checkReadableBytes(length);
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
ByteBuf buf = alloc().buffer(length, maxCapacity);
buf.writeBytes(this, readerIndex, length);
readerIndex += length;
return buf;
}
|
@Test
public void testReadBytesAfterRelease9() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() throws IOException {
releasedBuffer().readBytes(new ByteArrayOutputStream(), 1);
}
});
}
|
@Override
public void close() {
stop();
}
|
@Test
public void createWithNullMetricRegistry() {
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
DummyReporter r = null;
try {
r = new DummyReporter(null, "example", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.MILLISECONDS, executor);
Assert.fail("NullPointerException must be thrown !!!");
} catch (NullPointerException e) {
Assert.assertEquals("registry == null", e.getMessage());
} finally {
if (r != null) {
r.close();
}
}
}
|
@VisibleForTesting
String getRequestBodyParamsAsStr( NameValuePair[] pairs, String charset ) throws KettleException {
StringBuilder buf = new StringBuilder();
try {
for ( int i = 0; i < pairs.length; ++i ) {
NameValuePair pair = pairs[ i ];
if ( pair.getName() != null ) {
if ( i > 0 ) {
buf.append( "&" );
}
buf.append( URLEncoder.encode( pair.getName(), !StringUtil.isEmpty( charset ) ? charset : DEFAULT_ENCODING ) );
buf.append( "=" );
if ( pair.getValue() != null ) {
buf.append( URLEncoder.encode( pair.getValue(), !StringUtil.isEmpty( charset ) ? charset : DEFAULT_ENCODING ) );
}
}
}
return buf.toString();
} catch ( UnsupportedEncodingException e ) {
throw new KettleException( e.getMessage(), e );
}
}
|
@Test
public void getRequestBodyParametersAsStringWithNullEncoding() throws KettleException {
HTTPPOST http = mock( HTTPPOST.class );
doCallRealMethod().when( http ).getRequestBodyParamsAsStr( any( NameValuePair[].class ), nullable( String.class ) );
NameValuePair[] pairs = new NameValuePair[] {
new BasicNameValuePair( "u", "usr" ),
new BasicNameValuePair( "p", "pass" )
};
assertEquals( "u=usr&p=pass", http.getRequestBodyParamsAsStr( pairs, null ) );
}
|
public boolean overlap(final Window other) throws IllegalArgumentException {
if (getClass() != other.getClass()) {
throw new IllegalArgumentException("Cannot compare windows of different types. Other window has type "
+ other.getClass() + ".");
}
final SessionWindow otherWindow = (SessionWindow) other;
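// Bounds are inclusive: the windows overlap unless one ends strictly before the other starts.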
return !(otherWindow.endMs < startMs || endMs < otherWindow.startMs);
}
|
@Test
public void cannotCompareSessionWindowWithDifferentWindowType() {
assertThrows(IllegalArgumentException.class, () -> window.overlap(timeWindow));
}
|
public ConvertedTime getConvertedTime(long duration) {
Set<Seconds> keys = RULES.keySet();
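// Assumes RULES iterates its keys in ascending order of seconds, so the first
// threshold that is >= duration selects the matching converter.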
for (Seconds seconds : keys) {
if (duration <= seconds.getSeconds()) {
return RULES.get(seconds).getConvertedTime(duration);
}
}
return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
|
@Test
public void testShouldReportAbout1HourFor89Minutes29Seconds() throws Exception {
assertEquals(TimeConverter.ABOUT_1_HOUR_AGO, timeConverter.getConvertedTime(89 * 60 + 29));
}
|
@Override
public void tryStartBundle() {
inconsistentStateCheck();
LOG.debug(
"tryStartBundle: elementCount={}, Bundle={}", currentBundleElementCount, this);
if (isBundleStarted.compareAndSet(false, true)) {
LOG.debug("Starting a new bundle.");
bundleStartTime.set(System.currentTimeMillis());
pendingBundleCount.getAndIncrement();
bundleProgressListener.onBundleStarted();
}
currentBundleElementCount.incrementAndGet();
}
|
@Test
public void testWhen() {
portableBundleManager =
new PortableBundleManager<>(
bundleProgressListener, 4, MAX_BUNDLE_TIME_MS, bundleTimerScheduler, TIMER_ID);
portableBundleManager.tryStartBundle();
portableBundleManager.tryStartBundle();
verify(bundleProgressListener, times(1)).onBundleStarted();
}
|
public static String parseContextUuid(String name, Long ledgerId) {
if (ledgerId == null || name == null) {
return null;
}
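// The UUID prefix ends immediately before the "-ledger-<ledgerId>" suffix.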
int pos = name.indexOf("-ledger-" + ledgerId);
if (pos <= 0) {
return null;
}
return name.substring(0, pos);
}
|
@Test
public void parseContextUuidTest() throws Exception {
UUID id = UUID.randomUUID();
long ledgerId = 123124;
String key = DataBlockUtils.dataBlockOffloadKey(ledgerId, id);
String keyIndex = DataBlockUtils.indexBlockOffloadKey(ledgerId, id);
assertEquals(ledgerId, DataBlockUtils.parseLedgerId(key).longValue());
assertEquals(ledgerId, DataBlockUtils.parseLedgerId(keyIndex).longValue());
assertEquals(id.toString(), DataBlockUtils.parseContextUuid(key, ledgerId));
assertEquals(id.toString(), DataBlockUtils.parseContextUuid(keyIndex, ledgerId));
assertNull(DataBlockUtils.parseContextUuid(null, null));
assertNull(DataBlockUtils.parseContextUuid(null, ledgerId));
assertNull(DataBlockUtils.parseContextUuid("foo", null));
assertNull(DataBlockUtils.parseContextUuid("-ledger-" + ledgerId, ledgerId));
assertNull(DataBlockUtils.parseContextUuid("something" + ledgerId, ledgerId));
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetBooleanSchemaForBooleanPrimitiveClassVariadic() {
assertThat(
UdfUtil.getVarArgsSchemaFromType(boolean.class),
equalTo(ParamTypes.BOOLEAN)
);
}
|
@Override
public String getAll() {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
try {
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
request.append(ANGLE_RIGHT + NEW_LINE);
request.append(VOLT_NE_CLOSE);
reply = controller
.getDevicesMap()
.get(ncDeviceId)
.getSession()
.get(request.toString(), REPORT_ALL);
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
}
|
@Test
public void testGetAll() throws Exception {
String reply = voltConfig.getAll();
assertNotNull("Incorrect response", reply);
}
|
@Override
public void execute(Context context) {
executeForBranch(treeRootHolder.getRoot());
}
|
@Test
public void givenNoPreviousUpgradeEvents_whenStepIsExecuted_thenANewUpgradeEventIsCreated() {
when(sonarQubeVersion.get()).thenReturn(Version.parse("10.3"));
when(dbClient.eventDao()).thenReturn(mock());
when(dbClient.eventDao().selectSqUpgradesByMostRecentFirst(any(), any())).thenReturn(Collections.emptyList());
underTest.execute(new TestComputationStepContext());
verify(eventRepository, times(1)).add(eventArgumentCaptor.capture());
verifyNoMoreInteractions(eventRepository);
assertThat(eventArgumentCaptor.getAllValues())
.extracting(Event::getCategory, Event::getName)
.containsExactly(tuple(Event.Category.SQ_UPGRADE, "10.3"));
}
|
synchronized int increaseVersion() {
return ++version;
}
|
@Test
void increaseVersion() {
Job job = anEnqueuedJob().build();
assertThat(job.getVersion()).isZero();
assertThat(job.increaseVersion()).isEqualTo(1);
assertThat(job.getVersion()).isEqualTo(1);
assertThat(job.increaseVersion()).isEqualTo(2);
assertThat(job.getVersion()).isEqualTo(2);
}
|
@Override
protected void descendingSort(File[] matchingFileArray, Instant instant) {
String regexForIndexExtraction = createStemRegex(instant);
final Pattern pattern = Pattern.compile(regexForIndexExtraction);
Arrays.sort(matchingFileArray, new Comparator<File>() {
@Override
public int compare(final File f1, final File f2) {
int index1 = extractIndex(pattern, f1);
int index2 = extractIndex(pattern, f2);
if (index1 == index2)
return 0;
// descending sort, i.e. newest files first
if (index2 < index1)
return -1;
else
return 1;
}
private int extractIndex(Pattern pattern, File file) {
Matcher matcher = pattern.matcher(file.getName());
if (matcher.find()) {
String indexAsStr = matcher.group(1);
if (indexAsStr == null || indexAsStr.isEmpty())
return NO_INDEX; // unreachable code?
else
return Integer.parseInt(indexAsStr);
} else
return NO_INDEX;
}
});
}
|
@Test
public void smoke() {
FileNamePattern fileNamePattern = new FileNamePattern("smoke-%d-%i.gz", context);
SizeAndTimeBasedArchiveRemover remover = new SizeAndTimeBasedArchiveRemover(fileNamePattern, null);
File[] fileArray = new File[2];
File[] expected = new File[2];
fileArray[0] = expected[1] = new File("/tmp/smoke-1970-01-01-0.gz");
fileArray[1] = expected[0] = new File("/tmp/smoke-1970-01-01-1.gz");
remover.descendingSort(fileArray, Instant.ofEpochMilli(0));
assertArrayEquals(expected, fileArray);
}
|
@Override
public RouterFunction<ServerResponse> create(String prefix) {
return RouterFunctions.route(GET(StringUtils.prependIfMissing(prefix, "/")),
handlerFunction());
}
|
@Test
void create() {
String prefix = "/topics";
RouterFunction<ServerResponse> routerFunction = categoriesRouteFactory.create(prefix);
WebTestClient webClient = getWebTestClient(routerFunction);
when(categoryFinder.listAsTree())
.thenReturn(Flux.empty());
webClient.get()
.uri(prefix)
.exchange()
.expectStatus().isOk();
}
|
public static SqlToConnectTypeConverter sqlToConnectConverter() {
return SQL_TO_CONNECT_CONVERTER;
}
|
@Test
public void shouldGetLogicalForEverySqlType() {
for (final Entry<SqlType, Schema> entry : SQL_TO_LOGICAL.entrySet()) {
final SqlType sqlType = entry.getKey();
final Schema logical = entry.getValue();
final Schema result = SchemaConverters.sqlToConnectConverter().toConnectSchema(sqlType);
assertThat(result, is(logical));
}
}
|
@Override
public void rewind() throws IOException {
super.rewind();
for (int i = size - 1; i >= 0; i--) {
File fi = getNumberedFileName(i);
if (Files.exists(Util.fileToPath(fi))) {
File next = getNumberedFileName(i + 1);
Files.move(Util.fileToPath(fi), Util.fileToPath(next), StandardCopyOption.REPLACE_EXISTING);
}
}
}
|
@Issue("JENKINS-16634")
@Test
public void deletedFolder() throws Exception {
assumeFalse("Windows does not allow deleting a directory with a "
+ "file open, so this case should never occur", Functions.isWindows());
File dir = tmp.newFolder("dir");
File base = new File(dir, "x.log");
RewindableRotatingFileOutputStream os = new RewindableRotatingFileOutputStream(base, 3);
for (int i = 0; i < 2; i++) {
FileUtils.deleteDirectory(dir);
os.write('.');
FileUtils.deleteDirectory(dir);
os.write('.');
FileUtils.deleteDirectory(dir);
os.rewind();
}
}
|
public static List<List<Expr>> candidateOfPartitionByExprs(List<List<Expr>> partitionByExprs) {
if (partitionByExprs.isEmpty()) {
return Lists.newArrayList();
}
PermutationGenerator<Expr> generator = new PermutationGenerator<>(partitionByExprs);
int totalCount = 0;
List<List<Expr>> candidates = Lists.newArrayList();
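// Generate at most 8 permutations, presumably to bound the planner's search cost.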
while (generator.hasNext() && totalCount < 8) {
candidates.add(generator.next());
totalCount++;
}
return candidates;
}
|
@Test
public void testPermutationsOfPartitionByExprs2() throws Exception {
List<List<Expr>> slotRefs = createSlotRefArray(1, 3);
for (List<Expr> refs: slotRefs) {
System.out.println(slotRefsToInt(refs));
}
List<List<Expr>> newSlotRefs = PlanNode.candidateOfPartitionByExprs(slotRefs);
Assert.assertTrue(newSlotRefs.size() == 3);
for (List<Expr> candidates: newSlotRefs) {
System.out.println(slotRefsToInt(candidates));
}
Assert.assertTrue(slotRefsEqualTo(newSlotRefs.get(0), Arrays.asList(0)));
Assert.assertTrue(slotRefsEqualTo(newSlotRefs.get(1), Arrays.asList(1)));
Assert.assertTrue(slotRefsEqualTo(newSlotRefs.get(2), Arrays.asList(2)));
}
|
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = new StopWatch().start();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();
List<FileStatus> files = listStatus(job);
boolean ignoreDirs = !getInputDirRecursive(job)
&& job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
for (FileStatus file: files) {
if (ignoreDirs && file.isDirectory()) {
continue;
}
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(job, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(blockSize, minSize, maxSize);
long bytesRemaining = length;
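// SPLIT_SLOP (1.1 in Hadoop's FileInputFormat) lets the final chunk grow up to
// 10% beyond splitSize rather than emitting a tiny trailing split.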
while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
splits.add(makeSplit(path, length - bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
}
} else { // not splitable
if (LOG.isDebugEnabled()) {
// Log only if the file is big enough to be split
if (length > Math.min(file.getBlockSize(), minSize)) {
LOG.debug("File is not splittable so no parallelization "
+ "is possible: " + file.getPath());
}
}
splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
blkLocations[0].getCachedHosts()));
}
} else {
// Create an empty hosts array for zero-length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
}
|
@Test
public void testNumInputFilesIgnoreDirs() throws Exception {
Configuration conf = getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
conf.setBoolean(FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, true);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct", 1, splits.size());
verifySplits(Lists.newArrayList("test:/a1/file1"), splits);
}
|
@Override
public boolean isSchemaAvailable() {
return true;
}
|
@Test
void assertIsSchemaAvailable() {
assertTrue(dialectDatabaseMetaData.isSchemaAvailable());
}
|
public static <T extends NumericType> T decodeNumeric(String input, Class<T> type) {
try {
byte[] inputByteArray = Numeric.hexStringToByteArray(input);
int typeLengthAsBytes = getTypeLengthInBytes(type);
int valueOffset = Type.MAX_BYTE_LENGTH - typeLengthAsBytes;
BigInteger numericValue;
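// Unsigned types use the sign-magnitude constructor (signum = 1) so the high-order
// bit is never misread as a sign bit; signed types use the two's-complement constructor.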
if (Uint.class.isAssignableFrom(type) || Ufixed.class.isAssignableFrom(type)) {
numericValue = new BigInteger(1, inputByteArray, valueOffset, typeLengthAsBytes);
} else {
numericValue = new BigInteger(inputByteArray, valueOffset, typeLengthAsBytes);
}
return type.getConstructor(BigInteger.class).newInstance(numericValue);
} catch (NoSuchMethodException
| SecurityException
| InstantiationException
| IllegalAccessException
| IllegalArgumentException
| InvocationTargetException e) {
throw new UnsupportedOperationException(
"Unable to create instance of " + type.getName(), e);
}
}
|
@Test
public void testUint16Max() throws Exception {
assertEquals(
TypeDecoder.decodeNumeric(
TypeEncoder.encodeNumeric(
new Uint16(BigInteger.valueOf((long) Math.pow(2, 16) - 1))),
Uint16.class),
new Uint16(BigInteger.valueOf((long) Math.pow(2, 16) - 1)));
}
|
public static void validateConfig(Object config, Class annotationClass) {
for (Field field : config.getClass().getDeclaredFields()) {
Object value = null;
field.setAccessible(true);
try {
value = field.get(config);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
validateField(field, value, annotationClass);
}
validateClass(config, annotationClass);
}
|
@Test
public void testStringList() {
TestConfig testConfig = createGoodConfig();
testConfig.stringList = testIntegerList;
Exception e = expectThrows(IllegalArgumentException.class, () -> ConfigValidation.validateConfig(testConfig));
assertTrue(e.getMessage().contains("stringList"));
}
|
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
TypeReference<T> responseFormat) {
return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null);
}
|
@Test
public void testNullResponseType() {
RestClient client = spy(new RestClient(null));
assertThrows(NullPointerException.class, () -> client.httpRequest(
MOCK_URL,
TEST_METHOD,
null,
TEST_DTO,
null,
MOCK_SECRET_KEY,
TEST_SIGNATURE_ALGORITHM
));
}
|
@Override
public LongGaugeImpl newLongGauge(String name) {
checkNotNull(name, "name can't be null");
LongGaugeImpl gauge = new LongGaugeImpl(this, name);
gauges.put(createDescriptor(name).lookupView(), gauge);
return gauge;
}
|
@Test
public void newGauge_whenExistingMetric() {
LongGaugeImpl first = metricsRegistry.newLongGauge("foo");
LongGaugeImpl second = metricsRegistry.newLongGauge("foo");
assertNotSame(first, second);
}
|
public MethodBuilder onthrow(Object onthrow) {
this.onthrow = onthrow;
return getThis();
}
|
@Test
void onthrow() {
MethodBuilder builder = MethodBuilder.newBuilder();
builder.onthrow("on-throw-object");
Assertions.assertEquals("on-throw-object", builder.build().getOnthrow());
}
|
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
try {
String partitionColumn = job.get(Constants.JDBC_PARTITION_COLUMN);
int numPartitions = job.getInt(Constants.JDBC_NUM_PARTITIONS, -1);
String lowerBound = job.get(Constants.JDBC_LOW_BOUND);
String upperBound = job.get(Constants.JDBC_UPPER_BOUND);
InputSplit[] splits;
if (!job.getBoolean(Constants.JDBC_SPLIT_QUERY, true) || numPartitions <= 1) {
// We will not split this query if:
// 1. hive.sql.query.split is set to false (either manually or automatically by calcite)
// 2. numPartitions == 1
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
LOGGER.info("Creating 1 input split " + splits[0]);
return splits;
}
dbAccessor = DatabaseAccessorFactory.getAccessor(job);
Path[] tablePaths = FileInputFormat.getInputPaths(job);
// We will split this query into n splits
LOGGER.debug("Creating {} input splits", numPartitions);
if (partitionColumn != null) {
List<String> columnNames = dbAccessor.getColumnNames(job);
if (!columnNames.contains(partitionColumn)) {
throw new IOException("Cannot find partitionColumn:" + partitionColumn + " in " + columnNames);
}
List<TypeInfo> hiveColumnTypesList = dbAccessor.getColumnTypes(job);
TypeInfo typeInfo = hiveColumnTypesList.get(columnNames.indexOf(partitionColumn));
if (!(typeInfo instanceof PrimitiveTypeInfo)) {
throw new IOException(partitionColumn + " is a complex type, only primitive type can be a partition column");
}
if (lowerBound == null || upperBound == null) {
Pair<String, String> boundary = dbAccessor.getBounds(job, partitionColumn, lowerBound == null,
upperBound == null);
if (lowerBound == null) {
lowerBound = boundary.getLeft();
}
if (upperBound == null) {
upperBound = boundary.getRight();
}
}
if (lowerBound == null) {
throw new IOException("lowerBound of " + partitionColumn + " cannot be null");
}
if (upperBound == null) {
throw new IOException("upperBound of " + partitionColumn + " cannot be null");
}
IntervalSplitter intervalSplitter = IntervalSplitterFactory.newIntervalSpitter(typeInfo);
List<MutablePair<String, String>> intervals = intervalSplitter.getIntervals(lowerBound, upperBound, numPartitions,
typeInfo);
if (intervals.size() <= 1) {
LOGGER.debug("Creating 1 input split");
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
return splits;
}
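// Open-end the first and last intervals so rows below lowerBound or above upperBound are still covered.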
intervals.get(0).setLeft(null);
intervals.get(intervals.size() - 1).setRight(null);
splits = new InputSplit[intervals.size()];
for (int i = 0; i < intervals.size(); i++) {
splits[i] = new JdbcInputSplit(partitionColumn, intervals.get(i).getLeft(), intervals.get(i).getRight(), tablePaths[0]);
}
} else {
int numRecords = dbAccessor.getTotalNumberOfRecords(job);
if (numRecords < numPartitions) {
numPartitions = numRecords;
}
int numRecordsPerSplit = numRecords / numPartitions;
int numSplitsWithExtraRecords = numRecords % numPartitions;
LOGGER.debug("Num records = {}", numRecords);
splits = new InputSplit[numPartitions];
int offset = 0;
for (int i = 0; i < numPartitions; i++) {
int numRecordsInThisSplit = numRecordsPerSplit;
if (i < numSplitsWithExtraRecords) {
numRecordsInThisSplit++;
}
splits[i] = new JdbcInputSplit(numRecordsInThisSplit, offset, tablePaths[0]);
offset += numRecordsInThisSplit;
}
}
dbAccessor = null;
LOGGER.info("Num input splits created {}", splits.length);
for (InputSplit split : splits) {
LOGGER.info("split:" + split.toString());
}
return splits;
}
catch (Exception e) {
LOGGER.error("Error while splitting input data.", e);
throw new IOException(e);
}
}
|
@Test
public void testIntervalSplit_Decimal() throws HiveJdbcDatabaseAccessException, IOException {
JdbcInputFormat f = new JdbcInputFormat();
when(mockDatabaseAccessor.getColumnNames(any(Configuration.class))).thenReturn(Lists.newArrayList("a"));
List<TypeInfo> columnTypes = Collections.singletonList(TypeInfoFactory.getDecimalTypeInfo(10, 5));
when(mockDatabaseAccessor.getColumnTypes(any(Configuration.class))).thenReturn(columnTypes);
JobConf conf = new JobConf();
conf.set("mapred.input.dir", "/temp");
conf.set("hive.sql.partitionColumn", "a");
conf.set("hive.sql.numPartitions", "4");
conf.set("hive.sql.lowerBound", "5");
conf.set("hive.sql.upperBound", "1000");
InputSplit[] splits = f.getSplits(conf, -1);
assertThat(splits, is(notNullValue()));
assertThat(splits.length, is(4));
assertNull(((JdbcInputSplit)splits[0]).getLowerBound());
assertEquals(((JdbcInputSplit)splits[0]).getUpperBound(), "253.75000");
assertEquals(((JdbcInputSplit)splits[1]).getLowerBound(), "253.75000");
assertEquals(((JdbcInputSplit)splits[1]).getUpperBound(), "502.50000");
assertEquals(((JdbcInputSplit)splits[2]).getLowerBound(), "502.50000");
assertEquals(((JdbcInputSplit)splits[2]).getUpperBound(), "751.25000");
assertEquals(((JdbcInputSplit)splits[3]).getLowerBound(), "751.25000");
assertNull(((JdbcInputSplit)splits[3]).getUpperBound());
}
|
void fail(Throwable e)
{
// The error must be recorded before setting the noMoreSplits marker to make sure
// isFinished will observe failure instead of successful completion.
// Only record the first error message.
if (setIf(stateReference, State.failed(e), state -> state.getKind() == INITIAL)) {
// Stop the split loader before finishing the queue.
// Once the queue is finished, it will always return a completed future to avoid blocking any caller.
// This could lead to a short period of busy loop in splitLoader (although unlikely in general setup).
splitLoader.stop();
queues.noMoreSplits();
}
}
|
@Test
public void testFail()
{
HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(
SESSION,
"database",
"table",
new CacheQuotaRequirement(GLOBAL, Optional.empty()),
10,
10,
new DataSize(1, MEGABYTE),
new TestingHiveSplitLoader(),
EXECUTOR,
new CounterStat(),
1);
// add some splits
for (int i = 0; i < 5; i++) {
hiveSplitSource.addToQueue(new TestSplit(i));
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1);
}
// remove a split and verify
assertEquals(getSplits(hiveSplitSource, 1).size(), 1);
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4);
// fail source
hiveSplitSource.fail(new RuntimeException("test"));
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4);
// try to remove a split and verify we got the expected exception
try {
getSplits(hiveSplitSource, 1);
fail("expected RuntimeException");
}
catch (RuntimeException e) {
assertEquals(e.getMessage(), "test");
}
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4); // 3 splits + poison
// attempt to add another split and verify it does not work
hiveSplitSource.addToQueue(new TestSplit(99));
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4); // 3 splits + poison
// fail source again
hiveSplitSource.fail(new RuntimeException("another failure"));
assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4); // 3 splits + poison
// try to remove a split and verify we got the first exception
try {
getSplits(hiveSplitSource, 1);
fail("expected RuntimeException");
}
catch (RuntimeException e) {
assertEquals(e.getMessage(), "test");
}
}
|
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
// replace target server as self server so that can callback.
verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer,
verifyData.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
}
return false;
}
|
@Test
void testSyncVerifyDataWithCallbackForMemberDisconnect() throws NacosException {
DistroData verifyData = new DistroData();
verifyData.setDistroKey(new DistroKey());
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
member.setState(NodeState.UP);
transportAgent.syncVerifyData(verifyData, member.getAddress(), distroCallback);
verify(distroCallback).onFailed(null);
verify(clusterRpcClientProxy, never()).asyncRequest(any(Member.class), any(), any());
}
|
public static <
X,
P extends MessageQueryParameter<X>,
R extends RequestBody,
M extends MessageParameters>
X getQueryParameter(final HandlerRequest<R> request, final Class<P> queryParameterClass)
throws RestHandlerException {
return getQueryParameter(request, queryParameterClass, null);
}
|
@Test
void testGetQueryParameterDefaultValue() throws Exception {
final Boolean allowNonRestoredState =
HandlerRequestUtils.getQueryParameter(
HandlerRequest.resolveParametersAndCreate(
EmptyRequestBody.getInstance(),
new TestMessageParameters(),
Collections.emptyMap(),
Collections.singletonMap("key", Collections.emptyList()),
Collections.emptyList()),
TestBooleanQueryParameter.class,
true);
assertThat(allowNonRestoredState).isTrue();
}
|
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
}
|
@Test
public void stringToConnect() {
assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes()));
}
|
public static TopicMessageType getMessageType(SendMessageRequestHeader requestHeader) {
Map<String, String> properties = MessageDecoder.string2messageProperties(requestHeader.getProperties());
String traFlag = properties.get(MessageConst.PROPERTY_TRANSACTION_PREPARED);
TopicMessageType topicMessageType = TopicMessageType.NORMAL;
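// Precedence: the transaction flag wins, then the sharding key (FIFO), then any
// of the delay-related properties; otherwise the message is NORMAL.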
if (Boolean.parseBoolean(traFlag)) {
topicMessageType = TopicMessageType.TRANSACTION;
} else if (properties.containsKey(MessageConst.PROPERTY_SHARDING_KEY)) {
topicMessageType = TopicMessageType.FIFO;
} else if (properties.get("__STARTDELIVERTIME") != null
|| properties.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL) != null
|| properties.get(MessageConst.PROPERTY_TIMER_DELIVER_MS) != null
|| properties.get(MessageConst.PROPERTY_TIMER_DELAY_SEC) != null
|| properties.get(MessageConst.PROPERTY_TIMER_DELAY_MS) != null) {
topicMessageType = TopicMessageType.DELAY;
}
return topicMessageType;
}
|
@Test
public void testGetMessageTypeAsTransaction() {
SendMessageRequestHeader requestHeader = new SendMessageRequestHeader();
Map<String, String> map = new HashMap<>();
map.put(MessageConst.PROPERTY_TRANSACTION_PREPARED, "true");
requestHeader.setProperties(MessageDecoder.messageProperties2String(map));
TopicMessageType result = BrokerMetricsManager.getMessageType(requestHeader);
assertThat(TopicMessageType.TRANSACTION).isEqualTo(result);
}
|
@Override
public void checkBeforeUpdate(final CreateBroadcastTableRuleStatement sqlStatement) {
ShardingSpherePreconditions.checkNotEmpty(database.getResourceMetaData().getStorageUnits(), () -> new EmptyStorageUnitException(database.getName()));
if (!sqlStatement.isIfNotExists()) {
checkDuplicate(sqlStatement);
}
}
|
@Test
void assertCheckSQLStatementWithDuplicateBroadcastRule() {
executor.setDatabase(mockShardingSphereDatabase());
BroadcastRule rule = mock(BroadcastRule.class);
when(rule.getTables()).thenReturn(Collections.singleton("t_address"));
executor.setRule(rule);
CreateBroadcastTableRuleStatement sqlStatement = new CreateBroadcastTableRuleStatement(false, Collections.singleton("t_address"));
assertThrows(DuplicateRuleException.class, () -> executor.checkBeforeUpdate(sqlStatement));
}
|
@Override
public double read() {
return gaugeSource.read();
}
|
@Test
public void whenCreatedForDynamicLongMetricWithProvidedValue() {
DoubleGaugeImplTest.SomeObject someObject = new DoubleGaugeImplTest.SomeObject();
someObject.longField = 42;
metricsRegistry.registerDynamicMetricsProvider((descriptor, context) ->
context.collect(descriptor.withPrefix("foo"), "longField", INFO, BYTES, 42));
DoubleGauge doubleGauge = metricsRegistry.newDoubleGauge("foo.longField");
// needed to collect dynamic metrics and update the gauge created from them
metricsRegistry.collect(mock(MetricsCollector.class));
assertEquals(42, doubleGauge.read(), 10E-6);
}
|
@Override
public PageData<Asset> findAssetsByTenantId(UUID tenantId, PageLink pageLink) {
return DaoUtil.toPageData(assetRepository
.findByTenantId(
tenantId,
pageLink.getTextSearch(),
DaoUtil.toPageable(pageLink)));
}
|
@Test
public void testFindAssetsByTenantId() {
PageLink pageLink = new PageLink(20, 0, "ASSET_");
PageData<Asset> assets1 = assetDao.findAssetsByTenantId(tenantId1, pageLink);
assertEquals(20, assets1.getData().size());
pageLink = pageLink.nextPageLink();
PageData<Asset> assets2 = assetDao.findAssetsByTenantId(tenantId1, pageLink);
assertEquals(10, assets2.getData().size());
pageLink = pageLink.nextPageLink();
PageData<Asset> assets3 = assetDao.findAssetsByTenantId(tenantId1, pageLink);
assertEquals(0, assets3.getData().size());
}
|
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
List<ImportValidationFeedback> feedback = new ArrayList<>();
if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
return feedback;
}
TransMeta transMeta = (TransMeta) subject;
String description = transMeta.getDescription();
if ( null != description && minLength <= description.length() ) {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.APPROVAL, "A description is present" ) );
} else {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
}
return feedback;
}
|
@Test
public void testVerifyRule_NullParameter_EnabledRule() {
TransformationHasDescriptionImportRule importRule = getImportRule( 10, true );
List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
assertNotNull( feedbackList );
assertTrue( feedbackList.isEmpty() );
}
|
public static ReshuffleTriggerStateMachine create() {
return new ReshuffleTriggerStateMachine();
}
|
@Test
public void testOnTimer() throws Exception {
TriggerStateMachineTester<Integer, IntervalWindow> tester =
TriggerStateMachineTester.forTrigger(
ReshuffleTriggerStateMachine.create(), FixedWindows.of(Duration.millis(100)));
IntervalWindow arbitraryWindow = new IntervalWindow(new Instant(100), new Instant(200));
tester.fireIfShouldFire(arbitraryWindow);
assertFalse(tester.isMarkedFinished(arbitraryWindow));
}
|
@Override
public void gauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
Id metricId = suffixBaseId(id).withTags(tagNameValuePairs);
PolledMeter.remove(registry, metricId);
PolledMeter.using(registry)
.withId(metricId)
.monitorValue(supplier, ignore -> supplier.get().doubleValue());
}
|
@Test
public void testUnregister() {
DefaultRegistry registry = new DefaultRegistry();
SpectatorMetricRegistry metricRegistry = new SpectatorMetricRegistry(registry, registry.createId("foo"));
metricRegistry.gauge("bar", () -> 10);
metricRegistry.gauge("bar", () -> 20);
PolledMeter.update(registry);
Assert.assertEquals(20.0, registry.gauge(registry.createId("foo.bar")).value(), 0);
}
|
public SuperTrendLowerBandIndicator(final BarSeries barSeries) {
this(barSeries, new ATRIndicator(barSeries, 10), 3d);
}
|
@Test
public void testSuperTrendLowerBandIndicator() {
SuperTrendLowerBandIndicator superTrendLowerBandIndicator = new SuperTrendLowerBandIndicator(data);
assertNumEquals(this.numOf(15.730621000000003), superTrendLowerBandIndicator.getValue(4));
assertNumEquals(this.numOf(17.602360938100002), superTrendLowerBandIndicator.getValue(9));
assertNumEquals(this.numOf(2.4620527443048026), superTrendLowerBandIndicator.getValue(14));
}
|
@Override
public String topic() {
if (recordContext == null) {
// This is only exposed via the deprecated ProcessorContext,
// in which case, we're preserving the pre-existing behavior
// of returning dummy values when the record context is undefined.
// For topic, the dummy value is `null`.
return null;
} else {
return recordContext.topic();
}
}
|
@Test
public void shouldReturnTopicFromRecordContext() {
assertThat(context.topic(), equalTo(recordContext.topic()));
}
|
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("position") BigDecimal position,
@ParameterName("newItem") Object newItem) {
if (list == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", CANNOT_BE_NULL));
}
if (position == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", CANNOT_BE_NULL));
}
int intPosition = position.intValue();
if (intPosition == 0 || Math.abs(intPosition) > list.size()) {
String paramProblem = String.format("%s outside valid boundaries (1-%s)", intPosition, list.size());
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", paramProblem));
}
Object e = NumberEvalHelper.coerceNumber(newItem);
List toReturn = new ArrayList(list);
int replacementPosition = intPosition > 0 ? intPosition - 1 : list.size() - Math.abs(intPosition);
toReturn.set(replacementPosition, e);
return FEELFnResult.ofResult(toReturn);
}
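// Hedged usage sketch (not part of the original source): positions are 1-based,
// and a negative position counts back from the end of the list. The function
// returns a new list wrapped in a FEELFnResult; the input list is not mutated.
// Assumes a ListReplaceFunction instance named `fn`, purely for illustration.
List base = Arrays.asList("a", "b", "c");
fn.invoke(base, BigDecimal.valueOf(1), "x");  // result: ["x", "b", "c"]
fn.invoke(base, BigDecimal.valueOf(-1), "x"); // result: ["a", "b", "x"]
fn.invoke(base, BigDecimal.ZERO, "x");        // error: position 0 is outside (1-3)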
|
@Test
void invokeListNull() {
FunctionTestUtil.assertResultError(listReplaceFunction.invoke(null, BigDecimal.ONE, ""), InvalidParametersEvent.class);
}
|
static String resolveEcsEndpoint(AwsConfig awsConfig, String region) {
String ecsHostHeader = awsConfig.getHostHeader();
if (isNullOrEmptyAfterTrim(ecsHostHeader) || ecsHostHeader.equals("ecs")) {
ecsHostHeader = DEFAULT_ECS_HOST_HEADER;
}
return ecsHostHeader.replace("ecs.", "ecs." + region + ".");
}
|
@Test
public void resolveEcsEndpoints() {
assertEquals("ecs.us-east-1.amazonaws.com", resolveEcsEndpoint(AwsConfig.builder().build(), "us-east-1"));
assertEquals("ecs.us-east-1.amazonaws.com",
resolveEcsEndpoint(AwsConfig.builder().setHostHeader("ecs").build(), "us-east-1"));
assertEquals("ecs.us-east-1.something",
resolveEcsEndpoint(AwsConfig.builder().setHostHeader("ecs.something").build(), "us-east-1"));
}
|
public DMNContext populateContextForDecisionServiceWith(String decisionServiceName, Map<String, Object> json) {
DecisionServiceNode dsNode = model.getDecisionServices().stream().filter(ds -> ds.getName().equals(decisionServiceName)).findFirst().orElseThrow(IllegalArgumentException::new);
for (Entry<String, Object> kv : json.entrySet()) {
DecisionServiceNodeImpl dsNodeImpl = (DecisionServiceNodeImpl) dsNode;
DMNNode node = dsNodeImpl.getInputParameters().get(kv.getKey());
if (node instanceof InputDataNode) {
processInputDataNode(kv, (InputDataNode) node);
} else if (node instanceof DecisionNode) {
processDecisionNode(kv, (DecisionNode) node);
} else {
LOG.debug("The key {} was not a RequiredInput nor a RequiredDecision for the DecisionService, setting it as-is.", kv.getKey());
context.set(kv.getKey(), kv.getValue());
}
}
return context;
}
|
@Test
void dSBasicDS2() throws Exception {
final DMNRuntime runtime = createRuntime("0004-decision-services.dmn", DMNDecisionServicesTest.class);
final DMNModel dmnModel = runtime.getModel("http://www.trisotech.com/definitions/_686f58d4-4ec3-4c65-8c06-0e4fd8983def", "Decision Services");
assertThat(dmnModel).isNotNull();
assertThat(dmnModel.hasErrors()).as(DMNRuntimeUtil.formatMessages(dmnModel.getMessages())).isFalse();
DMNContext context = runtime.newContext();
final String JSON = "{ \"additional\":123, \"D\":\"d\", \"E\":\"e\", \"B\":\"inB\", \"C\":\"inC\"}";
new DynamicDMNContextBuilder(context, dmnModel).populateContextForDecisionServiceWith("A Only Knowing B and C", readJSON(JSON));
final DMNResult dmnResult = runtime.evaluateDecisionService(dmnModel, context, "A Only Knowing B and C");
LOG.debug("{}", dmnResult);
assertThat(dmnResult.hasErrors()).as(DMNRuntimeUtil.formatMessages(dmnResult.getMessages())).isFalse();
assertThat(dmnResult.getDecisionResultByName("A").getResult()).isEqualTo("inBinC");
}
|
static void addFieldTypeMapPopulation(BlockStmt body, Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) {
for (Map.Entry<String, KiePMMLOriginalTypeGeneratedType> entry : fieldTypeMap.entrySet()) {
KiePMMLOriginalTypeGeneratedType kiePMMLOriginalTypeGeneratedType = entry.getValue();
NodeList<Expression> expressions =
NodeList.nodeList(new StringLiteralExpr(kiePMMLOriginalTypeGeneratedType.getOriginalType()),
new StringLiteralExpr(kiePMMLOriginalTypeGeneratedType.getGeneratedType()));
ObjectCreationExpr objectCreationExpr = new ObjectCreationExpr();
objectCreationExpr.setType(KiePMMLOriginalTypeGeneratedType.class.getName());
objectCreationExpr.setArguments(expressions);
expressions = NodeList.nodeList(new StringLiteralExpr(entry.getKey()), objectCreationExpr);
body.addStatement(new MethodCallExpr(new NameExpr("fieldTypeMap"), "put", expressions));
}
}
|
@Test
void addFieldTypeMapPopulation() {
BlockStmt blockStmt = new BlockStmt();
Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap = new HashMap<>();
IntStream.range(0, 3).forEach(index -> {
String key = "KEY-" + index;
KiePMMLOriginalTypeGeneratedType value = new KiePMMLOriginalTypeGeneratedType("ORIGINALTYPE-" + index,
"GENERATEDTYPE-" + index);
fieldTypeMap.put(key, value);
});
KiePMMLDroolsModelFactoryUtils.addFieldTypeMapPopulation(blockStmt, fieldTypeMap);
commonEvaluateFieldTypeMap(blockStmt, fieldTypeMap, fieldTypeMap.size());
}
|
@GET
@Produces(MediaType.APPLICATION_JSON)
@Operation(summary = "Get prekey count",
description = "Gets the number of one-time prekeys uploaded for this device and still available")
@ApiResponse(responseCode = "200", description = "Body contains the number of available one-time prekeys for the device.", useReturnTypeSchema = true)
@ApiResponse(responseCode = "401", description = "Account authentication check failed.")
public CompletableFuture<PreKeyCount> getStatus(@ReadOnly @Auth final AuthenticatedDevice auth,
@QueryParam("identity") @DefaultValue("aci") final IdentityType identityType) {
final CompletableFuture<Integer> ecCountFuture =
keysManager.getEcCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId());
final CompletableFuture<Integer> pqCountFuture =
keysManager.getPqCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId());
return ecCountFuture.thenCombine(pqCountFuture, PreKeyCount::new);
}
|
@Test
void testNoDevices() {
when(existsAccount.getDevices()).thenReturn(Collections.emptyList());
Response result = resources.getJerseyTest()
.target(String.format("/v2/keys/%s/*", EXISTS_UUID))
.request()
.header(HeaderUtils.UNIDENTIFIED_ACCESS_KEY, AuthHelper.getUnidentifiedAccessHeader("1337".getBytes()))
.get();
assertThat(result).isNotNull();
assertThat(result.getStatus()).isEqualTo(404);
}
|
public static SortOrder buildSortOrder(Table table) {
return buildSortOrder(table.schema(), table.spec(), table.sortOrder());
}
|
@Test
public void testEmptySpecsV2() {
PartitionSpec spec = PartitionSpec.unpartitioned();
SortOrder order = SortOrder.builderFor(SCHEMA).withOrderId(1).asc("id", NULLS_LAST).build();
TestTables.TestTable table = TestTables.create(tableDir, "test", SCHEMA, spec, order, 2);
// pass PartitionSpec.unpartitioned() on purpose as it has an empty schema
SortOrder actualOrder = SortOrderUtil.buildSortOrder(table.schema(), spec, table.sortOrder());
assertThat(actualOrder.orderId()).as("Order ID must be fresh").isOne();
assertThat(actualOrder.fields()).as("Order must have 1 field").hasSize(1);
assertThat(actualOrder.fields().get(0).sourceId()).as("Field id must be fresh").isOne();
assertThat(actualOrder.fields().get(0).direction()).as("Direction must match").isEqualTo(ASC);
assertThat(actualOrder.fields().get(0).nullOrder())
.as("Null order must match")
.isEqualTo(NULLS_LAST);
}
|
public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
try {
return Enum.valueOf(AuthenticationMethod.class,
StringUtils.toUpperCase(value));
} catch (IllegalArgumentException iae) {
throw new IllegalArgumentException("Invalid attribute value for " +
HADOOP_SECURITY_AUTHENTICATION + " of " + value);
}
}
|
@Test
public void testGetAuthenticationMethod() {
Configuration conf = new Configuration();
// default is simple
conf.unset(HADOOP_SECURITY_AUTHENTICATION);
assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
// simple
conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
// kerberos
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
// bad value
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
String error = null;
try {
SecurityUtil.getAuthenticationMethod(conf);
} catch (Exception e) {
error = e.toString();
}
assertEquals("java.lang.IllegalArgumentException: " +
"Invalid attribute value for " +
HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
}
|
@PostMapping
public Mono<ResponseEntity<ProductReview>> createProductReview(
Mono<JwtAuthenticationToken> authenticationTokenMono,
@Valid @RequestBody Mono<NewProductReviewPayload> payloadMono,
UriComponentsBuilder uriComponentsBuilder) {
return authenticationTokenMono.flatMap(token -> payloadMono
.flatMap(payload -> this.productReviewsService.createProductReview(payload.productId(),
payload.rating(), payload.review(), token.getToken().getSubject())))
.map(productReview -> ResponseEntity
.created(uriComponentsBuilder.replacePath("/feedback-api/product-reviews/{id}")
.build(productReview.getId()))
.body(productReview));
}
|
@Test
void createProductReview_ReturnsCreatedProductReview() {
// given
doReturn(Mono.just(new ProductReview(UUID.fromString("5a9ba234-cbd6-11ee-acab-5748ca6678b9"), 1, 4,
"Fine overall", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")))
.when(this.productReviewsService)
.createProductReview(1, 4, "Fine overall", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
// when
StepVerifier.create(this.controller.createProductReview(
Mono.just(new JwtAuthenticationToken(Jwt.withTokenValue("e30.e30")
.headers(headers -> headers.put("foo", "bar"))
.claim("sub", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c").build())),
Mono.just(new NewProductReviewPayload(1, 4, "Fine overall")),
UriComponentsBuilder.fromUriString("http://localhost")))
// then
.expectNext(ResponseEntity.created(URI.create("http://localhost/feedback-api/product-reviews/5a9ba234-cbd6-11ee-acab-5748ca6678b9"))
.body(new ProductReview(UUID.fromString("5a9ba234-cbd6-11ee-acab-5748ca6678b9"), 1, 4,
"Fine overall", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")))
.verifyComplete();
verify(this.productReviewsService)
.createProductReview(1, 4, "Fine overall", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
verifyNoMoreInteractions(this.productReviewsService);
}
|
@Override
public void initialize(String name, Map<String, String> properties) {
Preconditions.checkNotNull(properties, "Invalid catalog properties: null");
String uri = properties.get(CatalogProperties.URI);
Preconditions.checkNotNull(uri, "JDBC connection URI is required");
String inputWarehouseLocation = properties.get(CatalogProperties.WAREHOUSE_LOCATION);
Preconditions.checkArgument(
!Strings.isNullOrEmpty(inputWarehouseLocation),
"Cannot initialize JDBCCatalog because warehousePath must not be null or empty");
this.warehouseLocation = LocationUtil.stripTrailingSlash(inputWarehouseLocation);
this.catalogProperties = ImmutableMap.copyOf(properties);
if (name != null) {
this.catalogName = name;
}
if (null != ioBuilder) {
this.io = ioBuilder.apply(properties);
} else {
String ioImpl =
properties.getOrDefault(
CatalogProperties.FILE_IO_IMPL, "org.apache.iceberg.hadoop.HadoopFileIO");
this.io = CatalogUtil.loadFileIO(ioImpl, properties, conf);
}
LOG.debug("Connecting to JDBC database {}", uri);
if (null != clientPoolBuilder) {
this.connections = clientPoolBuilder.apply(properties);
} else {
this.connections = new JdbcClientPool(uri, properties);
}
this.initializeCatalogTables =
PropertyUtil.propertyAsBoolean(
properties, JdbcUtil.INIT_CATALOG_TABLES_PROPERTY, initializeCatalogTables);
if (initializeCatalogTables) {
initializeCatalogTables();
}
updateSchemaIfRequired();
this.closeableGroup = new CloseableGroup();
closeableGroup.addCloseable(metricsReporter());
closeableGroup.addCloseable(connections);
closeableGroup.setSuppressCloseFailure(true);
}
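// Hedged sketch (illustrative only, not the library's documented recipe): the
// minimal properties initialize() requires. The JDBC URI and a non-empty
// warehouse location are mandatory; FileIO, the client pool, and catalog-table
// initialization all fall back to the constructor/property defaults.
Map<String, String> props = Maps.newHashMap();
props.put(CatalogProperties.URI, "jdbc:sqlite:/tmp/catalog.db");
props.put(CatalogProperties.WAREHOUSE_LOCATION, "/tmp/warehouse");
JdbcCatalog jdbcCatalog = new JdbcCatalog();
jdbcCatalog.initialize("my_catalog", props);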
|
@Test
public void testEnableInitCatalogTablesOverridesDefault() throws Exception {
// This test uses multiple connections, so an in-memory database (which is
// per-connection) won't work; use a file-backed database instead.
java.nio.file.Path dbFile = Files.createTempFile("icebergInitCatalogTables", "db");
String jdbcUrl = "jdbc:sqlite:" + dbFile.toAbsolutePath();
Map<String, String> properties = Maps.newHashMap();
properties.put(CatalogProperties.WAREHOUSE_LOCATION, this.tableDir.toAbsolutePath().toString());
properties.put(CatalogProperties.URI, jdbcUrl);
properties.put(JdbcUtil.INIT_CATALOG_TABLES_PROPERTY, "true");
JdbcCatalog jdbcCatalog = new JdbcCatalog(null, null, false);
jdbcCatalog.initialize("test_jdbc_catalog", properties);
assertThat(catalogTablesExist(jdbcUrl)).isTrue();
}
|
public Tuple2<Long, Double> increase(String name, ImmutableMap<String, String> labels, Double value, long windowSize, long now) {
ID id = new ID(name, labels);
Queue<Tuple2<Long, Double>> window = windows.computeIfAbsent(id, unused -> new PriorityQueue<>());
synchronized (window) {
window.offer(Tuple.of(now, value));
long waterLevel = now - windowSize;
Tuple2<Long, Double> peek = window.peek();
if (peek._1 > waterLevel) {
return peek;
}
Tuple2<Long, Double> result = peek;
// Drop samples that have fallen out of the window, remembering the last one dropped.
while (peek._1 < waterLevel) {
result = window.poll();
peek = window.element();
}
// Choose the slot closest to the expected timestamp (the window's lower bound)
if (waterLevel - result._1 <= peek._1 - waterLevel) {
return result;
}
return peek;
}
}
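// Hedged sketch of the semantics (values are illustrative): each call records
// (now, value) and returns the retained sample whose timestamp is closest to
// now - windowSize, so the caller can compute the counter's increase over the
// window. On the very first call the new sample itself is returned (delta 0).
Tuple2<Long, Double> baseline = CounterWindow.INSTANCE.increase(
"demo", ImmutableMap.<String, String>builder().build(), 10d, 60_000L, 120_000L);
double increaseOverWindow = 10d - baseline._2;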
|
@Test
public void testPT1M() {
double[] actuals = parameters().stream().mapToDouble(e -> {
Tuple2<Long, Double> increase = CounterWindow.INSTANCE.increase(
"test", ImmutableMap.<String, String>builder().build(), e._2,
Duration.parse("PT1M").getSeconds() * 1000, e._1
);
return e._2 - increase._2;
}).toArray();
Assertions.assertArrayEquals(new double[] {0, 1d, 2d, 3d, 4d, 0d, 5d, 5d}, actuals, 0.d);
}
|
@Override
protected TableRecords getUndoRows() {
return super.getUndoRows();
}
|
@Test
public void getUndoRows() {
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getAfterImage());
}
|
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
Set<Integer> podIdsToRestart = new HashSet<>();
List<Future<Void>> futures = new ArrayList<>(pvcs.size());
for (PersistentVolumeClaim desiredPvc : pvcs) {
Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
.compose(currentPvc -> {
if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
// This branch handles the following conditions:
// * The PVC doesn't exist yet, we should create it
// * The PVC is not Bound, we should reconcile it
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else {
// The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));
if (!currentSize.equals(desiredSize)) {
// The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
return resizePvc(kafkaStatus, currentPvc, desiredPvc);
} else {
// size didn't change, just reconcile
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
}
}
});
futures.add(perPvcFuture);
}
return Future.all(futures)
.map(podIdsToRestart);
}
|
@Test
public void testVolumesBoundMissingStorageClass(VertxTestContext context) {
List<PersistentVolumeClaim> pvcs = List.of(
createPvc("data-pod-0"),
createPvc("data-pod-1"),
createPvc("data-pod-2")
);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the PVC Operator
PvcOperator mockPvcOps = supplier.pvcOperations;
when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
.thenAnswer(invocation -> {
String pvcName = invocation.getArgument(1);
PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
if (currentPvc != null) {
PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
.editSpec()
.withNewResources()
.withRequests(Map.of("storage", new Quantity("50Gi", null)))
.endResources()
.endSpec()
.withNewStatus()
.withPhase("Bound")
.withCapacity(Map.of("storage", new Quantity("50Gi", null)))
.endStatus()
.build();
return Future.succeededFuture(pvcWithStatus);
} else {
return Future.succeededFuture();
}
});
ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock the StorageClass Operator
StorageClassOperator mockSco = supplier.storageClassOperations;
when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(null));
// Reconcile the PVCs
PvcReconciler reconciler = new PvcReconciler(
new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
mockPvcOps,
mockSco
);
// Used to capture the warning condition
KafkaStatus kafkaStatus = new KafkaStatus();
Checkpoint async = context.checkpoint();
reconciler.resizeAndReconcilePvcs(kafkaStatus, pvcs)
.onComplete(res -> {
assertThat(res.succeeded(), is(true));
assertThat(res.result().size(), is(0));
assertThat(pvcCaptor.getAllValues().size(), is(0));
assertThat(kafkaStatus.getConditions().size(), is(3));
kafkaStatus.getConditions().stream().forEach(c -> {
assertThat(c.getReason(), is("PvcResizingWarning"));
assertThat(c.getMessage(), containsString("Storage Class mysc not found."));
});
async.flag();
});
}
|
@Override
public String telnet(Channel channel, String message) {
if (StringUtils.isEmpty(message)) {
return "Please input service name, eg: \r\ncd XxxService\r\ncd com.xxx.XxxService";
}
StringBuilder buf = new StringBuilder();
if ("/".equals(message) || "..".equals(message)) {
String service = (String) channel.getAttribute(SERVICE_KEY);
channel.removeAttribute(SERVICE_KEY);
buf.append("Cancelled default service ").append(service).append('.');
} else {
boolean found = false;
for (Exporter<?> exporter : DubboProtocol.getDubboProtocol().getExporters()) {
if (message.equals(exporter.getInvoker().getInterface().getSimpleName())
|| message.equals(exporter.getInvoker().getInterface().getName())
|| message.equals(exporter.getInvoker().getUrl().getPath())) {
found = true;
break;
}
}
if (found) {
channel.setAttribute(SERVICE_KEY, message);
buf.append("Used the ")
.append(message)
.append(" as default.\r\nYou can cancel default service by command: cd /");
} else {
buf.append("No such service ").append(message);
}
}
return buf.toString();
}
|
@Test
void testChangeCancel2() throws RemotingException {
String result = change.telnet(mockChannel, "/");
assertEquals("Cancelled default service org.apache.dubbo.rpc.protocol.dubbo.support.DemoService.", result);
}
|
@Override
public Integer doCall() throws Exception {
JsonObject pluginConfig = loadConfig();
JsonObject plugins = pluginConfig.getMap("plugins");
Object plugin = plugins.remove(name);
if (plugin != null) {
printer().printf("Plugin %s removed%n", name);
saveConfig(pluginConfig);
} else {
printer().printf("Plugin %s not found in configuration%n", name);
}
return 0;
}
|
@Test
public void shouldHandleUnknownPlugin() throws Exception {
PluginDelete command = new PluginDelete(new CamelJBangMain().withPrinter(printer));
command.name = "foo";
command.doCall();
Assertions.assertEquals("Plugin foo not found in configuration", printer.getOutput());
Assertions.assertEquals("{\"plugins\":{}}", PluginHelper.getOrCreatePluginConfig().toJson());
}
|
public RedisConnectionFactory(final RedisConfigProperties redisConfigProperties) {
lettuceConnectionFactory = createLettuceConnectionFactory(redisConfigProperties);
lettuceConnectionFactory.afterPropertiesSet();
}
|
@Test
public void redisConnectionFactoryTest() {
final RedisConfigProperties redisConfigProperties = mock(RedisConfigProperties.class);
when(redisConfigProperties.getMode()).thenReturn(RedisModeEnum.SENTINEL.getName());
when(redisConfigProperties.getMaster()).thenReturn("master");
when(redisConfigProperties.getUrl()).thenReturn("localhost:6379");
Assertions.assertDoesNotThrow(() -> new RedisConnectionFactory(redisConfigProperties));
when(redisConfigProperties.getPassword()).thenReturn("password");
Assertions.assertDoesNotThrow(() -> new RedisConnectionFactory(redisConfigProperties));
when(redisConfigProperties.getMode()).thenReturn(RedisModeEnum.CLUSTER.getName());
Assertions.assertDoesNotThrow(() -> new RedisConnectionFactory(redisConfigProperties));
when(redisConfigProperties.getPassword()).thenReturn(null);
Assertions.assertDoesNotThrow(() -> new RedisConnectionFactory(redisConfigProperties));
when(redisConfigProperties.getMode()).thenReturn(RedisModeEnum.STANDALONE.getName());
when(redisConfigProperties.getPassword()).thenReturn("password");
when(redisConfigProperties.getMaxWait()).thenReturn(Duration.ofMillis(-1));
Assertions.assertDoesNotThrow(() -> new RedisConnectionFactory(redisConfigProperties));
}
|
@Override
public Acl getPermission(final Path file) throws BackgroundException {
try {
final Acl acl = new Acl();
if(containerService.isContainer(file)) {
final BucketAccessControls controls = session.getClient().bucketAccessControls().list(
containerService.getContainer(file).getName()).execute();
for(BucketAccessControl control : controls.getItems()) {
final String entity = control.getEntity();
acl.addAll(this.toUser(entity, control.getEmail()), new Acl.Role(control.getRole()));
}
}
else {
final ObjectAccessControls controls = session.getClient().objectAccessControls().list(containerService.getContainer(file).getName(), containerService.getKey(file)).execute();
for(ObjectAccessControl control : controls.getItems()) {
final String entity = control.getEntity();
acl.addAll(this.toUser(entity, control.getEmail()), this.toRole(control));
}
}
return acl;
}
catch(IOException e) {
final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(file.isDirectory()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist, but we may still have a common prefix
return Acl.EMPTY;
}
}
if(failure instanceof InteroperabilityException) {
// The specified method is not allowed against this resource. The case for delete markers in versioned buckets.
return Acl.EMPTY;
}
throw failure;
}
}
|
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final GoogleStorageAccessControlListFeature f = new GoogleStorageAccessControlListFeature(session);
f.getPermission(test);
}
|
@Override
@MethodNotAvailable
public V replace(K key, V newValue) {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testReplaceWithOldValue() {
adapter.replace(23, "oldValue", "newValue");
}
|
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
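// Hedged sketch (uses the aclEntry(...) helpers visible in the tests below):
// spec entries replace existing entries with the same (scope, type, name),
// unmatched spec entries are appended, and a mask entry is recalculated for
// every scope the spec touched.
List<AclEntry> merged = mergeAclEntries(
ImmutableList.of(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE)),
Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)));
// merged now contains the named entry for "bruce" plus a computed ACCESS mask.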
|
@Test(expected=AclException.class)
public void testMergeAclEntriesNamedOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE));
mergeAclEntries(existing, aclSpec);
}
|
Collection<OutputFile> compile() {
List<OutputFile> out = new ArrayList<>(queue.size() + 1);
for (Schema schema : queue) {
out.add(compile(schema));
}
if (protocol != null) {
out.add(compileInterface(protocol));
}
return out;
}
|
@Test
void invalidParameterCounts() throws Exception {
Schema invalidSchema1 = createSampleRecordSchema(SpecificCompiler.MAX_FIELD_PARAMETER_UNIT_COUNT + 1, 0);
SpecificCompiler compiler = new SpecificCompiler(invalidSchema1);
assertCompilesWithJavaCompiler(new File(OUTPUT_DIR, "testInvalidParameterCounts1"), compiler.compile());
Schema invalidSchema2 = createSampleRecordSchema(SpecificCompiler.MAX_FIELD_PARAMETER_UNIT_COUNT, 10);
compiler = new SpecificCompiler(invalidSchema2);
assertCompilesWithJavaCompiler(new File(OUTPUT_DIR, "testInvalidParameterCounts2"), compiler.compile());
}
|
public static BytesInput fromZigZagVarLong(long longValue) {
long zigZag = (longValue << 1) ^ (longValue >> 63);
return new UnsignedVarLongBytesInput(zigZag);
}
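// ZigZag interleaves signed longs so small magnitudes encode in few var-int
// bytes: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... The arithmetic shift (n >> 63)
// smears the sign bit across all 64 bits before the XOR. A minimal check of
// the mapping (plain Java, no Parquet classes needed):
long n = -2L;
long zigZag = (n << 1) ^ (n >> 63); // == 3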
|
@Test
public void testFromZigZagVarLong() throws IOException {
long value = RANDOM.nextInt();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
BytesUtils.writeZigZagVarLong(value, baos);
byte[] data = baos.toByteArray();
Supplier<BytesInput> factory = () -> BytesInput.fromZigZagVarLong(value);
validate(data, factory);
}
|
public static Configuration loadConfiguration() {
return loadConfiguration(new Configuration());
}
|
@Test
void testConfigurationWithStandardYAML() throws FileNotFoundException {
File confFile = new File(tmpDir, GlobalConfiguration.FLINK_CONF_FILENAME);
try (final PrintWriter pw = new PrintWriter(confFile)) {
pw.println("Key1: ");
pw.println(" Key2: v1");
pw.println(" Key3: 'v2'");
pw.println("Key4: 1");
pw.println("Key5: '1'");
pw.println("Key6: '*'");
pw.println("Key7: true");
pw.println("Key8: 'true'");
pw.println("Key9: [a, b, '*', 1, '2', true, 'true']");
pw.println("Key10: {k1: v1, k2: '2', k3: 3}");
pw.println("Key11: [{k1: v1, k2: '2', k3: 3}, {k4: true}]");
}
Configuration conf = GlobalConfiguration.loadConfiguration(tmpDir.getAbsolutePath());
assertThat(conf.keySet()).hasSize(12);
assertThat(conf.get(ConfigOptions.key("Key1.Key2").stringType().noDefaultValue()))
.isEqualTo("v1");
assertThat(conf.get(ConfigOptions.key("Key1.Key3").stringType().noDefaultValue()))
.isEqualTo("v2");
assertThat(conf.get(ConfigOptions.key("Key4").intType().noDefaultValue())).isOne();
assertThat(conf.get(ConfigOptions.key("Key5").stringType().noDefaultValue()))
.isEqualTo("1");
assertThat(conf.get(ConfigOptions.key("Key6").stringType().noDefaultValue()))
.isEqualTo("*");
assertThat(conf.get(ConfigOptions.key("Key7").booleanType().noDefaultValue())).isTrue();
assertThat(conf.get(ConfigOptions.key("Key8").stringType().noDefaultValue()))
.isEqualTo("true");
assertThat(conf.get(ConfigOptions.key("Key9").stringType().asList().noDefaultValue()))
.isEqualTo(Arrays.asList("a", "b", "*", "1", "2", "true", "true"));
Map<String, String> map = new HashMap<>();
map.put("k1", "v1");
map.put("k2", "2");
map.put("k3", "3");
assertThat(conf.get(ConfigOptions.key("Key10").mapType().noDefaultValue())).isEqualTo(map);
Map<String, String> map2 = new HashMap<>();
map2.put("k4", "true");
assertThat(conf.get(ConfigOptions.key("Key11").mapType().asList().noDefaultValue()))
.isEqualTo(Arrays.asList(map, map2));
}
|
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (listClass != null || inner != null) {
log.error("Could not configure ListDeserializer as some parameters were already set -- listClass: {}, inner: {}", listClass, inner);
throw new ConfigException("List deserializer was already initialized using a non-default constructor");
}
configureListClass(configs, isKey);
configureInnerSerde(configs, isKey);
}
|
@Test
public void testListValueDeserializerNoArgConstructorsShouldThrowConfigExceptionDueMissingInnerClassProp() {
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS, ArrayList.class);
final ConfigException exception = assertThrows(
ConfigException.class,
() -> listDeserializer.configure(props, false)
);
assertEquals("Not able to determine the inner serde class because "
+ "it was neither passed via the constructor nor set in the config.", exception.getMessage());
}
|
public static String getPathAndQuery(ApplicationId id, String path,
String query, boolean approved) {
StringBuilder newp = new StringBuilder();
newp.append(getPath(id, path));
boolean first = appendQuery(newp, query, true);
if(approved) {
appendQuery(newp, PROXY_APPROVAL_PARAM+"=true", first);
}
return newp.toString();
}
|
@Test
void testGetPathAndQuery() {
assertEquals("/proxy/application_6384623_0005/static/app?foo=bar",
ProxyUriUtils.getPathAndQuery(BuilderUtils.newApplicationId(6384623L, 5), "/static/app",
"?foo=bar", false));
assertEquals("/proxy/application_6384623_0005/static/app?foo=bar&bad=good&proxyapproved=true",
ProxyUriUtils.getPathAndQuery(BuilderUtils.newApplicationId(6384623L, 5), "/static/app",
"foo=bar&bad=good", true));
}
|
@Override
public void flushScheduled() {
}
|
@Test
public void flushScheduled() {
mSensorsAPI.flushScheduled();
}
|
@Override
public List<Namespace> listNamespaces(Namespace namespace) {
SnowflakeIdentifier scope = NamespaceHelpers.toSnowflakeIdentifier(namespace);
List<SnowflakeIdentifier> results;
switch (scope.type()) {
case ROOT:
results = snowflakeClient.listDatabases();
break;
case DATABASE:
results = snowflakeClient.listSchemas(scope);
break;
default:
throw new IllegalArgumentException(
String.format(
"listNamespaces must be at either ROOT or DATABASE level; got %s from namespace %s",
scope, namespace));
}
return results.stream().map(NamespaceHelpers::toIcebergNamespace).collect(Collectors.toList());
}
|
@Test
public void testListNamespaceInRoot() {
assertThat(catalog.listNamespaces())
.containsExactly(Namespace.of("DB_1"), Namespace.of("DB_2"), Namespace.of("DB_3"));
}
|
@Override
public TenantDO getTenantByWebsite(String website) {
return tenantMapper.selectByWebsite(website);
}
|
@Test
public void testGetTenantByWebsite() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setWebsite("https://www.iocoder.cn"));
tenantMapper.insert(dbTenant); // @Sql: insert an existing record first
// invoke
TenantDO result = tenantService.getTenantByWebsite("https://www.iocoder.cn");
// verify the record exists
assertPojoEquals(result, dbTenant);
}
|
public static String getPartitionName(String basePath, String partitionPath) {
String basePathWithSlash = getPathWithSlash(basePath);
String partitionPathWithSlash = getPathWithSlash(partitionPath);
if (basePathWithSlash.equals(partitionPathWithSlash)) {
return "";
}
Preconditions.checkState(partitionPath.startsWith(basePathWithSlash),
"Can't infer partition name. base path: %s, partition path: %s", basePath, partitionPath);
partitionPath = partitionPath.endsWith("/") ? partitionPath.substring(0, partitionPath.length() - 1) : partitionPath;
return partitionPath.substring(basePathWithSlash.length());
}
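// Hedged sketch of the contract (paths are illustrative): the partition name is
// the partition path made relative to the base path, with trailing slashes
// normalized away.
String name = getPartitionName(
"hdfs://host:9000/table", "hdfs://host:9000/table/dt=2024-01-01/"); // "dt=2024-01-01"
String empty = getPartitionName(
"hdfs://host:9000/table", "hdfs://host:9000/table/");               // ""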
|
@Test
public void testGetPartition() {
String base = "hdfs://hadoop01:9000/mytable";
String tableLocation = "hdfs://hadoop01:9000/mytable/";
Assert.assertTrue(getPartitionName(base, tableLocation).isEmpty());
String errorPath = "hdfs://aaa/bbb";
ExceptionChecker.expectThrowsWithMsg(
IllegalStateException.class,
"Can't infer partition name. base path",
() -> PartitionUtil.getPartitionName(base, errorPath));
String partitionPath = "hdfs://hadoop01:9000/mytable/year=2023/month=12/day=30";
Assert.assertEquals("year=2023/month=12/day=30", PartitionUtil.getPartitionName(base, partitionPath));
}
|
public synchronized boolean saveNamespace(long timeWindow, long txGap,
FSNamesystem source) throws IOException {
// Skip the checkpoint when the latest image is both recent enough (timeWindow)
// and close enough in transactions (txGap) to the current state.
if (timeWindow > 0 || txGap > 0) {
final FSImageStorageInspector inspector = storage.readAndInspectDirs(
EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
StartupOption.REGULAR);
FSImageFile image = inspector.getLatestImages().get(0);
File imageFile = image.getFile();
final long checkpointTxId = image.getCheckpointTxId();
final long checkpointAge = Time.now() - imageFile.lastModified();
if (checkpointAge <= timeWindow * 1000 &&
checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
return false;
}
}
saveNamespace(source, NameNodeFile.IMAGE, null);
return true;
}
|
@Test
public void testDigest() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
DistributedFileSystem fs = cluster.getFileSystem();
fs.setSafeMode(SafeModeAction.ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.LEAVE);
File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
0);
File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
.getAbsolutePath());
assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
MD5FileUtils.computeMd5ForFile(fsimage));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
|
@Override
public double logp(double x) {
return Math.log(p(x));
}
|
@Test
public void testLogp() {
System.out.println("logp");
KernelDensity instance = new KernelDensity(x);
double expResult = -2.29044906;
double result = instance.logp(3.5);
assertEquals(expResult, result, 1E-8);
}
|
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
}
|
@Test
void onConcurrentJobModificationExceptionAllIsStillOk() throws Exception {
Job job = anEnqueuedJob().build();
when(storageProvider.save(job))
.thenReturn(job)
.thenThrow(new ConcurrentJobModificationException(job));
mockBackgroundJobRunner(job, job1 -> {
});
BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
backgroundJobPerformer.run();
}
|
@Override
public List<String> findConfigInfoTags(String dataId, String group, String tenant) {
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_TAG);
final String sql = configInfoTagMapper.select(Collections.singletonList("tag_id"),
Arrays.asList("data_id", "group_id", "tenant_id"));
return databaseOperate.queryMany(sql, new Object[] {dataId, group, tenantTmp}, String.class);
}
|
@Test
void testFindConfigInfoTags() {
String dataId = "dataId1112222";
String group = "group22";
String tenant = "tenant2";
List<String> mockedTags = Arrays.asList("tags1", "tags11", "tags111");
Mockito.when(databaseOperate.queryMany(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class)))
.thenReturn(mockedTags);
List<String> configInfoTags = embeddedConfigInfoTagPersistService.findConfigInfoTags(dataId, group, tenant);
assertEquals(mockedTags, configInfoTags);
}
|
public long getEndInMs() {
return this.endInMs;
}
|
@Test
void testEndInMs() {
long startTime = System.currentTimeMillis();
Pane<?> pane = new Pane<>(10, startTime, new Object());
assertEquals(startTime + 10, pane.getEndInMs());
}
|
public TolerantDoubleComparison isNotWithin(double tolerance) {
return new TolerantDoubleComparison() {
@Override
public void of(double expected) {
Double actual = DoubleSubject.this.actual;
checkNotNull(
actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
checkTolerance(tolerance);
if (!notEqualWithinTolerance(actual, expected, tolerance)) {
failWithoutActual(
fact("expected not to be", doubleToString(expected)),
butWas(),
fact("within tolerance", doubleToString(tolerance)));
}
}
};
}
|
@Test
public void isNotWithinOfZero() {
assertThat(+0.0).isNotWithin(0.00001).of(+1.0);
assertThat(+0.0).isNotWithin(0.00001).of(-1.0);
assertThat(-0.0).isNotWithin(0.00001).of(+1.0);
assertThat(-0.0).isNotWithin(0.00001).of(-1.0);
assertThat(+1.0).isNotWithin(0.00001).of(+0.0);
assertThat(+1.0).isNotWithin(0.00001).of(-0.0);
assertThat(-1.0).isNotWithin(0.00001).of(+0.0);
assertThat(-1.0).isNotWithin(0.00001).of(-0.0);
assertThat(+1.0).isNotWithin(0.0).of(+0.0);
assertThat(+1.0).isNotWithin(0.0).of(-0.0);
assertThat(-1.0).isNotWithin(0.0).of(+0.0);
assertThat(-1.0).isNotWithin(0.0).of(-0.0);
assertThatIsNotWithinFails(-0.0, 0.0, 0.0);
}
|
@Override
public List<Intent> compile(MultiPointToSinglePointIntent intent, List<Intent> installable) {
Map<DeviceId, Link> links = new HashMap<>();
ConnectPoint egressPoint = intent.egressPoint();
final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
boolean hasPaths = false;
boolean missingSomePaths = false;
for (ConnectPoint ingressPoint : intent.ingressPoints()) {
if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
if (deviceService.isAvailable(ingressPoint.deviceId())) {
hasPaths = true;
} else {
missingSomePaths = true;
}
continue;
}
Path path = getPath(intent, ingressPoint.deviceId(), egressPoint.deviceId());
if (path != null) {
hasPaths = true;
for (Link link : path.links()) {
if (links.containsKey(link.dst().deviceId())) {
// We've already reached the existing tree with the first
// part of this path. Add the merging point with different
// incoming port, but don't add the remainder of the path
// in case it differs from the path we already have.
links.put(link.src().deviceId(), link);
break;
}
links.put(link.src().deviceId(), link);
}
} else {
missingSomePaths = true;
}
}
// Allocate bandwidth on existing paths if a bandwidth constraint is set
List<ConnectPoint> ingressCPs =
intent.filteredIngressPoints().stream()
.map(fcp -> fcp.connectPoint())
.collect(Collectors.toList());
ConnectPoint egressCP = intent.filteredEgressPoint().connectPoint();
List<ConnectPoint> pathCPs =
links.values().stream()
.flatMap(l -> Stream.of(l.src(), l.dst()))
.collect(Collectors.toList());
pathCPs.addAll(ingressCPs);
pathCPs.add(egressCP);
allocateBandwidth(intent, pathCPs);
if (!hasPaths) {
throw new IntentException("Cannot find any path between ingress and egress points.");
} else if (!allowMissingPaths && missingSomePaths) {
throw new IntentException("Missing some paths between ingress and egress points.");
}
Intent result = LinkCollectionIntent.builder()
.appId(intent.appId())
.key(intent.key())
.treatment(intent.treatment())
.selector(intent.selector())
.links(Sets.newHashSet(links.values()))
.filteredIngressPoints(intent.filteredIngressPoints())
.filteredEgressPoints(ImmutableSet.of(intent.filteredEgressPoint()))
.priority(intent.priority())
.constraints(intent.constraints())
.resourceGroup(intent.resourceGroup())
.build();
return Collections.singletonList(result);
}
|
@Test
public void testSingleLongPathCompilation() {
Set<FilteredConnectPoint> ingress =
Sets.newHashSet(new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1)));
FilteredConnectPoint egress =
new FilteredConnectPoint(new ConnectPoint(DID_8, PORT_1));
MultiPointToSinglePointIntent intent = makeIntent(ingress, egress);
assertThat(intent, is(notNullValue()));
String[] hops = {S2, S3, S4, S5, S6, S7};
MultiPointToSinglePointIntentCompiler compiler = makeCompiler(hops);
assertThat(compiler, is(notNullValue()));
List<Intent> result = compiler.compile(intent, null);
assertThat(result, is(Matchers.notNullValue()));
assertThat(result, hasSize(1));
Intent resultIntent = result.get(0);
assertThat(resultIntent instanceof LinkCollectionIntent, is(true));
if (resultIntent instanceof LinkCollectionIntent) {
LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
assertThat(linkIntent.links(), hasSize(7));
assertThat(linkIntent.links(), linksHasPath(S1, S2));
assertThat(linkIntent.links(), linksHasPath(S2, S3));
assertThat(linkIntent.links(), linksHasPath(S3, S4));
assertThat(linkIntent.links(), linksHasPath(S4, S5));
assertThat(linkIntent.links(), linksHasPath(S5, S6));
assertThat(linkIntent.links(), linksHasPath(S6, S7));
assertThat(linkIntent.links(), linksHasPath(S7, S8));
}
assertThat("key is inherited", resultIntent.key(), is(intent.key()));
}
|
@Override
@Nonnull
public <T> List<Future<T>> invokeAll(@Nonnull Collection<? extends Callable<T>> tasks) {
throwRejectedExecutionExceptionIfShutdown();
ArrayList<Future<T>> result = new ArrayList<>();
for (Callable<T> task : tasks) {
try {
result.add(new CompletedFuture<>(task.call(), null));
} catch (Exception e) {
result.add(new CompletedFuture<>(null, e));
}
}
return result;
}
|
@Test
void testInvokeAll() {
final CompletableFuture<Thread> future = new CompletableFuture<>();
testTaskSubmissionBeforeShutdown(
testInstance -> testInstance.invokeAll(callableCollectionFromFuture(future)));
assertThat(future).isCompletedWithValue(Thread.currentThread());
}
|
public static WindowStoreIterator<ValueAndTimestamp<GenericRow>> fetch(
final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store,
final GenericKey key,
final Instant lower,
final Instant upper
) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>>> stores
= getStores(store);
final Function<ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>>,
WindowStoreIterator<ValueAndTimestamp<GenericRow>>> fetchFunc = windowStore ->
fetchUncached(windowStore, key, lower, upper);
return findFirstNonEmptyIterator(stores, fetchFunc);
}
|
@Test
public void shouldThrowException_wrongStateStore() {
when(provider.stores(any(), any())).thenReturn(ImmutableList.of(windowStore));
final Exception e = assertThrows(
IllegalStateException.class,
() -> WindowStoreCacheBypass.fetch(store, SOME_KEY,
Instant.ofEpochMilli(100), Instant.ofEpochMilli(200))
);
assertThat(e.getMessage(), containsString("Expecting a MeteredWindowStore"));
}
|
@Override
@PublicAPI(usage = ACCESS)
public String getName() {
return descriptor.getFullyQualifiedClassName();
}
|
@Test
@UseDataProvider
public void test_predicate_belong_to(DescribedPredicate<JavaClass> belongToPredicate) {
JavaClasses classes = new ClassFileImporter().importPackagesOf(getClass());
JavaClass outerAnonymous =
getOnlyClassSettingField(classes, ClassWithNamedAndAnonymousInnerClasses.name_of_fieldIndicatingOuterAnonymousInnerClass);
JavaClass nestedAnonymous =
getOnlyClassSettingField(classes, ClassWithNamedAndAnonymousInnerClasses.name_of_fieldIndicatingNestedAnonymousInnerClass);
assertThat(belongToPredicate)
.hasDescription(String.format("belong to any of [%s, %s]",
Object.class.getName(), ClassWithNamedAndAnonymousInnerClasses.class.getName()))
.accepts(classes.get(ClassWithNamedAndAnonymousInnerClasses.class))
.accepts(classes.get(ClassWithNamedAndAnonymousInnerClasses.NamedInnerClass.class))
.accepts(classes.get(ClassWithNamedAndAnonymousInnerClasses.NamedInnerClass.NestedNamedInnerClass.class))
.accepts(outerAnonymous)
.accepts(nestedAnonymous)
.rejects(classes.get(getClass()));
}
|
public double distance(Point point) {
return Math.hypot(this.x - point.x, this.y - point.y);
}
|
@Test
public void distanceTest() {
Point point1 = new Point(0, 0);
Point point2 = new Point(1, 1);
Point point3 = new Point(0, -1);
Assert.assertEquals(0, point1.distance(point1), 0);
Assert.assertEquals(0, point2.distance(point2), 0);
Assert.assertEquals(0, point3.distance(point3), 0);
Assert.assertEquals(Math.sqrt(2), point1.distance(point2), 0);
Assert.assertEquals(Math.sqrt(2), point2.distance(point1), 0);
Assert.assertEquals(1, point1.distance(point3), 0);
Assert.assertEquals(1, point3.distance(point1), 0);
}
|
public void setCalendar( int recordsFilter, GregorianCalendar startDate, GregorianCalendar endDate ) throws KettleException {
this.startDate = startDate;
this.endDate = endDate;
this.recordsFilter = recordsFilter;
if ( this.startDate == null || this.endDate == null ) {
throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.EmptyStartDateOrEndDate" ) );
}
if ( this.startDate.getTime().compareTo( this.endDate.getTime() ) >= 0 ) {
throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.WrongDates" ) );
}
// Calculate difference in days
long diffDays =
( this.endDate.getTime().getTime() - this.startDate.getTime().getTime() ) / ( 24 * 60 * 60 * 1000 );
if ( diffDays > 30 ) {
throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.StartDateTooOlder" ) );
}
}
|
@Test
public void testSetCalendarStartDateTooOlder() throws KettleException {
SalesforceConnection connection = new SalesforceConnection( logInterface, url, username, password );
GregorianCalendar startDate = new GregorianCalendar( 2000, 3, 20 );
GregorianCalendar endDate = new GregorianCalendar( 2000, 2, 10 );
try {
connection.setCalendar( recordsFilter, startDate, endDate );
fail();
} catch ( KettleException expected ) {
// OK
}
}
|
public abstract void updateTableSchema(String dbName, String tableName, List<HCatFieldSchema> columnSchema)
throws HCatException;
|
@Test
public void testUpdateTableSchema() throws Exception {
try {
HCatClient client = HCatClient.create(new Configuration(hcatConf));
final String dbName = "testUpdateTableSchema_DBName";
final String tableName = "testUpdateTableSchema_TableName";
client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
client.createDatabase(HCatCreateDBDesc.create(dbName).build());
List<HCatFieldSchema> oldSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
new HCatFieldSchema("bar", Type.STRING, ""));
client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());
List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""),
new HCatFieldSchema("new", Type.STRING, ""),
new HCatFieldSchema("fields", Type.STRING, ""));
client.updateTableSchema(dbName, tableName, newSchema);
assertArrayEquals(newSchema.toArray(), client.getTable(dbName, tableName).getCols().toArray());
client.dropDatabase(dbName, false, HCatClient.DropDBMode.CASCADE);
}
catch (Exception exception) {
LOG.error("Unexpected exception.", exception);
assertTrue("Unexpected exception: " + exception.getMessage(), false);
}
}
|
@Override
public void check(Model model) {
if (model == null)
return;
List<Model> appenderModels = new ArrayList<>();
deepFindAllModelsOfType(AppenderModel.class, appenderModels, model);
List<Pair<Model, Model>> nestedPairs = deepFindNestedSubModelsOfType(AppenderModel.class, appenderModels);
List<Pair<Model, Model>> filteredNestedPairs = nestedPairs.stream().filter(pair -> !isSiftingAppender(pair.first)).collect(Collectors.toList());
if(filteredNestedPairs.isEmpty()) {
return;
}
addWarn(NESTED_APPENDERS_WARNING);
for(Pair<Model, Model> pair: filteredNestedPairs) {
addWarn("Appender at line "+pair.first.getLineNumber() + " contains a nested appender at line "+pair.second.getLineNumber());
}
}
|
@Test
public void smoke() {
TopModel topModel = new TopModel();
awasc.check(topModel);
statusChecker.assertIsWarningOrErrorFree();
}
|
@Override
public void request(Payload grpcRequest, StreamObserver<Payload> responseObserver) {
traceIfNecessary(grpcRequest, true);
String type = grpcRequest.getMetadata().getType();
long startTime = System.nanoTime();
// Server is still starting.
if (!ApplicationUtils.isStarted()) {
Payload payloadResponse = GrpcUtils.convert(
ErrorResponse.build(NacosException.INVALID_SERVER_STATUS, "Server is starting, please try later."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.INVALID_SERVER_STATUS, null, null, System.nanoTime() - startTime);
return;
}
// Server check.
if (ServerCheckRequest.class.getSimpleName().equals(type)) {
Payload serverCheckResponseP = GrpcUtils.convert(new ServerCheckResponse(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(), true));
traceIfNecessary(serverCheckResponseP, false);
responseObserver.onNext(serverCheckResponseP);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, true,
0, null, null, System.nanoTime() - startTime);
return;
}
RequestHandler requestHandler = requestHandlerRegistry.getByRequestType(type);
// No handler found.
if (requestHandler == null) {
Loggers.REMOTE_DIGEST.warn(String.format("[%s] No handler for request type : %s :", "grpc", type));
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.NO_HANDLER, "RequestHandler Not Found"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.NO_HANDLER, null, null, System.nanoTime() - startTime);
return;
}
// Check connection status.
String connectionId = GrpcServerConstants.CONTEXT_KEY_CONN_ID.get();
boolean requestValid = connectionManager.checkValid(connectionId);
if (!requestValid) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid connection Id ,connection [{}] is un registered ,", "grpc", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.UN_REGISTER, "Connection is unregistered."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.UN_REGISTER, null, null, System.nanoTime() - startTime);
return;
}
Object parseObj = null;
try {
parseObj = GrpcUtils.parse(grpcRequest);
} catch (Exception e) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid request receive from connection [{}] ,error={}", "grpc", connectionId, e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, e.getMessage()));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, e.getClass().getSimpleName(), null, System.nanoTime() - startTime);
return;
}
if (parseObj == null) {
Loggers.REMOTE_DIGEST.warn("[{}] Invalid request receive ,parse request is null", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
if (!(parseObj instanceof Request)) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid request receive ,parsed payload is not a request,parseObj={}", connectionId,
parseObj);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
Request request = (Request) parseObj;
try {
Connection connection = connectionManager.getConnection(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
RequestMeta requestMeta = new RequestMeta();
requestMeta.setClientIp(connection.getMetaInfo().getClientIp());
requestMeta.setConnectionId(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
requestMeta.setClientVersion(connection.getMetaInfo().getVersion());
requestMeta.setLabels(connection.getMetaInfo().getLabels());
requestMeta.setAbilityTable(connection.getAbilityTable());
connectionManager.refreshActiveTime(requestMeta.getConnectionId());
prepareRequestContext(request, requestMeta, connection);
Response response = requestHandler.handleRequest(request, requestMeta);
Payload payloadResponse = GrpcUtils.convert(response);
traceIfNecessary(payloadResponse, false);
if (response.getErrorCode() == NacosException.OVER_THRESHOLD) {
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(() -> {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}, 1000L, TimeUnit.MILLISECONDS);
} else {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}
MetricsMonitor.recordGrpcRequestEvent(type, response.isSuccess(),
response.getErrorCode(), null, request.getModule(), System.nanoTime() - startTime);
} catch (Throwable e) {
Loggers.REMOTE_DIGEST
.error("[{}] Fail to handle request from connection [{}] ,error message :{}", "grpc", connectionId,
e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(e));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
ResponseCode.FAIL.getCode(), e.getClass().getSimpleName(), request.getModule(), System.nanoTime() - startTime);
} finally {
RequestContextHolder.removeContext();
}
}
|
@Test
void testHandleRequestSuccess() {
ApplicationUtils.setStarted(true);
Mockito.when(requestHandlerRegistry.getByRequestType(Mockito.anyString())).thenReturn(mockHandler);
Mockito.when(connectionManager.checkValid(Mockito.any())).thenReturn(true);
String ip = "1.1.1.1";
ConnectionMeta connectionMeta = new ConnectionMeta(connectId, ip, ip, 8888, 9848, "GRPC", "", "", new HashMap<>());
Connection connection = new GrpcConnection(connectionMeta, null, null);
Mockito.when(connectionManager.getConnection(Mockito.any())).thenReturn(connection);
RequestMeta metadata = new RequestMeta();
metadata.setClientIp("127.0.0.1");
metadata.setConnectionId(connectId);
HealthCheckRequest mockRequest = new HealthCheckRequest();
Payload payload = GrpcUtils.convert(mockRequest, metadata);
StreamObserver<Payload> streamObserver = new StreamObserver<Payload>() {
@Override
public void onNext(Payload payload) {
System.out.println("Receive data from server: " + payload);
Object res = GrpcUtils.parse(payload);
assertTrue(res instanceof HealthCheckResponse);
}
@Override
public void onError(Throwable throwable) {
fail(throwable.getMessage());
}
@Override
public void onCompleted() {
System.out.println("complete");
}
};
streamStub.request(payload, streamObserver);
ApplicationUtils.setStarted(false);
}
|
public static String evaluate(String jsonText, JsonEvaluationSpecification specification)
throws JsonMappingException {
// Parse JSON text and get the root node.
JsonNode rootNode;
try {
ObjectMapper mapper = new ObjectMapper();
rootNode = mapper.readTree(new StringReader(jsonText));
} catch (Exception e) {
log.error("Exception while parsing Json text", e);
throw new JsonMappingException("Exception while parsing Json payload");
}
// Retrieve evaluated node within JSON tree.
JsonNode evaluatedNode = rootNode.at(specification.getExp());
String caseKey = evaluatedNode.asText();
switch (specification.getOperator()) {
case equals:
// Consider simple equality.
String value = specification.getCases().get(caseKey);
return (value != null ? value : specification.getCases().getDefault());
case range:
// Consider range evaluation.
double caseNumber = 0.000;
try {
caseNumber = Double.parseDouble(caseKey);
} catch (NumberFormatException nfe) {
log.error(caseKey + " in range expression cannot be parsed as a number. Considering default case.");
return specification.getCases().getDefault();
}
return foundRangeMatchingCase(caseNumber, specification.getCases());
case regexp:
// Consider regular expression evaluation for each case key.
for (String choiceKey : specification.getCases().keySet()) {
if (!"default".equals(choiceKey)) {
if (Pattern.matches(choiceKey, caseKey)) {
return specification.getCases().get(choiceKey);
}
}
}
break;
case size:
// Consider size evaluation.
if (evaluatedNode.isArray()) {
int size = evaluatedNode.size();
return foundRangeMatchingCase(size, specification.getCases());
}
break;
case presence:
// Consider presence evaluation of evaluatedNode directly.
if (evaluatedNode != null && evaluatedNode.toString().length() > 0) {
if (specification.getCases().containsKey("found")) {
return specification.getCases().get("found");
}
} else {
if (specification.getCases().containsKey("missing")) {
return specification.getCases().get("missing");
}
}
break;
}
return specification.getCases().getDefault();
}
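
The exp field of the specification is a Jackson JSON Pointer, evaluated through JsonNode.at(). A small standalone sketch, assuming only jackson-databind on the classpath (the sample document is invented), of the pointer behaviors the operators above rely on:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonPointerSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode root = mapper.readTree(
                "{\"beer\": {\"type\": \"Ale\", \"tags\": [\"belgian\", \"strong\"]}}");
        // 'equals' and 'regexp' read the pointed-at node as text.
        System.out.println(root.at("/beer/type").asText());          // Ale
        // 'size' only applies when the pointer hits an array node.
        System.out.println(root.at("/beer/tags").size());            // 2
        // A dangling pointer yields a MissingNode, which serializes to an empty
        // string; this is what lets 'presence' fall through to the "missing" case.
        System.out.println(root.at("/beer/color").isMissingNode());  // true
    }
}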
|
@Test
void testRegexpOperatorDispatcher() throws Exception {
DispatchCases cases = new DispatchCases();
Map<String, String> dispatchCases = new HashMap<>();
dispatchCases.put(".*[Aa][Ll][Ee].*", "OK");
dispatchCases.put("default", "Bad");
cases.putAll(dispatchCases);
JsonEvaluationSpecification specifications = new JsonEvaluationSpecification();
specifications.setExp("/type");
specifications.setOperator(EvaluationOperator.regexp);
specifications.setCases(cases);
String result = JsonExpressionEvaluator.evaluate(BELGIUM_BEER, specifications);
assertEquals("OK", result);
result = JsonExpressionEvaluator.evaluate(GERMAN_BEER, specifications);
assertEquals("Bad", result);
result = JsonExpressionEvaluator.evaluate(ENGLISH_BEER, specifications);
assertEquals("Bad", result);
}
|
@Override
@Deprecated
public JSONObject put(String key, Object value) throws JSONException {
return set(key, value);
}
|
@Test
public void floatTest() {
final Map<String, Object> map = new HashMap<>();
map.put("c", 2.0F);
final String s = JSONUtil.toJsonStr(map);
assertEquals("{\"c\":2}", s);
}
|
@Override
public void replay(AlterJobV2 replayedJob) {
RollupJobV2 replayedRollupJob = (RollupJobV2) replayedJob;
switch (replayedJob.jobState) {
case PENDING:
replayPending(replayedRollupJob);
break;
case WAITING_TXN:
replayWaitingTxn(replayedRollupJob);
break;
case FINISHED:
replayFinished(replayedRollupJob);
break;
case CANCELLED:
replayCancelled(replayedRollupJob);
break;
default:
break;
}
}
|
@Test
public void testReplayPendingRollupJob() throws Exception {
MaterializedViewHandler materializedViewHandler = GlobalStateMgr.getCurrentState().getRollupHandler();
ArrayList<AlterClause> alterClauses = new ArrayList<>();
alterClauses.add(clause);
Database db = GlobalStateMgr.getCurrentState().getDb(GlobalStateMgrTestUtil.testDb1);
OlapTable olapTable = (OlapTable) db.getTable(GlobalStateMgrTestUtil.testTable1);
materializedViewHandler.process(alterClauses, db, olapTable);
Map<Long, AlterJobV2> alterJobsV2 = materializedViewHandler.getAlterJobsV2();
assertEquals(1, alterJobsV2.size());
RollupJobV2 rollupJob = (RollupJobV2) alterJobsV2.values().stream().findAny().get();
rollupJob.replay(rollupJob);
}
|
public static int triCodeToDubboCode(Code triCode) {
int code;
switch (triCode) {
case DEADLINE_EXCEEDED:
code = TIMEOUT_EXCEPTION;
break;
case PERMISSION_DENIED:
code = FORBIDDEN_EXCEPTION;
break;
case UNAVAILABLE:
code = NETWORK_EXCEPTION;
break;
case UNIMPLEMENTED:
code = METHOD_NOT_FOUND;
break;
default:
code = UNKNOWN_EXCEPTION;
}
return code;
}
|
@Test
void triCodeToDubboCode() {
Assertions.assertEquals(TIMEOUT_EXCEPTION, TriRpcStatus.triCodeToDubboCode(Code.DEADLINE_EXCEEDED));
Assertions.assertEquals(FORBIDDEN_EXCEPTION, TriRpcStatus.triCodeToDubboCode(Code.PERMISSION_DENIED));
Assertions.assertEquals(NETWORK_EXCEPTION, TriRpcStatus.triCodeToDubboCode(Code.UNAVAILABLE));
Assertions.assertEquals(METHOD_NOT_FOUND, TriRpcStatus.triCodeToDubboCode(Code.UNIMPLEMENTED));
Assertions.assertEquals(UNKNOWN_EXCEPTION, TriRpcStatus.triCodeToDubboCode(Code.UNKNOWN));
}
|
public MapConfig setMaxIdleSeconds(int maxIdleSeconds) {
this.maxIdleSeconds = maxIdleSeconds;
return this;
}
|
@Test
public void testSetMaxIdleSeconds() {
assertEquals(1234, new MapConfig().setMaxIdleSeconds(1234).getMaxIdleSeconds());
}
|
public void setMaxShare(Resource resource) {
maxShareMB.set(resource.getMemorySize());
maxShareVCores.set(resource.getVirtualCores());
if (customResources != null) {
customResources.setMaxShare(resource);
}
}
|
@Test
public void testSetMaxShare() {
FSQueueMetrics metrics = setupMetrics(RESOURCE_NAME);
Resource res = Resource.newInstance(2048L, 4, ImmutableMap.of(RESOURCE_NAME,
20L));
metrics.setMaxShare(res);
assertEquals(getErrorMessage("maxShareMB"),
2048L, metrics.getMaxShareMB());
assertEquals(getErrorMessage("maxShareVcores"),
4L, metrics.getMaxShareVirtualCores());
assertEquals(getErrorMessage("maxShareMB"),
2048L, metrics.getMaxShare().getMemorySize());
assertEquals(getErrorMessage("maxShareVcores"),
4L, metrics.getMaxShare().getVirtualCores());
assertEquals(getErrorMessage("maxShare for resource: " + RESOURCE_NAME),
20L, metrics.getMaxShare().getResourceValue(RESOURCE_NAME));
// Setting a Resource that carries no custom value resets that value to 0.
res = Resource.newInstance(2049L, 5);
metrics.setMaxShare(res);
assertEquals(getErrorMessage("maxShareMB"),
2049L, metrics.getMaxShareMB());
assertEquals(getErrorMessage("maxShareVcores"),
5L, metrics.getMaxShareVirtualCores());
assertEquals(getErrorMessage("maxShareMB"),
2049L, metrics.getMaxShare().getMemorySize());
assertEquals(getErrorMessage("maxShareVcores"),
5L, metrics.getMaxShare().getVirtualCores());
assertEquals(getErrorMessage("maxShare for resource: " + RESOURCE_NAME),
0, metrics.getMaxShare().getResourceValue(RESOURCE_NAME));
}
|