| focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
|---|---|
@Override
public void writeLong(final long v) throws IOException {
ensureAvailable(LONG_SIZE_IN_BYTES);
MEM.putLong(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
pos += LONG_SIZE_IN_BYTES;
}
|
@Test
public void testWriteLongForPositionVByteOrder() throws Exception {
long expected = 100;
out.writeLong(10, expected, LITTLE_ENDIAN);
out.writeLong(18, expected, BIG_ENDIAN);
long actual1 = Bits.readLong(out.buffer, 10, false);
long actual2 = Bits.readLong(out.buffer, 18, true);
assertEquals(expected, actual1);
assertEquals(expected, actual2);
}
|
static void shiftRight(Slice decimal, int rightShifts, boolean roundUp, Slice result)
{
if (rightShifts == 0) {
copyUnscaledDecimal(decimal, result);
return;
}
int wordShifts = rightShifts / 64;
int bitShiftsInWord = rightShifts % 64;
int shiftRestore = 64 - bitShiftsInWord;
// check round-ups before setting values to result.
// be aware that result could be the same object as decimal.
boolean roundCarry;
if (bitShiftsInWord == 0) {
roundCarry = roundUp && getLong(decimal, wordShifts - 1) < 0;
}
else {
roundCarry = roundUp && (getLong(decimal, wordShifts) & (1L << (bitShiftsInWord - 1))) != 0;
}
// Store negative before setting values to result.
boolean negative = isNegative(decimal);
long low;
long high;
switch (wordShifts) {
case 0:
low = getLong(decimal, 0);
high = getLong(decimal, 1);
break;
case 1:
low = getLong(decimal, 1);
high = 0;
break;
default:
throw new IllegalArgumentException();
}
if (bitShiftsInWord > 0) {
low = (low >>> bitShiftsInWord) | (high << shiftRestore);
high = (high >>> bitShiftsInWord);
}
if (roundCarry) {
if (low != ALL_BITS_SET_64) {
low++;
}
else {
low = 0;
high++;
}
}
pack(result, low, high, negative);
}
|
@Test
public void testShiftRight()
{
assertShiftRight(unscaledDecimal(0), 0, true, unscaledDecimal(0));
assertShiftRight(unscaledDecimal(0), 33, true, unscaledDecimal(0));
assertShiftRight(unscaledDecimal(1), 1, true, unscaledDecimal(1));
assertShiftRight(unscaledDecimal(-4), 1, true, unscaledDecimal(-2));
assertShiftRight(unscaledDecimal(1L << 32), 32, true, unscaledDecimal(1));
assertShiftRight(unscaledDecimal(1L << 31), 32, true, unscaledDecimal(1));
assertShiftRight(unscaledDecimal(1L << 31), 32, false, unscaledDecimal(0));
assertShiftRight(unscaledDecimal(3L << 33), 34, true, unscaledDecimal(2));
assertShiftRight(unscaledDecimal(3L << 33), 34, false, unscaledDecimal(1));
assertShiftRight(unscaledDecimal(BigInteger.valueOf(0x7FFFFFFFFFFFFFFFL).setBit(63).setBit(64)), 1, true, unscaledDecimal(BigInteger.ONE.shiftLeft(64)));
assertShiftRight(MAX_DECIMAL, 1, true, unscaledDecimal(MAX_DECIMAL_UNSCALED_VALUE.shiftRight(1).add(BigInteger.ONE)));
assertShiftRight(MIN_DECIMAL, 1, true, unscaledDecimal(MAX_DECIMAL_UNSCALED_VALUE.shiftRight(1).add(BigInteger.ONE).negate()));
assertShiftRight(MAX_DECIMAL, 66, true, unscaledDecimal(MAX_DECIMAL_UNSCALED_VALUE.shiftRight(66).add(BigInteger.ONE)));
}
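A minimal sketch of the round-up rule exercised above, assuming the magnitude fits in a single long and 0 < shift < 64 (the real shiftRight works on 128-bit values and tracks the sign separately); the helper name below is hypothetical and only for illustration:
static long shiftRightRoundUp(long magnitude, int shift) {
    long truncated = magnitude >>> shift;
    // the last bit shifted out decides whether to round up
    boolean roundBit = ((magnitude >>> (shift - 1)) & 1L) != 0;
    return roundBit ? truncated + 1 : truncated;
}
// shiftRightRoundUp(1, 1)         -> 1  (matches unscaledDecimal(1) shifted by 1 with roundUp)
// shiftRightRoundUp(4, 1)         -> 2  (the sign is stored separately, so -4 becomes -2)
// shiftRightRoundUp(3L << 33, 34) -> 2  (matches the 3L << 33 case in the test)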
|
@Override
public Result reconcile(Request request) {
var events = new LinkedHashSet<ApplicationEvent>();
client.fetch(Post.class, request.name())
.ifPresent(post -> {
if (ExtensionOperator.isDeleted(post)) {
removeFinalizers(post.getMetadata(), Set.of(FINALIZER_NAME));
unPublishPost(post, events);
events.add(new PostDeletedEvent(this, post));
cleanUpResources(post);
// update the post so that it can be collected by the garbage collector.
client.update(post);
// fire event after updating post
events.forEach(eventPublisher::publishEvent);
return;
}
addFinalizers(post.getMetadata(), Set.of(FINALIZER_NAME));
populateLabels(post, events);
schedulePublishIfNecessary(post);
subscribeNewCommentNotification(post);
var status = post.getStatus();
if (status == null) {
status = new Post.PostStatus();
post.setStatus(status);
}
if (post.isPublished() && post.getSpec().getPublishTime() == null) {
post.getSpec().setPublishTime(Instant.now());
}
// calculate the sha256sum
var configSha256sum = Hashing.sha256().hashString(post.getSpec().toString(), UTF_8)
.toString();
var annotations = nullSafeAnnotations(post);
var oldConfigChecksum = annotations.get(Constant.CHECKSUM_CONFIG_ANNO);
if (!Objects.equals(oldConfigChecksum, configSha256sum)) {
// if the checksum doesn't match
events.add(new PostUpdatedEvent(this, post.getMetadata().getName()));
annotations.put(Constant.CHECKSUM_CONFIG_ANNO, configSha256sum);
}
if (shouldUnPublish(post)) {
unPublishPost(post, events);
} else {
publishPost(post, events);
}
var permalinkPattern = postPermalinkPolicy.pattern();
annotations.put(Constant.PERMALINK_PATTERN_ANNO, permalinkPattern);
status.setPermalink(postPermalinkPolicy.permalink(post));
if (status.getPhase() == null) {
status.setPhase(PostPhase.DRAFT.toString());
}
var excerpt = post.getSpec().getExcerpt();
if (excerpt == null) {
excerpt = new Post.Excerpt();
}
var isAutoGenerate = defaultIfNull(excerpt.getAutoGenerate(), true);
if (isAutoGenerate) {
status.setExcerpt(getExcerpt(post));
} else {
status.setExcerpt(excerpt.getRaw());
}
var ref = Ref.of(post);
// handle contributors
var headSnapshot = post.getSpec().getHeadSnapshot();
var contributors = listSnapshots(ref)
.stream()
.map(snapshot -> {
Set<String> usernames = snapshot.getSpec().getContributors();
return Objects.requireNonNullElseGet(usernames,
() -> new HashSet<String>());
})
.flatMap(Set::stream)
.distinct()
.sorted()
.toList();
status.setContributors(contributors);
// update in progress status
status.setInProgress(
!StringUtils.equals(headSnapshot, post.getSpec().getReleaseSnapshot()));
computeHiddenState(post);
// version + 1 is required to match the actual version,
// as the version will be incremented after this update
status.setObservedVersion(post.getMetadata().getVersion() + 1);
client.update(post);
// fire event after updating post
events.forEach(eventPublisher::publishEvent);
});
return Result.doNotRetry();
}
|
@Test
void reconcile() {
String name = "post-A";
Post post = TestPost.postV1();
post.getSpec().setPublish(false);
post.getSpec().setHeadSnapshot("post-A-head-snapshot");
when(client.fetch(eq(Post.class), eq(name)))
.thenReturn(Optional.of(post));
when(postService.getContent(eq(post.getSpec().getReleaseSnapshot()),
eq(post.getSpec().getBaseSnapshot())))
.thenReturn(Mono.empty());
Snapshot snapshotV1 = TestPost.snapshotV1();
Snapshot snapshotV2 = TestPost.snapshotV2();
snapshotV1.getSpec().setContributors(Set.of("guqing"));
snapshotV2.getSpec().setContributors(Set.of("guqing", "zhangsan"));
when(client.listAll(eq(Snapshot.class), any(), any()))
.thenReturn(List.of(snapshotV1, snapshotV2));
ArgumentCaptor<Post> captor = ArgumentCaptor.forClass(Post.class);
postReconciler.reconcile(new Reconciler.Request(name));
verify(client, times(1)).update(captor.capture());
verify(postPermalinkPolicy, times(1)).permalink(any());
Post value = captor.getValue();
assertThat(value.getStatus().getExcerpt()).isEmpty();
assertThat(value.getStatus().getContributors()).isEqualTo(List.of("guqing", "zhangsan"));
}
|
public byte[] value() {
return value;
}
|
@Test
void testRawJsonWriter() throws IOException {
var rawJson = new RawJson("""
{"key": "value"}
""");
var writer = new RawJsonWriter();
var baos = new ByteArrayOutputStream();
try (var gen = JsonCommonModule.JSON_FACTORY.createGenerator(baos, JsonEncoding.UTF8)) {
writer.write(gen, rawJson);
}
Assertions.assertThat(rawJson.value()).isEqualTo(baos.toByteArray());
}
|
public static void checkMetaDir() throws InvalidMetaDirException,
IOException {
// check meta dir
// if metaDir is the default config: StarRocksFE.STARROCKS_HOME_DIR + "/meta",
// we should check whether both the new default dir (STARROCKS_HOME_DIR + "/meta")
// and the old default dir (DORIS_HOME_DIR + "/doris-meta") are present. If both are present,
// we need to let users keep only one to avoid starting from outdated metadata.
Path oldDefaultMetaDir = Paths.get(System.getenv("DORIS_HOME") + "/doris-meta");
Path newDefaultMetaDir = Paths.get(System.getenv("STARROCKS_HOME") + "/meta");
Path metaDir = Paths.get(Config.meta_dir);
if (metaDir.equals(newDefaultMetaDir)) {
File oldMeta = new File(oldDefaultMetaDir.toUri());
File newMeta = new File(newDefaultMetaDir.toUri());
if (oldMeta.exists() && newMeta.exists()) {
LOG.error("New default meta dir: {} and Old default meta dir: {} are both present. " +
"Please make sure {} has the latest data, and remove the another one.",
newDefaultMetaDir, oldDefaultMetaDir, newDefaultMetaDir);
throw new InvalidMetaDirException();
}
}
File meta = new File(metaDir.toUri());
if (!meta.exists()) {
// If metaDir is not the default config, it means the user has specified another directory
// We should not use the oldDefaultMetaDir.
// Just exit in this case
if (!metaDir.equals(newDefaultMetaDir)) {
LOG.error("meta dir {} dose not exist", metaDir);
throw new InvalidMetaDirException();
}
File oldMeta = new File(oldDefaultMetaDir.toUri());
if (oldMeta.exists()) {
// For backward compatibility
Config.meta_dir = oldDefaultMetaDir.toString();
} else {
LOG.error("meta dir {} does not exist", meta.getAbsolutePath());
throw new InvalidMetaDirException();
}
}
long lowerFreeDiskSize = Long.parseLong(EnvironmentParams.FREE_DISK.getDefault());
FileStore store = Files.getFileStore(Paths.get(Config.meta_dir));
if (store.getUsableSpace() < lowerFreeDiskSize) {
LOG.error("Free capacity left for meta dir: {} is less than {}",
Config.meta_dir, new ByteSizeValue(lowerFreeDiskSize));
throw new InvalidMetaDirException();
}
Path imageDir = Paths.get(Config.meta_dir + GlobalStateMgr.IMAGE_DIR);
Path bdbDir = Paths.get(BDBEnvironment.getBdbDir());
boolean haveImageData = false;
if (Files.exists(imageDir)) {
try (Stream<Path> stream = Files.walk(imageDir)) {
haveImageData = stream.anyMatch(path -> path.getFileName().toString().startsWith("image."));
}
}
boolean haveBDBData = false;
if (Files.exists(bdbDir)) {
try (Stream<Path> stream = Files.walk(bdbDir)) {
haveBDBData = stream.anyMatch(path -> path.getFileName().toString().endsWith(".jdb"));
}
}
if (haveImageData && !haveBDBData && !Config.start_with_incomplete_meta) {
LOG.error("image exists, but bdb dir is empty, " +
"set start_with_incomplete_meta to true if you want to forcefully recover from image data, " +
"this may end with stale meta data, so please be careful.");
throw new InvalidMetaDirException();
}
}
|
@Test(expected = InvalidMetaDirException.class)
public void testImageExistBDBNotExist() throws IOException,
InvalidMetaDirException {
Config.start_with_incomplete_meta = false;
Config.meta_dir = testDir + "/meta";
mkdir(Config.meta_dir + "/image");
File file = new File(Config.meta_dir + "/image/image.123");
Assert.assertTrue(file.createNewFile());
try {
MetaHelper.checkMetaDir();
} finally {
deleteDir(new File(testDir + "/"));
}
}
|
public boolean writeExternalAddress() {
List<NodeAddress> addresses = client.nodes().withName(config.getNodeName()).get().getStatus().getAddresses();
StringBuilder externalAddresses = new StringBuilder();
String address = NodeUtils.findAddress(addresses, null);
if (address == null) {
LOGGER.error("External address not found");
return false;
} else {
LOGGER.info("Default External address found {}", address);
externalAddresses.append(externalAddressExport(null, address));
}
for (NodeAddressType type : NodeAddressType.values()) {
address = NodeUtils.findAddress(addresses, type);
LOGGER.info("External {} address found {}", type.toValue(), address);
externalAddresses.append(externalAddressExport(type, address));
}
return write(FILE_EXTERNAL_ADDRESS, externalAddresses.toString());
}
|
@Test
public void testWriteExternalAddress() throws IOException {
// create and configure (env vars) the path to the rack-id file
File kafkaFolder = new File(tempDir.getPath(), "/opt/kafka");
String addressFolder = kafkaFolder.getAbsolutePath() + "/external.address";
new File(addressFolder).mkdirs();
Map<String, String> envVars = new HashMap<>(ENV_VARS);
envVars.put(InitWriterConfig.INIT_FOLDER.key(), addressFolder);
InitWriterConfig config = InitWriterConfig.fromMap(envVars);
KubernetesClient client = mockKubernetesClient(config.getNodeName(), Map.of(), ADDRESSES);
InitWriter writer = new InitWriter(client, config);
assertThat(writer.writeExternalAddress(), is(true));
assertThat(readFile(addressFolder + "/external.address"), is("export STRIMZI_NODEPORT_DEFAULT_ADDRESS=my.external.address\n" +
"export STRIMZI_NODEPORT_EXTERNALIP_ADDRESS=my.external.address\n" +
"export STRIMZI_NODEPORT_EXTERNALDNS_ADDRESS=my.external.address\n" +
"export STRIMZI_NODEPORT_INTERNALIP_ADDRESS=192.168.2.94\n" +
"export STRIMZI_NODEPORT_INTERNALDNS_ADDRESS=my.internal.address\n" +
"export STRIMZI_NODEPORT_HOSTNAME_ADDRESS=my.external.address\n"));
}
|
public static Optional<Path> getQualifiedRemoteProvidedUsrLib(
org.apache.flink.configuration.Configuration configuration,
YarnConfiguration yarnConfiguration)
throws IOException, IllegalArgumentException {
String usrlib = configuration.get(YarnConfigOptions.PROVIDED_USRLIB_DIR);
if (usrlib == null) {
return Optional.empty();
}
final Path qualifiedUsrLibPath =
FileSystem.get(yarnConfiguration).makeQualified(new Path(usrlib));
checkArgument(
isRemotePath(qualifiedUsrLibPath.toString()),
"The \"%s\" must point to a remote dir "
+ "which is accessible from all worker nodes.",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key());
checkArgument(
isUsrLibDirectory(FileSystem.get(yarnConfiguration), qualifiedUsrLibPath),
"The \"%s\" should be named with \"%s\".",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key(),
ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR);
return Optional.of(qualifiedUsrLibPath);
}
|
@Test
void testSharedUsrLibIsNotRemotePathShouldThrowException(@TempDir Path tempDir) {
final File localLib = new File(tempDir.toAbsolutePath().toString(), "usrlib");
assertThat(localLib.mkdirs()).isTrue();
final Configuration flinkConfig = new Configuration();
flinkConfig.set(YarnConfigOptions.PROVIDED_USRLIB_DIR, localLib.getAbsolutePath());
assertThatThrownBy(
() ->
Utils.getQualifiedRemoteProvidedUsrLib(
flinkConfig, new YarnConfiguration()))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"The \"%s\" must point to a remote dir "
+ "which is accessible from all worker nodes.",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key());
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
}
|
@Test
void invokePositive() {
FunctionTestUtil.assertResultBigDecimal(ceilingFunction.invoke(BigDecimal.valueOf(10.2)),
BigDecimal.valueOf(11));
}
|
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
final List<FieldInfo> allFields = schema.columns().stream()
.map(EntityUtil::toFieldInfo)
.collect(Collectors.toList());
if (allFields.isEmpty()) {
throw new IllegalArgumentException("Root schema should contain columns: " + schema);
}
return allFields;
}
|
@Test
public void shouldBuildCorrectArrayField() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.valueColumn(ColumnName.of("field"), SqlTypes.array(SqlTypes.BIGINT))
.build();
// When:
final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema);
// Then:
assertThat(fields, hasSize(1));
assertThat(fields.get(0).getName(), equalTo("field"));
assertThat(fields.get(0).getSchema().getTypeName(), equalTo("ARRAY"));
assertThat(fields.get(0).getSchema().getFields(), equalTo(Optional.empty()));
assertThat(fields.get(0).getSchema().getMemberSchema().get().getTypeName(),
equalTo("BIGINT"));
}
|
@Override
public boolean supportsExpressionsInOrderBy() {
return false;
}
|
@Test
void assertSupportsExpressionsInOrderBy() {
assertFalse(metaData.supportsExpressionsInOrderBy());
}
|
@Override
public Result reconcile(Request request) {
client.fetch(Theme.class, request.name())
.ifPresent(theme -> {
if (isDeleted(theme)) {
cleanUpResourcesAndRemoveFinalizer(request.name());
return;
}
addFinalizerIfNecessary(theme);
themeSettingDefaultConfig(theme);
reconcileStatus(request.name());
});
return new Result(false, null);
}
|
@Test
void themeSettingDefaultValue() throws IOException, JSONException {
Path testWorkDir = tempDirectory.resolve("reconcile-setting-value");
Files.createDirectory(testWorkDir);
when(themeRoot.get()).thenReturn(testWorkDir);
Theme theme = new Theme();
Metadata metadata = new Metadata();
metadata.setName("theme-test");
theme.setMetadata(metadata);
theme.setKind(Theme.KIND);
theme.setApiVersion("theme.halo.run/v1alpha1");
Theme.ThemeSpec themeSpec = new Theme.ThemeSpec();
themeSpec.setSettingName(null);
theme.setSpec(themeSpec);
when(extensionClient.fetch(eq(Theme.class), eq(metadata.getName())))
.thenReturn(Optional.of(theme));
Reconciler.Result reconcile =
themeReconciler.reconcile(new Reconciler.Request(metadata.getName()));
assertThat(reconcile.reEnqueue()).isFalse();
verify(extensionClient, times(3)).fetch(eq(Theme.class), eq(metadata.getName()));
// setting exists
themeSpec.setSettingName("theme-test-setting");
assertThat(theme.getSpec().getConfigMapName()).isNull();
ArgumentCaptor<Theme> captor = ArgumentCaptor.forClass(Theme.class);
themeReconciler.reconcile(new Reconciler.Request(metadata.getName()));
verify(extensionClient, times(6))
.fetch(eq(Theme.class), eq(metadata.getName()));
verify(extensionClient, times(3))
.update(captor.capture());
Theme value = captor.getValue();
assertThat(value.getSpec().getConfigMapName()).isNotNull();
// populate setting name and configMap name, but the configMap does not exist
themeSpec.setSettingName("theme-test-setting");
themeSpec.setConfigMapName("theme-test-configmap");
when(extensionClient.fetch(eq(ConfigMap.class), any()))
.thenReturn(Optional.empty());
when(extensionClient.fetch(eq(Setting.class), eq(themeSpec.getSettingName())))
.thenReturn(Optional.of(getFakeSetting()));
themeReconciler.reconcile(new Reconciler.Request(metadata.getName()));
verify(extensionClient, times(2))
.fetch(eq(Setting.class), eq(themeSpec.getSettingName()));
ArgumentCaptor<ConfigMap> configMapCaptor = ArgumentCaptor.forClass(ConfigMap.class);
verify(extensionClient, times(1)).create(any(ConfigMap.class));
verify(extensionClient, times(1)).create(configMapCaptor.capture());
ConfigMap defaultValueConfigMap = configMapCaptor.getValue();
Map<String, String> data = defaultValueConfigMap.getData();
JSONAssert.assertEquals("""
{
"sns": "{\\"email\\":\\"example@exmple.com\\"}"
}
""",
JsonUtils.objectToJson(data),
true);
}
|
JimfsPath resolve(Name name) {
return resolve(pathService.createFileName(name));
}
|
@Test
public void testResolveName_againstEmptyPath() {
// resolve(Name) is only used in the DirectoryStream implementation, so it's only used to
// resolve the names of real existing files against some base directory's path. The base
// directory path could be the working directory path (i.e. just an empty string), in which case
// we need to be sure to return a path that is just the name of the file, as opposed to a path with
// two names, one being the empty string and the other the file name.
// See https://github.com/google/jimfs/issues/105
assertPathEquals("foo", pathService.emptyPath().resolve(Name.simple("foo")));
}
|
@Override
public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException {
final byte[] digest = this.digest("SHA-256", this.normalize(in, status), status);
return new Checksum(HashAlgorithm.sha256, digest);
}
|
@Test
public void testCompute() throws Exception {
assertEquals("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
new SHA256ChecksumCompute().compute(new NullInputStream(0), new TransferStatus()).hash);
}
|
@Override
public String getProperty(final String key) {
String value = super.getProperty(key);
if (null != value) {
return value;
}
for (Properties each : multiProps) {
value = each.getProperty(key);
if (null != value) {
return value;
}
}
return null;
}
|
@Test
void assertGetProperty() {
assertThat(multiSourceProperties.getProperty("keyA"), is("valueA"));
assertThat(multiSourceProperties.getProperty("keyB"), is("valueB"));
assertNull(multiSourceProperties.getProperty("keyC"));
}
|
@VisibleForTesting
static List<Reporter> getReporters() {
return self.reporters;
}
|
@Test
public void allReporters() throws Exception {
String jsonFile = System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") +
"TestMetricsOutput.json";
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METRICS_REPORTERS, "json,jmx,console,hadoop");
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METRICS_JSON_FILE_LOCATION, jsonFile);
initializeMetrics(conf);
Assert.assertEquals(4, Metrics.getReporters().size());
}
|
public FontMetrics parse() throws IOException
{
return parseFontMetric(false);
}
|
@Test
void testHelveticaFontMetricsReducedDataset() throws IOException
{
AFMParser parser = new AFMParser(
new FileInputStream("src/test/resources/afm/Helvetica.afm"));
checkHelveticaFontMetrics(parser.parse(true));
}
|
public PropertiesSnapshot getWorkflowPropertiesSnapshot(String workflowId, String snapshotId) {
if (Constants.LATEST_INSTANCE_RUN.equalsIgnoreCase(snapshotId)) {
return getLatestPropertiesSnapshot(workflowId);
} else {
throw new UnsupportedOperationException("Specific snapshot version is not implemented.");
}
}
|
@Test
public void testGetWorkflowPropertiesSnapshot() throws Exception {
WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID1);
workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
assertNotNull(wfd.getInternalId());
PropertiesSnapshot snapshot =
workflowDao.getWorkflowPropertiesSnapshot(TEST_WORKFLOW_ID1, "latest");
assertEquals(TEST_WORKFLOW_ID1, snapshot.getWorkflowId());
assertEquals(RunStrategy.Rule.PARALLEL, snapshot.getRunStrategy().getRule());
assertEquals(20, snapshot.getRunStrategy().getWorkflowConcurrency());
assertEquals(20, snapshot.getStepConcurrency().longValue());
assertEquals(1, snapshot.getAlerting().getTct().getCompletedByHour().intValue());
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
long currentOffset = 0;
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
currentOffset += abortTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
// Ensure that we don't return any of the aborted records, but still advance the consumer position.
assertFalse(fetchedRecords.containsKey(tp0));
assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
|
@Override
public synchronized void init(ProcessingEnvironment processingEnv) {
super.init(processingEnv);
this.koraAppElement = this.elements.getTypeElement(CommonClassNames.koraApp.canonicalName());
if (this.koraAppElement == null) {
return;
}
this.moduleElement = this.elements.getTypeElement(CommonClassNames.module.canonicalName());
this.koraSubmoduleElement = this.elements.getTypeElement(CommonClassNames.koraSubmodule.canonicalName());
this.componentElement = this.elements.getTypeElement(CommonClassNames.component.canonicalName());
this.initialized = true;
this.ctx = new ProcessingContext(processingEnv);
log.info("@KoraApp processor started");
}
|
@Test
void appWithFactory() throws Throwable {
testClass(AppWithFactories1.class).init();
testClass(AppWithFactories2.class).init();
testClass(AppWithFactories3.class).init();
testClass(AppWithFactories4.class).init();
// testClass(AppWithFactories5.class).init(); // TODO: no longer needed
assertThatThrownBy(() -> testClass(AppWithFactories6.class))
.isInstanceOf(CompilationErrorException.class)
.hasMessageStartingWith("There's a cycle in graph:");
testClass(AppWithFactories7.class).init();
testClass(AppWithFactories8.class).init();
testClass(AppWithFactories9.class).init();
assertThatThrownBy(() -> testClass(AppWithFactories10.class))
.isInstanceOf(CompilationErrorException.class)
.hasMessageStartingWith("Required dependency type was not found and can't be auto created: java.io.Closeable")
.asInstanceOf(type(CompilationErrorException.class))
.extracting(CompilationErrorException::getDiagnostics, list(Diagnostic.class))
.anySatisfy(d -> {
assertThat(d.getKind()).isEqualTo(Diagnostic.Kind.ERROR);
assertThat(d.getMessage(Locale.ENGLISH)).isEqualTo("""
Required dependency type was not found and can't be auto created: java.io.Closeable.
Please check class for @Component annotation or that required module with component is plugged in.
Dependency chain:
ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories10.mock1
Requested at: ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories10.mock1(java.io.Closeable)
""".trim());
});
// assertThatThrownBy(() -> testClass(AppWithFactories11.class))
// .isInstanceOf(CompilationErrorException.class)
// .hasMessageContaining("Required dependency was not found and candidate class ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.GenericClass<java.lang.String> is not final")
// .asInstanceOf(type(CompilationErrorException.class))
// .extracting(CompilationErrorException::getDiagnostics, list(Diagnostic.class))
// .anySatisfy(d -> {
// assertThat(d.getKind()).isEqualTo(Diagnostic.Kind.ERROR);
// assertThat(d.getMessage(Locale.ENGLISH)).isEqualTo("""
// Required dependency was not found and candidate class java.lang.Long has more then one public constructor
// Requested at: ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.<T>factory2(java.lang.Long)
// \s
// Factory ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11#factory2 failed to produce component because of missing dependency of type java.lang.Long
// Factory ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11#factory1 failed to produce component because of missing dependency of type java.io.Closeable
// Required dependency implementation was not found java.io.Closeable, check if it is declared or appropriate module is declared in @KoraApp
// Requested at: ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.<T>factory1(java.io.Closeable)
// \s
// Required dependency was not found and candidate class ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.GenericClass<java.lang.String> is not final
// Requested at: ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.mock1(ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithFactories11.GenericClass<java.lang.String>)
// """.stripTrailing());
//
// });
testClass(AppWithFactories12.class).init();
}
@Test
void appWithExtension() throws Throwable {
var graphDraw = testClass(AppWithExtension.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(3);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void extensionShouldHandleAnnotationsItProvidesAnnotationProcessorFor() throws Throwable {
var graphDraw = testClass(AppWithProcessorExtension.class, List.of(new AppWithProcessorExtension.TestProcessor()));
Assertions.assertThat(graphDraw.getNodes()).hasSize(2);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithComponentDescriptorCollisionAndDirect() {
assertThatThrownBy(() -> testClass(AppWithComponentCollisionAndDirect.class))
.isInstanceOfSatisfying(CompilationErrorException.class, e -> SoftAssertions.assertSoftly(s -> {
var error = e.getDiagnostics().stream().filter(d -> d.getKind() == Diagnostic.Kind.ERROR).findFirst().get();
s.assertThat(error.getMessage(Locale.US))
.startsWith("More than one component matches dependency claim ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect.Class1:");
s.assertThat(error.getMessage(Locale.US)).contains("FromModuleComponent[type=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect.Class1, module=MixedInModule[element=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect], tags=[], method=c1(), methodParameterTypes=[], typeVariables=[]");
s.assertThat(error.getMessage(Locale.US)).contains("FromModuleComponent[type=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect.Class1, module=MixedInModule[element=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect], tags=[], method=c2(), methodParameterTypes=[], typeVariables=[]");
s.assertThat(error.getMessage(Locale.US)).contains("FromModuleComponent[type=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect.Class1, module=MixedInModule[element=ru.tinkoff.kora.kora.app.annotation.processor.app.AppWithComponentCollisionAndDirect], tags=[], method=c3(), methodParameterTypes=[], typeVariables=[]");
}));
}
@Test
void appWithMultipleTags() throws Throwable {
var graphDraw = testClass(AppWithMultipleTags.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(12);
var graph = graphDraw.init();
Assertions.assertThat(graph).isNotNull();
var nonTaggedClass3 = findNodesOf(graphDraw, AppWithMultipleTags.Class3.class);
Assertions.assertThat(nonTaggedClass3).hasSize(1);
var anyTaggedClass3 = findNodesOf(graphDraw, AppWithMultipleTags.Class3.class, AppWithMultipleTags.class);
Assertions.assertThat(anyTaggedClass3).hasSize(1);
Assertions.assertThat(graph.get(anyTaggedClass3.get(0)).class1s()).hasSize(4);
var tag1TaggedClass3 = findNodesOf(graphDraw, AppWithMultipleTags.Class3.class, AppWithMultipleTags.Tag1.class);
Assertions.assertThat(tag1TaggedClass3).hasSize(1);
Assertions.assertThat(graph.get(tag1TaggedClass3.get(0)).class1s()).hasSize(1);
var tag2Tag3Taggedlass3 = findNodesOf(graphDraw, AppWithMultipleTags.Class3.class, AppWithMultipleTags.Tag2.class, AppWithMultipleTags.Tag3.class);
Assertions.assertThat(tag2Tag3Taggedlass3).hasSize(1);
Assertions.assertThat(graph.get(tag2Tag3Taggedlass3.get(0)).class1s()).hasSize(2);
var tag4TaggedClass3 = findNodesOf(graphDraw, AppWithMultipleTags.Class3.class, AppWithMultipleTags.Tag4.class);
Assertions.assertThat(tag4TaggedClass3).hasSize(1);
Assertions.assertThat(graph.get(tag4TaggedClass3.get(0)).class1s()).hasSize(1);
}
@Test
void appWithWrappedComponent() throws Exception {
var graphDraw = testClass(AppWithWrappedDependency.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(7);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithNestedClasses() throws Exception {
var graphDraw = testClass(AppWithNestedClasses.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(2);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithLazyComponents() throws Exception {
var graphDraw = testClass(AppWithLazyComponents.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(3);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithModuleOf() throws Exception {
var graphDraw = testClass(AppWithModuleOf.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(2);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithClassWithComponentOf() throws Exception {
var graphDraw = testClass(AppWithClassWithComponentOf.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(5);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
}
@Test
void appWithPromiseOf() throws Exception {
var graphDraw = testClass(AppWithPromiseOf.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(6);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
materializedGraph.release();
}
@Test
void appWithOverridenModule() throws Exception {
var graphDraw = testClass(AppWithOverridenModule.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(2);
var materializedGraph = graphDraw.init();
Assertions.assertThat(materializedGraph).isNotNull();
materializedGraph.release();
}
@Test
void appWithExactDependencyMatch() throws Exception {
var graphDraw = testClass(AppWithExactMatch.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(8);
}
@Test
void appWithComponentsWithSameName() throws Exception {
var graphDraw = testClass(AppWithComponentsWithSameName.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(4);
}
@Test
void appPart() throws Exception {
var classLoader = TestUtils.annotationProcess(AppWithAppPart.class, new KoraAppProcessor());
var clazz = classLoader.loadClass(AppWithAppPart.class.getName() + "SubmoduleImpl");
Assertions.assertThat(clazz).isNotNull()
.isInterface()
.hasDeclaredMethods("_component0", "_component1")
.matches(Predicate.not(AppWithAppPart.Module.class::isAssignableFrom));
var targetFile1 = "src/test/java/" + AppWithAppPartApp.class.getName().replace('.', '/') + ".java";
var targetFile2 = "in-test-generated/classes/" + clazz.getCanonicalName().replace('.', '/') + ".class";
classLoader = TestUtils.annotationProcessFiles(List.of(targetFile1, targetFile2), false, new KoraAppProcessor());
var appClazz = classLoader.loadClass(AppWithAppPartApp.class.getName() + "Graph");
}
@Test
void appWithDefaultComponent() throws Throwable {
var graphDraw = testClass(AppWithDefaultComponent.class);
Assertions.assertThat(graphDraw.getNodes()).hasSize(3);
var graph = graphDraw.init();
Assertions.assertThat(graph).isNotNull();
var class1Nodes = findNodesOf(graphDraw, AppWithDefaultComponent.Class1.class);
Assertions.assertThat(class1Nodes).hasSize(1);
var class1Node = class1Nodes.get(0);
assertThat(graph.get(class1Node).value()).isEqualTo(2);
}
@SuppressWarnings("unchecked")
<T> Node<T> findNodeOf(ApplicationGraphDraw graphDraw, Class<T> type, Class<?>... tags) {
var nodes = findNodesOf(graphDraw, type, tags);
if (nodes.size() != 1) {
throw new IllegalStateException();
}
return (Node<T>) nodes.get(0);
}
@SuppressWarnings("unchecked")
<T> List<Node<? extends T>> findNodesOf(ApplicationGraphDraw graphDraw, Class<T> type, Class<?>... tags) {
var graph = graphDraw.init();
var anyTag = Arrays.asList(tags).contains(Tag.Any.class);
var nonTagged = tags.length == 0;
return graphDraw.getNodes().stream()
.filter(node -> type.isInstance(graph.get(node)))
.map(node -> (Node<? extends T>) node)
.filter(node -> {
if (anyTag) {
return true;
}
if (nonTagged) {
return node.tags().length == 0;
}
return Arrays.stream(tags).allMatch(tag -> Arrays.asList(node.tags()).contains(tag));
})
.collect(Collectors.toList());
}
ApplicationGraphDraw testClass(Class<?> targetClass) throws Exception {
return testClass(targetClass, List.of());
}
ApplicationGraphDraw testClass(Class<?> targetClass, List<Processor> processors) throws Exception {
try {
var processorsArray = new ArrayList<>(processors).toArray(new Processor[processors.size() + 1]);
processorsArray[processors.size()] = new KoraAppProcessor();
var classLoader = TestUtils.annotationProcess(targetClass, processorsArray);
var clazz = classLoader.loadClass(targetClass.getName() + "Graph");
@SuppressWarnings("unchecked")
var constructors = (Constructor<? extends Supplier<? extends ApplicationGraphDraw>>[]) clazz.getConstructors();
return constructors[0].newInstance().get();
} catch (Exception e) {
if (e.getCause() != null) {
throw (Exception) e.getCause();
}
throw e;
}
}
}
|
List<HikariDataSource> build(Environment environment, Callback<HikariDataSource> callback) {
List<HikariDataSource> dataSources = new ArrayList<>();
Binder.get(environment).bind("db", Bindable.ofInstance(this));
Preconditions.checkArgument(Objects.nonNull(num), "db.num is null");
Preconditions.checkArgument(CollectionUtils.isNotEmpty(user), "db.user or db.user.[index] is null");
Preconditions.checkArgument(CollectionUtils.isNotEmpty(password), "db.password or db.password.[index] is null");
for (int index = 0; index < num; index++) {
int currentSize = index + 1;
Preconditions.checkArgument(url.size() >= currentSize, "db.url.%s is null", index);
DataSourcePoolProperties poolProperties = DataSourcePoolProperties.build(environment);
if (StringUtils.isEmpty(poolProperties.getDataSource().getDriverClassName())) {
poolProperties.setDriverClassName(JDBC_DRIVER_NAME);
}
poolProperties.setJdbcUrl(url.get(index).trim());
poolProperties.setUsername(getOrDefault(user, index, user.get(0)).trim());
poolProperties.setPassword(getOrDefault(password, index, password.get(0)).trim());
HikariDataSource ds = poolProperties.getDataSource();
if (StringUtils.isEmpty(ds.getConnectionTestQuery())) {
ds.setConnectionTestQuery(TEST_QUERY);
}
dataSources.add(ds);
callback.accept(ds);
}
Preconditions.checkArgument(CollectionUtils.isNotEmpty(dataSources), "no datasource available");
return dataSources;
}
|
@Test
void externalDatasourceFailureWithLarkInfo() {
assertThrows(IllegalArgumentException.class, () -> {
MockEnvironment environment = new MockEnvironment();
new ExternalDataSourceProperties().build(environment, null);
});
}
|
@SuppressWarnings("unchecked")
public static void addIQProvider(String elementName, String namespace,
Object provider) {
validate(elementName, namespace);
// First remove existing providers
QName key = removeIQProvider(elementName, namespace);
if (provider instanceof IqProvider) {
iqProviders.put(key, (IqProvider<IQ>) provider);
} else {
throw new IllegalArgumentException("Provider must be an instance of IqProvider");
}
}
|
@Test
public void shouldInitializeSmackTest() throws Exception {
ProviderManager.addIQProvider("foo", "bar", new TestIQProvider());
assertTrue(SmackConfiguration.isSmackInitialized());
}
|
@Override
public Producer createProducer() {
return producerFactorySupplier.get().getInstance(this, getDynamicRouterComponent(), configuration);
}
|
@Test
void testCreateProducer() {
Producer actualProducer = endpoint.createProducer();
assertEquals(producer, actualProducer);
}
|
public void setContent(String content) {
this.content = content;
}
|
@Test
void setContent() {
ConfigFuture configFuture = new ConfigFuture("file.conf", "defaultValue", ConfigFuture.ConfigOperation.GET);
Assertions.assertEquals("defaultValue", configFuture.getContent());
configFuture.setContent("testValue");
Assertions.assertEquals("testValue", configFuture.getContent());
}
|
@ScalarFunction
public static byte[] fromULL(byte[] input) {
UltraLogLog ull = UltraLogLog.wrap(input);
return ObjectSerDeUtils.ULTRA_LOG_LOG_OBJECT_SER_DE.serialize(ull);
}
|
@Test
public void testULLLoading() {
for (Object i : _inputs) {
UltraLogLog ull = UltraLogLog.create(12);
UltraLogLogUtils.hashObject(i).ifPresent(ull::add);
byte[] loaded = SketchFunctions.fromULL(ull.getState());
UltraLogLog deserialized = ObjectSerDeUtils.ULTRA_LOG_LOG_OBJECT_SER_DE.deserialize(loaded);
Assert.assertEquals(deserialized.getState(), ull.getState());
}
}
|
@Override
public void alterSystemProperty(final String propertyName, final String propertyValue) {
final Map<String, String> overrides = ImmutableMap.of(propertyName, propertyValue);
this.primaryContext.alterSystemProperty(overrides);
}
|
@Test
public void shouldRaiseExceptionIfValueIsErroneous() {
setupKsqlEngineWithSharedRuntimeEnabled();
assertThrows(ConfigException.class, () ->
ksqlEngine.alterSystemProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, "TEST"));
}
|
public FileOutputStream openOutputStream(File file, boolean append) {
try {
return openOutputStreamOrThrowIOE(file, append);
} catch (IOException e) {
throw new IllegalStateException("Can not open file " + file, e);
}
}
|
@Test
public void openOutputStream_overwrites_existing_file() throws Exception {
File file = temp.newFile();
FileUtils.write(file, "foo");
try (FileOutputStream outputStream = underTest.openOutputStream(file, false)) {
IOUtils.write("bar", outputStream);
}
assertThat(FileUtils.readFileToString(file)).isEqualTo("bar");
}
|
public static Resource multiply(Resource lhs, double by) {
return multiplyTo(clone(lhs), by);
}
|
@Test
void testMultiply() {
assertEquals(createResource(4, 2), multiply(createResource(2, 1), 2));
assertEquals(createResource(4, 2, 0), multiply(createResource(2, 1), 2));
assertEquals(createResource(2, 4), multiply(createResource(1, 2), 2));
assertEquals(createResource(2, 4, 0), multiply(createResource(1, 2), 2));
assertEquals(createResource(6, 6, 0), multiply(createResource(3, 3, 0), 2));
assertEquals(createResource(4, 4, 6), multiply(createResource(2, 2, 3), 2));
}
|
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
throws IOException, ServletException {
if (bizConfig.isAdminServiceAccessControlEnabled()) {
HttpServletRequest request = (HttpServletRequest) req;
HttpServletResponse response = (HttpServletResponse) resp;
String token = request.getHeader(HttpHeaders.AUTHORIZATION);
if (!checkAccessToken(token)) {
logger.warn("Invalid access token: {} for uri: {}", token, request.getRequestURI());
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
return;
}
}
chain.doFilter(req, resp);
}
|
@Test
public void testWithAccessControlEnabledWithTokenSpecifiedWithInvalidTokenPassed()
throws Exception {
String someValidToken = "someValidToken";
String someInvalidToken = "someInvalidToken";
when(bizConfig.isAdminServiceAccessControlEnabled()).thenReturn(true);
when(bizConfig.getAdminServiceAccessTokens()).thenReturn(someValidToken);
when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(someInvalidToken);
authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
verify(bizConfig, times(1)).isAdminServiceAccessControlEnabled();
verify(bizConfig, times(1)).getAdminServiceAccessTokens();
verify(servletResponse, times(1))
.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
verify(filterChain, never()).doFilter(servletRequest, servletResponse);
}
|
@Override
public void seek(long pos) throws IOException {
Preconditions.checkArgument(pos >= 0, "Seek position is negative: %s", pos);
Preconditions.checkArgument(pos <= mLength,
"Seek position (%s) exceeds the length of the file (%s)", pos, mLength);
if (mPosition == pos) {
return;
}
if (!mUfsInStream.isPresent()) {
mPosition = pos;
return;
}
if (mUfsInStream.get() instanceof Seekable) {
((Seekable) mUfsInStream.get()).seek(pos);
} else if (mPosition < pos) {
long skipped = 0;
do {
skipped = mUfsInStream.get().skip(pos - mPosition);
if (skipped > 0) {
mPosition += skipped;
}
} while (mPosition < pos && skipped > 0);
if (mPosition != pos) {
throw new InternalRuntimeException(String.format(
"Failed to use skip to seek to pos %s, current position %s", pos, mPosition));
}
} else {
close();
}
mPosition = pos;
}
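A minimal, self-contained sketch of the skip-based forward seek fallback used above when the underlying stream is not Seekable; the helper below is hypothetical, assumes a plain java.io.InputStream, and only moves forward:
static long skipForward(InputStream in, long current, long target) throws IOException {
    long position = current;
    long skipped;
    do {
        // skip() may skip fewer bytes than requested, so keep looping while progress is made
        skipped = in.skip(target - position);
        if (skipped > 0) {
            position += skipped;
        }
    } while (position < target && skipped > 0);
    return position; // the caller must check that position == target was actually reached
}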
|
@Test
public void seekNegative() throws IOException, AlluxioException {
AlluxioURI ufsPath = getUfsPath();
createFile(ufsPath, CHUNK_SIZE);
try (FileInStream inStream = getStream(ufsPath)) {
assertThrows(IllegalArgumentException.class, () -> inStream.seek(-1));
}
}
|
@Deprecated
public static String addScreenWidth(String qualifiers, int screenWidth) {
int qualifiersScreenWidth = Qualifiers.getScreenWidth(qualifiers);
if (qualifiersScreenWidth == -1) {
if (qualifiers.length() > 0) {
qualifiers += "-";
}
qualifiers += "w" + screenWidth + "dp";
}
return qualifiers;
}
|
@Test
public void addScreenWidth() throws Exception {
assertThat(Qualifiers.addScreenWidth("", 320)).isEqualTo("w320dp");
assertThat(Qualifiers.addScreenWidth("w160dp", 320)).isEqualTo("w160dp");
assertThat(Qualifiers.addScreenWidth("w480dp", 320)).isEqualTo("w480dp");
assertThat(Qualifiers.addScreenWidth("en-v23", 320))
.isEqualTo("en-v23-w320dp"); // todo: order is wrong here
assertThat(Qualifiers.addScreenWidth("en-w160dp-v23", 320)).isEqualTo("en-w160dp-v23");
assertThat(Qualifiers.addScreenWidth("en-w480dp-v23", 320)).isEqualTo("en-w480dp-v23");
}
|
@Override
public void fetchSegmentToLocal(URI downloadURI, File dest)
throws Exception {
// Create a RoundRobinURIProvider to round-robin IP addresses when retrying the download. Otherwise we may always
// try to download from the same broken host, as: 1) DNS may not round-robin the IP addresses 2) the OS caches the DNS resolution result.
RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(downloadURI), true);
int retryCount = getRetryCount(uriProvider);
_logger.info("Retry downloading for {} times. retryCount from pinot server config: {}, number of IP addresses for "
+ "download URI: {}", retryCount, _retryCount, uriProvider.numAddresses());
RetryPolicies.exponentialBackoffRetryPolicy(retryCount, _retryWaitMs, _retryDelayScaleFactor).attempt(() -> {
URI uri = uriProvider.next();
try {
String hostName = downloadURI.getHost();
int port = downloadURI.getPort();
// If the original download address is specified as a host name, we need to add a "HOST" HTTP header to the HTTP
// request. Otherwise, if the download address is an LB address and the LB is configured with "disallow direct
// access by IP address", downloading will fail.
List<Header> httpHeaders = new LinkedList<>();
if (!InetAddresses.isInetAddress(hostName)) {
httpHeaders.add(new BasicHeader(HttpHeaders.HOST, hostName + ":" + port));
}
int statusCode = _httpClient.downloadFile(uri, dest, _authProvider, httpHeaders);
_logger.info("Downloaded segment from: {} to: {} of size: {}; Response status code: {}", uri, dest,
dest.length(), statusCode);
return true;
} catch (HttpErrorStatusException e) {
int statusCode = e.getStatusCode();
if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500) {
// Temporary exception
// 404 is treated as a temporary exception, as the downloadURI may be backed by multiple hosts,
// if a single host is down, we can retry with another host.
_logger.warn("Got temporary error status code: {} while downloading segment from: {} to: {}", statusCode, uri,
dest, e);
return false;
} else {
// Permanent exception
_logger.error("Got permanent error status code: {} while downloading segment from: {} to: {}, won't retry",
statusCode, uri, dest, e);
throw e;
}
} catch (Exception e) {
_logger.warn("Caught exception while downloading segment from: {} to: {}", uri, dest, e);
return false;
}
});
}
|
@Test
public void testFetchSegmentToLocalSuccessAfterFirstTwoAttemptsFoundNoPeerServers()
throws Exception {
FileUploadDownloadClient client = mock(FileUploadDownloadClient.class);
// The download always succeeds
when(client.downloadFile(any(), any(), any())).thenReturn(200);
HttpSegmentFetcher segmentFetcher = getSegmentFetcher(client);
List<URI> uris = List.of(new URI("http://h1:8080"), new URI("http://h2:8080"));
// The first two attempts found NO peers hosting the segment, and the last one found two servers
//noinspection unchecked
Supplier<List<URI>> uriSupplier = mock(Supplier.class);
when(uriSupplier.get()).thenReturn(List.of()).thenReturn(List.of()).thenReturn(uris);
segmentFetcher.fetchSegmentToLocal(SEGMENT_NAME, uriSupplier, SEGMENT_FILE);
}
|
public void notifyPluginAboutClusterProfileChanged(String pluginId, ClusterProfilesChangedStatus status, Map<String, String> oldClusterProfile, Map<String, String> newClusterProfile) {
try {
LOGGER.debug("Processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile);
extension.clusterProfileChanged(pluginId, status, oldClusterProfile, newClusterProfile);
LOGGER.debug("Done processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile);
} catch (Exception e) {
LOGGER.error("An error occurred while processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile, e);
}
}
|
@Test
public void shouldNotFailEvenWhenExtensionFailsToHandleClusterProfileChangedCall() {
final Map<String, String> newClusterProfileConfigurations = Map.of("Image", "alpine:latest");
doThrow(new RuntimeException("Boom!")).when(elasticAgentExtension).clusterProfileChanged(any(), any(), any(), any());
elasticAgentPluginRegistry.notifyPluginAboutClusterProfileChanged(PLUGIN_ID, ClusterProfilesChangedStatus.CREATED, null, newClusterProfileConfigurations);
verify(elasticAgentExtension, times(1)).clusterProfileChanged(PLUGIN_ID, ClusterProfilesChangedStatus.CREATED, null, newClusterProfileConfigurations);
verifyNoMoreInteractions(elasticAgentExtension);
}
|
@SuppressFBWarnings("NS_NON_SHORT_CIRCUIT")
protected boolean isValidUtf8(final byte[] input) {
int i = 0;
// Check for BOM
if (input.length >= 3 && (input[0] & 0xFF) == 0xEF
&& (input[1] & 0xFF) == 0xBB & (input[2] & 0xFF) == 0xBF) {
i = 3;
}
int end;
for (int j = input.length; i < j; ++i) {
int octet = input[i];
if ((octet & 0x80) == 0) {
continue; // ASCII
}
// Check for UTF-8 leading byte
if ((octet & 0xE0) == 0xC0) {
end = i + 1;
} else if ((octet & 0xF0) == 0xE0) {
end = i + 2;
} else if ((octet & 0xF8) == 0xF0) {
end = i + 3;
} else {
// Java only supports BMP so 3 is max
return false;
}
while (i < end) {
i++;
octet = input[i];
if ((octet & 0xC0) != 0x80) {
// Not a valid trailing byte
return false;
}
}
}
return true;
}
|
@Test
public void isValidUtf8_testUtf8Strings_allShouldSucceed() {
MockResponseWriter rw = new MockResponseWriter();
for (String s : VALID_STRINGS) {
assertTrue(rw.isValidUtf8(s.getBytes()));
}
}
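A minimal sketch of the leading-byte classification that isValidUtf8 above relies on; the helper below is hypothetical and returns the total sequence length implied by the first byte of a UTF-8 sequence:
static int sequenceLength(byte leadingByte) {
    int octet = leadingByte & 0xFF;
    if ((octet & 0x80) == 0) {
        return 1; // 0xxxxxxx: single-byte ASCII
    }
    if ((octet & 0xE0) == 0xC0) {
        return 2; // 110xxxxx followed by one 10xxxxxx continuation byte
    }
    if ((octet & 0xF0) == 0xE0) {
        return 3; // 1110xxxx followed by two continuation bytes
    }
    if ((octet & 0xF8) == 0xF0) {
        return 4; // 11110xxx followed by three continuation bytes
    }
    return -1; // not a valid leading byte (e.g. a lone continuation byte)
}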
|
public static <T> T[] toArray(Class<T> c, List<T> list)
{
@SuppressWarnings("unchecked")
T[] ta= (T[])Array.newInstance(c, list.size());
for (int i= 0; i<list.size(); i++)
ta[i]= list.get(i);
return ta;
}
|
@Test
public void testWithEmptyList2() {
List<String> list = new ArrayList<String>();
//this method should not throw IndexOutOfBoundsException
String[] arr = GenericsUtil.<String>toArray(String.class, list);
assertEquals("Assert list creation w/ no elements results in length 0",
0, arr.length);
}
|
@ScalarOperator(GREATER_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThan(@SqlType(StandardTypes.BOOLEAN) boolean left, @SqlType(StandardTypes.BOOLEAN) boolean right)
{
return left && !right;
}
|
@Test
public void testGreaterThan()
{
assertFunction("true > true", BOOLEAN, false);
assertFunction("true > false", BOOLEAN, true);
assertFunction("false > true", BOOLEAN, false);
assertFunction("false > false", BOOLEAN, false);
}
|
@Override
public IntDeviceConfig getConfig() {
return intConfig.get();
}
|
@Test
public void testPushIntAppConfig() throws IOException {
IntReportConfig config = getIntReportConfig("/report-config.json");
NetworkConfigEvent event =
new NetworkConfigEvent(NetworkConfigEvent.Type.CONFIG_ADDED, APP_ID,
config, null, IntReportConfig.class);
networkConfigListener.event(event);
// We expect that the manager will store the device config which was
// converted from the app config.
IntDeviceConfig expectedConfig = createIntDeviceConfig();
IntDeviceConfig actualConfig = manager.getConfig();
assertEquals(expectedConfig, actualConfig);
// Install watch subnets via netcfg
// In the report-config.json, there are 3 subnets we want to watch
// For subnet 0.0.0.0/0, the IntManager will create only one IntIntent with an empty selector.
Set<IntIntent> expectedIntIntents = Sets.newHashSet();
ConsistentMap<IntIntentId, IntIntent> intentMap = TestUtils.getField(manager, "intentMap");
IntIntent.Builder baseIntentBuilder = IntIntent.builder()
.withReportType(IntIntent.IntReportType.TRACKED_FLOW)
.withReportType(IntIntent.IntReportType.DROPPED_PACKET)
.withReportType(IntIntent.IntReportType.CONGESTED_QUEUE)
.withTelemetryMode(IntIntent.TelemetryMode.POSTCARD);
// Watch IP Src == subnet 1
TrafficSelector expectedSelector = DefaultTrafficSelector.builder()
.matchIPSrc(IpPrefix.valueOf(WATCHED_SUBNET_1))
.build();
expectedIntIntents.add(baseIntentBuilder.withSelector(expectedSelector).build());
// Watch IP Dst == subnet 1
expectedSelector = DefaultTrafficSelector.builder()
.matchIPDst(IpPrefix.valueOf(WATCHED_SUBNET_1))
.build();
expectedIntIntents.add(baseIntentBuilder.withSelector(expectedSelector).build());
// Watch IP Src == subnet 2
expectedSelector = DefaultTrafficSelector.builder()
.matchIPSrc(IpPrefix.valueOf(WATCHED_SUBNET_2))
.build();
expectedIntIntents.add(baseIntentBuilder.withSelector(expectedSelector).build());
// Watch IP Dst == subnet 2
expectedSelector = DefaultTrafficSelector.builder()
.matchIPDst(IpPrefix.valueOf(WATCHED_SUBNET_2))
.build();
expectedIntIntents.add(baseIntentBuilder.withSelector(expectedSelector).build());
// Any packets
expectedSelector = DefaultTrafficSelector.emptySelector();
expectedIntIntents.add(baseIntentBuilder.withSelector(expectedSelector).build());
// The INT intent installation order can be random, so we need to collect
// all expected INT intents and check that each actual intent exists.
assertAfter(50, 100, () -> assertEquals(5, intentMap.size()));
intentMap.entrySet().forEach(entry -> {
IntIntent actualIntIntent = entry.getValue().value();
assertTrue(expectedIntIntents.contains(actualIntIntent));
});
}
|
public static int[] range(int stop) {
return range(0, stop);
}
|
@Test
public void range(){
assertFalse(NumberUtil.isIn(new BigDecimal("1"),new BigDecimal("2"),new BigDecimal("12")));
assertTrue(NumberUtil.isIn(new BigDecimal("1"),new BigDecimal("1"),new BigDecimal("2")));
assertTrue(NumberUtil.isIn(new BigDecimal("1"),new BigDecimal("0"),new BigDecimal("2")));
assertFalse(NumberUtil.isIn(new BigDecimal("0.23"),new BigDecimal("0.12"),new BigDecimal("0.22")));
assertTrue(NumberUtil.isIn(new BigDecimal("-0.12"),new BigDecimal("-0.3"),new BigDecimal("0")));
}
|
@Nullable
public ArgType replaceTypeVariablesUsingMap(ArgType replaceType, Map<ArgType, ArgType> replaceMap) {
if (replaceMap.isEmpty()) {
return null;
}
if (replaceType.isGenericType()) {
return replaceMap.get(replaceType);
}
if (replaceType.isArray()) {
ArgType replaced = replaceTypeVariablesUsingMap(replaceType.getArrayElement(), replaceMap);
if (replaced == null) {
return null;
}
return ArgType.array(replaced);
}
ArgType wildcardType = replaceType.getWildcardType();
if (wildcardType != null && wildcardType.containsTypeVariable()) {
ArgType newWildcardType = replaceTypeVariablesUsingMap(wildcardType, replaceMap);
if (newWildcardType == null) {
return null;
}
return ArgType.wildcard(newWildcardType, replaceType.getWildcardBound());
}
if (replaceType.isGeneric()) {
ArgType outerType = replaceType.getOuterType();
if (outerType != null) {
ArgType replacedOuter = replaceTypeVariablesUsingMap(outerType, replaceMap);
if (replacedOuter == null) {
return null;
}
ArgType innerType = replaceType.getInnerType();
ArgType replacedInner = replaceTypeVariablesUsingMap(innerType, replaceMap);
return ArgType.outerGeneric(replacedOuter, replacedInner == null ? innerType : replacedInner);
}
List<ArgType> genericTypes = replaceType.getGenericTypes();
if (notEmpty(genericTypes)) {
List<ArgType> newTypes = Utils.collectionMap(genericTypes, t -> {
ArgType type = replaceTypeVariablesUsingMap(t, replaceMap);
return type == null ? t : type;
});
return ArgType.generic(replaceType, newTypes);
}
}
return null;
}
|
@Test
void replaceTypeVariablesUsingMap() {
ArgType typeVar = genericType("T");
ArgType listCls = object("java.util.List");
Map<ArgType, ArgType> typeMap = Collections.singletonMap(typeVar, STRING);
replaceTypeVar(typeVar, typeMap, STRING);
replaceTypeVar(generic(listCls, typeVar), typeMap, generic(listCls, STRING));
replaceTypeVar(array(typeVar), typeMap, array(STRING));
}
|
@Override
public Enumeration<URL> getResources(String name) throws IOException {
List<URL> resources = new ArrayList<>();
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resources '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
switch (classLoadingSource) {
case APPLICATION:
if (getParent() != null) {
resources.addAll(Collections.list(getParent().getResources(name)));
}
break;
case PLUGIN:
resources.addAll(Collections.list(findResources(name)));
break;
case DEPENDENCIES:
resources.addAll(findResourcesFromDependencies(name));
break;
}
}
return Collections.enumeration(resources);
}
|
@Test
void parentLastGetResourcesExistsInBothParentAndPlugin() throws URISyntaxException, IOException {
Enumeration<URL> resources = parentLastPluginClassLoader.getResources("META-INF/file-in-both-parent-and-plugin");
assertNumberOfResourcesAndFirstLineOfFirstElement(2, "plugin", resources);
}
|
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
|
@Test
public void iterableContainsExactlyFailsWithSameToStringAndHeterogeneousListWithDuplicates() {
expectFailureWhenTestingThat(asList(1L, 2)).containsExactly(1, null, null, 2L, 2L);
assertFailureValue(
"missing (5)",
"1 (java.lang.Integer), null (null type) [2 copies], 2 (java.lang.Long) [2 copies]");
assertFailureValue("unexpected (2)", "1 (java.lang.Long), 2 (java.lang.Integer)");
}
|
@Override
public Processor<K, SubscriptionResponseWrapper<VO>, K, VR> get() {
return new ContextualProcessor<K, SubscriptionResponseWrapper<VO>, K, VR>() {
private String valueHashSerdePseudoTopic;
private Serializer<V> runtimeValueSerializer = constructionTimeValueSerializer;
private KTableValueGetter<K, V> valueGetter;
private Sensor droppedRecordsSensor;
@SuppressWarnings("unchecked")
@Override
public void init(final ProcessorContext<K, VR> context) {
super.init(context);
valueHashSerdePseudoTopic = valueHashSerdePseudoTopicSupplier.get();
valueGetter = valueGetterSupplier.get();
valueGetter.init(context);
if (runtimeValueSerializer == null) {
runtimeValueSerializer = (Serializer<V>) context.valueSerde().serializer();
}
final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
Thread.currentThread().getName(),
internalProcessorContext.taskId().toString(),
internalProcessorContext.metrics()
);
}
@Override
public void process(final Record<K, SubscriptionResponseWrapper<VO>> record) {
if (record.value().getVersion() != SubscriptionResponseWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionResponseWrapper. Need to ensure that there is
//compatibility with previous versions to enable rolling upgrades. Must develop a strategy for
//upgrading from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionResponseWrapper is of an incompatible version.");
}
final ValueAndTimestamp<V> currentValueWithTimestamp = valueGetter.get(record.key());
final long[] currentHash = currentValueWithTimestamp == null ?
null :
Murmur3.hash128(runtimeValueSerializer.serialize(valueHashSerdePseudoTopic, currentValueWithTimestamp.value()));
final long[] messageHash = record.value().getOriginalValueHash();
//If this value doesn't match the current value from the original table, it is stale and should be discarded.
if (java.util.Arrays.equals(messageHash, currentHash)) {
final VR result;
if (record.value().getForeignValue() == null && (!leftJoin || currentValueWithTimestamp == null)) {
result = null; //Emit tombstone
} else {
result = joiner.apply(currentValueWithTimestamp == null ? null : currentValueWithTimestamp.value(), record.value().getForeignValue());
}
context().forward(record.withValue(result));
} else {
LOG.trace("Dropping FK-join response due to hash mismatch. Expected {}. Actual {}", messageHash, currentHash);
droppedRecordsSensor.record();
}
}
};
}
|
@Test
public void shouldEmitResultForLeftJoinWhenRightIsNull() {
final TestKTableValueGetterSupplier<String, String> valueGetterSupplier =
new TestKTableValueGetterSupplier<>();
final boolean leftJoin = true;
final ResponseJoinProcessorSupplier<String, String, String, String> processorSupplier =
new ResponseJoinProcessorSupplier<>(
valueGetterSupplier,
STRING_SERIALIZER,
() -> "value-hash-dummy-topic",
JOINER,
leftJoin
);
final Processor<String, SubscriptionResponseWrapper<String>, String, String> processor = processorSupplier.get();
final MockInternalNewProcessorContext<String, String> context = new MockInternalNewProcessorContext<>();
processor.init(context);
context.setRecordMetadata("topic", 0, 0);
valueGetterSupplier.put("lhs1", "lhsValue");
final long[] hash = Murmur3.hash128(STRING_SERIALIZER.serialize("topic-join-resolver", "lhsValue"));
processor.process(new Record<>("lhs1", new SubscriptionResponseWrapper<>(hash, null, 0), 0));
final List<MockProcessorContext.CapturedForward<? extends String, ? extends String>> forwarded = context.forwarded();
assertThat(forwarded.size(), is(1));
assertThat(forwarded.get(0).record(), is(new Record<>("lhs1", "(lhsValue,null)", 0)));
}
|
@Override
@CacheEvict(value = RedisKeyConstants.ROLE, key = "#id")
public void updateRoleDataScope(Long id, Integer dataScope, Set<Long> dataScopeDeptIds) {
// Validate that the role can be updated
validateRoleForUpdate(id);
// Update the data scope
RoleDO updateObject = new RoleDO();
updateObject.setId(id);
updateObject.setDataScope(dataScope);
updateObject.setDataScopeDeptIds(dataScopeDeptIds);
roleMapper.updateById(updateObject);
}
|
@Test
public void testUpdateRoleDataScope() {
// mock data
RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setType(RoleTypeEnum.CUSTOM.getType()));
roleMapper.insert(roleDO);
// prepare parameters
Long id = roleDO.getId();
Integer dataScope = randomEle(DataScopeEnum.values()).getScope();
Set<Long> dataScopeRoleIds = randomSet(Long.class);
// invoke the method under test
roleService.updateRoleDataScope(id, dataScope, dataScopeRoleIds);
// assert
RoleDO dbRoleDO = roleMapper.selectById(id);
assertEquals(dataScope, dbRoleDO.getDataScope());
assertEquals(dataScopeRoleIds, dbRoleDO.getDataScopeDeptIds());
}
|
public MessageType union(MessageType toMerge) {
return union(toMerge, true);
}
|
@Test
public void testMergeSchemaWithColumnOrder() {
MessageType m1 = Types.buildMessage()
.addFields(
Types.requiredList()
.element(Types.optional(BINARY)
.columnOrder(ColumnOrder.undefined())
.named("a"))
.named("g"),
Types.optional(INT96).named("b"))
.named("root");
MessageType m2 = Types.buildMessage()
.addFields(
Types.requiredList()
.element(Types.optional(BINARY)
.columnOrder(ColumnOrder.undefined())
.named("a"))
.named("g"),
Types.optional(BINARY).named("c"))
.named("root");
MessageType m3 = Types.buildMessage()
.addFields(Types.requiredList()
.element(Types.optional(BINARY).named("a"))
.named("g"))
.named("root");
assertEquals(
Types.buildMessage()
.addFields(
Types.requiredList()
.element(Types.optional(BINARY).named("a"))
.named("g"),
Types.optional(INT96).named("b"),
Types.optional(BINARY).named("c"))
.named("root"),
m1.union(m2));
try {
m1.union(m3);
fail("An IncompatibleSchemaModificationException should have been thrown");
} catch (Exception e) {
assertTrue(
"The thrown exception should have been IncompatibleSchemaModificationException but was "
+ e.getClass(),
e instanceof IncompatibleSchemaModificationException);
assertEquals(
"can not merge type optional binary a with column order TYPE_DEFINED_ORDER into optional binary a with column order UNDEFINED",
e.getMessage());
}
}
|
public boolean isEnabled() {
return enabled;
}
|
@Test
public void testFalse() {
JaegerConfig jaegerConfig = (JaegerConfig) Config.getInstance().getJsonObjectConfig("jaeger-tracing-false", JaegerConfig.class);
Assert.assertFalse(jaegerConfig.isEnabled());
}
|
public ConfigData get(String path) {
if (allowedPaths == null) {
throw new IllegalStateException("The provider has not been configured yet.");
}
Map<String, String> data = new HashMap<>();
if (path == null || path.isEmpty()) {
return new ConfigData(data);
}
Path filePath = allowedPaths.parseUntrustedPath(path);
if (filePath == null) {
log.warn("The path {} is not allowed to be accessed", path);
return new ConfigData(data);
}
try (Reader reader = reader(filePath)) {
Properties properties = new Properties();
properties.load(reader);
Enumeration<Object> keys = properties.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement().toString();
String value = properties.getProperty(key);
if (value != null) {
data.put(key, value);
}
}
return new ConfigData(data);
} catch (IOException e) {
log.error("Could not read properties from file {}", path, e);
throw new ConfigException("Could not read properties from file " + path);
}
}
|
@Test
public void testEmptyPath() {
ConfigData configData = configProvider.get("", Collections.singleton("testKey"));
assertTrue(configData.data().isEmpty());
assertNull(configData.ttl());
}
|
public Properties getProperties() {
return properties;
}
|
@Test
public void testHibernateTypesOverrideProperties() {
assertEquals("ghi", Configuration.INSTANCE.getProperties().getProperty("hibernate.types.def"));
}
|
public static <T, PartitionColumnT> ReadWithPartitions<T, PartitionColumnT> readWithPartitions(
TypeDescriptor<PartitionColumnT> partitioningColumnType) {
return new AutoValue_JdbcIO_ReadWithPartitions.Builder<T, PartitionColumnT>()
.setPartitionColumnType(partitioningColumnType)
.setNumPartitions(DEFAULT_NUM_PARTITIONS)
.setFetchSize(DEFAULT_FETCH_SIZE)
.setUseBeamSchema(false)
.build();
}
|
@Test
public void testReadWithPartitionsBySubquery() {
PCollection<TestRow> rows =
pipeline.apply(
JdbcIO.<TestRow>readWithPartitions()
.withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION)
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withTable(String.format("(select * from %s) as subq", READ_TABLE_NAME))
.withNumPartitions(10)
.withPartitionColumn("id")
.withLowerBound(0L)
.withUpperBound(1000L));
PAssert.thatSingleton(rows.apply("Count All", Count.globally())).isEqualTo(1000L);
pipeline.run();
}
|
@Override
public synchronized ProducerStatsRecorderImpl getStats() {
if (stats == null) {
return null;
}
stats.reset();
producers.forEach(
(partition, producer) -> stats.updateCumulativeStats(producer.getTopic(), producer.getStats()));
return stats;
}
|
@Test
public void testGetStats() throws Exception {
String topicName = "test-stats";
ClientConfigurationData conf = new ClientConfigurationData();
conf.setServiceUrl("pulsar://localhost:6650");
conf.setStatsIntervalSeconds(100);
ThreadFactory threadFactory = new DefaultThreadFactory("client-test-stats", Thread.currentThread().isDaemon());
@Cleanup("shutdownGracefully")
EventLoopGroup eventLoopGroup = EventLoopUtil.newEventLoopGroup(conf.getNumIoThreads(), false, threadFactory);
@Cleanup
PulsarClientImpl clientImpl = new PulsarClientImpl(conf, eventLoopGroup);
ProducerConfigurationData producerConfData = new ProducerConfigurationData();
producerConfData.setMessageRoutingMode(MessageRoutingMode.CustomPartition);
producerConfData.setCustomMessageRouter(new CustomMessageRouter());
assertEquals(Long.parseLong("100"), clientImpl.getConfiguration().getStatsIntervalSeconds());
PartitionedProducerImpl impl = new PartitionedProducerImpl(
clientImpl, topicName, producerConfData,
1, null, null, null);
impl.getStats();
}
|
public <T> Mono<CosmosItemResponse<T>> readItem(
final String itemId, final PartitionKey partitionKey, final CosmosItemRequestOptions itemRequestOptions,
final Class<T> itemType) {
CosmosDbUtils.validateIfParameterIsNotEmpty(itemId, PARAM_ITEM_ID);
CosmosDbUtils.validateIfParameterIsNotEmpty(partitionKey, PARAM_PARTITION_KEY);
CosmosDbUtils.validateIfParameterIsNotEmpty(itemType, PARAM_ITEM_TYPE);
return applyToContainer(container -> container.readItem(itemId, partitionKey, itemRequestOptions, itemType));
}
|
@Test
void readItem() {
final CosmosDbContainerOperations operations
= new CosmosDbContainerOperations(Mono.just(mock(CosmosAsyncContainer.class)));
CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.readItem(null, null, null, null));
CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.readItem("test", null, null, null));
}
|
public List<AuditLogLine> getAuditLogLines() {
return Collections.unmodifiableList(auditLogLines);
}
|
@Test
public void noLogLinesAtTheStart() {
assertThat(scenarioResultMetadata.getAuditLogLines()).isEmpty();
}
|
@Override
public PutMessageResult putMessage(MessageExtBrokerInner msg) {
return waitForPutResult(asyncPutMessage(msg));
}
|
@Test
public void testPutMsgExceedsMaxLength() {
messageBody = new byte[4 * 1024 * 1024 + 1];
MessageExtBrokerInner msg = buildMessage();
PutMessageResult result = messageStore.putMessage(msg);
assertThat(result.getPutMessageStatus()).isEqualTo(PutMessageStatus.MESSAGE_ILLEGAL);
}
|
public boolean checkAccess(UserGroupInformation callerUGI,
ApplicationAccessType applicationAccessType,
TimelineEntity entity) throws YarnException, IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Verifying the access of "
+ (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
}
if (!adminAclsManager.areACLsEnabled()) {
return true;
}
// find domain owner and acls
AccessControlListExt aclExt = aclExts.get(entity.getDomainId());
if (aclExt == null) {
aclExt = loadDomainFromTimelineStore(entity.getDomainId());
}
if (aclExt == null) {
throw new YarnException("Domain information of the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType())
+ " doesn't exist.");
}
String owner = aclExt.owner;
AccessControlList domainACL = aclExt.acls.get(applicationAccessType);
if (domainACL == null) {
LOG.debug("ACL not found for access-type {} for domain {} owned by {}."
+ " Using default [{}]", applicationAccessType,
entity.getDomainId(), owner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
domainACL =
new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
}
if (callerUGI != null
&& (adminAclsManager.isAdmin(callerUGI) ||
callerUGI.getShortUserName().equals(owner) ||
domainACL.isUserAllowed(callerUGI))) {
return true;
}
return false;
}
|
@Test
void testYarnACLsEnabledForDomain() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
TimelineDomain domain = new TimelineDomain();
domain.setOwner("owner");
assertTrue(
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"), domain),
"Owner should be allowed to access");
assertFalse(
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("other"), domain),
"Other shouldn't be allowed to access");
assertTrue(
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("admin"), domain),
"Admin should be allowed to access");
}
|
public boolean hasLambdaFunctionCallArguments() {
return arguments.stream().anyMatch(
argument -> argument instanceof LambdaFunctionCall);
}
|
@Test
public void shouldReturnHasLambdaFunctionCall() {
final FunctionCall functionCall1 = new FunctionCall(SOME_NAME, SOME_ARGS);
final FunctionCall functionCall2 = new FunctionCall(SOME_NAME, ImmutableList.of(
new StringLiteral("jane"),
new LambdaFunctionCall(ImmutableList.of("x"), new StringLiteral("test"))));
assertFalse(functionCall1.hasLambdaFunctionCallArguments());
assertTrue(functionCall2.hasLambdaFunctionCallArguments());
}
|
public AgentBootstrapperArgs parse(String... args) {
AgentBootstrapperArgs result = new AgentBootstrapperArgs();
try {
new JCommander(result).parse(args);
if (result.help) {
printUsageAndExit(0);
}
return result;
} catch (ParameterException e) {
stderr.println(e.getMessage());
printUsageAndExit(1);
}
return null;
}
|
@Test
public void serverURLMustBeAValidURL() {
try {
agentCLI.parse("-serverUrl", "foobar");
fail("Was expecting an exception!");
} catch (ExitException e) {
assertThat(e.getStatus()).isEqualTo(1);
assertThat(errorStream.toString()).contains("-serverUrl is not a valid url");
assertThat(errorStream.toString()).contains("Usage: java -jar agent-bootstrapper.jar");
}
}
|
String normalizeNotePath(String notePath) throws IOException {
if (StringUtils.isBlank(notePath)) {
notePath = "/Untitled Note";
}
if (!notePath.startsWith("/")) {
notePath = "/" + notePath;
}
notePath = notePath.replace("\r", " ").replace("\n", " ");
notePath = URLDecoder.decode(notePath, StandardCharsets.UTF_8.toString());
if (notePath.endsWith("/")) {
throw new IOException("Note name shouldn't end with '/'");
}
int pos = notePath.lastIndexOf("/");
if ((notePath.length() - pos) > 255) {
throw new IOException("Note name must be less than 255");
}
if (notePath.contains("..")) {
throw new IOException("Note name can not contain '..'");
}
return notePath;
}
|
@Test
void testNormalizeNotePath() throws IOException {
assertEquals("/Untitled Note", notebookService.normalizeNotePath(" "));
assertEquals("/Untitled Note", notebookService.normalizeNotePath(null));
assertEquals("/my_note", notebookService.normalizeNotePath("my_note"));
assertEquals("/my note", notebookService.normalizeNotePath("my\r\nnote"));
try {
String longNoteName = StringUtils.join(
IntStream.range(0, 256).boxed().collect(Collectors.toList()), "");
notebookService.normalizeNotePath(longNoteName);
fail("Should fail");
} catch (IOException e) {
assertEquals("Note name must be less than 255", e.getMessage());
}
try {
notebookService.normalizeNotePath("my..note");
fail("Should fail");
} catch (IOException e) {
assertEquals("Note name can not contain '..'", e.getMessage());
}
try {
notebookService.normalizeNotePath("%2e%2e/%2e%2e/tmp/test222");
fail("Should fail");
} catch (IOException e) {
assertEquals("Note name can not contain '..'", e.getMessage());
}
try {
notebookService.normalizeNotePath("./");
fail("Should fail");
} catch (IOException e) {
assertEquals("Note name shouldn't end with '/'", e.getMessage());
}
}
|
@Override
public void setValue(String value) throws IOException
{
checkValue(value);
// if there are export values/an Opt entry there is a different
// approach to setting the value
if (!getExportValues().isEmpty())
{
updateByOption(value);
}
else
{
updateByValue(value);
}
applyChange();
}
|
@Test
void setAbstractedRadioButtonInvalidValue() throws IOException
{
PDField radioButton = acrobatAcroForm.getField("RadioButtonGroup");
// Set a value which doesn't match the radio button list
assertThrows(IllegalArgumentException.class, () -> radioButton.setValue("InvalidValue"));
}
|
public static List<RemoteRepository> newCentralRepositorys(Proxy proxy,
ZeppelinConfiguration zConf) {
String mvnRepoEnv = System.getenv("ZEPPELIN_INTERPRETER_DEP_MVNREPO");
if (mvnRepoEnv == null) {
mvnRepoEnv = zConf.getString(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_DEP_MVNREPO);
}
if (mvnRepoEnv == null) {
mvnRepoEnv = "https://repo1.maven.org/maven2/";
}
List<String> repoList = new ArrayList<>();
if (mvnRepoEnv.contains(",")) {
repoList.addAll(Arrays.asList(mvnRepoEnv.split(",+")));
} else {
repoList.add(mvnRepoEnv);
}
List<RemoteRepository> centralRepositorys = repoList.stream().map(repo -> {
RemoteRepository.Builder centralBuilder = new RemoteRepository.Builder("central", "default", repo);
if (proxy != null) {
centralBuilder.setProxy(proxy);
}
return centralBuilder.build();
}
).collect(Collectors.toList());
return centralRepositorys;
}
|
@Test
void getInterpreterMvnRepoPathTest() {
ZeppelinConfiguration zConf = ZeppelinConfiguration.load("zeppelin-site-test.xml");
List<RemoteRepository> remoteRepositories = Booter.newCentralRepositorys(null, zConf);
assertNotNull(remoteRepositories);
assertEquals(2, remoteRepositories.size());
assertEquals("https://repo1.maven.org/maven2/", remoteRepositories.get(0).getUrl());
assertEquals("https://repo2.maven.org/maven2/", remoteRepositories.get(1).getUrl());
}
|
public synchronized void registerProducer(final String group, final ClientChannelInfo clientChannelInfo) {
ClientChannelInfo clientChannelInfoFound = null;
ConcurrentHashMap<Channel, ClientChannelInfo> channelTable = this.groupChannelTable.get(group);
if (null == channelTable) {
channelTable = new ConcurrentHashMap<>();
this.groupChannelTable.put(group, channelTable);
}
clientChannelInfoFound = channelTable.get(clientChannelInfo.getChannel());
if (null == clientChannelInfoFound) {
channelTable.put(clientChannelInfo.getChannel(), clientChannelInfo);
clientChannelTable.put(clientChannelInfo.getClientId(), clientChannelInfo.getChannel());
log.info("new producer connected, group: {} channel: {}", group,
clientChannelInfo.toString());
}
if (clientChannelInfoFound != null) {
clientChannelInfoFound.setLastUpdateTimestamp(System.currentTimeMillis());
}
}
|
@Test
public void testRegisterProducer() throws Exception {
producerManager.registerProducer(group, clientInfo);
Map<Channel, ClientChannelInfo> channelMap = producerManager.getGroupChannelTable().get(group);
Channel channel1 = producerManager.findChannel("clientId");
assertThat(channelMap).isNotNull();
assertThat(channel1).isNotNull();
assertThat(channelMap.get(channel)).isEqualTo(clientInfo);
assertThat(channel1).isEqualTo(channel);
}
|
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception {
LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation);
MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry);
if (!configForEdit.getOrigin().isLocal()) {
throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials");
}
if (!skipPreprocessingAndValidation) {
loader.preprocessAndValidate(configForEdit);
LOGGER.debug("[Serializing Config] Done with cruise config validators.");
}
Document document = createEmptyCruiseConfigDocument();
write(configForEdit, document.getRootElement(), configCache, registry);
LOGGER.debug("[Serializing Config] XSD and DOM validation.");
verifyXsdValid(document);
MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry);
LOGGER.info("[Serializing Config] Generating config partial.");
XmlUtils.writeXml(document, output);
LOGGER.debug("[Serializing Config] Finished writing config partial.");
}
|
@Test
public void shouldNotDefineATrackingToolWithoutALink() {
CruiseConfig cruiseConfig = ConfigMigrator.load(ConfigFileFixture.ONE_PIPELINE);
PipelineConfig pipelineConfig = cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("pipeline1"));
pipelineConfig.setTrackingTool(new TrackingTool("", "regex"));
try {
xmlWriter.write(cruiseConfig, output, false);
fail("should not save a trackingtool without a link");
} catch (Exception e) {
assertThat(e.getMessage(), containsString("Link should be populated"));
}
}
|
public int[] findMatchingLines(List<String> left, List<String> right) {
int[] index = new int[right.size()];
int dbLine = left.size();
int reportLine = right.size();
try {
PathNode node = new MyersDiff<String>().buildPath(left, right);
while (node.prev != null) {
PathNode prevNode = node.prev;
if (!node.isSnake()) {
// additions
reportLine -= (node.j - prevNode.j);
// removals
dbLine -= (node.i - prevNode.i);
} else {
// matches
for (int i = node.i; i > prevNode.i; i--) {
index[reportLine - 1] = dbLine;
reportLine--;
dbLine--;
}
}
node = prevNode;
}
} catch (DifferentiationFailedException e) {
LOG.error("Error finding matching lines", e);
return index;
}
return index;
}
|
@Test
public void shouldDetectNewLinesAtEndOfFile() {
List<String> database = new ArrayList<>();
database.add("line - 0");
database.add("line - 1");
database.add("line - 2");
List<String> report = new ArrayList<>();
report.add("line - 0");
report.add("line - 1");
report.add("line - 2");
report.add("line - new");
report.add("line - new");
int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report);
assertThat(diff).containsExactly(1, 2, 3, 0, 0);
}
|
@Override
public void exit(int count, Object... args) throws ErrorEntryFreeException {
trueExit(count, args);
}
|
@Test
public void testExitLastEntryWithDefaultContext() {
final Context defaultContext = getFakeDefaultContext();
ContextUtil.runOnContext(defaultContext, new Runnable() {
@Override
public void run() {
CtEntry entry = new CtEntry(new StringResourceWrapper("res", EntryType.IN),
null, ContextUtil.getContext());
assertSame(entry, defaultContext.getCurEntry());
assertSame(defaultContext, ContextUtil.getContext());
entry.exit();
assertNull(defaultContext.getCurEntry());
// Default context will be automatically exited.
assertNull(ContextUtil.getContext());
}
});
}
|
public long maxOffset(MessageQueue mq) throws MQClientException {
String brokerAddr = this.mQClientFactory.findBrokerAddressInPublish(this.mQClientFactory.getBrokerNameFromMessageQueue(mq));
if (null == brokerAddr) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic());
brokerAddr = this.mQClientFactory.findBrokerAddressInPublish(this.mQClientFactory.getBrokerNameFromMessageQueue(mq));
}
if (brokerAddr != null) {
try {
return this.mQClientFactory.getMQClientAPIImpl().getMaxOffset(brokerAddr, mq, timeoutMillis);
} catch (Exception e) {
throw new MQClientException("Invoke Broker[" + brokerAddr + "] exception", e);
}
}
throw new MQClientException("The broker[" + mq.getBrokerName() + "] not exist", null);
}
|
@Test
public void assertMaxOffset() throws MQClientException {
assertEquals(0, mqAdminImpl.maxOffset(new MessageQueue()));
}
|
@Override
public Neighbor<double[], E>[] search(double[] q, int k) {
if (model == null) return super.search(q, k);
return search(q, k, 0.95, 100);
}
|
@Test
public void testKnn() {
System.out.println("knn");
int[] recall = new int[testx.length];
for (int i = 0; i < testx.length; i++) {
int k = 7;
Neighbor[] n1 = lsh.search(testx[i], k, 0.95, 50);
Neighbor[] n2 = naive.search(testx[i], k);
for (Neighbor m2 : n2) {
for (Neighbor m1 : n1) {
if (m1.index == m2.index) {
recall[i]++;
break;
}
}
}
}
System.out.format("q1 of recall is %d%n", MathEx.q1(recall));
System.out.format("median of recall is %d%n", MathEx.median(recall));
System.out.format("q3 of recall is %d%n", MathEx.q3(recall));
}
|
@VisibleForTesting
Optional<Xpp3Dom> getSpringBootRepackageConfiguration() {
Plugin springBootPlugin =
project.getPlugin("org.springframework.boot:spring-boot-maven-plugin");
if (springBootPlugin != null) {
for (PluginExecution execution : springBootPlugin.getExecutions()) {
if (execution.getGoals().contains("repackage")) {
Xpp3Dom configuration = (Xpp3Dom) execution.getConfiguration();
if (configuration == null) {
return Optional.of(new Xpp3Dom("configuration"));
}
boolean skip = Boolean.parseBoolean(getChildValue(configuration, "skip").orElse("false"));
return skip ? Optional.empty() : Optional.of(configuration);
}
}
}
return Optional.empty();
}
|
@Test
public void testGetSpringBootRepackageConfiguration_repackageGoal() {
when(mockMavenProject.getPlugin("org.springframework.boot:spring-boot-maven-plugin"))
.thenReturn(mockPlugin);
when(mockPlugin.getExecutions()).thenReturn(Arrays.asList(mockPluginExecution));
when(mockPluginExecution.getGoals()).thenReturn(Arrays.asList("goal", "repackage"));
when(mockPluginExecution.getConfiguration()).thenReturn(pluginConfiguration);
assertThat(mavenProjectProperties.getSpringBootRepackageConfiguration())
.isEqualTo(Optional.of(pluginConfiguration));
}
|
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
final ConnectionSession connectionSession) {
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitPacket();
case COM_INIT_DB:
return new MySQLComInitDbPacket(payload);
case COM_FIELD_LIST:
return new MySQLComFieldListPacket(payload);
case COM_QUERY:
return new MySQLComQueryPacket(payload);
case COM_STMT_PREPARE:
return new MySQLComStmtPreparePacket(payload);
case COM_STMT_EXECUTE:
MySQLServerPreparedStatement serverPreparedStatement =
connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataPacket(payload);
case COM_STMT_RESET:
return new MySQLComStmtResetPacket(payload);
case COM_STMT_CLOSE:
return new MySQLComStmtClosePacket(payload);
case COM_SET_OPTION:
return new MySQLComSetOptionPacket(payload);
case COM_PING:
return new MySQLComPingPacket();
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionPacket();
default:
return new MySQLUnsupportedCommandPacket(commandPacketType);
}
}
|
@Test
void assertNewInstanceWithComTableDumpPacket() {
assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_TABLE_DUMP, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class));
}
|
@SafeVarargs
public static <T> HashSet<T> newHashSet(T... ts) {
return set(false, ts);
}
|
@Test
public void newHashSetTest() {
final Set<String> set = CollUtil.newHashSet((String[]) null);
assertNotNull(set);
}
|
@Override
public ResourceId resolve(String other, ResolveOptions resolveOptions) {
checkState(isDirectory(), "Expected this resource to be a directory, but was [%s]", toString());
if (resolveOptions == StandardResolveOptions.RESOLVE_DIRECTORY) {
if ("..".equals(other)) {
if ("/".equals(key)) {
return this;
}
int parentStopsAt = key.substring(0, key.length() - 1).lastIndexOf('/');
return fromComponents(scheme, bucket, key.substring(0, parentStopsAt + 1));
}
if ("".equals(other)) {
return this;
}
if (!other.endsWith("/")) {
other += "/";
}
if (S3_URI.matcher(other).matches()) {
return resolveFromUri(other);
}
return fromComponents(scheme, bucket, key + other);
}
if (resolveOptions == StandardResolveOptions.RESOLVE_FILE) {
checkArgument(
!other.endsWith("/"), "Cannot resolve a file with a directory path: [%s]", other);
checkArgument(!"..".equals(other), "Cannot resolve parent as file: [%s]", other);
if (S3_URI.matcher(other).matches()) {
return resolveFromUri(other);
}
return fromComponents(scheme, bucket, key + other);
}
throw new UnsupportedOperationException(
String.format("Unexpected StandardResolveOptions [%s]", resolveOptions));
}
|
@Test
public void testResolve() {
for (TestCase testCase : PATH_TEST_CASES) {
ResourceId resourceId = S3ResourceId.fromUri(testCase.baseUri);
ResourceId resolved = resourceId.resolve(testCase.relativePath, testCase.resolveOptions);
assertEquals(testCase.expectedResult, resolved.toString());
}
// Tests for common s3 paths.
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa"),
S3ResourceId.fromUri("s3://bucket/tmp/").resolve("aa", RESOLVE_FILE));
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa/bb/cc/"),
S3ResourceId.fromUri("s3://bucket/tmp/")
.resolve("aa", RESOLVE_DIRECTORY)
.resolve("bb", RESOLVE_DIRECTORY)
.resolve("cc", RESOLVE_DIRECTORY));
// Tests absolute path.
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa"),
S3ResourceId.fromUri("s3://bucket/tmp/bb/").resolve("s3://bucket/tmp/aa", RESOLVE_FILE));
// Tests bucket with no ending '/'.
assertEquals(
S3ResourceId.fromUri("s3://my-bucket/tmp"),
S3ResourceId.fromUri("s3://my-bucket").resolve("tmp", RESOLVE_FILE));
// Tests path with unicode
assertEquals(
S3ResourceId.fromUri("s3://bucket/输出 目录/输出 文件01.txt"),
S3ResourceId.fromUri("s3://bucket/输出 目录/").resolve("输出 文件01.txt", RESOLVE_FILE));
}
|
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@Test
public void testPartitionedIsNotNull() throws Exception {
createPartitionedTable(spark, tableName, "truncate(4, data)");
SparkScanBuilder builder = scanBuilder();
TruncateFunction.TruncateString function = new TruncateFunction.TruncateString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(4), fieldRef("data")));
Predicate predicate = new Predicate("IS_NOT_NULL", expressions(udf));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT IsNotNULL
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(0);
}
|
public static String join(List<?> list, String delim) {
int len = list.size();
if (len == 0) return "";
final StringBuilder result = new StringBuilder(toString(list.get(0), delim));
for (int i = 1; i < len; i++) {
result.append(delim);
result.append(toString(list.get(i), delim));
}
return result.toString();
}
|
@Test
public void testOneElementJoin() throws IOException {
assertEquals("foo", KeyNode.join(Arrays.asList("foo"), ","));
}
|
@Override
public DnsServerAddressStream nameServerAddressStream(String hostname) {
for (;;) {
int i = hostname.indexOf('.', 1);
if (i < 0 || i == hostname.length() - 1) {
return defaultNameServerAddresses.stream();
}
DnsServerAddresses addresses = domainToNameServerStreamMap.get(hostname);
if (addresses != null) {
return addresses.stream();
}
hostname = hostname.substring(i + 1);
}
}
|
@Test
public void ignoreInvalidEntries(@TempDir Path tempDir) throws Exception {
File f = buildFile(tempDir, "domain netty.local\n" +
"nameserver nil\n" +
"nameserver 127.0.0.3\n");
UnixResolverDnsServerAddressStreamProvider p =
new UnixResolverDnsServerAddressStreamProvider(f, null);
DnsServerAddressStream stream = p.nameServerAddressStream("somehost");
assertEquals(1, stream.size());
assertHostNameEquals("127.0.0.3", stream.next());
}
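The stream provider above resolves a hostname by walking its domain suffixes: it drops one leading label per iteration until a configured domain matches, and falls back to the default name servers when no dot-separated suffix remains. A simplified, self-contained sketch of that suffix walk, using plain strings and hypothetical data in place of DnsServerAddresses, might look like this:
import java.util.Map;
public final class DomainSuffixLookupSketch {
    // Hypothetical stand-in for domainToNameServerStreamMap / defaultNameServerAddresses.
    static String lookup(String hostname, Map<String, String> domainToServers, String defaultServers) {
        for (;;) {
            int i = hostname.indexOf('.', 1);
            if (i < 0 || i == hostname.length() - 1) {
                return defaultServers; // unqualified name or trailing dot: use defaults
            }
            String servers = domainToServers.get(hostname);
            if (servers != null) {
                return servers; // first (longest) matching suffix wins
            }
            hostname = hostname.substring(i + 1); // drop the leading label and retry
        }
    }
    public static void main(String[] args) {
        Map<String, String> map = Map.of("netty.local", "127.0.0.3");
        System.out.println(lookup("host.sub.netty.local", map, "defaults")); // 127.0.0.3
        System.out.println(lookup("example.com", map, "defaults"));          // defaults
    }
}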
|
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
DateColumnStatsDataInspector aggregateData = dateInspectorFromStats(aggregateColStats);
DateColumnStatsDataInspector newData = dateInspectorFromStats(newColStats);
Date lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
if (lowValue != null) {
aggregateData.setLowValue(lowValue);
}
Date highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
if (highValue != null) {
aggregateData.setHighValue(highValue);
}
aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
aggregateColStats.getStatsData().setDateStats(aggregateData);
}
|
@Test
public void testMergeNullValues() {
ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Date.class)
.low(null)
.high(null)
.numNulls(1)
.numDVs(0)
.build());
merger.merge(aggrObj, aggrObj);
ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Date.class)
.low(null)
.high(null)
.numNulls(2)
.numDVs(0)
.build();
assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
|
@Override
public KvMetadata resolveMetadata(
boolean isKey,
List<MappingField> resolvedFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey);
String typeName = getCompactTypeName(fieldsByPath, options, isKey);
List<TableField> fields = new ArrayList<>(fieldsByPath.size());
for (Entry<QueryPath, MappingField> entry : fieldsByPath.entrySet()) {
QueryPath path = entry.getKey();
QueryDataType type = entry.getValue().type();
String name = entry.getValue().name();
fields.add(new MapTableField(name, type, false, path));
}
maybeAddDefaultField(isKey, resolvedFields, fields, QueryDataType.OBJECT);
Schema schema = resolveSchema(typeName, getFields(fieldsByPath));
return new KvMetadata(
fields,
GenericQueryTargetDescriptor.DEFAULT,
new CompactUpsertTargetDescriptor(schema)
);
}
|
@Test
@Parameters({
"true, __key",
"false, this"
})
public void test_resolveMetadata(boolean key, String prefix) {
KvMetadata metadata = INSTANCE.resolveMetadata(
key,
List.of(
field("boolean", QueryDataType.BOOLEAN, prefix + ".boolean"),
field("byte", QueryDataType.TINYINT, prefix + ".byte"),
field("short", QueryDataType.SMALLINT, prefix + ".short"),
field("int", QueryDataType.INT, prefix + ".int"),
field("long", QueryDataType.BIGINT, prefix + ".long"),
field("float", QueryDataType.REAL, prefix + ".float"),
field("double", QueryDataType.DOUBLE, prefix + ".double"),
field("decimal", QueryDataType.DECIMAL, prefix + ".decimal"),
field("string", QueryDataType.VARCHAR, prefix + ".string"),
field("time", QueryDataType.TIME, prefix + ".time"),
field("date", QueryDataType.DATE, prefix + ".date"),
field("timestamp", QueryDataType.TIMESTAMP, prefix + ".timestamp"),
field("timestampTz", QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME, prefix + ".timestampTz")
),
Map.of(key ? OPTION_KEY_COMPACT_TYPE_NAME : OPTION_VALUE_COMPACT_TYPE_NAME, "test"),
createSerializationService()
);
assertThat(metadata.getFields()).containsExactly(
new MapTableField("boolean", QueryDataType.BOOLEAN, false, QueryPath.create(prefix + ".boolean")),
new MapTableField("byte", QueryDataType.TINYINT, false, QueryPath.create(prefix + ".byte")),
new MapTableField("short", QueryDataType.SMALLINT, false, QueryPath.create(prefix + ".short")),
new MapTableField("int", QueryDataType.INT, false, QueryPath.create(prefix + ".int")),
new MapTableField("long", QueryDataType.BIGINT, false, QueryPath.create(prefix + ".long")),
new MapTableField("float", QueryDataType.REAL, false, QueryPath.create(prefix + ".float")),
new MapTableField("double", QueryDataType.DOUBLE, false, QueryPath.create(prefix + ".double")),
new MapTableField("decimal", QueryDataType.DECIMAL, false, QueryPath.create(prefix + ".decimal")),
new MapTableField("string", QueryDataType.VARCHAR, false, QueryPath.create(prefix + ".string")),
new MapTableField("time", QueryDataType.TIME, false, QueryPath.create(prefix + ".time")),
new MapTableField("date", QueryDataType.DATE, false, QueryPath.create(prefix + ".date")),
new MapTableField("timestamp", QueryDataType.TIMESTAMP, false, QueryPath.create(prefix + ".timestamp")),
new MapTableField(
"timestampTz",
QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME,
false,
QueryPath.create(prefix + ".timestampTz")
),
new MapTableField(prefix, QueryDataType.OBJECT, true, QueryPath.create(prefix))
);
assertThat(metadata.getQueryTargetDescriptor()).isEqualTo(GenericQueryTargetDescriptor.DEFAULT);
SchemaWriter schemaWriter = new SchemaWriter("test");
schemaWriter.addField(new FieldDescriptor("boolean", FieldKind.NULLABLE_BOOLEAN));
schemaWriter.addField(new FieldDescriptor("byte", FieldKind.NULLABLE_INT8));
schemaWriter.addField(new FieldDescriptor("short", FieldKind.NULLABLE_INT16));
schemaWriter.addField(new FieldDescriptor("int", FieldKind.NULLABLE_INT32));
schemaWriter.addField(new FieldDescriptor("long", FieldKind.NULLABLE_INT64));
schemaWriter.addField(new FieldDescriptor("float", FieldKind.NULLABLE_FLOAT32));
schemaWriter.addField(new FieldDescriptor("double", FieldKind.NULLABLE_FLOAT64));
schemaWriter.addField(new FieldDescriptor("decimal", FieldKind.DECIMAL));
schemaWriter.addField(new FieldDescriptor("string", FieldKind.STRING));
schemaWriter.addField(new FieldDescriptor("time", FieldKind.TIME));
schemaWriter.addField(new FieldDescriptor("date", FieldKind.DATE));
schemaWriter.addField(new FieldDescriptor("timestamp", FieldKind.TIMESTAMP));
schemaWriter.addField(new FieldDescriptor("timestampTz", FieldKind.TIMESTAMP_WITH_TIMEZONE));
assertEquals(metadata.getUpsertTargetDescriptor(), new CompactUpsertTargetDescriptor(schemaWriter.build()));
}
|
public static String gensalt(int log_rounds, SecureRandom random) {
if (log_rounds < MIN_LOG_ROUNDS || log_rounds > MAX_LOG_ROUNDS) {
throw new IllegalArgumentException("Bad number of rounds");
}
StringBuilder rs = new StringBuilder();
byte[] rnd = new byte[BCRYPT_SALT_LEN];
random.nextBytes(rnd);
rs.append("$2a$");
if (log_rounds < 10) {
rs.append("0");
}
rs.append(log_rounds);
rs.append("$");
encode_base64(rnd, rnd.length, rs);
return rs.toString();
}
|
@PrepareForTest({BCrypt.class, SecureRandom.class})
@Test
public void testGensalt() throws Exception {
PowerMockito.whenNew(SecureRandom.class).withNoArguments()
.thenReturn(PowerMockito.mock(SecureRandom.class));
Assert.assertEquals("$2a$10$......................", BCrypt.gensalt());
Assert.assertEquals("$2a$09$......................", BCrypt.gensalt(9));
}
|
public static KeyFormat sanitizeKeyFormat(
final KeyFormat keyFormat,
final List<SqlType> newKeyColumnSqlTypes,
final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
return sanitizeKeyFormatWrapping(
!allowKeyFormatChangeToSupportNewKeySchema ? keyFormat :
sanitizeKeyFormatForTypeCompatibility(
sanitizeKeyFormatForMultipleColumns(
keyFormat,
newKeyColumnSqlTypes.size()),
newKeyColumnSqlTypes
),
newKeyColumnSqlTypes.size() == 1
);
}
|
@Test
public void shouldRemoveUnapplicableKeyWrappingWhenSanitizingMulticolKey() {
// Given:
final KeyFormat format = KeyFormat.nonWindowed(
FormatInfo.of(JsonFormat.NAME),
SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));
// When:
final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, MULTI_SQL_TYPES, true);
// Then:
assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME)));
assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of()));
}
|
@GET
@Path("sql")
@ManualAuthorization
public String handleGetSql(@QueryParam("sql") String sqlQuery, @QueryParam("trace") String traceEnabled,
@QueryParam("queryOptions") String queryOptions, @Context HttpHeaders httpHeaders) {
try {
LOGGER.debug("Trace: {}, Running query: {}", traceEnabled, sqlQuery);
return executeSqlQuery(httpHeaders, sqlQuery, traceEnabled, queryOptions, "/sql");
} catch (ProcessingException pe) {
LOGGER.error("Caught exception while processing get request {}", pe.getMessage());
return constructQueryExceptionResponse(pe);
} catch (WebApplicationException wae) {
LOGGER.error("Caught exception while processing get request", wae);
throw wae;
} catch (Exception e) {
LOGGER.error("Caught exception while processing get request", e);
return constructQueryExceptionResponse(QueryException.getException(QueryException.INTERNAL_ERROR, e));
}
}
|
@Test
public void testV2QueryOnV1() {
String response =
_pinotQueryResource.handleGetSql("WITH tmp AS (SELECT * FROM a) SELECT * FROM tmp", null, null, null);
Assert.assertTrue(response.contains(String.valueOf(QueryException.SQL_PARSING_ERROR_CODE)));
Assert.assertTrue(response.contains("retry the query using the multi-stage query engine"));
}
|
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
}
|
@Test
void parseArtifactResolvePolymorphIdentity() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException, JsonProcessingException, MetadataException {
when(bvdClientMock.retrieveRepresentationAffirmations(anyString())).thenReturn(getBvdResponse());
when(bvdMetadataServiceMock.generateMetadata()).thenReturn(getEntityDescriptor(BVD_ENTITY_ID));
ArtifactResolveRequest artifactResolveRequest = getArtifactResolveRequest("success", true, true, SAML_COMBICONNECT, EncryptionType.BSN, BVD_ENTITY_ID);
artifactResolveRequest.getAdAuthentication().setEncryptionIdType(EncryptionType.PSEUDONIEM.name());
artifactResolveRequest.getAdAuthentication().setPolymorphIdentity("identity");
artifactResolveRequest.getAdAuthentication().setPolymorphPseudonym("polymorphPseudonym");
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(artifactResolveRequest, BVD_ENTITY_ID, BVD);
assertNotNull(artifactResponse);
}
|
@Override
public AppResponse process(Flow flow, AppSessionRequest request) {
if (appSession.getRegistrationId() == null) {
return new NokResponse();
}
Map<String, String> result = digidClient.getExistingApplication(appSession.getRegistrationId());
if (result.get(lowerUnderscore(STATUS)).equals("OK")) {
return new OkResponse();
} else if (result.get(lowerUnderscore(STATUS)).equals("PENDING")) {
// switch state to require replace action
appSession.setState(State.EXISTING_APPLICATION_FOUND.name());
return new StatusResponse("PENDING");
} else {
return new NokResponse();
}
}
|
@Test
void processExistingApplicationTest(){
when(digidClientMock.getExistingApplication(1337L)).thenReturn(Map.of(
lowerUnderscore(STATUS), "PENDING"
));
AppResponse appResponse = checkExistingApplication.process(flowMock, null);
assertEquals(State.EXISTING_APPLICATION_FOUND.name(), checkExistingApplication.getAppSession().getState());
assertTrue(appResponse instanceof StatusResponse);
assertEquals("PENDING", ((StatusResponse) appResponse).getStatus());
}
|
@Override
public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId,
int streamDependency, short weight, boolean exclusive, ChannelPromise promise) {
try {
verifyStreamId(streamId, STREAM_ID);
verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY);
verifyWeight(weight);
ByteBuf buf = ctx.alloc().buffer(PRIORITY_FRAME_LENGTH);
writeFrameHeaderInternal(buf, PRIORITY_ENTRY_LENGTH, PRIORITY, new Http2Flags(), streamId);
buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency);
// Adjust the weight so that it fits into a single byte on the wire.
buf.writeByte(weight - 1);
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
|
@Test
public void writePriorityDefaults() {
frameWriter.writePriority(
ctx, /* streamId= */ 1, /* dependencyId= */ 0, /* weight= */ (short) 16, /* exclusive= */ false, promise);
expectedOutbound = Unpooled.copiedBuffer(new byte[] {
(byte) 0x00, (byte) 0x00, (byte) 0x05, // payload length = 5
(byte) 0x02, // payload type = 2
(byte) 0x00, // flags = 0x00
(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01, // stream id = 1
(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, // dependency id = 0 | exclusive = 0 << 63
(byte) 0x0F, // weight = 15 (implicit +1)
});
assertEquals(expectedOutbound, outbound);
}
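The frame body written above packs the exclusive flag into the top bit of the 4-byte dependency word and transmits the weight minus one, so the 1..256 weight range fits in a single byte. A small standalone sketch of that packing, using illustrative values (exclusive dependency on stream 3, weight 16), might be:
public final class PriorityFramePackingSketch {
    public static void main(String[] args) {
        int streamDependency = 3;      // illustrative stream id
        boolean exclusive = true;
        short weight = 16;             // default HTTP/2 weight
        // Top bit carries the exclusive flag, lower 31 bits the dependency id.
        int dependencyWord = exclusive ? (int) (0x80000000L | streamDependency) : streamDependency;
        // Weight is serialized as weight - 1 so 1..256 fits in one unsigned byte.
        int weightByte = (weight - 1) & 0xff;
        System.out.printf("dependency word = 0x%08x, weight byte = 0x%02x%n", dependencyWord, weightByte);
        // prints: dependency word = 0x80000003, weight byte = 0x0f
    }
}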
|
@ExecuteOn(TaskExecutors.IO)
@Delete(uri = "{namespace}/{id}")
@Operation(tags = {"Templates"}, summary = "Delete a template")
@ApiResponse(responseCode = "204", description = "On success")
public HttpResponse<Void> delete(
@Parameter(description = "The template namespace") @PathVariable String namespace,
@Parameter(description = "The template id") @PathVariable String id
) {
Optional<Template> template = templateRepository.findById(tenantService.resolveTenant(), namespace, id);
if (template.isPresent()) {
templateRepository.delete(template.get());
return HttpResponse.status(HttpStatus.NO_CONTENT);
} else {
return HttpResponse.status(HttpStatus.NOT_FOUND);
}
}
|
@Test
void deleteTemplate() {
Template template = createTemplate();
client.toBlocking().retrieve(POST("/api/v1/templates", template), Template.class);
Template createdTemplate = client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/" + template.getNamespace() + "/" + template.getId()), Template.class);
assertThat(createdTemplate.getId(), is(template.getId()));
HttpResponse<Void> deleteResult = client.toBlocking().exchange(
DELETE("/api/v1/templates/" + template.getNamespace() + "/" + template.getId())
);
assertThat(deleteResult.getStatus(), is(NO_CONTENT));
HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> {
client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/" + template.getNamespace() + "/" + template.getId()));
});
assertThat(e.getStatus(), is(HttpStatus.NOT_FOUND));
}
|
@Override
public boolean supportsNonNullableColumns() {
return false;
}
|
@Test
void assertSupportsNonNullableColumns() {
assertFalse(metaData.supportsNonNullableColumns());
}
|
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
int type = columnDef.getColumnMeta() >> 8;
int length = columnDef.getColumnMeta() & 0xff;
// unpack type & length, see https://bugs.mysql.com/bug.php?id=37426.
if (0x30 != (type & 0x30)) {
length += ((type & 0x30) ^ 0x30) << 4;
type |= 0x30;
}
switch (MySQLBinaryColumnType.valueOf(type)) {
case ENUM:
return readEnumValue(length, payload);
case SET:
return payload.getByteBuf().readByte();
case STRING:
return new MySQLBinaryString(payload.readStringFixByBytes(readActualLength(length, payload)));
default:
throw new UnsupportedSQLOperationException(MySQLBinaryColumnType.valueOf(type).toString());
}
}
|
@Test
void assertReadValueWithUnknownType() {
columnDef.setColumnMeta(MySQLBinaryColumnType.VAR_STRING.getValue() << 8);
assertThrows(UnsupportedSQLOperationException.class, () -> new MySQLStringBinlogProtocolValue().read(columnDef, payload));
}
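The meta unpacking in the reader above follows the workaround from the linked MySQL bug report: for string columns longer than 255 bytes, the high bits of the length are folded into the 0x30 bits of the type byte. A minimal, self-contained sketch of that arithmetic, using an assumed meta value for a CHAR column declared with length 300, could be:
public final class ColumnMetaUnpackSketch {
    public static void main(String[] args) {
        // Assumed on-the-wire meta for MYSQL_TYPE_STRING (0xfe) with length 300:
        // the server flips the 0x30 bits of the type byte to hide the high length bits,
        // leaving type byte 0xee and length byte 300 & 0xff = 0x2c.
        int columnMeta = (0xee << 8) | 0x2c;
        int type = columnMeta >> 8;
        int length = columnMeta & 0xff;
        if (0x30 != (type & 0x30)) {
            length += ((type & 0x30) ^ 0x30) << 4; // restore the hidden high bits of the length
            type |= 0x30;                          // restore the original column type
        }
        System.out.printf("type=0x%x, length=%d%n", type, length); // prints: type=0xfe, length=300
    }
}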
|
@Override
public byte[] array() {
return EmptyArrays.EMPTY_BYTES;
}
|
@Test
public void testArray() {
EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT);
assertThat(empty.hasArray(), is(true));
assertThat(empty.array().length, is(0));
assertThat(empty.arrayOffset(), is(0));
}
|
@Override
protected String createRegistryCacheKey(URL url) {
String namespace = url.getParameter(CONFIG_NAMESPACE_KEY);
url = URL.valueOf(url.toServiceStringWithoutResolving());
if (StringUtils.isNotEmpty(namespace)) {
url = url.addParameter(CONFIG_NAMESPACE_KEY, namespace);
}
return url.toFullString();
}
|
@Test
void testCreateRegistryCacheKeyWithNamespace() {
URL url = URL.valueOf(
"dubbo://" + NetUtils.getLocalAddress().getHostAddress() + ":8080?namespace=test&nacos.check=false");
String registryCacheKey1 = nacosRegistryFactory.createRegistryCacheKey(url);
String registryCacheKey2 = nacosRegistryFactory.createRegistryCacheKey(url);
Assertions.assertEquals(registryCacheKey1, registryCacheKey2);
}
|
public PagesHashStrategyFactory compilePagesHashStrategyFactory(List<Type> types, List<Integer> joinChannels)
{
return compilePagesHashStrategyFactory(types, joinChannels, Optional.empty());
}
|
@Test(dataProvider = "hashEnabledValues")
public void testMultiChannel(boolean hashEnabled)
{
// compile a single channel hash strategy
JoinCompiler joinCompiler = new JoinCompiler(MetadataManager.createTestMetadataManager());
List<Type> types = ImmutableList.of(VARCHAR, VARCHAR, BIGINT, DOUBLE, BOOLEAN, VARCHAR);
List<Type> joinTypes = ImmutableList.of(VARCHAR, BIGINT, DOUBLE, BOOLEAN);
List<Type> outputTypes = ImmutableList.of(VARCHAR, BIGINT, DOUBLE, BOOLEAN, VARCHAR);
List<Integer> joinChannels = Ints.asList(1, 2, 3, 4);
List<Integer> outputChannels = Ints.asList(1, 2, 3, 4, 0);
// create hash strategy with single-channel blocks -- make sure there is some overlap in values
List<Block> extraChannel = ImmutableList.of(
BlockAssertions.createStringSequenceBlock(10, 20),
BlockAssertions.createStringSequenceBlock(20, 30),
BlockAssertions.createStringSequenceBlock(15, 25));
List<Block> varcharChannel = ImmutableList.of(
BlockAssertions.createStringSequenceBlock(10, 20),
BlockAssertions.createStringSequenceBlock(20, 30),
BlockAssertions.createStringSequenceBlock(15, 25));
List<Block> longChannel = ImmutableList.of(
BlockAssertions.createLongSequenceBlock(10, 20),
BlockAssertions.createLongSequenceBlock(20, 30),
BlockAssertions.createLongSequenceBlock(15, 25));
List<Block> doubleChannel = ImmutableList.of(
BlockAssertions.createDoubleSequenceBlock(10, 20),
BlockAssertions.createDoubleSequenceBlock(20, 30),
BlockAssertions.createDoubleSequenceBlock(15, 25));
List<Block> booleanChannel = ImmutableList.of(
BlockAssertions.createBooleanSequenceBlock(10, 20),
BlockAssertions.createBooleanSequenceBlock(20, 30),
BlockAssertions.createBooleanSequenceBlock(15, 25));
List<Block> extraUnusedChannel = ImmutableList.of(
BlockAssertions.createBooleanSequenceBlock(10, 20),
BlockAssertions.createBooleanSequenceBlock(20, 30),
BlockAssertions.createBooleanSequenceBlock(15, 25));
OptionalInt hashChannel = OptionalInt.empty();
ImmutableList<List<Block>> channels = ImmutableList.of(extraChannel, varcharChannel, longChannel, doubleChannel, booleanChannel, extraUnusedChannel);
List<Block> precomputedHash = ImmutableList.of();
if (hashEnabled) {
ImmutableList.Builder<Block> hashChannelBuilder = ImmutableList.builder();
for (int i = 0; i < 3; i++) {
hashChannelBuilder.add(TypeUtils.getHashBlock(joinTypes, varcharChannel.get(i), longChannel.get(i), doubleChannel.get(i), booleanChannel.get(i)));
}
hashChannel = OptionalInt.of(6);
precomputedHash = hashChannelBuilder.build();
channels = ImmutableList.of(extraChannel, varcharChannel, longChannel, doubleChannel, booleanChannel, extraUnusedChannel, precomputedHash);
types = ImmutableList.of(VARCHAR, VARCHAR, BIGINT, DOUBLE, BOOLEAN, VARCHAR, BIGINT);
outputTypes = ImmutableList.of(VARCHAR, BIGINT, DOUBLE, BOOLEAN, VARCHAR, BIGINT);
outputChannels = Ints.asList(1, 2, 3, 4, 0, 6);
}
PagesHashStrategyFactory pagesHashStrategyFactory = joinCompiler.compilePagesHashStrategyFactory(types, joinChannels, Optional.of(outputChannels));
PagesHashStrategy hashStrategy = pagesHashStrategyFactory.createPagesHashStrategy(channels, hashChannel);
// todo add tests for filter function
PagesHashStrategy expectedHashStrategy = new SimplePagesHashStrategy(types, outputChannels, channels, joinChannels, hashChannel, Optional.empty(), FUNCTION_MANAGER);
// verify channel count
assertEquals(hashStrategy.getChannelCount(), outputChannels.size());
// verify size
int instanceSize = ClassLayout.parseClass(hashStrategy.getClass()).instanceSize();
long sizeInBytes = instanceSize + channels.stream()
.flatMap(List::stream)
.mapToLong(Block::getRetainedSizeInBytes)
.sum();
assertEquals(hashStrategy.getSizeInBytes(), sizeInBytes);
// verify hashStrategy is consistent with equals and hash code from block
for (int leftBlockIndex = 0; leftBlockIndex < varcharChannel.size(); leftBlockIndex++) {
PageBuilder pageBuilder = new PageBuilder(outputTypes);
Block[] leftBlocks = new Block[4];
leftBlocks[0] = varcharChannel.get(leftBlockIndex);
leftBlocks[1] = longChannel.get(leftBlockIndex);
leftBlocks[2] = doubleChannel.get(leftBlockIndex);
leftBlocks[3] = booleanChannel.get(leftBlockIndex);
int leftPositionCount = varcharChannel.get(leftBlockIndex).getPositionCount();
for (int leftBlockPosition = 0; leftBlockPosition < leftPositionCount; leftBlockPosition++) {
// hash code of position must match block hash
assertEquals(
hashStrategy.hashPosition(leftBlockIndex, leftBlockPosition),
expectedHashStrategy.hashPosition(leftBlockIndex, leftBlockPosition));
// position must be equal to itself
assertTrue(hashStrategy.positionEqualsPositionIgnoreNulls(leftBlockIndex, leftBlockPosition, leftBlockIndex, leftBlockPosition));
assertTrue(hashStrategy.positionEqualsPosition(leftBlockIndex, leftBlockPosition, leftBlockIndex, leftBlockPosition));
// check equality of every position against every other position in the block
for (int rightBlockIndex = 0; rightBlockIndex < varcharChannel.size(); rightBlockIndex++) {
Block rightBlock = varcharChannel.get(rightBlockIndex);
for (int rightBlockPosition = 0; rightBlockPosition < rightBlock.getPositionCount(); rightBlockPosition++) {
assertEquals(
hashStrategy.positionEqualsPositionIgnoreNulls(leftBlockIndex, leftBlockPosition, rightBlockIndex, rightBlockPosition),
expectedHashStrategy.positionEqualsPositionIgnoreNulls(leftBlockIndex, leftBlockPosition, rightBlockIndex, rightBlockPosition));
assertEquals(
hashStrategy.positionEqualsPosition(leftBlockIndex, leftBlockPosition, rightBlockIndex, rightBlockPosition),
expectedHashStrategy.positionEqualsPosition(leftBlockIndex, leftBlockPosition, rightBlockIndex, rightBlockPosition));
}
}
// check equality of every position against every other position in the block cursor
for (int rightBlockIndex = 0; rightBlockIndex < varcharChannel.size(); rightBlockIndex++) {
Block[] rightBlocks = new Block[4];
rightBlocks[0] = varcharChannel.get(rightBlockIndex);
rightBlocks[1] = longChannel.get(rightBlockIndex);
rightBlocks[2] = doubleChannel.get(rightBlockIndex);
rightBlocks[3] = booleanChannel.get(rightBlockIndex);
int rightPositionCount = varcharChannel.get(rightBlockIndex).getPositionCount();
for (int rightPosition = 0; rightPosition < rightPositionCount; rightPosition++) {
boolean expected = expectedHashStrategy.positionEqualsRow(leftBlockIndex, leftBlockPosition, rightPosition, new Page(rightBlocks));
assertEquals(hashStrategy.positionEqualsRow(leftBlockIndex, leftBlockPosition, rightPosition, new Page(rightBlocks)), expected);
assertEquals(hashStrategy.rowEqualsRow(leftBlockPosition, new Page(leftBlocks), rightPosition, new Page(rightBlocks)), expected);
assertEquals(hashStrategy.positionEqualsRowIgnoreNulls(leftBlockIndex, leftBlockPosition, rightPosition, new Page(rightBlocks)), expected);
}
}
// write position to output block
pageBuilder.declarePosition();
hashStrategy.appendTo(leftBlockIndex, leftBlockPosition, pageBuilder, 0);
}
// verify output block matches
Page page = pageBuilder.build();
if (hashEnabled) {
assertPageEquals(outputTypes, page, new Page(
varcharChannel.get(leftBlockIndex),
longChannel.get(leftBlockIndex),
doubleChannel.get(leftBlockIndex),
booleanChannel.get(leftBlockIndex),
extraChannel.get(leftBlockIndex),
precomputedHash.get(leftBlockIndex)));
}
else {
assertPageEquals(outputTypes, page, new Page(
varcharChannel.get(leftBlockIndex),
longChannel.get(leftBlockIndex),
doubleChannel.get(leftBlockIndex),
booleanChannel.get(leftBlockIndex),
extraChannel.get(leftBlockIndex)));
}
}
}
|
public Collection<V> remove(K key)
{
List<V> removed = data.remove(key);
if (removed != null) {
for (V val : removed) {
inverse.remove(val);
}
}
return removed;
}
|
@Test
public void testRemoveWrongValue()
{
boolean rc = map.remove(41);
assertThat(rc, is(false));
assertSize(1);
}
|
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback)
{
//This code path cannot accept content types or accept types that contain
//multipart/related. This is because these types of requests will usually have very large payloads and therefore
//would degrade server performance since RestRequest reads everything into memory.
if (!isMultipart(request, requestContext, callback))
{
_restRestLiServer.handleRequest(request, requestContext, callback);
}
}
|
@Test(dataProvider = "restOrStream")
public void testDebugRequestHandlers(final RestOrStream restOrStream) throws URISyntaxException
{
//Without a resource
final Callback<RestResponse> noResourceRestResponseCallback = new Callback<RestResponse>()
{
@Override
public void onSuccess(RestResponse restResponse)
{
assertEquals(restResponse.getStatus(), 200);
String responseString = restResponse.getEntity().asString(Charset.defaultCharset());
Assert.assertEquals(responseString, DEBUG_HANDLER_RESPONSE_A);
}
@Override
public void onError(Throwable e)
{
fail();
}
};
if (restOrStream == RestOrStream.REST)
{
RestRequest request = new RestRequestBuilder(new URI("/statuses/1/__debug/a/s")).build();
_server.handleRequest(request, new RequestContext(), noResourceRestResponseCallback);
}
else
{
StreamRequest request = new StreamRequestBuilder(new URI("/statuses/1/__debug/a/s")).build(EntityStreams.emptyStream());
Callback<StreamResponse> callback = new Callback<StreamResponse>()
{
@Override
public void onSuccess(StreamResponse streamResponse)
{
Messages.toRestResponse(streamResponse, new Callback<RestResponse>()
{
@Override
public void onError(Throwable e)
{
Assert.fail();
}
@Override
public void onSuccess(RestResponse result)
{
noResourceRestResponseCallback.onSuccess(result);
}
});
}
@Override
public void onError(Throwable e)
{
fail();
}
};
_server.handleRequest(request, new RequestContext(), callback);
}
//With a resource this time
final StatusCollectionResource statusResource = getMockResource(StatusCollectionResource.class);
EasyMock.expect(statusResource.get(eq(1L))).andReturn(buildStatusRecord()).once();
replay(statusResource);
final Callback<RestResponse> resourceRestResponseCallback = new Callback<RestResponse>()
{
@Override
public void onSuccess(RestResponse restResponse)
{
assertEquals(restResponse.getStatus(), 200);
String responseString = restResponse.getEntity().asString(Charset.defaultCharset());
Assert.assertEquals(responseString, DEBUG_HANDLER_RESPONSE_B);
EasyMock.verify(statusResource);
EasyMock.reset(statusResource);
}
@Override
public void onError(Throwable e)
{
fail();
}
};
if (restOrStream == RestOrStream.REST)
{
RestRequest request = new RestRequestBuilder(new URI("/statuses/1/__debug/b")).build();
_server.handleRequest(request, new RequestContext(), resourceRestResponseCallback);
}
else
{
StreamRequest request = new StreamRequestBuilder(new URI("/statuses/1/__debug/b")).build(EntityStreams.emptyStream());
Callback<StreamResponse> callback = new Callback<StreamResponse>()
{
@Override
public void onSuccess(StreamResponse streamResponse)
{
Messages.toRestResponse(streamResponse, new Callback<RestResponse>()
{
@Override
public void onError(Throwable e)
{
Assert.fail();
}
@Override
public void onSuccess(RestResponse result)
{
resourceRestResponseCallback.onSuccess(result);
}
});
}
@Override
public void onError(Throwable e)
{
fail();
}
};
_server.handleRequest(request, new RequestContext(), callback);
}
}
|
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void atLiteralTime() {
String inputExpression = "@\"23:59:00\"";
BaseNode bool = parse(inputExpression);
assertThat(bool).isInstanceOf(AtLiteralNode.class);
assertThat(bool.getResultType()).isEqualTo(BuiltInType.TIME);
assertLocation(inputExpression, bool);
}
|
public State state() {
// we do not need to use the state lock since the variable is volatile
return state;
}
|
@Test
public void shouldDieOnInvalidOffsetExceptionDuringStartup() throws Exception {
final StateStore globalStore = builder.globalStateStores().get(GLOBAL_STORE_NAME);
initializeConsumer();
mockConsumer.setPollException(new InvalidOffsetException("Try Again!") {
@Override
public Set<TopicPartition> partitions() {
return Collections.singleton(topicPartition);
}
});
startAndSwallowError();
TestUtils.waitForCondition(
() -> globalStreamThread.state() == DEAD,
10 * 1000,
"GlobalStreamThread should have died."
);
globalStreamThread.join();
assertThat(globalStore.isOpen(), is(false));
assertFalse(new File(baseDirectoryName + File.separator + "testAppId" + File.separator + "global").exists());
}
|
static <ID, T> TaskExecutors<ID, T> batchExecutors(final String name,
int workerCount,
final TaskProcessor<T> processor,
final AcceptorExecutor<ID, T> acceptorExecutor) {
final AtomicBoolean isShutdown = new AtomicBoolean();
final TaskExecutorMetrics metrics = new TaskExecutorMetrics(name);
registeredMonitors.put(name, metrics);
return new TaskExecutors<>(idx -> new BatchWorkerRunnable<>("TaskBatchingWorker-" + name + '-' + idx, isShutdown, metrics, processor, acceptorExecutor), workerCount, isShutdown);
}
|
@Test
public void testBatchProcessingWithPermanentError() throws Exception {
taskExecutors = TaskExecutors.batchExecutors("TEST", 1, processor, acceptorExecutor);
List<TaskHolder<Integer, ProcessingResult>> taskHolderBatch = asList(permanentErrorTaskHolder(1), permanentErrorTaskHolder(2));
taskBatchQueue.add(taskHolderBatch);
// Verify that permanently failed tasks are not re-scheduled
processor.expectPermanentErrors(2);
verify(acceptorExecutor, never()).reprocess(taskHolderBatch, ProcessingResult.TransientError);
}
|
@Override
public FieldSet getForwardingTargetFields(int input, int sourceField) {
if (input != 0) {
throw new IndexOutOfBoundsException();
}
return this.fieldMapping.containsKey(sourceField)
? this.fieldMapping.get(sourceField)
: FieldSet.EMPTY_SET;
}
|
@Test
void testAllForwardedSingleInputSemPropsInvalidIndex2() {
assertThatThrownBy(
() -> {
SingleInputSemanticProperties sp =
new SingleInputSemanticProperties
.AllFieldsForwardedProperties();
sp.getForwardingTargetFields(1, 0);
})
.isInstanceOf(IndexOutOfBoundsException.class);
}
|
@Override
public PageResult<ProductSpuDO> getSpuPage(ProductSpuPageReqVO pageReqVO) {
return productSpuMapper.selectPage(pageReqVO);
}
|
@Test
void testGetSpuPage() {
// Prepare parameters
ProductSpuDO createReqVO = randomPojo(ProductSpuDO.class, o -> {
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1, 100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
o.setPrice(generaInt()); // constrain to a positive integer
o.setMarketPrice(generaInt()); // constrain to a positive integer
o.setCostPrice(generaInt()); // constrain to a positive integer
o.setStock(generaInt()); // constrain to a positive integer
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setSalesCount(generaInt()); // constrain to a positive integer
o.setBrowseCount(generaInt()); // constrain to a positive integer
});
// Insert the prepared record
productSpuMapper.insert(createReqVO);
// Test: status does not match
productSpuMapper.insert(cloneIgnoreId(createReqVO, o -> o.setStatus(ProductSpuStatusEnum.DISABLE.getStatus())));
productSpuMapper.insert(cloneIgnoreId(createReqVO, o -> o.setStatus(ProductSpuStatusEnum.RECYCLE.getStatus())));
// Test: SpecType does not match
productSpuMapper.insert(cloneIgnoreId(createReqVO, o -> o.setSpecType(true)));
// Test: BrandId does not match
productSpuMapper.insert(cloneIgnoreId(createReqVO, o -> o.setBrandId(generateId())));
// Test: CategoryId does not match
productSpuMapper.insert(cloneIgnoreId(createReqVO, o -> o.setCategoryId(generateId())));
// Invoke
ProductSpuPageReqVO productSpuPageReqVO = new ProductSpuPageReqVO();
// Query conditions, uncomment as needed
//productSpuPageReqVO.setTabType(ProductSpuPageReqVO.ALERT_STOCK);
//productSpuPageReqVO.setTabType(ProductSpuPageReqVO.RECYCLE_BIN);
//productSpuPageReqVO.setTabType(ProductSpuPageReqVO.FOR_SALE);
//productSpuPageReqVO.setTabType(ProductSpuPageReqVO.IN_WAREHOUSE);
//productSpuPageReqVO.setTabType(ProductSpuPageReqVO.SOLD_OUT);
//productSpuPageReqVO.setName(createReqVO.getName());
//productSpuPageReqVO.setCategoryId(createReqVO.getCategoryId());
PageResult<ProductSpuDO> spuPage = productSpuService.getSpuPage(productSpuPageReqVO);
assertEquals(1, spuPage.getTotal());
}
|
public boolean denied(String name, MediaType mediaType) {
String suffix = (name.contains(".") ? name.substring(name.lastIndexOf(".") + 1) : "").toLowerCase(Locale.ROOT);
boolean defaultDeny = false;
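// Explicit deny-list hits deny and explicit allow-list hits allow; otherwise each configured
// list that is checked and missed updates the fall-through default (missed allow list => deny,
// missed deny list => allow), and that default is returned at the end.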
if (CollectionUtils.isNotEmpty(denyFiles)) {
if (denyFiles.contains(suffix)) {
return true;
}
defaultDeny = false;
}
if (CollectionUtils.isNotEmpty(allowFiles)) {
if (allowFiles.contains(suffix)) {
return false;
}
defaultDeny = true;
}
if (CollectionUtils.isNotEmpty(denyMediaType)) {
if (denyMediaType.contains(mediaType.toString())) {
return true;
}
defaultDeny = false;
}
if (CollectionUtils.isNotEmpty(allowMediaType)) {
if (allowMediaType.contains(mediaType.toString())) {
return false;
}
defaultDeny = true;
}
return defaultDeny;
}
|
@Test
public void testNoSet() {
FileUploadProperties uploadProperties = new FileUploadProperties();
assertFalse(uploadProperties.denied("test.xls", MediaType.ALL));
assertFalse(uploadProperties.denied("test.exe", MediaType.ALL));
}
|
public Class<?> getSerializedClass() {
return serializedClass;
}
|
@Test
void testEmptyConstructor() {
NacosSerializationException exception = new NacosSerializationException();
assertEquals(Constants.Exception.SERIALIZE_ERROR_CODE, exception.getErrCode());
assertNull(exception.getMessage());
assertNull(exception.getSerializedClass());
}
|
public boolean setCodeVariants(DefaultIssue issue, Set<String> currentCodeVariants, IssueChangeContext context) {
Set<String> newCodeVariants = getNewCodeVariants(issue);
if (!currentCodeVariants.equals(newCodeVariants)) {
issue.setFieldChange(context, CODE_VARIANTS,
currentCodeVariants.isEmpty() ? null : CHANGELOG_LIST_JOINER.join(currentCodeVariants),
newCodeVariants.isEmpty() ? null : CHANGELOG_LIST_JOINER.join(newCodeVariants));
issue.setCodeVariants(newCodeVariants);
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
}
return false;
}
|
@Test
void setCodeVariants_whenCodeVariantsUnchanged_shouldNotBeUpdated() {
Set<String> currentCodeVariants = new HashSet<>(Arrays.asList("linux", "windows"));
Set<String> newCodeVariants = new HashSet<>(Arrays.asList("windows", "linux"));
issue.setCodeVariants(newCodeVariants);
boolean updated = underTest.setCodeVariants(issue, currentCodeVariants, context);
assertThat(updated).isFalse();
assertThat(issue.currentChange()).isNull();
}
|
public static void clean(
Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) {
clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>()));
}
|
@Test
void testComplexInnerClassClean() throws Exception {
MapFunction<Integer, Integer> complexMap =
new InnerComplexMap((MapFunction<Integer, Integer>) value -> value + 1);
ClosureCleaner.clean(complexMap, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
int result = complexMap.map(3);
assertThat(result).isEqualTo(4);
}
|
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
}
|
@Test
public void poolFailsWhenConsumerDoesNotExist() throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis
.stubSubscribeToShard("shard-000", eventWithRecords(3))
.failWith(
new CompletionException(
"Err ...",
ResourceNotFoundException.builder()
.cause(null)
.awsErrorDetails(
AwsErrorDetails.builder()
.serviceName("Kinesis")
.errorCode("ResourceNotFoundException")
.errorMessage("Consumer consumer-01 not found.")
.build())
.build()));
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3, 7));
kinesis.stubSubscribeToShard("shard-000", eventsWithoutRecords(3, 10));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(3));
kinesis.stubSubscribeToShard("shard-001", eventsWithoutRecords(3, 8));
KinesisReaderCheckpoint initialCheckpoint =
initialLatestCheckpoint(ImmutableList.of("shard-000", "shard-001"));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
Throwable exception = assertThrows(IOException.class, () -> waitForRecords(pool, 10));
assertThat(exception.getMessage())
.isEqualTo("java.util.concurrent.CompletionException: Err ...");
Throwable cause = exception.getCause().getCause();
assertThat(cause).isInstanceOf(ResourceNotFoundException.class);
assertThat(cause.getMessage())
.isEqualTo(
"Consumer consumer-01 not found. (Service: Kinesis, Status Code: 0, Request ID: null)");
}
|
void addGetModelsMethod(StringBuilder sb) {
sb.append(
" @Override\n" +
" public java.util.List<Model> getModels() {\n" +
" return java.util.Arrays.asList(" );
String collected = modelsByKBase.values().stream().flatMap( List::stream ).distinct()
.map(element -> "new " + element + "()")
.collect(Collectors.joining(","));
sb.append(collected);
sb.append(
");\n" +
" }\n" +
"\n");
}
|
@Test
public void addGetModelsMethodEmptyModelsByKBaseValuesTest() {
Map<String, List<String>> modelsByKBase = new HashMap<>();
modelsByKBase.put("default-kie", Collections.emptyList());
ModelSourceClass modelSourceClass = new ModelSourceClass(RELEASE_ID, new HashMap<>(), modelsByKBase);
StringBuilder sb = new StringBuilder();
modelSourceClass.addGetModelsMethod(sb);
String retrieved = sb.toString();
String expected = "return java.util.Arrays.asList();";
assertThat(retrieved.contains(expected)).isTrue();
String unexpected = "return java.util.Arrays.asList(new ());";
assertThat(retrieved.contains(unexpected)).isFalse();
}
|
public ClassLoader getClassLoader() {
return classLoader;
}
|
@Test(timeOut = 60_000)
public void testClassGC() {
WeakHashMap<Object, Boolean> map = new WeakHashMap<>();
furyGC(map);
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
Descriptor.clearDescriptorCache();
TestUtils.triggerOOMForSoftGC(
() -> {
System.out.printf("Wait map keys %s gc.\n", map.keySet());
return !map.isEmpty();
});
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
}
|
@Override
public <K, T> UncommittedBundle<T> createKeyedBundle(
StructuralKey<K> key, PCollection<T> output) {
return UncommittedImmutableListBundle.create(output, key);
}
|
@Test
public void keyedWithByteArrayKeyShouldCreateKeyedBundle() throws Exception {
createKeyedBundle(ByteArrayCoder.of(), new byte[] {0, 2, 4, 99});
}
|
public static KiePMMLOutputField getKiePMMLOutputField(final OutputField outputField) {
String name = outputField.getName() != null ? outputField.getName() : "" + outputField.hashCode();
final String targetField = outputField.getTargetField() != null ? outputField.getTargetField() : null;
final RESULT_FEATURE resultFeature = outputField.getResultFeature() != null ?
RESULT_FEATURE.byName(outputField.getResultFeature().value()) : null;
final DATA_TYPE dataType = outputField.getDataType() != null ?
DATA_TYPE.byName(outputField.getDataType().value()) : null;
final KiePMMLExpression kiePMMLExpression = outputField.getExpression() != null ?
getKiePMMLExpression(outputField.getExpression()) : null;
final KiePMMLOutputField.Builder builder = KiePMMLOutputField.builder(name, Collections.emptyList())
.withResultFeature(resultFeature)
.withTargetField(targetField)
.withValue(outputField.getValue())
.withDataType(dataType)
.withRank(outputField.getRank())
.withKiePMMLExpression(kiePMMLExpression);
return builder.build();
}
|
@Test
void getKiePMMLOutputField() {
OutputField toConvert = getRandomOutputField();
KiePMMLOutputField retrieved = KiePMMLOutputFieldInstanceFactory.getKiePMMLOutputField(toConvert);
commonVerifyKiePMMLOutputField(retrieved, toConvert);
}
|