focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Static factory: creates a {@link CronPattern} from a cron expression string.
 *
 * @param pattern cron expression (passed through to the CronPattern constructor unchanged)
 * @return a new CronPattern wrapping the given expression
 */
public static CronPattern of(String pattern) {
    return new CronPattern(pattern);
}
|
@Test
public void patternTest() {
    // Pattern "* 0 4 * * ?" matches any second of minute 0, hour 4, any day.
    CronPattern pattern = CronPattern.of("* 0 4 * * ?");
    assertMatch(pattern, "2017-02-09 04:00:00");
    assertMatch(pattern, "2017-02-19 04:00:33");
    // 6-field Quartz-style expression
    // NOTE(review): this pattern is identical to the one above; the comment suggests a
    // different (Quartz-specific) expression may have been intended here — verify.
    pattern = CronPattern.of("* 0 4 * * ?");
    assertMatch(pattern, "2017-02-09 04:00:00");
    assertMatch(pattern, "2017-02-19 04:00:33");
}
|
@Override
public void run() {
    // Skip the whole redo cycle while the gRPC connection is down; the task will be
    // re-scheduled and retried by its executor.
    if (!redoService.isConnected()) {
        LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
        return;
    }
    try {
        // Replay pending instance registrations first, then subscriptions.
        redoForInstances();
        redoForSubscribes();
    } catch (Exception e) {
        // Never let an exception escape — it would kill the scheduled task.
        LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
    }
}
|
@Test
void testRunRedoRegisterSubscriber() throws NacosException {
    // A subscriber redo entry that needs (re-)subscribing should trigger doSubscribe once.
    Set<SubscriberRedoData> mockData = generateMockSubscriberData(false, false, true);
    when(redoService.findSubscriberRedoData()).thenReturn(mockData);
    redoTask.run();
    verify(clientProxy).doSubscribe(SERVICE, GROUP, CLUSTER);
}
|
/**
 * GZIP-compresses the given string (encoded with {@code Const.XML_ENCODING}) and returns
 * the compressed bytes as a Base64 string.
 *
 * @param in the string to compress and encode
 * @return Base64 text of the gzipped input
 * @throws IOException if compression fails
 */
public static String encodeBase64ZippedString( String in ) throws IOException {
    Charset charset = Charset.forName( Const.XML_ENCODING );
    ByteArrayOutputStream baos = new ByteArrayOutputStream( 1024 );
    // Streams are closed in reverse declaration order: the GZIP stream is finished first,
    // then the Base64 stream flushes its final padding into baos.
    try ( Base64OutputStream base64OutputStream = new Base64OutputStream( baos );
        GZIPOutputStream gzos = new GZIPOutputStream( base64OutputStream ) ) {
        gzos.write( in.getBytes( charset ) );
    }
    // Base64 output is plain ASCII; decode with an explicit charset instead of the
    // platform default so the result is identical on every JVM.
    return baos.toString( "US-ASCII" );
}
|
@Test
public void testEncodeBase64ZippedString() throws IOException {
    // Round-trip: encode then decode must reproduce the original string exactly.
    String enc64 = HttpUtil.encodeBase64ZippedString( STANDART );
    String decoded = HttpUtil.decodeBase64ZippedString( enc64 );
    Assert.assertEquals( "Strings are the same after transformation", STANDART, decoded );
}
|
/**
 * Server-side copy of {@code source} to {@code target} via the Storegate Files API.
 * Overwrites an existing target (mode 1) and refreshes the file-id cache for the target.
 *
 * @throws BackgroundException when the API call fails (mapped from ApiException)
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    try {
        final CopyFileRequest copy = new CopyFileRequest()
            .name(target.getName())
            .parentID(fileid.getFileId(target.getParent()))
            .mode(1); // Overwrite
        final File file = new FilesApi(session.getClient()).filesCopy(
            fileid.getFileId(source), copy);
        // Report the whole length as transferred — the copy happens server-side in one call.
        listener.sent(status.getLength());
        fileid.cache(target, file.getId());
        return target.withAttributes(new StoregateAttributesFinderFeature(session, fileid).toAttributes(file));
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map("Cannot copy {0}", e, source);
    }
}
|
@Test
public void testCopyWithRenameToExistingFile() throws Exception {
    // Copying onto an existing file must produce a new file id; both paths remain findable.
    final StoregateIdProvider fileid = new StoregateIdProvider(session);
    final Path top = new StoregateDirectoryFeature(session, fileid).mkdir(new Path(
        String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path folder = new Path(top, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    new StoregateDirectoryFeature(session, fileid).mkdir(folder, new TransferStatus());
    final Path test = new StoregateTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path test2 = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateTouchFeature(session, fileid).touch(test2, new TransferStatus());
    assertNotEquals(test.attributes().getFileId(), new StoregateCopyFeature(session, fileid).copy(test, test2, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener()).attributes().getFileId());
    final Find find = new DefaultFindFeature(session);
    // Removed an unused local that listed the folder but never asserted on the result.
    assertTrue(find.find(test));
    assertTrue(find.find(test2));
    // Cleanup: delete the whole test directory tree.
    new StoregateDeleteFeature(session, fileid).delete(Collections.singletonList(top), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Initializes the proxy CA: generates the CA certificate and key pair, then performs
 * the remaining internal setup.
 *
 * @throws GeneralSecurityException on key/certificate generation failure
 * @throws IOException on I/O failure during initialization
 */
public void init() throws GeneralSecurityException, IOException {
    createCACertAndKeyPair();
    initInternal();
}
|
@Test
void testInit() throws Exception {
    ProxyCA proxyCA = new ProxyCA();
    // Before init() everything is unset...
    assertNull(proxyCA.getCaCert());
    assertNull(proxyCA.getCaKeyPair());
    assertNull(proxyCA.getX509KeyManager());
    assertNull(proxyCA.getHostnameVerifier());
    proxyCA.init();
    // ...and after init() all artifacts are populated.
    assertNotNull(proxyCA.getCaCert());
    assertNotNull(proxyCA.getCaKeyPair());
    assertNotNull(proxyCA.getX509KeyManager());
    assertNotNull(proxyCA.getHostnameVerifier());
}
|
/**
 * Resolves the consumer ARN for the spec's stream. A per-stream mapping supplied via
 * pipeline options takes precedence over the ARN configured on the spec; an explicit
 * {@code null} mapping in the options is honored (hence containsKey, not getOrDefault).
 */
static @Nullable String resolveConsumerArn(Read spec, PipelineOptions options) {
    String streamName = Preconditions.checkArgumentNotNull(spec.getStreamName());
    KinesisIOOptions sourceOptions = options.as(KinesisIOOptions.class);
    Map<String, String> arnByStream = sourceOptions.getKinesisIOConsumerArns();
    return arnByStream.containsKey(streamName)
        ? arnByStream.get(streamName) // can resolve to null too
        : spec.getConsumerArn();
}
|
@Test
public void testConsumerArnInPipelineOptionsOverwritesIOSetting() {
    // The ARN from pipeline options must win over the one configured on the read spec.
    KinesisIO.Read readSpec =
        KinesisIO.read().withStreamName("stream-xxx").withConsumerArn("arn-ignored");
    KinesisIOOptions options =
        createIOOptions("--kinesisIOConsumerArns={\"stream-xxx\": \"arn-01\"}");
    assertThat(KinesisSource.resolveConsumerArn(readSpec, options)).isEqualTo("arn-01");
}
|
/**
 * Only input-stream resources whose model type is PMML (case-insensitive) are supported.
 */
@Override
public boolean canManageResource(EfestoResource toProcess) {
    if (!(toProcess instanceof EfestoInputStreamResource)) {
        return false;
    }
    return ((EfestoInputStreamResource) toProcess).getModelType().equalsIgnoreCase(PMML_STRING);
}
|
@Test
void canManageResource() throws IOException {
    // A PMML input-stream resource is accepted; a plain file resource is rejected.
    String fileName = "LinearRegressionSample.pmml";
    File pmmlFile = getFileFromFileName(fileName).orElseThrow(() -> new RuntimeException("Failed to get pmmlFIle"));
    EfestoInputStreamResource toProcess = new EfestoInputStreamResource(Files.newInputStream(pmmlFile.toPath()), fileName);
    assertThat(kieCompilerService.canManageResource(toProcess)).isTrue();
    EfestoFileResource notToProcess = new EfestoFileResource(pmmlFile);
    assertThat(kieCompilerService.canManageResource(notToProcess)).isFalse();
}
|
/**
 * Non-blocking receive: polls the local message cache and converts the result to the
 * OMS message type, or returns {@code null} when the cache is empty.
 */
@Override
public Message receive() {
    final MessageExt polled = localMessageCache.poll();
    if (polled == null) {
        return null;
    }
    return OMSUtil.msgConvert(polled);
}
|
@Test
public void testPoll_WithTimeout() {
    // There is a default timeout value, @see ClientConfig#omsOperationTimeout.
    // With nothing in the cache, both the default-timeout and explicit-timeout
    // receive calls return null.
    Message message = consumer.receive();
    assertThat(message).isNull();
    message = consumer.receive(OMS.newKeyValue().put(Message.BuiltinKeys.TIMEOUT, 100));
    assertThat(message).isNull();
}
|
/**
 * Decompresses a Base64-encoded, compressed string using the named compressor.
 *
 * @param compressorName name of a registered compressor; getCompressor is expected to
 *                       reject unknown names
 * @param compressedString Base64 text of the compressed payload; must not be null
 * @return the decompressed string decoded with DEFAULT_ENCODING
 * @throws IOException if decompression fails
 */
public String decompress(String compressorName, String compressedString) throws IOException {
    Checks.notNull(compressedString, "compressedString cannot be null");
    Compressor compressor = getCompressor(compressorName);
    return new String(compressor.decompress(base64Decode(compressedString)), DEFAULT_ENCODING);
}
|
@Test
public void decompressShouldThrowExceptionIfCompressorNotFound() {
    // Unknown compressor names surface as an NPE with a descriptive message.
    AssertHelper.assertThrows(
        "decompress should throw exception if compressor not found",
        NullPointerException.class,
        "unknown compressorName: abcd",
        () -> stringCodec.decompress("abcd", "H4sIAAAAAAAA/0tMBAMAdCCLWwcAAAA="));
}
|
/**
 * Pretty-prints the timing report for the watch registered under {@code taskId}.
 *
 * @param taskId id previously passed to startTask
 * @return the watch's formatted report
 * @throws NullPointerException if no watch is registered for the id (now with a
 *         descriptive message instead of a bare NPE from the map lookup)
 */
public static String prettyPrint(String taskId) {
    if (!taskWatchMap.containsKey(taskId)) {
        throw new NullPointerException("No task registered for taskId: " + taskId);
    }
    return taskWatchMap.get(taskId).prettyPrint();
}
|
@Test
void testPrettyPrint() {
    // Start and stop a task, then verify a non-null report can be produced for it.
    TaskTimeCaculateUtil.startTask("Task 1", "taskId1");
    // Perform some task-related operations
    // ...
    TaskTimeCaculateUtil.stop("taskId1");
    String output = TaskTimeCaculateUtil.prettyPrint("taskId1");
    Assertions.assertNotNull(output);
    System.out.println(output);
}
|
/**
 * Decodes a MySQL binary JSON value from the buffer into its textual JSON form.
 *
 * @param byteBuf buffer positioned at the value-type byte of the JSON document
 * @return the decoded JSON as a String
 */
public static Serializable decode(final ByteBuf byteBuf) {
    // readUnsignedByte() already returns a value in [0, 255]; the former "& 0xff" was redundant.
    int valueType = byteBuf.readUnsignedByte();
    StringBuilder result = new StringBuilder();
    decodeValue(valueType, 1, byteBuf, result);
    return result.toString();
}
|
@Test
void assertDecodeLargeJsonObjectWithInt16() {
    // 0x7fff is the largest int16; 0x8000 wraps to the smallest (-32768).
    List<JsonEntry> jsonEntries = new LinkedList<>();
    jsonEntries.add(new JsonEntry(JsonValueTypes.INT16, "key1", 0x00007fff));
    jsonEntries.add(new JsonEntry(JsonValueTypes.INT16, "key2", 0x00008000));
    ByteBuf payload = mockJsonObjectByteBuf(jsonEntries, false);
    String actual = (String) MySQLJsonValueDecoder.decode(payload);
    assertThat(actual, is("{\"key1\":32767,\"key2\":-32768}"));
}
|
// Sample standard deviation UDAF for DOUBLE columns, accumulating SUM, COUNT and M2
// (Welford-style sum of squared deviations) in the aggregate struct.
@UdafFactory(description = "Compute sample standard deviation of column with type Double.",
    aggregateSchema = "STRUCT<SUM double, COUNT bigint, M2 double>")
public static TableUdaf<Double, Struct, Double> stdDevDouble() {
    return getStdDevImplementation(
        0.0,
        STRUCT_DOUBLE,
        // New running sum after adding a value.
        (agg, newValue) -> newValue + agg.getFloat64(SUM),
        // Numerator term used by the shared implementation to update M2 incrementally.
        (agg, newValue) -> newValue * (agg.getInt64(COUNT) + 1) - (agg.getFloat64(SUM) + newValue),
        // Difference of the two partial means, used when merging two aggregates.
        (agg1, agg2) ->
            agg1.getFloat64(SUM) / agg1.getInt64(COUNT)
                - agg2.getFloat64(SUM) / agg2.getInt64(COUNT),
        // Combined sum of two partial aggregates.
        (agg1, agg2) -> agg1.getFloat64(SUM) + agg2.getFloat64(SUM),
        // Running sum after undoing (removing) a value.
        (agg, valueToRemove) -> agg.getFloat64(SUM) - valueToRemove);
}
|
@Test
public void shouldMergeDoubles() {
    // Build two partial aggregates of three values each, merge them, and check that
    // COUNT, SUM, M2 and the resulting sample standard deviation are all correct.
    final TableUdaf<Double, Struct, Double> udaf = stdDevDouble();
    Struct left = udaf.initialize();
    final Double[] leftValues = new Double[] {5.5, 8.4, 10.9};
    for (final Double thisValue : leftValues) {
        left = udaf.aggregate(thisValue, left);
    }
    Struct right = udaf.initialize();
    final Double[] rightValues = new Double[] {6.3, 7.2, 9.7};
    for (final Double thisValue : rightValues) {
        right = udaf.aggregate(thisValue, right);
    }
    final Struct merged = udaf.merge(left, right);
    assertThat(merged.getInt64(COUNT), equalTo(6L));
    assertThat(merged.getFloat64(SUM), equalTo(48.0));
    assertThat(merged.getFloat64(M2), equalTo(21.240000000000006));
    final double standardDev = udaf.map(merged);
    assertThat(standardDev, equalTo(4.248000000000001));
}
|
/**
 * Delegates to the shared canManageEfestoInput helper to decide whether this runtime
 * service can evaluate the given input in the given context.
 */
@Override
public boolean canManageInput(EfestoInput toEvaluate, EfestoRuntimeContext context) {
    return canManageEfestoInput(toEvaluate, context);
}
|
@Test
void canManageManageableInput() {
    // A PMML-model input built from a valid model URI must be accepted by the service.
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
    Map<String, Object> inputData = new HashMap<>();
    inputPMML = new BaseEfestoInput<>(modelLocalUriId, inputData);
    assertThat(kieRuntimeServicePMMLMapInput.canManageInput(inputPMML,
        getEfestoContext(memoryCompilerClassLoader))).isTrue();
}
|
/**
 * Serializes a non-negative BigInteger into exactly {@code numBytes} big-endian bytes,
 * left-padded with zeros. A leading zero byte emitted by toByteArray() purely for the
 * sign bit is stripped before the size check.
 *
 * @param b        non-negative value to serialize
 * @param numBytes positive length of the output array
 * @return big-endian, zero-padded representation of {@code b}
 */
public static byte[] bigIntegerToBytes(BigInteger b, int numBytes) {
    checkArgument(b.signum() >= 0, () -> "b must be positive or zero: " + b);
    checkArgument(numBytes > 0, () -> "numBytes must be positive: " + numBytes);
    byte[] raw = b.toByteArray();
    boolean hasSignByte = raw[0] == 0;
    int significant = hasSignByte ? raw.length - 1 : raw.length;
    checkArgument(significant <= numBytes, () -> "The given number does not fit in " + numBytes);
    byte[] out = new byte[numBytes];
    System.arraycopy(raw, hasSignByte ? 1 : 0, out, numBytes - significant, significant);
    return out;
}
|
@Test
public void bigIntegerToBytes_singleByteSignFit() {
    // A value that fits in one byte with a clear sign bit needs no padding or stripping.
    BigInteger b = BigInteger.valueOf(0b0000_1111);
    byte[] expected = new byte[]{0b0000_1111};
    byte[] actual = ByteUtils.bigIntegerToBytes(b, 1);
    assertArrayEquals(expected, actual);
}
|
/**
 * Static factory: builds an Ip4Prefix from a raw 32-bit address and a prefix length.
 *
 * @param address      IPv4 address as a packed int
 * @param prefixLength prefix length in bits
 * @return the corresponding Ip4Prefix
 */
public static Ip4Prefix valueOf(int address, int prefixLength) {
    return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength);
}
|
@Test
public void testContainsIpAddressIPv4() {
    Ip4Prefix ipPrefix;
    // /24: only addresses sharing the first three octets are contained.
    ipPrefix = Ip4Prefix.valueOf("1.2.0.0/24");
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.2.0.0")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.2.0.4")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.3.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("0.0.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("255.255.255.255")));
    // /32: only the exact address is contained.
    ipPrefix = Ip4Prefix.valueOf("1.2.0.0/32");
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.2.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.2.0.4")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.3.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("0.0.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("255.255.255.255")));
    // /0: the default route contains every address.
    ipPrefix = Ip4Prefix.valueOf("0.0.0.0/0");
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.2.0.0")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.2.0.4")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("1.3.0.0")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("0.0.0.0")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("255.255.255.255")));
    // Broadcast /32: contains only itself.
    ipPrefix = Ip4Prefix.valueOf("255.255.255.255/32");
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.2.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.2.0.4")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("1.3.0.0")));
    assertFalse(ipPrefix.contains(Ip4Address.valueOf("0.0.0.0")));
    assertTrue(ipPrefix.contains(Ip4Address.valueOf("255.255.255.255")));
}
|
/**
 * Unnamed variant of filter: delegates to the named overload with an empty name.
 */
@Override
public KStream<K, V> filter(final Predicate<? super K, ? super V> predicate) {
    return filter(predicate, NamedInternal.empty());
}
|
@Test
public void shouldNotAllowNullNamedOnFilter() {
    // Passing null as the Named argument must be rejected with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.filter((k, v) -> true, null));
    assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
/**
 * Returns paths between two elements using the default link weigher (null), after
 * verifying the caller holds the TOPOLOGY_READ permission.
 */
@Override
public Set<Path> getPaths(ElementId src, ElementId dst) {
    checkPermission(TOPOLOGY_READ);
    return getPaths(src, dst, (LinkWeigher) null);
}
|
@Test
public void edgeToInfra() {
    // A host-to-device query should stitch the host's edge link onto the infra path,
    // yielding one path of three hops.
    HostId src = hid("12:34:56:78:90:ab/1");
    DeviceId dst = did("dst");
    fakeTopoMgr.paths.add(createPath("edge", "middle", "dst"));
    fakeHostMgr.hosts.put(src, host("12:34:56:78:90:ab/1", "edge"));
    Set<Path> paths = service.getPaths(src, dst);
    validatePaths(paths, 1, 3, src, dst);
}
|
/**
 * Builds the SCMPluginInfo for a plugin by querying the SCM extension for its
 * configuration, view and settings. Fails fast with a RuntimeException if the plugin
 * returns a null configuration or view.
 */
@Override
public SCMPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    SCMPropertyConfiguration configuration = extension.getSCMConfiguration(descriptor.id());
    SCMView view = extension.getSCMView(descriptor.id());
    PluggableInstanceSettings settingsAndView = getPluginSettingsAndView(descriptor, extension);
    if (configuration == null) {
        throw new RuntimeException(format("Plugin[%s] returned null scm configuration", descriptor.id()));
    }
    if (view == null) {
        throw new RuntimeException(format("Plugin[%s] returned null scm view", descriptor.id()));
    }
    PluggableInstanceSettings scmSettings =
            new PluggableInstanceSettings(scmPluginConfigurations(configuration), new PluginView(view.template()));
    return new SCMPluginInfo(descriptor, view.displayValue(), scmSettings, settingsAndView);
}
|
@Test
public void shouldBuildPluginInfo() throws Exception {
    // Verifies descriptor, extension name, display name, SCM settings and plugin
    // settings are all assembled from the (mocked) extension responses.
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    SCMPluginInfo pluginInfo = new SCMPluginInfoBuilder(extension).pluginInfoFor(descriptor);
    List<PluginConfiguration> scmConfigurations = List.of(
        new PluginConfiguration("username", new MetadataWithPartOfIdentity(true, false, true)),
        new PluginConfiguration("password", new MetadataWithPartOfIdentity(true, true, false))
    );
    PluginView pluginView = new PluginView("some html");
    List<PluginConfiguration> pluginSettings = List.of(new PluginConfiguration("k1", new Metadata(true, false)));
    assertThat(pluginInfo.getDescriptor(), is(descriptor));
    assertThat(pluginInfo.getExtensionName(), is("scm"));
    assertThat(pluginInfo.getDisplayName(), is("some scm plugin"));
    assertThat(pluginInfo.getScmSettings(), is(new PluggableInstanceSettings(scmConfigurations, pluginView)));
    assertThat(pluginInfo.getPluginSettings(), is(new PluggableInstanceSettings(pluginSettings, new PluginView("settings view"))));
}
|
/**
 * Returns the agent role name configured on the context.
 */
public String roleName()
{
    return ctx.agentRoleName();
}
|
@Test
void shouldUseAssignedRoleName()
{
    // The agent must report exactly the role name assigned via the context.
    final String expectedRoleName = "test-role-name";
    final TestClusterClock clock = new TestClusterClock(TimeUnit.MILLISECONDS);
    ctx.agentRoleName(expectedRoleName)
        .epochClock(clock)
        .clusterClock(clock);
    final ConsensusModuleAgent agent = new ConsensusModuleAgent(ctx);
    assertEquals(expectedRoleName, agent.roleName());
}
|
/**
 * CLI "server" command: with no argument it prints the current server address; with one
 * argument it switches the client to the new address, fires the reset hook and validates
 * connectivity. (The no-arg path intentionally skips validation, as before.)
 */
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
    CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);
    if (args.isEmpty()) {
        terminal.println(restClient.getServerAddress());
        return;
    }
    // Removed the redundant else-after-return; behavior is unchanged.
    final String serverAddress = args.get(0);
    restClient.setServerAddress(serverAddress);
    terminal.println("Server now: " + serverAddress);
    resetCliForNewServer.fire();
    validateClient(terminal, restClient);
}
|
@Test
public void shouldIdentifyNonCCloudServer() {
    // When:
    command.execute(ImmutableList.of(VALID_SERVER_ADDRESS), terminal);
    // Then: switching to a non-Confluent-Cloud address flags the client accordingly.
    verify(restClient).setIsCCloudServer(false);
}
|
/**
 * Returns the registry node path for the active version: {@code <key>/versions/<currentActiveVersion>}.
 */
public String getVersionsNodePath() {
    return key + "/" + VERSIONS + "/" + currentActiveVersion;
}
|
@Test
void assertGetVersionsNodePath() {
    // key "foo" with active version "0" yields "foo/versions/0".
    assertThat(new MetaDataVersion("foo", "0", "1").getVersionsNodePath(), is("foo/versions/0"));
}
|
/**
 * Builds the SecretsPluginInfo for a plugin from its security-config settings and image.
 */
@Override
public SecretsPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    String pluginId = descriptor.id();
    return new SecretsPluginInfo(descriptor, securityConfigSettings(pluginId), image(pluginId));
}
|
@Test
public void shouldBuildPluginInfoWithSecuritySettings() {
    // Mock the extension's metadata and view; the builder must combine them into settings.
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    List<PluginConfiguration> pluginConfigurations = List.of(
        new PluginConfiguration("username", new Metadata(true, false)),
        new PluginConfiguration("password", new Metadata(true, true))
    );
    when(extension.getSecretsConfigMetadata(descriptor.id())).thenReturn(pluginConfigurations);
    when(extension.getSecretsConfigView(descriptor.id())).thenReturn("secrets_config_view");
    SecretsPluginInfo pluginInfo = new SecretsPluginInfoBuilder(extension).pluginInfoFor(descriptor);
    assertThat(pluginInfo.getSecretsConfigSettings(),
        is(new PluggableInstanceSettings(pluginConfigurations, new PluginView("secrets_config_view"))));
}
|
/**
 * Returns the local host name, lazily resolved and cached in the static
 * {@code localhostName} field. Falls back to the host address when the name is empty.
 *
 * NOTE(review): the lazy cache is read and written without synchronization — concurrent
 * first calls may each resolve; presumably acceptable since the result is stable. Also,
 * if getLocalhost() returns null the method returns the (blank/null) cached value — verify
 * callers tolerate that.
 */
public static String getLocalHostName() {
    if (StrUtil.isNotBlank(localhostName)) {
        return localhostName;
    }
    final InetAddress localhost = getLocalhost();
    if (null != localhost) {
        String name = localhost.getHostName();
        if (StrUtil.isEmpty(name)) {
            // Host name unavailable: fall back to the textual address.
            name = localhost.getHostAddress();
        }
        localhostName = name;
    }
    return localhostName;
}
|
@Test
@Disabled
public void getLocalHostNameTest() {
    // NOTE: this call may trigger a reverse DNS lookup and block; how long depends on the network!
    assertNotNull(NetUtil.getLocalHostName());
}
|
/**
 * Builds the MySQL paging query that dumps all beta config-info rows, using an inner
 * id-only subquery for efficient LIMIT-based pagination.
 *
 * NOTE(review): startRow/pageSize are concatenated directly into the SQL (safe only
 * because they are ints) AND also added to paramList even though the SQL contains no
 * placeholders — presumably the params are ignored downstream; verify before changing,
 * as existing tests pin both the SQL text and the param list.
 */
@Override
public MapperResult findAllConfigInfoBetaForDumpAllFetchRows(MapperContext context) {
    int startRow = context.getStartRow();
    int pageSize = context.getPageSize();
    String sql = " SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5,gmt_modified,beta_ips,encrypted_data_key "
        + " FROM ( SELECT id FROM config_info_beta ORDER BY id LIMIT " + startRow + "," + pageSize + " )"
        + " g, config_info_beta t WHERE g.id = t.id ";
    List<Object> paramList = new ArrayList<>();
    paramList.add(startRow);
    paramList.add(pageSize);
    return new MapperResult(sql, paramList);
}
|
@Test
void testFindAllConfigInfoBetaForDumpAllFetchRows() {
    // Pins both the exact SQL text (with inlined paging values) and the param list.
    MapperResult result = configInfoBetaMapperByMySql.findAllConfigInfoBetaForDumpAllFetchRows(context);
    String sql = result.getSql();
    List<Object> paramList = result.getParamList();
    assertEquals(sql, " SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5,gmt_modified,beta_ips,encrypted_data_key "
        + " FROM ( SELECT id FROM config_info_beta ORDER BY id LIMIT " + startRow + "," + pageSize + " )"
        + " g, config_info_beta t WHERE g.id = t.id ");
    assertEquals(paramList, Arrays.asList(startRow, pageSize));
}
|
/**
 * Builds a Statement from the parse tree, first resolving the data sources it references.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldHandleAliasedJoinDataSources() {
    // Given: a two-way join where both sources carry aliases.
    final SingleStatementContext stmt = givenQuery("SELECT * FROM TEST1 t1 JOIN TEST2 t2"
        + " ON test1.col1 = test2.col1;");
    // When:
    final Query result = (Query) builder.buildStatement(stmt);
    // Then: the FROM clause is a Join whose sides keep their (upper-cased) aliases.
    assertThat(result.getFrom(), is(instanceOf(Join.class)));
    assertThat((Join) result.getFrom(), hasLeft(new AliasedRelation(TEST1, SourceName.of("T1"))));
    assertThat((Join) result.getFrom(), hasRights(new AliasedRelation(TEST2, SourceName.of("T2"))));
}
|
/**
 * Best-effort shutdown of a Hazelcast instance: a null instance is ignored, sockets are
 * closed first, and any failure during node shutdown is deliberately swallowed.
 */
public static void tryShutdown(HazelcastInstance hazelcastInstance) {
    if (hazelcastInstance == null) {
        return;
    }
    final HazelcastInstanceImpl instanceImpl = (HazelcastInstanceImpl) hazelcastInstance;
    closeSockets(instanceImpl);
    try {
        instanceImpl.node.shutdown(true);
    } catch (Throwable ignored) {
        // Shutdown is best-effort; failures here are intentionally ignored.
        ignore(ignored);
    }
}
|
@Test
public void testTryShutdown_shouldDoNothingWhenThrowableIsThrown() {
    // Must complete without propagating the instance's shutdown exception.
    tryShutdown(hazelcastInstanceThrowsException);
}
|
/**
 * Parses command-line options into a CommandLineOptions value. Params-files are expanded
 * first; the first non-flag argument and everything after it are treated as file names.
 * Both "--flag value" and "--flag=value" spellings are accepted.
 *
 * @throws IllegalArgumentException on an unrecognized flag
 */
static CommandLineOptions parse(Iterable<String> options) {
    CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
    List<String> expandedOptions = new ArrayList<>();
    expandParamsFiles(options, expandedOptions);
    Iterator<String> it = expandedOptions.iterator();
    while (it.hasNext()) {
        String option = it.next();
        // First non-flag token: it and all remaining tokens are input files.
        if (!option.startsWith("-")) {
            optionsBuilder.filesBuilder().add(option).addAll(it);
            break;
        }
        String flag;
        String value;
        // Split "--flag=value" into flag and value; otherwise value is read lazily from it.
        int idx = option.indexOf('=');
        if (idx >= 0) {
            flag = option.substring(0, idx);
            value = option.substring(idx + 1);
        } else {
            flag = option;
            value = null;
        }
        // NOTE: update usage information in UsageException when new flags are added
        switch (flag) {
            case "-i":
            case "-r":
            case "-replace":
            case "--replace":
                optionsBuilder.inPlace(true);
                break;
            case "--lines":
            case "-lines":
            case "--line":
            case "-line":
                parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
                break;
            case "--offset":
            case "-offset":
                optionsBuilder.addOffset(parseInteger(it, flag, value));
                break;
            case "--length":
            case "-length":
                optionsBuilder.addLength(parseInteger(it, flag, value));
                break;
            case "--aosp":
            case "-aosp":
            case "-a":
                optionsBuilder.aosp(true);
                break;
            case "--version":
            case "-version":
            case "-v":
                optionsBuilder.version(true);
                break;
            case "--help":
            case "-help":
            case "-h":
                optionsBuilder.help(true);
                break;
            case "--fix-imports-only":
                optionsBuilder.fixImportsOnly(true);
                break;
            case "--skip-sorting-imports":
                optionsBuilder.sortImports(false);
                break;
            case "--skip-removing-unused-imports":
                optionsBuilder.removeUnusedImports(false);
                break;
            case "--skip-reflowing-long-strings":
                optionsBuilder.reflowLongStrings(false);
                break;
            case "--skip-javadoc-formatting":
                optionsBuilder.formatJavadoc(false);
                break;
            case "-":
                // A lone dash means read the source from stdin.
                optionsBuilder.stdin(true);
                break;
            case "-n":
            case "--dry-run":
                optionsBuilder.dryRun(true);
                break;
            case "--set-exit-if-changed":
                optionsBuilder.setExitIfChanged(true);
                break;
            case "-assume-filename":
            case "--assume-filename":
                optionsBuilder.assumeFilename(getValue(flag, it, value));
                break;
            default:
                throw new IllegalArgumentException("unexpected flag: " + flag);
        }
    }
    return optionsBuilder.build();
}
|
@Test
public void lines() {
    // All four flag spellings (--lines, -lines=, --line, -line=) parse into
    // half-open, zero-based ranges.
    assertThat(
        CommandLineOptionsParser.parse(
            Arrays.asList("--lines", "1:2", "-lines=4:5", "--line", "7:8", "-line=10:11"))
            .lines()
            .asRanges())
        .containsExactly(
            Range.closedOpen(0, 2),
            Range.closedOpen(3, 5),
            Range.closedOpen(6, 8),
            Range.closedOpen(9, 11));
}
|
/**
 * Config setter: toggles extraction of bookmark text on the default PDF parser config.
 */
@Field
public void setExtractBookmarksText(boolean extractBookmarksText) {
    defaultConfig.setExtractBookmarksText(extractBookmarksText);
}
|
@Test
public void testTurningOffBookmarks() throws Exception {
    // With bookmark extraction disabled, bookmark text must not appear in the output.
    PDFParserConfig config = new PDFParserConfig();
    config.setExtractBookmarksText(false);
    ParseContext parseContext = new ParseContext();
    parseContext.set(PDFParserConfig.class, config);
    String xml = getXML("testPDF_bookmarks.pdf", parseContext).xml;
    assertNotContained("Denmark bookmark is here", xml);
}
|
/**
 * Higher criterion values are better for this criterion.
 */
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    return criterionValue1.isGreaterThan(criterionValue2);
}
|
@Test
public void betterThan() {
    // 2.0 beats 1.5, and not the other way round.
    AnalysisCriterion criterion = getCriterion();
    assertTrue(criterion.betterThan(numOf(2.0), numOf(1.5)));
    assertFalse(criterion.betterThan(numOf(1.5), numOf(2.0)));
}
|
/**
 * Computes the cluster health over application nodes only: every sub-check runs against
 * the application-node subset and the results are merged, starting from GREEN.
 */
@Override
public Health check(Set<NodeHealth> nodeHealths) {
    Set<NodeHealth> appNodes = nodeHealths.stream()
        .filter(nodeHealth -> nodeHealth.getDetails().getType() == NodeDetails.Type.APPLICATION)
        .collect(Collectors.toSet());
    Health merged = Health.GREEN;
    for (AppNodeClusterHealthSubChecks subCheck : AppNodeClusterHealthSubChecks.values()) {
        merged = HealthReducer.merge(merged, subCheck.check(appNodes));
    }
    return merged;
}
|
@Test
public void status_YELLOW_when_one_RED_node_and_one_YELLOW_application_node() {
    // One RED and one YELLOW app node merge to overall YELLOW with both causes reported.
    Set<NodeHealth> nodeHealths = nodeHealths(RED, YELLOW).collect(toSet());
    Health check = underTest.check(nodeHealths);
    assertThat(check)
        .forInput(nodeHealths)
        .hasStatus(Health.Status.YELLOW)
        .andCauses(
            "At least one application node is RED",
            "At least one application node is YELLOW");
}
|
/**
 * Deletes the given keys. In queueing/pipeline mode each DEL is written individually and
 * null is returned (results arrive when the pipeline/transaction completes); otherwise
 * the deletes run in one batch and the summed delete count is returned.
 *
 * NOTE(review): the queued path uses LongCodec while the batch path uses StringCodec —
 * presumably both decode DEL's integer reply the same way, but verify the inconsistency
 * is intentional.
 */
@Override
public Long del(byte[]... keys) {
    if (isQueueing() || isPipelined()) {
        for (byte[] key: keys) {
            write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
        }
        return null;
    }
    CommandBatchService es = new CommandBatchService(executorService);
    for (byte[] key: keys) {
        es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
    }
    BatchResult<Long> b = (BatchResult<Long>) es.execute();
    return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
}
|
@Test
public void testDelPipeline() {
    // In a pipeline, GET returns the stored value and DEL reports one key removed.
    byte[] k = "key".getBytes();
    byte[] v = "val".getBytes();
    connection.set(k, v);
    connection.openPipeline();
    connection.get(k);
    connection.del(k);
    List<Object> results = connection.closePipeline();
    byte[] val = (byte[])results.get(0);
    assertThat(val).isEqualTo(v);
    Long res = (Long) results.get(1);
    assertThat(res).isEqualTo(1);
}
|
/**
 * Builds the token-service name for RM delegation tokens from the configured RM address,
 * falling back to the default address and port.
 */
@Unstable
public static Text getRMDelegationTokenService(Configuration conf) {
    return getTokenService(conf, YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_PORT);
}
|
@Test
void testGetRMDelegationTokenService() {
    String defaultRMAddress = YarnConfiguration.DEFAULT_RM_ADDRESS;
    YarnConfiguration conf = new YarnConfiguration();
    // HA is not enabled: a single service entry based on the default RM address.
    Text tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
    String[] services = tokenService.toString().split(",");
    assertEquals(1, services.length);
    for (String service : services) {
        assertTrue(service.contains(defaultRMAddress),
            "Incorrect token service name");
    }
    // HA is enabled: one service entry per configured RM.
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"),
        "0.0.0.0");
    conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"),
        "0.0.0.0");
    tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
    services = tokenService.toString().split(",");
    assertEquals(2, services.length);
    for (String service : services) {
        assertTrue(service.contains(defaultRMAddress),
            "Incorrect token service name");
    }
}
|
/**
 * Finalizes the write: closes the ORC writer, optionally re-reads the file to validate
 * it, and returns a statistics page for the written file.
 *
 * On a close failure the rollback action is invoked (its own failures are ignored), an
 * original PrestoException is rethrown as-is, and any other failure is wrapped in a
 * PrestoException with HIVE_WRITER_CLOSE_ERROR.
 */
@Override
public Optional<Page> commit()
{
    try {
        orcWriter.close();
    }
    catch (IOException | UncheckedIOException | PrestoException e) {
        try {
            // Best-effort rollback; the close failure is what gets reported.
            rollbackAction.call();
        }
        catch (Exception ignored) {
            // ignore
        }
        throwIfInstanceOf(e, PrestoException.class);
        throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error committing write to Hive. " + e.getMessage(), e);
    }
    if (validationInputFactory.isPresent()) {
        try {
            try (OrcDataSource input = validationInputFactory.get().get()) {
                // Track CPU time spent on validation separately from the write itself.
                long startThreadCpuTime = THREAD_MX_BEAN.getCurrentThreadCpuTime();
                orcWriter.validate(input);
                validationCpuNanos += THREAD_MX_BEAN.getCurrentThreadCpuTime() - startThreadCpuTime;
            }
        }
        catch (IOException | UncheckedIOException e) {
            throw new PrestoException(HIVE_WRITE_VALIDATION_FAILED, e);
        }
    }
    return Optional.of(createFileStatisticsPage(getFileSizeInBytes(), rowCount));
}
|
@Test
public void testIOExceptionPropagation()
{
    // This test is to verify that a IOException thrown by the underlying data sink implementation is wrapped into a PrestoException.
    OrcFileWriter orcFileWriter = createOrcFileWriter(false);
    boolean exceptionThrown = false;
    try {
        // Throws PrestoException with HIVE_WRITER_CLOSE_ERROR error code
        orcFileWriter.commit();
    }
    catch (Exception e) {
        exceptionThrown = true;
        assertEquals(e.getClass(), PrestoException.class);
        assertEquals(e.getMessage(), "Error committing write to Hive. Dummy IOException from mocked data sink instance");
        assertEquals(((PrestoException) e).getErrorCode(), HIVE_WRITER_CLOSE_ERROR.toErrorCode());
        assertEquals(e.getCause().getClass(), IOException.class);
    }
    // The original test passed silently when commit() did not throw at all;
    // require that the expected exception was actually raised.
    assertEquals(exceptionThrown, true);
}
|
/**
 * No-op validation: any text value is accepted for this property type.
 */
@Override
public void validate(String value, @Nullable List<String> options) {
    // Nothing to do
}
|
@Test
public void not_fail_on_valid_text() {
    // Both numeric-looking and alphabetic text pass the (no-op) validation.
    validation.validate("10", null);
    validation.validate("abc", null);
}
|
/**
 * Decides whether the user's authorization request is already approved, either through
 * the client's auto-approve scopes or through previously stored user approvals.
 */
@Override
@Transactional
public boolean checkForPreApproval(Long userId, Integer userType, String clientId, Collection<String> requestedScopes) {
    // Step 1: check the client's auto-approve scopes; if every requested scope is
    // auto-approved, record the approvals and return true.
    OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
    Assert.notNull(clientDO, "客户端不能为空"); // defensive check
    if (CollUtil.containsAll(clientDO.getAutoApproveScopes(), requestedScopes)) {
        // gh-877 - if all scopes are auto approved, approvals still need to be added to the approval store.
        LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
        for (String scope : requestedScopes) {
            saveApprove(userId, userType, clientId, scope, true, expireTime);
        }
        return true;
    }
    // Step 2: fall back to the user's stored approvals; return true only if every
    // requested scope is already covered.
    List<OAuth2ApproveDO> approveDOs = getApproveList(userId, userType, clientId);
    Set<String> scopes = convertSet(approveDOs, OAuth2ApproveDO::getScope,
        OAuth2ApproveDO::getApproved); // keep only non-expired, approved entries
    return CollUtil.containsAll(scopes, requestedScopes);
}
|
@Test
public void checkForPreApproval_reject() {
    // Prepare arguments
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String clientId = randomString();
    List<String> requestedScopes = Lists.newArrayList("read");
    // Mock behavior: the client has no auto-approve scopes.
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId)))
        .thenReturn(randomPojo(OAuth2ClientDO.class).setAutoApproveScopes(null));
    // Mock data: a stored approval for "read" that the user explicitly rejected.
    OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class).setUserId(userId)
        .setUserType(userType).setClientId(clientId).setScope("read")
        .setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 1L, ChronoUnit.DAYS)).setApproved(false); // rejected
    oauth2ApproveMapper.insert(approve);
    // Invoke
    boolean success = oauth2ApproveService.checkForPreApproval(userId, userType,
        clientId, requestedScopes);
    // Assert: a rejected approval must not count as pre-approval.
    assertFalse(success);
}
|
/**
 * Initializes the BLKIO cgroup controller. Returns null because no privileged
 * operations are needed for bootstrap (callers expect null here).
 */
@Override
public List<PrivilegedOperation> bootstrap(Configuration configuration)
    throws ResourceHandlerException {
    // if bootstrap is called on this class, disk is already enabled
    // so no need to check again
    this.cGroupsHandler
        .initializeCGroupController(CGroupsHandler.CGroupController.BLKIO);
    return null;
}
|
@Test
public void testBootstrap() throws Exception {
    // Bootstrap must initialize the BLKIO controller exactly once and return null.
    Configuration conf = new YarnConfiguration();
    List<PrivilegedOperation> ret =
        cGroupsBlkioResourceHandlerImpl.bootstrap(conf);
    verify(mockCGroupsHandler, times(1)).initializeCGroupController(
        CGroupsHandler.CGroupController.BLKIO);
    Assert.assertNull(ret);
}
|
/**
 * Handles deletion of topic partitions: asks the offset manager for the tombstone
 * records that clear any offsets committed for those partitions, logs the elapsed
 * time, and returns them as a coordinator result with no response payload.
 */
public CoordinatorResult<Void, CoordinatorRecord> onPartitionsDeleted(
    List<TopicPartition> topicPartitions
) {
    final long startTimeMs = time.milliseconds();
    final List<CoordinatorRecord> tombstones = offsetMetadataManager.onPartitionsDeleted(topicPartitions);
    log.info("Generated {} tombstone records in {} milliseconds while deleting offsets for partitions {}.",
        tombstones.size(), time.milliseconds() - startTimeMs, topicPartitions);
    return new CoordinatorResult<>(tombstones, false);
}
|
@Test
public void testOnPartitionsDeleted() {
    // The coordinator must pass the partitions through to the offset manager and
    // return its tombstone records with a null response.
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    List<CoordinatorRecord> records = Collections.singletonList(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(
        "group",
        "foo",
        0
    ));
    when(offsetMetadataManager.onPartitionsDeleted(
        Collections.singletonList(new TopicPartition("foo", 0))
    )).thenReturn(records);
    CoordinatorResult<Void, CoordinatorRecord> result = coordinator.onPartitionsDeleted(
        Collections.singletonList(new TopicPartition("foo", 0))
    );
    assertEquals(records, result.records());
    assertNull(result.response());
}
|
/**
 * Returns the singleton log collector instance.
 *
 * @return the shared {@code LogCollector} instance
 */
public static LogCollector<ShenyuRequestLog> getInstance() {
    return INSTANCE;
}
|
/**
 * Verifies the collector lifecycle: start() flips the private "started" flag
 * to true, collect() can then be invoked, and close() flips the flag back to
 * false. The flag is read reflectively because it is not exposed.
 */
@Test
public void testAbstractLogCollector() throws Exception {
    HuaweiLtsLogCollector.getInstance().start();
    Field field1 = AbstractLogCollector.class.getDeclaredField("started");
    field1.setAccessible(true);
    Assertions.assertEquals(field1.get(HuaweiLtsLogCollector.getInstance()).toString(), "true");
    HuaweiLtsLogCollector.getInstance().collect(shenyuRequestLog);
    HuaweiLtsLogCollector.getInstance().close();
    Field field2 = AbstractLogCollector.class.getDeclaredField("started");
    field2.setAccessible(true);
    Assertions.assertEquals(field2.get(HuaweiLtsLogCollector.getInstance()).toString(), "false");
}
|
/**
 * Deletes the given files, choosing the API call based on the file's state:
 * already-trashed duplicates are purged from the trash, files flagged as
 * malicious use the dedicated removal endpoint, and all others are removed
 * normally. The node id cache entry is invalidated after each deletion.
 *
 * @throws BackgroundException wrapping any {@link ApiException} from the API
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path file : files.keySet()) {
        callback.delete(file);
        try {
            if(file.attributes().isDuplicate()) {
                // Already trashed
                log.warn(String.format("Delete file %s already in trash", file));
                new NodesApi(session.getClient()).removeDeletedNodes(new DeleteDeletedNodesRequest().deletedNodeIds(Collections.singletonList(
                    Long.parseLong(nodeid.getVersionId(file)))), StringUtils.EMPTY);
            }
            else if(file.attributes().getVerdict() == PathAttributes.Verdict.malicious) {
                // Delete malicious file
                log.warn(String.format("Delete file %s marked as malicious", file));
                new NodesApi(session.getClient()).removeMaliciousFile(
                    Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY);
            }
            else {
                // Regular deletion by node id.
                new NodesApi(session.getClient()).removeNode(
                    Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY);
            }
            // Invalidate the cached node id for the deleted path.
            nodeid.cache(file, null);
        }
        catch(ApiException e) {
            throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, file);
        }
    }
}
|
/**
 * Integration test: creates a room and a file inside it, deletes the file and
 * verifies it is gone, then cleans up by deleting the room.
 */
@Test
public void testDeleteFile() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path fileInRoom = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new SDSTouchFeature(session, nodeid).touch(fileInRoom, new TransferStatus());
    assertTrue(new DefaultFindFeature(session).find(fileInRoom));
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(fileInRoom), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DefaultFindFeature(session).find(fileInRoom));
    // Clean up the test room.
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Exports the resources used by the referenced job (recursively) and rewires
 * this job entry to reference the exported file via
 * {@code ${Internal.Entry.Current.Directory}}.
 *
 * @return the proposed filename of the exported job
 * @throws KettleException if the job metadata cannot be loaded or exported
 */
@Override
public String exportResources( VariableSpace space, Map<String, ResourceDefinition> definitions,
  ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException {
  // No need to clone this job entry; the caller is responsible for that.
  // First load the job metadata (from repository or file).
  copyVariablesFrom( space ); // To make sure variables are available.
  JobMeta jobMeta = getJobMeta( repository, metaStore, space );
  // Recursively export the files referenced inside the job as well.
  String proposedNewFilename =
    jobMeta.exportResources( jobMeta, definitions, namingInterface, repository, metaStore );
  // Build a relative reference by injecting ${Internal.Entry.Current.Directory}.
  String newFilename = "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + proposedNewFilename;
  // Point the exported job at the new filename.
  jobMeta.setFilename( newFilename );
  // Exports always reside in the root directory, in case we want to turn this
  // into a file repository.
  jobMeta.setRepositoryDirectory( new RepositoryDirectory() );
  // Always export to a filename so the exported XML can be executed remotely.
  setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME );
  // Update this job entry to reference the exported file.
  filename = newFilename;
  return proposedNewFilename;
}
|
/**
 * Verifies that exportResources() renames the job to reference
 * ${Internal.Entry.Current.Directory} and switches the specification method
 * to FILENAME. JobMeta and CurrentDirectoryResolver constructions are mocked.
 */
@Test
public void testExportResources() throws Exception {
    try ( MockedConstruction<JobMeta> jobMetaMockedConstruction = mockConstruction( JobMeta.class );
        MockedConstruction<CurrentDirectoryResolver> currentDirectoryResolverMockedConstruction = mockConstruction(
            CurrentDirectoryResolver.class, ( mock, context ) ->
            {
                doCallRealMethod().when( mock ).normalizeSlashes( anyString() );
                doReturn( space ).when( mock ).resolveCurrentDirectory( any( ObjectLocationSpecificationMethod.class ),
                    any( VariableSpace.class ), nullable( Repository.class ), nullable( Job.class ), anyString() );
            } ) ) {
        JobMeta meta = mock( JobMeta.class );
        JobEntryJob jej = getJej();
        jej.setDescription( JOB_ENTRY_DESCRIPTION );
        doReturn( meta ).when( jej ).getJobMeta(
            nullable( Repository.class ), nullable( IMetaStore.class ), nullable( VariableSpace.class ) );
        doReturn( JOB_ENTRY_JOB_NAME ).when( meta ).exportResources(
            nullable( JobMeta.class ), nullable( Map.class ), nullable( ResourceNamingInterface.class ),
            nullable( Repository.class ), nullable( IMetaStore.class ) );
        jej.exportResources( null, null, null, null, null );
        // Filename must be rewritten relative to the current entry directory.
        verify( meta ).setFilename( "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + JOB_ENTRY_JOB_NAME );
        verify( jej ).setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME );
    }
}
|
/**
 * Returns a description of this topology's structure.
 * Synchronized to provide a consistent snapshot of the underlying builder.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
|
/**
 * Verifies that a named, in-memory materialized windowed count keeps the
 * expected source -> aggregate topology shape and that the built topology has
 * no persistent local store.
 */
@Test
public void timeWindowNamedMaterializedCountShouldPreserveTopologyStructure() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>as("count-store").withStoreType(Materialized.StoreType.IN_MEMORY));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> KSTREAM-AGGREGATE-0000000001\n" +
            "    Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
            "      --> none\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // An in-memory store must not register as a persistent local store.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
|
/**
 * Applies the given configuration parameter to the specified cluster node and
 * waits for the write to complete.
 *
 * @param node  the cluster node to configure
 * @param param the configuration parameter name
 * @param value the value to assign
 */
@Override
public void setConfig(RedisClusterNode node, String param, String value) {
    syncFuture(executorService.writeAsync(
        getEntry(node), StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value));
}
|
/**
 * Smoke test: setting a config parameter on the first master node must not
 * throw.
 */
@Test
public void testSetConfig() {
    RedisClusterNode master = getFirstMaster();
    connection.setConfig(master, "timeout", "10");
}
|
/**
 * Reports the given metrics snapshot to Graphite: connects, sends every
 * gauge, counter, histogram, meter and timer with a shared timestamp
 * (seconds), flushes, and always closes the connection afterwards.
 * Failures are logged rather than propagated.
 */
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
                   SortedMap<String, Counter> counters,
                   SortedMap<String, Histogram> histograms,
                   SortedMap<String, Meter> meters,
                   SortedMap<String, Timer> timers) {
    // Graphite expects timestamps in seconds, not milliseconds.
    final long timestamp = clock.getTime() / 1000;
    try {
        graphite.connect();
        for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
            reportGauge(entry.getKey(), entry.getValue(), timestamp);
        }
        for (Map.Entry<String, Counter> entry : counters.entrySet()) {
            reportCounter(entry.getKey(), entry.getValue(), timestamp);
        }
        for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
            reportHistogram(entry.getKey(), entry.getValue(), timestamp);
        }
        for (Map.Entry<String, Meter> entry : meters.entrySet()) {
            reportMetered(entry.getKey(), entry.getValue(), timestamp);
        }
        for (Map.Entry<String, Timer> entry : timers.entrySet()) {
            reportTimer(entry.getKey(), entry.getValue(), timestamp);
        }
        graphite.flush();
    } catch (IOException e) {
        // Fix: the original had no "{}" placeholder, so SLF4J silently
        // dropped the graphite argument; include it in the message.
        LOGGER.warn("Unable to report to Graphite {}", graphite, e);
    } finally {
        try {
            graphite.close();
        } catch (IOException e1) {
            LOGGER.warn("Error closing Graphite {}", graphite, e1);
        }
    }
}
|
/**
 * Verifies that a short-valued gauge is reported as "1" with the expected
 * connect/send/flush/close call order and no extra interactions.
 */
@Test
public void reportsShortGaugeValues() throws Exception {
    reporter.report(map("gauge", gauge((short) 1)),
        map(),
        map(),
        map(),
        map());
    final InOrder inOrder = inOrder(graphite);
    inOrder.verify(graphite).connect();
    inOrder.verify(graphite).send("prefix.gauge", "1", timestamp);
    inOrder.verify(graphite).flush();
    inOrder.verify(graphite).close();
    verifyNoMoreInteractions(graphite);
}
|
/**
 * Returns a copy of this status with the given description; the code and
 * cause are carried over unchanged. This instance is not modified.
 */
public TriRpcStatus withDescription(String description) {
    return new TriRpcStatus(code, cause, description);
}
|
/**
 * Verifies withDescription() returns a new status carrying the description
 * while leaving the original status untouched.
 */
@Test
void withDescription() {
    TriRpcStatus origin = TriRpcStatus.NOT_FOUND;
    TriRpcStatus withDesc = origin.withDescription("desc");
    Assertions.assertNull(origin.description);
    Assertions.assertTrue(withDesc.description.contains("desc"));
}
|
/**
 * Returns the ports of the specified device within this virtual network.
 *
 * @param deviceId the device identifier; must not be null
 * @return list of the device's virtual ports (possibly empty)
 * @throws NullPointerException if {@code deviceId} is null
 */
@Override
public List<Port> getPorts(DeviceId deviceId) {
    checkNotNull(deviceId, DEVICE_NULL);
    // The stream/collect pair serves only as a defensive copy into a List.
    return manager.getVirtualPorts(this.networkId, deviceId)
        .stream()
        .collect(Collectors.toList());
}
|
/**
 * Verifies that getPorts() rejects a null device identifier with a
 * NullPointerException.
 */
@Test(expected = NullPointerException.class)
public void testGetPortsByNullId() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);
    // test the getPorts() method using a null device identifier
    deviceService.getPorts(null);
}
|
/**
 * Handles the "downgrade" subcommand by delegating to the shared
 * upgrade/downgrade implementation with the downgrade type parsed from the
 * arguments.
 *
 * @throws TerseException if any of the requested operations fail
 */
static void handleDowngrade(Namespace namespace, Admin adminClient) throws TerseException {
    handleUpgradeOrDowngrade("downgrade", namespace, adminClient, downgradeType(namespace));
}
|
/**
 * Verifies partial-failure reporting for handleDowngrade(): the feature
 * downgrade succeeds while the metadata downgrade is rejected, producing a
 * "1 out of 2 operation(s) failed." TerseException and matching stdout.
 */
@Test
public void testHandleDowngrade() {
    Map<String, Object> namespace = new HashMap<>();
    namespace.put("metadata", "3.3-IV3");
    namespace.put("feature", Collections.singletonList("foo.bar=1"));
    namespace.put("dry_run", false);
    String downgradeOutput = ToolsTestUtils.captureStandardOut(() -> {
        Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDowngrade(new Namespace(namespace), buildAdminClient()));
        assertTrue(t.getMessage().contains("1 out of 2 operation(s) failed."));
    });
    assertEquals(format("foo.bar was downgraded to 1.%n" +
        "Could not downgrade metadata.version to 7. Can't downgrade to newer version."), downgradeOutput);
}
|
/**
 * Renders a configuration map as a string with sensitive values redacted.
 * Keys are emitted in sorted order. A value is redacted when its key is
 * either declared sensitive in the given {@link ConfigDef} or unknown to it
 * (unknown keys are treated conservatively as secrets). String values are
 * quoted; null values print as {@code null}.
 *
 * @param map       the configuration entries to render
 * @param configDef the definition used to classify each key
 * @return the redacted, human-readable representation
 */
public static String configMapToRedactedString(Map<String, Object> map, ConfigDef configDef) {
    List<String> sortedKeys = new ArrayList<>(map.keySet());
    Collections.sort(sortedKeys);
    StringBuilder out = new StringBuilder("{");
    boolean first = true;
    for (String key : sortedKeys) {
        if (!first) {
            out.append(", ");
        }
        first = false;
        out.append(key).append("=");
        ConfigKey configKey = configDef.configKeys().get(key);
        if (configKey == null || configKey.type().isSensitive()) {
            // Unknown or sensitive keys never leak their value.
            out.append("(redacted)");
        } else {
            Object value = map.get(key);
            if (value == null) {
                out.append("null");
            } else if (configKey.type() == Type.STRING) {
                out.append("\"").append(value).append("\"");
            } else {
                out.append(value);
            }
        }
    }
    return out.append("}").toString();
}
|
/**
 * Verifies redaction behavior: declared-sensitive keys ("myPassword") and
 * keys unknown to the ConfigDef ("myUnknown") are redacted, strings are
 * quoted, nulls print as "null", and keys appear sorted.
 */
@Test
public void testConfigMapToRedactedStringWithSecrets() {
    Map<String, Object> testMap1 = new HashMap<>();
    testMap1.put("myString", "whatever");
    testMap1.put("myInt", 123);
    testMap1.put("myPassword", "foosecret");
    testMap1.put("myString2", null);
    testMap1.put("myUnknown", 456);
    assertEquals("{myInt=123, myPassword=(redacted), myString=\"whatever\", myString2=null, myUnknown=(redacted)}",
        ConfigUtils.configMapToRedactedString(testMap1, CONFIG));
}
|
/**
 * Scans the users directory and maps each existing user id (derived from its
 * directory name via the id strategy) to that on-disk directory.
 *
 * @return map of user id to user directory
 * @throws IOException if the scan fails
 */
Map<String, File> scanExistingUsers() throws IOException {
    Map<String, File> users = new HashMap<>();
    File[] userDirectories = listUserDirectories();
    // listUserDirectories() may return null (e.g. missing directory).
    if (userDirectories != null) {
        for (File directory : userDirectories) {
            String userId = idStrategy.idFromFilename(directory.getName());
            users.put(userId, directory);
        }
    }
    // Presumably covers a legacy empty-username record — see
    // addEmptyUsernameIfExists for the exact condition.
    addEmptyUsernameIfExists(users);
    return users;
}
|
/**
 * Verifies that scanning a legacy user directory layout finds all four
 * expected user ids, including ones containing non-ASCII characters.
 */
@Test
public void scanExistingUsersOldLegacy() throws IOException {
    UserIdMigrator migrator = createUserIdMigrator();
    Map<String, File> userMappings = migrator.scanExistingUsers();
    assertThat(userMappings.keySet(), hasSize(4));
    assertThat(userMappings.keySet(), hasItems("make\u1000000", "\u306f\u56fd\u5185\u3067\u6700\u5927", "\u1000yyy", "zzz\u1000"));
}
|
/**
 * Returns the local date/time string using the default "mm:ss.SSS"
 * (minutes:seconds.millis) pattern; delegates to {@code getSLcDtTm(String)}.
 */
public static String getSLcDtTm() {
    //
    return getSLcDtTm( "mm:ss.SSS" );
}
|
/**
 * Verifies the default-format timestamp: fixed overall length of 15 and the
 * "LDTm: " prefix.
 */
@Test
public void testgetSLcDtTm() throws Exception {
    //
    assertEquals( 15, BTools.getSLcDtTm().length() );
    assertEquals( "LDTm: ", BTools.getSLcDtTm().substring( 0, 6 ) );
    //
}
|
/**
 * Clones the given throwable with a stack trace fixed up against the current
 * thread's trace (so async hops are reflected). The clone is created
 * reflectively via a (message, cause) constructor; if no suitable
 * constructor exists the original instance is returned unchanged.
 *
 * @param original the throwable to clone
 * @return the clone with the fixed stack trace, or {@code original} when
 *         cloning is not possible
 */
@Nonnull
public static <T extends Throwable> T cloneExceptionWithFixedAsyncStackTrace(@Nonnull T original) {
    StackTraceElement[] fixedStackTrace = getFixedStackTrace(original, Thread.currentThread().getStackTrace());
    Class<? extends Throwable> exceptionClass = original.getClass();
    Throwable clone = tryCreateExceptionWithMessageAndCause(exceptionClass,
        original.getMessage(), original.getCause());
    if (clone != null) {
        clone.setStackTrace(fixedStackTrace);
        return (T) clone;
    }
    // No usable constructor — fall back to the original instance.
    return original;
}
|
/**
 * Verifies that an exception with a non-standard constructor is still cloned
 * reflectively: the clone keeps the original type and cause, and carries no
 * async stack-trace markers.
 */
@Test
public void testCloneExceptionWithFixedAsyncStackTrace_whenNonStandardConstructor_then_cloneReflectively() {
    IOException expectedException = new IOException();
    Throwable result = ExceptionUtil.cloneExceptionWithFixedAsyncStackTrace(
        new NonStandardException(1337, expectedException));
    assertEquals(NonStandardException.class, result.getClass());
    assertEquals(expectedException, result.getCause());
    assertNoAsyncTrace(result);
}
|
/**
 * Resolves the SQL type of the given expression with no lambda type mappings
 * in scope; delegates to the two-argument overload with an empty map.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
|
/**
 * Verifies that a lambda passed to a map-typed UDF is resolved correctly:
 * the lambda's parameters are typed from the map's key/value types
 * (BIGINT, DOUBLE) and the overall expression type is the UDF's DOUBLE
 * return type.
 */
@Test
public void shouldEvaluateLambdaInUDFWithMap() {
    // Given:
    givenUdfWithNameAndReturnType("TRANSFORM", SqlTypes.DOUBLE);
    when(function.parameters()).thenReturn(
        ImmutableList.of(
            MapType.of(DoubleType.INSTANCE, DoubleType.INSTANCE),
            LambdaType.of(ImmutableList.of(LongType.INSTANCE, DoubleType.INSTANCE), DoubleType.INSTANCE)));
    final Expression expression =
        new FunctionCall(
            FunctionName.of("TRANSFORM"),
            ImmutableList.of(
                MAPCOL,
                new LambdaFunctionCall(
                    ImmutableList.of("X", "Y"),
                    new ArithmeticBinaryExpression(
                        Operator.ADD,
                        new LambdaVariable("X"),
                        new IntegerLiteral(5))
                )));
    // When:
    final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
    // Then:
    assertThat(exprType, is(SqlTypes.DOUBLE));
    verify(udfFactory).getFunction(
        ImmutableList.of(
            SqlArgument.of(SqlTypes.map(SqlTypes.BIGINT, SqlTypes.DOUBLE)),
            SqlArgument.of(SqlLambda.of(2))));
    verify(function).getReturnType(
        ImmutableList.of(
            SqlArgument.of(SqlTypes.map(SqlTypes.BIGINT, SqlTypes.DOUBLE)),
            SqlArgument.of(
                SqlLambdaResolved.of(ImmutableList.of(SqlTypes.BIGINT, SqlTypes.DOUBLE), SqlTypes.BIGINT))));
}
|
/**
 * Reads the "Main-Class" manifest attribute configured on the Gradle "jar"
 * task, unwrapping a lazy {@link Provider} value if present.
 *
 * @return the main class name, or {@code null} when there is no jar task,
 *         no attribute, or the provider holds no value
 */
@Nullable
@Override
public String getMainClassFromJarPlugin() {
    Jar jarTask = (Jar) project.getTasks().findByName("jar");
    if (jarTask == null) {
        return null;
    }
    Object mainClass = jarTask.getManifest().getAttributes().get("Main-Class");
    if (mainClass instanceof Provider) {
        // Unwrap lazily-configured values; getOrNull avoids throwing when unset.
        mainClass = ((Provider<?>) mainClass).getOrNull();
    }
    // For a String this is the identity; any other non-null object is
    // rendered via toString(), matching String.valueOf semantics.
    return mainClass == null ? null : mainClass.toString();
}
|
/**
 * Verifies that a Main-Class manifest attribute backed by a Property holding
 * no value resolves to null rather than a string.
 */
@Test
public void testGetMainClassFromJarAsPropertyWithValueNull_missing() {
    Property<String> mainClass = project.getObjects().property(String.class).value((String) null);
    Jar jar = project.getTasks().withType(Jar.class).getByName("jar");
    jar.setManifest(new DefaultManifest(null).attributes(ImmutableMap.of("Main-Class", mainClass)));
    assertThat(gradleProjectProperties.getMainClassFromJarPlugin()).isNull();
}
|
/**
 * Signs the given CSR with the CA key, valid from "now" (per the injected
 * clock) until the certificate lifetime configured by the renewal policy.
 *
 * @return the signed certificate
 */
public X509Certificate sign(PrivateKey caPrivateKey, X509Certificate caCertificate, PKCS10CertificationRequest csr, RenewalPolicy renewalPolicy) throws Exception {
    Instant validFrom = Instant.now(clock);
    var validUntil = validFrom.plus(renewalPolicy.parsedCertificateLifetime());
    return sign(caPrivateKey, caCertificate, csr, validFrom, validUntil);
}
|
/**
 * Verifies that a "P6M" lifetime policy yields a certificate expiring
 * 180 days after the fixed test clock instant.
 */
@Test
void testSigningCertWithSixMonthsLifetime() throws Exception {
    var result = sign("P6M");
    assertThat(result).isNotNull();
    assertThat(result.getNotAfter()).isEqualTo(fixedInstant.plus(180, ChronoUnit.DAYS));
}
|
/**
 * Creates the CSV schema command, passing the console logger to the base
 * command.
 *
 * @param console logger used for command output
 */
public CSVSchemaCommand(Logger console) {
    super(console);
}
|
/**
 * Smoke test: running the CSV schema command against a sample CSV file
 * completes with exit code 0.
 */
@Test
public void testCSVSchemaCommand() throws IOException {
    File file = csvFile();
    CSVSchemaCommand command = new CSVSchemaCommand(createLogger());
    command.samplePaths = Arrays.asList(file.getAbsolutePath());
    command.recordName = "Test";
    command.setConf(new Configuration());
    Assert.assertEquals(0, command.run());
}
|
/**
 * Recovers a failed {@link CompletionStage} by mapping its exception to a
 * fallback value; a successful stage passes through unchanged.
 *
 * @param completionStage  the stage to protect
 * @param exceptionHandler maps the failure to a replacement result
 * @return a stage that completes with the original result or the handler's
 *         fallback value
 */
public static <T> CompletionStage<T> recover(CompletionStage<T> completionStage, Function<Throwable, T> exceptionHandler) {
    return completionStage.exceptionally(exceptionHandler::apply);
}
|
/**
 * Verifies that the exception-class-filtered recover() overload maps a
 * TimeoutException failure to the fallback value.
 */
@Test
public void shouldRecoverFromSpecificExceptions()
    throws InterruptedException, ExecutionException, TimeoutException {
    CompletableFuture<String> future = new CompletableFuture<>();
    future.completeExceptionally(new TimeoutException());
    String result = recover(future, asList(TimeoutException.class, IOException.class), (e) -> "fallback").toCompletableFuture()
        .get(1, TimeUnit.SECONDS);
    assertThat(result).isEqualTo("fallback");
}
|
/**
 * Compiles a {@link Pattern} into a reusable {@code NFAFactory}.
 * A null pattern yields a factory for empty NFAs (no states, zero window
 * time). The timeout-handling flag is forwarded to the produced factory.
 *
 * @param pattern         the pattern to compile; may be null
 * @param timeoutHandling forwarded to the resulting factory
 * @return a factory producing NFAs for the given pattern
 */
@SuppressWarnings("unchecked")
public static <T> NFAFactory<T> compileFactory(
    final Pattern<T, ?> pattern, boolean timeoutHandling) {
    if (pattern == null) {
        // return a factory for empty NFAs
        return new NFAFactoryImpl<>(
            0,
            Collections.<String, Long>emptyMap(),
            Collections.<State<T>>emptyList(),
            timeoutHandling);
    } else {
        // Compile once, then capture the resulting window times and states.
        final NFAFactoryCompiler<T> nfaFactoryCompiler = new NFAFactoryCompiler<>(pattern);
        nfaFactoryCompiler.compileFactory();
        return new NFAFactoryImpl<>(
            nfaFactoryCompiler.getWindowTime(),
            nfaFactoryCompiler.getWindowTimes(),
            nfaFactoryCompiler.getStates(),
            timeoutHandling);
    }
}
|
/**
 * Verifies that compiling a pattern with multiple within() clauses exposes
 * the smallest window (10s) as the factory's overall window time.
 */
@Test
public void testWindowTimeCorrectlySet() {
    Pattern<Event, ?> pattern =
        Pattern.<Event>begin("start")
            .followedBy("middle")
            .within(Time.seconds(10))
            .followedBy("then")
            .within(Time.seconds(20))
            .followedBy("end");
    NFACompiler.NFAFactoryCompiler<Event> factory =
        new NFACompiler.NFAFactoryCompiler<>(pattern);
    factory.compileFactory();
    assertEquals(10000, factory.getWindowTime());
}
|
/**
 * Builds the directory-context properties propagated for a query: query id
 * and user are always included; source, client info, metastore headers and
 * client tags are added only when present/non-empty.
 *
 * @param session the connector session to read from
 * @return an immutable property map
 */
public static Map<String, String> buildDirectoryContextProperties(ConnectorSession session)
{
    ImmutableMap.Builder<String, String> directoryContextProperties = ImmutableMap.builder();
    directoryContextProperties.put(PRESTO_QUERY_ID, session.getQueryId());
    session.getSource().ifPresent(source -> directoryContextProperties.put(PRESTO_QUERY_SOURCE, source));
    session.getClientInfo().ifPresent(clientInfo -> directoryContextProperties.put(PRESTO_CLIENT_INFO, clientInfo));
    getMetastoreHeaders(session).ifPresent(metastoreHeaders -> directoryContextProperties.put(PRESTO_METASTORE_HEADER, metastoreHeaders));
    directoryContextProperties.put(PRESTO_USER_NAME, session.getUser());
    // Client tags are joined into a single delimiter-separated value.
    if (!session.getClientTags().isEmpty()) {
        directoryContextProperties.put(PRESTO_CLIENT_TAGS, join(CLIENT_TAGS_DELIMITER, session.getClientTags()));
    }
    return directoryContextProperties.build();
}
|
/**
 * Verifies that every session attribute (query id, source, client info,
 * user, metastore headers, client tags) round-trips into the directory
 * context property map.
 */
@Test
public void testBuildDirectoryContextProperties()
{
    Map<String, String> additionalProperties = buildDirectoryContextProperties(SESSION);
    assertEquals(additionalProperties.get(PRESTO_QUERY_ID), SESSION.getQueryId());
    assertEquals(Optional.ofNullable(additionalProperties.get(PRESTO_QUERY_SOURCE)), SESSION.getSource());
    assertEquals(Optional.ofNullable(additionalProperties.get(PRESTO_CLIENT_INFO)), SESSION.getClientInfo());
    assertEquals(additionalProperties.get(PRESTO_USER_NAME), SESSION.getUser());
    assertEquals(Optional.ofNullable(additionalProperties.get(PRESTO_METASTORE_HEADER)), getMetastoreHeaders(SESSION));
    assertEquals(Arrays.stream(additionalProperties.get(PRESTO_CLIENT_TAGS).split(CLIENT_TAGS_DELIMITER)).collect(toImmutableSet()), SESSION.getClientTags());
}
|
/**
 * POSTs the given post as JSON to /posts.
 * Completes with a bodiless entity on HTTP 201; any other status is
 * surfaced as an error signal on the returned Mono.
 *
 * @param post the payload to create
 * @return the bodiless response entity (carrying e.g. the Location header)
 */
Mono<ResponseEntity<Void>> save(Post post) {
    return client.post()
        .uri("/posts")
        .contentType(MediaType.APPLICATION_JSON)
        .bodyValue(post)
        .exchangeToMono(response -> {
            if (response.statusCode().equals(HttpStatus.CREATED)) {
                return response.toBodilessEntity();
            }
            // Non-201 responses become an error on the Mono.
            return response.createError();
        });
}
|
/**
 * Verifies save() against a WireMock stub: a 201 response with a Location
 * header completes the Mono with a bodiless entity exposing that header and
 * status, and the request carries the JSON-serialized payload.
 */
@SneakyThrows
@Test
public void testCreatePost() {
    var id = UUID.randomUUID();
    var data = new Post(null, "title1", "content1", Status.DRAFT, null);
    stubFor(post("/posts")
        .willReturn(
            aResponse()
                .withHeader("Location", "/posts/" + id)
                .withStatus(201)
        )
    );
    postClient.save(data)
        .as(StepVerifier::create)
        .consumeNextWith(
            entity -> {
                assertThat(entity.getHeaders().getLocation().toString()).isEqualTo("/posts/" + id);
                assertThat(entity.getStatusCode().value()).isEqualTo(201);
            }
        )
        .verifyComplete();
    // The outgoing request must be JSON and match the serialized payload.
    verify(postRequestedFor(urlEqualTo("/posts"))
        .withHeader("Content-Type", equalTo("application/json"))
        .withRequestBody(equalToJson(Json.write(data)))
    );
}
|
/**
 * Strips a single trailing slash from the given URL.
 * Null and blank (empty/whitespace-only) inputs are returned unchanged.
 *
 * @param rawUrl the URL to normalize, may be null
 * @return the URL without its trailing slash, or the input itself
 */
protected static String getTrimmedUrl(String rawUrl) {
    if (rawUrl == null || rawUrl.trim().isEmpty()) {
        return rawUrl;
    }
    return rawUrl.endsWith("/")
        ? rawUrl.substring(0, rawUrl.length() - 1)
        : rawUrl;
}
|
/**
 * Verifies that getTrimmedUrl() passes a null URL through unchanged.
 */
@Test
public void trim_null_url() {
    assertThat(AzureDevOpsHttpClient.getTrimmedUrl(null))
        .isNull();
}
|
/**
 * Tokenises the given text into word bigrams followed by the individual
 * words. Characters other than letters, digits, whitespace, hyphens,
 * apostrophes and periods are replaced by spaces before splitting.
 *
 * @param text the text to tokenise, may be null
 * @return bigram tokens ("a b") followed by unigram tokens; empty for
 *         null, empty, or punctuation-only input
 */
@Override
public List<String> tokenise(String text) {
    if (Objects.isNull(text) || text.isEmpty()) {
        return new ArrayList<>();
    }
    List<String> tokens = new ArrayList<>();
    // Normalize: keep letters, digits, whitespace, - ' . ; drop the rest.
    text = text.replaceAll("[^\\p{L}\\p{N}\\s\\-'.]", " ").trim();
    if (text.isEmpty()) {
        // Fix: punctuation-only input previously fell through to split(),
        // which yields [""] and produced a single empty-string token.
        return tokens;
    }
    String[] parts = text.split("\\s+");
    // Adjacent-word bigrams first...
    for (int i = 0; i < parts.length - 1; i++) {
        tokens.add(parts[i] + " " + parts[i + 1]);
    }
    // ...then the individual words.
    tokens.addAll(Arrays.asList(parts));
    return tokens;
}
|
/**
 * Verifies that commas are stripped during tokenisation so a single
 * comma-wrapped word yields exactly one token.
 */
@Description("Tokenise, when text only has one word but lots of commas, then return one word")
@Test
void tokenise_WhenTextOnlyHasOneWordButLotsOfCommas_ThenReturnOneToken() {
    // When
    var result = textTokeniser.tokenise(",Aberdeen,,,,");
    // Then
    assertThat(result).isNotEmpty().hasSize(1).contains("Aberdeen");
}
|
/**
 * Inserts a transformation-to-cluster association row into
 * R_TRANS_CLUSTER using the next available id.
 * Synchronized so id allocation and insertion happen atomically.
 *
 * @param id_transformation the transformation id
 * @param id_cluster        the cluster id
 * @return the newly allocated association id
 * @throws KettleException on repository failure
 */
public synchronized ObjectId insertTransformationCluster( ObjectId id_transformation, ObjectId id_cluster ) throws KettleException {
    ObjectId id = connectionDelegate.getNextTransformationClusterID();
    RowMetaAndData table = new RowMetaAndData();
    table.addValue( new ValueMetaInteger(
        KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_TRANS_CLUSTER ), id );
    table.addValue(
        new ValueMetaInteger(
            KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_TRANSFORMATION ),
        id_transformation );
    table.addValue( new ValueMetaInteger(
        KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_CLUSTER ), id );
    connectionDelegate.insertTableRow( KettleDatabaseRepository.TABLE_R_TRANS_CLUSTER, table );
    return id;
}
|
/**
 * Verifies the inserted R_TRANS_CLUSTER row: correct table name, three
 * integer columns (association id, transformation id, cluster id) with the
 * expected names/values, and that the allocated id is returned.
 */
@Test
public void testInsertTransformationCluster() throws KettleException {
    ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass( String.class );
    ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass( RowMetaAndData.class );
    doNothing().when( repo.connectionDelegate ).insertTableRow( argumentTableName.capture(), argumentTableData.capture() );
    doReturn( new LongObjectId( 123 ) ).when( repo.connectionDelegate ).getNextTransformationClusterID();
    ObjectId result = repo.insertTransformationCluster( new LongObjectId( 456 ), new LongObjectId( 789 ) );
    RowMetaAndData insertRecord = argumentTableData.getValue();
    assertEquals( KettleDatabaseRepository.TABLE_R_TRANS_CLUSTER, argumentTableName.getValue() );
    assertEquals( 3, insertRecord.size() );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 0 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_TRANS_CLUSTER, insertRecord.getValueMeta( 0 ).getName() );
    assertEquals( Long.valueOf( 123 ), insertRecord.getInteger( 0 ) );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 1 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_TRANSFORMATION, insertRecord.getValueMeta( 1 ).getName() );
    assertEquals( Long.valueOf( 456 ), insertRecord.getInteger( 1 ) );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 2 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_CLUSTER_ID_CLUSTER, insertRecord.getValueMeta( 2 ).getName() );
    assertEquals( Long.valueOf( 789 ), insertRecord.getInteger( 2 ) );
    assertEquals( new LongObjectId( 123 ), result );
}
|
/**
 * Changes an account's phone number, optionally rotating PNI key material
 * and delivering sync messages to the account's devices.
 * The optional arguments (identity key, signed pre-keys, messages,
 * registration ids) must be all null or all non-null. A request targeting
 * the account's current number is treated as a PNI key distribution rather
 * than a number change.
 *
 * @throws IllegalArgumentException if the optional arguments are mixed
 *         null/non-null
 * @throws MismatchedDevicesException if the device messages do not cover
 *         the account's device set
 * @throws StaleDevicesException if messages target stale devices
 */
public Account changeNumber(final Account account, final String number,
    @Nullable final IdentityKey pniIdentityKey,
    @Nullable final Map<Byte, ECSignedPreKey> deviceSignedPreKeys,
    @Nullable final Map<Byte, KEMSignedPreKey> devicePqLastResortPreKeys,
    @Nullable final List<IncomingMessage> deviceMessages,
    @Nullable final Map<Byte, Integer> pniRegistrationIds)
    throws InterruptedException, MismatchedDevicesException, StaleDevicesException {
    if (ObjectUtils.allNotNull(pniIdentityKey, deviceSignedPreKeys, deviceMessages, pniRegistrationIds)) {
        // AccountsManager validates the device set on deviceSignedPreKeys and pniRegistrationIds
        validateDeviceMessages(account, deviceMessages);
    } else if (!ObjectUtils.allNull(pniIdentityKey, deviceSignedPreKeys, deviceMessages, pniRegistrationIds)) {
        throw new IllegalArgumentException("PNI identity key, signed pre-keys, device messages, and registration IDs must be all null or all non-null");
    }
    if (number.equals(account.getNumber())) {
        // The client has gotten confused/desynchronized with us about their own phone number, most likely due to losing
        // our OK response to an immediately preceding change-number request, and are sending a change they don't realize
        // is a no-op change.
        //
        // We don't need to actually do a number-change operation in our DB, but we *do* need to accept their new key
        // material and distribute the sync messages, to be sure all clients agree with us and each other about what their
        // keys are. Pretend this change-number request was actually a PNI key distribution request.
        if (pniIdentityKey == null) {
            // No key material supplied either — nothing at all to do.
            return account;
        }
        return updatePniKeys(account, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, deviceMessages, pniRegistrationIds);
    }
    final Account updatedAccount = accountsManager.changeNumber(
        account, number, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, pniRegistrationIds);
    // Deliver device sync messages only after the number change persisted.
    if (deviceMessages != null) {
        sendDeviceMessages(updatedAccount, deviceMessages);
    }
    return updatedAccount;
}
|
/**
 * Verifies a full change-number flow with PQ pre-keys: the accounts manager
 * receives the new number and key material, and the linked device gets a
 * sync envelope sourced from the primary device carrying the updated PNI.
 */
@Test
void changeNumberSetPrimaryDevicePrekeyPqAndSendMessages() throws Exception {
    final String originalE164 = "+18005551234";
    final String changedE164 = "+18025551234";
    final UUID aci = UUID.randomUUID();
    final UUID pni = UUID.randomUUID();
    final Account account = mock(Account.class);
    when(account.getNumber()).thenReturn(originalE164);
    when(account.getUuid()).thenReturn(aci);
    when(account.getPhoneNumberIdentifier()).thenReturn(pni);
    final Device d2 = mock(Device.class);
    final byte deviceId2 = 2;
    when(d2.getId()).thenReturn(deviceId2);
    when(account.getDevice(deviceId2)).thenReturn(Optional.of(d2));
    when(account.getDevices()).thenReturn(List.of(d2));
    final ECKeyPair pniIdentityKeyPair = Curve.generateKeyPair();
    final IdentityKey pniIdentityKey = new IdentityKey(pniIdentityKeyPair.getPublicKey());
    final Map<Byte, ECSignedPreKey> prekeys = Map.of(Device.PRIMARY_ID,
        KeysHelper.signedECPreKey(1, pniIdentityKeyPair),
        deviceId2, KeysHelper.signedECPreKey(2, pniIdentityKeyPair));
    final Map<Byte, KEMSignedPreKey> pqPrekeys = Map.of((byte) 3, KeysHelper.signedKEMPreKey(3, pniIdentityKeyPair),
        (byte) 4, KeysHelper.signedKEMPreKey(4, pniIdentityKeyPair));
    final Map<Byte, Integer> registrationIds = Map.of(Device.PRIMARY_ID, 17, deviceId2, 19);
    final IncomingMessage msg = mock(IncomingMessage.class);
    when(msg.destinationDeviceId()).thenReturn(deviceId2);
    when(msg.content()).thenReturn(Base64.getEncoder().encodeToString(new byte[]{1}));
    changeNumberManager.changeNumber(account, changedE164, pniIdentityKey, prekeys, pqPrekeys, List.of(msg), registrationIds);
    verify(accountsManager).changeNumber(account, changedE164, pniIdentityKey, prekeys, pqPrekeys, registrationIds);
    // The sync message must be addressed to/from the ACI and sourced from
    // the primary device, carrying the freshly assigned PNI.
    final ArgumentCaptor<MessageProtos.Envelope> envelopeCaptor = ArgumentCaptor.forClass(MessageProtos.Envelope.class);
    verify(messageSender).sendMessage(any(), eq(d2), envelopeCaptor.capture(), eq(false));
    final MessageProtos.Envelope envelope = envelopeCaptor.getValue();
    assertEquals(aci, UUID.fromString(envelope.getDestinationUuid()));
    assertEquals(aci, UUID.fromString(envelope.getSourceUuid()));
    assertEquals(Device.PRIMARY_ID, envelope.getSourceDevice());
    assertEquals(updatedPhoneNumberIdentifiersByAccount.get(account), UUID.fromString(envelope.getUpdatedPni()));
}
|
/**
 * Picks a target for the given group using weighted selection.
 * Cached weights are reused when their length matches the current target
 * count; otherwise they are (re)initialized via initWeight().
 * NOTE(review): the unconditional put() rewrites the cache on every call —
 * confirm weightMap is a concurrent map if this runs on multiple threads.
 */
@Override
public String getTargetName(final String groupName, final List<String> availableTargetNames) {
    double[] weight = weightMap.containsKey(groupName) && weightMap.get(groupName).length == availableTargetNames.size() ? weightMap.get(groupName) : initWeight(availableTargetNames);
    weightMap.put(groupName, weight);
    return getAvailableTargetName(availableTargetNames, weight);
}
|
/**
 * Verifies that weighted load balancing with a single available target
 * always selects that target.
 */
@Test
void assertGetSingleAvailableTarget() {
    LoadBalanceAlgorithm loadBalanceAlgorithm = TypedSPILoader.getService(LoadBalanceAlgorithm.class, "WEIGHT", PropertiesBuilder.build(new Property("test_read_ds_1", "5")));
    assertThat(loadBalanceAlgorithm.getTargetName("ds", Collections.singletonList("test_read_ds_1")), is("test_read_ds_1"));
}
|
/**
 * Sorts components by the request's sort fields, compounding additional
 * fields as tie-breakers and always falling back to ascending name order
 * last. Returns the input unchanged when no sort fields are requested.
 *
 * @return an immutable sorted copy of the components
 */
public static List<ComponentDto> sortComponents(List<ComponentDto> components, ComponentTreeRequest wsRequest, List<MetricDto> metrics,
    Table<String, MetricDto, ComponentTreeData.Measure> measuresByComponentUuidAndMetric) {
    List<String> sortParameters = wsRequest.getSort();
    if (sortParameters == null || sortParameters.isEmpty()) {
        return components;
    }
    boolean isAscending = wsRequest.getAsc();
    Map<String, Ordering<ComponentDto>> orderingsBySortField = ImmutableMap.<String, Ordering<ComponentDto>>builder()
        .put(NAME_SORT, componentNameOrdering(isAscending))
        .put(QUALIFIER_SORT, componentQualifierOrdering(isAscending))
        .put(PATH_SORT, componentPathOrdering(isAscending))
        .put(METRIC_SORT, metricValueOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
        .put(METRIC_PERIOD_SORT, metricPeriodOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
        .build();
    // NOTE(review): an unrecognized sort field yields a null Ordering here
    // and an NPE below — presumably request validation guarantees known
    // fields; confirm upstream.
    String firstSortParameter = sortParameters.get(0);
    Ordering<ComponentDto> primaryOrdering = orderingsBySortField.get(firstSortParameter);
    if (sortParameters.size() > 1) {
        for (int i = 1; i < sortParameters.size(); i++) {
            String secondarySortParameter = sortParameters.get(i);
            Ordering<ComponentDto> secondaryOrdering = orderingsBySortField.get(secondarySortParameter);
            primaryOrdering = primaryOrdering.compound(secondaryOrdering);
        }
    }
    // Final tie-breaker: ascending component name.
    primaryOrdering = primaryOrdering.compound(componentNameOrdering(true));
    return primaryOrdering.immutableSortedCopy(components);
}
|
/**
 * Verifies ascending sort by a period-1 metric value, with components
 * lacking a measure ordered last.
 */
@Test
void sort_by_numerical_metric_period_1_key_ascending() {
    components.add(newComponentWithoutSnapshotId("name-without-measure", "qualifier-without-measure", "path-without-measure"));
    ComponentTreeRequest wsRequest = newRequest(singletonList(METRIC_PERIOD_SORT), true, NEW_METRIC_KEY).setMetricPeriodSort(1);
    List<ComponentDto> result = sortComponents(wsRequest);
    assertThat(result).extracting("path")
        .containsExactly("path-1", "path-2", "path-3", "path-4", "path-5", "path-6", "path-7", "path-8", "path-9", "path-without-measure");
}
|
/**
 * Looks up the schema attached to the given topic.
 *
 * @param topicPath the topic to inspect
 * @return the topic's schema path, or {@code null} when the topic has no
 *         schema or its schema was deleted
 * @throws IOException declared by the interface contract
 */
@Override
public SchemaPath getSchemaPath(TopicPath topicPath) throws IOException {
    GetTopicRequest request = GetTopicRequest.newBuilder().setTopic(topicPath.getPath()).build();
    Topic topic = publisherStub().getTopic(request);
    String schemaPath = topic.getSchemaSettings().getSchema();
    // An empty schema means none is attached; the sentinel value marks a
    // schema that has since been deleted. Both resolve to null.
    if (schemaPath.isEmpty() || SchemaPath.DELETED_SCHEMA_PATH.equals(schemaPath)) {
        return null;
    }
    return PubsubClient.schemaPathFromPath(schemaPath);
}
|
/**
 * Verifies getSchemaPath() against an in-process gRPC publisher stub for the
 * four topic states: nonexistent (error), deleted schema (null), no schema
 * (null), and an attached schema (its path).
 */
@Test
public void getSchemaPath() throws IOException {
    initializeClient(null, null);
    TopicPath topicDoesNotExist =
        PubsubClient.topicPathFromPath("projects/testProject/topics/idontexist");
    TopicPath topicExistsDeletedSchema =
        PubsubClient.topicPathFromPath("projects/testProject/topics/deletedSchema");
    TopicPath topicExistsNoSchema =
        PubsubClient.topicPathFromPath("projects/testProject/topics/noSchema");
    TopicPath topicExistsSchema =
        PubsubClient.topicPathFromPath("projects/testProject/topics/topicWithSchema");
    // Fake publisher service answering getTopic() for each scenario.
    PublisherImplBase publisherImplBase =
        new PublisherImplBase() {
            @Override
            public void getTopic(GetTopicRequest request, StreamObserver<Topic> responseObserver) {
                String topicPath = request.getTopic();
                if (topicPath.equals(topicDoesNotExist.getPath())) {
                    responseObserver.onError(
                        new IOException(String.format("%s does not exist", topicPath)));
                }
                if (topicPath.equals(topicExistsDeletedSchema.getPath())) {
                    responseObserver.onNext(
                        Topic.newBuilder()
                            .setName(topicPath)
                            .setSchemaSettings(
                                SchemaSettings.newBuilder()
                                    .setSchema(SchemaPath.DELETED_SCHEMA_PATH)
                                    .build())
                            .build());
                    responseObserver.onCompleted();
                }
                if (topicPath.equals(topicExistsNoSchema.getPath())) {
                    responseObserver.onNext(Topic.newBuilder().setName(topicPath).build());
                    responseObserver.onCompleted();
                }
                if (topicPath.equals(topicExistsSchema.getPath())) {
                    responseObserver.onNext(
                        Topic.newBuilder()
                            .setName(topicPath)
                            .setSchemaSettings(
                                SchemaSettings.newBuilder().setSchema(SCHEMA.getPath()).build())
                            .build());
                    responseObserver.onCompleted();
                }
            }
        };
    Server server =
        InProcessServerBuilder.forName(channelName).addService(publisherImplBase).build().start();
    try {
        assertThrows(
            "topic does not exist",
            StatusRuntimeException.class,
            () -> client.getSchemaPath(topicDoesNotExist));
        assertNull(
            "topic with deleted Schema should return null SchemaPath",
            client.getSchemaPath(topicExistsDeletedSchema));
        assertNull(
            "topic without Schema should return null SchemaPath",
            client.getSchemaPath(topicExistsNoSchema));
        assertEquals(SCHEMA.getPath(), client.getSchemaPath(topicExistsSchema).getPath());
    } finally {
        // Always tear down the in-process server.
        server.shutdownNow();
    }
}
|
/**
 * Cancels the paragraph by delegating to the IR interpreter backing the
 * targeted Shiny app (taken from the "app" local property, defaulting to
 * {@code DEFAULT_APP_NAME}).
 */
@Override
public void cancel(InterpreterContext context) throws InterpreterException {
  String appName = context.getStringLocalProperty("app", DEFAULT_APP_NAME);
  getIRInterpreter(appName).cancel(context);
}
|
// End-to-end test: launches two Shiny apps (default and "app2"), verifies
// both respond over HTTP, then cancels the first and checks the second keeps
// serving. NOTE(review): relies on fixed sleeps for app startup/shutdown —
// inherently timing-sensitive; consider polling with a timeout instead.
@Test
void testShinyApp() throws
IOException, InterpreterException, InterruptedException, UnirestException {
/****************** Launch Shiny app with default app name *****************************/
InterpreterContext context = getInterpreterContext();
context.getLocalProperties().put("type", "ui");
InterpreterResult result =
interpreter.interpret(IOUtils.toString(getClass().getResource("/ui.R"), StandardCharsets.UTF_8), context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
context = getInterpreterContext();
context.getLocalProperties().put("type", "server");
result = interpreter.interpret(IOUtils.toString(getClass().getResource("/server.R"), StandardCharsets.UTF_8), context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
final InterpreterContext context2 = getInterpreterContext();
context2.getLocalProperties().put("type", "run");
// "run" blocks while the app serves, so it goes on a background thread.
Thread thread = new Thread(() -> {
try {
interpreter.interpret("", context2);
} catch (Exception e) {
e.printStackTrace();
}
});
thread.start();
// wait for the shiny app start
Thread.sleep(5 * 1000);
// extract shiny url
List<InterpreterResultMessage> resultMessages = context2.out.toInterpreterResultMessage();
assertEquals(1, resultMessages.size(), resultMessages.toString());
assertEquals(InterpreterResult.Type.HTML, resultMessages.get(0).getType());
String resultMessageData = resultMessages.get(0).getData();
assertTrue(resultMessageData.contains("<iframe"), resultMessageData);
// The app URL is embedded as the iframe's src attribute.
Pattern urlPattern = Pattern.compile(".*src=\"(http\\S*)\".*", Pattern.DOTALL);
Matcher matcher = urlPattern.matcher(resultMessageData);
if (!matcher.matches()) {
fail("Unable to extract url: " + resultMessageData);
}
String shinyURL = matcher.group(1);
// verify shiny app via calling its rest api
HttpResponse<String> response = Unirest.get(shinyURL).asString();
assertEquals(200, response.getStatus());
assertTrue(response.getBody().contains("Shiny Text"), response.getBody());
/************************ Launch another shiny app (app2) *****************************/
context = getInterpreterContext();
context.getLocalProperties().put("type", "ui");
context.getLocalProperties().put("app", "app2");
result =
interpreter.interpret(IOUtils.toString(getClass().getResource("/ui.R"), StandardCharsets.UTF_8), context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
context = getInterpreterContext();
context.getLocalProperties().put("type", "server");
context.getLocalProperties().put("app", "app2");
result = interpreter.interpret(IOUtils.toString(getClass().getResource("/server.R"), StandardCharsets.UTF_8), context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
final InterpreterContext context3 = getInterpreterContext();
context3.getLocalProperties().put("type", "run");
context3.getLocalProperties().put("app", "app2");
thread = new Thread(() -> {
try {
interpreter.interpret("", context3);
} catch (Exception e) {
e.printStackTrace();
}
});
thread.start();
// wait for the shiny app start
Thread.sleep(5 * 1000);
// extract shiny url
resultMessages = context3.out.toInterpreterResultMessage();
assertEquals(1, resultMessages.size());
assertEquals(InterpreterResult.Type.HTML, resultMessages.get(0).getType());
resultMessageData = resultMessages.get(0).getData();
assertTrue(resultMessageData.contains("<iframe"), resultMessageData);
matcher = urlPattern.matcher(resultMessageData);
if (!matcher.matches()) {
fail("Unable to extract url: " + resultMessageData);
}
String shinyURL2 = matcher.group(1);
// verify shiny app via calling its rest api
response = Unirest.get(shinyURL2).asString();
assertEquals(200, response.getStatus());
assertTrue(response.getBody().contains("Shiny Text"), response.getBody());
// cancel paragraph to stop the first shiny app
interpreter.cancel(getInterpreterContext());
// wait for shiny app to be stopped
Thread.sleep(1000);
// The first app must now refuse connections...
try {
Unirest.get(shinyURL).asString();
fail("Should fail to connect to shiny app");
} catch (Exception e) {
assertTrue(e.getMessage().contains("Connection refused"), e.getMessage());
}
// the second shiny app still works
response = Unirest.get(shinyURL2).asString();
assertEquals(200, response.getStatus());
assertTrue(response.getBody().contains("Shiny Text"), response.getBody());
}
|
/**
 * Renders the given AST node back to SQL text, with any trailing newlines
 * produced by the formatter stripped off.
 */
public static String formatSql(final AstNode root) {
  final StringBuilder sql = new StringBuilder();
  new Formatter(sql).process(root, 0);
  return StringUtils.stripEnd(sql.toString(), "\n");
}
|
// Verifies that a CREATE STREAM with an explicit KEY column is formatted
// with backquoted identifiers, the KEY marker, and the WITH properties.
@Test
public void shouldFormatCreateStreamStatementWithExplicitKey() {
// Given:
final CreateStream createStream = new CreateStream(
TEST,
ELEMENTS_WITH_KEY,
false,
false,
SOME_WITH_PROPS,
false);
// When:
final String sql = SqlFormatter.formatSql(createStream);
// Then:
assertThat(sql, is("CREATE STREAM TEST (`k3` STRING KEY, `Foo` STRING) "
+ "WITH (KAFKA_TOPIC='topic_test', VALUE_FORMAT='JSON');"));
}
|
/**
 * Formats a monetary value according to this formatter's configuration
 * (minimum decimals, optional decimal groups, sign characters, currency code
 * placement, and digit script).
 *
 * @param monetary value to format
 * @return the formatted value as a character sequence
 */
public CharSequence format(Monetary monetary) {
// determine maximum number of decimals that can be visible in the formatted string
// (if all decimal groups were to be used)
int max = minDecimals;
if (decimalGroups != null)
for (int group : decimalGroups)
max += group;
final int maxVisibleDecimals = max;
int smallestUnitExponent = monetary.smallestUnitExponent();
// Cannot show more decimal places than the currency's smallest unit has.
checkState(maxVisibleDecimals <= smallestUnitExponent, () ->
"maxVisibleDecimals cannot exceed " + smallestUnitExponent + ": " + maxVisibleDecimals);
// convert to decimal; sign is handled separately below, so work on the magnitude
long satoshis = Math.abs(monetary.getValue());
int decimalShift = smallestUnitExponent - shift;
DecimalNumber decimal = satoshisToDecimal(satoshis, roundingMode, decimalShift, maxVisibleDecimals);
long numbers = decimal.numbers;
long decimals = decimal.decimals;
// formatting: zero-pad the fractional part to the full decimal width first
String decimalsStr = decimalShift > 0 ? String.format(Locale.US,
"%0" + Integer.toString(decimalShift) + "d", decimals) : "";
StringBuilder str = new StringBuilder(decimalsStr);
while (str.length() > minDecimals && str.charAt(str.length() - 1) == '0')
str.setLength(str.length() - 1); // trim trailing zero
// Re-pad with zeros up to the end of the decimal group the value falls into.
int i = minDecimals;
if (decimalGroups != null) {
for (int group : decimalGroups) {
if (str.length() > i && str.length() < i + group) {
while (str.length() < i + group)
str.append('0');
break;
}
i += group;
}
}
if (str.length() > 0)
str.insert(0, decimalMark);
str.insert(0, numbers);
// Prepend the sign character, if any is configured.
if (monetary.getValue() < 0)
str.insert(0, negativeSign);
else if (positiveSign != 0)
str.insert(0, positiveSign);
// Attach the currency code on the configured side.
if (codes != null) {
if (codePrefixed) {
str.insert(0, codeSeparator);
str.insert(0, code());
} else {
str.append(codeSeparator);
str.append(code());
}
}
// Convert to non-arabic digits.
if (zeroDigit != '0') {
int offset = zeroDigit - '0';
for (int d = 0; d < str.length(); d++) {
char c = str.charAt(d);
if (Character.isDigit(c))
str.setCharAt(d, (char) (c + offset));
}
}
return str;
}
|
// Exercises rounding behavior of the BTC formatter across decimal-group
// configurations: exact coin values, one satoshi below/above a coin, a
// half-way rounding pivot, and a large mixed value.
@Test
public void btcRounding() {
assertEquals("0", format(ZERO, 0, 0));
assertEquals("0.00", format(ZERO, 0, 2));
assertEquals("1", format(COIN, 0, 0));
assertEquals("1.0", format(COIN, 0, 1));
assertEquals("1.00", format(COIN, 0, 2, 2));
assertEquals("1.00", format(COIN, 0, 2, 2, 2));
assertEquals("1.00", format(COIN, 0, 2, 2, 2, 2));
assertEquals("1.000", format(COIN, 0, 3));
assertEquals("1.0000", format(COIN, 0, 4));
// One satoshi below a full coin: rounds up unless all 8 decimals are visible.
final Coin justNot = COIN.subtract(SATOSHI);
assertEquals("1", format(justNot, 0, 0));
assertEquals("1.0", format(justNot, 0, 1));
assertEquals("1.00", format(justNot, 0, 2, 2));
assertEquals("1.00", format(justNot, 0, 2, 2, 2));
assertEquals("0.99999999", format(justNot, 0, 2, 2, 2, 2));
assertEquals("1.000", format(justNot, 0, 3));
assertEquals("1.0000", format(justNot, 0, 4));
// One satoshi above a full coin: the extra satoshi only shows at 8 decimals.
final Coin slightlyMore = COIN.add(SATOSHI);
assertEquals("1", format(slightlyMore, 0, 0));
assertEquals("1.0", format(slightlyMore, 0, 1));
assertEquals("1.00", format(slightlyMore, 0, 2, 2));
assertEquals("1.00", format(slightlyMore, 0, 2, 2, 2));
assertEquals("1.00000001", format(slightlyMore, 0, 2, 2, 2, 2));
assertEquals("1.000", format(slightlyMore, 0, 3));
assertEquals("1.0000", format(slightlyMore, 0, 4));
// Half-way pivot (…05): kept exactly at 8 visible decimals, rounded at 7.
final Coin pivot = COIN.add(SATOSHI.multiply(5));
assertEquals("1.00000005", format(pivot, 0, 8));
assertEquals("1.00000005", format(pivot, 0, 7, 1));
assertEquals("1.0000001", format(pivot, 0, 7));
// Large mixed value, rounded at various decimal widths.
final Coin value = Coin.valueOf(1122334455667788l);
assertEquals("11223345", format(value, 0, 0));
assertEquals("11223344.6", format(value, 0, 1));
assertEquals("11223344.5567", format(value, 0, 2, 2));
assertEquals("11223344.556678", format(value, 0, 2, 2, 2));
assertEquals("11223344.55667788", format(value, 0, 2, 2, 2, 2));
assertEquals("11223344.557", format(value, 0, 3));
assertEquals("11223344.5567", format(value, 0, 4));
}
|
/**
 * Converts a message id object into its base string form, adding an AMQP type
 * prefix where needed so the original type can be recovered from the string.
 *
 * @param messageId id to convert; a String, UUID, UnsignedLong or Binary
 * @return the encoded id string, or {@code null} when {@code messageId} is null
 * @throws IllegalArgumentException when the id's type is not supported
 */
public String toBaseMessageIdString(Object messageId) {
  if (messageId == null) {
    return null;
  }
  if (messageId instanceof String) {
    String stringId = (String) messageId;
    // A string that itself begins with a type-encoding prefix must be escaped
    // with the string prefix (even if its prefix was already the string one),
    // or it would be mis-interpreted when decoded.
    return hasTypeEncodingPrefix(stringId) ? AMQP_STRING_PREFIX + stringId : stringId;
  }
  if (messageId instanceof UUID) {
    return AMQP_UUID_PREFIX + messageId.toString();
  }
  if (messageId instanceof UnsignedLong) {
    return AMQP_ULONG_PREFIX + messageId.toString();
  }
  if (messageId instanceof Binary) {
    ByteBuffer buffer = ((Binary) messageId).asByteBuffer();
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return AMQP_BINARY_PREFIX + convertBinaryToHexString(bytes);
  }
  throw new IllegalArgumentException("Unsupported type provided: " + messageId.getClass());
}
|
// A null message id must pass straight through as null, not throw.
@Test
public void testToBaseMessageIdStringWithNull() {
String nullString = null;
assertNull("null string should have been returned", messageIdHelper.toBaseMessageIdString(nullString));
}
|
/** Returns the socket address the internal RPC server is actually bound to. */
public InetSocketAddress getBoundIpcAddress() {
return rpcServer.getAddress();
}
|
// Verifies journal metrics across two edit batches: counters increment,
// lag is computed against the committed txid, and the journal timestamp
// advances with each write.
@Test(timeout=100000)
public void testJournal() throws Exception {
// Baseline: nothing written yet, all counters/gauges at zero.
MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(
journal.getMetrics().getName());
MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
MetricsAsserts.assertGauge("LastJournalTimestamp", 0L, metrics);
long beginTimestamp = System.currentTimeMillis();
IPCLoggerChannel ch = new IPCLoggerChannel(
conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
// First batch: one txn, no lag.
ch.sendEdits(1L, 1, 1, "hello".getBytes(StandardCharsets.UTF_8)).get();
metrics = MetricsAsserts.getMetrics(
journal.getMetrics().getName());
MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
long lastJournalTimestamp = MetricsAsserts.getLongGauge(
"LastJournalTimestamp", metrics);
assertTrue(lastJournalTimestamp > beginTimestamp);
beginTimestamp = lastJournalTimestamp;
// Second batch: committed txid is ahead (100 vs 2), so the journal is
// lagging by 98 txns and the lagging counter increments.
ch.setCommittedTxId(100L);
ch.sendEdits(1L, 2, 1, "goodbye".getBytes(StandardCharsets.UTF_8)).get();
metrics = MetricsAsserts.getMetrics(
journal.getMetrics().getName());
MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);
lastJournalTimestamp = MetricsAsserts.getLongGauge(
"LastJournalTimestamp", metrics);
assertTrue(lastJournalTimestamp > beginTimestamp);
}
|
/**
 * Builds the source stream for this node's backing data source. The node id
 * and the source operation name are pushed onto the query context stacker so
 * downstream processor naming stays stable.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
  final Stacker sourceContext =
      buildContext.buildNodeContext(getId().toString()).push(SOURCE_OP_NAME);
  return schemaKStreamFactory.create(buildContext, dataSource, sourceContext);
}
|
// Smoke test: building the stream for a mocked source delegates to the
// schemaKStreamFactory exactly once.
@Test
public void shouldBuildSourceStreamWithCorrectTimestampIndexForQualifiedFieldName() {
// Given:
givenNodeWithMockSource();
// When:
node.buildStream(buildContext);
// Then:
verify(schemaKStreamFactory).create(any(), any(), any());
}
|
/**
 * Creates a thread pool from the given profile and wraps it so its activity
 * is reported to the meter registry under the profile's id.
 */
@Override
public ExecutorService newThreadPool(ThreadPoolProfile profile, ThreadFactory threadFactory) {
  ExecutorService delegate = threadPoolFactory.newThreadPool(profile, threadFactory);
  return ExecutorServiceMetrics.monitor(meterRegistry, delegate, name(profile.getId()));
}
|
// The factory must return a metrics-wrapping (Timed) executor and register
// a timer for it under the expected name tag.
@Test
public void testNewThreadPool() {
final ExecutorService executorService = instrumentedThreadPoolFactory.newThreadPool(profile, threadFactory);
assertThat(executorService, is(notNullValue()));
assertThat(executorService, is(instanceOf(TimedExecutorService.class)));
Tags tags = Tags.of("name", METRICS_NAME + "1");
inOrder.verify(registry, times(1)).timer("executor", tags);
}
|
// Wraps the checker's definite fix for the given expression as a Change.
// The NameSuggester parameter is unused here because the fix needs no
// generated variable names beyond what definiteFix produces.
@Override
protected Optional<Change> fix(ExpressionTree tree, VisitorState state, NameSuggester suggester) {
return Change.of(definiteFix(tree, state));
}
|
// Refactoring test: a leaked Files.lines() stream must be wrapped in
// try-with-resources, with the Stream import added to the output.
@Test
public void fix() {
BugCheckerRefactoringTestHelper.newInstance(StreamResourceLeak.class, getClass())
.addInputLines(
"in/Test.java",
"import java.io.IOException;",
"import java.nio.file.Files;",
"import java.nio.file.Path;",
"import java.util.stream.Collectors;",
"class Test {",
"  String f(Path p) throws IOException {",
"    return Files.lines(p).collect(Collectors.joining(\", \"));",
"  }",
"}")
.addOutputLines(
"out/Test.java",
"import java.io.IOException;",
"import java.nio.file.Files;",
"import java.nio.file.Path;",
"import java.util.stream.Collectors;",
"import java.util.stream.Stream;",
"class Test {",
"  String f(Path p) throws IOException {",
"    try (Stream<String> stream = Files.lines(p)) {",
"      return stream.collect(Collectors.joining(\", \"));",
"    }",
"  }",
"}")
.doTest();
}
|
/** Returns the anonymized text for the given parse tree by running the builder over it. */
public String anonymize(final ParseTree tree) {
return build(tree);
}
|
// Golden-file (approval) test: anonymization output for a CSAS statement
// containing identifiers and literals is compared against the approved file.
@Test
public void shouldAnonymizeCreateStreamAsQueryCorrectly() {
final String output = anon.anonymize(
"CREATE STREAM my_stream AS SELECT user_id, browser_cookie, ip_address\n"
+ "FROM another_stream\n"
+ "WHERE user_id = 4214\n"
+ "AND browser_cookie = 'aefde34ec'\n"
+ "AND ip_address = '10.10.0.2';");
Approvals.verify(output);
}
|
/**
 * Resolves the function candidate matching the supplied argument types.
 * An exact match (no implicit casts) is always tried first; implicit casting
 * is only attempted as a fallback, and only when this index supports it.
 * When nothing matches, the exception built by
 * {@code createNoMatchingFunctionException} is thrown.
 */
T getFunction(final List<SqlArgument> arguments) {
  // Pass 1: strict matching without implicit casts.
  final Optional<T> exactMatch = findMatchingCandidate(arguments, false);
  if (exactMatch.isPresent()) {
    return exactMatch.get();
  }
  if (!supportsImplicitCasts) {
    throw createNoMatchingFunctionException(arguments);
  }
  // Pass 2: retry, this time allowing implicit casts.
  return findMatchingCandidate(arguments, true)
      .orElseThrow(() -> createNoMatchingFunctionException(arguments));
}
|
// A single registered one-argument function must be resolved for a matching
// single STRING argument.
@Test
public void shouldFindOneArg() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING)));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
/**
 * Opens the given workspace: closes the currently open one, makes the new
 * workspace current on the project, then fires a SELECT event to listeners.
 * Synchronized on this controller so concurrent workspace switches are
 * serialized and listeners see a consistent order of events.
 */
@Override
public void openWorkspace(Workspace workspace) {
synchronized (this) {
closeCurrentWorkspace();
getCurrentProject().setCurrentWorkspace(workspace);
//Event
fireWorkspaceEvent(EventType.SELECT, workspace);
}
}
|
// Opening a new workspace must close the previous one, make the new one
// current, and notify the listener with unselect(old) then select(new).
@Test
public void testOpenWorkspace() {
ProjectControllerImpl pc = new ProjectControllerImpl();
pc.addWorkspaceListener(workspaceListener);
Project project = pc.newProject();
Workspace originalWorkspace = pc.getCurrentWorkspace();
Workspace workspace = pc.newWorkspace(project);
pc.openWorkspace(workspace);
Assert.assertSame(workspace, pc.getCurrentWorkspace());
Assert.assertTrue(originalWorkspace.isClosed());
Assert.assertTrue(workspace.isOpen());
Mockito.verify(workspaceListener).unselect(originalWorkspace);
Mockito.verify(workspaceListener).select(workspace);
}
|
/**
 * Serializes this drag-and-drop container to XML: optional ID, the drag type
 * code, and the payload data Base64-encoded.
 *
 * @return the XML document as a string
 * @throws RuntimeException if the payload cannot be encoded as UTF-8
 */
public String getXML() {
try {
StringBuilder xml = new StringBuilder( 100 );
xml.append( XMLHandler.getXMLHeader() ); // UTF-8 XML header
xml.append( XMLHandler.openTag( XML_TAG ) ).append( Const.CR );
// ID is optional; omit the tag entirely when absent.
if ( id != null ) {
xml.append( "  " ).append( XMLHandler.addTagValue( "ID", id ) );
}
xml.append( "  " ).append( XMLHandler.addTagValue( "DragType", getTypeCode() ) );
// Payload is Base64-encoded so arbitrary characters survive the XML round-trip.
xml.append( "  " ).append(
XMLHandler
.addTagValue( "Data", new String( Base64.encodeBase64( data.getBytes( Const.XML_ENCODING ) ) ) ) );
xml.append( XMLHandler.closeTag( XML_TAG ) ).append( Const.CR );
return xml.toString();
} catch ( UnsupportedEncodingException e ) {
throw new RuntimeException( "Unable to encode String in encoding [" + Const.XML_ENCODING + "]", e );
}
}
|
// Serialization with an explicit step ID must include the <ID> tag and the
// Base64-encoded step name ("Step Name" -> "U3RlcCBOYW1l").
@Test
public void getXMLWithId() {
DragAndDropContainer dnd = new DragAndDropContainer( DragAndDropContainer.TYPE_BASE_STEP_TYPE, "Step Name", "StepID" );
String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + Const.CR
+ "<DragAndDrop>" + Const.CR
+ "  <ID>StepID</ID>" + Const.CR
+ "  <DragType>BaseStep</DragType>" + Const.CR
+ "  <Data>U3RlcCBOYW1l</Data>" + Const.CR
+ "</DragAndDrop>" + Const.CR;
assertEquals( xml, dnd.getXML() );
}
|
/**
 * Returns a canonical form of the given uri: the host replaced by its fully
 * qualified equivalent and a port always present ({@code defaultPort} is used
 * when the uri has none). Uris without an authority — e.g. "file" scheme or
 * relative uris — are returned unchanged.
 *
 * @throws IllegalArgumentException if the rebuilt uri is syntactically invalid
 */
public static URI getCanonicalUri(URI uri, int defaultPort) {
  String host = uri.getHost();
  // No authority (e.g. "file" scheme or relative uri): nothing to canonicalize.
  if (host == null) {
    return uri;
  }
  String canonicalHost = canonicalizeHost(host);
  int port = uri.getPort();
  // Already fully qualified and already carrying an explicit port: done.
  if (host.equals(canonicalHost) && port != -1) {
    return uri;
  }
  // Rebuild the uri with the canonical host and an explicit port.
  int effectivePort = (port == -1) ? defaultPort : port;
  try {
    return new URI(uri.getScheme(), uri.getUserInfo(), canonicalHost, effectivePort,
        uri.getPath(), uri.getQuery(), uri.getFragment());
  } catch (URISyntaxException e) {
    throw new IllegalArgumentException(e);
  }
}
|
// With no port in the uri and no default port (-1), the host is still
// canonicalized and the port stays absent.
@Test
public void testCanonicalUriWithNoPortNoDefaultPort() {
URI uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), -1);
assertEquals("scheme://host.a.b/path", uri.toString());
}
|
/**
 * Asks each configured source, in registration order, for read-time
 * encryption information and returns the first non-empty answer as an
 * immutable copy.
 *
 * @return the first source's non-empty result, or empty when no source has any
 */
public Optional<Map<String, EncryptionInformation>> getReadEncryptionInformation(
        ConnectorSession session,
        Table table,
        Optional<Set<HiveColumnHandle>> requestedColumns,
        Map<String, Partition> partitions)
{
    for (EncryptionInformationSource source : sources) {
        Optional<Map<String, EncryptionInformation>> candidate =
                source.getReadEncryptionInformation(session, table, requestedColumns, partitions);
        // Defensive: tolerate misbehaving sources returning null instead of Optional.empty().
        if (candidate != null && candidate.isPresent()) {
            return candidate.map(ImmutableMap::copyOf);
        }
    }
    return Optional.empty();
}
|
// With two empty sources before two non-empty ones, the provider must return
// the first non-empty source's result (and never consult later sources).
@Test
public void testReturnsFirstNonEmptyObject()
{
EncryptionInformation encryptionInformation1 = TestEncryptionInformationSource.createEncryptionInformation("test1");
EncryptionInformation encryptionInformation2 = TestEncryptionInformationSource.createEncryptionInformation("test2");
HiveEncryptionInformationProvider provider = new HiveEncryptionInformationProvider(ImmutableList.of(
new TestEncryptionInformationSource(Optional.empty()),
new TestEncryptionInformationSource(Optional.empty()),
new TestEncryptionInformationSource(Optional.of(encryptionInformation1)),
new TestEncryptionInformationSource(Optional.of(encryptionInformation2))));
assertEquals(provider.getReadEncryptionInformation(SESSION, TEST_TABLE, Optional.empty()).get(), encryptionInformation1);
}
|
/**
 * Matches the given package name against this matcher's pattern.
 *
 * @param aPackage fully qualified package name to test
 * @return a {@link Result} capturing the match, or empty when it does not match
 */
@PublicAPI(usage = ACCESS)
public Optional<Result> match(String aPackage) {
    Matcher matcher = packagePattern.matcher(aPackage);
    if (matcher.matches()) {
        return Optional.of(new Result(matcher));
    }
    return Optional.empty();
}
|
// Table-driven test of the package pattern language: literal matches,
// ".." (any number of packages), "*" (single package / name part), and
// "[a|b]" alternation groups — each row is (pattern, candidate, expected).
@Test
@DataProvider(value = {
"some.arbitrary.pkg            , some.arbitrary.pkg             , true",
"some.arbitrary.pkg            , some.thing.different           , false",
"some..pkg                     , some.arbitrary.pkg             , true",
"some..middle..pkg             , some.arbitrary.middle.more.pkg , true",
"*..pkg                        , some.arbitrary.pkg             , true",
"some..*                       , some.arbitrary.pkg             , true",
"*..pkg                        , some.arbitrary.pkg.toomuch     , false",
"toomuch.some..*               , some.arbitrary.pkg             , false",
"*..wrong                      , some.arbitrary.pkg             , false",
"some..*                       , wrong.arbitrary.pkg            , false",
"..some                        , some                           , true",
"some..                        , some                           , true",
"*..some                       , some                           , false",
"some..*                       , some                           , false",
"..some                        , asome                          , false",
"some..                        , somea                          , false",
"*.*.*                         , wrong.arbitrary.pkg            , true",
"*.*.*                         , wrong.arbitrary.pkg.toomuch    , false",
"some.arbi*.pk*..              , some.arbitrary.pkg.whatever    , true",
"some.arbi*..                  , some.brbitrary.pkg             , false",
"some.*rary.*kg..              , some.arbitrary.pkg.whatever    , true",
"some.*rary..                  , some.arbitrarz.pkg             , false",
"some.pkg                      , someepkg                       , false",
"..pkg..                       , some.random.pkg.maybe.anywhere , true",
"..p..                         , s.r.p.m.a                      , true",
"*..pkg..*                     , some.random.pkg.maybe.anywhere , true",
"*..p..*                       , s.r.p.m.a                      , true",
"..[a|b|c].pk*..               , some.a.pkg.whatever            , true",
"..[b|c].pk*..                 , some.a.pkg.whatever            , false",
"..[a|b*].pk*..                , some.bitrary.pkg.whatever      , true",
"..[a|b*].pk*..                , some.a.pkg.whatever            , true",
"..[a|b*].pk*..                , some.arbitrary.pkg.whatever    , false",
"..[*c*|*d*].pk*..             , some.anydinside.pkg.whatever   , true",
"..[*c*|*d*].pk*..             , some.nofit.pkg.whatever        , false",
})
public void match(String matcher, String target, boolean matches) {
assertThat(PackageMatcher.of(matcher).matches(target))
.as("package matches")
.isEqualTo(matches);
}
|
/**
 * Replaces an attendee's schedules for a meeting: validates the meeting
 * (by UUID, must be unlocked) and the attendee, deletes the attendee's
 * existing schedules, then saves the newly requested ones. Runs in a single
 * transaction so the delete-then-save replacement is atomic.
 *
 * @throws MomoException with INVALID_UUID when the meeting does not exist,
 *     or INVALID_ATTENDEE when the attendee does not belong to the meeting
 */
@Transactional
public void create(String uuid, long attendeeId, ScheduleCreateRequest request) {
Meeting meeting = meetingRepository.findByUuid(uuid)
.orElseThrow(() -> new MomoException(MeetingErrorCode.INVALID_UUID));
validateMeetingUnLocked(meeting);
Attendee attendee = attendeeRepository.findByIdAndMeeting(attendeeId, meeting)
.orElseThrow(() -> new MomoException(AttendeeErrorCode.INVALID_ATTENDEE));
// Full replacement: drop the attendee's old schedules before saving new ones.
scheduleRepository.deleteAllByAttendee(attendee);
List<Schedule> schedules = createSchedules(request, meeting, attendee);
scheduleRepository.saveAll(schedules);
}
|
// A UUID belonging to a meeting that was never persisted must be rejected
// with INVALID_UUID.
@DisplayName("스케줄 생성 요청의 UUID가 존재하지 않으면 예외를 발생시킨다.")
@Test
void throwsExceptionWhenInvalidUUID() {
Meeting other = MeetingFixture.DINNER.create();
String invalidUUID = other.getUuid();
long attendeeId = attendee.getId();
ScheduleCreateRequest request = new ScheduleCreateRequest(dateTimes);
assertThatThrownBy(() -> scheduleService.create(invalidUUID, attendeeId, request))
.isInstanceOf(MomoException.class)
.hasMessage(MeetingErrorCode.INVALID_UUID.message());
}
|
/**
 * Returns the primary replica owner to sync this partition from, or null
 * when no sync is possible: the partition has no owner yet, this node has
 * itself become the owner, or this node is no longer an owner/backup of
 * the partition.
 *
 * @param partitionId partition being synced
 * @param replicaIndex replica index being synced (used for logging only here)
 */
PartitionReplica checkAndGetPrimaryReplicaOwner(int partitionId, int replicaIndex) {
InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(partitionId);
PartitionReplica owner = partition.getOwnerReplicaOrNull();
if (owner == null) {
logger.info("Sync replica target is null, no need to sync -> partitionId=" + partitionId + ", replicaIndex="
+ replicaIndex);
return null;
}
PartitionReplica localReplica = PartitionReplica.from(nodeEngine.getLocalMember());
// If this node owns the partition, there is nothing to sync from.
if (owner.equals(localReplica)) {
if (logger.isFinestEnabled()) {
logger.finest("This node is now owner of partition, cannot sync replica -> partitionId=" + partitionId
+ ", replicaIndex=" + replicaIndex + ", partition-info="
+ partitionStateManager.getPartitionImpl(partitionId));
}
return null;
}
// If this node is neither owner nor backup anymore, it must not sync.
if (!partition.isOwnerOrBackup(localReplica)) {
if (logger.isFinestEnabled()) {
logger.finest("This node is not backup replica of partitionId=" + partitionId
+ ", replicaIndex=" + replicaIndex + " anymore.");
}
return null;
}
return owner;
}
|
// With no owner assigned to the partition, there is no sync target: null.
@Test
public void testCheckSyncPartitionTarget_whenPartitionOwnerIsNull_thenReturnFalse() {
assertNull(manager.checkAndGetPrimaryReplicaOwner(PARTITION_ID, 0));
}
|
/**
 * Resolves a name via the delegate resolver while measuring how long the
 * resolution took. Timing is recorded in a finally block so the duration is
 * captured (and logged) even when the delegate throws.
 *
 * @return the resolved address (may be null if the delegate returns null)
 */
public InetAddress resolve(final String name, final String uriParamName, final boolean isReResolution)
{
final long beginNs = clock.nanoTime();
maxTimeTracker.update(beginNs);
// Stays null when the delegate throws, so the log reflects the failure.
InetAddress address = null;
try
{
address = delegateResolver.resolve(name, uriParamName, isReResolution);
return address;
}
finally
{
final long endNs = clock.nanoTime();
maxTimeTracker.measureAndUpdate(endNs);
logResolve(delegateResolver.getClass().getSimpleName(), endNs - beginNs, name, isReResolution, address);
}
}
|
// The duty-cycle tracker must be updated before and after the delegate call
// — in exactly that order — even when the delegate resolver throws.
@Test
void resolveShouldMeasureExecutionTimeEvenWhenExceptionIsThrown()
{
final NameResolver delegateResolver = mock(NameResolver.class);
final IllegalStateException exception = new IllegalStateException("error");
when(delegateResolver.resolve(anyString(), anyString(), anyBoolean()))
.thenThrow(exception);
final NanoClock clock = mock(NanoClock.class);
final long beginNs = SECONDS.toNanos(0);
final long endNs = SECONDS.toNanos(3);
when(clock.nanoTime()).thenReturn(beginNs, endNs);
final DutyCycleTracker maxTime = mock(DutyCycleTracker.class);
final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime);
final String name = "localhost";
final String endpoint = "endpoint";
final boolean isReLookup = true;
// The delegate's exception must propagate unchanged.
final IllegalStateException error =
assertThrowsExactly(IllegalStateException.class, () -> resolver.resolve(name, endpoint, isReLookup));
assertSame(exception, error);
// Verify strict ordering: update -> delegate -> measureAndUpdate.
final InOrder inOrder = inOrder(delegateResolver, clock, maxTime);
inOrder.verify(clock).nanoTime();
inOrder.verify(maxTime).update(beginNs);
inOrder.verify(delegateResolver).resolve(name, endpoint, isReLookup);
inOrder.verify(clock).nanoTime();
inOrder.verify(maxTime).measureAndUpdate(endNs);
inOrder.verifyNoMoreInteractions();
}
|
/**
 * Sets the batch size.
 *
 * @param batchSize the batch size to use; must be positive
 * @throws IllegalArgumentException if {@code batchSize} is not positive
 */
public void setBatchSize(int batchSize) {
    // checkPositive validates the argument and returns it unchanged.
    int validatedBatchSize = checkPositive(batchSize, "batchSize");
    this.batchSize = validatedBatchSize;
}
|
// A negative batch size must be rejected with IllegalArgumentException.
@Test
public void test_setBatchSize_whenNegative() {
ReactorBuilder builder = newBuilder();
assertThrows(IllegalArgumentException.class, () -> builder.setBatchSize(-1));
}
|
/**
 * Fetches all windows for the given key whose start AND end fall within the
 * supplied bounds, from the partition's timestamped window store (bypassing
 * the cache). The store is queried over the widest possibly-matching range,
 * then rows are filtered precisely against both bounds.
 *
 * @throws MaterializationException wrapping any failure to read the store
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedWindowStore(), partition);
// Coarse fetch range derived from both start and end bounds.
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= cacheBypassFetcher.fetch(store, key, lower, upper)) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
// Precise filtering: the coarse fetch may over-select.
if (!windowStartBounds.contains(windowStart)) {
continue;
}
// Window end is derived from the fixed window size.
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
// When the end-bound's lower endpoint (minus window size) exceeds the
// start-bound's lower endpoint, it must be used as the store fetch's
// lower bound.
@Test
public void shouldFetchWithEndLowerBoundIfHighest() {
// Given:
final Range<Instant> startBounds = Range.closed(
NOW,
NOW.plusSeconds(10)
);
final Range<Instant> endBounds = Range.closed(
NOW.plusSeconds(5).plus(WINDOW_SIZE),
NOW.plusSeconds(15).plus(WINDOW_SIZE)
);
// When:
table.get(A_KEY, PARTITION, startBounds, endBounds);
// Then:
verify(cacheBypassFetcher).fetch(eq(tableStore), any(),
eq(endBounds.lowerEndpoint().minus(WINDOW_SIZE)), any());
}
|
/**
 * Invoked when the process becomes available. Runs the allocation-exclude
 * status check once; subsequent calls are no-ops after the check has run.
 */
@Override
public void available() {
    if (allocationExcludeChecked) {
        return;
    }
    this.checkAllocationEnabledStatus();
}
|
// When the cluster settings exclude this node from allocation, becoming
// available must clear that exclude setting and mark the check as done.
@Test
public void testResetAllocation() throws IOException {
Settings settings = Settings.builder()
.put(OpensearchProcessImpl.CLUSTER_ROUTING_ALLOCATION_EXCLUDE_SETTING, nodeName)
.build();
when(clusterClient.getSettings(any(), any())).thenReturn(new ClusterGetSettingsResponse(null, settings, null));
opensearchProcess.available();
ArgumentCaptor<ClusterUpdateSettingsRequest> settingsRequest =
ArgumentCaptor.forClass(ClusterUpdateSettingsRequest.class);
verify(clusterClient).putSettings(settingsRequest.capture(), eq(RequestOptions.DEFAULT));
// The exclude setting must have been reset (null) in the update request.
assertNull(settingsRequest.getValue()
.transientSettings()
.get(OpensearchProcessImpl.CLUSTER_ROUTING_ALLOCATION_EXCLUDE_SETTING)
);
assertTrue(opensearchProcess.allocationExcludeChecked);
}
|
/**
 * Merges two schemas field by field, widening each pair of field types via
 * {@code widenNullableTypes}. Field names are taken from the first schema.
 *
 * @throws IllegalArgumentException if the schemas have different field counts
 */
public static Schema mergeWideningNullable(Schema schema1, Schema schema2) {
  if (schema1.getFieldCount() != schema2.getFieldCount()) {
    throw new IllegalArgumentException(
        "Cannot merge schemas with different numbers of fields. "
            + "schema1: "
            + schema1
            + " schema2: "
            + schema2);
  }
  Schema.Builder merged = Schema.builder();
  for (int i = 0; i < schema1.getFieldCount(); ++i) {
    merged.addField(
        schema1.getField(i).getName(),
        widenNullableTypes(schema1.getField(i).getType(), schema2.getField(i).getType()));
  }
  return merged.build();
}
|
// An iterable field that is nullable in either input schema must be nullable
// in the merged schema.
@Test
public void testWidenIterable() {
Schema schema1 = Schema.builder().addIterableField("field1", FieldType.INT32).build();
Schema schema2 =
Schema.builder().addIterableField("field1", FieldType.INT32.withNullable(true)).build();
Schema expected =
Schema.builder().addIterableField("field1", FieldType.INT32.withNullable(true)).build();
assertEquals(expected, SchemaUtils.mergeWideningNullable(schema1, schema2));
}
|
/**
 * Validates a client-supplied service config value against the one published
 * by the service. Only {@code HTTP_REQUEST_TIMEOUT} is enforced: clients may
 * not request a timeout shorter than the service's (which would violate the
 * service's published SLA). All other properties are accepted as-is.
 *
 * @return true when the client value is acceptable, false otherwise
 */
public static boolean isValidValue(Map<String, Object> serviceSuppliedConfig,
                                   Map<String, Object> clientSuppliedServiceConfig,
                                   String propertyName)
{
  if (!propertyName.equals(PropertyKeys.HTTP_REQUEST_TIMEOUT))
  {
    // Only the request timeout is validated; everything else passes through.
    return true;
  }
  String clientSuppliedTimeout = (String) clientSuppliedServiceConfig.get(propertyName);
  String serviceSuppliedTimeout = (String) serviceSuppliedConfig.get(propertyName);
  try
  {
    // Client timeout must be at least the service-published minimum.
    return Integer.parseInt(clientSuppliedTimeout) >= Integer.parseInt(serviceSuppliedTimeout);
  }
  catch (NumberFormatException e)
  {
    // Covers missing values too: Integer.parseInt(null) throws NumberFormatException.
    _log.error("Failed to convert HTTP Request Timeout to an int. clientSuppliedTimeout is " + clientSuppliedTimeout
        + ". serviceSuppliedTimeout is " + serviceSuppliedTimeout, e);
    return false;
  }
}
|
// HTTP_MAX_RESPONSE_SIZE is not one of the validated properties, so any
// client-supplied value must be accepted.
@Test
public void testMaxResponse()
{
Map<String, Object> serviceSuppliedProperties = new HashMap<>();
serviceSuppliedProperties.put(PropertyKeys.HTTP_MAX_RESPONSE_SIZE, "1000");
Map<String, Object> clientSuppliedProperties = new HashMap<>();
clientSuppliedProperties.put(PropertyKeys.HTTP_MAX_RESPONSE_SIZE, "10000");
Assert.assertTrue(ClientServiceConfigValidator.isValidValue(serviceSuppliedProperties,
clientSuppliedProperties,
PropertyKeys.HTTP_MAX_RESPONSE_SIZE));
}
|
/**
 * Acquires a distributed lock for each of the given e164s, runs the supplied task, and releases
 * every lock that was successfully acquired — even when acquisition or the task itself failed,
 * since {@code whenCompleteAsync} runs on both the success and failure paths.
 *
 * @param e164s phone numbers to lock; must not be empty
 * @param taskSupplier produces the future to run while the locks are held
 * @param executor executor on which lock acquisition, release, and completion stages run
 * @return a future that completes with the task's result after all acquired locks are released
 * @throws IllegalArgumentException if {@code e164s} is empty
 */
public <T> CompletableFuture<T> withLockAsync(final List<String> e164s,
                                              final Supplier<CompletableFuture<T>> taskSupplier,
                                              final Executor executor) {
  if (e164s.isEmpty()) {
    throw new IllegalArgumentException("List of e164s to lock must not be empty");
  }
  final List<LockItem> lockItems = new ArrayList<>(e164s.size());
  return CompletableFuture.runAsync(() -> {
        for (final String e164 : e164s) {
          try {
            lockItems.add(lockClient.acquireLock(AcquireLockOptions.builder(e164)
                .withAcquireReleasedLocksConsistently(true)
                .build()));
          } catch (final InterruptedException e) {
            // Restore the interrupt flag so the executor thread's interrupted status is not
            // silently swallowed before we surface the failure through the future.
            Thread.currentThread().interrupt();
            throw new CompletionException(e);
          }
        }
      }, executor)
      .thenCompose(ignored -> taskSupplier.get())
      // Runs whether the task succeeded or failed, so partially-acquired locks are still released.
      .whenCompleteAsync((ignored, throwable) -> lockItems.forEach(lockItem -> lockClient.releaseLock(ReleaseLockOptions.builder(lockItem)
          .withBestEffort(true)
          .build())), executor);
}
|
// Verifies the lock lifecycle: one acquisition and one release per e164 passed to withLockAsync.
@Test
void withLockAsync() throws InterruptedException {
  accountLockManager.withLockAsync(List.of(FIRST_NUMBER, SECOND_NUMBER),
      () -> CompletableFuture.completedFuture(null), executor).join();
  // Two numbers -> exactly two acquires and two releases, even for a trivially-completed task.
  verify(lockClient, times(2)).acquireLock(any());
  verify(lockClient, times(2)).releaseLock(any(ReleaseLockOptions.class));
}
|
/**
 * Lists the children of a local directory as transfer items paired with their remote targets.
 *
 * A symbolic link whose target can be recreated remotely via the session's {@code Symlink}
 * feature yields no children: the link itself will be created instead of uploading contents.
 *
 * @param session connection used to probe for symlink support
 * @param remote remote directory the children will be uploaded into
 * @param directory local directory to enumerate
 * @param listener progress listener (unused here)
 * @return one transfer item per filtered child, ordered by the configured comparator
 */
@Override
public List<TransferItem> list(final Session<?> session, final Path remote,
                               final Local directory, final ListProgressListener listener) throws BackgroundException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("List children for %s", directory));
    }
    if(directory.isSymbolicLink()) {
        final Symlink symlink = session.getFeature(Symlink.class);
        if(new UploadSymlinkResolver(symlink, roots).resolve(directory)) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Do not list children for symbolic link %s", directory));
            }
            // The link target is resolvable; a remote symlink will be created instead.
            return Collections.emptyList();
        }
    }
    final List<TransferItem> items = new ArrayList<>();
    for(Local file : directory.list().filter(comparator, filter)) {
        final EnumSet<Path.Type> type = file.isDirectory()
                ? EnumSet.of(Path.Type.directory)
                : EnumSet.of(Path.Type.file);
        items.add(new TransferItem(new Path(remote, file.getName(), type), file));
    }
    return items;
}
|
// Verifies that the priority comparator controls child ordering: with a ".*\.html" priority
// comparator the .html file sorts first; without it the default order applies.
@Test
public void testListSorted() throws Exception {
    // Stub local directory that always reports two children: "c" and "c.html".
    final NullLocal local = new NullLocal("t") {
        @Override
        public AttributedList<Local> list() {
            AttributedList<Local> l = new AttributedList<>();
            l.add(new NullLocal(this.getAbsolute(), "c"));
            l.add(new NullLocal(this.getAbsolute(), "c.html"));
            return l;
        }
    };
    final Path root = new Path("/t", EnumSet.of(Path.Type.file));
    {
        // With a regex priority comparator, files matching ".*\.html" are listed first.
        Transfer t = new UploadTransfer(new Host(new TestProtocol()), Collections.singletonList(new TransferItem(root, local)),
            new UploadRegexFilter(), new UploadRegexPriorityComparator(".*\\.html"));
        final List<TransferItem> list = t.list(new NullSession(new Host(new TestProtocol())), root, local, new DisabledListProgressListener());
        assertEquals(new NullLocal(local.getAbsolute(), "c.html"), list.get(0).local);
        assertEquals(new NullLocal(local.getAbsolute(), "c"), list.get(1).local);
    }
    {
        // Without the priority comparator the natural listing order ("c" before "c.html") holds.
        Transfer t = new UploadTransfer(new Host(new TestProtocol()), root, local, new UploadRegexFilter());
        final List<TransferItem> list = t.list(new NullSession(new Host(new TestProtocol())), root, local, new DisabledListProgressListener());
        assertEquals(new NullLocal(local.getAbsolute(), "c.html"), list.get(1).local);
        assertEquals(new NullLocal(local.getAbsolute(), "c"), list.get(0).local);
    }
}
|
/**
 * Deletes the grok pattern with the given id and broadcasts the deletion on the cluster bus
 * so other nodes can evict the pattern from their caches.
 *
 * @param patternId hex object id of the pattern to delete
 * @return the number of deleted documents; {@code 0} if no pattern with that id exists
 */
@Override
public int delete(String patternId) {
    final GrokPattern pattern;
    try {
        pattern = load(patternId);
    } catch (NotFoundException e) {
        // Nothing to delete and nothing to broadcast.
        log.debug("Couldn't find grok pattern with ID <{}> for deletion", patternId, e);
        return 0;
    }
    final int deleted = dbCollection.removeById(new ObjectId(patternId)).getN();
    // Notify the cluster by pattern *name*, which is what consumers key their caches on.
    clusterBus.post(GrokPatternsDeletedEvent.create(ImmutableSet.of(pattern.name())));
    return deleted;
}
|
// Deleting an id that does not exist must be a no-op: zero deletions, no documents removed,
// and no GrokPatternsDeletedEvent published on the cluster event bus.
@Test
@MongoDBFixtures("MongoDbGrokPatternServiceTest.json")
public void deleteNonExistentGrokPattern() {
  // Fixture seeds three patterns.
  assertThat(collection.countDocuments()).isEqualTo(3);
  final int deletedRecords = service.delete("56250da2d4000000deadbeef");
  assertThat(deletedRecords).isEqualTo(0);
  assertThat(collection.countDocuments()).isEqualTo(3);
  verify(clusterEventBus, never()).post(any(GrokPatternsDeletedEvent.class));
}
|
/**
 * Converts an Avro {@link SpecificRecord} class into a Flink {@code TypeInformation<Row>}.
 * Delegates to the two-argument overload with the flag fixed to {@code true}.
 *
 * @param avroClass generated Avro specific-record class to convert
 * @return row type information derived from the class's Avro schema
 */
@SuppressWarnings("unchecked")
public static <T extends SpecificRecord> TypeInformation<Row> convertToTypeInfo(
        Class<T> avroClass) {
    return convertToTypeInfo(avroClass, true);
}
|
// Converts the generated User record's Avro schema (as a JSON string) to Flink type
// information and validates the resulting row structure.
@Test
void testAvroSchemaConversion() {
    final String schema = User.getClassSchema().toString(true);
    validateUserSchema(AvroSchemaConverter.convertToTypeInfo(schema));
}
|
/**
 * Fetches the application-attempt timeline entities for an application from the timeline
 * reader REST endpoint.
 *
 * @param appId application whose attempts are requested
 * @param fields entity fields to retrieve; defaults to {@code "INFO"} when null/empty
 * @param filters additional query filters merged into the request parameters
 * @param limit maximum number of entities; ignored unless positive
 * @param fromId pagination cursor; ignored when null/empty
 * @return the entities returned by the reader, in response order
 * @throws IOException if the HTTP request fails
 */
@Override
public List<TimelineEntity> getApplicationAttemptEntities(
    ApplicationId appId, String fields, Map<String, String> filters,
    long limit, String fromId) throws IOException {
  String path = PATH_JOINER.join("clusters", clusterId, "apps",
      appId, "entities", YARN_APPLICATION_ATTEMPT);
  // Fall back to the INFO view when the caller does not request specific fields.
  String requestedFields = (fields == null || fields.isEmpty()) ? "INFO" : fields;
  MultivaluedMap<String, String> params = new MultivaluedMapImpl();
  params.add("fields", requestedFields);
  if (limit > 0) {
    params.add("limit", Long.toString(limit));
  }
  if (fromId != null && !fromId.isEmpty()) {
    params.add("fromid", fromId);
  }
  mergeFilters(params, filters);
  ClientResponse response = doGetUri(baseUri, path, params);
  return Arrays.asList(response.getEntity(TimelineEntity[].class));
}
|
// Fetches attempt entities with all-default query parameters (null fields/filters/fromId,
// no limit) and checks the mock server's two attempts come back in order.
@Test
void getApplicationAttemptEntities() throws Exception {
  ApplicationId applicationId =
      ApplicationId.fromString("application_1234_0001");
  List<TimelineEntity> entities =
      client.getApplicationAttemptEntities(applicationId, null,
          null, 0, null);
  assertEquals(2, entities.size());
  // Second entity corresponds to the mock's second attempt.
  assertEquals("mockAppAttempt2", entities.get(1).getId());
}
|
/** Returns the configured range-index format version. */
public int getVersion() {
  return _version;
}
|
/** An empty JSON object must deserialize to the default configuration values. */
@Test
public void withEmptyConf()
    throws JsonProcessingException {
  RangeIndexConfig config = JsonUtils.stringToObject("{}", RangeIndexConfig.class);
  // Defaults: enabled, and the library default version.
  assertFalse(config.isDisabled(), "Unexpected disabled");
  assertEquals(config.getVersion(), RangeIndexConfig.DEFAULT.getVersion(), "Unexpected version");
}
|
/**
 * Enriches the server span started upstream (stashed as a request attribute under the
 * {@code SpanCustomizer} class name) with the matched HTTP route and handler details.
 * When no span attribute is present, the request proceeds untouched.
 *
 * @return always {@code true}; this interceptor never blocks handler execution
 */
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object o) {
  Object maybeSpan = request.getAttribute(SpanCustomizer.class.getName());
  if (!(maybeSpan instanceof SpanCustomizer)) {
    return true; // tracing filter didn't run for this request; nothing to tag
  }
  setHttpRouteAttribute(request);
  handlerParser.preHandle(request, o, (SpanCustomizer) maybeSpan);
  return true;
}
|
// With no span attribute on the request, preHandle must do nothing beyond the attribute lookup.
@Test void preHandle_nothingWhenNoSpanAttribute() {
  interceptor.preHandle(request, response, controller);
  verify(request).getAttribute("brave.SpanCustomizer");
  // Fixed: previously passed "request" twice and never asserted "response" was untouched.
  verifyNoMoreInteractions(request, response, parser, span);
}
|
/**
 * Walks the cause chain of the given throwable and returns the root cause.
 *
 * Traversal is capped at 100 links to guard against cyclic or pathological chains, so for a
 * chain deeper than that the returned throwable may itself still have a cause.
 *
 * @param throwable the throwable whose chain to walk; must not be null
 * @return the deepest cause reachable within 100 hops
 */
public static Throwable findOriginalThrowable(Throwable throwable)
{
  Throwable current = throwable;
  // Bounded walk: most real-world exception chains are far shallower than 100 causes.
  for (int hops = 0; hops < 100; hops++)
  {
    Throwable cause = current.getCause();
    if (cause == null)
    {
      return current;
    }
    current = cause;
  }
  return current;
}
|
// Verifies both behaviors of findOriginalThrowable: it unwraps to the root cause, and it
// stops after 100 levels so an over-deep chain no longer yields the original root.
@Test
public void testFindOriginalThrowable()
{
  // Simple two-level chain: the wrapped ConnectException is the root cause.
  ConnectException connectException = new ConnectException("Foo");
  RemoteInvocationException e = new RemoteInvocationException("Failed to get connect to a server", connectException);
  Throwable throwable = LoadBalancerUtil.findOriginalThrowable(e);
  Assert.assertEquals(throwable, connectException);
  //we only go as far as 100 level deep for finding exception
  Exception npe = new NullPointerException();
  Exception temp = npe;
  // Wrap the NPE 100 times: exactly at the traversal cap, so it is still reachable.
  for (int i = 0; i < 100; i++)
  {
    e = new RemoteInvocationException(temp);
    temp = e;
  }
  throwable = LoadBalancerUtil.findOriginalThrowable(e);
  Assert.assertEquals(throwable, npe);
  //we add the 101th exception then we lost the reference to NullPointerException
  e = new RemoteInvocationException(temp);
  throwable = LoadBalancerUtil.findOriginalThrowable(e);
  Assert.assertFalse(throwable instanceof NullPointerException);
}
|
/**
 * Reconstructs the {@link SystemState} stored under the {@code GROUP} key of the config map.
 *
 * @param configMap config map holding the serialized state; must not be null
 * @return the deserialized state, or a default instance when the map has no data section
 */
public static SystemState deserialize(@NonNull ConfigMap configMap) {
    Map<String, String> data = configMap.getData();
    if (data == null) {
        // No data section yet (e.g. a freshly created config map): fall back to defaults.
        return new SystemState();
    }
    String json = data.getOrDefault(GROUP, emptyJsonObject());
    return JsonUtils.jsonToObject(json, SystemState.class);
}
|
// Covers both deserialize paths: a config map with no data yields a non-null default state,
// and a populated GROUP entry is parsed into the stored field values.
@Test
void deserialize() {
  ConfigMap configMap = new ConfigMap();
  SystemState systemState = SystemState.deserialize(configMap);
  assertThat(systemState).isNotNull();
  // With JSON stored under the GROUP key, fields deserialize from it.
  configMap.setData(Map.of(SystemState.GROUP, "{\"isSetup\":true}"));
  systemState = SystemState.deserialize(configMap);
  assertThat(systemState.getIsSetup()).isTrue();
}
|
/**
 * Decodes the entire byte array using the first configured codec.
 *
 * @param val encoded bytes to decode
 * @return the decoded string
 */
public String decode(byte[] val) {
  return codecs[0].decode(val, 0, val.length);
}
|
// Round-trips a long GB2312-encoded Chinese text fixture through the decoder.
@Test
public void testDecodeChineseLongTextGB2312() {
  assertEquals(CHINESE_LONG_TEXT_GB2312,
      gb2312().decode(CHINESE_LONG_TEXT_GB2312_BYTES));
}
|
/**
 * Dispatches URI registration events to the per-RPC-type client register services.
 *
 * Entries are grouped by RPC type, then by selector name; within each selector the URIs are
 * split by event type into registrations and offline notifications, which are forwarded to
 * the matching {@code ShenyuClientRegisterService}. Entries with a blank RPC type, or an RPC
 * type with no registered service, are silently skipped.
 *
 * @param dataList URI registration events to process; a null/empty collection is a no-op
 */
@Override
public void executor(final Collection<URIRegisterDTO> dataList) {
    if (CollectionUtils.isEmpty(dataList)) {
        return;
    }
    // Group by RPC type so each client register service only sees its own protocol's URIs.
    final Map<String, List<URIRegisterDTO>> groupByRpcType = dataList.stream()
            .filter(data -> StringUtils.isNotBlank(data.getRpcType()))
            .collect(Collectors.groupingBy(URIRegisterDTO::getRpcType));
    for (Map.Entry<String, List<URIRegisterDTO>> entry : groupByRpcType.entrySet()) {
        final String rpcType = entry.getKey();
        // Unknown RPC types (no registered service) are skipped.
        Optional.ofNullable(shenyuClientRegisterService.get(rpcType))
                .ifPresent(service -> {
                    final List<URIRegisterDTO> list = entry.getValue();
                    // Further group by selector name so register/offline calls are per-selector.
                    Map<String, List<URIRegisterDTO>> listMap = buildData(list);
                    listMap.forEach((selectorName, uriList) -> {
                        final List<URIRegisterDTO> register = new LinkedList<>();
                        final List<URIRegisterDTO> offline = new LinkedList<>();
                        for (URIRegisterDTO d : uriList) {
                            final EventType eventType = d.getEventType();
                            if (Objects.isNull(eventType) || EventType.REGISTER.equals(eventType)) {
                                // eventType is null, should be old versions: treat legacy
                                // clients without an event type as registrations.
                                register.add(d);
                            } else if (EventType.OFFLINE.equals(eventType)) {
                                offline.add(d);
                            }
                        }
                        // Registrations are applied before offlines for the same selector.
                        if (CollectionUtils.isNotEmpty(register)) {
                            service.registerURI(selectorName, register);
                        }
                        if (CollectionUtils.isNotEmpty(offline)) {
                            service.offline(selectorName, offline);
                        }
                    });
                });
    }
}
|
// Verifies executor: an empty list is a no-op, and an HTTP-typed entry (with no event type,
// i.e. the legacy path) is routed to the matching register service as a registration.
@Test
public void testExecutor() {
  List<URIRegisterDTO> list = new ArrayList<>();
  // Empty input must return without touching any service.
  uriRegisterExecutorSubscriber.executor(list);
  assertTrue(list.isEmpty());
  list.add(URIRegisterDTO.builder().rpcType(RpcTypeEnum.HTTP.getName())
      .appName("test").contextPath("/test").build());
  ShenyuClientRegisterService service = mock(ShenyuClientRegisterService.class);
  when(shenyuClientRegisterService.get(any())).thenReturn(service);
  uriRegisterExecutorSubscriber.executor(list);
  // No eventType set -> legacy path -> treated as a registration.
  verify(service).registerURI(any(), any());
}
|
/**
 * Returns {@code object} unchanged after verifying it can be serialized.
 *
 * {@code DataSerializable} objects are accepted without an actual check (no
 * SerializationService is available here); plain {@code Serializable} objects are trial-run
 * through an {@link ObjectOutputStream} writing to a sink so problems surface eagerly.
 *
 * @param object the object to check; may be null
 * @param objectName name used in the error message
 * @return the object itself, or null when {@code object} is null
 * @throws IllegalArgumentException if the object is not serializable
 */
@Nullable
public static <T> T checkSerializable(@Nullable T object, @Nonnull String objectName) {
    if (object == null) {
        return null;
    }
    if (object instanceof DataSerializable) {
        // hz-serialization is implemented, but we cannot actually check it - we don't have a
        // SerializationService at hand.
        return object;
    }
    if (!(object instanceof Serializable)) {
        throw new IllegalArgumentException("\"" + objectName + "\" must implement Serializable");
    }
    // Trial java-serialization round into a discarding stream.
    try (ObjectOutputStream trial = new ObjectOutputStream(OutputStream.nullOutputStream())) {
        trial.writeObject(object);
    } catch (NotSerializableException | InvalidClassException e) {
        throw new IllegalArgumentException("\"" + objectName + "\" must be serializable", e);
    } catch (IOException e) {
        // never really thrown, as the underlying stream never throws it
        throw new JetException(e);
    }
    return object;
}
|
// Null input is a pass-through: checkSerializable must return null without throwing.
@Test
public void whenNullToCheckSerializable_thenReturnNull() {
  Object returned = Util.checkSerializable(null, "object");
  assertThat(returned).isNull();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.