focal_method | test_case
|---|---|
@Override
public ByteBuf setLong(int index, long value) {
throw new ReadOnlyBufferException();
}
|
@Test
public void shouldRejectSetLong() {
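// java.nio.ReadOnlyBufferException extends UnsupportedOperationException, so this expectation also covers the concrete type thrown by the focal method.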
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
unmodifiableBuffer(EMPTY_BUFFER).setLong(0, 0);
}
});
}
|
@Benchmark
@Threads(512)
public void testTinyBundleHarnessStateSampler(HarnessStateTracker state, Blackhole bh)
throws Exception {
state.tracker.start("processBundleId");
for (int i = 0; i < 3; ) {
state.state1.activate();
state.state2.activate();
state.state3.activate();
// trivial code that is being sampled for this state
i += 1;
bh.consume(i);
state.state3.deactivate();
state.state2.deactivate();
state.state1.deactivate();
}
state.tracker.reset();
}
|
@Test
public void testTinyBundleHarnessStateSampler() throws Exception {
HarnessStateSampler state = new HarnessStateSampler();
HarnessStateTracker threadState = new HarnessStateTracker();
threadState.setup(state);
new ExecutionStateSamplerBenchmark().testTinyBundleHarnessStateSampler(threadState, blackhole);
state.tearDown();
threadState.tearDown();
}
|
@WithSpan
@Override
public SearchResponse apply(SearchResponse searchResponse) {
final List<ResultMessageSummary> summaries = searchResponse.messages().stream()
.map(summary -> {
// Do not touch the message if the field does not exist.
if (!summary.message().containsKey(sourceField)) {
return summary;
}
final String level = String.valueOf(summary.message().get(sourceField));
final String severity = SYSLOG_MAPPING.get(level);
// If we cannot map the severity we do not touch the message.
if (severity == null) {
return summary;
}
final Message message = messageFactory.createMessage(ImmutableMap.copyOf(summary.message()));
message.addField(targetField, severity);
return summary.toBuilder().message(message.getFields()).build();
})
.collect(Collectors.toList());
return searchResponse.toBuilder().messages(summaries).build();
}
|
@Test
public void testDecorator() throws Exception {
final DecoratorImpl decorator = DecoratorImpl.create("id",
SyslogSeverityMapperDecorator.class.getCanonicalName(),
ImmutableMap.of("source_field", "level", "target_field", "severity"),
Optional.empty(),
1);
final SyslogSeverityMapperDecorator mapperDecorator = new SyslogSeverityMapperDecorator(decorator, messageFactory);
final IndexRangeSummary indexRangeSummary = IndexRangeSummary.create("graylog_0",
Tools.nowUTC().minusDays(1),
Tools.nowUTC(),
null,
100);
final List<ResultMessageSummary> messages = ImmutableList.of(
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "h", "level", "80"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "a", "level", "0"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "b", "level", "1"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "c", "level", "2"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "d", "level", "3"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "e", "level", "4"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "f", "level", "5"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "g", "level", "6"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "h", "level", "7"), "graylog_0"),
ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "i", "foo", "1"), "graylog_0")
);
final SearchResponse searchResponse = SearchResponse.builder()
.query("foo")
.builtQuery("foo")
.usedIndices(ImmutableSet.of(indexRangeSummary))
.messages(messages)
.fields(ImmutableSet.of("level"))
.time(100L)
.totalResults(messages.size())
.from(Tools.nowUTC().minusHours(1))
.to(Tools.nowUTC())
.build();
final SearchResponse response = mapperDecorator.apply(searchResponse);
// Keeps the original value and adds no severity if the value cannot be mapped to a syslog severity
Assertions.assertThat(response.messages().get(0).message().get("level")).isEqualTo("80");
Assertions.assertThat(response.messages().get(0).message().get("severity")).isNull();
// Check that the mapping works correctly
Assertions.assertThat(response.messages().get(1).message().get("level")).isEqualTo("0");
Assertions.assertThat(response.messages().get(1).message().get("severity")).isEqualTo("Emergency (0)");
Assertions.assertThat(response.messages().get(2).message().get("level")).isEqualTo("1");
Assertions.assertThat(response.messages().get(2).message().get("severity")).isEqualTo("Alert (1)");
Assertions.assertThat(response.messages().get(3).message().get("level")).isEqualTo("2");
Assertions.assertThat(response.messages().get(3).message().get("severity")).isEqualTo("Critical (2)");
Assertions.assertThat(response.messages().get(4).message().get("level")).isEqualTo("3");
Assertions.assertThat(response.messages().get(4).message().get("severity")).isEqualTo("Error (3)");
Assertions.assertThat(response.messages().get(5).message().get("level")).isEqualTo("4");
Assertions.assertThat(response.messages().get(5).message().get("severity")).isEqualTo("Warning (4)");
Assertions.assertThat(response.messages().get(6).message().get("level")).isEqualTo("5");
Assertions.assertThat(response.messages().get(6).message().get("severity")).isEqualTo("Notice (5)");
Assertions.assertThat(response.messages().get(7).message().get("level")).isEqualTo("6");
Assertions.assertThat(response.messages().get(7).message().get("severity")).isEqualTo("Informational (6)");
Assertions.assertThat(response.messages().get(8).message().get("level")).isEqualTo("7");
Assertions.assertThat(response.messages().get(8).message().get("severity")).isEqualTo("Debug (7)");
// If the message does not have a source field, we do not touch it
Assertions.assertThat(response.messages().get(9).message().get("level")).isNull();
Assertions.assertThat(response.messages().get(9).message().get("severity")).isNull();
Assertions.assertThat(response.messages().get(9).message().get("foo")).isEqualTo("1");
}
|
public static void load(String originalName, ClassLoader loader) {
String mangledPackagePrefix = calculateMangledPackagePrefix();
String name = mangledPackagePrefix + originalName;
List<Throwable> suppressed = new ArrayList<Throwable>();
try {
// first try to load from java.library.path
loadLibrary(loader, name, false);
return;
} catch (Throwable ex) {
suppressed.add(ex);
}
String libname = System.mapLibraryName(name);
String path = NATIVE_RESOURCE_HOME + libname;
InputStream in = null;
OutputStream out = null;
File tmpFile = null;
URL url = getResource(path, loader);
try {
if (url == null) {
if (PlatformDependent.isOsx()) {
String fileName = path.endsWith(".jnilib") ? NATIVE_RESOURCE_HOME + "lib" + name + ".dynlib" :
NATIVE_RESOURCE_HOME + "lib" + name + ".jnilib";
url = getResource(fileName, loader);
if (url == null) {
FileNotFoundException fnf = new FileNotFoundException(fileName);
ThrowableUtil.addSuppressedAndClear(fnf, suppressed);
throw fnf;
}
} else {
FileNotFoundException fnf = new FileNotFoundException(path);
ThrowableUtil.addSuppressedAndClear(fnf, suppressed);
throw fnf;
}
}
int index = libname.lastIndexOf('.');
String prefix = libname.substring(0, index);
String suffix = libname.substring(index);
tmpFile = PlatformDependent.createTempFile(prefix, suffix, WORKDIR);
in = url.openStream();
out = new FileOutputStream(tmpFile);
byte[] buffer = new byte[8192];
int length;
while ((length = in.read(buffer)) > 0) {
out.write(buffer, 0, length);
}
out.flush();
if (shouldShadedLibraryIdBePatched(mangledPackagePrefix)) {
// Let's try to patch the id and re-sign it. This is best-effort and might fail if a
// SecurityManager is set up or the right executables are not installed :/
tryPatchShadedLibraryIdAndSign(tmpFile, originalName);
}
// Close the output stream before loading the unpacked library,
// because otherwise Windows will refuse to load it while it's in use by another process.
closeQuietly(out);
out = null;
loadLibrary(loader, tmpFile.getPath(), true);
} catch (UnsatisfiedLinkError e) {
try {
if (tmpFile != null && tmpFile.isFile() && tmpFile.canRead() &&
!NoexecVolumeDetector.canExecuteExecutable(tmpFile)) {
// Pass "io.netty.native.workdir" as an argument to allow shading tools to see
// the string. Since this is printed out to users to tell them what to do next,
// we want the value to be correct even when shading.
logger.info("{} exists but cannot be executed even when execute permissions set; " +
"check volume for \"noexec\" flag; use -D{}=[path] " +
"to set native working directory separately.",
tmpFile.getPath(), "io.netty.native.workdir");
}
} catch (Throwable t) {
suppressed.add(t);
logger.debug("Error checking if {} is on a file store mounted with noexec", tmpFile, t);
}
// Re-throw to fail the load
ThrowableUtil.addSuppressedAndClear(e, suppressed);
throw e;
} catch (Exception e) {
UnsatisfiedLinkError ule = new UnsatisfiedLinkError("could not load a native library: " + name);
ule.initCause(e);
ThrowableUtil.addSuppressedAndClear(ule, suppressed);
throw ule;
} finally {
closeQuietly(in);
closeQuietly(out);
// After we load the library it is safe to delete the file.
// We delete the file immediately to free up resources as soon as possible,
// and if this fails fallback to deleting on JVM exit.
if (tmpFile != null && (!DELETE_NATIVE_LIB_AFTER_LOADING || !tmpFile.delete())) {
tmpFile.deleteOnExit();
}
}
}
|
@Test
@EnabledOnOs(LINUX)
@EnabledIf("is_x86_64")
void testMultipleResourcesWithSameContentInTheClassLoader() throws MalformedURLException {
URL url1 = new File("src/test/data/NativeLibraryLoader/1").toURI().toURL();
URL url2 = new File("src/test/data/NativeLibraryLoader/2").toURI().toURL();
final URLClassLoader loader = new URLClassLoader(new URL[] {url1, url2});
final String resourceName = "test3";
// The test passes if load(...) completes without throwing, even though the resource
// exists under both URLs with identical content.
NativeLibraryLoader.load(resourceName, loader);
}
|
@SuppressWarnings("unchecked")
public static synchronized <T extends Cache> T createSerializingCache(String name, Class keyClass, Class valueClass) {
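// If a cache was already registered under this name, reuse it instead of creating a new one.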
T cache = (T) caches.get(name);
if (cache != null) {
return cache;
}
final Cache<String, String> delegate = (Cache<String, String>) cacheFactoryStrategy.createCache(name);
final T sCache = (T) new SerializingCache(delegate, keyClass, valueClass);
log.info("Created serializing cache [" + cacheFactoryStrategy.getClass().getName() + "] for " + name);
return wrapCache(sCache, name);
}
|
@Test
public void testSerializingCacheCreation() throws Exception
{
// Setup test fixture.
// Execute system under test.
final Cache result = CacheFactory.createSerializingCache("unittest-serializingcache-creation", String.class, String.class);
// Verify results.
assertNotNull(result);
assertInstanceOf(SerializingCache.class, ((CacheWrapper) result).getWrappedCache());
}
|
@Override
public JSONObject getIdentities() {
return new JSONObject();
}
|
@Test
public void getIdentities() {
Assert.assertEquals(0, mSensorsAPI.getIdentities().length());
}
|
public void updateRules(Map<String, List<R>> rulesMap) {
originalRules = rulesMap;
Map<Pattern, List<R>> regexRules = new HashMap<>();
Map<String, List<R>> simpleRules = new HashMap<>();
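// Partition each resource's rules into regex-matched and exact-match buckets.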
for (Map.Entry<String, List<R>> entry : rulesMap.entrySet()) {
String resource = entry.getKey();
List<R> rules = entry.getValue();
List<R> rulesOfSimple = new ArrayList<>();
List<R> rulesOfRegex = new ArrayList<>();
for (R rule : rules) {
if (predicate.test(rule)) {
rulesOfRegex.add(rule);
} else {
rulesOfSimple.add(rule);
}
}
if (!rulesOfRegex.isEmpty()) {
regexRules.put(Pattern.compile(resource), rulesOfRegex);
}
if (!rulesOfSimple.isEmpty()) {
simpleRules.put(resource, rulesOfSimple);
}
}
// rebuild regex cache rules
setRules(regexRules, simpleRules);
}
|
@Test
public void testUpdateRules() throws Exception {
// Setup
final Map<String, List<FlowRule>> rulesMap = generateFlowRules(true);
// Run the test
ruleManager.updateRules(rulesMap);
// Verify the results
assertEquals(ruleManager.getRules().size(), 2);
Field regexRules = RuleManager.class.getDeclaredField("regexRules");
regexRules.setAccessible(true);
assertEquals(((Map)regexRules.get(ruleManager)).size(), 1);
Field simpleRules = RuleManager.class.getDeclaredField("simpleRules");
simpleRules.setAccessible(true);
assertEquals(((Map)simpleRules.get(ruleManager)).size(), 1);
}
|
public List<TerminalNode> addRule(RuleImpl rule, InternalRuleBase kBase, Collection<InternalWorkingMemory> workingMemories) throws InvalidPatternException {
// the list of terminal nodes
final List<TerminalNode> termNodes = new ArrayList<>();
// transforms the rule and gets the array of subrules
final GroupElement[] subrules = rule.getTransformedLhs( LogicTransformer.getInstance(), kBase.getGlobals() );
for (int i = 0; i < subrules.length; i++) {
// creates a clean build context for each subrule
final BuildContext context = new BuildContext( kBase, workingMemories );
context.setRule( rule );
context.setSubRuleIndex( i );
// if running in STREAM mode, calculate temporal distance for events
if (EventProcessingOption.STREAM.equals( kBase.getRuleBaseConfiguration().getEventProcessingMode() )) {
TemporalDependencyMatrix temporal = this.utils.calculateTemporalDistance( subrules[i] );
context.setTemporalDistance( temporal );
}
if (kBase.getRuleBaseConfiguration().isSequential() ) {
context.setTupleMemoryEnabled( false );
} else {
context.setTupleMemoryEnabled( true );
}
// adds subrule (the subrule index was already set on the context above)
addSubRule( context, subrules[i], rule );
// adds the terminal node to the list of terminal nodes
termNodes.addAll(context.getTerminals());
}
return termNodes;
}
|
@Test
public void testAddRuleWithPatterns() {
final RuleImpl rule = new RuleImpl( "only patterns" );
final Pattern c1 = new Pattern( 0,
new ClassObjectType( String.class ) );
final Pattern c2 = new Pattern( 1,
new ClassObjectType( String.class ) );
final Pattern c3 = new Pattern( 2,
new ClassObjectType( String.class ) );
final GroupElement lhsroot = GroupElementFactory.newAndInstance();
lhsroot.addChild( c1 );
lhsroot.addChild( c2 );
lhsroot.addChild( c3 );
rule.setLhs( lhsroot );
final Consequence consequence = new Consequence<KnowledgeHelper>() {
public void evaluate(KnowledgeHelper knowledgeHelper,
ValueResolver valueResolver) throws Exception {
System.out.println( "Consequence!" );
}
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
}
public void writeExternal(ObjectOutput out) throws IOException {
}
public String getName() {
return "default";
}
};
rule.setConsequence( consequence );
final List terminals = this.builder.addRule( rule, this.rulebase, Collections.emptyList() );
assertThat(terminals.size()).as("Rule must have a single terminal node").isEqualTo(1);
final RuleTerminalNode terminal = (RuleTerminalNode) terminals.get( 0 );
}
|
@Description("sine")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double sin(@SqlType(StandardTypes.DOUBLE) double num)
{
return Math.sin(num);
}
|
@Test
public void testSin()
{
for (double doubleValue : DOUBLE_VALUES) {
assertFunction("sin(" + doubleValue + ")", DOUBLE, Math.sin(doubleValue));
assertFunction("sin(REAL '" + (float) doubleValue + "')", DOUBLE, Math.sin((float) doubleValue));
}
assertFunction("sin(NULL)", DOUBLE, null);
}
|
public static Date parseTM(TimeZone tz, String s, DatePrecision precision) {
return parseTM(tz, s, false, precision);
}
|
@Test
public void testParseTM() {
DatePrecision precision = new DatePrecision();
assertEquals(0,
DateUtils.parseTM(tz, "020000.000", precision).getTime());
assertEquals(Calendar.MILLISECOND, precision.lastField);
}
|
@Override
public void configureLevel() {
add(platform, properties);
addExtraRootComponents();
Version apiVersion = MetadataLoader.loadApiVersion(System2.INSTANCE);
Version sqVersion = MetadataLoader.loadSQVersion(System2.INSTANCE);
SonarEdition edition = MetadataLoader.loadEdition(System2.INSTANCE);
add(
new SonarQubeVersion(sqVersion),
SonarRuntimeImpl.forSonarQube(apiVersion, SonarQubeSide.SERVER, edition),
ThreadLocalSettings.class,
ConfigurationProvider.class,
LogServerVersion.class,
ProcessCommandWrapperImpl.class,
RestartFlagHolderImpl.class,
UuidFactoryImpl.INSTANCE,
NetworkUtilsImpl.INSTANCE,
UrlSettings.class,
EmbeddedDatabaseFactory.class,
LogbackHelper.class,
WebServerProcessLogging.class,
DefaultDatabase.class,
MyBatis.class,
StartMyBatis.class,
PurgeProfiler.class,
ServerFileSystemImpl.class,
TempFolderCleaner.class,
new TempFolderProvider(),
System2.INSTANCE,
Paths2Impl.getInstance(),
ContainerSupportImpl.class,
Clock.systemDefaultZone(),
// user session
ThreadLocalUserSession.class,
SystemPasscodeImpl.class,
// DB
DBSessionsImpl.class,
DbClient.class,
new DaoModule(),
// Elasticsearch
WebAuthorizationTypeSupport.class,
new EsModule(),
// rules/qprofiles
RuleIndex.class,
// issues
IssueIndex.class,
IssueIndexSyncProgressChecker.class,
GlobalLockManagerImpl.class,
new OkHttpClientProvider(),
CoreExtensionRepositoryImpl.class,
CoreExtensionsLoader.class,
WebCoreExtensionsInstaller.class);
addAll(CorePropertyDefinitions.all());
// cluster
add(DefaultNodeInformation.class);
}
|
@Test
public void no_missing_dependencies_between_components() {
underTest.configureLevel();
assertThat(underTest.getContainer().context().getBeanDefinitionNames()).isNotEmpty();
}
|
public static String getMultistageReverseProxyIp(String ip) {
// Detect multi-level reverse proxies
if (ip != null && StrUtil.indexOf(ip, ',') > 0) {
final List<String> ips = StrUtil.splitTrim(ip, ',');
for (final String subIp : ips) {
if (false == isUnknown(subIp)) {
ip = subIp;
break;
}
}
}
return ip;
}
|
@Test
public void issueI64P9JTest() {
// The extracted result should have surrounding whitespace trimmed
final String ips = "unknown, 12.34.56.78, 23.45.67.89";
final String ip = NetUtil.getMultistageReverseProxyIp(ips);
assertEquals("12.34.56.78", ip);
}
|
public static SerdeFeatures of(final SerdeFeature... features) {
return new SerdeFeatures(ImmutableSet.copyOf(features));
}
|
@Test
public void shouldDeserializeFromValue() throws Exception {
// Given:
final String json = "[\"UNWRAP_SINGLES\"]";
// When:
final SerdeFeatures result = MAPPER.readValue(json, SerdeFeatures.class);
// Then:
assertThat(result, is(SerdeFeatures.of(UNWRAP_SINGLES)));
}
|
P next(final Aeron aeron)
{
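// Round-robin to the next endpoint, rebuild the channel URI from the template,
// close the current publication, and create a replacement on the new channel.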
final int cursor = nextCursor();
final String endpoint = endpoints[cursor];
final ChannelUri channelUri = ChannelUri.parse(channelTemplate);
channelUri.put(ENDPOINT_PARAM_NAME, endpoint);
final String channel = channelUri.toString();
CloseHelper.quietClose(current);
current = publicationFactory.addPublication(aeron, channel, streamId);
return current;
}
|
@Test
void shouldUseAllPublicationsInListWhenGettingNextPublication()
{
publicationGroup.next(mockAeron);
publicationGroup.next(mockAeron);
publicationGroup.next(mockAeron);
verify(mockAeron).addExclusivePublication("aeron:udp?endpoint=localhost:1001|term-length=64k", streamId);
verify(mockAeron).addExclusivePublication("aeron:udp?endpoint=localhost:1002|term-length=64k", streamId);
verify(mockAeron).addExclusivePublication("aeron:udp?endpoint=localhost:1003|term-length=64k", streamId);
}
|
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor(
DoFn<InputT, OutputT> fn) {
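// Delegates to the ByteBuddy-based factory, which generates a DoFnInvoker subclass
// so DoFn methods are dispatched without per-call reflection.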
return ByteBuddyDoFnInvokerFactory.only().newByteBuddyInvoker(fn);
}
|
@Test
public void testBundleFinalizer() {
class BundleFinalizerDoFn extends DoFn<String, String> {
@ProcessElement
public void processElement(BundleFinalizer bundleFinalizer) {
bundleFinalizer.afterBundleCommit(Instant.ofEpochSecond(42L), null);
}
}
BundleFinalizer mockBundleFinalizer = mock(BundleFinalizer.class);
when(mockArgumentProvider.bundleFinalizer()).thenReturn(mockBundleFinalizer);
DoFnInvoker<String, String> invoker = DoFnInvokers.invokerFor(new BundleFinalizerDoFn());
invoker.invokeProcessElement(mockArgumentProvider);
verify(mockBundleFinalizer).afterBundleCommit(eq(Instant.ofEpochSecond(42L)), eq(null));
}
|
public static GlobalConfig readConfig() throws IOException, InvalidGlobalConfigException {
return readConfig(getConfigDir());
}
|
@Test
public void testReadConfig() throws IOException, InvalidGlobalConfigException {
String json =
"{\"disableUpdateCheck\":true, \"registryMirrors\":["
+ "{ \"registry\": \"registry-1.docker.io\","
+ " \"mirrors\": [\"mirror.gcr.io\", \"localhost:5000\"] },"
+ "{ \"registry\": \"another.registry\", \"mirrors\": [\"another.mirror\"] }"
+ "]}";
Files.write(configDir.resolve("config.json"), json.getBytes(StandardCharsets.UTF_8));
GlobalConfig globalConfig = GlobalConfig.readConfig(configDir);
assertThat(globalConfig.isDisableUpdateCheck()).isTrue();
assertThat(globalConfig.getRegistryMirrors())
.containsExactly(
"registry-1.docker.io",
"mirror.gcr.io",
"registry-1.docker.io",
"localhost:5000",
"another.registry",
"another.mirror");
}
|
public boolean containsInsertColumns() {
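// An INSERT declares its columns either through an explicit column list or through a SET assignment clause.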
InsertStatement insertStatement = getSqlStatement();
return !insertStatement.getColumns().isEmpty() || insertStatement.getSetAssignment().isPresent();
}
|
@Test
void assertContainsInsertColumnsWithSetAssignmentForMySQL() {
MySQLInsertStatement insertStatement = new MySQLInsertStatement();
insertStatement.setSetAssignment(new SetAssignmentSegment(0, 0, Collections.emptyList()));
insertStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue(""))));
InsertStatementContext insertStatementContext = createInsertStatementContext(Collections.emptyList(), insertStatement);
assertTrue(insertStatementContext.containsInsertColumns());
}
|
public ProviderBuilder payload(Integer payload) {
this.payload = payload;
return getThis();
}
|
@Test
void payload() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.payload(40);
Assertions.assertEquals(40, builder.build().getPayload());
}
|
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
table.refresh();
if (lastPosition != null) {
return discoverIncrementalSplits(lastPosition);
} else {
return discoverInitialSplits();
}
}
|
@Test
public void testIncrementalFromSnapshotTimestampWithEmptyTable() {
ScanContext scanContextWithInvalidSnapshotId =
ScanContext.builder()
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP)
.startSnapshotTimestamp(1L)
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(
TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);
assertThatThrownBy(() -> splitPlanner.planSplits(null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot find a snapshot after: 1");
}
|
public static TieredStorageTopicId convertId(IntermediateDataSetID intermediateDataSetID) {
return new TieredStorageTopicId(intermediateDataSetID.getBytes());
}
|
@Test
void testConvertSubpartitionId() {
int subpartitionId = 2;
TieredStorageSubpartitionId tieredStorageSubpartitionId =
TieredStorageIdMappingUtils.convertId(subpartitionId);
int convertedSubpartitionId =
TieredStorageIdMappingUtils.convertId(tieredStorageSubpartitionId);
assertThat(subpartitionId).isEqualTo(convertedSubpartitionId);
}
|
@GET
@Path("/jwks.json")
@Produces(MediaType.APPLICATION_JSON)
public Response jwks() {
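// Expose only the public part of the signing key and let clients cache the JWK set for 30 minutes.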
var key = keyStore.signingKey().toPublicJWK();
var cacheControl = new CacheControl();
cacheControl.setMaxAge((int) Duration.ofMinutes(30).getSeconds());
return Response.ok(new JWKSet(List.of(key))).cacheControl(cacheControl).build();
}
|
@Test
void jwks() throws ParseException {
var key =
ECKey.parse(
"""
{"kty":"EC","use":"sig","crv":"P-256","x":"yi3EF1QZS1EiAfAAfjoDyZkRnf59H49gUyklmfwKwSY","y":"Y_SGRGjwacDuT8kbcaX1Igyq8aRfJFNBMKLb2yr0x18"}
""");
var keyStore = mock(KeyStore.class);
when(keyStore.signingKey()).thenReturn(key);
var sut = new OpenIdEndpoint(BASE_URI, null, keyStore);
try (var res = sut.jwks()) {
var jwks = res.readEntity(JWKSet.class);
assertEquals(key, jwks.getKeys().get(0));
}
}
|
@Transactional(readOnly = true)
public void existsGeneralSignUpUser(String phone) {
readGeneralSignUpUser(phone);
}
|
@DisplayName("Oauth 유저의 번호로 비밀번호 찾기 인증요청이 올 경우 UserErrorException을 발생시킨다.")
@Test
void findPasswordVerificationIfUserOauth() {
// given
String phone = "010-1234-5678";
User user = UserFixture.OAUTH_USER.toUser();
given(userService.readUserByPhone(phone)).willReturn(Optional.of(user));
// when - then
assertThrows(UserErrorException.class, () -> authFindService.existsGeneralSignUpUser(phone));
}
|
@Override public void callExtensionPoint( LogChannelInterface log, Object object ) throws KettleException {
AbstractMeta meta;
if ( object instanceof Trans ) {
meta = ( (Trans) object ).getTransMeta();
} else if ( object instanceof JobExecutionExtension ) {
meta = ( (JobExecutionExtension) object ).job.getJobMeta();
} else {
meta = (AbstractMeta) object;
}
if ( meta.getMetastoreLocatorOsgi() == null ) {
meta.setMetastoreLocatorOsgi( metastoreLocatorOsgi );
}
}
|
@Test
public void testCallExtensionPointWithTrans() throws Exception {
MetastoreLocatorOsgi mockMetastoreLocator = new MetastoreLocatorImpl();
LogChannelInterface logChannelInterface = mock( LogChannelInterface.class );
TransMeta mockTransMeta = mock( TransMeta.class );
Trans mockTrans = mock( Trans.class );
when( mockTrans.getTransMeta() ).thenReturn( mockTransMeta );
Collection<MetastoreLocator> metastoreLocators = new ArrayList<>();
metastoreLocators.add( (MetastoreLocator) mockMetastoreLocator );
try ( MockedStatic<PluginServiceLoader> pluginServiceLoaderMockedStatic = Mockito.mockStatic( PluginServiceLoader.class ) ) {
pluginServiceLoaderMockedStatic.when( () -> PluginServiceLoader.loadServices( MetastoreLocator.class ) )
.thenReturn( metastoreLocators );
MetastoreLocatorExtensionPoint metastoreLocatorExtensionPoint =
new MetastoreLocatorExtensionPoint();
metastoreLocatorExtensionPoint.callExtensionPoint( logChannelInterface, mockTrans );
verify( mockTransMeta ).setMetastoreLocatorOsgi( eq( mockMetastoreLocator ) );
}
}
|
void commitClusterState(ClusterStateChange newState, Address initiator, UUID txnId) {
commitClusterState(newState, initiator, txnId, false);
}
|
@Test(expected = IllegalArgumentException.class)
public void test_changeLocalClusterState_IN_TRANSITION() throws Exception {
clusterStateManager.commitClusterState(ClusterStateChange.from(IN_TRANSITION), newAddress(), TXN);
}
|
@Override
public void purgeMappings(Type type, DeviceId deviceId) {
store.purgeMappingEntry(type, deviceId);
}
|
@Test
public void purgeMappings() {
addMapping(MAP_DATABASE, 1);
addMapping(MAP_DATABASE, 2);
addMapping(MAP_DATABASE, 3);
assertEquals("3 mappings should exist", 3, mappingCount(MAP_DATABASE));
adminService.purgeMappings(MAP_DATABASE, LISP_DID);
assertEquals("0 mappings should exist", 0, mappingCount(MAP_DATABASE));
}
|
public static boolean isValidApi(ApiDefinition apiDefinition) {
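// A usable definition needs a non-null instance, a non-blank API name, and a non-null predicate list.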
return apiDefinition != null && StringUtil.isNotBlank(apiDefinition.getApiName())
&& apiDefinition.getPredicateItems() != null;
}
|
@Test
public void testIsValidApi() {
ApiDefinition bad1 = new ApiDefinition();
ApiDefinition bad2 = new ApiDefinition("foo");
ApiDefinition good1 = new ApiDefinition("foo")
.setPredicateItems(Collections.<ApiPredicateItem>singleton(new ApiPathPredicateItem()
.setPattern("/abc")
));
assertFalse(GatewayApiDefinitionManager.isValidApi(bad1));
assertFalse(GatewayApiDefinitionManager.isValidApi(bad2));
assertTrue(GatewayApiDefinitionManager.isValidApi(good1));
}
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType = null;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the NDV
// estimators are bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
StringColumnStatsDataInspector stringColumnStatsData = stringInspectorFromStats(cso);
if (stringColumnStatsData.getNdvEstimator() == null) {
ndvEstimator = null;
break;
} else {
// check if all of the bit vectors can merge
NumDistinctValueEstimator estimator = stringColumnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (ndvEstimator.canMerge(estimator)) {
continue;
} else {
ndvEstimator = null;
break;
}
}
}
}
if (ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
StringColumnStatsDataInspector newData = stringInspectorFromStats(cso);
if (ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData
.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
aggregateData
.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
// aggregateData already holds the max NDV across all partitions
}
columnStatisticsData.setStringStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for " + colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
if (ndvEstimator == null) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
StringColumnStatsDataInspector newData =
stringInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(),
newData.getAvgColLen()));
aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(),
newData.getMaxColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, -1);
}
LOG.debug(
"Ndv estimatation for {} is {} # of partitions requested: {} # of partitions found: {}",
colName, columnStatisticsData.getStringStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
|
@Test
public void testAggregateMultiStatsWhenAllAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(String.class).numNulls(1).numDVs(3).avgColLen(20.0 / 3).maxColLen(13)
.hll(S_1, S_2, S_3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(String.class).numNulls(2).numDVs(3).avgColLen(14).maxColLen(18)
.hll(S_3, S_4, S_5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(String.class).numNulls(3).numDVs(2).avgColLen(17.5).maxColLen(18)
.hll(S_6, S_7).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
StringColumnStatsAggregator aggregator = new StringColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the aggregation does not update the HLL itself, only numDVs; the first HLL is kept
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(String.class).numNulls(6).numDVs(7).avgColLen(17.5).maxColLen(18)
.hll(S_1, S_2, S_3).build();
Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
}
|
public static Read read() {
return new AutoValue_HCatalogIO_Read.Builder()
.setDatabase(DEFAULT_DATABASE)
.setPartitionCols(new ArrayList<>())
.build();
}
|
@Test
public void testReadTransformCanBeSerializedMultipleTimes() throws Exception {
ReaderContext context = getReaderContext(getConfigPropertiesAsMap(service.getHiveConf()));
HCatalogIO.Read spec =
HCatalogIO.read()
.withConfigProperties(getConfigPropertiesAsMap(service.getHiveConf()))
.withContext(context)
.withTable(TEST_TABLE);
SerializableUtils.clone(SerializableUtils.clone(spec));
}
|
public static boolean isAppSecured(ApplicationId appId) {
SecurityAdminService service = getSecurityService();
if (service != null) {
if (!service.isSecured(appId)) {
System.out.println("\n*******************************");
System.out.println(" SM-ONOS APP WARNING ");
System.out.println("*******************************");
System.out.println(appId.name() + " has not been secured.");
System.out.println("Please review before activating.");
return false;
}
}
return true;
}
|
@Test
public void testIsAppSecured() {
assertFalse(service.isSecured(appId));
}
|
public <T> HttpRestResult<T> getLarge(String url, Header header, Query query, Object body, Type responseType)
throws Exception {
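// GET_LARGE is a GET that carries a request body, used when the query payload is too large to fit in the URL.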
return execute(url, HttpMethod.GET_LARGE, new RequestHttpEntity(header, query, body), responseType);
}
|
@Test
void testGetLarge() throws Exception {
when(requestClient.execute(any(), eq("GET-LARGE"), any())).thenReturn(mockResponse);
when(mockResponse.getStatusCode()).thenReturn(200);
when(mockResponse.getBody()).thenReturn(new ByteArrayInputStream("test".getBytes()));
HttpRestResult<String> result = restTemplate.getLarge("http://127.0.0.1:8848/nacos/test", Header.EMPTY, Query.EMPTY,
new Object(), String.class);
assertTrue(result.ok());
assertEquals(Header.EMPTY, result.getHeader());
assertEquals("test", result.getData());
}
|
@Override
public void updateApiErrorLogProcess(Long id, Integer processStatus, Long processUserId) {
ApiErrorLogDO errorLog = apiErrorLogMapper.selectById(id);
if (errorLog == null) {
throw exception(API_ERROR_LOG_NOT_FOUND);
}
if (!ApiErrorLogProcessStatusEnum.INIT.getStatus().equals(errorLog.getProcessStatus())) {
throw exception(API_ERROR_LOG_PROCESSED);
}
// Mark the log entry as processed
apiErrorLogMapper.updateById(ApiErrorLogDO.builder().id(id).processStatus(processStatus)
.processUserId(processUserId).processTime(LocalDateTime.now()).build());
}
|
@Test
public void testUpdateApiErrorLogProcess_notFound() {
// Prepare parameters
Long id = randomLongId();
Integer processStatus = randomEle(ApiErrorLogProcessStatusEnum.values()).getStatus();
Long processUserId = randomLongId();
// Invoke and assert the exception
assertServiceException(() ->
apiErrorLogService.updateApiErrorLogProcess(id, processStatus, processUserId),
API_ERROR_LOG_NOT_FOUND);
}
|
@VisibleForTesting
static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String> currentInstanceStateMap,
Map<String, String> targetInstanceStateMap, int minAvailableReplicas, boolean lowDiskMode,
Map<String, Integer> numSegmentsToOffloadMap, Map<Pair<Set<String>, Set<String>>, Set<String>> assignmentMap) {
Map<String, String> nextInstanceStateMap = new TreeMap<>();
// Assign the segment the same way as other segments if the current and target instances are the same. We need this
// to guarantee the mirror servers for replica-group based routing strategies.
Set<String> currentInstances = currentInstanceStateMap.keySet();
Set<String> targetInstances = targetInstanceStateMap.keySet();
Pair<Set<String>, Set<String>> assignmentKey = Pair.of(currentInstances, targetInstances);
Set<String> instancesToAssign = assignmentMap.get(assignmentKey);
if (instancesToAssign != null) {
Set<String> availableInstances = new TreeSet<>();
for (String instanceName : instancesToAssign) {
String currentInstanceState = currentInstanceStateMap.get(instanceName);
String targetInstanceState = targetInstanceStateMap.get(instanceName);
if (currentInstanceState != null) {
availableInstances.add(instanceName);
// Use target instance state if available in case the state changes
nextInstanceStateMap.put(instanceName,
targetInstanceState != null ? targetInstanceState : currentInstanceState);
} else {
nextInstanceStateMap.put(instanceName, targetInstanceState);
}
}
return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
}
// Add all the common instances
// Use target instance state in case the state changes
for (Map.Entry<String, String> entry : targetInstanceStateMap.entrySet()) {
String instanceName = entry.getKey();
if (currentInstanceStateMap.containsKey(instanceName)) {
nextInstanceStateMap.put(instanceName, entry.getValue());
}
}
// Add current instances until the min available replicas is achieved
int numInstancesToKeep = minAvailableReplicas - nextInstanceStateMap.size();
if (numInstancesToKeep > 0) {
// Sort instances by number of segments to offload, and keep the ones with the least segments to offload
List<Triple<String, String, Integer>> instancesInfo =
getSortedInstancesOnNumSegmentsToOffload(currentInstanceStateMap, nextInstanceStateMap,
numSegmentsToOffloadMap);
numInstancesToKeep = Integer.min(numInstancesToKeep, instancesInfo.size());
for (int i = 0; i < numInstancesToKeep; i++) {
Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
}
}
Set<String> availableInstances = new TreeSet<>(nextInstanceStateMap.keySet());
// After achieving the min available replicas, when low disk mode is enabled, only add new instances when all
// current instances exist in the next assignment.
// We want to first drop the extra instances as one step, then add the target instances as another step to avoid the
// case where segments are first added to the instance before other segments are dropped from the instance, which
// might cause server running out of disk. Note that even if segment addition and drop happen in the same step,
// there is no guarantee that server process the segment drop before the segment addition.
if (!lowDiskMode || currentInstanceStateMap.size() == nextInstanceStateMap.size()) {
int numInstancesToAdd = targetInstanceStateMap.size() - nextInstanceStateMap.size();
if (numInstancesToAdd > 0) {
// Sort instances by number of segments to offload, and add the ones with the least segments to offload
List<Triple<String, String, Integer>> instancesInfo =
getSortedInstancesOnNumSegmentsToOffload(targetInstanceStateMap, nextInstanceStateMap,
numSegmentsToOffloadMap);
for (int i = 0; i < numInstancesToAdd; i++) {
Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
}
}
}
assignmentMap.put(assignmentKey, nextInstanceStateMap.keySet());
return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
}
|
@Test
public void testTwoMinAvailableReplicasWithLowDiskMode() {
// With 3 common instances, first assignment should keep the common instances and remove the non-common instance
Map<String, String> currentInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3", "host4"), ONLINE);
Map<String, String> targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3", "host5"), ONLINE);
TableRebalancer.SingleSegmentAssignment assignment =
getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2", "host3")));
// Second assignment should be the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2", "host3")));
// With 2 common instances, first assignment should keep the common instances and remove the non-common instances
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Second assignment should be the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// With 1 common instance, first assignment should keep the common instance, and 1 more instance from current
// assignment
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6", "host7"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Second assignment should add 2 instances from target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Third assignment should remove the old instance from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5", "host6")));
// Fourth assignment should make the assignment the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5", "host6")));
// Without a common instance, first assignment should keep 2 instances from current assignment
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7", "host8"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Second assignment should add 2 instances from target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Third assignment should remove the old instances from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));
// Fourth assignment should make the assignment the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));
// With increasing number of replicas, first assignment should keep 2 instances from current assignment
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7", "host8", "host9"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Second assignment should add 3 instances from target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6", "host7"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Third assignment should remove the old instances from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6", "host7")));
// Fourth assignment should make the assignment the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6", "host7")));
// With decreasing number of replicas, first assignment should keep 2 instances from current assignment
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Second assignment should add 1 instance from target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
// Third assignment should remove 1 old instance from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5")));
// Fourth assignment should add 1 more instance from target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5")));
// Fifth assignment should remove the other old instance from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));
// Sixth assignment should make the assignment the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));
// When increasing from 1 replica, first assignment should keep the instance from current assignment, and add 2
// instances from target assignment
// NOTE: This is the best we can do because we don't have 2 replicas available
currentInstanceStateMap = SegmentAssignmentUtils.getInstanceStateMap(Collections.singletonList("host1"), ONLINE);
targetInstanceStateMap =
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3", "host4"), ONLINE);
assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
assertEquals(assignment._availableInstances, Collections.singleton("host1"));
// Second assignment should remove the old instance from current assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap,
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3"), ONLINE));
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host2", "host3")));
// Third assignment should make the assignment the same as target assignment
assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, true);
assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host2", "host3")));
}
|
public void scheduleHeartbeat() {
startedHeartbeat = executorService.schedule(heartbeatTask, DEFAULT_HEARTBEAT_PERIOD, TimeUnit.SECONDS);
}
|
@Test
public void flush_exceptionIsPropagated() throws IOException {
when(servletResponse.getOutputStream()).thenThrow(new IOException("mock exception"));
when(executorService.schedule(any(HeartbeatTask.class), anyLong(), any(TimeUnit.class))).thenReturn(task);
underTest.scheduleHeartbeat();
assertThatThrownBy(underTest::flush)
.isInstanceOf(IllegalStateException.class);
}
|
int getStrength(long previousDuration, long currentDuration, int strength) {
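// Step the work factor down by one when the previous (faster) duration was already closer to the target hashing time.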
if (isPreviousDurationCloserToGoal(previousDuration, currentDuration)) {
return strength - 1;
} else {
return strength;
}
}
|
@Test
void getStrengthShouldReturnCurrentStrengthIfCurrentDurationCloserToGoal() {
// given
// when
int actual = bcCryptWorkFactorService.getStrength(960, 1021, 5);
// then
assertThat(actual).isEqualTo(5);
}
|
public void byteOrder(final ByteOrder byteOrder)
{
if (null == byteOrder)
{
throw new IllegalArgumentException("byteOrder cannot be null");
}
this.byteOrder = byteOrder;
}
|
@Test
void shouldThrowExceptionWhenCannotReadString() throws Throwable
{
final UnsafeBuffer buffer = toUnsafeBuffer((out) -> out.writeShort(42));
final DirectBufferDataInput dataInput = new DirectBufferDataInput(buffer);
dataInput.byteOrder(byteOrder());
assertThrows(EOFException.class, dataInput::readUTF);
}
|
public JobRunrConfiguration useBackgroundJobServer() {
return useBackgroundJobServerIf(true);
}
|
@Test
void backgroundJobServerThrowsExceptionIfNoStorageProviderIsAvailable() {
assertThatThrownBy(() -> JobRunr.configure()
.useBackgroundJobServer()
)
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("A StorageProvider is required to use a BackgroundJobServer. Please see the documentation on how to setup a job StorageProvider.");
}
|
public static SerializableFunction<GenericRecord, Row> getGenericRecordToRowFunction(
@Nullable Schema schema) {
return new GenericRecordToRowFn(schema);
}
|
@Test
public void testGenericRecordToRowFunction() {
SerializableUtils.ensureSerializable(AvroUtils.getGenericRecordToRowFunction(Schema.of()));
SerializableUtils.ensureSerializable(AvroUtils.getGenericRecordToRowFunction(null));
}
|
@Override
public double readDouble(@Nonnull String fieldName) throws IOException {
FieldDefinition fd = cd.getField(fieldName);
if (fd == null) {
return 0d;
}
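// Widen any compatible numeric field type to double instead of failing on a type mismatch.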
switch (fd.getType()) {
case DOUBLE:
return super.readDouble(fieldName);
case LONG:
return super.readLong(fieldName);
case FLOAT:
return super.readFloat(fieldName);
case INT:
return super.readInt(fieldName);
case BYTE:
return super.readByte(fieldName);
case CHAR:
return super.readChar(fieldName);
case SHORT:
return super.readShort(fieldName);
default:
throw createIncompatibleClassChangeError(fd, DOUBLE);
}
}
|
@Test(expected = IncompatibleClassChangeError.class)
public void testReadDouble_IncompatibleClass() throws Exception {
reader.readDouble("string");
}
|
public static String getFileName(String compressedName) {
compressedName = compressedName.toLowerCase();
boolean hasFileName = compressedName.contains(".");
if (hasFileName
&& (isZip(compressedName)
|| isTar(compressedName)
|| isRar(compressedName)
|| is7zip(compressedName)
|| isXz(compressedName)
|| isLzma(compressedName)
|| isGzip(compressedName)
|| isBzip2(compressedName)
|| compressedName.endsWith(fileExtensionGzipTarShort)
|| compressedName.endsWith(fileExtensionBzip2TarShort))) {
// single-extension archives (including .tgz/.tbz): strip everything after the last '.'
return compressedName.substring(0, compressedName.lastIndexOf("."));
} else if (hasFileName
&& (isGzippedTar(compressedName)
|| isXzippedTar(compressedName)
|| isLzippedTar(compressedName)
|| isBzippedTar(compressedName))) {
// double-extension archives (e.g. .tar.gz): strip the last two extensions
return compressedName.substring(0, Utils.nthToLastCharIndex(2, compressedName, '.'));
} else {
return compressedName;
}
}
|
@Test
public void getFileNameTest() throws Exception {
assertEquals("test", CompressedHelper.getFileName("test.zip"));
assertEquals("test", CompressedHelper.getFileName("test.rar"));
assertEquals("test", CompressedHelper.getFileName("test.tar"));
assertEquals("test", CompressedHelper.getFileName("test.tar.gz"));
assertEquals("test", CompressedHelper.getFileName("test.tgz"));
assertEquals("test", CompressedHelper.getFileName("test.tar.bz2"));
assertEquals("test", CompressedHelper.getFileName("test.tbz"));
assertEquals("test", CompressedHelper.getFileName("test.tar.lzma"));
assertEquals("test", CompressedHelper.getFileName("test.jar"));
assertEquals("test", CompressedHelper.getFileName("test.apk"));
assertEquals("test", CompressedHelper.getFileName("test.7z"));
assertEquals("test.txt", CompressedHelper.getFileName("test.txt.gz"));
assertEquals("test.txt", CompressedHelper.getFileName("test.txt.bz2"));
assertEquals("test.txt", CompressedHelper.getFileName("test.txt.lzma"));
assertEquals("test.txt", CompressedHelper.getFileName("test.txt.xz"));
// no extension(directory)
assertEquals("test", CompressedHelper.getFileName("test"));
// invalid extension
assertEquals("test.z", CompressedHelper.getFileName("test.z"));
// no path
assertEquals("", CompressedHelper.getFileName(""));
}
|
public static void refreshModuleConnectionCount(Map<String, Integer> connectionCnt) {
// refresh all existed module connection cnt and add new module connection count
connectionCnt.forEach((module, cnt) -> {
AtomicInteger integer = moduleConnectionCnt.get(module);
// if exists
if (integer != null) {
integer.set(cnt);
} else {
// new module comes
AtomicInteger newModuleConnCnt = new AtomicInteger(cnt);
moduleConnectionCnt.put(module, newModuleConnCnt);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "nacos_monitor",
Arrays.asList(
new ImmutableTag("module", module),
new ImmutableTag("name", "longConnection")
),
moduleConnectionCnt.get(module));
}
});
// reset the outdated module connection cnt
moduleConnectionCnt.forEach((module, cnt) -> {
if (connectionCnt.containsKey(module)) {
return;
}
cnt.set(0);
});
}
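
// Why the AtomicInteger is registered once and then mutated: a Micrometer gauge
// samples the Number instance it was registered with, so updating that instance
// is what moves the gauge; registering again would be a no-op. A minimal sketch
// against a plain SimpleMeterRegistry (illustrative, not the Nacos wiring):
MeterRegistry registry = new SimpleMeterRegistry();
AtomicInteger namingConns = registry.gauge("longConnection",
        Tags.of("module", "naming"), new AtomicInteger(0));
namingConns.set(10); // gauge now reports 10
namingConns.set(0);  // gauge now reports 0, no further registry interaction needed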
|
@Test
void testRefreshModuleConnectionCount() {
// refresh
Map<String, Integer> map = new HashMap<>();
map.put("naming", 10);
MetricsMonitor.refreshModuleConnectionCount(map);
assertEquals(1, MetricsMonitor.getModuleConnectionCnt().size());
assertEquals(10, MetricsMonitor.getModuleConnectionCnt().get("naming").get());
// refresh again
map = new HashMap<>();
map.put("naming", 11);
map.put("config", 1);
MetricsMonitor.refreshModuleConnectionCount(map);
assertEquals(2, MetricsMonitor.getModuleConnectionCnt().size());
assertEquals(11, MetricsMonitor.getModuleConnectionCnt().get("naming").get());
assertEquals(1, MetricsMonitor.getModuleConnectionCnt().get("config").get());
// refresh again
map = new HashMap<>();
map.put("naming", 1);
MetricsMonitor.refreshModuleConnectionCount(map);
assertEquals(2, MetricsMonitor.getModuleConnectionCnt().size());
assertEquals(1, MetricsMonitor.getModuleConnectionCnt().get("naming").get());
assertEquals(0, MetricsMonitor.getModuleConnectionCnt().get("config").get());
}
|
public final <T extends ChannelHandler> T removeIfExists(String name) {
return removeIfExists(context(name));
}
|
@Test
public void testRemoveIfExists() {
DefaultChannelPipeline pipeline = new DefaultChannelPipeline(new LocalChannel());
ChannelHandler handler1 = newHandler();
ChannelHandler handler2 = newHandler();
ChannelHandler handler3 = newHandler();
pipeline.addLast("handler1", handler1);
pipeline.addLast("handler2", handler2);
pipeline.addLast("handler3", handler3);
assertNotNull(pipeline.removeIfExists(handler1));
assertNull(pipeline.get("handler1"));
assertNotNull(pipeline.removeIfExists("handler2"));
assertNull(pipeline.get("handler2"));
assertNotNull(pipeline.removeIfExists(TestHandler.class));
assertNull(pipeline.get("handler3"));
}
|
static Counter getServicesCounter() {
return SERVICES_COUNTER;
}
|
@Test
public void testGetServicesCounter() {
assertNotNull("getServicesCounter", MonitoringProxy.getServicesCounter());
}
|
@Override
public Properties info(RedisClusterNode node) {
Map<String, String> info = execute(node, RedisCommands.INFO_ALL);
Properties result = new Properties();
for (Entry<String, String> entry : info.entrySet()) {
result.setProperty(entry.getKey(), entry.getValue());
}
return result;
}
|
@Test
public void testInfo() {
RedisClusterNode master = getFirstMaster();
Properties info = connection.info(master);
assertThat(info.size()).isGreaterThan(10);
}
|
public static Builder route() {
return new RouterFunctionBuilder();
}
|
@Test
void and() {
HandlerFunction<ServerResponse> handlerFunction = request -> ServerResponse.ok().build();
RouterFunction<ServerResponse> routerFunction1 = request -> Mono.empty();
RouterFunction<ServerResponse> routerFunction2 = request -> Mono.just(handlerFunction);
RouterFunction<ServerResponse> result = routerFunction1.and(routerFunction2);
assertThat(result).isNotNull();
MockServerHttpRequest mockRequest = MockServerHttpRequest.get("https://example.com").build();
ServerRequest request = new DefaultServerRequest(MockServerWebExchange.from(mockRequest), Collections.emptyList());
Mono<HandlerFunction<ServerResponse>> resultHandlerFunction = result.route(request);
StepVerifier.create(resultHandlerFunction)
.expectNext(handlerFunction)
.expectComplete()
.verify();
}
|
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
return new DateTime(dateStr, dateFormat);
}
|
@Test
public void parseTest5() {
        // test time-of-day parsing
//noinspection ConstantConditions
String time = DateUtil.parse("22:12:12").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("22:12:12", time);
//noinspection ConstantConditions
time = DateUtil.parse("2:12:12").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("02:12:12", time);
//noinspection ConstantConditions
time = DateUtil.parse("2:2:12").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("02:02:12", time);
//noinspection ConstantConditions
time = DateUtil.parse("2:2:1").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("02:02:01", time);
//noinspection ConstantConditions
time = DateUtil.parse("22:2:1").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("22:02:01", time);
//noinspection ConstantConditions
time = DateUtil.parse("2:22:1").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("02:22:01", time);
        // test parsing of two-part times (hh:mm)
//noinspection ConstantConditions
time = DateUtil.parse("2:22").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("02:22:00", time);
//noinspection ConstantConditions
time = DateUtil.parse("12:22").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("12:22:00", time);
//noinspection ConstantConditions
time = DateUtil.parse("12:2").toString(DatePattern.NORM_TIME_FORMAT);
assertEquals("12:02:00", time);
}
|
public static void updateTmpDirectoriesInConfiguration(
Configuration configuration, @Nullable String defaultDirs) {
if (configuration.contains(CoreOptions.TMP_DIRS)) {
LOG.info(
"Overriding Flink's temporary file directories with those "
+ "specified in the Flink config: {}",
configuration.getValue(CoreOptions.TMP_DIRS));
} else if (defaultDirs != null) {
LOG.info("Setting directories for temporary files to: {}", defaultDirs);
configuration.set(CoreOptions.TMP_DIRS, defaultDirs);
configuration.set(USE_LOCAL_DEFAULT_TMP_DIRS, true);
}
}
|
@Test
void testUpdateTmpDirectoriesInConfiguration() {
Configuration config = new Configuration();
// test that default value is taken
BootstrapTools.updateTmpDirectoriesInConfiguration(config, "default/directory/path");
assertThat(config.get(CoreOptions.TMP_DIRS)).isEqualTo("default/directory/path");
        // test that the default value is ignored if a value was set before
BootstrapTools.updateTmpDirectoriesInConfiguration(config, "not/default/directory/path");
assertThat(config.get(CoreOptions.TMP_DIRS)).isEqualTo("default/directory/path");
// test that empty value is not a magic string
config.set(CoreOptions.TMP_DIRS, "");
BootstrapTools.updateTmpDirectoriesInConfiguration(config, "some/new/path");
assertThat(config.get(CoreOptions.TMP_DIRS)).isEmpty();
}
|
@Override
public synchronized Optional<ListenableFuture<V>> schedule(
Checkable<K, V> target, K context) {
if (checksInProgress.containsKey(target)) {
return Optional.empty();
}
final LastCheckResult<V> result = completedChecks.get(target);
if (result != null) {
final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
if (msSinceLastCheck < minMsBetweenChecks) {
LOG.debug("Skipped checking {}. Time since last check {}ms " +
"is less than the min gap {}ms.",
target, msSinceLastCheck, minMsBetweenChecks);
return Optional.empty();
}
}
LOG.info("Scheduling a check for {}", target);
final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
new Callable<V>() {
@Override
public V call() throws Exception {
return target.check(context);
}
});
final ListenableFuture<V> lf;
if (diskCheckTimeout > 0) {
lf = TimeoutFuture
.create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
scheduledExecutorService);
} else {
lf = lfWithoutTimeout;
}
checksInProgress.put(target, lf);
addResultCachingCallback(target, lf);
return Optional.of(lf);
}
|
@Test(timeout=60000)
public void testContextIsPassed() throws Exception {
final NoOpCheckable target1 = new NoOpCheckable();
final FakeTimer timer = new FakeTimer();
ThrottledAsyncChecker<Boolean, Boolean> checker =
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0,
getExecutorService());
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
timer.advance(MIN_ERROR_CHECK_GAP + 1);
assertTrue(checker.schedule(target1, false).isPresent());
waitTestCheckableCheckCount(target1, 2L);
}
|
@Override
public void initializeIfNeeded() {
if (state() == State.CREATED) {
StateManagerUtil.registerStateStores(log, logPrefix, topology, stateMgr, stateDirectory, processorContext);
            // With or without EOS, we check whether to checkpoint at each commit while running,
            // and the checkpoint file may have been deleted, in which case we should checkpoint
            // immediately; therefore we initialize the snapshot as empty.
            offsetSnapshotSinceLastFlush = Collections.emptyMap();
            // No topology needs to be initialized, so we can transition to RUNNING
            // right after registering the stores.
transitionTo(State.RESTORING);
transitionTo(State.RUNNING);
processorContext.initialize();
log.info("Initialized");
} else if (state() == State.RESTORING) {
throw new IllegalStateException("Illegal state " + state() + " while initializing standby task " + id);
}
}
|
@Test
public void shouldThrowLockExceptionIfFailedToLockStateDirectory() throws IOException {
stateDirectory = mock(StateDirectory.class);
when(stateDirectory.lock(taskId)).thenReturn(false);
when(stateManager.taskType()).thenReturn(TaskType.STANDBY);
task = createStandbyTask();
assertThrows(LockException.class, () -> task.initializeIfNeeded());
task = null;
}
|
    public List<String> getPrimaryKeyOnlyName() {
        return new ArrayList<>(getPrimaryKeyMap().keySet());
    }
|
@Test
public void testGetPrimaryKeyOnlyName() {
List<String> pksName = tableMeta.getPrimaryKeyOnlyName();
assertEquals(2, pksName.size());
assertTrue(pksName.contains("col1"));
assertTrue(pksName.contains("col2"));
}
|
abstract void execute(Admin admin, Namespace ns, PrintStream out) throws Exception;
|
@Test
public void testFindHangingLookupTopicPartitionsForBroker() throws Exception {
int brokerId = 5;
String[] args = new String[]{
"--bootstrap-server",
"localhost:9092",
"find-hanging",
"--broker-id",
String.valueOf(brokerId)
};
String topic = "foo";
expectListTopics(singleton(topic));
Node node0 = new Node(0, "localhost", 9092);
Node node1 = new Node(1, "localhost", 9093);
Node node5 = new Node(5, "localhost", 9097);
TopicPartitionInfo partition0 = new TopicPartitionInfo(
0,
node0,
Arrays.asList(node0, node1),
Arrays.asList(node0, node1)
);
TopicPartitionInfo partition1 = new TopicPartitionInfo(
1,
node1,
Arrays.asList(node1, node5),
Arrays.asList(node1, node5)
);
TopicDescription description = new TopicDescription(
topic,
false,
Arrays.asList(partition0, partition1)
);
expectDescribeTopics(singletonMap(topic, description));
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
Mockito.when(admin.describeProducers(
Collections.singletonList(new TopicPartition(topic, 1)),
new DescribeProducersOptions().brokerId(brokerId)
)).thenReturn(result);
execute(args);
assertNormalExit();
assertNoHangingTransactions();
}
|
public long getLastAccessTime() {
return lastAccessTime;
}
|
@Test
public void testConstructorSetsLastAccessTime() {
long lastAccessTime = iterator.getLastAccessTime();
assertThat(lastAccessTime).isGreaterThan(System.currentTimeMillis() - TimeUnit.HOURS.toMillis(1));
assertThat(lastAccessTime).isLessThanOrEqualTo(System.currentTimeMillis());
}
|
public static void setOffloadDriverMetadata(LedgerInfo.Builder infoBuilder,
String driverName,
Map<String, String> offloadDriverMetadata) {
infoBuilder.getOffloadContextBuilder()
.getDriverMetadataBuilder()
.setName(driverName);
infoBuilder.getOffloadContextBuilder().getDriverMetadataBuilder().clearProperties();
offloadDriverMetadata.forEach((k, v) -> infoBuilder
.getOffloadContextBuilder()
.getDriverMetadataBuilder()
.addProperties(KeyValue.newBuilder()
.setKey(k)
.setValue(v)
.build()));
}
|
@Test
void testOffloadMetadataShouldClearBeforeSet() {
MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder builder =
MLDataFormats.ManagedLedgerInfo.LedgerInfo.newBuilder();
builder.setLedgerId(1L);
Map<String, String> map = new HashMap<>();
map.put("key1", "value1");
map.put("key2", "value2");
        // Only one copy of the offload metadata is kept in the metadata store,
        // so the previously stored properties must be cleared before new ones are set.
OffloadUtils.setOffloadDriverMetadata(builder, "offload", map);
OffloadUtils.setOffloadDriverMetadata(builder, "offload", map);
MLDataFormats.OffloadDriverMetadata offloadDriverMetadata =
builder.build().getOffloadContext().getDriverMetadata();
Assert.assertEquals(offloadDriverMetadata.getPropertiesList().size(), 2);
Assert.assertEquals(offloadDriverMetadata.getProperties(0).getKey(), "key1");
Assert.assertEquals(offloadDriverMetadata.getProperties(1).getKey(), "key2");
Assert.assertEquals(offloadDriverMetadata.getProperties(0).getValue(), "value1");
Assert.assertEquals(offloadDriverMetadata.getProperties(1).getValue(), "value2");
}
|
public CompressionProvider getCompressionProvider() {
return compressionProvider;
}
|
@Test
public void getCompressionProvider() {
CompressionProvider provider = outStream.getCompressionProvider();
    assertEquals( PROVIDER_NAME, provider.getName() );
}
|
@Override
public void process(HttpResponse response, HttpContext context) throws
HttpException, IOException {
List<Header> warnings = Arrays.stream(response.getHeaders("Warning")).filter(header -> !this.isDeprecationMessage(header.getValue())).collect(Collectors.toList());
response.removeHeaders("Warning");
        warnings.forEach(response::addHeader);
}
|
@Test
public void testInterceptorMultipleHeaderFilteredWarning() throws IOException, HttpException {
ElasticsearchFilterDeprecationWarningsInterceptor interceptor = new ElasticsearchFilterDeprecationWarningsInterceptor();
HttpResponse response = new BasicHttpResponse(new BasicStatusLine(new ProtocolVersion("HTTP", 0, 0), 0, null));
response.addHeader("Test", "This header should not trigger the interceptor.");
response.addHeader("Warning", "This warning should not trigger the interceptor.");
response.addHeader("Warning", "This text contains the trigger: setting was deprecated in Elasticsearch - and should be filtered out");
assertThat(response.getAllHeaders())
.as("Number of Headers should be 3 before start.")
.hasSize(3);
interceptor.process(response, null);
assertThat(response.getAllHeaders())
.as("Number of Headers should be 1 less after running the interceptor.")
.hasSize(2);
}
|
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final String dateTime = dateFormat.format(new Date(clock.getTime()));
printWithBanner(dateTime, '=');
output.println();
if (!gauges.isEmpty()) {
printWithBanner("-- Gauges", '-');
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
output.println(entry.getKey());
printGauge(entry.getValue());
}
output.println();
}
if (!counters.isEmpty()) {
printWithBanner("-- Counters", '-');
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
output.println(entry.getKey());
printCounter(entry);
}
output.println();
}
if (!histograms.isEmpty()) {
printWithBanner("-- Histograms", '-');
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
output.println(entry.getKey());
printHistogram(entry.getValue());
}
output.println();
}
if (!meters.isEmpty()) {
printWithBanner("-- Meters", '-');
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
output.println(entry.getKey());
printMeter(entry.getValue());
}
output.println();
}
if (!timers.isEmpty()) {
printWithBanner("-- Timers", '-');
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
output.println(entry.getKey());
printTimer(entry.getValue());
}
output.println();
}
output.println();
output.flush();
}
|
@Test
public void reportsGaugeValues() throws Exception {
final Gauge<Integer> gauge = () -> 1;
reporter.report(map("gauge", gauge),
map(),
map(),
map(),
map());
assertThat(consoleOutput())
.isEqualTo(lines(
dateHeader,
"",
"-- Gauges ----------------------------------------------------------------------",
"gauge",
" value = 1",
"",
""
));
}
|
public <InputT, OutputT> DoFn<InputT, OutputT> get() throws Exception {
Thread currentThread = Thread.currentThread();
return (DoFn<InputT, OutputT>) outstanding.get(currentThread);
}
|
@Test
public void setupOnGet() throws Exception {
TestFn obtained = (TestFn) mgr.get();
assertThat(obtained, not(theInstance(fn)));
assertThat(obtained.setupCalled, is(true));
assertThat(obtained.teardownCalled, is(false));
}
|
public static Locale localeFromString(String s) {
if (!s.contains(LOBAR)) {
return new Locale(s);
}
String[] items = s.split(LOBAR);
return new Locale(items[0], items[1]);
}
|
@Test
public void localeFromStringFrCA() {
title("localeFromStringFrCA");
locale = LionUtils.localeFromString("fr_CA");
checkLanguageCountry(locale, "fr", "CA");
}
|
public boolean isEmpty() {
return messagesProcessed == 0 && errorsOccurred == 0;
}
|
@Test
void testNotEmpty() {
assertThat(new StatsPersistMsg(1, 0, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID).isEmpty()).isFalse();
assertThat(new StatsPersistMsg(0, 1, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID).isEmpty()).isFalse();
assertThat(new StatsPersistMsg(1, 1, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID).isEmpty()).isFalse();
}
|
public boolean isRegisteredUser(@Nonnull final JID user, final boolean checkRemoteDomains) {
if (xmppServer.isLocal(user)) {
try {
getUser(user.getNode());
return true;
}
catch (final UserNotFoundException e) {
return false;
}
}
else if (!checkRemoteDomains) {
return false;
} else {
// Look up in the cache using the full JID
Boolean isRegistered = remoteUsersCache.get(user.toString());
if (isRegistered == null) {
// Check if the bare JID of the user is cached
isRegistered = remoteUsersCache.get(user.toBareJID());
if (isRegistered == null) {
// No information is cached so check user identity and cache it
// A disco#info is going to be sent to the bare JID of the user. This packet
// is going to be handled by the remote server.
final IQ iq = new IQ(IQ.Type.get);
iq.setFrom(xmppServer.getServerInfo().getXMPPDomain());
iq.setTo(user.toBareJID());
iq.setChildElement("query", "http://jabber.org/protocol/disco#info");
final Semaphore completionSemaphore = new Semaphore(0);
// Send the disco#info request to the remote server.
final IQRouter iqRouter = xmppServer.getIQRouter();
final long timeoutInMillis = REMOTE_DISCO_INFO_TIMEOUT.getValue().toMillis();
iqRouter.addIQResultListener(iq.getID(), new IQResultListener() {
@Override
public void receivedAnswer(final IQ packet) {
final JID from = packet.getFrom();
// Assume that the user is not a registered user
Boolean isRegistered = Boolean.FALSE;
// Analyze the disco result packet
if (IQ.Type.result == packet.getType()) {
final Element child = packet.getChildElement();
if (child != null) {
for (final Iterator it = child.elementIterator("identity"); it.hasNext();) {
final Element identity = (Element) it.next();
final String accountType = identity.attributeValue("type");
if ("registered".equals(accountType) || "admin".equals(accountType)) {
isRegistered = Boolean.TRUE;
break;
}
}
}
}
// Update cache of remote registered users
remoteUsersCache.put(from.toBareJID(), isRegistered);
completionSemaphore.release();
}
@Override
public void answerTimeout(final String packetId) {
Log.warn("The result from the disco#info request was never received. request: {}", iq);
completionSemaphore.release();
}
}, timeoutInMillis);
// Send the request
iqRouter.route(iq);
// Wait for the response
try {
completionSemaphore.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS);
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
Log.warn("Interrupted whilst waiting for response from remote server", e);
}
isRegistered = remoteUsersCache.computeIfAbsent(user.toBareJID(), ignored -> Boolean.FALSE);
}
}
return isRegistered;
}
}
|
@Test
public void isRegisteredUserFalseWillReturnFalseForLocalNonUsers() {
final boolean result = userManager.isRegisteredUser(new JID("unknown-user", Fixtures.XMPP_DOMAIN, null), false);
assertThat(result, is(false));
}
|
@Override
public String toString() {
return MoreObjects.toStringHelper("Load").add("rate", rate())
.add("latest", latest()).toString();
}
|
@Test
public void testToString() {
DefaultLoad load = new DefaultLoad(20, 10);
String s = load.toString();
assertThat(s, containsString("Load{rate=1, latest=20}"));
}
|
@Override
public PageResult<SocialUserDO> getSocialUserPage(SocialUserPageReqVO pageReqVO) {
return socialUserMapper.selectPage(pageReqVO);
}
|
@Test
public void testGetSocialUserPage() {
        // mock data
        SocialUserDO dbSocialUser = randomPojo(SocialUserDO.class, o -> { // to be matched by the query below
            o.setType(SocialTypeEnum.GITEE.getType());
            o.setNickname("芋艿");
            o.setOpenid("yudaoyuanma");
            o.setCreateTime(buildTime(2020, 1, 15));
        });
        socialUserMapper.insert(dbSocialUser);
        // test type mismatch
        socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setType(SocialTypeEnum.DINGTALK.getType())));
        // test nickname mismatch
        socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setNickname(randomString())));
        // test openid mismatch
        socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setOpenid("java")));
        // test createTime mismatch
        socialUserMapper.insert(cloneIgnoreId(dbSocialUser, o -> o.setCreateTime(buildTime(2020, 1, 21))));
        // prepare parameters
        SocialUserPageReqVO reqVO = new SocialUserPageReqVO();
        reqVO.setType(SocialTypeEnum.GITEE.getType());
        reqVO.setNickname("芋");
        reqVO.setOpenid("yudao");
        reqVO.setCreateTime(buildBetweenTime(2020, 1, 10, 2020, 1, 20));
        // invoke
        PageResult<SocialUserDO> pageResult = socialUserService.getSocialUserPage(reqVO);
        // assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbSocialUser, pageResult.getList().get(0));
}
|
public static BaseDataCache getInstance() {
return INSTANCE;
}
|
@Test
public void testGetInstance() {
BaseDataCache baseDataCache = BaseDataCache.getInstance();
assertNotNull(baseDataCache);
}
|
@Benchmark
@Threads(16) // Use several threads since we expect contention during bundle processing.
public void testStateWithoutCaching(StatefulTransform statefulTransform) throws Exception {
testState(statefulTransform, statefulTransform.nonCachingStateRequestHandler);
}
|
@Test
public void testStateWithoutCaching() throws Exception {
StatefulTransform transform = new StatefulTransform();
transform.elementsEmbedding = elementsEmbedding;
new ProcessBundleBenchmark().testStateWithoutCaching(transform);
transform.tearDown();
}
|
public void shiftOffsetsBy(final Consumer<byte[], byte[]> client,
final Set<TopicPartition> inputTopicPartitions,
final long shiftBy) {
final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
for (final TopicPartition topicPartition : inputTopicPartitions) {
final long position = client.position(topicPartition);
final long offset = position + shiftBy;
topicPartitionsAndOffset.put(topicPartition, offset);
}
final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
for (final TopicPartition topicPartition : inputTopicPartitions) {
client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
}
}
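
// checkOffsetRange is not shown here; a sketch consistent with the tests would
// clamp each requested offset into [beginningOffset, endOffset], so shifting past
// either end of the topic seeks to that end instead:
private static Map<TopicPartition, Long> clampOffsets(final Map<TopicPartition, Long> requested,
                                                      final Map<TopicPartition, Long> beginningOffsets,
                                                      final Map<TopicPartition, Long> endOffsets) {
    final Map<TopicPartition, Long> validated = new HashMap<>(requested.size());
    for (final Map.Entry<TopicPartition, Long> entry : requested.entrySet()) {
        final long clamped = Math.max(beginningOffsets.get(entry.getKey()),
            Math.min(endOffsets.get(entry.getKey()), entry.getValue()));
        validated.put(entry.getKey(), clamped);
    }
    return validated;
}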
|
@Test
public void testShiftOffsetByWhenAfterEndOffset() {
final Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(topicPartition, 3L);
consumer.updateEndOffsets(endOffsets);
final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
beginningOffsets.put(topicPartition, 0L);
consumer.updateBeginningOffsets(beginningOffsets);
streamsResetter.shiftOffsetsBy(consumer, inputTopicPartitions, 5L);
final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
assertEquals(2, records.count());
}
|
public static String getMaskedStatement(final String query) {
try {
final ParseTree tree = DefaultKsqlParser.getParseTree(query);
return new Visitor().visit(tree);
} catch (final Exception | StackOverflowError e) {
return fallbackMasking(query);
}
}
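
// Catching StackOverflowError is deliberate: deeply nested statements can exhaust
// the stack inside the recursive parse-tree visitor, and masking must still return
// something (via fallbackMasking) rather than propagate. Usage sketch:
String masked = QueryMask.getMaskedStatement(
        "CREATE SOURCE CONNECTOR c WITH ('connection.url'='jdbc:postgresql://host/db');");
// -> connection string literal replaced with '[string]' (see the test below for the exact shape)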
|
@Test
public void shouldMaskValidCreateConnectorWithComment() {
    // Given:
    final String query = "--this is a comment. \n"
+ "CREATE SOURCE CONNECTOR `test-connector` WITH ("
+ " \"connector.class\" = 'PostgresSource', \n"
+ " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n"
+ " \"mode\"='bulk',\n"
+ " \"topic.prefix\"='jdbc-',\n"
+ " \"table.whitelist\"='users',\n"
+ " \"key\"='username');";
// When
final String maskedQuery = QueryMask.getMaskedStatement(query);
// Then
final String expected = "CREATE SOURCE CONNECTOR `test-connector` WITH "
+ "(\"connector.class\"='PostgresSource', "
+ "'connection.url'='[string]', "
+ "\"mode\"='[string]', "
+ "\"topic.prefix\"='[string]', "
+ "\"table.whitelist\"='[string]', "
+ "\"key\"='[string]');";
assertThat(maskedQuery, is(expected));
}
|
@Override
public String toString() {
String simpleName = getClass().getSimpleName();
if (getClass().getEnclosingClass() != null) {
simpleName = getClass().getEnclosingClass().getSimpleName() + "." + simpleName;
}
if (subTriggers == null || subTriggers.isEmpty()) {
return simpleName;
} else {
return simpleName + "(" + Joiner.on(", ").join(subTriggers) + ")";
}
}
|
@Test
public void testTriggerToString() throws Exception {
assertEquals(
"AfterWatermark.pastEndOfWindow()",
AfterWatermarkStateMachine.pastEndOfWindow().toString());
assertEquals(
"Repeatedly.forever(AfterWatermark.pastEndOfWindow())",
RepeatedlyStateMachine.forever(AfterWatermarkStateMachine.pastEndOfWindow()).toString());
}
|
public void addEntry( String filename, String extension ) throws IOException {
// Default no-op behavior
}
|
@Test
public void testAddEntry() throws IOException {
CompressionProvider provider = outStream.getCompressionProvider();
ByteArrayOutputStream out = new ByteArrayOutputStream();
outStream = new DummyCompressionOS( out, provider );
outStream.addEntry( null, null );
}
|
public static <X extends Throwable> void isNull(Object object, Supplier<X> errorSupplier) throws X {
if (null != object) {
throw errorSupplier.get();
}
}
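
// Usage sketch: the Supplier defers constructing the exception until the check
// actually fails. loadFromCache() is a hypothetical lookup for illustration.
Object stale = loadFromCache();
Assert.isNull(stale, () -> new IllegalStateException("cache entry must not exist yet"));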
|
@Test
public void isNullTest() {
String a = null;
cn.hutool.core.lang.Assert.isNull(a);
}
|
public static void main(String[] args) {
/*
Initialising the printer queue with jobs
*/
printerQueue.addPrinterItem(new PrinterItem(PaperSizes.A4, 5, false, false));
printerQueue.addPrinterItem(new PrinterItem(PaperSizes.A3, 2, false, false));
printerQueue.addPrinterItem(new PrinterItem(PaperSizes.A2, 5, false, false));
/*
This variable is the collecting parameter, and will store the policy abiding print jobs.
*/
var result = new LinkedList<PrinterItem>();
/*
Adding A4, A3, and A2 papers that obey the policy
*/
addValidA4Papers(result);
addValidA3Papers(result);
addValidA2Papers(result);
}
|
@Test
void executesWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
public static Optional<String> getTableNameByRowPath(final String rowPath) {
Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)/schemas/([\\w\\-]+)/tables" + "/([\\w\\-]+)?", Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(rowPath);
return matcher.find() ? Optional.of(matcher.group(3)) : Optional.empty();
}
|
@Test
void assertGetTableNameByRowPathHappyPath() {
assertThat(ShardingSphereDataNode.getTableNameByRowPath("/statistics/databases/db_name/schemas/db_schema/tables/tbl_name"), is(Optional.of("tbl_name")));
}
|
public static <T> T[] replaceFirst(T[] src, T oldValue, T[] newValues) {
int index = indexOf(src, oldValue);
if (index == -1) {
return src;
}
T[] dst = (T[]) Array.newInstance(src.getClass().getComponentType(), src.length - 1 + newValues.length);
// copy the first part till the match
System.arraycopy(src, 0, dst, 0, index);
// copy the second part from the match
System.arraycopy(src, index + 1, dst, index + newValues.length, src.length - index - 1);
// copy the newValues into the dst
System.arraycopy(newValues, 0, dst, index, newValues.length);
return dst;
}
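
// Worked example of the three System.arraycopy calls for
// src = {1, 2, 10, 3, 4}, oldValue = 10, newValues = {7, 8} (match at index 2):
//   src[0..2)       -> dst[0..2)   dst = {1, 2, _, _, _, _}
//   src[3..5)       -> dst[4..6)   dst = {1, 2, _, _, 3, 4}
//   newValues[0..2) -> dst[2..4)   dst = {1, 2, 7, 8, 3, 4}
Integer[] dst = replaceFirst(new Integer[]{1, 2, 10, 3, 4}, 10, new Integer[]{7, 8});
// dst == {1, 2, 7, 8, 3, 4}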
|
@Test
public void replace_whenNewValuesEmpty() {
Integer[] result = replaceFirst(new Integer[]{1, 2, 10, 3, 4}, 10, new Integer[]{});
assertArrayEquals(new Integer[]{1, 2, 3, 4}, result);
}
|
public AstNode rewrite(final AstNode node, final C context) {
return rewriter.process(node, context);
}
|
@Test
public void shouldRewritePartitionBy() {
// Given:
final PartitionBy partitionBy = new PartitionBy(
location,
ImmutableList.of(expression)
);
when(expressionRewriter.apply(expression, context)).thenReturn(rewrittenExpression);
// When:
final AstNode rewritten = rewriter.rewrite(partitionBy, context);
// Then:
assertThat(rewritten, equalTo(new PartitionBy(
location,
ImmutableList.of(rewrittenExpression)
)));
}
|
public static SetAssignmentSegment bind(final SetAssignmentSegment segment, final SQLStatementBinderContext binderContext,
final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) {
return new SetAssignmentSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getAssignments().stream()
.map(each -> bindColumnAssignmentSegment(each, binderContext, tableBinderContexts, outerTableBinderContexts)).collect(Collectors.toList()));
}
|
@Test
void assertBindAssignmentSegment() {
Collection<ColumnAssignmentSegment> assignments = new LinkedList<>();
ColumnSegment boundOrderIdColumn = new ColumnSegment(0, 0, new IdentifierValue("order_id"));
boundOrderIdColumn.setColumnBoundInfo(new ColumnSegmentBoundInfo(new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue(DefaultDatabase.LOGIC_NAME),
new IdentifierValue("t_order"), new IdentifierValue("order_id")));
ColumnSegment columnSegment = new ColumnSegment(0, 0, new IdentifierValue("order_id"));
assignments.add(new ColumnAssignmentSegment(0, 0, Collections.singletonList(columnSegment), new LiteralExpressionSegment(0, 0, 1)));
SetAssignmentSegment setAssignmentSegment = new SetAssignmentSegment(0, 0, assignments);
Map<String, TableSegmentBinderContext> tableBinderContexts = Collections.singletonMap(
"t_order", new SimpleTableSegmentBinderContext(Collections.singleton(new ColumnProjectionSegment(boundOrderIdColumn))));
SetAssignmentSegment actual = AssignmentSegmentBinder.bind(setAssignmentSegment, mock(SQLStatementBinderContext.class), tableBinderContexts, Collections.emptyMap());
assertThat(actual, not(setAssignmentSegment));
assertThat(actual.getAssignments().iterator().next(), not(setAssignmentSegment.getAssignments().iterator().next()));
assertThat(actual.getAssignments().iterator().next().getColumns().iterator().next().getColumnBoundInfo().getOriginalTable().getValue(), is("t_order"));
}
|
@Override
public boolean onOptionsItemSelected(@NonNull MenuItem item) {
MainSettingsActivity mainSettingsActivity = (MainSettingsActivity) getActivity();
if (mainSettingsActivity == null) return super.onOptionsItemSelected(item);
if (item.getItemId() == R.id.add_user_word) {
createEmptyItemForAdd();
return true;
}
return super.onOptionsItemSelected(item);
}
|
@Test
public void testTwiceAddNewWordFromMenuNotAtEmptyState() {
// adding a few words to the dictionary
UserDictionary userDictionary = new UserDictionary(getApplicationContext(), "en");
userDictionary.loadDictionary();
userDictionary.addWord("hello", 1);
userDictionary.addWord("you", 2);
userDictionary.close();
UserDictionaryEditorFragment fragment = startEditorFragment();
RecyclerView wordsRecyclerView = fragment.getView().findViewById(R.id.words_recycler_view);
Assert.assertNotNull(wordsRecyclerView);
Assert.assertEquals(
3 /*two words, and one AddNew*/, wordsRecyclerView.getAdapter().getItemCount());
Assert.assertEquals(
R.id.word_editor_view_type_row, wordsRecyclerView.getAdapter().getItemViewType(0));
Assert.assertEquals(
R.id.word_editor_view_type_row, wordsRecyclerView.getAdapter().getItemViewType(1));
Assert.assertEquals(
R.id.word_editor_view_type_add_new_row, wordsRecyclerView.getAdapter().getItemViewType(2));
final MenuItem menuItem = Mockito.mock(MenuItem.class);
Mockito.doReturn(R.id.add_user_word).when(menuItem).getItemId();
fragment.onOptionsItemSelected(menuItem);
TestRxSchedulers.drainAllTasks();
fragment.onOptionsItemSelected(menuItem);
TestRxSchedulers.drainAllTasks();
Assert.assertEquals(3, wordsRecyclerView.getAdapter().getItemCount());
Assert.assertEquals(
R.id.word_editor_view_type_row, wordsRecyclerView.getAdapter().getItemViewType(0));
Assert.assertEquals(
R.id.word_editor_view_type_row, wordsRecyclerView.getAdapter().getItemViewType(1));
Assert.assertEquals(
R.id.word_editor_view_type_editing_row, wordsRecyclerView.getAdapter().getItemViewType(2));
}
|
@Override
public ConfigDef config() {
return CONFIG_DEF;
}
|
@Test
public void testConnectorConfigValidation() {
List<ConfigValue> configValues = connector.config().validate(sourceProperties);
for (ConfigValue val : configValues) {
assertEquals(0, val.errorMessages().size(), "Config property errors: " + val.errorMessages());
}
}
|
public static SourceOperationResponse performSplit(
SourceSplitRequest request, PipelineOptions options) throws Exception {
return performSplitWithApiLimit(
request, options, DEFAULT_NUM_BUNDLES_LIMIT, DATAFLOW_SPLIT_RESPONSE_API_SIZE_LIMIT);
}
|
@Test
public void testLargeSerializedSizeResplits() throws Exception {
final long apiSizeLimitForTest = 5 * 1024;
// Figure out how many splits of CountingSource are needed to exceed the API limits, using an
// extra factor of 2 to ensure that we go over the limits.
BoundedSource<Long> justForSizing = CountingSource.upTo(1000000L);
long size =
DataflowApiUtils.computeSerializedSizeBytes(
translateIOToCloudSource(justForSizing, options));
long numberToSplitToExceedLimit = 2 * apiSizeLimitForTest / size;
checkState(
numberToSplitToExceedLimit < WorkerCustomSources.DEFAULT_NUM_BUNDLES_LIMIT,
"This test expects the number of splits to be less than %s "
+ "to avoid using SplittableOnlyBoundedSource",
WorkerCustomSources.DEFAULT_NUM_BUNDLES_LIMIT);
// Generate a CountingSource and split it into the desired number of splits
// (desired size = 8 bytes, 1 long), triggering the re-split with a larger bundle size.
com.google.api.services.dataflow.model.Source source =
translateIOToCloudSource(CountingSource.upTo(numberToSplitToExceedLimit), options);
SourceSplitResponse split =
performSplit(source, options, 8L, null /* numBundles limit */, apiSizeLimitForTest);
logged.verifyWarn("too large for the Google Cloud Dataflow API");
logged.verifyWarn(String.format("%d bundles", numberToSplitToExceedLimit));
assertThat((long) split.getBundles().size(), lessThan(numberToSplitToExceedLimit));
}
|
public static Object[] realize(Object[] objs, Class<?>[] types) {
if (objs.length != types.length) {
throw new IllegalArgumentException("args.length != types.length");
}
Object[] dests = new Object[objs.length];
for (int i = 0; i < objs.length; i++) {
dests[i] = realize(objs[i], types[i]);
}
return dests;
}
|
@Test
void testListJsonObjectToListMap() throws Exception {
Method method = PojoUtilsTest.class.getMethod("setListMap", List.class);
assertNotNull(method);
JSONObject jsonObject = new JSONObject();
jsonObject.put("1", "test");
List<JSONObject> list = new ArrayList<>(1);
list.add(jsonObject);
@SuppressWarnings("unchecked")
List<Map<Integer, Object>> result = (List<Map<Integer, Object>>)
PojoUtils.realize(list, method.getParameterTypes()[0], method.getGenericParameterTypes()[0]);
method.invoke(new PojoUtilsTest(), result);
assertEquals("test", result.get(0).get(1));
}
|
public int getNextSetBitOffset(int bitOffset) {
int byteOffset = bitOffset / Byte.SIZE;
int bitOffsetInFirstByte = bitOffset % Byte.SIZE;
int firstByte = (_dataBuffer.getByte(byteOffset) << bitOffsetInFirstByte) & BYTE_MASK;
if (firstByte != 0) {
return bitOffset + FIRST_BIT_SET[firstByte];
}
while (true) {
byteOffset++;
int currentByte = _dataBuffer.getByte(byteOffset) & BYTE_MASK;
if (currentByte != 0) {
return (byteOffset * Byte.SIZE) | FIRST_BIT_SET[currentByte];
}
}
}
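
// FIRST_BIT_SET is a 256-entry lookup table mapping a non-zero byte value to the
// offset of its first set bit, counting from the most significant bit (which is
// how the shift logic above numbers bits). A sketch of how such a table could be
// populated (the actual constant may be written out literally; entry 0 is unused
// because callers only index with non-zero bytes):
static final int[] FIRST_BIT_SET = new int[256];
static {
    for (int value = 1; value < 256; value++) {
        FIRST_BIT_SET[value] = Integer.numberOfLeadingZeros(value) - 24;
    }
}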
|
@Test
public void testGetNextSetBitOffset()
throws IOException {
int[] setBitOffsets = new int[NUM_ITERATIONS];
int bitOffset = RANDOM.nextInt(10);
for (int i = 0; i < NUM_ITERATIONS; i++) {
setBitOffsets[i] = bitOffset;
bitOffset += RANDOM.nextInt(10) + 1;
}
int dataBufferSize = setBitOffsets[NUM_ITERATIONS - 1] / Byte.SIZE + 1;
try (PinotDataBuffer dataBuffer = getEmptyDataBuffer(dataBufferSize);
PinotDataBitSet dataBitSet = new PinotDataBitSet(dataBuffer)) {
for (int i = 0; i < NUM_ITERATIONS; i++) {
dataBitSet.setBit(setBitOffsets[i]);
}
// Test next set bit offset
for (int i = 0; i < NUM_ITERATIONS - 1; i++) {
assertEquals(dataBitSet
.getNextSetBitOffset(setBitOffsets[i] + RANDOM.nextInt(setBitOffsets[i + 1] - setBitOffsets[i]) + 1),
setBitOffsets[i + 1]);
}
// Test next nth set bit offset
for (int i = 0; i < NUM_ITERATIONS - 100; i++) {
int n = RANDOM.nextInt(100) + 1;
assertEquals(
dataBitSet.getNextNthSetBitOffset(
setBitOffsets[i] + RANDOM.nextInt(setBitOffsets[i + 1] - setBitOffsets[i]) + 1, n),
setBitOffsets[i + n]);
}
}
}
|
@Override
public Collection<String> getLogicTableNames() {
return logicalTableNames;
}
|
@Test
void assertGetLogicTableMapper() {
assertThat(new LinkedList<>(ruleAttribute.getLogicTableNames()), is(Collections.singletonList("foo_tbl")));
}
|
abstract List<String> parseJobID() throws IOException;
|
@Test
public void testParseHive() throws IOException {
String errFileName = "src/test/data/status/hive";
HiveJobIDParser hiveJobIDParser = new HiveJobIDParser(errFileName, new Configuration());
List<String> jobs = hiveJobIDParser.parseJobID();
Assert.assertEquals(jobs.size(), 1);
}
|
@Override
public void updateMailSendResult(Long logId, String messageId, Exception exception) {
// 1. 成功
if (exception == null) {
mailLogMapper.updateById(new MailLogDO().setId(logId).setSendTime(LocalDateTime.now())
.setSendStatus(MailSendStatusEnum.SUCCESS.getStatus()).setSendMessageId(messageId));
return;
}
        // 2. failure
mailLogMapper.updateById(new MailLogDO().setId(logId).setSendTime(LocalDateTime.now())
.setSendStatus(MailSendStatusEnum.FAILURE.getStatus()).setSendException(getRootCauseMessage(exception)));
}
|
@Test
public void testUpdateMailSendResult_success() {
        // mock data
        MailLogDO log = randomPojo(MailLogDO.class, o -> {
            o.setSendStatus(MailSendStatusEnum.INIT.getStatus());
            o.setSendTime(null).setSendMessageId(null).setSendException(null)
                    .setTemplateParams(randomTemplateParams());
        });
        mailLogMapper.insert(log);
        // prepare parameters
        Long logId = log.getId();
        String messageId = randomString();
        // invoke
        mailLogService.updateMailSendResult(logId, messageId, null);
        // assert
MailLogDO dbLog = mailLogMapper.selectById(logId);
assertEquals(MailSendStatusEnum.SUCCESS.getStatus(), dbLog.getSendStatus());
assertNotNull(dbLog.getSendTime());
assertEquals(messageId, dbLog.getSendMessageId());
assertNull(dbLog.getSendException());
}
|
public Result execute( final Params params ) throws Throwable {
return execute( params, null );
}
|
@Test
public void testExecuteWithInvalidRepository() {
// Create Mock Objects
Params params = mock( Params.class );
KitchenCommandExecutor kitchenCommandExecutor = new KitchenCommandExecutor( Kitchen.class );
try ( MockedStatic<BaseMessages> baseMessagesMockedStatic = mockStatic( BaseMessages.class ) ) {
// Mock returns
when( params.getRepoName() ).thenReturn( "NoExistingRepository" );
baseMessagesMockedStatic.when( () -> BaseMessages.getString( any( Class.class ), anyString(), any() ) ).thenReturn( "" );
try {
Result result = kitchenCommandExecutor.execute( params, null );
Assert.assertEquals( CommandExecutorCodes.Kitchen.COULD_NOT_LOAD_JOB.getCode(), result.getExitStatus() );
} catch ( Throwable throwable ) {
Assert.fail();
}
}
}
|
static void sanityCheckResources(Builder builder) {
int numSetResources = builder._numBrokers != DEFAULT_OPTIONAL_INT ? 1 : 0;
if (builder._numRacks != DEFAULT_OPTIONAL_INT) {
numSetResources++;
}
if (builder._numDisks != DEFAULT_OPTIONAL_INT) {
numSetResources++;
}
if (builder._numPartitions != DEFAULT_OPTIONAL_INT) {
numSetResources++;
}
if (numSetResources != 1) {
throw new IllegalArgumentException(
String.format("Exactly one resource type must be set (Brokers:%s Racks:%s Disks:%s Partitions:%s).",
builder._numBrokers == DEFAULT_OPTIONAL_INT ? "-" : String.valueOf(builder._numBrokers),
builder._numRacks == DEFAULT_OPTIONAL_INT ? "-" : String.valueOf(builder._numRacks),
builder._numDisks == DEFAULT_OPTIONAL_INT ? "-" : String.valueOf(builder._numDisks),
builder._numPartitions == DEFAULT_OPTIONAL_INT ? "-" : String.valueOf(builder._numPartitions)));
}
if (builder._numPartitions != DEFAULT_OPTIONAL_INT) {
if (builder._status != ProvisionStatus.UNDER_PROVISIONED) {
throw new IllegalArgumentException("When the resource type is partition, the cluster must be under provisioned.");
} else if (builder._topicPattern == null) {
throw new IllegalArgumentException("When the resource type is partition, the corresponding topic regex must be specified.");
}
} else if (builder._topicPattern != null) {
throw new IllegalArgumentException("When the resource type is not partition, topic regex cannot be specified.");
}
}
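
// A builder that passes these checks sets exactly one resource; for partitions it
// must also be UNDER_PROVISIONED and carry a topic regex. Sketch below: the
// topicPattern setter name is an assumption, the other calls appear in the tests.
ProvisionRecommendation.sanityCheckResources(
        new ProvisionRecommendation.Builder(ProvisionStatus.UNDER_PROVISIONED)
                .numPartitions(2)
                .topicPattern(Pattern.compile("test-topic-.*")));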
|
@Test
public void testSanityCheckResources() {
// Set multiple resources
assertThrows(IllegalArgumentException.class, () -> ProvisionRecommendation.sanityCheckResources(
new ProvisionRecommendation.Builder(ProvisionStatus.UNDER_PROVISIONED).numBrokers(1).numRacks(1)));
// Set numPartitions under over-provisioned state
assertThrows(IllegalArgumentException.class, () -> ProvisionRecommendation.sanityCheckResources(
new ProvisionRecommendation.Builder(ProvisionStatus.OVER_PROVISIONED).numPartitions(1)));
// Set numPartitions without the topic
assertThrows(IllegalArgumentException.class, () -> ProvisionRecommendation.sanityCheckResources(
new ProvisionRecommendation.Builder(ProvisionStatus.UNDER_PROVISIONED).numPartitions(1)));
}
|
public static CumulativeWindowAssigner of(Duration maxSize, Duration step) {
return new CumulativeWindowAssigner(maxSize.toMillis(), step.toMillis(), 0, true);
}
|
@Test
public void testInvalidParameters3() {
assertThatThrownBy(
() ->
CumulativeWindowAssigner.of(
Duration.ofSeconds(5000), Duration.ofSeconds(2000)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("size must be an integral multiple of step.");
}
|
public ExecutorService getSharedExecutor() {
return sharedExecutor;
}
|
@Test
void testSharedExecutor() throws Exception {
ExecutorService sharedExecutor = frameworkExecutorRepository.getSharedExecutor();
CountDownLatch latch = new CountDownLatch(3);
CountDownLatch latch1 = new CountDownLatch(1);
sharedExecutor.execute(() -> {
latch.countDown();
try {
latch1.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
sharedExecutor.execute(() -> {
latch.countDown();
try {
latch1.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
sharedExecutor.submit(() -> {
latch.countDown();
try {
latch1.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
await().until(() -> latch.getCount() == 0);
Assertions.assertEquals(3, ((ThreadPoolExecutor) sharedExecutor).getActiveCount());
latch1.countDown();
await().until(() -> ((ThreadPoolExecutor) sharedExecutor).getActiveCount() == 0);
Assertions.assertEquals(3, ((ThreadPoolExecutor) sharedExecutor).getCompletedTaskCount());
}
|
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
}
|
@Test
public void testHaving()
{
// TODO: verify output
analyze("SELECT sum(a) FROM t1 HAVING avg(a) - avg(b) > 10");
}
|
boolean isEncodable(DiscreteResource resource) {
return resource.valueAs(Object.class)
.map(Object::getClass)
.map(codecs::containsKey)
.orElse(Boolean.FALSE);
}
|
@Test
public void isRootNonEncodable() {
DiscreteResource resource = Resource.ROOT;
assertThat(sut.isEncodable(resource), is(false));
}
|
public long getMillis(HazelcastProperty property) {
TimeUnit timeUnit = property.getTimeUnit();
return timeUnit.toMillis(getLong(property));
}
|
@Test(expected = IllegalArgumentException.class)
public void getTimeUnit_noTimeUnitProperty() {
defaultProperties.getMillis(ClusterProperty.EVENT_THREAD_COUNT);
}
|
@Override
public String convertDestination(ProtocolConverter converter, Destination d) {
if (d == null) {
return null;
}
ActiveMQDestination activeMQDestination = (ActiveMQDestination)d;
String physicalName = activeMQDestination.getPhysicalName();
String rc = converter.getCreatedTempDestinationName(activeMQDestination);
        if (rc != null) {
return rc;
}
StringBuilder buffer = new StringBuilder();
if (activeMQDestination.isQueue()) {
if (activeMQDestination.isTemporary()) {
buffer.append("/remote-temp-queue/");
} else {
buffer.append("/queue/");
}
} else {
if (activeMQDestination.isTemporary()) {
buffer.append("/remote-temp-topic/");
} else {
buffer.append("/topic/");
}
}
buffer.append(physicalName);
return buffer.toString();
}
|
@Test(timeout = 10000)
public void testConvertTemporaryTopic() throws Exception {
ActiveMQDestination destination = translator.convertDestination(converter, "/temp-topic/test", false);
assertFalse(destination.isComposite());
assertEquals(ActiveMQDestination.TEMP_TOPIC_TYPE, destination.getDestinationType());
}
|
@Override
public void run() {
try {
coordinator.start();
} catch (Exception e) {
LOG.error("Coordinator error during start, exiting thread", e);
terminated = true;
}
while (!terminated) {
try {
coordinator.process();
} catch (Exception e) {
LOG.error("Coordinator error during process, exiting thread", e);
terminated = true;
}
}
try {
coordinator.stop();
} catch (Exception e) {
LOG.error("Coordinator error during stop, ignoring", e);
}
coordinator = null;
}
|
@Test
public void testRun() {
Coordinator coordinator = mock(Coordinator.class);
CoordinatorThread coordinatorThread = new CoordinatorThread(coordinator);
coordinatorThread.start();
verify(coordinator, timeout(1000)).start();
verify(coordinator, timeout(1000).atLeast(1)).process();
verify(coordinator, times(0)).stop();
assertThat(coordinatorThread.isTerminated()).isFalse();
coordinatorThread.terminate();
verify(coordinator, timeout(1000)).stop();
assertThat(coordinatorThread.isTerminated()).isTrue();
}
|
public static RawPrivateTransaction decode(final String hexTransaction) {
final byte[] transaction = Numeric.hexStringToByteArray(hexTransaction);
final TransactionType transactionType = getPrivateTransactionType(transaction);
if (transactionType == TransactionType.EIP1559) {
return decodePrivateTransaction1559(transaction);
}
return decodeLegacyPrivateTransaction(transaction);
}
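
// getPrivateTransactionType is not shown; under EIP-2718 a typed transaction
// envelope begins with its type byte (0x02 for EIP-1559) while a legacy RLP
// payload starts at 0xc0 or above, so a plausible sketch is:
private static TransactionType getPrivateTransactionType(final byte[] transaction) {
    return transaction[0] == 0x02 ? TransactionType.EIP1559 : TransactionType.LEGACY;
}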
|
@Test
public void testDecodingSigned1559() throws Exception {
final BigInteger nonce = BigInteger.ZERO;
final long chainId = 2018;
final BigInteger gasLimit = BigInteger.TEN;
final BigInteger maxPriorityFeePerGas = BigInteger.ONE;
final BigInteger maxFeePerGas = BigInteger.ONE;
final String to = "0x0add5355";
final RawPrivateTransaction rawTransaction =
RawPrivateTransaction.createTransaction(
chainId,
nonce,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
"",
MOCK_ENCLAVE_KEY,
MOCK_PRIVATE_FOR,
RESTRICTED);
final String privateKey =
"8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63";
final Credentials credentials = Credentials.create(privateKey);
final byte[] encodedMessage =
PrivateTransactionEncoder.signMessage(rawTransaction, credentials);
final String hexMessage = Numeric.toHexString(encodedMessage);
final RawPrivateTransaction result = PrivateTransactionDecoder.decode(hexMessage);
final PrivateTransaction1559 result1559 =
(PrivateTransaction1559) result.getPrivateTransaction();
assertNotNull(result1559);
assertEquals(nonce, result1559.getNonce());
assertEquals(chainId, result1559.getChainId());
assertEquals(maxPriorityFeePerGas, result1559.getMaxPriorityFeePerGas());
assertEquals(maxFeePerGas, result1559.getMaxFeePerGas());
assertEquals(gasLimit, result1559.getGasLimit());
assertEquals(to, result1559.getTo());
assertEquals("0x", result1559.getData());
assertEquals(MOCK_ENCLAVE_KEY, result1559.getPrivateFrom());
assertEquals(MOCK_PRIVATE_FOR, result1559.getPrivateFor().get());
assertEquals(RESTRICTED, result1559.getRestriction());
assertTrue(result instanceof SignedRawPrivateTransaction);
final SignedRawPrivateTransaction signedResult = (SignedRawPrivateTransaction) result;
assertNotNull(signedResult.getSignatureData());
Sign.SignatureData signatureData = signedResult.getSignatureData();
final byte[] encodedTransaction = PrivateTransactionEncoder.encode(rawTransaction);
final BigInteger key = Sign.signedMessageToKey(encodedTransaction, signatureData);
assertEquals(key, credentials.getEcKeyPair().getPublicKey());
assertEquals(credentials.getAddress(), signedResult.getFrom());
signedResult.verify(credentials.getAddress());
assertNull(signedResult.getChainId());
}
|
static String getAbbreviation(Exception ex,
Integer statusCode,
String storageErrorMessage) {
String result = null;
for (RetryReasonCategory retryReasonCategory : rankedReasonCategories) {
final String abbreviation
= retryReasonCategory.captureAndGetAbbreviation(ex,
statusCode, storageErrorMessage);
if (abbreviation != null) {
result = abbreviation;
}
}
return result;
}
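
// Note that the loop keeps overwriting result, so the LAST matching category wins:
// rankedReasonCategories must be ordered from least to most specific. Illustration
// with hypothetical categories [serverError -> "5XX", serviceUnavailable -> "503"]:
// for a 503 status, "5XX" is captured first and then overwritten, so the method
// returns "503".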
|
@Test
public void test503UnknownRetryReason() {
Assertions.assertThat(RetryReason.getAbbreviation(null, HTTP_UNAVAILABLE, null)).isEqualTo(
"503"
);
}
|
public static String[] splitString( String string, String separator ) {
    /*
     * Example: "a;b;c;d" split on ";" --> new String[] { "a", "b", "c", "d" }
     */
List<String> list = new ArrayList<>();
if ( string == null || string.length() == 0 ) {
return new String[] {};
}
int sepLen = separator.length();
int from = 0;
int end = string.length() - sepLen + 1;
for ( int i = from; i < end; i += sepLen ) {
if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) {
// OK, we found a separator, the string to add to the list
// is [from, i[
list.add( nullToEmpty( string.substring( from, i ) ) );
from = i + sepLen;
}
}
// Wait, if the string didn't end with a separator, we still have information at the end of the string...
// In our example that would be "d"...
if ( from + sepLen <= string.length() ) {
list.add( nullToEmpty( string.substring( from, string.length() ) ) );
}
return list.toArray( new String[list.size()] );
}
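
// Unlike String.split (regex-based and case-sensitive), the separator here is
// matched case-insensitively:
String[] a = splitString( "oneXtwoxthree", "x" ); // -> { "one", "two", "three" }
String[] b = "oneXtwoxthree".split( "x" );        // -> { "oneXtwo", "three" }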
|
@Test
public void testSplitStringNullWithDelimiterNullAndEnclosureNullRemoveEnclosure() {
String[] result = Const.splitString( null, null, null, true );
assertNull( result );
}
|
@Override
public boolean createEmptyObject(String key) {
try {
ObjectMetadata objMeta = new ObjectMetadata();
objMeta.setContentLength(0L);
mClient.putObject(mBucketName, key, new ByteArrayInputStream(new byte[0]), objMeta);
return true;
} catch (ObsException e) {
LOG.error("Failed to create object: {}", key, e);
return false;
}
}
|
@Test
public void testCreateEmptyObject() {
// test successful create empty object
Mockito.when(mClient.putObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.any(InputStream.class), ArgumentMatchers.any(ObjectMetadata.class)))
.thenReturn(null);
boolean result = mOBSUnderFileSystem.createEmptyObject(KEY);
Assert.assertTrue(result);
// test create empty object exception
Mockito.when(mClient.putObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.any(InputStream.class), ArgumentMatchers.any(ObjectMetadata.class)))
.thenThrow(ObsException.class);
    // createEmptyObject swallows the ObsException and signals failure via its return value
    result = mOBSUnderFileSystem.createEmptyObject(KEY);
    Assert.assertFalse(result);
}
|
@Nonnull
public <T> T getInstance(@Nonnull Class<T> type) {
return getInstance(new Key<>(type));
}
|
@Test
public void autoFactory_factoryMethodsCreateNewInstances() throws Exception {
injector = builder.bind(Umm.class, MyUmm.class).build();
FooFactory factory = injector.getInstance(FooFactory.class);
Foo chauncey = factory.create("Chauncey");
assertThat(chauncey.name).isEqualTo("Chauncey");
Foo anotherChauncey = factory.create("Chauncey");
assertThat(anotherChauncey).isNotSameInstanceAs(chauncey);
}
|
@Override
public ConsumerBuilder<T> topics(List<String> topicNames) {
checkArgument(topicNames != null && !topicNames.isEmpty(),
"Passed in topicNames list should not be null or empty.");
        topicNames.forEach(topicName ->
                checkArgument(StringUtils.isNotBlank(topicName), "topicNames cannot have blank topic"));
conf.getTopicNames().addAll(topicNames.stream().map(StringUtils::trim).collect(Collectors.toList()));
return this;
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenTopicNamesHasNullTopic() {
List<String> topicNames = Arrays.asList("my-topic", null);
consumerBuilderImpl.topics(topicNames);
}
|
@Override
public OUT nextRecord(OUT record) throws IOException {
OUT returnRecord = null;
do {
returnRecord = super.nextRecord(record);
} while (returnRecord == null && !reachedEnd());
return returnRecord;
}
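
// The loop matters for lenient parsing: the superclass returns null for records it
// skipped (e.g. malformed lines), and without the loop a skipped record would be
// indistinguishable from end-of-input to the caller.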
|
@Test
void testDoubleFields() {
try {
final String fileContent = "11.1|22.2|33.3|44.4|55.5\n66.6|77.7|88.8|99.9|00.0|\n";
final FileInputSplit split = createTempFile(fileContent);
final TupleTypeInfo<Tuple5<Double, Double, Double, Double, Double>> typeInfo =
TupleTypeInfo.getBasicTupleTypeInfo(
Double.class, Double.class, Double.class, Double.class, Double.class);
final CsvInputFormat<Tuple5<Double, Double, Double, Double, Double>> format =
new TupleCsvInputFormat<>(PATH, typeInfo);
format.setFieldDelimiter("|");
format.configure(new Configuration());
format.open(split);
Tuple5<Double, Double, Double, Double, Double> result = new Tuple5<>();
result = format.nextRecord(result);
assertThat(result.f0).isEqualTo(Double.valueOf(11.1));
assertThat(result.f1).isEqualTo(Double.valueOf(22.2));
assertThat(result.f2).isEqualTo(Double.valueOf(33.3));
assertThat(result.f3).isEqualTo(Double.valueOf(44.4));
assertThat(result.f4).isEqualTo(Double.valueOf(55.5));
result = format.nextRecord(result);
assertThat(result.f0).isEqualTo(Double.valueOf(66.6));
assertThat(result.f1).isEqualTo(Double.valueOf(77.7));
assertThat(result.f2).isEqualTo(Double.valueOf(88.8));
assertThat(result.f3).isEqualTo(Double.valueOf(99.9));
assertThat(result.f4).isEqualTo(Double.valueOf(00.0));
result = format.nextRecord(result);
assertThat(result).isNull();
assertThat(format.reachedEnd()).isTrue();
} catch (Exception ex) {
fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage());
}
}
|
public boolean isRegisteredUser(@Nonnull final JID user, final boolean checkRemoteDomains) {
if (xmppServer.isLocal(user)) {
try {
getUser(user.getNode());
return true;
}
catch (final UserNotFoundException e) {
return false;
}
}
else if (!checkRemoteDomains) {
return false;
} else {
// Look up in the cache using the full JID
Boolean isRegistered = remoteUsersCache.get(user.toString());
if (isRegistered == null) {
// Check if the bare JID of the user is cached
isRegistered = remoteUsersCache.get(user.toBareJID());
if (isRegistered == null) {
// No information is cached so check user identity and cache it
// A disco#info is going to be sent to the bare JID of the user. This packet
// is going to be handled by the remote server.
final IQ iq = new IQ(IQ.Type.get);
iq.setFrom(xmppServer.getServerInfo().getXMPPDomain());
iq.setTo(user.toBareJID());
iq.setChildElement("query", "http://jabber.org/protocol/disco#info");
final Semaphore completionSemaphore = new Semaphore(0);
// Send the disco#info request to the remote server.
final IQRouter iqRouter = xmppServer.getIQRouter();
final long timeoutInMillis = REMOTE_DISCO_INFO_TIMEOUT.getValue().toMillis();
iqRouter.addIQResultListener(iq.getID(), new IQResultListener() {
@Override
public void receivedAnswer(final IQ packet) {
final JID from = packet.getFrom();
// Assume that the user is not a registered user
Boolean isRegistered = Boolean.FALSE;
// Analyze the disco result packet
if (IQ.Type.result == packet.getType()) {
final Element child = packet.getChildElement();
if (child != null) {
                            for (final Iterator<Element> it = child.elementIterator("identity"); it.hasNext();) {
                                final Element identity = it.next();
final String accountType = identity.attributeValue("type");
if ("registered".equals(accountType) || "admin".equals(accountType)) {
isRegistered = Boolean.TRUE;
break;
}
}
}
}
// Update cache of remote registered users
remoteUsersCache.put(from.toBareJID(), isRegistered);
completionSemaphore.release();
}
@Override
public void answerTimeout(final String packetId) {
Log.warn("The result from the disco#info request was never received. request: {}", iq);
completionSemaphore.release();
}
}, timeoutInMillis);
// Send the request
iqRouter.route(iq);
// Wait for the response
try {
completionSemaphore.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS);
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
Log.warn("Interrupted whilst waiting for response from remote server", e);
}
isRegistered = remoteUsersCache.computeIfAbsent(user.toBareJID(), ignored -> Boolean.FALSE);
}
}
return isRegistered;
}
}
|
@Test
public void isRegisteredUserFalseWillReturnFalseForUnknownRemoteUsers() {
final AtomicReference<IQResultListener> iqListener = new AtomicReference<>();
doAnswer(invocationOnMock -> {
final IQResultListener listener = invocationOnMock.getArgument(1);
iqListener.set(listener);
return null;
}).when(iqRouter).addIQResultListener(any(), any(), anyLong());
doAnswer(invocationOnMock -> {
final IQ iq = invocationOnMock.getArgument(0);
final Element childElement = iq.getChildElement();
final IQ response = IQ.createResultIQ(iq);
response.setChildElement(childElement.createCopy());
response.setError(new PacketError(PacketError.Condition.item_not_found, PacketError.Condition.item_not_found.getDefaultType()));
iqListener.get().receivedAnswer(response);
return null;
}).when(iqRouter).route(any());
final boolean result = userManager.isRegisteredUser(new JID(USER_ID, REMOTE_XMPP_DOMAIN, null), false);
assertThat(result, is(false));
}
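
For reference, a hedged sketch of the kind of disco#info answer that would flip the cache entry to TRUE instead (the identity attributes follow XEP-0030; the request and listener variables are assumptions mirroring the test's wiring):

// Sketch: an answer carrying <identity type='registered'/> marks the
// user as registered on the remote domain.
final IQ answer = IQ.createResultIQ(request);
final Element query = answer.setChildElement("query", "http://jabber.org/protocol/disco#info");
query.addElement("identity")
        .addAttribute("category", "account")
        .addAttribute("type", "registered");
iqListener.get().receivedAnswer(answer);
// isRegisteredUser(..., true) would now return true for that bare JID.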
|
@Override
public void execute() {
BatchGetItemResponse result
= ddbClient.batchGetItem(BatchGetItemRequest.builder().requestItems(determineBatchItems()).build());
HashMap<Object, Object> tmp = new HashMap<>();
tmp.put(Ddb2Constants.BATCH_RESPONSE, result.responses());
tmp.put(Ddb2Constants.UNPROCESSED_KEYS, result.unprocessedKeys());
addToResults(tmp);
}
|
@SuppressWarnings("unchecked")
@Test
public void execute() {
Map<String, AttributeValue> key = new HashMap<>();
key.put("1", AttributeValue.builder().s("Key_1").build());
Map<String, AttributeValue> unprocessedKey = new HashMap<>();
unprocessedKey.put("1", AttributeValue.builder().s("UNPROCESSED_KEY").build());
Map<String, KeysAndAttributes> keysAndAttributesMap = new HashMap<>();
KeysAndAttributes keysAndAttributes = KeysAndAttributes.builder().keys(key).build();
keysAndAttributesMap.put("DOMAIN1", keysAndAttributes);
exchange.getIn().setHeader(Ddb2Constants.BATCH_ITEMS, keysAndAttributesMap);
command.execute();
assertEquals(keysAndAttributesMap, ddbClient.batchGetItemRequest.requestItems());
List<Map<String, AttributeValue>> batchResponse = (List<Map<String, AttributeValue>>) exchange.getIn()
.getHeader(Ddb2Constants.BATCH_RESPONSE, Map.class).get("DOMAIN1");
AttributeValue value = batchResponse.get(0).get("attrName");
KeysAndAttributes unProcessedAttributes
= (KeysAndAttributes) exchange.getIn().getHeader(Ddb2Constants.UNPROCESSED_KEYS, Map.class).get("DOMAIN1");
Map<String, AttributeValue> next = unProcessedAttributes.keys().iterator().next();
assertEquals(AttributeValue.builder().s("attrValue").build(), value);
assertEquals(unprocessedKey, next);
}
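
A minimal usage sketch of the header contract (the exchange and endpoint wiring are assumptions): the command reads the per-table key map from BATCH_ITEMS and publishes both the responses and any unprocessed keys back onto the same exchange.

// Sketch: assumes an Exchange from a configured aws2-ddb route.
Map<String, KeysAndAttributes> items = new HashMap<>();
items.put("DOMAIN1", KeysAndAttributes.builder()
        .keys(Map.of("1", AttributeValue.builder().s("Key_1").build()))
        .build());
exchange.getIn().setHeader(Ddb2Constants.BATCH_ITEMS, items);
// After execute(): Ddb2Constants.BATCH_RESPONSE maps table name -> returned
// items, and Ddb2Constants.UNPROCESSED_KEYS maps table name -> the
// KeysAndAttributes DynamoDB did not process in this call.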
|
public long getUncompressedLen() {
return uncompressed_len;
}
|
@Test
public void testGetUncompressedLen() {
assertEquals(TestParameters.VP_RES_TBL_UNCOMP_LENGTH,
chmLzxcResetTable.getUncompressedLen());
}
|