language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/loader/ast/spi/EntityLoader.java
|
{
"start": 295,
"end": 382
}
|
interface ____ extends Loader {
@Override
EntityMappingType getLoadable();
}
|
EntityLoader
|
java
|
hibernate__hibernate-orm
|
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/hqlsql/Dao.java
|
{
"start": 491,
"end": 2447
}
|
interface ____ {
@Find
Book getBook(String isbn);
@Find
Book getBook(String title, String isbn);
@Find
Book getBookByNaturalKey(String authorName, String title);
@HQL("from Book where title like ?1")
TypedQuery<Book> findByTitle(String title);
@HQL("from Book where title like ?1 order by title fetch first ?2 rows only")
List<Book> findFirstNByTitle(String title, int N);
@HQL("from Book where title like :title")
List<Book> findByTitleWithPagination(String title, Order<? super Book> order, Page page);
@HQL("from Book where title like :title")
SelectionQuery<Book> findByTitleWithOrdering(String title, List<Order<? super Book>> order);
@HQL("from Book where title like :title")
SelectionQuery<Book> findByTitleWithOrderingByVarargs(String title, Order<? super Book>... order);
@HQL("from Book where isbn = :isbn")
Book findByIsbn(String isbn);
@HQL("order by isbn asc, publicationDate desc")
List<Book> allBooks();
@HQL("order by isbn asc, publicationDate desc")
Book[] allBooksAsArray();
@SQL("select * from Book where isbn = :isbn")
Book findByIsbnNative(String isbn);
@Find
List<Book> publishedBooks(String publisher$name);
@HQL("from Book book join fetch book.publisher where book.title like :titlePattern")
List<Book> booksWithPublisherByTitle(String titlePattern, Page page, Order<? super Book> order);
@HQL("select title, pages from Book")
List<Dto> dtoQuery();
@HQL("select new org.hibernate.processor.test.hqlsql.Dto(title, pages) from Book")
List<Dto> dtoQuery1();
@HQL("select new Dto(title, pages) from Book")
List<Dto> dtoQuery2();
@HQL("select new map(title as title, pages as pages) from Book")
List<Map> dtoQuery3();
@HQL("select new list(title, pages) from Book")
List<List> dtoQuery4();
@HQL("from Publisher where address = :address")
List<Publisher> publisherAt(Address address);
@HQL("where array_contains(:isbns, isbn) is true")
List<Book> forIsbnIn(String[] isbns);
}
|
Dao
|
java
|
micronaut-projects__micronaut-core
|
core/src/test/java/io/micronaut/core/util/NativeImageUtilsInNativeImageTest.java
|
{
"start": 312,
"end": 483
}
|
class ____ {
@EnabledInNativeImage
@Test
void testInImageCode() {
assertTrue(NativeImageUtils.inImageCode());
}
}
|
NativeImageUtilsInNativeImageTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractGetFileStatusLive.java
|
{
"start": 1077,
"end": 1310
}
|
class ____ extends
AbstractContractGetFileStatusTest {
@Override
protected AbstractFSContract createContract(Configuration configuration) {
return new AdlStorageContract(configuration);
}
}
|
TestAdlContractGetFileStatusLive
|
java
|
elastic__elasticsearch
|
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ShingleTokenFilterTests.java
|
{
"start": 1090,
"end": 1964
}
|
class ____ extends ESTokenStreamTestCase {
public void testPreConfiguredShingleFilterDisableGraphAttribute() throws Exception {
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
.build(),
new CommonAnalysisPlugin()
);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle");
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("this is a test"));
TokenStream tokenStream = tokenFilter.create(tokenizer);
assertTrue(tokenStream.hasAttribute(DisableGraphAttribute.class));
}
}
|
ShingleTokenFilterTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ReturnValueIgnoredTest.java
|
{
"start": 10429,
"end": 10842
}
|
class ____ {
void f(Function<Integer, Integer> f) {
// BUG: Diagnostic contains: ReturnValueIgnored
f.apply(0);
}
}
""")
.doTest();
}
@Test
public void consumer() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.function.Consumer;
|
Test
|
java
|
quarkusio__quarkus
|
extensions/redis-client/deployment/src/main/java/io/quarkus/redis/deployment/client/DevServicesRedisProcessor.java
|
{
"start": 1855,
"end": 9634
}
|
class ____ {
private static final Logger log = Logger.getLogger(DevServicesRedisProcessor.class);
private static final int REDIS_EXPOSED_PORT = 6379;
private static final String REDIS_SCHEME = "redis://";
/**
* Label to add to shared Dev Service for Redis running in containers.
* This allows other applications to discover the running service and use it instead of starting a new instance.
*/
private static final String DEV_SERVICE_LABEL = "quarkus-dev-service-redis";
private static final ContainerLocator redisContainerLocator = locateContainerWithLabels(REDIS_EXPOSED_PORT,
DEV_SERVICE_LABEL);
@BuildStep
public void startRedisContainers(LaunchModeBuildItem launchMode,
DockerStatusBuildItem dockerStatusBuildItem,
DevServicesComposeProjectBuildItem composeProjectBuildItem,
List<DevServicesSharedNetworkBuildItem> devServicesSharedNetworkBuildItem,
RedisBuildTimeConfig config,
BuildProducer<DevServicesResultBuildItem> devServicesResult,
DevServicesConfig devServicesConfig) {
Set<String> names = new HashSet<>(config.clients().keySet());
names.add(RedisConfig.DEFAULT_CLIENT_NAME);
try {
for (String name : names) {
boolean useSharedNetwork = DevServicesSharedNetworkBuildItem.isSharedNetworkRequired(devServicesConfig,
devServicesSharedNetworkBuildItem);
io.quarkus.redis.deployment.client.DevServicesConfig redisConfig = config.clients().get(name).devservices();
if (redisDevServicesEnabled(dockerStatusBuildItem, name, redisConfig)) {
// If the dev services are disabled, we don't need to do anything
continue;
}
DevServicesResultBuildItem discovered = discoverRunningService(composeProjectBuildItem, name,
redisConfig, launchMode.getLaunchMode(), useSharedNetwork);
if (discovered != null) {
devServicesResult.produce(discovered);
} else {
devServicesResult
.produce(DevServicesResultBuildItem.owned().feature(Feature.REDIS_CLIENT)
.serviceName(name)
.serviceConfig(redisConfig)
.startable(() -> new QuarkusPortRedisContainer(
DockerImageName
.parse(redisConfig.imageName()
.orElseGet(() -> getDefaultImageNameFor("redis")))
.asCompatibleSubstituteFor("redis"),
redisConfig.port(),
composeProjectBuildItem.getDefaultNetworkId(),
useSharedNetwork)
.withEnv(redisConfig.containerEnv())
// Dev Service discovery works using a global dev service label applied in DevServicesCustomizerBuildItem
// for backwards compatibility we still add the custom label
.withSharedServiceLabel(launchMode.getLaunchMode(), redisConfig.serviceName()))
.configProvider(
Map.of(getPropertyName(name, HOSTS), s -> REDIS_SCHEME + s.getConnectionInfo()))
.build());
}
}
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
/**
* The ideal re-use precedence order is the following:
* 1. Re-use existing dev service/container if one with compatible config exists (only knowable post-augmentation, or on the
* second run of a continuous testing session)
* 2. Use the container locator to find an external service (where applicable) (knowable at augmentation)
* 3. Create a new container
* This swaps 1 and 2, but that's actually ok. If an external service exists and is valid for this configuration,
* any matching service would be using it, so option 1 (an existing internal container) can't happen.
* If there's no external service, then the order is 1 and then 3, which is what we want.
* Because of how the labelling works, dev services we create will not be detected by the locator.
* The check for running services happens in RunnableDevService.start(), because it has to happen at runtime, not during
* augmentation.
* We cannot assume the order of container creation in augmentation would be the same as the runtime order.
*
* The container locator might find services from other tests, which would not be ok because they'd have the wrong config
* We can be fairly confident this isn't happening because of the tests showing config is honoured, but if we wanted to be
* extra sure we'd put on a special 'not external' label and filter for that, too
*/
private DevServicesResultBuildItem discoverRunningService(DevServicesComposeProjectBuildItem composeProjectBuildItem,
String name,
io.quarkus.redis.deployment.client.DevServicesConfig devServicesConfig,
LaunchMode launchMode,
boolean useSharedNetwork) {
return redisContainerLocator.locateContainer(devServicesConfig.serviceName(), devServicesConfig.shared(), launchMode)
.or(() -> ComposeLocator.locateContainer(composeProjectBuildItem,
List.of(devServicesConfig.imageName().orElseGet(() -> getDefaultImageNameFor("redis"))),
REDIS_EXPOSED_PORT, launchMode, useSharedNetwork))
.map(containerAddress -> {
String redisUrl = REDIS_SCHEME + containerAddress.getUrl();
return DevServicesResultBuildItem.discovered()
.feature(Feature.REDIS_CLIENT)
.containerId(containerAddress.getId())
.config(Map.of(RedisConfig.getPropertyName(name, HOSTS), redisUrl))
.build();
}).orElse(null);
}
private static boolean redisDevServicesEnabled(DockerStatusBuildItem dockerStatusBuildItem, String name,
io.quarkus.redis.deployment.client.DevServicesConfig devServicesConfig) {
if (!devServicesConfig.enabled()) {
// explicitly disabled
log.debug("Not starting devservices for " + (RedisConfig.isDefaultClient(name) ? "default redis client" : name)
+ " as it has been disabled in the config");
return true;
}
// TODO - We shouldn't query runtime config during deployment
boolean needToStart = !ConfigUtils.isPropertyNonEmpty(RedisConfig.getPropertyName(name, HOSTS));
if (!needToStart) {
log.debug("Not starting dev services for " + (RedisConfig.isDefaultClient(name) ? "default redis client" : name)
+ " as hosts have been provided");
return true;
}
if (!dockerStatusBuildItem.isContainerRuntimeAvailable()) {
log.warn("Please configure quarkus.redis.hosts for "
+ (RedisConfig.isDefaultClient(name) ? "default redis client" : name)
+ " or get a working docker instance");
return true;
}
return false;
}
private static
|
DevServicesRedisProcessor
|
java
|
apache__thrift
|
lib/java/src/main/java/org/apache/thrift/server/Invocation.java
|
{
"start": 338,
"end": 579
}
|
class ____ implements Runnable {
private final FrameBuffer frameBuffer;
public Invocation(final FrameBuffer frameBuffer) {
this.frameBuffer = frameBuffer;
}
@Override
public void run() {
frameBuffer.invoke();
}
}
|
Invocation
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestLoadTypedBytes.java
|
{
"start": 1464,
"end": 3294
}
|
class ____ {
@Test
public void testLoading() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
FileSystem fs = cluster.getFileSystem();
ByteArrayOutputStream out = new ByteArrayOutputStream();
TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(out));
for (int i = 0; i < 100; i++) {
tboutput.write(new Long(i)); // key
tboutput.write("" + (10 * i)); // value
}
InputStream isBackup = System.in;
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
System.setIn(in);
LoadTypedBytes loadtb = new LoadTypedBytes(conf);
try {
Path root = new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
String[] args = new String[1];
args[0] = "/typedbytestest/test.seq";
int ret = loadtb.run(args);
assertEquals(0, ret, "Return value != 0.");
Path file = new Path(root, "test.seq");
assertTrue(fs.exists(file));
SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
int counter = 0;
TypedBytesWritable key = new TypedBytesWritable();
TypedBytesWritable value = new TypedBytesWritable();
while (reader.next(key, value)) {
assertEquals(Long.class, key.getValue().getClass());
assertEquals(String.class, value.getValue().getClass());
assertTrue(Integer.parseInt(value.toString()) % 10 == 0, "Invalid record.");
counter++;
}
assertEquals(100, counter, "Wrong number of records.");
} finally {
try {
fs.close();
} catch (Exception e) {
}
System.setIn(isBackup);
cluster.shutdown();
}
}
}
|
TestLoadTypedBytes
|
java
|
netty__netty
|
transport-native-kqueue/src/test/java/io/netty/channel/kqueue/KQueueETSocketDataReadInitialStateTest.java
|
{
"start": 915,
"end": 1193
}
|
class ____ extends SocketDataReadInitialStateTest {
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return KQueueSocketTestPermutation.INSTANCE.socket();
}
}
|
KQueueETSocketDataReadInitialStateTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
|
{
"start": 1422,
"end": 3584
}
|
class ____ extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
@BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class);
// Many of the tests expect a replication value of 1 in the output
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
// Build racks and hosts configuration to test dfsAdmin -printTopology
String [] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
"/rack2", "/rack3", "/rack4", "/rack4" };
String [] hosts = {"host1", "host2", "host3", "host4",
"host5", "host6", "host7", "host8" };
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
.racks(racks)
.hosts(hosts)
.build();
dfsCluster.waitClusterUp();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
assertTrue(fs instanceof DistributedFileSystem,
"Not a HDFS: " + fs.getUri());
}
@Override
protected String getTestFile() {
return "testHDFSConf.xml";
}
@AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
fs = null;
}
if (dfsCluster != null) {
dfsCluster.shutdown();
dfsCluster = null;
}
Thread.sleep(2000);
super.tearDown();
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll () {
super.testAll();
}
}
|
TestHDFSCLI
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/typeutils/ExternalSerializerTest.java
|
{
"start": 3493,
"end": 4742
}
|
class ____ extends ExternalSerializerTest {
public ExternalSerializer4Test() {
super(
TestSpec.forDataType(
DataTypes.ARRAY(
DataTypes.STRUCTURED(
ImmutableTestPojo.class,
DataTypes.FIELD("age", DataTypes.INT()),
DataTypes.FIELD(
"name", DataTypes.STRING())))
.bridgedTo(List.class))
.addInstance(
Collections.singletonList(new ImmutableTestPojo(12, "Bob")))
.addInstance(
Arrays.asList(
new ImmutableTestPojo(42, "Alice"),
null,
null,
new ImmutableTestPojo(42, null))));
}
}
static final
|
ExternalSerializer4Test
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/param/MySqlParameterizedOutputVisitorTest_37.java
|
{
"start": 485,
"end": 1514
}
|
class ____ extends TestCase {
public void test_for_parameterize() throws Exception {
final DbType dbType = JdbcConstants.MYSQL;
String sql = "insert into fc_sms_0011_201704 (c1, c2, c3) values (1, 'a', 'b')";
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbType);
List<SQLStatement> stmtList = parser.parseStatementList();
SQLStatement statement = stmtList.get(0);
StringBuilder out = new StringBuilder();
// List<Object> parameters = new ArrayList<Object>();
SQLASTOutputVisitor visitor = SQLUtils.createOutputVisitor(out, JdbcConstants.MYSQL);
visitor.setParameterized(true);
visitor.setParameterizedMergeInList(true);
// visitor.setParameters(parameters);
visitor.setExportTables(true);
visitor.setPrettyFormat(false);
statement.accept(visitor);
assertEquals("INSERT INTO fc_sms (c1, c2, c3) VALUES (?, ?, ?)", out.toString());
}
}
|
MySqlParameterizedOutputVisitorTest_37
|
java
|
apache__camel
|
components/camel-stitch/src/test/java/org/apache/camel/component/stitch/client/models/StitchResponseTest.java
|
{
"start": 1073,
"end": 1712
}
|
class ____ {
@Test
void testIfCreateStitchResponse() {
final Map<String, Object> headers = new LinkedHashMap<>();
headers.put("test_header_1", "test1");
headers.put("test_header_2", "test2");
final StitchResponse response = new StitchResponse(200, headers, "test", "testing");
final String responseAsJson = JsonUtils.convertMapToJson(response.toMap());
assertEquals("{\"code\":200,\"headers\":{\"test_header_1\":\"test1\",\"test_header_2\":\"test2\"},"
+ "\"status\":\"test\",\"message\":\"testing\"}",
responseAsJson);
}
}
|
StitchResponseTest
|
java
|
quarkusio__quarkus
|
integration-tests/smallrye-config/src/test/java/io/quarkus/it/smallrye/config/ConfigurableSourceTest.java
|
{
"start": 286,
"end": 570
}
|
class ____ {
@Test
void configurableSource() {
given()
.get("/config/{name}", "database.user.naruto")
.then()
.statusCode(OK.getStatusCode())
.body("value", equalTo("uzumaki"));
}
}
|
ConfigurableSourceTest
|
java
|
alibaba__fastjson
|
src/main/java/com/alibaba/fastjson/util/Function.java
|
{
"start": 43,
"end": 175
}
|
interface ____<ARG, V> {
/**
* Computes a result
*
* @return computed result
*/
V apply(ARG arg);
}
|
Function
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/oncrpc/TestRpcCallCache.java
|
{
"start": 1575,
"end": 4945
}
|
class ____ {
@Test
public void testRpcCallCacheConstructorIllegalArgument0() {
assertThrows(IllegalArgumentException.class, () ->
new RpcCallCache("test", 0));
}
@Test
public void testRpcCallCacheConstructorIllegalArgumentNegative() {
assertThrows(IllegalArgumentException.class, () ->
new RpcCallCache("test", -1));
}
@Test
public void testRpcCallCacheConstructor(){
RpcCallCache cache = new RpcCallCache("test", 100);
assertEquals("test", cache.getProgram());
}
@Test
public void testAddRemoveEntries() throws UnknownHostException {
RpcCallCache cache = new RpcCallCache("test", 100);
InetAddress clientIp = InetAddress.getByName("1.1.1.1");
int xid = 100;
// Ensure null is returned when there is no entry in the cache
// An entry is added to indicate the request is in progress
CacheEntry e = cache.checkOrAddToCache(clientIp, xid);
assertNull(e);
e = cache.checkOrAddToCache(clientIp, xid);
validateInprogressCacheEntry(e);
// Set call as completed
RpcResponse response = mock(RpcResponse.class);
cache.callCompleted(clientIp, xid, response);
e = cache.checkOrAddToCache(clientIp, xid);
validateCompletedCacheEntry(e, response);
}
private void validateInprogressCacheEntry(CacheEntry c) {
assertTrue(c.isInProgress());
assertFalse(c.isCompleted());
assertNull(c.getResponse());
}
private void validateCompletedCacheEntry(CacheEntry c, RpcResponse response) {
assertFalse(c.isInProgress());
assertTrue(c.isCompleted());
assertEquals(response, c.getResponse());
}
@Test
public void testCacheEntry() {
CacheEntry c = new CacheEntry();
validateInprogressCacheEntry(c);
assertTrue(c.isInProgress());
assertFalse(c.isCompleted());
assertNull(c.getResponse());
RpcResponse response = mock(RpcResponse.class);
c.setResponse(response);
validateCompletedCacheEntry(c, response);
}
@Test
public void testCacheFunctionality() throws UnknownHostException {
RpcCallCache cache = new RpcCallCache("Test", 10);
// Add 20 entries to the cache and only last 10 should be retained
int size = 0;
for (int clientId = 0; clientId < 20; clientId++) {
InetAddress clientIp = InetAddress.getByName("1.1.1."+clientId);
System.out.println("Adding " + clientIp);
cache.checkOrAddToCache(clientIp, 0);
size = Math.min(++size, 10);
System.out.println("Cache size " + cache.size());
assertEquals(size, cache.size()); // Ensure the cache size is correct
// Ensure the cache entries are correct
int startEntry = Math.max(clientId - 10 + 1, 0);
Iterator<Entry<ClientRequest, CacheEntry>> iterator = cache.iterator();
for (int i = 0; i < size; i++) {
ClientRequest key = iterator.next().getKey();
System.out.println("Entry " + key.getClientId());
assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),
key.getClientId());
}
// Ensure cache entries are returned as in progress.
for (int i = 0; i < size; i++) {
CacheEntry e = cache.checkOrAddToCache(
InetAddress.getByName("1.1.1." + (startEntry + i)), 0);
assertNotNull(e);
assertTrue(e.isInProgress());
assertFalse(e.isCompleted());
}
}
}
}
|
TestRpcCallCache
|
java
|
grpc__grpc-java
|
okhttp/src/main/java/io/grpc/okhttp/OkHttpSettingsUtil.java
|
{
"start": 763,
"end": 1380
}
|
class ____ {
public static final int MAX_CONCURRENT_STREAMS = Settings.MAX_CONCURRENT_STREAMS;
public static final int INITIAL_WINDOW_SIZE = Settings.INITIAL_WINDOW_SIZE;
public static final int MAX_HEADER_LIST_SIZE = Settings.MAX_HEADER_LIST_SIZE;
public static final int ENABLE_PUSH = Settings.ENABLE_PUSH;
public static boolean isSet(Settings settings, int id) {
return settings.isSet(id);
}
public static int get(Settings settings, int id) {
return settings.get(id);
}
public static void set(Settings settings, int id, int value) {
settings.set(id, 0, value);
}
}
|
OkHttpSettingsUtil
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/android/FragmentInjectionTest.java
|
{
"start": 10170,
"end": 10805
}
|
class ____ extends PreferenceActivity {
// BUG: Diagnostic contains: isValidFragment unconditionally returns true
protected boolean isValidFragment(String fragment) {
if ("VALID_FRAGMENT".equals(fragment)) {
return true;
}
return true;
}
}
""")
.doTest();
}
@Test
public void finalLocalVariableIsConstant() {
compilationHelper
.addSourceLines(
"MyPrefActivity.java",
"""
import android.preference.PreferenceActivity;
|
MyPrefActivity
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/IncompleteBatches.java
|
{
"start": 1110,
"end": 2205
}
|
class ____ {
private final Set<ProducerBatch> incomplete;
public IncompleteBatches() {
this.incomplete = new HashSet<>();
}
public void add(ProducerBatch batch) {
synchronized (incomplete) {
this.incomplete.add(batch);
}
}
public void remove(ProducerBatch batch) {
synchronized (incomplete) {
boolean removed = this.incomplete.remove(batch);
if (!removed)
throw new IllegalStateException("Remove from the incomplete set failed. This should be impossible.");
}
}
public Iterable<ProducerBatch> copyAll() {
synchronized (incomplete) {
return new ArrayList<>(this.incomplete);
}
}
public Iterable<ProduceRequestResult> requestResults() {
synchronized (incomplete) {
return incomplete.stream().map(batch -> batch.produceFuture).collect(Collectors.toList());
}
}
public boolean isEmpty() {
synchronized (incomplete) {
return incomplete.isEmpty();
}
}
}
|
IncompleteBatches
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/builder/annotation/MapperAnnotationBuilder.java
|
{
"start": 4166,
"end": 29893
}
|
class ____ {
private static final Set<Class<? extends Annotation>> statementAnnotationTypes = Stream
.of(Select.class, Update.class, Insert.class, Delete.class, SelectProvider.class, UpdateProvider.class,
InsertProvider.class, DeleteProvider.class)
.collect(Collectors.toSet());
private final Configuration configuration;
private final MapperBuilderAssistant assistant;
private final Class<?> type;
public MapperAnnotationBuilder(Configuration configuration, Class<?> type) {
String resource = type.getName().replace('.', '/') + ".java (best guess)";
this.assistant = new MapperBuilderAssistant(configuration, resource);
this.configuration = configuration;
this.type = type;
}
public void parse() {
String resource = type.toString();
if (!configuration.isResourceLoaded(resource)) {
loadXmlResource();
configuration.addLoadedResource(resource);
assistant.setCurrentNamespace(type.getName());
parseCache();
parseCacheRef();
for (Method method : type.getMethods()) {
if (!canHaveStatement(method)) {
continue;
}
if (getAnnotationWrapper(method, false, Select.class, SelectProvider.class).isPresent()
&& method.getAnnotation(ResultMap.class) == null) {
parseResultMap(method);
}
try {
parseStatement(method);
} catch (IncompleteElementException e) {
configuration.addIncompleteMethod(new MethodResolver(this, method));
}
}
}
configuration.parsePendingMethods(false);
}
private static boolean canHaveStatement(Method method) {
// issue #237
return !method.isBridge() && !method.isDefault();
}
private void loadXmlResource() {
// Spring may not know the real resource name so we check a flag
// to prevent loading again a resource twice
// this flag is set at XMLMapperBuilder#bindMapperForNamespace
if (!configuration.isResourceLoaded("namespace:" + type.getName())) {
String xmlResource = type.getName().replace('.', '/') + ".xml";
// #1347
InputStream inputStream = type.getResourceAsStream("/" + xmlResource);
if (inputStream == null) {
// Search XML mapper that is not in the module but in the classpath.
try {
inputStream = Resources.getResourceAsStream(type.getClassLoader(), xmlResource);
} catch (IOException e2) {
// ignore, resource is not required
}
}
if (inputStream != null) {
XMLMapperBuilder xmlParser = new XMLMapperBuilder(inputStream, assistant.getConfiguration(), xmlResource,
configuration.getSqlFragments(), type);
xmlParser.parse();
}
}
}
private void parseCache() {
CacheNamespace cacheDomain = type.getAnnotation(CacheNamespace.class);
if (cacheDomain != null) {
Integer size = cacheDomain.size() == 0 ? null : cacheDomain.size();
Long flushInterval = cacheDomain.flushInterval() == 0 ? null : cacheDomain.flushInterval();
Properties props = convertToProperties(cacheDomain.properties());
assistant.useNewCache(cacheDomain.implementation(), cacheDomain.eviction(), flushInterval, size,
cacheDomain.readWrite(), cacheDomain.blocking(), props);
}
}
private Properties convertToProperties(Property[] properties) {
if (properties.length == 0) {
return null;
}
Properties props = new Properties();
for (Property property : properties) {
props.setProperty(property.name(), PropertyParser.parse(property.value(), configuration.getVariables()));
}
return props;
}
private void parseCacheRef() {
CacheNamespaceRef cacheDomainRef = type.getAnnotation(CacheNamespaceRef.class);
if (cacheDomainRef != null) {
Class<?> refType = cacheDomainRef.value();
String refName = cacheDomainRef.name();
if (refType == void.class && refName.isEmpty()) {
throw new BuilderException("Should be specified either value() or name() attribute in the @CacheNamespaceRef");
}
if (refType != void.class && !refName.isEmpty()) {
throw new BuilderException("Cannot use both value() and name() attribute in the @CacheNamespaceRef");
}
String namespace = refType != void.class ? refType.getName() : refName;
try {
assistant.useCacheRef(namespace);
} catch (IncompleteElementException e) {
configuration.addIncompleteCacheRef(new CacheRefResolver(assistant, namespace));
}
}
}
private String parseResultMap(Method method) {
Class<?> returnType = getReturnType(method, type);
Arg[] args = method.getAnnotationsByType(Arg.class);
Result[] results = method.getAnnotationsByType(Result.class);
TypeDiscriminator typeDiscriminator = method.getAnnotation(TypeDiscriminator.class);
String resultMapId = generateResultMapName(method);
applyResultMap(resultMapId, returnType, args, results, typeDiscriminator);
return resultMapId;
}
private String generateResultMapName(Method method) {
Results results = method.getAnnotation(Results.class);
if (results != null && !results.id().isEmpty()) {
return type.getName() + "." + results.id();
}
StringBuilder suffix = new StringBuilder();
for (Class<?> c : method.getParameterTypes()) {
suffix.append("-");
suffix.append(c.getSimpleName());
}
if (suffix.length() < 1) {
suffix.append("-void");
}
return type.getName() + "." + method.getName() + suffix;
}
private void applyResultMap(String resultMapId, Class<?> returnType, Arg[] args, Result[] results,
TypeDiscriminator discriminator) {
List<ResultMapping> resultMappings = new ArrayList<>();
applyConstructorArgs(args, returnType, resultMappings, resultMapId);
applyResults(results, returnType, resultMappings);
Discriminator disc = applyDiscriminator(resultMapId, returnType, discriminator);
// TODO add AutoMappingBehaviour
assistant.addResultMap(resultMapId, returnType, null, disc, resultMappings, null);
createDiscriminatorResultMaps(resultMapId, returnType, discriminator);
}
private void createDiscriminatorResultMaps(String resultMapId, Class<?> resultType, TypeDiscriminator discriminator) {
if (discriminator != null) {
for (Case c : discriminator.cases()) {
String caseResultMapId = resultMapId + "-" + c.value();
List<ResultMapping> resultMappings = new ArrayList<>();
// issue #136
applyConstructorArgs(c.constructArgs(), resultType, resultMappings, resultMapId);
applyResults(c.results(), resultType, resultMappings);
// TODO add AutoMappingBehaviour
assistant.addResultMap(caseResultMapId, c.type(), resultMapId, null, resultMappings, null);
}
}
}
private Discriminator applyDiscriminator(String resultMapId, Class<?> resultType, TypeDiscriminator discriminator) {
if (discriminator != null) {
String column = discriminator.column();
Class<?> javaType = discriminator.javaType() == void.class ? String.class : discriminator.javaType();
JdbcType jdbcType = discriminator.jdbcType() == JdbcType.UNDEFINED ? null : discriminator.jdbcType();
@SuppressWarnings("unchecked")
Class<? extends TypeHandler<?>> typeHandler = (Class<? extends TypeHandler<?>>) (discriminator
.typeHandler() == UnknownTypeHandler.class ? null : discriminator.typeHandler());
Case[] cases = discriminator.cases();
Map<String, String> discriminatorMap = new HashMap<>();
for (Case c : cases) {
String value = c.value();
String caseResultMapId = resultMapId + "-" + value;
discriminatorMap.put(value, caseResultMapId);
}
return assistant.buildDiscriminator(resultType, column, javaType, jdbcType, typeHandler, discriminatorMap);
}
return null;
}
  /**
   * Parses one mapper method into a {@code MappedStatement} and registers it with the assistant.
   * Does nothing when the method carries no statement annotation; with {@code errorIfNoMatch=true}
   * a mismatching databaseId raises a BuilderException instead.
   */
  void parseStatement(Method method) {
    final Class<?> parameterTypeClass = getParameterType(method);
    final ParamNameResolver paramNameResolver = new ParamNameResolver(configuration, method, type);
    final LanguageDriver languageDriver = getLanguageDriver(method);
    getAnnotationWrapper(method, true, statementAnnotationTypes).ifPresent(statementAnnotation -> {
      final SqlSource sqlSource = buildSqlSource(statementAnnotation.getAnnotation(), parameterTypeClass,
          paramNameResolver, languageDriver, method);
      final SqlCommandType sqlCommandType = statementAnnotation.getSqlCommandType();
      final Options options = getAnnotationWrapper(method, false, Options.class).map(x -> (Options) x.getAnnotation())
          .orElse(null);
      // statement id = fully-qualified mapper name + "." + method name
      final String mappedStatementId = type.getName() + "." + method.getName();
      final KeyGenerator keyGenerator;
      String keyProperty = null;
      String keyColumn = null;
      // Key generation only applies to INSERT/UPDATE statements.
      if (SqlCommandType.INSERT.equals(sqlCommandType) || SqlCommandType.UPDATE.equals(sqlCommandType)) {
        // first check for SelectKey annotation - that overrides everything else
        SelectKey selectKey = getAnnotationWrapper(method, false, SelectKey.class)
            .map(x -> (SelectKey) x.getAnnotation()).orElse(null);
        if (selectKey != null) {
          keyGenerator = handleSelectKeyAnnotation(selectKey, mappedStatementId, getParameterType(method),
              paramNameResolver, languageDriver);
          keyProperty = selectKey.keyProperty();
        } else if (options == null) {
          // no @Options: fall back to the global useGeneratedKeys setting
          keyGenerator = configuration.isUseGeneratedKeys() ? Jdbc3KeyGenerator.INSTANCE : NoKeyGenerator.INSTANCE;
        } else {
          keyGenerator = options.useGeneratedKeys() ? Jdbc3KeyGenerator.INSTANCE : NoKeyGenerator.INSTANCE;
          keyProperty = options.keyProperty();
          keyColumn = options.keyColumn();
        }
      } else {
        keyGenerator = NoKeyGenerator.INSTANCE;
      }
      // Defaults; may be overridden by @Options below.
      Integer fetchSize = null;
      Integer timeout = null;
      StatementType statementType = StatementType.PREPARED;
      ResultSetType resultSetType = configuration.getDefaultResultSetType();
      boolean isSelect = sqlCommandType == SqlCommandType.SELECT;
      boolean flushCache = !isSelect;
      boolean useCache = isSelect;
      if (options != null) {
        if (FlushCachePolicy.TRUE.equals(options.flushCache())) {
          flushCache = true;
        } else if (FlushCachePolicy.FALSE.equals(options.flushCache())) {
          flushCache = false;
        }
        // FlushCachePolicy.DEFAULT keeps the command-type-based default above
        useCache = options.useCache();
        // issue #348
        fetchSize = options.fetchSize() > -1 || options.fetchSize() == Integer.MIN_VALUE ? options.fetchSize() : null;
        timeout = options.timeout() > -1 ? options.timeout() : null;
        statementType = options.statementType();
        if (options.resultSetType() != ResultSetType.DEFAULT) {
          resultSetType = options.resultSetType();
        }
      }
      String resultMapId = null;
      if (isSelect) {
        // an explicit @ResultMap wins over any @Results-generated map
        ResultMap resultMapAnnotation = method.getAnnotation(ResultMap.class);
        if (resultMapAnnotation != null) {
          resultMapId = String.join(",", resultMapAnnotation.value());
        } else {
          resultMapId = generateResultMapName(method);
        }
      }
      assistant.addMappedStatement(mappedStatementId, sqlSource, statementType, sqlCommandType, fetchSize, timeout,
          // ParameterMapID
          null, parameterTypeClass, resultMapId, getReturnType(method, type), resultSetType, flushCache, useCache,
          // TODO gcode issue #577
          false, keyGenerator, keyProperty, keyColumn, statementAnnotation.getDatabaseId(), languageDriver,
          // ResultSets
          options != null ? nullOrEmpty(options.resultSets()) : null, statementAnnotation.isDirtySelect(),
          paramNameResolver);
    });
  }
private LanguageDriver getLanguageDriver(Method method) {
Lang lang = method.getAnnotation(Lang.class);
Class<? extends LanguageDriver> langClass = null;
if (lang != null) {
langClass = lang.value();
}
return configuration.getLanguageDriver(langClass);
}
  /**
   * Determines the statement parameter type of a mapper method.
   * RowBounds/ResultHandler parameters are ignored. A single unnamed parameter keeps its own
   * type; multiple actual parameters, or any {@code @Param}-annotated one, resolve to
   * {@link ParamMap} so values are passed as a named map.
   */
  private Class<?> getParameterType(Method method) {
    Class<?> parameterType = null;
    Parameter[] parameters = method.getParameters();
    for (Parameter param : parameters) {
      Class<?> paramType = param.getType();
      // special framework parameters are not part of the statement's parameter object
      if (RowBounds.class.isAssignableFrom(paramType) || ResultHandler.class.isAssignableFrom(paramType)) {
        continue;
      }
      if (parameterType == null && param.getAnnotation(Param.class) == null) {
        parameterType = paramType;
      } else {
        // second real parameter, or an @Param-annotated one -> use the multi-param map
        return ParamMap.class;
      }
    }
    return parameterType;
  }
  /**
   * Resolves the effective result element type of a mapper method declared on {@code type}.
   * Unwraps arrays to their component type, Collection/Cursor to their element type,
   * {@code @MapKey}-annotated Maps to their value type, and Optional to its wrapped type;
   * a {@code void} return may be overridden via {@code @ResultType}.
   */
  private static Class<?> getReturnType(Method method, Class<?> type) {
    Class<?> returnType = method.getReturnType();
    // resolve type variables against the concrete mapper interface
    Type resolvedReturnType = TypeParameterResolver.resolveReturnType(method, type);
    if (resolvedReturnType instanceof Class) {
      returnType = (Class<?>) resolvedReturnType;
      if (returnType.isArray()) {
        returnType = returnType.getComponentType();
      }
      // gcode issue #508
      if (void.class.equals(returnType)) {
        ResultType rt = method.getAnnotation(ResultType.class);
        if (rt != null) {
          returnType = rt.value();
        }
      }
    } else if (resolvedReturnType instanceof ParameterizedType) {
      ParameterizedType parameterizedType = (ParameterizedType) resolvedReturnType;
      Class<?> rawType = (Class<?>) parameterizedType.getRawType();
      if (Collection.class.isAssignableFrom(rawType) || Cursor.class.isAssignableFrom(rawType)) {
        Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
        if (actualTypeArguments != null && actualTypeArguments.length == 1) {
          Type returnTypeParameter = actualTypeArguments[0];
          if (returnTypeParameter instanceof Class<?>) {
            returnType = (Class<?>) returnTypeParameter;
          } else if (returnTypeParameter instanceof ParameterizedType) {
            // (gcode issue #443) actual type can be a also a parameterized type
            returnType = (Class<?>) ((ParameterizedType) returnTypeParameter).getRawType();
          } else if (returnTypeParameter instanceof GenericArrayType) {
            Class<?> componentType = (Class<?>) ((GenericArrayType) returnTypeParameter).getGenericComponentType();
            // (gcode issue #525) support List<byte[]>
            returnType = Array.newInstance(componentType, 0).getClass();
          }
        }
      } else if (method.isAnnotationPresent(MapKey.class) && Map.class.isAssignableFrom(rawType)) {
        // (gcode issue 504) Do not look into Maps if there is not MapKey annotation
        Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
        if (actualTypeArguments != null && actualTypeArguments.length == 2) {
          // index 1 = the Map's value type; the key is produced by @MapKey
          Type returnTypeParameter = actualTypeArguments[1];
          if (returnTypeParameter instanceof Class<?>) {
            returnType = (Class<?>) returnTypeParameter;
          } else if (returnTypeParameter instanceof ParameterizedType) {
            // (gcode issue 443) actual type can be a also a parameterized type
            returnType = (Class<?>) ((ParameterizedType) returnTypeParameter).getRawType();
          }
        }
      } else if (Optional.class.equals(rawType)) {
        Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
        Type returnTypeParameter = actualTypeArguments[0];
        if (returnTypeParameter instanceof Class<?>) {
          returnType = (Class<?>) returnTypeParameter;
        }
      }
    }
    return returnType;
  }
  /**
   * Converts each {@code @Result} annotation into a {@link ResultMapping} and appends it to
   * {@code resultMappings}. Nested selects and nested result maps (via {@code @One}/{@code @Many})
   * are mutually exclusive per result; the helpers below enforce that.
   */
  private void applyResults(Result[] results, Class<?> resultType, List<ResultMapping> resultMappings) {
    for (Result result : results) {
      List<ResultFlag> flags = new ArrayList<>();
      if (result.id()) {
        flags.add(ResultFlag.ID);
      }
      // UnknownTypeHandler is the annotation's "not set" marker
      @SuppressWarnings("unchecked")
      Class<? extends TypeHandler<?>> typeHandler = (Class<? extends TypeHandler<?>>) (result
          .typeHandler() == UnknownTypeHandler.class ? null : result.typeHandler());
      boolean hasNestedResultMap = hasNestedResultMap(result);
      ResultMapping resultMapping = assistant.buildResultMapping(resultType, nullOrEmpty(result.property()),
          nullOrEmpty(result.column()), result.javaType() == void.class ? null : result.javaType(),
          result.jdbcType() == JdbcType.UNDEFINED ? null : result.jdbcType(),
          hasNestedSelect(result) ? nestedSelectId(result) : null,
          hasNestedResultMap ? nestedResultMapId(result) : null, null,
          hasNestedResultMap ? findColumnPrefix(result) : null, typeHandler, flags, null, null, isLazy(result));
      resultMappings.add(resultMapping);
    }
  }
private String findColumnPrefix(Result result) {
String columnPrefix = result.one().columnPrefix();
if (columnPrefix.isEmpty()) {
columnPrefix = result.many().columnPrefix();
}
return columnPrefix;
}
private String nestedResultMapId(Result result) {
String resultMapId = result.one().resultMap();
if (resultMapId.isEmpty()) {
resultMapId = result.many().resultMap();
}
if (!resultMapId.contains(".")) {
resultMapId = type.getName() + "." + resultMapId;
}
return resultMapId;
}
private boolean hasNestedResultMap(Result result) {
if (!result.one().resultMap().isEmpty() && !result.many().resultMap().isEmpty()) {
throw new BuilderException("Cannot use both @One and @Many annotations in the same @Result");
}
return !result.one().resultMap().isEmpty() || !result.many().resultMap().isEmpty();
}
private String nestedSelectId(Result result) {
String nestedSelect = result.one().select();
if (nestedSelect.isEmpty()) {
nestedSelect = result.many().select();
}
if (!nestedSelect.contains(".")) {
nestedSelect = type.getName() + "." + nestedSelect;
}
return nestedSelect;
}
private boolean isLazy(Result result) {
boolean isLazy = configuration.isLazyLoadingEnabled();
if (!result.one().select().isEmpty() && FetchType.DEFAULT != result.one().fetchType()) {
isLazy = result.one().fetchType() == FetchType.LAZY;
} else if (!result.many().select().isEmpty() && FetchType.DEFAULT != result.many().fetchType()) {
isLazy = result.many().fetchType() == FetchType.LAZY;
}
return isLazy;
}
private boolean hasNestedSelect(Result result) {
if (!result.one().select().isEmpty() && !result.many().select().isEmpty()) {
throw new BuilderException("Cannot use both @One and @Many annotations in the same @Result");
}
return !result.one().select().isEmpty() || !result.many().select().isEmpty();
}
  /**
   * Converts {@code @Arg} annotations into constructor {@link ResultMapping}s, then runs the
   * constructor resolver to match them against an actual constructor of {@code resultType}
   * before appending them to {@code resultMappings}.
   */
  private void applyConstructorArgs(Arg[] args, Class<?> resultType, List<ResultMapping> resultMappings,
      String resultMapId) {
    final List<ResultMapping> mappings = new ArrayList<>();
    for (Arg arg : args) {
      List<ResultFlag> flags = new ArrayList<>();
      flags.add(ResultFlag.CONSTRUCTOR);
      if (arg.id()) {
        flags.add(ResultFlag.ID);
      }
      // UnknownTypeHandler is the annotation's "not set" marker
      @SuppressWarnings("unchecked")
      Class<? extends TypeHandler<?>> typeHandler = (Class<? extends TypeHandler<?>>) (arg
          .typeHandler() == UnknownTypeHandler.class ? null : arg.typeHandler());
      ResultMapping resultMapping = assistant.buildResultMapping(resultType, nullOrEmpty(arg.name()),
          nullOrEmpty(arg.column()), arg.javaType() == void.class ? null : arg.javaType(),
          arg.jdbcType() == JdbcType.UNDEFINED ? null : arg.jdbcType(), nullOrEmpty(arg.select()),
          nullOrEmpty(arg.resultMap()), null, nullOrEmpty(arg.columnPrefix()), typeHandler, flags, null, null, false);
      mappings.add(resultMapping);
    }
    // validates arg count/order against resultType's constructors and fills in missing metadata
    final ResultMappingConstructorResolver resolver = new ResultMappingConstructorResolver(configuration, mappings,
        resultType, resultMapId);
    resultMappings.addAll(resolver.resolveWithConstructor());
  }
private String nullOrEmpty(String value) {
return value == null || value.trim().isEmpty() ? null : value;
}
  /**
   * Registers the key-retrieving SELECT declared by {@code @SelectKey} as its own mapped
   * statement (id = owning statement id + the select-key suffix) and returns a
   * {@link SelectKeyGenerator} wired to it.
   */
  private KeyGenerator handleSelectKeyAnnotation(SelectKey selectKeyAnnotation, String baseStatementId,
      Class<?> parameterTypeClass, ParamNameResolver paramNameResolver, LanguageDriver languageDriver) {
    String id = baseStatementId + SelectKeyGenerator.SELECT_KEY_SUFFIX;
    Class<?> resultTypeClass = selectKeyAnnotation.resultType();
    StatementType statementType = selectKeyAnnotation.statementType();
    String keyProperty = selectKeyAnnotation.keyProperty();
    String keyColumn = selectKeyAnnotation.keyColumn();
    // true = run the key query before the main statement (e.g. sequence NEXTVAL)
    boolean executeBefore = selectKeyAnnotation.before();
    // defaults
    boolean useCache = false;
    KeyGenerator keyGenerator = NoKeyGenerator.INSTANCE;
    Integer fetchSize = null;
    Integer timeout = null;
    boolean flushCache = false;
    String parameterMap = null;
    String resultMap = null;
    ResultSetType resultSetTypeEnum = null;
    String databaseId = selectKeyAnnotation.databaseId().isEmpty() ? null : selectKeyAnnotation.databaseId();
    SqlSource sqlSource = buildSqlSourceFromStrings(selectKeyAnnotation.statement(), parameterTypeClass,
        paramNameResolver, languageDriver);
    SqlCommandType sqlCommandType = SqlCommandType.SELECT;
    assistant.addMappedStatement(id, sqlSource, statementType, sqlCommandType, fetchSize, timeout, parameterMap,
        parameterTypeClass, resultMap, resultTypeClass, resultSetTypeEnum, flushCache, useCache, false, keyGenerator,
        keyProperty, keyColumn, databaseId, languageDriver, null, false, paramNameResolver);
    // re-fetch under the namespaced id so the generator references the registered statement
    id = assistant.applyCurrentNamespace(id, false);
    MappedStatement keyStatement = configuration.getMappedStatement(id, false);
    SelectKeyGenerator answer = new SelectKeyGenerator(keyStatement, executeBefore);
    configuration.addKeyGenerator(id, answer);
    return answer;
  }
  /**
   * Builds the {@link SqlSource} for a statement annotation. The script-carrying annotations
   * (@Select/@Update/@Insert/@Delete/@SelectKey) are parsed from their inline SQL strings;
   * anything else is assumed to be a *Provider annotation and delegated to ProviderSqlSource.
   */
  private SqlSource buildSqlSource(Annotation annotation, Class<?> parameterType, ParamNameResolver paramNameResolver,
      LanguageDriver languageDriver, Method method) {
    if (annotation instanceof Select) {
      return buildSqlSourceFromStrings(((Select) annotation).value(), parameterType, paramNameResolver, languageDriver);
    } else if (annotation instanceof Update) {
      return buildSqlSourceFromStrings(((Update) annotation).value(), parameterType, paramNameResolver, languageDriver);
    } else if (annotation instanceof Insert) {
      return buildSqlSourceFromStrings(((Insert) annotation).value(), parameterType, paramNameResolver, languageDriver);
    } else if (annotation instanceof Delete) {
      return buildSqlSourceFromStrings(((Delete) annotation).value(), parameterType, paramNameResolver, languageDriver);
    } else if (annotation instanceof SelectKey) {
      return buildSqlSourceFromStrings(((SelectKey) annotation).statement(), parameterType, paramNameResolver,
          languageDriver);
    }
    // *Provider annotations: SQL is supplied by a provider class at runtime
    return new ProviderSqlSource(assistant.getConfiguration(), annotation, type, method);
  }
private SqlSource buildSqlSourceFromStrings(String[] strings, Class<?> parameterTypeClass,
ParamNameResolver paramNameResolver, LanguageDriver languageDriver) {
return languageDriver.createSqlSource(configuration, String.join(" ", strings).trim(), parameterTypeClass,
paramNameResolver);
}
  /**
   * Varargs convenience overload of {@link #getAnnotationWrapper(Method, boolean, Collection)}.
   * Safe: the varargs array is only read, never stored or exposed.
   */
  @SafeVarargs
  private final Optional<AnnotationWrapper> getAnnotationWrapper(Method method, boolean errorIfNoMatch,
      Class<? extends Annotation>... targetTypes) {
    return getAnnotationWrapper(method, errorIfNoMatch, Arrays.asList(targetTypes));
  }
  /**
   * Finds the statement annotation on {@code method} matching the configured databaseId, falling
   * back to the databaseId-less one (key {@code ""}).
   *
   * @param errorIfNoMatch when true, annotations existing but none matching is an error
   * @return the matching wrapper, or empty when the method has none of {@code targetTypes}
   * @throws BuilderException on duplicate annotations for the same databaseId, or (when
   *         {@code errorIfNoMatch}) when no annotation matches the current databaseId
   */
  private Optional<AnnotationWrapper> getAnnotationWrapper(Method method, boolean errorIfNoMatch,
      Collection<Class<? extends Annotation>> targetTypes) {
    String databaseId = configuration.getDatabaseId();
    // index by databaseId; two annotations with the same databaseId is a configuration error
    Map<String, AnnotationWrapper> statementAnnotations = targetTypes.stream()
        .flatMap(x -> Arrays.stream(method.getAnnotationsByType(x))).map(AnnotationWrapper::new)
        .collect(Collectors.toMap(AnnotationWrapper::getDatabaseId, x -> x, (existing, duplicate) -> {
          throw new BuilderException(
              String.format("Detected conflicting annotations '%s' and '%s' on '%s'.", existing.getAnnotation(),
                  duplicate.getAnnotation(), method.getDeclaringClass().getName() + "." + method.getName()));
        }));
    AnnotationWrapper annotationWrapper = null;
    if (databaseId != null) {
      annotationWrapper = statementAnnotations.get(databaseId);
    }
    if (annotationWrapper == null) {
      // fall back to the annotation declared without a databaseId
      annotationWrapper = statementAnnotations.get("");
    }
    if (errorIfNoMatch && annotationWrapper == null && !statementAnnotations.isEmpty()) {
      // Annotations exist, but there is no matching one for the specified databaseId
      throw new BuilderException(String.format(
          "Could not find a statement annotation that correspond a current database or default statement on method '%s.%s'. Current database id is [%s].",
          method.getDeclaringClass().getName(), method.getName(), databaseId));
    }
    return Optional.ofNullable(annotationWrapper);
  }
public static Class<?> getMethodReturnType(String mapperFqn, String localStatementId) {
if (mapperFqn == null || localStatementId == null) {
return null;
}
try {
Class<?> mapperClass = Resources.classForName(mapperFqn);
for (Method method : mapperClass.getMethods()) {
if (method.getName().equals(localStatementId) && canHaveStatement(method)) {
return getReturnType(method, mapperClass);
}
}
} catch (ClassNotFoundException e) {
// No corresponding mapper
|
MapperAnnotationBuilder
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsNoRequestTimeoutTest.java
|
{
"start": 1528,
"end": 2860
}
|
/**
 * Verifies that a JMS request/reply exchange with {@code requestTimeout=0} (no timeout)
 * still completes and returns the reply produced by the consumer route.
 */
class ____ extends AbstractJMSTest {
    @Order(2)
    @RegisterExtension
    public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
    protected final String componentName = "activemq";
    protected CamelContext context;
    protected ProducerTemplate template;
    protected ConsumerTemplate consumer;
    @Test
    public void testNoRequestTimeout() {
        // requestBody = InOut exchange; requestTimeout=0 disables the reply timeout
        String reply
                = template.requestBody("activemq:queue:JmsNoRequestTimeoutTest?requestTimeout=0", "Hello World", String.class);
        assertEquals("Bye World", reply);
    }
    @Override
    public String getComponentName() {
        return componentName;
    }
    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                // consumer side: always answer with the constant reply asserted above
                from("activemq:queue:JmsNoRequestTimeoutTest").transform(constant("Bye World"));
            }
        };
    }
    @Override
    public CamelContextExtension getCamelContextExtension() {
        return camelContextExtension;
    }
    @BeforeEach
    void setUpRequirements() {
        // pull context/templates from the shared extension before each test
        context = camelContextExtension.getContext();
        template = camelContextExtension.getProducerTemplate();
        consumer = camelContextExtension.getConsumerTemplate();
    }
}
|
JmsNoRequestTimeoutTest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/OffsetTimeAssertBaseTest.java
|
{
"start": 782,
"end": 1248
}
|
/**
 * Base template for {@code OffsetTimeAssert} tests: injects a mocked {@code Comparables}
 * into the assertion under test so comparison calls can be verified.
 */
class ____ extends BaseTestTemplate<OffsetTimeAssert, OffsetTime> {
  protected Comparables comparables;
  // fixed "actual" value shared by all tests of a subclass
  protected OffsetTime now = OffsetTime.now();
  @Override
  protected void inject_internal_objects() {
    super.inject_internal_objects();
    // replace the real comparables with a mock so interactions can be verified
    comparables = mock(Comparables.class);
    assertions.comparables = comparables;
  }
  @Override
  protected OffsetTimeAssert create_assertions() {
    return new OffsetTimeAssert(now);
  }
}
|
OffsetTimeAssertBaseTest
|
java
|
alibaba__nacos
|
ai/src/test/java/com/alibaba/nacos/ai/remote/manager/AiConnectionBasedClientManagerTest.java
|
{
"start": 1981,
"end": 6098
}
|
/**
 * Unit tests for {@code AiConnectionBasedClientManager}: connections labeled with the AI
 * module are forwarded to the delegate {@code ConnectionBasedClientManager}; others are
 * ignored, and all query methods delegate directly.
 */
class ____ {
    private static final String CONNECTION_ID = "1111111111_127.0.0.1_12345";
    @Mock
    ConnectionBasedClientManager delegate;
    @Mock
    Connection connection;
    ConnectionMeta connectionMeta;
    AiConnectionBasedClientManager connectionBasedClientManager;
    @BeforeEach
    void setUp() {
        connectionBasedClientManager = new AiConnectionBasedClientManager(delegate);
        // meta without the AI module label by default; individual tests add it when needed
        connectionMeta = new ConnectionMeta(CONNECTION_ID, "127.0.0.1", "127.0.0.1", 12345, 12345,
                ConnectionType.GRPC.getType(), "3.0.0", null, new HashMap<>());
    }
    @AfterEach
    void tearDown() {
    }
    @Test
    void clientConnectedNotAiConnection() {
        // no AI module label -> the delegate must not be notified
        when(connection.getMetaInfo()).thenReturn(connectionMeta);
        connectionBasedClientManager.clientConnected(connection);
        verify(delegate, never()).clientConnected(anyString(), any(ClientAttributes.class));
    }
    @Test
    void clientConnected() {
        when(connection.getMetaInfo()).thenReturn(connectionMeta);
        connectionMeta.getLabels().put(RemoteConstants.LABEL_MODULE, RemoteConstants.LABEL_MODULE_AI);
        connectionBasedClientManager.clientConnected(connection);
        verify(delegate).clientConnected(eq(CONNECTION_ID), any(ClientAttributes.class));
    }
    @Test
    void clientConnectedByClient() {
        // the Client overload is forwarded unconditionally
        ConnectionBasedClient client = new ConnectionBasedClient(CONNECTION_ID, true, 0L);
        connectionBasedClientManager.clientConnected(client);
        verify(delegate).clientConnected(client);
    }
    @Test
    void syncClientConnected() {
        ClientAttributes clientAttributes = new ClientAttributes();
        connectionBasedClientManager.syncClientConnected(CONNECTION_ID, clientAttributes);
        verify(delegate).syncClientConnected(CONNECTION_ID, clientAttributes);
    }
    @Test
    void clientDisConnectedNotAiConnection() {
        // no AI module label -> disconnect is not forwarded either
        when(connection.getMetaInfo()).thenReturn(connectionMeta);
        connectionBasedClientManager.clientDisConnected(connection);
        verify(delegate, never()).clientDisconnected(anyString());
    }
    @Test
    void clientDisconnected() {
        when(connection.getMetaInfo()).thenReturn(connectionMeta);
        connectionMeta.getLabels().put(RemoteConstants.LABEL_MODULE, RemoteConstants.LABEL_MODULE_AI);
        connectionBasedClientManager.clientDisConnected(connection);
        verify(delegate).clientDisconnected(CONNECTION_ID);
    }
    @Test
    void getClient() {
        ConnectionBasedClient client = new ConnectionBasedClient(CONNECTION_ID, true, 0L);
        when(delegate.getClient(CONNECTION_ID)).thenReturn(client);
        assertEquals(client, connectionBasedClientManager.getClient(CONNECTION_ID));
    }
    @Test
    void contains() {
        assertFalse(connectionBasedClientManager.contains(CONNECTION_ID));
        when(delegate.contains(CONNECTION_ID)).thenReturn(true);
        assertTrue(connectionBasedClientManager.contains(CONNECTION_ID));
    }
    @Test
    void allClientId() {
        when(delegate.allClientId()).thenReturn(Collections.singleton(CONNECTION_ID));
        assertEquals(1, connectionBasedClientManager.allClientId().size());
        assertEquals(CONNECTION_ID, connectionBasedClientManager.allClientId().iterator().next());
    }
    @Test
    void isResponsibleClient() {
        ConnectionBasedClient client = new ConnectionBasedClient(CONNECTION_ID, true, 0L);
        assertFalse(connectionBasedClientManager.isResponsibleClient(client));
        when(delegate.isResponsibleClient(client)).thenReturn(true);
        assertTrue(connectionBasedClientManager.isResponsibleClient(client));
    }
    @Test
    void verifyClient() {
        DistroClientVerifyInfo verifyData = new DistroClientVerifyInfo(CONNECTION_ID, 0L);
        assertFalse(connectionBasedClientManager.verifyClient(verifyData));
        when(delegate.verifyClient(verifyData)).thenReturn(true);
        assertTrue(connectionBasedClientManager.verifyClient(verifyData));
    }
}
|
AiConnectionBasedClientManagerTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/failure/FailureEnricherUtilsTest.java
|
{
"start": 13655,
"end": 14615
}
|
    /**
     * Test {@code FailureEnricher} that reports a fixed set of output keys and returns a
     * canned key/value map from {@code processFailure}, ignoring the actual failure.
     */
    class ____ implements FailureEnricher {
        private final Set<String> outputKeys;
        private final Map<String, String> outputMap;
        // Builds a value of "<key>Value" for every declared key.
        TestEnricher(String... outputKeys) {
            this.outputKeys = Arrays.stream(outputKeys).collect(Collectors.toSet());
            this.outputMap = new HashMap<>();
            this.outputKeys.forEach(key -> outputMap.put(key, key + "Value"));
        }
        // Uses the caller-supplied map verbatim; outputValues may even disagree with outputKeys
        // so tests can exercise key-validation paths.
        TestEnricher(Map<String, String> outputValues, String... outputKeys) {
            this.outputKeys = Arrays.stream(outputKeys).collect(Collectors.toSet());
            this.outputMap = outputValues;
        }
        @Override
        public Set<String> getOutputKeys() {
            return outputKeys;
        }
        @Override
        public CompletableFuture<Map<String, String>> processFailure(
                Throwable cause, Context context) {
            // already-completed future: enrichment is synchronous in tests
            return CompletableFuture.completedFuture(outputMap);
        }
    }
}
|
TestEnricher
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/annotation/Mapper.java
|
{
"start": 4364,
"end": 4494
}
|
interface ____ defining merge strategies.
* Merge strategies are used when the mapping has two or more arguments.
*/
|
for
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/api/connector/source/lib/InputFormatSource.java
|
{
"start": 5114,
"end": 5653
}
|
class ____ implements SourceSplit, Serializable {
private final InputSplit inputSplit;
private final String id;
public InputSplitWrapperSourceSplit(InputSplit inputSplit) {
this.inputSplit = inputSplit;
this.id = String.valueOf(inputSplit.getSplitNumber());
}
public InputSplit getInputSplit() {
return inputSplit;
}
@Override
public String splitId() {
return id;
}
}
private static
|
InputSplitWrapperSourceSplit
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-4331/maven-it-plugin-dependency-collection/src/main/java/org/apache/maven/plugin/coreit/AbstractDependencyMojo.java
|
{
"start": 1389,
"end": 3915
}
|
/**
 * Base mojo for the dependency-collection integration-test plugin: provides helpers to dump
 * a resolved artifact list to a file for later verification by the test harness.
 */
class ____ extends AbstractMojo {
    /**
     * The current Maven project.
     */
    @Parameter(defaultValue = "${project}", required = true, readonly = true)
    protected MavenProject project;
    /**
     * Writes the specified artifacts to the given output file.
     *
     * @param pathname The path to the output file, relative to the project base directory, may be <code>null</code> or
     *            empty if the output file should not be written.
     * @param artifacts The list of artifacts to write to the file, may be <code>null</code>.
     * @throws MojoExecutionException If the output file could not be written.
     */
    protected void writeArtifacts(String pathname, Collection artifacts) throws MojoExecutionException {
        if (pathname == null || pathname.length() <= 0) {
            // no path configured -> dumping is disabled for this execution
            return;
        }
        File file = resolveFile(pathname);
        getLog().info("[MAVEN-CORE-IT-LOG] Dumping artifact list: " + file);
        BufferedWriter writer = null;
        try {
            file.getParentFile().mkdirs();
            // fixed UTF-8 so the verifying test reads the same encoding on every platform
            writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
            if (artifacts != null) {
                for (Object artifact1 : artifacts) {
                    Artifact artifact = (Artifact) artifact1;
                    // one line per artifact: "<id>" or "<id> (optional)"
                    writer.write(artifact.getId());
                    String optional = "";
                    if (artifact.isOptional()) {
                        optional = " (optional)";
                        writer.write(optional);
                    }
                    writer.newLine();
                    getLog().info("[MAVEN-CORE-IT-LOG] " + artifact.getId() + optional);
                }
            }
        } catch (IOException e) {
            throw new MojoExecutionException("Failed to write artifact list", e);
        } finally {
            // manual close (pre-try-with-resources style); a close failure is deliberately ignored
            if (writer != null) {
                try {
                    writer.close();
                } catch (IOException e) {
                    // just ignore
                }
            }
        }
    }
    // NOTE: We don't want to test path translation here so resolve relative path manually for robustness
    private File resolveFile(String pathname) {
        File file = null;
        if (pathname != null) {
            file = new File(pathname);
            if (!file.isAbsolute()) {
                // relative paths are resolved against the project base directory
                file = new File(project.getBasedir(), pathname);
            }
        }
        return file;
    }
}
|
AbstractDependencyMojo
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/failures/ContextLoadFailureTests.java
|
{
"start": 2920,
"end": 3027
}
|
	// Test configuration whose bean creation deliberately throws an Error (not an Exception)
	// so context-load-failure reporting for Errors can be asserted.
	class ____ {

		@Bean
		String explosion() {
			throw new StackOverflowError("Boom!");
		}
	}
}
}
|
Config
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-8750-new-scopes/comprehensive-test/src/main/java/org/apache/maven/its/mng8750/ComprehensiveExample.java
|
{
"start": 1112,
"end": 2660
}
|
/**
 * Integration-test driver for Maven dependency scopes: exercises one compile-scoped and one
 * compile-only-scoped dependency and prints PASSED/FAILED markers that the test harness
 * greps from the build output.
 */
class ____ {
    /**
     * Method that uses a compile-only dependency.
     */
    public String useCompileOnlyDep() {
        CompileOnlyDep dep = new CompileOnlyDep();
        return "Used compile-only dependency: " + dep.getMessage();
    }
    /**
     * Method that uses a regular compile dependency.
     */
    public String useCompileDep() {
        CompileDep dep = new CompileDep();
        return "Used compile dependency: " + dep.getMessage();
    }
    /**
     * Main method for comprehensive testing.
     */
    public static void main(String[] args) {
        ComprehensiveExample example = new ComprehensiveExample();
        System.out.println("=== Comprehensive Scope Test ===");
        // Test regular compile dependency (should work at runtime)
        try {
            System.out.println(example.useCompileDep());
            System.out.println("Compile scope verification: PASSED");
        } catch (Exception e) {
            System.out.println("Compile scope verification: FAILED - " + e.getMessage());
        }
        // Test compile-only dependency (should fail at runtime)
        // NoClassDefFoundError (an Error, not an Exception) is the expected outcome here,
        // proving the compile-only artifact is absent from the runtime classpath.
        try {
            System.out.println(example.useCompileOnlyDep());
            System.out.println("Compile-only scope verification: FAILED - should not be available at runtime");
        } catch (NoClassDefFoundError e) {
            System.out.println("Compile-only scope verification: PASSED - not available at runtime");
        }
        System.out.println("=== End Comprehensive Scope Test ===");
    }
}
|
ComprehensiveExample
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/resource/GlobalResourcesRepositoryTest.java
|
{
"start": 1013,
"end": 2158
}
|
class ____ {
@Test
void test() {
GlobalResourcesRepository repository = GlobalResourcesRepository.getInstance();
ExecutorService globalExecutorService = GlobalResourcesRepository.getGlobalExecutorService();
Assertions.assertNotNull(globalExecutorService);
GlobalDisposable globalDisposable = new GlobalDisposable();
GlobalResourcesRepository.registerGlobalDisposable(globalDisposable);
OneOffDisposable oneOffDisposable = new OneOffDisposable();
repository.registerDisposable(oneOffDisposable);
repository.destroy();
Assertions.assertTrue(globalExecutorService.isShutdown());
Assertions.assertTrue(globalDisposable.isDestroyed());
Assertions.assertTrue(oneOffDisposable.isDestroyed());
Assertions.assertTrue(
!GlobalResourcesRepository.getGlobalReusedDisposables().isEmpty());
Assertions.assertTrue(
GlobalResourcesRepository.getGlobalReusedDisposables().contains(globalDisposable));
Assertions.assertTrue(repository.getOneoffDisposables().isEmpty());
}
|
GlobalResourcesRepositoryTest
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/CancellationToken.java
|
{
"start": 1168,
"end": 2076
}
|
// Sealed interface: the 'permits' clause restricts implementations to the two built-in tokens.
interface ____ permits RegularCancellationToken, DisabledCancellationToken {

	/**
	 * Create a new, uncancelled cancellation token.
	 */
	static CancellationToken create() {
		return new RegularCancellationToken();
	}

	/**
	 * Get a new cancellation token that cannot be cancelled.
	 *
	 * <p>This is only useful for cases when a cancellation token is required
	 * but is not supported or irrelevant, for example, in tests.
	 */
	static CancellationToken disabled() {
		return DisabledCancellationToken.INSTANCE;
	}

	/**
	 * {@return whether cancellation has been requested}
	 *
	 * <p>Once this method returns {@code true}, it will never return
	 * {@code false} in a subsequent call.
	 */
	boolean isCancellationRequested();

	/**
	 * Request cancellation.
	 *
	 * <p>This will call subsequent calls to {@link #isCancellationRequested()}
	 * to return {@code true}.
	 */
	void cancel();
}
|
CancellationToken
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/main/java/org/apache/log4j/FileAppender.java
|
{
"start": 1497,
"end": 10906
}
|
class ____ extends WriterAppender {
/**
* Controls file truncatation. The default value for this variable is <code>true</code>, meaning that by default a
* <code>FileAppender</code> will append to an existing file and not truncate it.
* <p>
* This option is meaningful only if the FileAppender opens the file.
* </p>
*/
protected boolean fileAppend = true;
/**
* The name of the log file.
*/
protected String fileName = null;
/**
* Do we do bufferedIO?
*/
protected boolean bufferedIO = false;
/**
* Determines the size of IO buffer be. Default is 8K.
*/
protected int bufferSize = 8 * 1024;
/**
* The default constructor does not do anything.
*/
public FileAppender() {}
/**
* Constructs a FileAppender and open the file designated by <code>filename</code>. The opened filename will become the
* output destination for this appender.
* <p>
* The file will be appended to.
* </p>
*/
public FileAppender(final Layout layout, final String filename) throws IOException {
this(layout, filename, true);
}
/**
* Constructs a FileAppender and open the file designated by <code>filename</code>. The opened filename will become the
* output destination for this appender.
* <p>
* If the <code>append</code> parameter is true, the file will be appended to. Otherwise, the file designated by
* <code>filename</code> will be truncated before being opened.
* </p>
*/
public FileAppender(final Layout layout, final String filename, final boolean append) throws IOException {
this.layout = layout;
this.setFile(filename, append, false, bufferSize);
}
/**
* Constructs a <code>FileAppender</code> and open the file designated by <code>filename</code>. The opened filename
* will become the output destination for this appender.
* <p>
* If the <code>append</code> parameter is true, the file will be appended to. Otherwise, the file designated by
* <code>filename</code> will be truncated before being opened.
* </p>
* <p>
* If the <code>bufferedIO</code> parameter is <code>true</code>, then buffered IO will be used to write to the output
* file.
* </p>
*/
public FileAppender(
final Layout layout,
final String filename,
final boolean append,
final boolean bufferedIO,
final int bufferSize)
throws IOException {
this.layout = layout;
this.setFile(filename, append, bufferedIO, bufferSize);
}
/**
* If the value of <b>File</b> is not <code>null</code>, then {@link #setFile} is called with the values of <b>File</b>
* and <b>Append</b> properties.
*
* @since 0.8.1
*/
public void activateOptions() {
if (fileName != null) {
try {
setFile(fileName, fileAppend, bufferedIO, bufferSize);
} catch (java.io.IOException e) {
errorHandler.error(
"setFile(" + fileName + "," + fileAppend + ") call failed.", e, ErrorCode.FILE_OPEN_FAILURE);
}
} else {
// LogLog.error("File option not set for appender ["+name+"].");
LogLog.warn("File option not set for appender [" + name + "].");
LogLog.warn("Are you using FileAppender instead of ConsoleAppender?");
}
}
/**
* Closes the previously opened file.
*/
protected void closeFile() {
if (this.qw != null) {
try {
this.qw.close();
} catch (java.io.IOException e) {
if (e instanceof InterruptedIOException) {
Thread.currentThread().interrupt();
}
// Exceptionally, it does not make sense to delegate to an
// ErrorHandler. Since a closed appender is basically dead.
LogLog.error("Could not close " + qw, e);
}
}
}
/**
* Returns the value of the <b>Append</b> option.
*/
public boolean getAppend() {
return fileAppend;
}
/**
* Get the value of the <b>BufferedIO</b> option.
*
* <p>
* BufferedIO will significatnly increase performance on heavily loaded systems.
* </p>
*/
public boolean getBufferedIO() {
return this.bufferedIO;
}
/**
* Get the size of the IO buffer.
*/
public int getBufferSize() {
return this.bufferSize;
}
    /**
     * Returns the value of the <b>File</b> option.
     *
     * @return the configured log file name, or {@code null} if none has been set
     */
    public String getFile() {
        return fileName;
    }
    /**
     * Close any previously opened file and call the parent's <code>reset</code>.
     * Also clears the remembered file name, so a new file must be configured
     * before the appender can write again.
     */
    protected void reset() {
        closeFile();
        this.fileName = null;
        super.reset();
    }
    /**
     * The <b>Append</b> option takes a boolean value. It is set to <code>true</code> by default. If true, then
     * <code>File</code> will be opened in append mode by {@link #setFile setFile} (see above). Otherwise, {@link #setFile
     * setFile} will open <code>File</code> in truncate mode.
     *
     * <p>
     * Note: Actual opening of the file is made when {@link #activateOptions} is called, not when the options are set.
     * </p>
     *
     * @param flag {@code true} to append to the file on open, {@code false} to truncate it
     */
    public void setAppend(final boolean flag) {
        fileAppend = flag;
    }
    /**
     * The <b>BufferedIO</b> option takes a boolean value. It is set to <code>false</code> by default. If true, then
     * <code>File</code> will be opened and the resulting {@link java.io.Writer} wrapped around a {@link BufferedWriter}.
     *
     * BufferedIO will significantly increase performance on heavily loaded systems.
     *
     * @param bufferedIO {@code true} to buffer writes; enabling this also disables immediate flushing
     */
    public void setBufferedIO(final boolean bufferedIO) {
        this.bufferedIO = bufferedIO;
        // Buffered IO and immediate flush are mutually exclusive: flushing on
        // every event would defeat the purpose of the buffer.
        if (bufferedIO) {
            immediateFlush = false;
        }
    }
    /**
     * Set the size of the IO buffer.
     *
     * @param bufferSize the buffer size in bytes; only used when buffered IO is enabled
     */
    public void setBufferSize(final int bufferSize) {
        this.bufferSize = bufferSize;
    }
/**
* The <b>File</b> property takes a string value which should be the name of the file to append to.
* <p>
* <font color="#DD0044"><b>Note that the special values "System.out" or "System.err" are no longer honored.</b></font>
* </p>
* <p>
* Note: Actual opening of the file is made when {@link #activateOptions} is called, not when the options are set.
* </p>
*/
public void setFile(final String file) {
// Trim spaces from both ends. The users probably does not want
// trailing spaces in file names.
final String val = file.trim();
fileName = val;
}
    /**
     * Sets and <i>opens</i> the file where the log output will go. The specified file must be writable.
     * <p>
     * If there was already an opened file, then the previous file is closed first.
     * </p>
     * <p>
     * <b>Do not use this method directly. To configure a FileAppender or one of its subclasses, set its properties one by
     * one and then call activateOptions.</b>
     * </p>
     *
     * @param fileName The path to the log file.
     * @param append If true will append to fileName. Otherwise will truncate fileName.
     * @param bufferedIO if true, wrap the writer in a {@link BufferedWriter}
     * @param bufferSize size in bytes of the IO buffer; used only when bufferedIO is true
     * @throws IOException if the file (or a missing parent directory) cannot be created or opened
     */
    @SuppressFBWarnings(
            value = {"PATH_TRAVERSAL_IN", "PATH_TRAVERSAL_OUT"},
            justification = "The file name comes from a configuration file.")
    public synchronized void setFile(String fileName, boolean append, boolean bufferedIO, int bufferSize)
            throws IOException {
        LogLog.debug("setFile called: " + fileName + ", " + append);
        // It does not make sense to have immediate flush and bufferedIO.
        if (bufferedIO) {
            setImmediateFlush(false);
        }
        // Close the current file (if any) before opening the new one.
        // NOTE(review): if opening the new file fails below, the appender is left
        // with no writer at all -- confirm callers expect this.
        reset();
        FileOutputStream ostream = null;
        try {
            //
            // attempt to create file
            //
            ostream = new FileOutputStream(fileName, append);
        } catch (FileNotFoundException ex) {
            //
            // if parent directory does not exist then
            // attempt to create it and try to create file
            // see bug 9150
            //
            final String parentName = new File(fileName).getParent();
            if (parentName != null) {
                final File parentDir = new File(parentName);
                if (!parentDir.exists() && parentDir.mkdirs()) {
                    ostream = new FileOutputStream(fileName, append);
                } else {
                    throw ex;
                }
            } else {
                throw ex;
            }
        }
        // Ownership of ostream passes to the writer; it is closed via closeFile().
        // NOTE(review): if createWriter were to throw, ostream would leak -- verify.
        Writer fw = createWriter(ostream);
        if (bufferedIO) {
            fw = new BufferedWriter(fw, bufferSize);
        }
        this.setQWForFiles(fw);
        // Remember the effective settings so activateOptions()/reconfiguration
        // can reproduce them later.
        this.fileName = fileName;
        this.fileAppend = append;
        this.bufferedIO = bufferedIO;
        this.bufferSize = bufferSize;
        writeHeader();
        LogLog.debug("setFile ended");
    }
    /**
     * Sets the quiet writer being used.
     *
     * This method is overriden by {@link RollingFileAppender}.
     *
     * @param writer the destination writer, already wrapped in buffering if requested
     */
    protected void setQWForFiles(final Writer writer) {
        this.qw = new QuietWriter(writer, errorHandler);
    }
}
|
FileAppender
|
java
|
quarkusio__quarkus
|
extensions/mailer/runtime/src/main/java/io/quarkus/mailer/reactive/ReactiveMailer.java
|
{
"start": 157,
"end": 523
}
|
interface ____ {
    /**
     * Sends the given emails.
     *
     * <p>NOTE(review): as a Mutiny {@code Uni}, the send is presumably not triggered until the
     * returned {@code Uni} is subscribed to -- confirm against the implementation.</p>
     *
     * @param mails the emails to send, must not be {@code null}, must not contain {@code null}
     * @return a {@link Uni} indicating when the mails have been sent. The {@link Uni} may fire a failure if the
     *         emails cannot be sent.
     */
    Uni<Void> send(Mail... mails);
}
|
ReactiveMailer
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/RegistryFactory.java
|
{
"start": 4554,
"end": 19719
}
|
class ____ {
  // Static factory for Glide's component Registry; not instantiable.
  private RegistryFactory() {}
  /**
   * Returns a supplier that creates and initializes the {@link Registry} on first call.
   *
   * <p>Callers are expected to memoize the supplier if duplicate work must be avoided; this
   * supplier only guards against recursive initialization (a module calling back into
   * glide.getRegistry() during registration).
   */
  static GlideSupplier<Registry> lazilyCreateAndInitializeRegistry(
      final Glide glide,
      final List<GlideModule> manifestModules,
      @Nullable final AppGlideModule annotationGeneratedModule) {
    return new GlideSupplier<Registry>() {
      // Rely on callers using memoization if they want to avoid duplicate work, but
      // rely on ourselves to verify that no recursive initialization occurs.
      private boolean isInitializing;
      @Override
      public Registry get() {
        if (isInitializing) {
          throw new IllegalStateException(
              "Recursive Registry initialization! In your"
                  + " AppGlideModule and LibraryGlideModules, Make sure you're using the provided "
                  + "Registry rather calling glide.getRegistry()!");
        }
        Trace.beginSection("Glide registry");
        isInitializing = true;
        try {
          return createAndInitRegistry(glide, manifestModules, annotationGeneratedModule);
        } finally {
          isInitializing = false;
          Trace.endSection();
        }
      }
    };
  }
  /** Builds a Registry populated with the defaults and then with the given modules. */
  @Synthetic
  static Registry createAndInitRegistry(
      Glide glide,
      List<GlideModule> manifestModules,
      @Nullable AppGlideModule annotationGeneratedModule) {
    BitmapPool bitmapPool = glide.getBitmapPool();
    ArrayPool arrayPool = glide.getArrayPool();
    Context context = glide.getGlideContext().getApplicationContext();
    GlideExperiments experiments = glide.getGlideContext().getExperiments();
    Registry registry = new Registry();
    initializeDefaults(context, registry, bitmapPool, arrayPool, experiments);
    initializeModules(context, glide, registry, manifestModules, annotationGeneratedModule);
    return registry;
  }
  /**
   * Registers Glide's built-in parsers, decoders, encoders, model loaders and transcoders.
   * Registration order matters: entries appended earlier take precedence.
   */
  private static void initializeDefaults(
      Context context,
      Registry registry,
      BitmapPool bitmapPool,
      ArrayPool arrayPool,
      GlideExperiments experiments) {
    registry.register(new DefaultImageHeaderParser());
    // Right now we're only using this parser for HEIF images, which are only supported on OMR1+.
    // If we need this for other file types, we should consider removing this restriction.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O_MR1) {
      registry.register(new ExifInterfaceImageHeaderParser());
    }
    final Resources resources = context.getResources();
    List<ImageHeaderParser> imageHeaderParsers = registry.getImageHeaderParsers();
    ByteBufferGifDecoder byteBufferGifDecoder =
        new ByteBufferGifDecoder(context, imageHeaderParsers, bitmapPool, arrayPool);
    ResourceDecoder<ParcelFileDescriptor, Bitmap> parcelFileDescriptorVideoDecoder =
        VideoDecoder.parcel(bitmapPool);
    // TODO(judds): Make ParcelFileDescriptorBitmapDecoder work with ImageDecoder.
    Downsampler downsampler =
        new Downsampler(
            registry.getImageHeaderParsers(), resources.getDisplayMetrics(), bitmapPool, arrayPool);
    ResourceDecoder<ByteBuffer, Bitmap> byteBufferBitmapDecoder;
    ResourceDecoder<InputStream, Bitmap> streamBitmapDecoder;
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P
        && experiments.isEnabled(EnableImageDecoderForBitmaps.class)) {
      streamBitmapDecoder = new InputStreamBitmapImageDecoderResourceDecoder();
      byteBufferBitmapDecoder = new ByteBufferBitmapImageDecoderResourceDecoder();
    } else {
      byteBufferBitmapDecoder = new ByteBufferBitmapDecoder(downsampler);
      streamBitmapDecoder = new StreamBitmapDecoder(downsampler, arrayPool);
    }
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      registry.append(
          Registry.BUCKET_ANIMATION,
          InputStream.class,
          Drawable.class,
          AnimatedImageDecoder.streamDecoder(imageHeaderParsers, arrayPool));
      registry.append(
          Registry.BUCKET_ANIMATION,
          ByteBuffer.class,
          Drawable.class,
          AnimatedImageDecoder.byteBufferDecoder(imageHeaderParsers, arrayPool));
    }
    ResourceDrawableDecoder resourceDrawableDecoder = new ResourceDrawableDecoder(context);
    BitmapEncoder bitmapEncoder = new BitmapEncoder(arrayPool);
    BitmapBytesTranscoder bitmapBytesTranscoder = new BitmapBytesTranscoder();
    GifDrawableBytesTranscoder gifDrawableBytesTranscoder = new GifDrawableBytesTranscoder();
    ContentResolver contentResolver = context.getContentResolver();
    registry
        .append(ByteBuffer.class, new ByteBufferEncoder())
        .append(InputStream.class, new StreamEncoder(arrayPool))
        /* Bitmaps */
        .append(Registry.BUCKET_BITMAP, ByteBuffer.class, Bitmap.class, byteBufferBitmapDecoder)
        .append(Registry.BUCKET_BITMAP, InputStream.class, Bitmap.class, streamBitmapDecoder);
    if (ParcelFileDescriptorRewinder.isSupported()) {
      registry.append(
          Registry.BUCKET_BITMAP,
          ParcelFileDescriptor.class,
          Bitmap.class,
          new ParcelFileDescriptorBitmapDecoder(downsampler));
    }
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
      registry.append(
          Registry.BUCKET_BITMAP,
          AssetFileDescriptor.class,
          Bitmap.class,
          VideoDecoder.asset(bitmapPool));
    }
    registry
        .append(
            Registry.BUCKET_BITMAP,
            ParcelFileDescriptor.class,
            Bitmap.class,
            parcelFileDescriptorVideoDecoder)
        .append(Bitmap.class, Bitmap.class, UnitModelLoader.Factory.<Bitmap>getInstance())
        .append(Registry.BUCKET_BITMAP, Bitmap.class, Bitmap.class, new UnitBitmapDecoder())
        .append(Bitmap.class, bitmapEncoder)
        /* BitmapDrawables */
        .append(
            Registry.BUCKET_BITMAP_DRAWABLE,
            ByteBuffer.class,
            BitmapDrawable.class,
            new BitmapDrawableDecoder<>(resources, byteBufferBitmapDecoder))
        .append(
            Registry.BUCKET_BITMAP_DRAWABLE,
            InputStream.class,
            BitmapDrawable.class,
            new BitmapDrawableDecoder<>(resources, streamBitmapDecoder))
        .append(
            Registry.BUCKET_BITMAP_DRAWABLE,
            ParcelFileDescriptor.class,
            BitmapDrawable.class,
            new BitmapDrawableDecoder<>(resources, parcelFileDescriptorVideoDecoder))
        .append(BitmapDrawable.class, new BitmapDrawableEncoder(bitmapPool, bitmapEncoder))
        /* GIFs */
        .append(
            Registry.BUCKET_ANIMATION,
            InputStream.class,
            GifDrawable.class,
            new StreamGifDecoder(imageHeaderParsers, byteBufferGifDecoder, arrayPool))
        .append(
            Registry.BUCKET_ANIMATION, ByteBuffer.class, GifDrawable.class, byteBufferGifDecoder)
        .append(GifDrawable.class, new GifDrawableEncoder())
        /* GIF Frames */
        // Compilation with Gradle requires the type to be specified for UnitModelLoader here.
        .append(
            GifDecoder.class, GifDecoder.class, UnitModelLoader.Factory.<GifDecoder>getInstance())
        .append(
            Registry.BUCKET_BITMAP,
            GifDecoder.class,
            Bitmap.class,
            new GifFrameResourceDecoder(bitmapPool))
        /* Drawables */
        .append(Uri.class, Drawable.class, resourceDrawableDecoder)
        .append(
            Uri.class, Bitmap.class, new ResourceBitmapDecoder(resourceDrawableDecoder, bitmapPool))
        /* Files */
        .register(new ByteBufferRewinder.Factory())
        .append(File.class, ByteBuffer.class, new ByteBufferFileLoader.Factory())
        .append(File.class, InputStream.class, new FileLoader.StreamFactory())
        .append(File.class, File.class, new FileDecoder())
        .append(File.class, ParcelFileDescriptor.class, new FileLoader.FileDescriptorFactory())
        // Compilation with Gradle requires the type to be specified for UnitModelLoader here.
        .append(File.class, File.class, UnitModelLoader.Factory.<File>getInstance())
        /* Models */
        .register(new InputStreamRewinder.Factory(arrayPool));
    if (ParcelFileDescriptorRewinder.isSupported()) {
      registry.register(new ParcelFileDescriptorRewinder.Factory());
    }
    // DirectResourceLoader and ResourceUriLoader handle resource IDs and Uris owned by this
    // package.
    ModelLoaderFactory<Integer, InputStream> directResourceLoaderStreamFactory =
        DirectResourceLoader.inputStreamFactory(context);
    ModelLoaderFactory<Integer, AssetFileDescriptor>
        directResourceLoaderAssetFileDescriptorFactory =
            DirectResourceLoader.assetFileDescriptorFactory(context);
    // Renamed from the misspelled "directResourceLaoderDrawableFactory".
    ModelLoaderFactory<Integer, Drawable> directResourceLoaderDrawableFactory =
        DirectResourceLoader.drawableFactory(context);
    registry
        .append(int.class, InputStream.class, directResourceLoaderStreamFactory)
        .append(Integer.class, InputStream.class, directResourceLoaderStreamFactory)
        .append(
            int.class, AssetFileDescriptor.class, directResourceLoaderAssetFileDescriptorFactory)
        .append(
            Integer.class,
            AssetFileDescriptor.class,
            directResourceLoaderAssetFileDescriptorFactory)
        .append(int.class, Drawable.class, directResourceLoaderDrawableFactory)
        .append(Integer.class, Drawable.class, directResourceLoaderDrawableFactory)
        .append(Uri.class, InputStream.class, ResourceUriLoader.newStreamFactory(context))
        .append(
            Uri.class,
            AssetFileDescriptor.class,
            ResourceUriLoader.newAssetFileDescriptorFactory(context));
    // ResourceLoader and UriLoader handle resource IDs and Uris owned by other packages.
    ResourceLoader.UriFactory resourceLoaderUriFactory = new ResourceLoader.UriFactory(resources);
    ResourceLoader.AssetFileDescriptorFactory resourceLoaderAssetFileDescriptorFactory =
        new ResourceLoader.AssetFileDescriptorFactory(resources);
    ResourceLoader.StreamFactory resourceLoaderStreamFactory =
        new ResourceLoader.StreamFactory(resources);
    registry
        .append(Integer.class, Uri.class, resourceLoaderUriFactory)
        .append(int.class, Uri.class, resourceLoaderUriFactory)
        .append(Integer.class, AssetFileDescriptor.class, resourceLoaderAssetFileDescriptorFactory)
        .append(int.class, AssetFileDescriptor.class, resourceLoaderAssetFileDescriptorFactory)
        .append(Integer.class, InputStream.class, resourceLoaderStreamFactory)
        .append(int.class, InputStream.class, resourceLoaderStreamFactory);
    registry
        .append(String.class, InputStream.class, new DataUrlLoader.StreamFactory<String>())
        .append(Uri.class, InputStream.class, new DataUrlLoader.StreamFactory<Uri>())
        .append(String.class, InputStream.class, new StringLoader.StreamFactory())
        .append(String.class, ParcelFileDescriptor.class, new StringLoader.FileDescriptorFactory())
        .append(
            String.class, AssetFileDescriptor.class, new StringLoader.AssetFileDescriptorFactory())
        .append(Uri.class, InputStream.class, new AssetUriLoader.StreamFactory(context.getAssets()))
        .append(
            Uri.class,
            AssetFileDescriptor.class,
            new AssetUriLoader.FileDescriptorFactory(context.getAssets()))
        .append(Uri.class, InputStream.class, new MediaStoreImageThumbLoader.Factory(context))
        .append(Uri.class, InputStream.class, new MediaStoreVideoThumbLoader.Factory(context));
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
      registry.append(
          Uri.class, InputStream.class, new QMediaStoreUriLoader.InputStreamFactory(context));
      registry.append(
          Uri.class,
          ParcelFileDescriptor.class,
          new QMediaStoreUriLoader.FileDescriptorFactory(context));
    }
    boolean useMediaStoreOpenFileApisIfPossible =
        experiments.isEnabled(UseMediaStoreOpenFileApisIfPossible.class);
    registry
        .append(
            Uri.class,
            InputStream.class,
            new UriLoader.StreamFactory(contentResolver, useMediaStoreOpenFileApisIfPossible))
        .append(
            Uri.class,
            ParcelFileDescriptor.class,
            new UriLoader.FileDescriptorFactory(
                contentResolver, useMediaStoreOpenFileApisIfPossible))
        .append(
            Uri.class,
            AssetFileDescriptor.class,
            new UriLoader.AssetFileDescriptorFactory(
                contentResolver, useMediaStoreOpenFileApisIfPossible))
        .append(Uri.class, InputStream.class, new UrlUriLoader.StreamFactory())
        .append(URL.class, InputStream.class, new UrlLoader.StreamFactory())
        .append(Uri.class, File.class, new MediaStoreFileLoader.Factory(context))
        .append(GlideUrl.class, InputStream.class, new HttpGlideUrlLoader.Factory())
        .append(byte[].class, ByteBuffer.class, new ByteArrayLoader.ByteBufferFactory())
        .append(byte[].class, InputStream.class, new ByteArrayLoader.StreamFactory())
        .append(Uri.class, Uri.class, UnitModelLoader.Factory.<Uri>getInstance())
        .append(Drawable.class, Drawable.class, UnitModelLoader.Factory.<Drawable>getInstance())
        .append(Drawable.class, Drawable.class, new UnitDrawableDecoder())
        /* Transcoders */
        .register(Bitmap.class, BitmapDrawable.class, new BitmapDrawableTranscoder(resources))
        .register(Bitmap.class, byte[].class, bitmapBytesTranscoder)
        .register(
            Drawable.class,
            byte[].class,
            new DrawableBytesTranscoder(
                bitmapPool, bitmapBytesTranscoder, gifDrawableBytesTranscoder))
        .register(GifDrawable.class, byte[].class, gifDrawableBytesTranscoder);
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      ResourceDecoder<ByteBuffer, Bitmap> byteBufferVideoDecoder =
          VideoDecoder.byteBuffer(bitmapPool);
      registry.append(ByteBuffer.class, Bitmap.class, byteBufferVideoDecoder);
      registry.append(
          ByteBuffer.class,
          BitmapDrawable.class,
          new BitmapDrawableDecoder<>(resources, byteBufferVideoDecoder));
    }
  }
  /**
   * Lets each manifest module and the annotation-generated module (if any) register its
   * components, translating the AbstractMethodError thrown by Glide v3 modules into a
   * descriptive IllegalStateException.
   */
  private static void initializeModules(
      Context context,
      Glide glide,
      Registry registry,
      List<GlideModule> manifestModules,
      @Nullable AppGlideModule annotationGeneratedModule) {
    for (GlideModule module : manifestModules) {
      try {
        module.registerComponents(context, glide, registry);
      } catch (AbstractMethodError e) {
        throw new IllegalStateException(
            "Attempting to register a Glide v3 module. If you see this, you or one of your"
                + " dependencies may be including Glide v3 even though you're using Glide v4."
                + " You'll need to find and remove (or update) the offending dependency."
                + " The v3 module name is: "
                + module.getClass().getName(),
            e);
      }
    }
    if (annotationGeneratedModule != null) {
      annotationGeneratedModule.registerComponents(context, glide, registry);
    }
  }
}
|
RegistryFactory
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistration.java
|
{
"start": 1618,
"end": 5796
}
|
enum ____ {
    /** No serializer specified; {@code getSerializer(Kryo)} returns {@code null}. */
    UNSPECIFIED,
    /** Serializer specified by its class; instantiated reflectively per Kryo instance. */
    CLASS,
    /** Serializer specified as a serializable, ready-made instance. */
    INSTANCE
}
    /** The registered class. Never {@code null}; all constructors check this. */
    private final Class<?> registeredClass;
    /**
     * Class of the serializer to use for the registered class. Exists only if the serializer
     * definition type is {@link SerializerDefinitionType#CLASS}.
     */
    @Nullable private final Class<? extends Serializer<?>> serializerClass;
    /**
     * A serializable instance of the serializer to use for the registered class. Exists only if the
     * serializer definition type is {@link SerializerDefinitionType#INSTANCE}.
     */
    @Nullable
    private final SerializableSerializer<? extends Serializer<?>> serializableSerializerInstance;
    /** Discriminator telling which serializer definition (if any) applies. */
    private final SerializerDefinitionType serializerDefinitionType;
    /**
     * Registers a class without any specific serializer; Kryo's default behavior applies.
     *
     * @param registeredClass the class to register, not null
     */
    public KryoRegistration(Class<?> registeredClass) {
        this.registeredClass = Preconditions.checkNotNull(registeredClass);
        this.serializerClass = null;
        this.serializableSerializerInstance = null;
        this.serializerDefinitionType = SerializerDefinitionType.UNSPECIFIED;
    }
    /**
     * Registers a class with a serializer given by its class; instances are created
     * reflectively when {@link #getSerializer(Kryo)} is called.
     *
     * @param registeredClass the class to register, not null
     * @param serializerClass the serializer's class, not null
     */
    public KryoRegistration(
            Class<?> registeredClass, Class<? extends Serializer<?>> serializerClass) {
        this.registeredClass = Preconditions.checkNotNull(registeredClass);
        this.serializerClass = Preconditions.checkNotNull(serializerClass);
        this.serializableSerializerInstance = null;
        this.serializerDefinitionType = SerializerDefinitionType.CLASS;
    }
    /**
     * Registers a class with a pre-built, serializable serializer instance.
     *
     * @param registeredClass the class to register, not null
     * @param serializableSerializerInstance the serializer instance wrapper, not null
     */
    public KryoRegistration(
            Class<?> registeredClass,
            SerializableSerializer<? extends Serializer<?>> serializableSerializerInstance) {
        this.registeredClass = Preconditions.checkNotNull(registeredClass);
        this.serializerClass = null;
        this.serializableSerializerInstance =
                Preconditions.checkNotNull(serializableSerializerInstance);
        this.serializerDefinitionType = SerializerDefinitionType.INSTANCE;
    }
    /** @return the class this registration is for; never {@code null}. */
    public Class<?> getRegisteredClass() {
        return registeredClass;
    }
    /** @return how the serializer for the registered class is defined (if at all). */
    public SerializerDefinitionType getSerializerDefinitionType() {
        return serializerDefinitionType;
    }
    /** @return the serializer class, or {@code null} unless the definition type is CLASS. */
    @Nullable
    public Class<? extends Serializer<?>> getSerializerClass() {
        return serializerClass;
    }
    /** @return the serializer instance, or {@code null} unless the definition type is INSTANCE. */
    @Nullable
    public SerializableSerializer<? extends Serializer<?>> getSerializableSerializerInstance() {
        return serializableSerializerInstance;
    }
public Serializer<?> getSerializer(Kryo kryo) {
switch (serializerDefinitionType) {
case UNSPECIFIED:
return null;
case CLASS:
return ReflectionSerializerFactory.newSerializer(
kryo, serializerClass, registeredClass);
case INSTANCE:
return serializableSerializerInstance.getSerializer();
default:
// this should not happen; adding as a guard for the future
throw new IllegalStateException(
"Unrecognized Kryo registration serializer definition type: "
+ serializerDefinitionType);
}
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof KryoRegistration) {
KryoRegistration other = (KryoRegistration) obj;
// we cannot include the serializer instances here because they don't implement the
// equals method
return serializerDefinitionType == other.serializerDefinitionType
&& registeredClass == other.registeredClass
&& serializerClass == other.serializerClass;
} else {
return false;
}
}
    /**
     * Hash code built from the definition type, the registered class, and (when present) the
     * serializer class -- the same fields {@link #equals(Object)} compares, so the contract holds.
     */
    @Override
    public int hashCode() {
        int result = serializerDefinitionType.hashCode();
        result = 31 * result + registeredClass.hashCode();
        if (serializerClass != null) {
            result = 31 * result + serializerClass.hashCode();
        }
        return result;
    }
}
|
SerializerDefinitionType
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/core/NamedParameterUtils.java
|
{
"start": 14042,
"end": 15194
}
|
class ____ {
private final BindMarkers bindMarkers;
private final boolean identifiable;
private final Map<String, List<NamedParameter>> references = new TreeMap<>();
NamedParameters(BindMarkersFactory factory) {
this.bindMarkers = factory.create();
this.identifiable = factory.identifiablePlaceholders();
}
/**
* Get the {@link NamedParameter} identified by {@code namedParameter}.
* Parameter objects get created if they do not yet exist.
* @param namedParameter the parameter name
* @return the named parameter
*/
NamedParameter getOrCreate(String namedParameter) {
List<NamedParameter> reference = this.references.computeIfAbsent(
namedParameter, key -> new ArrayList<>());
if (reference.isEmpty()) {
NamedParameter param = new NamedParameter(namedParameter);
reference.add(param);
return param;
}
if (this.identifiable) {
return reference.get(0);
}
NamedParameter param = new NamedParameter(namedParameter);
reference.add(param);
return param;
}
@Nullable List<NamedParameter> getMarker(String name) {
return this.references.get(name);
}
|
NamedParameters
|
java
|
google__truth
|
extensions/proto/src/main/java/com/google/common/truth/extensions/proto/IterableOfProtosFluentAssertion.java
|
{
"start": 1429,
"end": 21132
}
|
interface ____<M extends Message>
extends IterableOfProtosUsingCorrespondence<M> {
/**
* Specifies that the 'has' bit of individual fields should be ignored when comparing for
* equality.
*
* <p>For version 2 Protocol Buffers, this setting determines whether two protos with the same
* value for a field compare equal if one explicitly sets the value, and the other merely
* implicitly uses the schema-defined default. This setting also determines whether unknown fields
* should be considered in the comparison. By {@code ignoringFieldAbsence()}, unknown fields are
* ignored, and value-equal fields as specified above are considered equal.
*
* <p>For version 3 Protocol Buffers, this setting does not affect primitive fields, because their
* default value is indistinguishable from unset.
*/
IterableOfProtosFluentAssertion<M> ignoringFieldAbsence();
/**
* Specifies that the 'has' bit of these explicitly specified top-level field numbers should be
* ignored when comparing for equality. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if they are to be ignored as well.
*
* <p>Use {@link #ignoringFieldAbsence()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsence() for details
*/
IterableOfProtosFluentAssertion<M> ignoringFieldAbsenceOfFields(
int firstFieldNumber, int... rest);
/**
* Specifies that the 'has' bit of these explicitly specified top-level field numbers should be
* ignored when comparing for equality. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if they are to be ignored as well.
*
* <p>Use {@link #ignoringFieldAbsence()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsence() for details
*/
IterableOfProtosFluentAssertion<M> ignoringFieldAbsenceOfFields(Iterable<Integer> fieldNumbers);
/**
* Specifies that the 'has' bit of these explicitly specified field descriptors should be ignored
* when comparing for equality. Sub-fields must be specified explicitly if they are to be ignored
* as well.
*
* <p>Use {@link #ignoringFieldAbsence()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsence() for details
*/
IterableOfProtosFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that the 'has' bit of these explicitly specified field descriptors should be ignored
* when comparing for equality. Sub-fields must be specified explicitly if they are to be ignored
* as well.
*
* <p>Use {@link #ignoringFieldAbsence()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsence() for details
*/
IterableOfProtosFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptors(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Specifies that the ordering of repeated fields, at all levels, should be ignored when comparing
* for equality.
*
* <p>This setting applies to all repeated fields recursively, but it does not ignore structure.
* For example, with {@link #ignoringRepeatedFieldOrder()}, a repeated {@code int32} field {@code
* bar}, set inside a repeated message field {@code foo}, the following protos will all compare
* equal:
*
* <pre>{@code
* message1: {
* foo: {
* bar: 1
* bar: 2
* }
* foo: {
* bar: 3
* bar: 4
* }
* }
*
* message2: {
* foo: {
* bar: 2
* bar: 1
* }
* foo: {
* bar: 4
* bar: 3
* }
* }
*
* message3: {
* foo: {
* bar: 4
* bar: 3
* }
* foo: {
* bar: 2
* bar: 1
* }
* }
* }</pre>
*
* <p>However, the following message will compare equal to none of these:
*
* <pre>{@code
* message4: {
* foo: {
* bar: 1
* bar: 3
* }
* foo: {
* bar: 2
* bar: 4
* }
* }
* }</pre>
*
* <p>This setting does not apply to map fields, for which field order is always ignored. The
* serialization order of map fields is undefined, and it may change from runtime to runtime.
*/
IterableOfProtosFluentAssertion<M> ignoringRepeatedFieldOrder();
/**
* Specifies that the ordering of repeated fields for these explicitly specified top-level field
* numbers should be ignored when comparing for equality. Sub-fields must be specified explicitly
* (via {@link FieldDescriptor}) if their orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrder()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrder() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringRepeatedFieldOrderOfFields(
int firstFieldNumber, int... rest);
/**
* Specifies that the ordering of repeated fields for these explicitly specified top-level field
* numbers should be ignored when comparing for equality. Sub-fields must be specified explicitly
* (via {@link FieldDescriptor}) if their orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrder()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrder() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringRepeatedFieldOrderOfFields(
Iterable<Integer> fieldNumbers);
/**
* Specifies that the ordering of repeated fields for these explicitly specified field descriptors
* should be ignored when comparing for equality. Sub-fields must be specified explicitly if their
* orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrder()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrder() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that the ordering of repeated fields for these explicitly specified field descriptors
* should be ignored when comparing for equality. Sub-fields must be specified explicitly if their
* orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrder()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrder() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldDescriptors(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Specifies that, for all repeated and map fields, any elements in the 'actual' proto which are
* not found in the 'expected' proto are ignored, with the exception of fields in the expected
* proto which are empty. To ignore empty repeated fields as well, use {@link
* #comparingExpectedFieldsOnly}.
*
* <p>This rule is applied independently from {@link #ignoringRepeatedFieldOrder}. If ignoring
* repeated field order AND extra repeated field elements, all that is tested is that the expected
* elements comprise a subset of the actual elements. If not ignoring repeated field order, but
* still ignoring extra repeated field elements, the actual elements must contain a subsequence
* that matches the expected elements for the test to pass. (The subsequence rule does not apply
* to Map fields, which are always compared by key.)
*/
IterableOfProtosFluentAssertion<M> ignoringExtraRepeatedFieldElements();
/**
* Specifies that extra repeated field elements for these explicitly specified top-level field
* numbers should be ignored. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if their extra elements are to be ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElements()} instead to ignore these for all fields.
*
* @see #ignoringExtraRepeatedFieldElements() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFields(
int firstFieldNumber, int... rest);
/**
* Specifies that extra repeated field elements for these explicitly specified top-level field
* numbers should be ignored. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if their extra elements are to be ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElements()} instead to ignore these for all fields.
*
* @see #ignoringExtraRepeatedFieldElements() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFields(
Iterable<Integer> fieldNumbers);
/**
* Specifies that extra repeated field elements for these explicitly specified field descriptors
* should be ignored. Sub-fields must be specified explicitly if their extra elements are to be
* ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElements()} instead to ignore these for all fields.
*
* @see #ignoringExtraRepeatedFieldElements() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that extra repeated field elements for these explicitly specified field descriptors
* should be ignored. Sub-fields must be specified explicitly if their extra elements are to be
* ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElements()} instead to ignore these for all fields.
*
* @see #ignoringExtraRepeatedFieldElements() for details.
*/
IterableOfProtosFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldDescriptors(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Compares double fields as equal if they are both finite and their absolute difference is less
* than or equal to {@code tolerance}.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingDoubleTolerance(double tolerance);
/**
* Compares double fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingDoubleToleranceForFields(
double tolerance, int firstFieldNumber, int... rest);
/**
* Compares double fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingDoubleToleranceForFields(
double tolerance, Iterable<Integer> fieldNumbers);
/**
* Compares double fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingDoubleToleranceForFieldDescriptors(
double tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Compares double fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingDoubleToleranceForFieldDescriptors(
double tolerance, Iterable<FieldDescriptor> fieldDescriptors);
/**
* Compares float fields as equal if they are both finite and their absolute difference is less
* than or equal to {@code tolerance}.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingFloatTolerance(float tolerance);
/**
* Compares float fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingFloatToleranceForFields(
float tolerance, int firstFieldNumber, int... rest);
/**
* Compares float fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingFloatToleranceForFields(
float tolerance, Iterable<Integer> fieldNumbers);
/**
* Compares float fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingFloatToleranceForFieldDescriptors(
float tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Compares float fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
IterableOfProtosFluentAssertion<M> usingFloatToleranceForFieldDescriptors(
float tolerance, Iterable<FieldDescriptor> fieldDescriptors);
/**
* Limits the comparison of Protocol buffers to the fields set in the expected proto(s). When
* multiple protos are specified, the comparison is limited to the union of set fields in all the
* expected protos.
*
* <p>The "expected proto(s)" are those passed to the method in {@link
* IterableOfProtosUsingCorrespondence} at the end of the call-chain.
*
* <p>Fields not set in the expected proto(s) are ignored. In particular, proto3 fields which have
* their default values are ignored, as these are indistinguishable from unset fields. If you want
* to assert that a proto3 message has certain fields with default values, you cannot use this
* method.
*/
IterableOfProtosFluentAssertion<M> comparingExpectedFieldsOnly();
/**
* Limits the comparison of Protocol buffers to the defined {@link FieldScope}.
*
* <p>This method is additive and has well-defined ordering semantics. If the invoking {@link
* IterableOfProtosFluentAssertion} is already scoped to a {@link FieldScope} {@code X}, and this
* method is invoked with {@link FieldScope} {@code Y}, the resultant {@link
* IterableOfProtosFluentAssertion} is constrained to the intersection of {@link FieldScope}s
* {@code X} and {@code Y}.
*
* <p>By default, {@link IterableOfProtosFluentAssertion} is constrained to {@link
* FieldScopes#all()}, that is, no fields are excluded from comparison.
*/
IterableOfProtosFluentAssertion<M> withPartialScope(FieldScope fieldScope);
/**
* Excludes the top-level message fields with the given tag numbers from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* numbers are ignored, and all sub-messages of type {@code M} will also have these field numbers
* ignored.
*
* <p>If an invalid field number is supplied, the terminal comparison operation will throw a
* runtime exception.
*/
IterableOfProtosFluentAssertion<M> ignoringFields(int firstFieldNumber, int... rest);
/**
* Excludes the top-level message fields with the given tag numbers from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* numbers are ignored, and all sub-messages of type {@code M} will also have these field numbers
* ignored.
*
* <p>If an invalid field number is supplied, the terminal comparison operation will throw a
* runtime exception.
*/
IterableOfProtosFluentAssertion<M> ignoringFields(Iterable<Integer> fieldNumbers);
/**
* Excludes all message fields matching the given {@link FieldDescriptor}s from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* descriptors are ignored, no matter where they occur in the tree.
*
* <p>If a field descriptor which does not, or cannot occur in the proto structure is supplied, it
* is silently ignored.
*/
IterableOfProtosFluentAssertion<M> ignoringFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Excludes all message fields matching the given {@link FieldDescriptor}s from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* descriptors are ignored, no matter where they occur in the tree.
*
* <p>If a field descriptor which does not, or cannot occur in the proto structure is supplied, it
* is silently ignored.
*/
IterableOfProtosFluentAssertion<M> ignoringFieldDescriptors(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Excludes all specific field paths under the argument {@link FieldScope} from the comparison.
*
* <p>This method is additive and has well-defined ordering semantics. If the invoking {@link
* IterableOfProtosFluentAssertion} is already scoped to a {@link FieldScope} {@code X}, and this
* method is invoked with {@link FieldScope} {@code Y}, the resultant {@link
* IterableOfProtosFluentAssertion} is constrained to the subtraction of {@code X - Y}.
*
* <p>By default, {@link IterableOfProtosFluentAssertion} is constrained to {@link
* FieldScopes#all()}, that is, no fields are excluded from comparison.
*/
IterableOfProtosFluentAssertion<M> ignoringFieldScope(FieldScope fieldScope);
/**
* If set, in the event of a comparison failure, the error message printed will list only those
* specific fields that did not match between the actual and expected values. Useful for very
* large protocol buffers.
*
* <p>This a purely cosmetic setting, and it has no effect on the behavior of the test.
*/
IterableOfProtosFluentAssertion<M> reportingMismatchesOnly();
/**
* Specifies the {@link TypeRegistry} and {@link ExtensionRegistry} to use for {@link
* com.google.protobuf.Any Any} messages.
*
* <p>To compare the value of an {@code Any} message, ProtoTruth looks in the given type registry
* for a descriptor for the message's type URL:
*
* <ul>
* <li>If ProtoTruth finds a descriptor, it unpacks the value and compares it against the
* expected value, respecting any configuration methods used for the assertion.
* <li>If ProtoTruth does not find a descriptor (or if the value can't be deserialized with the
* descriptor), it compares the raw, serialized bytes of the expected and actual values.
* </ul>
*
* <p>When ProtoTruth unpacks a value, it is parsing a serialized proto. That proto may contain
* extensions. To look up those extensions, ProtoTruth uses the provided {@link
* ExtensionRegistry}.
*
* @since 1.1
*/
IterableOfProtosFluentAssertion<M> unpackingAnyUsing(
TypeRegistry typeRegistry, ExtensionRegistry extensionRegistry);
/**
* @deprecated Do not call {@code equals()} on a {@code IterableOfProtosFluentAssertion}.
* @see com.google.common.truth.Subject#equals(Object)
*/
@Override
@Deprecated
boolean equals(@Nullable Object o);
/**
* @deprecated {@code IterableOfProtosFluentAssertion} does not support {@code hashCode()}.
* @see com.google.common.truth.Subject#hashCode()
*/
@Override
@Deprecated
int hashCode();
}
|
IterableOfProtosFluentAssertion
|
java
|
spring-projects__spring-boot
|
module/spring-boot-amqp/src/main/java/org/springframework/boot/amqp/autoconfigure/ConnectionFactoryCustomizer.java
|
{
"start": 963,
"end": 1150
}
|
interface ____ {
/**
* Customize the {@link ConnectionFactory}.
* @param factory the factory to customize
*/
void customize(ConnectionFactory factory);
}
|
ConnectionFactoryCustomizer
|
java
|
quarkusio__quarkus
|
independent-projects/bootstrap/core/src/main/java/io/quarkus/bootstrap/app/SbomResult.java
|
{
"start": 70,
"end": 1091
}
|
class ____ {
private final Path sbomFile;
private final String sbomSpec;
private final String sbomSpecVersion;
private final String format;
private final String classifier;
private final Path appRunner;
public SbomResult(Path sbomFile, String sbomSpec, String sbomSpecVersion, String format, String classifier,
Path appRunner) {
this.sbomFile = sbomFile;
this.sbomSpec = sbomSpec;
this.sbomSpecVersion = sbomSpecVersion;
this.format = format;
this.classifier = classifier;
this.appRunner = appRunner;
}
public Path getSbomFile() {
return sbomFile;
}
public String getSbomSpec() {
return sbomSpec;
}
public String getSbomSpecVersion() {
return sbomSpecVersion;
}
public String getFormat() {
return format;
}
public String getClassifier() {
return classifier;
}
public Path getApplicationRunner() {
return appRunner;
}
}
|
SbomResult
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/semantics/CustomSemanticsTest.java
|
{
"start": 1333,
"end": 4820
}
|
class ____ {
@Test
public void verifyModel(DomainModelScope scope) {
scope.withHierarchy( TheEntityWithUniqueList.class, (entityDescriptor) -> {
final Property strings = entityDescriptor.getProperty( "strings" );
final org.hibernate.mapping.Collection collectionDescriptor = (org.hibernate.mapping.Collection) strings.getValue();
assertThat( collectionDescriptor.getCollectionSemantics() ).isInstanceOf( CustomCollectionTypeSemantics.class );
final CustomCollectionTypeSemantics semantics = (CustomCollectionTypeSemantics) collectionDescriptor.getCollectionSemantics();
assertThat( semantics.getCollectionType() ).isInstanceOf( CustomCollectionType.class );
final CustomCollectionType collectionType = (CustomCollectionType) semantics.getCollectionType();
assertThat( collectionType.getUserType() ).isInstanceOf( UniqueListType.class );
} );
scope.withHierarchy( TheEntityWithUniqueListRegistration.class, (entityDescriptor) -> {
final Property strings = entityDescriptor.getProperty( "strings" );
final org.hibernate.mapping.Collection collectionDescriptor = (org.hibernate.mapping.Collection) strings.getValue();
assertThat( collectionDescriptor.getCollectionSemantics() ).isInstanceOf( CustomCollectionTypeSemantics.class );
final CustomCollectionTypeSemantics semantics = (CustomCollectionTypeSemantics) collectionDescriptor.getCollectionSemantics();
assertThat( semantics.getCollectionType() ).isInstanceOf( CustomCollectionType.class );
final CustomCollectionType collectionType = (CustomCollectionType) semantics.getCollectionType();
assertThat( collectionType.getUserType() ).isInstanceOf( UniqueListType.class );
} );
}
@Test
public void testBasicUsage(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
final TheEntityWithUniqueList entity = new TheEntityWithUniqueList( 1, "first" );
entity.setStrings( new ArrayList<>() );
entity.getStrings().add( "the string" );
entity.getStrings().add( "another" );
session.persist( entity );
} );
scope.inTransaction( (session) -> {
final TheEntityWithUniqueList loaded = session.createQuery( "from TheEntityWithUniqueList", TheEntityWithUniqueList.class ).uniqueResult();
// try to re-add one, should throw IllegalArgumentException
try {
loaded.getStrings().add( "another" );
fail( "Expecting IllegalArgumentException" );
}
catch (IllegalArgumentException expected) {
// expected outcome
}
} );
}
@Test
public void testBasicRegistrationUsage(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
final TheEntityWithUniqueListRegistration entity = new TheEntityWithUniqueListRegistration( 1, "first" );
entity.setStrings( new ArrayList<>() );
entity.getStrings().add( "the string" );
entity.getStrings().add( "another" );
session.persist( entity );
} );
scope.inTransaction( (session) -> {
final TheEntityWithUniqueListRegistration loaded = session.createQuery( "from TheEntityWithUniqueListRegistration", TheEntityWithUniqueListRegistration.class ).uniqueResult();
// try to re-add one, should throw IllegalArgumentException
try {
loaded.getStrings().add( "another" );
fail( "Expecting IllegalArgumentException" );
}
catch (IllegalArgumentException expected) {
// expected outcome
}
} );
}
@AfterEach
public void cleanupTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
}
|
CustomSemanticsTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java
|
{
"start": 873,
"end": 1199
}
|
interface ____ {@link ClusterStateUpdateTask} that allows the caller to be notified after the master has
* computed, published, accepted, committed, and applied the cluster state update AND only after the rest of the nodes
* (or a specified subset) have also accepted and applied the cluster state update.
*/
public abstract
|
to
|
java
|
quarkusio__quarkus
|
integration-tests/mongodb-panache/src/main/java/io/quarkus/it/mongodb/panache/bugs/Bug5885EntityRepository.java
|
{
"start": 184,
"end": 266
}
|
class ____ extends Bug5885AbstractRepository<PersonEntity> {
}
|
Bug5885EntityRepository
|
java
|
hibernate__hibernate-orm
|
hibernate-c3p0/src/main/java/org/hibernate/c3p0/internal/StrategyRegistrationProviderImpl.java
|
{
"start": 645,
"end": 1165
}
|
class ____ implements StrategyRegistrationProvider {
@Override
public Iterable<StrategyRegistration<?>> getStrategyRegistrations() {
return singleton( new SimpleStrategyRegistrationImpl<>(
ConnectionProvider.class,
C3P0ConnectionProvider.class,
"c3p0",
C3P0ConnectionProvider.class.getSimpleName(),
// legacy
"org.hibernate.connection.C3P0ConnectionProvider",
// legacy
"org.hibernate.service.jdbc.connections.internal.C3P0ConnectionProvider"
) );
}
}
|
StrategyRegistrationProviderImpl
|
java
|
qos-ch__slf4j
|
slf4j-api/src/test/java/org/slf4j/LoggerFactoryTest.java
|
{
"start": 322,
"end": 2518
}
|
class ____ {
private PrintStream rawSyserr;
private ByteArrayOutputStream mockedSyserr;
final ClassLoader classLoaderOfLoggerFactory = LoggerFactory.class.getClassLoader();
@Before
public void setUp() {
rawSyserr = System.err;
mockedSyserr = new ByteArrayOutputStream();
System.setErr(new PrintStream(mockedSyserr));
}
@After
public void cleanUp() {
System.clearProperty(LoggerFactory.PROVIDER_PROPERTY_KEY);
System.setErr(rawSyserr);
}
@Test
public void testExplicitlySpecified() {
System.setProperty(LoggerFactory.PROVIDER_PROPERTY_KEY, "org.slf4j.LoggerFactoryTest$TestingProvider");
SLF4JServiceProvider provider = LoggerFactory.loadExplicitlySpecified(classLoaderOfLoggerFactory);
assertTrue("provider should be instance of TestingProvider class", provider instanceof TestingProvider);
assertTrue(mockedSyserr.toString().contains(" Attempting to load provider \"org.slf4j.LoggerFactoryTest$TestingProvider\" specified via \"slf4j.provider\" system property"));
System.out.println(mockedSyserr.toString());
}
@Test
public void testExplicitlySpecifiedNull() {
assertNull(LoggerFactory.loadExplicitlySpecified(classLoaderOfLoggerFactory));
}
@Test
public void testExplicitlySpecifyMissingServiceProvider() {
System.setProperty(LoggerFactory.PROVIDER_PROPERTY_KEY, "com.example.ServiceProvider");
SLF4JServiceProvider provider = LoggerFactory.loadExplicitlySpecified(classLoaderOfLoggerFactory);
assertNull(provider);
assertTrue(mockedSyserr.toString().contains("Failed to instantiate the specified SLF4JServiceProvider (com.example.ServiceProvider)"));
}
@Test
public void testExplicitlySpecifyNonServiceProvider() {
System.setProperty(LoggerFactory.PROVIDER_PROPERTY_KEY, "java.lang.String");
assertNull(LoggerFactory.loadExplicitlySpecified(classLoaderOfLoggerFactory));
assertTrue(mockedSyserr.toString().contains("Specified SLF4JServiceProvider (java.lang.String) does not implement SLF4JServiceProvider interface"));
}
public static
|
LoggerFactoryTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java
|
{
"start": 3544,
"end": 3975
}
|
interface ____");
});
}
@Test public void duplicateExplicitBindings_TwoProvidesMethods() {
Source component =
CompilerTests.javaSource(
"test.Outer",
"package test;",
"",
"import dagger.Component;",
"import dagger.Module;",
"import dagger.Provides;",
"import javax.inject.Inject;",
"",
"final
|
Child
|
java
|
elastic__elasticsearch
|
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedValueCountAggregator.java
|
{
"start": 1547,
"end": 3656
}
|
class ____ extends NumericMetricsAggregator.SingleValue {
final HistogramValuesSource.Histogram valuesSource;
/** Count per bucket */
LongArray counts;
public HistoBackedValueCountAggregator(
String name,
ValuesSourceConfig valuesSourceConfig,
AggregationContext aggregationContext,
Aggregator parent,
Map<String, Object> metadata
) throws IOException {
super(name, aggregationContext, parent, metadata);
assert valuesSourceConfig.hasValues();
this.valuesSource = (HistogramValuesSource.Histogram) valuesSourceConfig.getValuesSource();
counts = bigArrays().newLongArray(1, true);
}
@Override
public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, final LeafBucketCollector sub) throws IOException {
final HistogramValues values = valuesSource.getHistogramValues(aggCtx.getLeafReaderContext());
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
counts = bigArrays().grow(counts, bucket + 1);
if (values.advanceExact(doc)) {
final HistogramValue sketch = values.histogram();
while (sketch.next()) {
counts.increment(bucket, sketch.count());
}
}
}
};
}
@Override
public double metric(long owningBucketOrd) {
return owningBucketOrd >= counts.size() ? 0 : counts.get(owningBucketOrd);
}
@Override
public InternalAggregation buildAggregation(long bucket) {
if (bucket >= counts.size()) {
return buildEmptyAggregation();
}
return new InternalValueCount(name, counts.get(bucket), metadata());
}
@Override
public InternalAggregation buildEmptyAggregation() {
return InternalValueCount.empty(name, metadata());
}
@Override
public void doClose() {
Releasables.close(counts);
}
}
|
HistoBackedValueCountAggregator
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/env/ComputePlatform.java
|
{
"start": 693,
"end": 1183
}
|
enum ____ {
/**
* Google Compute Platform.
*/
GOOGLE_COMPUTE,
/**
* Amazon EC2.
*/
AMAZON_EC2,
/**
* Microsoft Azure.
*/
AZURE,
/**
* Oracle Cloud.
*/
ORACLE_CLOUD,
/**
* Digital Ocean.
*/
DIGITAL_OCEAN,
/**
* Cloud or non cloud provider on bare metal (unknown).
*/
BARE_METAL,
/**
* IBM Cloud.
*/
IBM,
/**
* Other.
*/
OTHER
}
|
ComputePlatform
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1200/Issue1262.java
|
{
"start": 420,
"end": 550
}
|
class ____ {
public Map<String, Chatter> chatterMap = new ConcurrentHashMap<String, Chatter>();
}
public static
|
Model
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/enum_/EnumFieldsTest.java
|
{
"start": 208,
"end": 570
}
|
class ____ extends TestCase {
public void test_enum() throws Exception {
Model model = new Model();
model.t1 = Type.A;
model.t2 = null;
String text = JSON.toJSONString(model, SerializerFeature.WriteMapNullValue);
Assert.assertEquals("{\"t1\":\"A\",\"t2\":null}", text);
}
public static
|
EnumFieldsTest
|
java
|
apache__maven
|
api/maven-api-spi/src/main/java/org/apache/maven/api/spi/PropertyContributor.java
|
{
"start": 1292,
"end": 2359
}
|
interface ____ extends SpiService {
/**
* Invoked just before session is created with a mutable map that carries collected user properties so far.
*
* @param userProperties The mutable user properties, never {@code null}.
* @see #contribute(ProtoSession)
*/
default void contribute(Map<String, String> userProperties) {}
/**
* Invoked just before session is created with proto session instance. The proto session contains user and
* system properties collected so far, along with other information. This method should return altered
* (contributions applied) user properties, not only the "new" or "added" properties!
*
* @param protoSession The proto session, never {@code null}.
* @return The user properties with contributions.
*/
default Map<String, String> contribute(ProtoSession protoSession) {
HashMap<String, String> userProperties = new HashMap<>(protoSession.getUserProperties());
contribute(userProperties);
return userProperties;
}
}
|
PropertyContributor
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
|
{
"start": 2178,
"end": 2537
}
|
class ____ {@link IntervalsSource}
*
* Built-in sources include {@link Match}, which analyzes a text string and converts it
* to a proximity source (phrase, ordered or unordered depending on how
* strict the matching should be); {@link Combine}, which allows proximity queries
* between different sub-sources; and {@link Disjunction}.
*/
public abstract
|
for
|
java
|
quarkusio__quarkus
|
extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/binder/mpmetrics/MpMetricsRegistryProducer.java
|
{
"start": 335,
"end": 1288
}
|
class ____ {
@Produces
@Singleton
public MetricRegistryAdapter produceRegistry(MeterRegistry registry) {
return MpMetricsRecorder.getRegistry(MetricRegistry.Type.APPLICATION);
}
@Produces
@Singleton
@RegistryType(type = MetricRegistry.Type.APPLICATION)
public MetricRegistryAdapter produceApplicationRegistry(MeterRegistry registry) {
return MpMetricsRecorder.getRegistry(MetricRegistry.Type.APPLICATION);
}
@Produces
@Singleton
@RegistryType(type = MetricRegistry.Type.BASE)
public MetricRegistry produceBaseRegistry(MeterRegistry registry) {
return MpMetricsRecorder.getRegistry(MetricRegistry.Type.BASE);
}
@Produces
@Singleton
@RegistryType(type = MetricRegistry.Type.VENDOR)
public MetricRegistry produceVendorRegistry(MeterRegistry registry) {
return MpMetricsRecorder.getRegistry(MetricRegistry.Type.VENDOR);
}
}
|
MpMetricsRegistryProducer
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
|
{
"start": 2249,
"end": 2499
}
|
class ____<KTYPE, VTYPE> extends Reducer<KTYPE, VTYPE, KTYPE, VTYPE> {
public void reduce(KTYPE key, VTYPE value, Context context)
throws IOException, InterruptedException {
context.write(key, value);
}
}
public static
|
KVMReducer
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/ext/QNameSerializer.java
|
{
"start": 532,
"end": 2524
}
|
class ____
extends StdSerializer<QName>
{
public final static ValueSerializer<?> instance = new QNameSerializer();
public QNameSerializer() {
super(QName.class);
}
@Override
public ValueSerializer<?> createContextual(SerializationContext serializers, BeanProperty property)
{
JsonFormat.Value format = findFormatOverrides(serializers, property, handledType());
if (format != null) {
JsonFormat.Shape shape = format.getShape();
if (shape == JsonFormat.Shape.OBJECT) {
return this;
}
}
return ToStringSerializer.instance;
}
@Override
public void serialize(QName value, JsonGenerator g, SerializationContext ctxt)
{
g.writeStartObject(value);
serializeProperties(value, g, ctxt);
g.writeEndObject();
}
@Override
public final void serializeWithType(QName value, JsonGenerator g, SerializationContext ctxt,
TypeSerializer typeSer)
{
WritableTypeId typeIdDef = typeSer.writeTypePrefix(g,
ctxt, typeSer.typeId(value, JsonToken.START_OBJECT));
serializeProperties(value, g, ctxt);
typeSer.writeTypeSuffix(g, ctxt, typeIdDef);
}
private void serializeProperties(QName value, JsonGenerator g, SerializationContext ctxt)
{
g.writeStringProperty("localPart", value.getLocalPart());
if (!value.getNamespaceURI().isEmpty()) {
g.writeStringProperty("namespaceURI", value.getNamespaceURI());
}
if (!value.getPrefix().isEmpty()) {
g.writeStringProperty("prefix", value.getPrefix());
}
}
@Override
public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint)
{
/*JsonObjectFormatVisitor v =*/ visitor.expectObjectFormat(typeHint);
// TODO: would need to visit properties too, see `BeanSerializerBase`
}
}
|
QNameSerializer
|
java
|
netty__netty
|
microbench/src/main/java/io/netty/buffer/AbstractByteBufGetCharSequenceBenchmark.java
|
{
"start": 1412,
"end": 5320
}
|
enum ____ {
DIRECT {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
ByteBuf buffer = Unpooled.directBuffer(length);
buffer.writeBytes(bytes, 0, length);
return buffer;
}
},
HEAP_OFFSET {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return Unpooled.wrappedBuffer(bytes, 1, length);
}
},
HEAP {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return Unpooled.wrappedBuffer(bytes, 0, length);
}
},
POOLED_HEAP {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return PooledByteBufAllocator.DEFAULT.heapBuffer(length).writeBytes(bytes, 0, length);
}
},
POOLED_DIRECT {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return PooledByteBufAllocator.DEFAULT.directBuffer(length).writeBytes(bytes, 0, length);
}
},
ADAPTIVE_HEAP {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return ADAPTIVE_ALLOC.heapBuffer(length).writeBytes(bytes, 0, length);
}
},
ADAPTIVE_DIRECT {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
return ADAPTIVE_ALLOC.directBuffer(length).writeBytes(bytes, 0, length);
}
},
COMPOSITE {
@Override
ByteBuf newBuffer(byte[] bytes, int length) {
CompositeByteBuf buffer = Unpooled.compositeBuffer();
int offset = 0;
// 8 buffers per composite.
int capacity = length / 8;
while (length > 0) {
buffer.addComponent(true, Unpooled.wrappedBuffer(bytes, offset, Math.min(length, capacity)));
length -= capacity;
offset += capacity;
}
return buffer;
}
}
;
abstract ByteBuf newBuffer(byte[] bytes, int length);
}
@Param({
"8",
"64",
"1024",
"10240",
"1073741824"
})
public int size;
@Param({
"US-ASCII",
"ISO_8859_1",
})
public String charsetName;
@Param
public ByteBufType bufferType;
private ByteBuf buffer;
private String str;
private Charset charset;
@Override
protected String[] jvmArgs() {
// Ensure we minimize the GC overhead by sizing the heap big enough.
return new String[] { "-XX:MaxDirectMemorySize=2g", "-Xmx8g", "-Xms8g", "-Xmn6g" };
}
@Setup
public void setup() {
byte[] bytes = new byte[size + 2];
Arrays.fill(bytes, (byte) 'a');
str = new String(bytes, 0, size);
// Use an offset to not allow any optimizations because we use the exact passed in byte[] for heap buffers.
buffer = bufferType.newBuffer(bytes, size);
charset = Charset.forName(charsetName);
}
@TearDown
public void teardown() {
buffer.release();
}
@Benchmark
public int getCharSequence() {
return traverse(buffer.getCharSequence(buffer.readerIndex(), size, charset));
}
@Benchmark
public int setCharSequence() {
return buffer.setCharSequence(0, str, charset);
}
@Benchmark
public int getCharSequenceOld() {
return traverse(buffer.toString(buffer.readerIndex(), size, charset));
}
private static int traverse(CharSequence cs) {
int i = 0, len = cs.length();
while (i < len && cs.charAt(i++) != 0) {
// ensure result is "used"
}
return i;
}
}
|
ByteBufType
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_231_keywords.java
|
{
"start": 856,
"end": 1433
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT CONDITION, LOCK, MODE, OUT, REPEAT, SECOND, MICROSECOND, USE from t";
// System.out.println(sql);
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
assertEquals(1, statementList.size());
SQLStatement stmt = statementList.get(0);
// assertEquals("SELECT CONDITION\n" +
// "FROM t", stmt.toString());
}
}
|
MySqlSelectTest_231_keywords
|
java
|
quarkusio__quarkus
|
independent-projects/qute/debug/src/test/java/io/quarkus/qute/debug/data/Item.java
|
{
"start": 74,
"end": 370
}
|
class ____ {
public final BigDecimal price;
public Item(int price) {
this(new BigDecimal(price));
}
public Item(BigDecimal price) {
this.price = price;
}
@Override
public String toString() {
return "Item(" + price.intValue() + ")";
}
}
|
Item
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/FluxDelaySequence.java
|
{
"start": 1837,
"end": 4503
}
|
class ____<T> implements InnerOperator<T, T> {
final CoreSubscriber<? super T> actual;
final long delay;
final TimeUnit timeUnit;
final Scheduler.Worker w;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
volatile boolean done;
volatile long delayed;
static final AtomicLongFieldUpdater<DelaySubscriber> DELAYED =
AtomicLongFieldUpdater.newUpdater(DelaySubscriber.class, "delayed");
DelaySubscriber(CoreSubscriber<? super T> actual, Duration delay, Scheduler.Worker w) {
super();
this.actual = Operators.serialize(actual);
this.w = w;
this.delay = delay.toNanos();
this.timeUnit = TimeUnit.NANOSECONDS;
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
actual.onSubscribe(this);
}
}
@Override
public void onNext(final T t) {
if (done || delayed < 0) {
Operators.onNextDropped(t, currentContext());
return;
}
//keep track of the number of delayed onNext so that
//we can also delay onError/onComplete when an onNext
//is "in flight"
DELAYED.incrementAndGet(this);
w.schedule(() -> delayedNext(t), delay, timeUnit);
}
private void delayedNext(T t) {
//this onNext has been delayed and now processed
DELAYED.decrementAndGet(this);
actual.onNext(t);
}
@Override
public void onError(final Throwable t) {
if (done) {
Operators.onErrorDropped(t, currentContext());
return;
}
done = true;
//if no currently delayed onNext (eg. empty source),
// we can immediately error
if (DELAYED.compareAndSet(this, 0, -1)) {
actual.onError(t);
}
else {
w.schedule(new OnError(t), delay, timeUnit);
}
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
//if no currently delayed onNext (eg. empty source),
// we can immediately complete
if (DELAYED.compareAndSet(this, 0, -1)) {
actual.onComplete();
}
else {
w.schedule(new OnComplete(), delay, timeUnit);
}
}
@Override
public void request(long n) {
s.request(n);
}
@Override
public void cancel() {
s.cancel();
w.dispose();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return s;
if (key == Attr.RUN_ON) return w;
if (key == Attr.TERMINATED) return done;
if (key == Attr.CANCELLED) return w.isDisposed() && !done;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.ASYNC;
return InnerOperator.super.scanUnsafe(key);
}
final
|
DelaySubscriber
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/GeoArgs.java
|
{
"start": 5539,
"end": 6551
}
|
enum ____ implements ProtocolKeyword {
/**
* meter.
*/
m,
/**
* kilometer.
*/
km,
/**
* feet.
*/
ft,
/**
* mile.
*/
mi;
private final byte[] asBytes;
Unit() {
asBytes = name().getBytes();
}
@Override
public byte[] getBytes() {
return asBytes;
}
}
public <K, V> void build(CommandArgs<K, V> args) {
if (withdistance) {
args.add("WITHDIST");
}
if (withhash) {
args.add("WITHHASH");
}
if (withcoordinates) {
args.add("WITHCOORD");
}
if (sort != null && sort != Sort.none) {
args.add(sort.name());
}
if (count != null) {
args.add(CommandKeyword.COUNT).add(count);
if (any) {
args.add("ANY");
}
}
}
}
|
Unit
|
java
|
google__truth
|
refactorings/src/test/java/com/google/common/truth/refactorings/NamedToWithMessageTest.java
|
{
"start": 841,
"end": 1329
}
|
class ____ {
private CompilationTestHelper compilationHelper;
@Before
public void setUp() {
compilationHelper = CompilationTestHelper.newInstance(NamedToWithMessage.class, getClass());
}
@Test
public void testPositiveCase() {
compilationHelper.addSourceFile("NamedToWithMessagePositiveCases.java").doTest();
}
@Test
public void testNegativeCase() {
compilationHelper.addSourceFile("NamedToWithMessageNegativeCases.java").doTest();
}
}
|
NamedToWithMessageTest
|
java
|
apache__flink
|
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenSimpleSerializer.java
|
{
"start": 4738,
"end": 4958
}
|
enum
____.appendLine(resultVar + " = " + enumTypeStr + ".values()[0]");
appender.end("}");
appender.begin("else{");
// choose the exact
|
appender
|
java
|
elastic__elasticsearch
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java
|
{
"start": 2447,
"end": 3378
}
|
interface ____ {
void set_flags(int flags); /* IN: flags word */
void set_posmode(int posmode); /* IN: indicates offset field */
void set_offset(long offset); /* IN: start of the region */
void set_length(long length); /* IN: size of the region */
long bytesalloc(); /* OUT: number of bytes allocated */
}
FStore newFStore();
int fcntl(int fd, int cmd, FStore fst);
/**
* Open a file descriptor to connect to a socket.
*
* @param domain The socket protocol family, eg AF_UNIX
* @param type The socket type, eg SOCK_DGRAM
* @param protocol The protocol for the given protocl family, normally 0
* @return an open file descriptor, or -1 on failure with errno set
* @see <a href="https://man7.org/linux/man-pages/man2/socket.2.html">socket manpage</a>
*/
int socket(int domain, int type, int protocol);
/**
* Marker
|
FStore
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/main/java/org/springframework/web/socket/messaging/AbstractSubProtocolEvent.java
|
{
"start": 907,
"end": 1134
}
|
class ____ events for a message received from a WebSocket client and
* parsed into a higher-level sub-protocol (for example, STOMP).
*
* @author Rossen Stoyanchev
* @since 4.1
*/
@SuppressWarnings("serial")
public abstract
|
for
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportAction.java
|
{
"start": 654,
"end": 1184
}
|
class ____ extends XPackInfoFeatureTransportAction {
@Inject
public MonitoringInfoTransportAction(TransportService transportService, ActionFilters actionFilters) {
super(XPackInfoFeatureAction.MONITORING.name(), transportService, actionFilters);
}
@Override
public String name() {
return XPackField.MONITORING;
}
@Override
public boolean available() {
return true;
}
@Override
public boolean enabled() {
return true;
}
}
|
MonitoringInfoTransportAction
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/Csvout.java
|
{
"start": 1172,
"end": 2684
}
|
class ____ implements Closeable {
private final Writer out;
private final String separator;
private final String eol;
private boolean isStartOfLine = true;
/**
* Instantiate.
* @param out output stream.
* @param separator field separator.
* @param eol end of line sequence
*/
public Csvout(final Writer out,
final String separator,
final String eol) {
this.out = out;
this.separator = separator;
this.eol = eol;
}
/**
* Close the output stream.
* @throws IOException IO failure.
*/
@Override
public void close() throws IOException {
out.close();
}
/**
* Write a single object's string value.
* @param o object to write.
* @return this instance
* @throws IOException IO failure.
*/
public Csvout write(Object o) throws IOException {
if (isStartOfLine) {
isStartOfLine = false;
} else {
out.write(separator);
}
out.write(o.toString());
return this;
}
/**
* Write a newline.
* @return this instance
* @throws IOException IO failure.
*/
public Csvout newline() throws IOException {
out.write(eol);
isStartOfLine = true;
return this;
}
/**
* Write a collection of objects.
* @param objects varags list of objects to write
* @return this instance.
* @throws IOException IO failure.
*/
public Csvout write(Object... objects) throws IOException {
for (Object object : objects) {
write(object);
}
return this;
}
}
|
Csvout
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/access/jpa/Horse.java
|
{
"start": 256,
"end": 416
}
|
class ____ extends Animal {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
Horse
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RPermitExpirableSemaphoreAsync.java
|
{
"start": 718,
"end": 1112
}
|
interface ____ Semaphore object with lease time parameter support for each acquired permit.
*
* <p>Each permit identified by own id and could be released only using its id.
* Permit id is a 128-bits unique random identifier generated each time during acquiring.
*
* <p>Works in non-fair mode. Therefore order of acquiring is unpredictable.
*
* @author Nikita Koksharov
*
*/
public
|
for
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/GeneratedLines.java
|
{
"start": 831,
"end": 2409
}
|
class ____ {
private static final String DAGGER_GENERATED_ANNOTATION = "@DaggerGenerated";
private static final String GENERATED_ANNOTATION =
"@Generated("
+ "value = \"dagger.internal.codegen.ComponentProcessor\", "
+ "comments = \"https://dagger.dev\")";
private static final String SUPPRESS_WARNINGS_ANNOTATION =
"@SuppressWarnings({"
+ "\"unchecked\", \"rawtypes\", \"KotlinInternal\", \"KotlinInternalInJava\", \"cast\", "
+ "\"deprecation\","
+ "\"nullness:initialization.field.uninitialized\""
+ "})";
private static final String IMPORT_DAGGER_GENERATED = "import dagger.internal.DaggerGenerated;";
private static final String IMPORT_GENERATED_ANNOTATION =
isBeforeJava9()
? "import javax.annotation.Generated;"
: "import javax.annotation.processing.Generated;";
/** Returns a {@code String} of sorted imports. Includes generated imports automatically. */
public static String generatedImports(String... extraImports) {
return ImmutableSet.<String>builder()
.add(IMPORT_DAGGER_GENERATED)
.add(IMPORT_GENERATED_ANNOTATION)
.add(extraImports)
.build()
.stream()
.sorted()
.collect(Collectors.joining("\n"));
}
/** Returns the annotations for a generated class. */
public static String generatedAnnotations() {
return Joiner.on('\n')
.join(DAGGER_GENERATED_ANNOTATION, GENERATED_ANNOTATION, SUPPRESS_WARNINGS_ANNOTATION);
}
/** Returns the annotations for a generated
|
GeneratedLines
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CheckReturnValueTest.java
|
{
"start": 5169,
"end": 5213
}
|
class ____ extends MyObject {}
|
MySubObject1
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/NonFinalCompileTimeConstantTest.java
|
{
"start": 872,
"end": 1277
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(NonFinalCompileTimeConstant.class, getClass());
@Test
public void positive() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.CompileTimeConstant;
public
|
NonFinalCompileTimeConstantTest
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/multipart/FileUpload.java
|
{
"start": 916,
"end": 2200
}
|
interface ____ {
/**
* Gets the content type of this part.
*
* @return The content type of this part.
*/
Optional<MediaType> getContentType();
/**
* Gets the name of this part.
*
* @return The name of this part
*/
String getName();
/**
* Gets the name of this part.
*
* @return The name of this part
*/
String getFilename();
/**
* Returns the size of the part.
*
* @return The size of this part, in bytes.
*/
long getSize();
/**
* Returns the defined content length of the part.
*
* @return The content length of this part, in bytes.
*/
long getDefinedSize();
/**
* Returns whether the {@link FileUpload} has been fully uploaded or is in a partial state.
*
* @return True if the part is fully uploaded
*/
boolean isComplete();
/**
* Discards the contents of the file. This must be called
* if the file will not be read and has not already been read.
*
* Failure to either read or discard the file will result in
* memory leaks!
*
* @since 2.4.0
*/
default void discard() {
throw new UnsupportedOperationException("Discard not supported");
}
}
|
FileUpload
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/context/support/ServletContextScope.java
|
{
"start": 1975,
"end": 3703
}
|
class ____ implements Scope, DisposableBean {
private final ServletContext servletContext;
private final Map<String, Runnable> destructionCallbacks = new LinkedHashMap<>();
/**
* Create a new Scope wrapper for the given ServletContext.
* @param servletContext the ServletContext to wrap
*/
public ServletContextScope(ServletContext servletContext) {
Assert.notNull(servletContext, "ServletContext must not be null");
this.servletContext = servletContext;
}
@Override
public Object get(String name, ObjectFactory<?> objectFactory) {
Object scopedObject = this.servletContext.getAttribute(name);
if (scopedObject == null) {
scopedObject = objectFactory.getObject();
this.servletContext.setAttribute(name, scopedObject);
}
return scopedObject;
}
@Override
public @Nullable Object remove(String name) {
Object scopedObject = this.servletContext.getAttribute(name);
if (scopedObject != null) {
synchronized (this.destructionCallbacks) {
this.destructionCallbacks.remove(name);
}
this.servletContext.removeAttribute(name);
return scopedObject;
}
else {
return null;
}
}
@Override
public void registerDestructionCallback(String name, Runnable callback) {
synchronized (this.destructionCallbacks) {
this.destructionCallbacks.put(name, callback);
}
}
/**
* Invoke all registered destruction callbacks.
* To be called on ServletContext shutdown.
* @see org.springframework.web.context.ContextCleanupListener
*/
@Override
public void destroy() {
synchronized (this.destructionCallbacks) {
for (Runnable runnable : this.destructionCallbacks.values()) {
runnable.run();
}
this.destructionCallbacks.clear();
}
}
}
|
ServletContextScope
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/datasource/TransactionAwareDataSourceProxy.java
|
{
"start": 8022,
"end": 11063
}
|
interface ____ in...
switch (method.getName()) {
case "equals" -> {
// Only considered as equal when proxies are identical.
return (proxy == args[0]);
}
case "hashCode" -> {
// Use hashCode of Connection proxy.
return System.identityHashCode(proxy);
}
case "toString" -> {
// Allow for differentiating between the proxy and the raw Connection.
StringBuilder sb = new StringBuilder("Transaction-aware proxy for target Connection ");
if (this.target != null) {
sb.append('[').append(this.target).append(']');
}
else {
sb.append("from DataSource [").append(this.targetDataSource).append(']');
}
return sb.toString();
}
case "close" -> {
// Handle close method: only close if not within a transaction.
if (this.target != null) {
ConnectionHolder conHolder = (ConnectionHolder)
TransactionSynchronizationManager.getResource(this.targetDataSource);
if (conHolder != null && conHolder.hasConnection() && conHolder.getConnection() == this.target) {
// It's the transactional Connection: Don't close it.
conHolder.released();
}
else {
DataSourceUtils.doCloseConnection(this.target, this.targetDataSource);
}
}
this.closed = true;
return null;
}
case "isClosed" -> {
return this.closed;
}
case "unwrap" -> {
if (((Class<?>) args[0]).isInstance(proxy)) {
return proxy;
}
}
case "isWrapperFor" -> {
if (((Class<?>) args[0]).isInstance(proxy)) {
return true;
}
}
}
if (this.target == null) {
if (method.getName().equals("getWarnings") || method.getName().equals("clearWarnings")) {
// Avoid creation of target Connection on pre-close cleanup (for example, Hibernate Session)
return null;
}
if (this.closed) {
throw new SQLException("Connection handle already closed");
}
if (shouldObtainFixedConnection(this.targetDataSource)) {
this.target = DataSourceUtils.doGetConnection(this.targetDataSource);
}
}
Connection actualTarget = this.target;
if (actualTarget == null) {
actualTarget = DataSourceUtils.doGetConnection(this.targetDataSource);
}
if (method.getName().equals("getTargetConnection")) {
// Handle getTargetConnection method: return underlying Connection.
return actualTarget;
}
// Invoke method on target Connection.
try {
Object retVal = method.invoke(actualTarget, args);
// If return value is a Statement, apply transaction timeout.
// Applies to createStatement, prepareStatement, prepareCall.
if (retVal instanceof Statement statement) {
DataSourceUtils.applyTransactionTimeout(statement, this.targetDataSource);
}
return retVal;
}
catch (InvocationTargetException ex) {
throw ex.getTargetException();
}
finally {
if (actualTarget != this.target) {
DataSourceUtils.doReleaseConnection(actualTarget, this.targetDataSource);
}
}
}
}
}
|
coming
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-16/src/main/java/org/redisson/spring/data/connection/ScoredSortedListReplayDecoder.java
|
{
"start": 1126,
"end": 1771
}
|
class ____ implements MultiDecoder<List<Tuple>> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
if (paramNum % 2 != 0) {
return DoubleCodec.INSTANCE.getValueDecoder();
}
return null;
}
@Override
public List<Tuple> decode(List<Object> parts, State state) {
List<Tuple> result = new ArrayList<Tuple>();
for (int i = 0; i < parts.size(); i += 2) {
result.add(new DefaultTuple((byte[])parts.get(i), ((Number)parts.get(i+1)).doubleValue()));
}
return result;
}
}
|
ScoredSortedListReplayDecoder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
|
{
"start": 1497,
"end": 4013
}
|
class ____ {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/test.snapshot/dir");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir);
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test listing snapshots under a snapshottable directory
*/
@Test
@Timeout(value = 15)
public void testListSnapshots() throws Exception {
final Path snapshotsPath = new Path(dir, ".snapshot");
FileStatus[] stats = null;
// special case: snapshots of root
stats = hdfs.listStatus(new Path("/.snapshot"));
// should be 0 since root's snapshot quota is 0
assertEquals(0, stats.length);
// list before set dir as snapshottable
try {
stats = hdfs.listStatus(snapshotsPath);
fail("expect SnapshotException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + dir.toString(), e);
}
// list before creating snapshots
hdfs.allowSnapshot(dir);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(0, stats.length);
// list while creating snapshots
final int snapshotNum = 5;
for (int sNum = 0; sNum < snapshotNum; sNum++) {
hdfs.createSnapshot(dir, "s_" + sNum);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(sNum + 1, stats.length);
for (int i = 0; i <= sNum; i++) {
assertEquals("s_" + i, stats[i].getPath().getName());
}
}
// list while deleting snapshots
for (int sNum = snapshotNum - 1; sNum > 0; sNum--) {
hdfs.deleteSnapshot(dir, "s_" + sNum);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(sNum, stats.length);
for (int i = 0; i < sNum; i++) {
assertEquals("s_" + i, stats[i].getPath().getName());
}
}
// remove the last snapshot
hdfs.deleteSnapshot(dir, "s_0");
stats = hdfs.listStatus(snapshotsPath);
assertEquals(0, stats.length);
}
}
|
TestSnapshotListing
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
|
{
"start": 40193,
"end": 41474
}
|
interface ____ extends Attrs, _Child {
/** URI of image to embed
* @param uri the URI
* @return the current element builder
*/
IMG $src(String uri);
/** short description
* @param desc the description
* @return the current element builder
*/
IMG $alt(String desc);
// $longdesc omitted. use <a...><img..></a> instead
// $name omitted. use id instead.
/** override height
* @param pixels the height
* @return the current element builder
*/
IMG $height(int pixels);
/**
* override height
* @param cdata the height (can use %, * etc.)
* @return the current element builder
*/
IMG $height(String cdata);
/** override width
* @param pixels the width
* @return the current element builder
*/
IMG $width(int pixels);
/**
* override width
* @param cdata the width (can use %, * etc.)
* @return the current element builder
*/
IMG $width(String cdata);
/** use client-side image map
* @param uri the URI
* @return the current element builder
*/
IMG $usemap(String uri);
/** use server-side image map
* @return the current element builder
*/
IMG $ismap();
}
/**
*
*/
public
|
IMG
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java
|
{
"start": 912,
"end": 1963
}
|
class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(POST, "/{index}/_ilm/remove"));
}
@Override
public String getName() {
return "ilm_remove_policy_for_index_action";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index"));
RemoveIndexLifecyclePolicyAction.Request changePolicyRequest = new RemoveIndexLifecyclePolicyAction.Request(
getMasterNodeTimeout(restRequest),
getAckTimeout(restRequest),
indexes
);
changePolicyRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, changePolicyRequest.indicesOptions()));
return channel -> client.execute(
RemoveIndexLifecyclePolicyAction.INSTANCE,
changePolicyRequest,
new RestToXContentListener<>(channel)
);
}
}
|
RestRemoveIndexLifecyclePolicyAction
|
java
|
quarkusio__quarkus
|
test-framework/junit5-mockito/src/main/java/io/quarkus/test/junit/mockito/MockitoConfig.java
|
{
"start": 556,
"end": 1266
}
|
interface ____ {
/**
* If true, then Quarkus will change the scope of the target {@code Singleton} bean to {@code ApplicationScoped}
* to make it mockable.
* <p>
* This is an advanced setting and should only be used if you don't rely on the differences between {@code Singleton}
* and {@code ApplicationScoped} beans (for example it is invalid to read fields of {@code ApplicationScoped} beans
* as a proxy stands in place of the actual implementation)
*/
boolean convertScopes() default false;
/**
* If true, the mock will be created with the {@link org.mockito.Mockito#RETURNS_DEEP_STUBS}
*/
boolean returnsDeepMocks() default false;
}
|
MockitoConfig
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-resource-server/src/main/java/org/springframework/security/oauth2/server/resource/authentication/JwtAuthenticationToken.java
|
{
"start": 3002,
"end": 4529
}
|
class ____<B extends Builder<B>> extends AbstractOAuth2TokenAuthenticationBuilder<Jwt, B> {
private String name;
protected Builder(JwtAuthenticationToken token) {
super(token);
this.name = token.getName();
}
/**
* A synonym for {@link #token(Jwt)}
* @return the {@link Builder} for further configurations
*/
@Override
public B principal(@Nullable Object principal) {
Assert.isInstanceOf(Jwt.class, principal, "principal must be of type Jwt");
return token((Jwt) principal);
}
/**
* A synonym for {@link #token(Jwt)}
* @return the {@link Builder} for further configurations
*/
@Override
public B credentials(@Nullable Object credentials) {
Assert.isInstanceOf(Jwt.class, credentials, "credentials must be of type Jwt");
return token((Jwt) credentials);
}
/**
* Use this {@code token} as the token, principal, and credentials. Also sets the
* {@code name} to {@link Jwt#getSubject}.
* @param token the token to use
* @return the {@link Builder} for further configurations
*/
@Override
public B token(Jwt token) {
super.principal(token);
super.credentials(token);
return super.token(token).name(token.getSubject());
}
/**
* The name to use.
* @param name the name to use
* @return the {@link Builder} for further configurations
*/
public B name(String name) {
this.name = name;
return (B) this;
}
@Override
public JwtAuthenticationToken build() {
return new JwtAuthenticationToken(this);
}
}
}
|
Builder
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsTests.java
|
{
"start": 19928,
"end": 20230
}
|
class ____ {
public void publicMethod() {
}
public void method(String str, Integer num) {
}
public void method(String[] strings, Integer[] nums) {
}
public void method(boolean b, char c) {
}
public void method(char[] characters, int[] nums) {
}
}
private
|
PublicClass
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
|
{
"start": 1821,
"end": 10722
}
|
class ____ implements QueryCache, Closeable {
private static final Logger logger = LogManager.getLogger(IndicesQueryCache.class);
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING = Setting.memorySizeSetting(
"indices.queries.cache.size",
"10%",
Property.NodeScope
);
// mostly a way to prevent queries from being the main source of memory usage
// of the cache
public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting(
"indices.queries.cache.count",
10_000,
1,
Property.NodeScope
);
// enables caching on all segments instead of only the larger ones, for testing only
public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting(
"indices.queries.cache.all_segments",
false,
Property.NodeScope
);
private final LRUQueryCache cache;
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
private volatile long sharedRamBytesUsed;
/**
* Calculates a map of {@link ShardId} to {@link Long} which contains the calculated share of the {@link IndicesQueryCache} shared ram
* size for a given shard (that is, the sum of all the longs is the size of the indices query cache). Since many shards will not
* participate in the cache, shards whose calculated share is zero will not be contained in the map at all. As a consequence, the
* correct pattern for using the returned map will be via {@link Map#getOrDefault(Object, Object)} with a {@code defaultValue} of
* {@code 0L}.
* @return an unmodifiable map from {@link ShardId} to the calculated share of the query cache's shared RAM size for each shard,
* omitting shards with a zero share
*/
public static Map<ShardId, Long> getSharedRamSizeForAllShards(IndicesService indicesService) {
Map<ShardId, Long> shardIdToSharedRam = new HashMap<>();
IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {
final var queryCache = indicesService.getIndicesQueryCache();
long sharedRam = (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(indexShard.shardId(), cacheTotals);
// as a size optimization, only store non-zero values in the map
if (sharedRam > 0L) {
shardIdToSharedRam.put(indexShard.shardId(), sharedRam);
}
}
}
return Collections.unmodifiableMap(shardIdToSharedRam);
}
public long getCacheSizeForShard(ShardId shardId) {
Stats stats = shardStats.get(shardId);
return stats != null ? stats.cacheSize : 0L;
}
public long getSharedRamBytesUsed() {
return sharedRamBytesUsed;
}
// This is a hack for the fact that the close listener for the
// ShardCoreKeyMap will be called before onDocIdSetEviction
// See onDocIdSetEviction for more info
private final Map<Object, StatsAndCount> stats2 = Collections.synchronizedMap(new IdentityHashMap<>());
public IndicesQueryCache(Settings settings) {
final ByteSizeValue size = INDICES_CACHE_QUERY_SIZE_SETTING.get(settings);
final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings);
logger.debug("using [node] query cache with size [{}] max filter count [{}]", size, count);
if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
// Use the default skip_caching_factor (i.e., 10f) in Lucene
cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), Predicates.always(), 10f);
} else {
cache = new ElasticsearchLRUQueryCache(count, size.getBytes());
}
sharedRamBytesUsed = 0;
}
private static QueryCacheStats toQueryCacheStatsSafe(@Nullable Stats stats) {
return stats == null ? new QueryCacheStats() : stats.toQueryCacheStats();
}
/**
* Computes the total cache size in bytes, and the total shard count in the cache for all shards.
* @param indicesService the IndicesService instance to retrieve cache information from
* @return A CacheTotals object containing the computed total number of items in the cache and the number of shards seen in the cache
*/
public static CacheTotals getCacheTotalsForAllShards(IndicesService indicesService) {
IndicesQueryCache queryCache = indicesService.getIndicesQueryCache();
boolean hasQueryCache = queryCache != null;
long totalItemsInCache = 0L;
int shardCount = 0;
for (final IndexService indexService : indicesService) {
for (final IndexShard indexShard : indexService) {
final var shardId = indexShard.shardId();
long cacheSize = hasQueryCache ? queryCache.getCacheSizeForShard(shardId) : 0L;
shardCount++;
assert cacheSize >= 0 : "Unexpected cache size of " + cacheSize + " for shard " + shardId;
totalItemsInCache += cacheSize;
}
}
return new CacheTotals(totalItemsInCache, shardCount);
}
/**
* This method computes the shared RAM size in bytes for the given indexShard.
* @param shardId The shard to compute the shared RAM size for.
* @param cacheTotals Shard totals computed in {@link #getCacheTotalsForAllShards(IndicesService)}.
* @return the shared RAM size in bytes allocated to the given shard, or 0 if unavailable
*/
public long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals) {
long sharedRamBytesUsed = getSharedRamBytesUsed();
if (sharedRamBytesUsed == 0L) {
return 0L;
}
int shardCount = cacheTotals.shardCount();
if (shardCount == 0) {
// Sometimes it's not possible to do this when there are no shard entries at all, which can happen as the shared ram usage can
// extend beyond the closing of all shards.
return 0L;
}
/*
* We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for each
* shard.
*/
long totalItemsInCache = cacheTotals.totalItemsInCache();
long itemsInCacheForShard = getCacheSizeForShard(shardId);
final long additionalRamBytesUsed;
if (totalItemsInCache == 0) {
// all shards have zero cache footprint, so we apportion the size of the shared bytes equally across all shards
additionalRamBytesUsed = Math.round((double) sharedRamBytesUsed / shardCount);
} else {
/*
* Some shards have nonzero cache footprint, so we apportion the size of the shared bytes proportionally to the number of
* segment-requests in the cache for this shard (the number and size of documents associated with those requests is irrelevant
* for this calculation).
* Note that this was a somewhat arbitrary decision. Calculating it by number of documents might have been better. Calculating
* it by number of documents weighted by size would also be good, but possibly more expensive. But the decision to attribute
* memory proportionally to the number of segment-requests was made a long time ago, and we're sticking with that here for the
* sake of consistency and backwards compatibility.
*/
additionalRamBytesUsed = Math.round((double) sharedRamBytesUsed * itemsInCacheForShard / totalItemsInCache);
}
assert additionalRamBytesUsed >= 0L : additionalRamBytesUsed;
return additionalRamBytesUsed;
}
public record CacheTotals(long totalItemsInCache, int shardCount) {}
/** Get usage statistics for the given shard. */
public QueryCacheStats getStats(ShardId shard, Supplier<Long> precomputedSharedRamBytesUsed) {
final QueryCacheStats queryCacheStats = toQueryCacheStatsSafe(shardStats.get(shard));
queryCacheStats.addRamBytesUsed(precomputedSharedRamBytesUsed.get());
return queryCacheStats;
}
@Override
public Weight doCache(Weight weight, QueryCachingPolicy policy) {
while (weight instanceof CachingWeightWrapper) {
weight = ((CachingWeightWrapper) weight).in;
}
final Weight in = cache.doCache(weight, policy);
// We wrap the weight to track the readers it sees and map them with
// the shards they belong to
return new CachingWeightWrapper(in);
}
private
|
IndicesQueryCache
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/test/java/org/springframework/boot/web/server/servlet/context/ServletComponentScanRegistrarTests.java
|
{
"start": 9261,
"end": 9368
}
|
class ____ {
}
@Configuration(proxyBeanMethods = false)
@ServletComponentScan
static
|
ValueAndBasePackages
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/ReflectionPackableMethod.java
|
{
"start": 15418,
"end": 18107
}
|
class ____ implements Pack {
private final String serialize;
private final MultipleSerialization multipleSerialization;
private final String[] argumentsType;
private final Class<?>[] actualRequestTypes;
private final URL url;
private final boolean singleArgument;
private WrapRequestPack(
MultipleSerialization multipleSerialization,
URL url,
String serialize,
Class<?>[] actualRequestTypes,
boolean singleArgument) {
this.url = url;
this.serialize = convertHessianToWrapper(serialize);
this.multipleSerialization = multipleSerialization;
this.actualRequestTypes = actualRequestTypes;
this.argumentsType =
Stream.of(actualRequestTypes).map(Class::getName).toArray(String[]::new);
this.singleArgument = singleArgument;
}
@Override
public byte[] pack(Object obj) throws IOException {
Object[] arguments;
if (singleArgument) {
arguments = new Object[] {obj};
} else {
arguments = (Object[]) obj;
}
TripleCustomerProtocolWrapper.TripleRequestWrapper.Builder builder =
TripleCustomerProtocolWrapper.TripleRequestWrapper.Builder.newBuilder();
builder.setSerializeType(serialize);
for (String type : argumentsType) {
builder.addArgTypes(type);
}
if (actualRequestTypes == null || actualRequestTypes.length == 0) {
return builder.build().toByteArray();
}
ByteArrayOutputStream bos = new ByteArrayOutputStream();
for (int i = 0; i < arguments.length; i++) {
Object argument = arguments[i];
multipleSerialization.serialize(url, serialize, actualRequestTypes[i], argument, bos);
builder.addArgs(bos.toByteArray());
bos.reset();
}
return builder.build().toByteArray();
}
/**
* Convert hessian version from Dubbo's SPI version(hessian2) to wrapper API version
* (hessian4)
*
* @param serializeType literal type
* @return hessian4 if the param is hessian2, otherwise return the param
*/
private String convertHessianToWrapper(String serializeType) {
if (TripleConstants.HESSIAN2.equals(serializeType)) {
return TripleConstants.HESSIAN4;
}
return serializeType;
}
}
private
|
WrapRequestPack
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/restore/RocksDBFullRestoreOperation.java
|
{
"start": 2384,
"end": 7877
}
|
class ____<K> implements RocksDBRestoreOperation {
private final FullSnapshotRestoreOperation<K> savepointRestoreOperation;
/** Write batch size used in {@link RocksDBWriteBatchWrapper}. */
private final long writeBatchSize;
private final RocksDBHandle rocksHandle;
private final ICloseableRegistry cancelStreamRegistryForRestore;
public RocksDBFullRestoreOperation(
KeyGroupRange keyGroupRange,
ClassLoader userCodeClassLoader,
Map<String, RocksDbKvStateInfo> kvStateInformation,
StateSerializerProvider<K> keySerializerProvider,
File instanceRocksDBPath,
DBOptions dbOptions,
Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory,
RocksDBNativeMetricOptions nativeMetricOptions,
MetricGroup metricGroup,
@Nonnull Collection<KeyedStateHandle> restoreStateHandles,
@Nonnull RocksDbTtlCompactFiltersManager ttlCompactFiltersManager,
@Nonnegative long writeBatchSize,
Long writeBufferManagerCapacity,
ICloseableRegistry cancelStreamRegistryForRestore) {
this.writeBatchSize = writeBatchSize;
this.cancelStreamRegistryForRestore = cancelStreamRegistryForRestore;
this.rocksHandle =
new RocksDBHandle(
kvStateInformation,
instanceRocksDBPath,
dbOptions,
columnFamilyOptionsFactory,
nativeMetricOptions,
metricGroup,
ttlCompactFiltersManager,
writeBufferManagerCapacity);
this.savepointRestoreOperation =
new FullSnapshotRestoreOperation<>(
keyGroupRange,
userCodeClassLoader,
restoreStateHandles,
keySerializerProvider);
}
/** Restores all key-groups data that is referenced by the passed state handles. */
@Override
public RocksDBRestoreResult restore()
throws IOException, StateMigrationException, RocksDBException {
rocksHandle.openDB();
try (ThrowingIterator<SavepointRestoreResult> restore =
savepointRestoreOperation.restore()) {
while (restore.hasNext()) {
applyRestoreResult(restore.next());
}
}
return new RocksDBRestoreResult(
this.rocksHandle.getDb(),
this.rocksHandle.getDefaultColumnFamilyHandle(),
this.rocksHandle.getNativeMetricMonitor(),
-1,
null,
null,
null);
}
private void applyRestoreResult(SavepointRestoreResult savepointRestoreResult)
throws IOException, RocksDBException, StateMigrationException {
List<StateMetaInfoSnapshot> restoredMetaInfos =
savepointRestoreResult.getStateMetaInfoSnapshots();
Map<Integer, ColumnFamilyHandle> columnFamilyHandles = new HashMap<>();
for (int i = 0; i < restoredMetaInfos.size(); i++) {
StateMetaInfoSnapshot restoredMetaInfo = restoredMetaInfos.get(i);
RocksDbKvStateInfo registeredStateCFHandle =
this.rocksHandle.getOrRegisterStateColumnFamilyHandle(
null, restoredMetaInfo, cancelStreamRegistryForRestore);
columnFamilyHandles.put(i, registeredStateCFHandle.columnFamilyHandle);
}
try (ThrowingIterator<KeyGroup> keyGroups = savepointRestoreResult.getRestoredKeyGroups()) {
restoreKVStateData(keyGroups, columnFamilyHandles);
}
}
/**
* Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
* handle.
*/
private void restoreKVStateData(
ThrowingIterator<KeyGroup> keyGroups, Map<Integer, ColumnFamilyHandle> columnFamilies)
throws IOException, RocksDBException, StateMigrationException {
// for all key-groups in the current state handle...
try (RocksDBWriteBatchWrapper writeBatchWrapper =
new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize);
Closeable ignored =
cancelStreamRegistryForRestore.registerCloseableTemporarily(
writeBatchWrapper.getCancelCloseable())) {
ColumnFamilyHandle handle = null;
while (keyGroups.hasNext()) {
KeyGroup keyGroup = keyGroups.next();
try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) {
int oldKvStateId = -1;
while (groupEntries.hasNext()) {
KeyGroupEntry groupEntry = groupEntries.next();
int kvStateId = groupEntry.getKvStateId();
if (kvStateId != oldKvStateId) {
oldKvStateId = kvStateId;
handle = columnFamilies.get(kvStateId);
}
writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue());
}
}
}
}
}
@Override
public void close() throws Exception {
this.rocksHandle.close();
}
}
|
RocksDBFullRestoreOperation
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSeparatorCliCommand.java
|
{
"start": 522,
"end": 1044
}
|
class ____ extends AbstractCliCommand {
public FetchSeparatorCliCommand() {
super(Pattern.compile("fetch(?: |_)separator *= *\"(.+)\"", Pattern.CASE_INSENSITIVE));
}
@Override
protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) {
cliSession.cfg().setFetchSeparator(m.group(1));
terminal.line().text("fetch separator set to \"").em(cliSession.cfg().getFetchSeparator()).text("\"").end();
return true;
}
}
|
FetchSeparatorCliCommand
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/telemetry/internals/MetricKey.java
|
{
"start": 1053,
"end": 2805
}
|
class ____ implements MetricKeyable {
private final String name;
private final Map<String, String> tags;
/**
* Create a {@code MetricKey}
*
* @param name metric name. This should be the telemetry metric name of the metric (the final name
* under which this metric is emitted).
*/
public MetricKey(String name) {
this(name, null);
}
/**
* Create a {@code MetricKey}
*
* @param name metric name. This should be the .converted. name of the metric (the final name
* under which this metric is emitted).
* @param tags mapping of tag keys to values.
*/
public MetricKey(String name, Map<String, String> tags) {
this.name = Objects.requireNonNull(name);
this.tags = tags != null ? Collections.unmodifiableMap(tags) : Collections.emptyMap();
}
public MetricKey(MetricName metricName) {
this(metricName.name(), metricName.tags());
}
@Override
public MetricKey key() {
return this;
}
public String name() {
return name;
}
public Map<String, String> tags() {
return tags;
}
@Override
public int hashCode() {
return Objects.hash(name, tags);
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
MetricKey other = (MetricKey) obj;
return this.name().equals(other.name()) && this.tags().equals(other.tags());
}
@Override
public String toString() {
return "MetricKey {name=" + name() + ", tags=" + tags() + "}";
}
}
|
MetricKey
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ddb/src/main/java/org/apache/camel/component/aws2/ddb/transform/serialization/gson/JavaTimeInstantTypeAdapter.java
|
{
"start": 971,
"end": 1555
}
|
class ____ implements JsonSerializer<Instant>, JsonDeserializer<Instant> {
@Override
public JsonElement serialize(
final Instant time, final Type typeOfSrc,
final JsonSerializationContext context) {
return new JsonPrimitive(time.getEpochSecond() * 1000);
}
@Override
public Instant deserialize(
final JsonElement json, final Type typeOfT,
final JsonDeserializationContext context)
throws JsonParseException {
return Instant.ofEpochMilli(json.getAsLong());
}
}
|
JavaTimeInstantTypeAdapter
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/dns/NameResolverTest.java
|
{
"start": 1929,
"end": 38699
}
|
class ____ extends VertxTestBase {
private FakeDNSServer dnsServer;
private InetSocketAddress dnsServerAddress;
@Override
public void setUp() throws Exception {
dnsServer = new FakeDNSServer().testResolveASameServer("127.0.0.1");
dnsServer.start();
dnsServerAddress = dnsServer.localAddress();
super.setUp();
}
@Override
protected void tearDown() throws Exception {
dnsServer.stop();
super.tearDown();
}
@Override
protected VertxOptions getOptions() {
return super.getOptions().setAddressResolverOptions(getAddressResolverOptions());
}
private Future<InetAddress> resolve(String host) {
return resolve(vertx, host);
}
private Future<InetAddress> resolve(Vertx vertx, String host) {
return ((VertxImpl) vertx).nameResolver().resolve(host);
}
private AddressResolverOptions getAddressResolverOptions() {
return new AddressResolverOptions()
.addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort());
}
@Test
public void testAsyncResolve() throws Exception {
resolve("vertx.io").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.1", resolved.getHostAddress());
testComplete();
}));
await();
}
@Test
public void testAsyncResolveTruncated() {
dnsServer.store(question -> Collections.singleton(new FakeDNSServer.VertxResourceRecord("vertx.io", "127.0.0.1").setTruncated(true)));
resolve("vertx.io").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.1", resolved.getHostAddress());
testComplete();
}));
await();
}
@Test
public void testAsyncResolveFail() {
resolve("vertx.com").onComplete(onFailure(failure -> {
assertTrue("Was expecting " + failure + " to be an instanceof UnknownHostException", failure instanceof UnknownHostException);
testComplete();
}));
await();
}
@Test
public void testNet() throws Exception {
testNet("vertx.io");
}
private void testNet(String hostname) throws Exception {
NetClient client = vertx.createNetClient();
NetServer server = vertx.createNetServer().connectHandler(so -> {
so.handler(buff -> {
so.write(buff);
so.close();
});
});
try {
CountDownLatch listenLatch = new CountDownLatch(1);
server.listen(1234, hostname).onComplete(onSuccess(s -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.connect(1234, hostname).onComplete(onSuccess(so -> {
Buffer buffer = Buffer.buffer();
so.handler(buffer::appendBuffer);
so.closeHandler(v -> {
assertEquals(Buffer.buffer("foo"), buffer);
testComplete();
});
so.write(Buffer.buffer("foo"));
}));
await();
} finally {
client.close();
server.close();
}
}
@Test
public void testHttp() throws Exception {
HttpClient client = vertx.createHttpClient();
HttpServer server = vertx.createHttpServer().requestHandler(req -> {
req.response().end("foo");
});
try {
CountDownLatch listenLatch = new CountDownLatch(1);
server.listen(HttpTestBase.DEFAULT_HTTP_PORT, "vertx.io").onComplete(onSuccess(s -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.request(HttpMethod.GET, HttpTestBase.DEFAULT_HTTP_PORT, "vertx.io", "/somepath").onComplete(onSuccess(req -> {
req.send().onComplete(onSuccess(resp -> {
Buffer buffer = Buffer.buffer();
resp.handler(buffer::appendBuffer);
resp.endHandler(v -> {
assertEquals(Buffer.buffer("foo"), buffer);
testComplete();
});
}));
}));
await();
} finally {
client.close();
server.close();
}
}
@Test
public void testOptions() {
AddressResolverOptions options = new AddressResolverOptions();
assertEquals(AddressResolverOptions.DEFAULT_OPT_RESOURCE_ENABLED, options.isOptResourceEnabled());
assertEquals(AddressResolverOptions.DEFAULT_SERVERS, options.getServers());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_MIN_TIME_TO_LIVE, options.getCacheMinTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_MAX_TIME_TO_LIVE, options.getCacheMaxTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_NEGATIVE_TIME_TO_LIVE, options.getCacheNegativeTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_QUERY_TIMEOUT, options.getQueryTimeout());
assertEquals(AddressResolverOptions.DEFAULT_MAX_QUERIES, options.getMaxQueries());
assertEquals(AddressResolverOptions.DEFAULT_RD_FLAG, options.getRdFlag());
assertEquals(AddressResolverOptions.DEFAULT_NDOTS, options.getNdots());
assertEquals(AddressResolverOptions.DEFAULT_SEARCH_DOMAINS, options.getSearchDomains());
boolean optResourceEnabled = TestUtils.randomBoolean();
List<String> servers = Arrays.asList("1.2.3.4", "5.6.7.8");
int minTTL = TestUtils.randomPositiveInt();
int maxTTL = minTTL + 1000;
int negativeTTL = TestUtils.randomPositiveInt();
int queryTimeout = 1 + TestUtils.randomPositiveInt();
int maxQueries = 1 + TestUtils.randomPositiveInt();
boolean rdFlag = TestUtils.randomBoolean();
int ndots = TestUtils.randomPositiveInt() - 2;
List<String> searchDomains = new ArrayList<>();
for (int i = 0; i < 2; i++) {
searchDomains.add(TestUtils.randomAlphaString(15));
}
assertSame(options, options.setOptResourceEnabled(optResourceEnabled));
assertSame(options, options.setServers(new ArrayList<>(servers)));
assertSame(options, options.setCacheMinTimeToLive(0));
assertSame(options, options.setCacheMinTimeToLive(minTTL));
try {
options.setCacheMinTimeToLive(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertSame(options, options.setCacheMaxTimeToLive(0));
assertSame(options, options.setCacheMaxTimeToLive(maxTTL));
try {
options.setCacheMaxTimeToLive(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertSame(options, options.setCacheNegativeTimeToLive(0));
assertSame(options, options.setCacheNegativeTimeToLive(negativeTTL));
try {
options.setCacheNegativeTimeToLive(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertSame(options, options.setQueryTimeout(queryTimeout));
try {
options.setQueryTimeout(0);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertSame(options, options.setMaxQueries(maxQueries));
try {
options.setMaxQueries(0);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertSame(options, options.setRdFlag(rdFlag));
assertSame(options, options.setSearchDomains(searchDomains));
assertSame(options, options.setNdots(ndots));
try {
options.setNdots(-2);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
assertEquals(optResourceEnabled, options.isOptResourceEnabled());
assertEquals(servers, options.getServers());
assertEquals(minTTL, options.getCacheMinTimeToLive());
assertEquals(maxTTL, options.getCacheMaxTimeToLive());
assertEquals(negativeTTL, options.getCacheNegativeTimeToLive());
assertEquals(queryTimeout, options.getQueryTimeout());
assertEquals(maxQueries, options.getMaxQueries());
assertEquals(rdFlag, options.getRdFlag());
assertEquals(ndots, options.getNdots());
assertEquals(searchDomains, options.getSearchDomains());
// Test copy and json copy
AddressResolverOptions copy = new AddressResolverOptions(options);
AddressResolverOptions jsonCopy = new AddressResolverOptions(options.toJson());
options.setOptResourceEnabled(AddressResolverOptions.DEFAULT_OPT_RESOURCE_ENABLED);
options.getServers().clear();
options.setCacheMinTimeToLive(AddressResolverOptions.DEFAULT_CACHE_MIN_TIME_TO_LIVE);
options.setCacheMaxTimeToLive(AddressResolverOptions.DEFAULT_CACHE_MAX_TIME_TO_LIVE);
options.setCacheNegativeTimeToLive(AddressResolverOptions.DEFAULT_CACHE_NEGATIVE_TIME_TO_LIVE);
options.setQueryTimeout(AddressResolverOptions.DEFAULT_QUERY_TIMEOUT);
options.setMaxQueries(AddressResolverOptions.DEFAULT_MAX_QUERIES);
options.setRdFlag(AddressResolverOptions.DEFAULT_RD_FLAG);
options.setNdots(AddressResolverOptions.DEFAULT_NDOTS);
options.setSearchDomains(AddressResolverOptions.DEFAULT_SEARCH_DOMAINS);
assertEquals(optResourceEnabled, copy.isOptResourceEnabled());
assertEquals(servers, copy.getServers());
assertEquals(minTTL, copy.getCacheMinTimeToLive());
assertEquals(maxTTL, copy.getCacheMaxTimeToLive());
assertEquals(negativeTTL, copy.getCacheNegativeTimeToLive());
assertEquals(queryTimeout, copy.getQueryTimeout());
assertEquals(maxQueries, copy.getMaxQueries());
assertEquals(rdFlag, copy.getRdFlag());
assertEquals(ndots, copy.getNdots());
assertEquals(searchDomains, copy.getSearchDomains());
assertEquals(optResourceEnabled, jsonCopy.isOptResourceEnabled());
assertEquals(servers, jsonCopy.getServers());
assertEquals(minTTL, jsonCopy.getCacheMinTimeToLive());
assertEquals(maxTTL, jsonCopy.getCacheMaxTimeToLive());
assertEquals(negativeTTL, jsonCopy.getCacheNegativeTimeToLive());
assertEquals(queryTimeout, jsonCopy.getQueryTimeout());
assertEquals(maxQueries, jsonCopy.getMaxQueries());
assertEquals(rdFlag, jsonCopy.getRdFlag());
assertEquals(ndots, jsonCopy.getNdots());
assertEquals(searchDomains, jsonCopy.getSearchDomains());
}
@Test
public void testDefaultJsonOptions() {
AddressResolverOptions options = new AddressResolverOptions(new JsonObject());
assertEquals(AddressResolverOptions.DEFAULT_OPT_RESOURCE_ENABLED, options.isOptResourceEnabled());
assertEquals(AddressResolverOptions.DEFAULT_SERVERS, options.getServers());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_MIN_TIME_TO_LIVE, options.getCacheMinTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_MAX_TIME_TO_LIVE, options.getCacheMaxTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_CACHE_NEGATIVE_TIME_TO_LIVE, options.getCacheNegativeTimeToLive());
assertEquals(AddressResolverOptions.DEFAULT_QUERY_TIMEOUT, options.getQueryTimeout());
assertEquals(AddressResolverOptions.DEFAULT_MAX_QUERIES, options.getMaxQueries());
assertEquals(AddressResolverOptions.DEFAULT_RD_FLAG, options.getRdFlag());
assertEquals(AddressResolverOptions.DEFAULT_SEARCH_DOMAINS, options.getSearchDomains());
assertEquals(AddressResolverOptions.DEFAULT_NDOTS, options.getNdots());
}
@Test
public void testAsyncResolveConnectIsNotifiedOnChannelEventLoop() throws Exception {
CountDownLatch listenLatch = new CountDownLatch(1);
NetServer server = vertx.createNetServer().connectHandler(so -> {
});
try {
server.listen(1234, "localhost").onComplete(onSuccess(v -> listenLatch.countDown()));
awaitLatch(listenLatch);
AtomicReference<Thread> channelThread = new AtomicReference<>();
CountDownLatch connectLatch = new CountDownLatch(1);
Bootstrap bootstrap = new Bootstrap();
bootstrap.channelFactory(((VertxInternal)vertx).transport().channelFactory(false));
bootstrap.group(((VertxInternal)vertx).nettyEventLoopGroup());
bootstrap.resolver(((VertxInternal) vertx).nameResolver().nettyAddressResolverGroup());
bootstrap.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
channelThread.set(Thread.currentThread());
connectLatch.countDown();
}
});
ChannelFuture channelFut = bootstrap.connect("localhost", 1234);
awaitLatch(connectLatch);
channelFut.addListener(v -> {
assertTrue(v.isSuccess());
assertEquals(channelThread.get(), Thread.currentThread());
testComplete();
});
await();
} finally {
server.close();
}
}
@Test
public void testInvalidHostsConfig() {
try {
AddressResolverOptions options = new AddressResolverOptions().setHostsPath("whatever.txt");
vertx(new VertxOptions().setAddressResolverOptions(options));
fail();
} catch (VertxException ignore) {
}
}
@Test
public void testResolveFromClasspath() {
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(new AddressResolverOptions().setHostsPath("hosts_config.txt")));
resolve(vertx, "server.net").onComplete(onSuccess(addr -> {
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
testComplete();
}));
await();
}
@Test
public void testResolveFromFile() {
File f = new File(new File(new File(new File("src"), "test"), "resources"), "hosts_config.txt");
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(new AddressResolverOptions().setHostsPath(f.getAbsolutePath())));
resolve(vertx, "server.net").onComplete(onSuccess(addr -> {
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
testComplete();
}));
await();
}
@Test
public void testResolveFromBuffer() {
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(new AddressResolverOptions().setHostsValue(Buffer.buffer("192.168.0.15 server.net"))));
resolve(vertx, "server.net").onComplete(onSuccess(addr -> {
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
testComplete();
}));
await();
}
@Test
public void testCaseInsensitiveResolveFromHosts() {
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(new AddressResolverOptions().setHostsPath("hosts_config.txt")));
resolve(vertx, "SERVER.NET").onComplete(onSuccess(addr -> {
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
testComplete();
}));
await();
}
@Test
public void testTrailingDotResolveFromHosts() {
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(new AddressResolverOptions().setHostsPath("hosts_config.txt")));
resolve(vertx,"server.net.").onComplete(onSuccess(addr -> {
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
testComplete();
}));
await();
}
@Test
public void testRefreshHosts1() throws Exception {
Assume.assumeFalse(Utils.isWindows());
InetAddress addr = testRefreshHosts((int) TimeUnit.SECONDS.toNanos(1), null);
assertEquals("192.168.0.16", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
}
@Test
public void testRefreshHosts2() throws Exception {
InetAddress addr = testRefreshHosts(0, null);
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
}
@Test
public void testRefreshHosts3() throws Exception {
InetAddress addr = testRefreshHosts(100, TimeUnit.MILLISECONDS);
assertEquals("192.168.0.16", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
}
@Test
public void testRefreshHosts4() throws Exception {
InetAddress addr = testRefreshHosts(2, TimeUnit.SECONDS);
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
}
private InetAddress testRefreshHosts(int period, TimeUnit timeUnit) throws Exception {
File hosts = File.createTempFile("vertx", "hosts");
hosts.deleteOnExit();
Files.writeString(hosts.toPath(), "192.168.0.15 server.net");
AddressResolverOptions options = new AddressResolverOptions()
.setHostsPath(hosts.getAbsolutePath())
.setHostsRefreshPeriod(period);
if (timeUnit != null) {
options.setHostsRefreshPeriodUnit(timeUnit);
}
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(options));
InetAddress addr = awaitFuture(resolve(vertx, "server.net"));
assertEquals("192.168.0.15", addr.getHostAddress());
assertEquals("server.net", addr.getHostName());
Files.writeString(hosts.toPath(), "192.168.0.16 server.net");
Thread.sleep(1000);
return awaitFuture(resolve(vertx, "server.net"));
}
@Test
public void testResolveMissingLocalhost() throws Exception {
InetAddress localhost = InetAddress.getByName("localhost");
// Set a dns resolver that won't resolve localhost
dnsServer.testResolveASameServer("127.0.0.1");
// Test using the resolver API
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
new AddressResolverOptions().
addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
setOptResourceEnabled(false)
));
CompletableFuture<Void> test1 = new CompletableFuture<>();
resolve("localhost").onComplete(ar -> {
if (ar.succeeded()) {
InetAddress resolved = ar.result();
if (resolved.equals(localhost)) {
test1.complete(null);
} else {
test1.completeExceptionally(new AssertionError("Unexpected localhost value " + resolved));
}
} else {
test1.completeExceptionally(ar.cause());
}
});
test1.get(10, TimeUnit.SECONDS);
CompletableFuture<Void> test2 = new CompletableFuture<>();
resolve(vertx, "LOCALHOST").onComplete(ar -> {
if (ar.succeeded()) {
InetAddress resolved = ar.result();
if (resolved.equals(localhost)) {
test2.complete(null);
} else {
test2.completeExceptionally(new AssertionError("Unexpected localhost value " + resolved));
}
} else {
test2.completeExceptionally(ar.cause());
}
});
test2.get(10, TimeUnit.SECONDS);
// Test using bootstrap
CompletableFuture<Void> test3 = new CompletableFuture<>();
NetServer server = vertx.createNetServer(new NetServerOptions().setPort(1234).setHost(localhost.getHostAddress()));
try {
server.connectHandler(so -> {
so.end(Buffer.buffer("hello"));
});
server.listen().onComplete(ar -> {
if (ar.succeeded()) {
test3.complete(null);
} else {
test3.completeExceptionally(ar.cause());
}
});
test3.get(10, TimeUnit.SECONDS);
CompletableFuture<Void> test4 = new CompletableFuture<>();
NetClient client = vertx.createNetClient();
client.connect(1234, "localhost").onComplete(ar -> {
if (ar.succeeded()) {
test4.complete(null);
} else {
test4.completeExceptionally(ar.cause());
}
});
test4.get(10, TimeUnit.SECONDS);
} finally {
server.close();
}
}
@Test
public void testSearchDomain() throws Exception {
String addr_host1_foo_com = "127.0.0.1";
String addr_host1 = "127.0.0.2";
String addr_host3 = "127.0.0.3";
String addr_host4_sub_foo_com = "127.0.0.4";
String addr_host5_sub_foo_com = "127.0.0.5";
String addr_host5_sub = "127.0.0.6";
String addr_host6_sub_sub_foo_com = "127.0.0.7";
String addr_host7_sub_sub_foo_com = "127.0.0.8";
String addr_host7_sub_sub = "127.0.0.9";
Map<String, String> records = new HashMap<>();
records.put("host1.foo.com", addr_host1_foo_com);
records.put("host1", addr_host1);
records.put("host3", addr_host3);
records.put("host4.sub.foo.com", addr_host4_sub_foo_com);
records.put("host5.sub.foo.com", addr_host5_sub_foo_com);
records.put("host5.sub", addr_host5_sub);
records.put("host6.sub.sub.foo.com", addr_host6_sub_sub_foo_com);
records.put("host7.sub.sub.foo.com", addr_host7_sub_sub_foo_com);
records.put("host7.sub.sub", addr_host7_sub_sub);
dnsServer.testResolveA(records::get);
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
new AddressResolverOptions().
addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
setOptResourceEnabled(false).
setNdots(2).
addSearchDomain("foo.com")
));
// host1 resolves host1.foo.com with foo.com search domain
CountDownLatch latch1 = new CountDownLatch(1);
resolve(vertx, "host1").onComplete(onSuccess(resolved -> {
assertEquals(addr_host1_foo_com, resolved.getHostAddress());
latch1.countDown();
}));
awaitLatch(latch1);
// "host1." absolute query
CountDownLatch latch2 = new CountDownLatch(1);
resolve(vertx, "host1.").onComplete(onSuccess(resolved -> {
assertEquals(addr_host1, resolved.getHostAddress());
latch2.countDown();
}));
awaitLatch(latch2);
// "host2" not resolved
CountDownLatch latch3 = new CountDownLatch(1);
resolve(vertx, "host2").onComplete(onFailure(cause -> {
assertTrue(cause instanceof UnknownHostException);
latch3.countDown();
}));
awaitLatch(latch3);
// "host3" resolves to addr_host3 as fallback
CountDownLatch latch4 = new CountDownLatch(1);
resolve(vertx, "host3").onComplete(onSuccess(cause -> {
assertEquals(addr_host3, cause.getHostAddress());
latch4.countDown();
}));
awaitLatch(latch4);
// "host3." does not contain a dot but is absolute
CountDownLatch latch5 = new CountDownLatch(1);
resolve(vertx, "host3.").onComplete(onSuccess(resolved -> {
assertEquals(addr_host3, resolved.getHostAddress());
latch5.countDown();
}));
awaitLatch(latch5);
// "host4.sub" contains a dot but not resolved then resolved to "host4.sub.foo.com" with "foo.com" search domain
CountDownLatch latch6 = new CountDownLatch(1);
resolve(vertx, "host4.sub").onComplete(onSuccess(resolved -> {
assertEquals(addr_host4_sub_foo_com, resolved.getHostAddress());
latch6.countDown();
}));
awaitLatch(latch6);
// "host5.sub" contains one dots and is resolved via search domain to "host5.sub.foo.com" and not to "host5.sub"
CountDownLatch latch7 = new CountDownLatch(1);
resolve(vertx, "host5.sub").onComplete(onSuccess(resolved -> {
assertEquals(addr_host5_sub_foo_com, resolved.getHostAddress());
latch7.countDown();
}));
awaitLatch(latch7);
// "host6.sub.sub" contains two dots and is resolved to "host6.sub.sub.foo.com" as fallback
CountDownLatch latch8 = new CountDownLatch(1);
resolve(vertx, "host6.sub.sub").onComplete(onSuccess(resolved -> {
assertEquals(addr_host6_sub_sub_foo_com, resolved.getHostAddress());
latch8.countDown();
}));
awaitLatch(latch8);
// "host6.sub.sub" contains two dots and is resolved to "host6.sub.sub" and not to "host6.sub.sub.foo.com"
CountDownLatch latch9 = new CountDownLatch(1);
resolve(vertx, "host7.sub.sub").onComplete(onSuccess(resolved -> {
assertEquals(addr_host7_sub_sub, resolved.getHostAddress());
latch9.countDown();
}));
awaitLatch(latch9);
}
@Test
public void testMultipleSearchDomain() throws Exception {
Map<String, String> records = new HashMap<>();
records.put("host1.foo.com", "127.0.0.1");
records.put("host2.bar.com", "127.0.0.2");
records.put("host3.bar.com", "127.0.0.3");
records.put("host3.foo.com", "127.0.0.4");
dnsServer.testResolveA(records);
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
new AddressResolverOptions().
setHostsValue(Buffer.buffer()).
setNdots(1).
addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
setOptResourceEnabled(false).
addSearchDomain("foo.com").
addSearchDomain("bar.com")
));
// "host1" resolves via the "foo.com" search path
CountDownLatch latch1 = new CountDownLatch(1);
resolve(vertx, "host1").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.1", resolved.getHostAddress());
latch1.countDown();
}));
awaitLatch(latch1);
// "host2" resolves via the "bar.com" search path
CountDownLatch latch2 = new CountDownLatch(1);
resolve(vertx, "host2").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.2", resolved.getHostAddress());
latch2.countDown();
}));
awaitLatch(latch2);
// "host3" resolves via the "foo.com" search path as it is the first one
CountDownLatch latch3 = new CountDownLatch(1);
resolve(vertx, "host3").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.4", resolved.getHostAddress());
latch3.countDown();
}));
awaitLatch(latch3);
// "host4" does not resolve
resolve(vertx, "host4").onComplete(onFailure(cause -> {
assertTrue(cause instanceof UnknownHostException);
testComplete();
}));
await();
}
@Test
public void testSearchDomainWithNdots2() throws Exception {
Map<String, String> records = new HashMap<>();
records.put("host1.sub.foo.com", "127.0.0.1");
records.put("host2.sub.foo.com", "127.0.0.2");
records.put("host2.sub", "127.0.0.3");
dnsServer.testResolveA(records);
VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
new AddressResolverOptions().
addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
setOptResourceEnabled(false).
addSearchDomain("foo.com").
setNdots(2)
));
CountDownLatch latch1 = new CountDownLatch(1);
resolve(vertx, "host1.sub").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.1", resolved.getHostAddress());
latch1.countDown();
}));
awaitLatch(latch1);
// "host2.sub" is resolved with the foo.com search domain as ndots = 2
CountDownLatch latch2 = new CountDownLatch(1);
resolve(vertx, "host2.sub").onComplete(onSuccess(resolved -> {
assertEquals("127.0.0.2", resolved.getHostAddress());
latch2.countDown();
}));
awaitLatch(latch2);
}
  @Test
  public void testSearchDomainWithNdots0() throws Exception {
    // ndots = 0: every name is first tried as-is; the search domain is only
    // a fallback, so "host1" must NOT be rewritten to "host1.foo.com".
    Map<String, String> records = new HashMap<>();
    records.put("host1", "127.0.0.2");
    records.put("host1.foo.com", "127.0.0.3");
    dnsServer.testResolveA(records::get);
    VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
      new AddressResolverOptions().
        addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
        setOptResourceEnabled(false).
        addSearchDomain("foo.com").
        setNdots(0)
    ));
    // "host1" resolves directly as ndots = 0
    CountDownLatch latch1 = new CountDownLatch(1);
    resolve(vertx, "host1").onComplete(onSuccess(resolved -> {
      assertEquals("127.0.0.2", resolved.getHostAddress());
      latch1.countDown();
    }));
    awaitLatch(latch1);
    // "host1.foo.com" resolves to host1.foo.com
    CountDownLatch latch2 = new CountDownLatch(1);
    resolve(vertx, "host1.foo.com").onComplete(onSuccess(resolved -> {
      assertEquals("127.0.0.3", resolved.getHostAddress());
      latch2.countDown();
    }));
    awaitLatch(latch2);
  }
  @Test
  public void testNetSearchDomain() throws Exception {
    // A TCP client connect (testNet) must also go through search-domain
    // resolution: "host1" -> "host1.foo.com".
    Map<String, String> records = new HashMap<>();
    records.put("host1.foo.com", "127.0.0.1");
    dnsServer.testResolveA(records);
    // replace the default Vert.x instance with one wired to the fake DNS server
    vertx.close();
    vertx = vertx(new VertxOptions().setAddressResolverOptions(
      new AddressResolverOptions().
        setHostsValue(Buffer.buffer()).
        setNdots(1).
        addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
        setOptResourceEnabled(false).
        addSearchDomain("foo.com")
    ));
    testNet("host1");
  }
@Test
public void testParseResolvConf() {
assertEquals(-1, NameResolver.parseLinux("options").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("\noptions ndots: 4").ndots());
assertEquals(-1, NameResolver.parseLinux("boptions ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux(" options ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("\toptions ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("\foptions ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("\n options ndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("options\tndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("options\fndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots: 4").ndots());
assertEquals(-1, NameResolver.parseLinux("options\nndots: 4").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:\t4").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots: 4").ndots());
assertEquals(-1, NameResolver.parseLinux("options ndots:\n4").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4 ").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4\t").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4\f").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4\n").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4\r").ndots());
assertEquals(-1, NameResolver.parseLinux("options ndots:4_").ndots());
assertEquals(2, NameResolver.parseLinux("options ndots:4\noptions ndots:2").ndots());
assertEquals(4, NameResolver.parseLinux("options ndots:4 debug").ndots());
assertEquals(4, NameResolver.parseLinux("options debug ndots:4").ndots());
assertEquals(false, NameResolver.parseLinux("options").isRotate());
assertEquals(true, NameResolver.parseLinux("options rotate").isRotate());
assertEquals(true, NameResolver.parseLinux("options rotate\n").isRotate());
assertEquals(false, NameResolver.parseLinux("options\nrotate").isRotate());
}
@Test
public void testResolveLocalhost() {
NameResolver resolver = new NameResolver(vertx, new AddressResolverOptions());
resolver.resolve("LOCALHOST").onComplete(res -> {
if (res.succeeded()) {
assertEquals("localhost", res.result().getHostName().toLowerCase(Locale.ENGLISH));
resolver.resolve("LocalHost").onComplete(res2 -> {
if (res2.succeeded()) {
assertEquals("localhost", res2.result().getHostName().toLowerCase(Locale.ENGLISH));
resolver.resolve("localhost").onComplete(res3 -> {
if (res3.succeeded()) {
assertEquals("localhost", res3.result().getHostName().toLowerCase(Locale.ENGLISH));
testComplete();
} else {
fail(res3.cause());
}
});
} else {
fail(res2.cause());
}
});
} else {
fail(res.cause());
}
});
await();
}
  @Test
  public void testResolveAll() {
    // resolveAll must return every A record for the name, in the order the
    // DNS server returned them.
    List<String> expectedIPAddresses = Arrays.asList("127.0.0.2", "127.0.0.3");
    dnsServer.addRecordsToStore( "fakeAddress.com", expectedIPAddresses.toArray(new String[0]));
    NameResolver resolver = new NameResolver(vertx, getAddressResolverOptions());
    resolver.resolveAll("fakeAddress.com", res -> {
      if (res.succeeded()) {
        assertEquals(expectedIPAddresses, res.result().stream().map(e -> e.getAddress().getHostAddress().toLowerCase(Locale.ENGLISH)).collect(Collectors.toList()));
        testComplete();
      } else {
        fail(res.cause());
      }
    });
    await();
  }
  // Server-selection matrix: rotation on/off crossed with answer caching.
  @Test
  public void testRotationalServerSelection() throws Exception {
    testServerSelection(true, false);
  }
  @Test
  public void testRotationalServerSelectionWithCache() throws Exception {
    testServerSelection(true, true);
  }
  @Test
  public void testFirstServerSelection() throws Exception {
    testServerSelection(false, false);
  }
  // Starts more fake DNS servers than event loops; each server answers
  // "vertx.io" with 127.0.0.<index> so the answering server is identifiable
  // from the resolved address. With rotation enabled and caching disabled the
  // answers must walk through the servers in order; otherwise the first
  // server (or the cached answer) always wins.
  private void testServerSelection(boolean rotateServers, boolean cache) throws Exception {
    int num = VertxOptions.DEFAULT_EVENT_LOOP_POOL_SIZE + 4;
    List<FakeDNSServer> dnsServers = new ArrayList<>();
    try {
      for (int index = 1; index <= num; index++) {
        FakeDNSServer server = new FakeDNSServer().store(FakeDNSServer.A_store(Collections.singletonMap("vertx.io", "127.0.0." + index)));
        server.port(FakeDNSServer.PORT + index);
        server.start();
        dnsServers.add(server);
      }
      AddressResolverOptions options = new AddressResolverOptions();
      options.setRotateServers(rotateServers);
      options.setOptResourceEnabled(false);
      if (!cache) {
        // TTL 0 disables caching so every resolve hits a (possibly different) server
        options.setCacheMaxTimeToLive(0);
      }
      for (int i = 0; i < num; i++) {
        InetSocketAddress dnsServerAddress = dnsServers.get(i).localAddress();
        options.addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort());
      }
      NameResolver resolver = new NameResolver(vertx, options);
      for (int i = 0; i < num; i++) {
        String resolved = resolver
          .resolve("vertx.io")
          .await(10, TimeUnit.SECONDS)
          .getHostAddress();
        int expected;
        if (rotateServers && !cache) {
          // rotation without caching: i-th resolve is served by server i+1
          expected = 1 + i;
        } else {
          expected = 1;
        }
        assertEquals("127.0.0." + expected, resolved);
      }
    } finally {
      // always stop the fake servers, even when an assertion failed
      dnsServers.forEach(FakeDNSServer::stop);
    }
  }
  // Address-selection matrix: default, round-robin on, round-robin off.
  // The expected count is the number of distinct addresses observed.
  @Test
  public void testAddressSelectionDefault() throws Exception {
    testAddressSelection(getAddressResolverOptions(), 1);
  }
  @Test
  public void testAddressSelectionWithRoundRobin() throws Exception {
    // round-robin should surface both configured A records
    testAddressSelection(getAddressResolverOptions().setRoundRobinInetAddress(true), 2);
  }
  @Test
  public void testAddressSelectionWithoutRoundRobin() throws Exception {
    testAddressSelection(getAddressResolverOptions().setRoundRobinInetAddress(false), 1);
  }
  // Registers two A records for one name, resolves it 100 times, and checks
  // how many distinct addresses were observed (1 without round-robin, 2 with).
  private void testAddressSelection(AddressResolverOptions options, int expected) throws Exception {
    dnsServer.addRecordsToStore("vertx.io", "127.0.0.1", "127.0.0.2");
    NameResolver resolver = new NameResolver(vertx, options);
    Set<String> resolved = Collections.synchronizedSet(new HashSet<>());
    //due to the random nature of netty's round robin algorithm
    //the below outcome is generally non-deterministic and will fail once in about 2^100 runs (virtually never)
    CountDownLatch latch = new CountDownLatch(100);
    for (int i = 0; i < 100; i++) {
      resolver.resolve("vertx.io").onComplete(onSuccess(inetAddress -> {
        resolved.add(inetAddress.getHostAddress());
        latch.countDown();
      }));
    }
    awaitLatch(latch);
    assertEquals(expected, resolved.size());
  }
  @Test
  public void testServerFailover() throws Exception {
    // Only the second configured server is listening; the resolver must fail
    // over to it within the allowed query budget (maxQueries).
    FakeDNSServer server = new FakeDNSServer().store(FakeDNSServer.A_store(Collections.singletonMap("vertx.io", "127.0.0.1"))).port(FakeDNSServer.PORT + 2);
    try {
      AddressResolverOptions options = new AddressResolverOptions();
      options.setOptResourceEnabled(false);
      options.setMaxQueries(4); // 2 + 2
      server.start();
      InetSocketAddress dnsServerAddress = server.localAddress();
      // First server is unreachable
      options.addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + (FakeDNSServer.PORT + 1));
      // Second server is the failed over server
      options.addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort());
      NameResolver resolver = new NameResolver((VertxImpl) vertx, options);
      // bridge the async result into a CompletableFuture so the test can block on it
      CompletableFuture<InetAddress> result = new CompletableFuture<>();
      resolver.resolve("vertx.io").onComplete(ar -> {
        if (ar.succeeded()) {
          result.complete(ar.result());
        } else {
          result.completeExceptionally(ar.cause());
        }
      });
      String resolved = result.get(10, TimeUnit.SECONDS).getHostAddress();
      assertEquals("127.0.0.1", resolved);
    } finally {
      server.stop();
    }
  }
}
|
NameResolverTest
|
java
|
apache__camel
|
components/camel-fhir/camel-fhir-component/src/test/java/org/apache/camel/component/fhir/FhirUpdateIT.java
|
{
"start": 1733,
"end": 10589
}
|
class ____ extends AbstractFhirTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(FhirUpdateIT.class);
    private static final String PATH_PREFIX = FhirApiCollection.getCollection().getApiName(FhirUpdateApiMethod.class).getName();
    /** Birth date every scenario writes; must differ from the fixture patient's current value. */
    private static final String NEW_BIRTH_DATE = "1998-04-29";

    /**
     * Parses {@link #NEW_BIRTH_DATE}, verifies it actually differs from the
     * patient's current birth date, and applies it to the shared test patient.
     *
     * @return the parsed date, for later verification of the update
     */
    private Date updatedBirthDate() throws Exception {
        Date date = new SimpleDateFormat("yyyy-MM-dd").parse(NEW_BIRTH_DATE);
        assertNotEquals(date, patient.getBirthDate());
        this.patient.setBirthDate(date);
        return date;
    }

    /**
     * Builds the headers common to every update scenario: the resource payload
     * under the given header name, plus the {@code preferReturn} hint asking
     * the server to echo the updated resource back.
     */
    private Map<String, Object> updateHeaders(String resourceHeader, Object resourceValue) {
        final Map<String, Object> headers = new HashMap<>();
        // resource payload: IBaseResource or its JSON String form
        headers.put(resourceHeader, resourceValue);
        // parameter type is ca.uhn.fhir.rest.api.PreferReturnEnum
        headers.put("CamelFhir.preferReturn", PreferReturnEnum.REPRESENTATION);
        return headers;
    }

    /** Asserts the update round-tripped: non-null outcome carrying the new birth date. */
    private void assertBirthDateUpdated(MethodOutcome result, Date date) {
        assertNotNull(result, "resource result");
        LOG.debug("resource: {}", result);
        assertEquals(date, ((Patient) result.getResource()).getBirthDate(), "Birth date not updated!");
    }

    /** JSON-encodes the (already mutated) test patient. */
    private String patientAsJson() {
        return this.fhirContext.newJsonParser().encodeResourceToString(this.patient);
    }

    @Test
    public void testResource() throws Exception {
        Date date = updatedBirthDate();
        Map<String, Object> headers = updateHeaders("CamelFhir.resource", this.patient);
        // parameter type is org.hl7.fhir.instance.model.api.IIdType
        headers.put("CamelFhir.id", this.patient.getIdElement());
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceNoId() throws Exception {
        // no explicit id header: the endpoint derives it from the resource itself
        Date date = updatedBirthDate();
        Map<String, Object> headers = updateHeaders("CamelFhir.resource", this.patient);
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceStringId() throws Exception {
        Date date = updatedBirthDate();
        Map<String, Object> headers = updateHeaders("CamelFhir.resource", this.patient);
        // parameter type is String
        headers.put("CamelFhir.stringId", this.patient.getIdElement().getIdPart());
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE_WITH_STRING_ID", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceAsString() throws Exception {
        Date date = updatedBirthDate();
        Map<String, Object> headers = updateHeaders("CamelFhir.resourceAsString", patientAsJson());
        // parameter type is org.hl7.fhir.instance.model.api.IIdType
        headers.put("CamelFhir.id", this.patient.getIdElement());
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE_AS_STRING", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceAsStringWithStringId() throws Exception {
        Date date = updatedBirthDate();
        Map<String, Object> headers = updateHeaders("CamelFhir.resourceAsString", patientAsJson());
        // parameter type is String
        headers.put("CamelFhir.stringId", this.patient.getIdElement().getIdPart());
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE_AS_STRING_WITH_STRING_ID", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceBySearchUrl() throws Exception {
        Date date = updatedBirthDate();
        // conditional update: locate the resource via a search URL instead of an id
        String url = "Patient?" + Patient.SP_RES_ID + '=' + patient.getIdPart();
        Map<String, Object> headers = updateHeaders("CamelFhir.resource", this.patient);
        // parameter type is String
        headers.put("CamelFhir.url", url);
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE_BY_SEARCH_URL", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Test
    public void testResourceBySearchUrlAndResourceAsString() throws Exception {
        Date date = updatedBirthDate();
        String url = "Patient?" + Patient.SP_RES_ID + '=' + patient.getIdPart();
        Map<String, Object> headers = updateHeaders("CamelFhir.resourceAsString", patientAsJson());
        // parameter type is String
        headers.put("CamelFhir.url", url);
        MethodOutcome result = requestBodyAndHeaders("direct://RESOURCE_BY_SEARCH_URL_AND_RESOURCE_AS_STRING", null, headers);
        assertBirthDateUpdated(result, date);
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                // test route for resource
                from("direct://RESOURCE")
                        .to("fhir://" + PATH_PREFIX + "/resource");
                // test route for resource
                from("direct://RESOURCE_WITH_STRING_ID")
                        .to("fhir://" + PATH_PREFIX + "/resource");
                // test route for resource
                from("direct://RESOURCE_AS_STRING")
                        .to("fhir://" + PATH_PREFIX + "/resource");
                // test route for resource
                from("direct://RESOURCE_AS_STRING_WITH_STRING_ID")
                        .to("fhir://" + PATH_PREFIX + "/resource");
                // test route for resourceBySearchUrl
                from("direct://RESOURCE_BY_SEARCH_URL")
                        .to("fhir://" + PATH_PREFIX + "/resourceBySearchUrl");
                // test route for resourceBySearchUrl
                from("direct://RESOURCE_BY_SEARCH_URL_AND_RESOURCE_AS_STRING")
                        .to("fhir://" + PATH_PREFIX + "/resourceBySearchUrl");
            }
        };
    }
}
|
FhirUpdateIT
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/ValueAnnotationsDeserTest.java
|
{
"start": 5925,
"end": 6387
}
|
class ____
{
    /* Such annotation not allowed, since it makes no sense;
     * non-container classes have no contents to annotate (but
     * note that it is possible to first use @JsonDeserialize.as
     * to mark Object as, say, a List, and THEN use
     * @JsonDeserialize.contentAs!)
     */
    @JsonDeserialize(contentAs=String.class)
    // setter body is intentionally empty: the test only needs the invalid
    // annotation placement to be detected during deserializer construction
    public void setValue(Object x) { }
}
final static
|
InvalidContentClass
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/ExtractFunction.java
|
{
"start": 2224,
"end": 11108
}
|
class ____ extends AbstractSqmFunctionDescriptor implements FunctionRenderer {
	// dialect supplies the SQL rendering pattern for each temporal unit
	final Dialect dialect;
	/**
	 * Descriptor for {@code extract(unit from temporal)}: exactly two
	 * arguments (a temporal unit and a temporal value); the result type is
	 * resolved from the unit argument.
	 */
	public ExtractFunction(Dialect dialect, TypeConfiguration typeConfiguration) {
		super(
				"extract",
				new ArgumentTypesValidator(
						StandardArgumentsValidators.exactly( 2 ),
						TEMPORAL_UNIT, TEMPORAL
				),
				StandardFunctionReturnTypeResolvers.useArgType( 1 ),
				StandardFunctionArgumentTypeResolvers.invariant( typeConfiguration, TEMPORAL_UNIT, TEMPORAL )
		);
		this.dialect = dialect;
	}
	/**
	 * Renders the call using the dialect's extract pattern for the unit in
	 * the first argument.
	 */
	@Override
	public void render(
			SqlAppender sqlAppender,
			List<? extends SqlAstNode> sqlAstArguments,
			ReturnableType<?> returnType,
			SqlAstTranslator<?> walker) {
		final ExtractUnit field = (ExtractUnit) sqlAstArguments.get( 0 );
		final TemporalUnit unit = field.getUnit();
		final String pattern = dialect.extractPattern( unit );
		new PatternRenderer( pattern ).render( sqlAppender, sqlAstArguments, walker );
	}
	/**
	 * Rewrites units that have no direct SQL {@code extract} support into
	 * equivalent expressions (casts, {@code format()}, arithmetic) before
	 * the function is rendered; other units fall through to the dialect.
	 */
	// NOTE(review): raw SelfRenderingSqmFunction return type — presumably kept
	// raw because the branches produce differing type arguments; confirm
	// before parameterizing.
	@Override
	protected <T> SelfRenderingSqmFunction generateSqmFunctionExpression(
			List<? extends SqmTypedNode<?>> arguments,
			ReturnableType<T> impliedResultType,
			QueryEngine queryEngine) {
		final SqmExtractUnit<?> field = (SqmExtractUnit<?>) arguments.get( 0 );
		final SqmExpression<?> originalExpression = (SqmExpression<?>) arguments.get( 1 );
		final boolean compositeTemporal = SqmExpressionHelper.isCompositeTemporal( originalExpression );
		// strip any embedded zone-offset adjustment from the temporal argument
		final SqmExpression<?> expression = SqmExpressionHelper.getOffsetAdjustedExpression( originalExpression );
		switch ( field.getUnit() ) {
			case NANOSECOND:
				return extractNanoseconds( expression, queryEngine );
			case NATIVE:
				throw new SemanticException("NATIVE is not a legal field for extract()");
			case OFFSET:
				if ( compositeTemporal ) {
					// composite temporal: the offset is stored as a separate
					// sub-path, so just navigate to it instead of computing
					final SqmPath<Object> offsetPath = ( (SqmPath<?>) originalExpression ).get(
							ZONE_OFFSET_NAME
					);
					return new SelfRenderingSqmFunction<>(
							this,
							(sqlAppender, sqlAstArguments, returnType, walker) -> sqlAstArguments.get( 0 ).accept( walker ),
							Collections.singletonList( offsetPath ),
							null,
							null,
							StandardFunctionReturnTypeResolvers.useArgType( 1 ),
							expression.nodeBuilder(),
							"extract"
					);
				}
				else {
					// use format(arg, 'xxx') to get the offset
					return extractOffsetUsingFormat( expression, queryEngine );
				}
			case DATE:
			case TIME:
				// use cast(arg as Type) to get the date or time part
				// which might be javax.sql.Date / javax.sql.Time or
				// java.time.LocalDate / java.time.LocalTime depending
				// on the type of the expression we're extracting from
				return extractDateOrTimeUsingCast( expression, field.getType(), queryEngine );
			case WEEK_OF_MONTH:
				// use ceiling(extract(day of month, arg)/7.0)
				return extractWeek( expression, field, DAY_OF_MONTH, queryEngine );
			case WEEK_OF_YEAR:
				// use ceiling(extract(day of year, arg)/7.0)
				return extractWeek( expression, field, DAY_OF_YEAR, queryEngine );
			default:
				// otherwise it's something we expect the SQL dialect
				// itself to understand, either natively, or via the
				// method Dialect.extract()
				return new SelfRenderingSqmFunction<>(
						this,
						this,
						expression == originalExpression ? arguments : List.of( arguments.get( 0 ), expression ),
						impliedResultType,
						getArgumentsValidator(),
						getReturnTypeResolver(),
						expression.nodeBuilder(),
						"extract"
				);
		}
	}
	/**
	 * Builds {@code ceiling((extract(dayOf, x) - extract(dow, x))/7.0 + 1)}
	 * to compute a week-of-month/week-of-year number from day counts.
	 */
	private SelfRenderingSqmFunction<Integer> extractWeek(
			SqmExpression<?> expressionToExtract,
			SqmExtractUnit<?> field,
			TemporalUnit dayOf,
			QueryEngine queryEngine) {
		final NodeBuilder builder = field.nodeBuilder();
		final TypeConfiguration typeConfiguration = queryEngine.getTypeConfiguration();
		final BasicType<Integer> intType = typeConfiguration.getBasicTypeForJavaType( Integer.class );
		final BasicType<Float> floatType = typeConfiguration.getBasicTypeForJavaType( Float.class );
		final SqmExtractUnit<Integer> dayOfUnit = new SqmExtractUnit<>( dayOf, intType, builder );
		final SqmExpression<Integer> extractDayOf
				= queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("extract")
				.generateSqmExpression(
						asList( dayOfUnit, expressionToExtract ),
						intType,
						queryEngine
				);
		final SqmExtractUnit<Integer> dayOfWeekUnit = new SqmExtractUnit<>( DAY_OF_WEEK, intType, builder );
		final SqmExpression<Integer> extractDayOfWeek
				= queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("extract")
				.generateSqmExpression(
						asList( dayOfWeekUnit, expressionToExtract ),
						intType,
						queryEngine
				);
		final SqmLiteral<Float> seven = new SqmLiteral<>( 7.0f, floatType, builder );
		final SqmLiteral<Integer> one = new SqmLiteral<>( 1, intType, builder );
		final SqmBinaryArithmetic<Integer> daySubtractionInt = new SqmBinaryArithmetic<>(
				SUBTRACT,
				extractDayOf,
				extractDayOfWeek,
				intType,
				builder
		);
		final SqmExpression<?> daySubtraction;
		if ( dialect.requiresFloatCastingOfIntegerDivision() ) {
			// force float division on dialects where int/float would truncate
			daySubtraction = queryEngine.getSqmFunctionRegistry()
					.findFunctionDescriptor("cast")
					.generateSqmExpression(
							asList( daySubtractionInt, new SqmCastTarget<>( floatType, builder ) ),
							floatType,
							queryEngine
					);
		}
		else {
			daySubtraction = daySubtractionInt;
		}
		return queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("ceiling")
				.generateSqmExpression(
						new SqmBinaryArithmetic<>(
								ADD,
								new SqmBinaryArithmetic<>(
										DIVIDE,
										daySubtraction,
										seven,
										floatType,
										builder
								),
								one,
								intType,
								builder
						),
						intType, // Implicit cast to int
						queryEngine
				);
	}
	/**
	 * Truncates a float-valued expression to a long using {@code floor()}.
	 */
	private SelfRenderingSqmFunction<Long> toLong(
			SqmExpression<?> arg,
			QueryEngine queryEngine) {
		//Not every database supports round() (looking at you Derby)
		//so use floor() instead, which is perfectly fine for this
//		return getFunctionTemplate("round").makeSqmFunctionExpression(
//				asList( arg, integerLiteral("0") ),
//				basicType( Long.class ),
//				creationContext.getQueryEngine(),
//				creationContext.getDomainModel().getTypeConfiguration()
//		);
		BasicType<Long> longType = queryEngine.getTypeConfiguration().getBasicTypeForJavaType(Long.class);
		return queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("floor")
				.generateSqmExpression(
						arg,
						longType, // Implicit cast to long
						queryEngine
				);
	}
	/**
	 * Emulates extract(nanosecond) as floor(extract(second) * 1e9).
	 */
	private SelfRenderingSqmFunction<Long> extractNanoseconds(
			SqmExpression<?> expressionToExtract,
			QueryEngine queryEngine) {
		final NodeBuilder builder = expressionToExtract.nodeBuilder();
		final TypeConfiguration typeConfiguration = queryEngine.getTypeConfiguration();
		final BasicType<Float> floatType = typeConfiguration.getBasicTypeForJavaType(Float.class);
		final SqmExtractUnit<Float> extractSeconds = new SqmExtractUnit<>( SECOND, floatType, builder );
		final SqmLiteral<Float> billion = new SqmLiteral<>( 1e9f, floatType, builder );
		return toLong(
				new SqmBinaryArithmetic<>(
						MULTIPLY,
						generateSqmExpression(
								asList( extractSeconds, expressionToExtract ),
								floatType,
								queryEngine
						),
						billion,
						floatType,
						builder
				),
				queryEngine
		);
	}
	/**
	 * Emulates extract(offset) via {@code format(arg, 'xxx')}.
	 */
	private SelfRenderingSqmFunction<ZoneOffset> extractOffsetUsingFormat(
			SqmExpression<?> expressionToExtract,
			QueryEngine queryEngine) {
		final NodeBuilder builder = expressionToExtract.nodeBuilder();
		final TypeConfiguration typeConfiguration = queryEngine.getTypeConfiguration();
		final BasicType<ZoneOffset> offsetType = typeConfiguration.getBasicTypeForJavaType(ZoneOffset.class);
		final BasicType<String> stringType = typeConfiguration.getBasicTypeForJavaType(String.class);
		final SqmFormat offsetFormat = new SqmFormat(
				"xxx", //pattern for timezone offset
				stringType,
				builder
		);
		return queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("format")
				.generateSqmExpression(
						asList( expressionToExtract, offsetFormat ),
						offsetType,
						queryEngine
				);
	}
	/**
	 * Emulates extract(date)/extract(time) as {@code cast(arg as Type)}.
	 */
	private SelfRenderingSqmFunction<?> extractDateOrTimeUsingCast(
			SqmExpression<?> expressionToExtract,
			ReturnableType<?> type,
			QueryEngine queryEngine) {
		final NodeBuilder builder = expressionToExtract.nodeBuilder();
		final SqmCastTarget<?> target = new SqmCastTarget<>( type, builder );
		return queryEngine.getSqmFunctionRegistry()
				.findFunctionDescriptor("cast")
				.generateSqmExpression(
						asList( expressionToExtract, target ),
						type,
						queryEngine
				);
	}
	@Override
	public String getArgumentListSignature() {
		return "(TEMPORAL_UNIT field from TEMPORAL arg)";
	}
}
|
ExtractFunction
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/error/ShouldBeAfter_create_Test.java
|
{
"start": 1464,
"end": 2969
}
|
class ____ {
  @Test
  void should_create_error_message() {
    // GIVEN
    ErrorMessageFactory factory = shouldBeAfter(parse("2011-01-01"), parse("2012-01-01"));
    // WHEN
    String message = factory.create(new TextDescription("Test"), new StandardRepresentation());
    // THEN
    String expected = format("[Test] %n"
                             + "Expecting actual:%n"
                             + " 2011-01-01T00:00:00.000 (java.util.Date)%n"
                             + "to be strictly after:%n"
                             + " 2012-01-01T00:00:00.000 (java.util.Date)%n");
    then(message).isEqualTo(expected);
  }
  @Test
  void should_create_error_message_with_comparison_strategy() {
    // GIVEN
    ComparatorBasedComparisonStrategy comparisonStrategy = new ComparatorBasedComparisonStrategy(NEVER_EQUALS);
    ErrorMessageFactory factory = shouldBeAfter(parse("2011-01-01"), parse("2012-01-01"), comparisonStrategy);
    // WHEN
    String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
    // THEN
    String expected = format("[Test] %n"
                             + "Expecting actual:%n"
                             + " 2011-01-01T00:00:00.000 (java.util.Date)%n"
                             + "to be strictly after:%n"
                             + " 2012-01-01T00:00:00.000 (java.util.Date)%n"
                             + "when comparing values using '%s'",
                             NEVER_EQUALS.description());
    then(message).isEqualTo(expected);
  }
}
|
ShouldBeAfter_create_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/QueryPredicateAndParameterComparableTest.java
|
{
"start": 5959,
"end": 6774
}
|
class ____ {
	// surrogate key, assigned by the provider on persist
	private int id;
	private String description;
	// owning side of the many-to-many to Submission (mapped via join table below)
	private Set<Submission> submissions;
	// Mapping annotations are placed on the getters, so this entity uses
	// property access; keep them on the getters, not the fields.
	@Id
	@GeneratedValue
	public int getId() {
		return id;
	}
	public void setId(int id) {
		this.id = id;
	}
	@ManyToMany(fetch = FetchType.LAZY)
	@JoinTable(name = "submissions_participations", joinColumns = @JoinColumn(name = "submitters_id"), inverseJoinColumns = @JoinColumn(name = "submissions_submissionid"))
	public Set<Submission> getSubmissions() {
		return submissions;
	}
	public void setSubmissions(Set<Submission> submissions) {
		this.submissions = submissions;
	}
	public String getDescription() {
		return description;
	}
	public void setDescription(String description) {
		this.description = description;
	}
}
@Entity(name = "submissions")
public static
|
Participation
|
java
|
resilience4j__resilience4j
|
resilience4j-rxjava2/src/main/java/io/github/resilience4j/ratelimiter/operator/SingleRateLimiter.java
|
{
"start": 1998,
"end": 2491
}
|
    class ____ extends AbstractSingleObserver<T> {
        RateLimiterSingleObserver(SingleObserver<? super T> downstreamObserver) {
            super(downstreamObserver);
        }
        // NOTE(review): hooks are intentionally empty — rate-limiter permit
        // bookkeeping presumably happens in AbstractSingleObserver; confirm
        // against the base class before adding per-event logic here.
        @Override
        protected void hookOnError(Throwable e) {
            // NoOp
        }
        @Override
        protected void hookOnSuccess(T value) {
            // NoOp
        }
        @Override
        protected void hookOnCancel() {
            // NoOp
        }
    }
}
|
RateLimiterSingleObserver
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mustache/src/main/java/org/springframework/boot/mustache/servlet/view/MustacheView.java
|
{
"start": 1465,
"end": 3518
}
|
class ____ extends AbstractTemplateView {
	private @Nullable Compiler compiler;
	private @Nullable String charset;
	/**
	 * Set the Mustache compiler to be used by this view.
	 * <p>
	 * Typically this property is not set directly. Instead a single {@link Compiler} is
	 * expected in the Spring application context which is used to compile Mustache
	 * templates.
	 * @param compiler the Mustache compiler
	 */
	public void setCompiler(Compiler compiler) {
		this.compiler = compiler;
	}
	/**
	 * Set the charset used for reading Mustache template files.
	 * @param charset the charset to use for reading template files
	 */
	public void setCharset(@Nullable String charset) {
		this.charset = charset;
	}
	/**
	 * A view is available only when its template resource exists.
	 */
	@Override
	public boolean checkResource(Locale locale) throws Exception {
		Resource resource = getResource();
		return resource != null;
	}
	/**
	 * Compiles this view's template and renders it with the given model,
	 * writing directly to the response writer.
	 */
	@Override
	protected void renderMergedTemplateModel(Map<String, Object> model, HttpServletRequest request,
			HttpServletResponse response) throws Exception {
		Resource resource = getResource();
		Assert.state(resource != null, "'resource' must not be null");
		Template template = createTemplate(resource);
		// NOTE(review): createTemplate returns the compile() result and never
		// null on the visible path; this guard looks purely defensive —
		// confirm before removing it.
		if (template != null) {
			template.execute(model, response.getWriter());
		}
	}
	// Resolves this view's URL against the application context; null when the
	// context or URL is unset, or the resource does not exist.
	private @Nullable Resource getResource() {
		ApplicationContext applicationContext = getApplicationContext();
		String url = getUrl();
		if (applicationContext == null || url == null) {
			return null;
		}
		Resource resource = applicationContext.getResource(url);
		return (resource.exists()) ? resource : null;
	}
	// Compiles the resource with the configured compiler; the reader is
	// closed by try-with-resources.
	private Template createTemplate(Resource resource) throws IOException {
		try (Reader reader = getReader(resource)) {
			Assert.state(this.compiler != null, "'compiler' must not be null");
			return this.compiler.compile(reader);
		}
	}
	// Uses the configured charset when present, otherwise the platform default.
	private Reader getReader(Resource resource) throws IOException {
		if (this.charset != null) {
			return new InputStreamReader(resource.getInputStream(), this.charset);
		}
		return new InputStreamReader(resource.getInputStream());
	}
}
|
MustacheView
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableElementAtTest.java
|
{
"start": 1285,
"end": 12137
}
|
class ____ extends RxJavaTest {
    // ---- elementAt(...).toFlowable() variants ----
    @Test
    public void elementAtFlowable() {
        assertEquals(2, Flowable.fromArray(1, 2).elementAt(1).toFlowable().blockingSingle()
                .intValue());
    }
    @Test(expected = IndexOutOfBoundsException.class)
    public void elementAtWithMinusIndexFlowable() {
        // a negative index is rejected eagerly, at assembly time
        Flowable.fromArray(1, 2).elementAt(-1);
    }
    @Test
    public void elementAtWithIndexOutOfBoundsFlowable() {
        // out-of-range index completes empty; blockingFirst returns its default
        assertEquals(-100, Flowable.fromArray(1, 2).elementAt(2).toFlowable().blockingFirst(-100).intValue());
    }
    @Test
    public void elementAtOrDefaultFlowable() {
        assertEquals(2, Flowable.fromArray(1, 2).elementAt(1, 0).toFlowable().blockingSingle().intValue());
    }
    @Test
    public void elementAtOrDefaultWithIndexOutOfBoundsFlowable() {
        assertEquals(0, Flowable.fromArray(1, 2).elementAt(2, 0).toFlowable().blockingSingle().intValue());
    }
    @Test(expected = IndexOutOfBoundsException.class)
    public void elementAtOrDefaultWithMinusIndexFlowable() {
        Flowable.fromArray(1, 2).elementAt(-1, 0);
    }
    // ---- Maybe/Single-returning variants ----
    @Test
    public void elementAt() {
        assertEquals(2, Flowable.fromArray(1, 2).elementAt(1).blockingGet()
                .intValue());
    }
    @Test
    public void elementAtConstrainsUpstreamRequests() {
        // elementAt(2) needs exactly 3 elements, so it must request exactly 3
        final List<Long> requests = new ArrayList<>();
        Flowable.fromArray(1, 2, 3, 4)
        .doOnRequest(new LongConsumer() {
            @Override
            public void accept(long n) throws Throwable {
                requests.add(n);
            }
        })
        .elementAt(2)
        .blockingGet()
        .intValue();
        assertEquals(Arrays.asList(3L), requests);
    }
    @Test
    public void elementAtWithDefaultConstrainsUpstreamRequests() {
        // same request constraint with the defaulting overload
        final List<Long> requests = new ArrayList<>();
        Flowable.fromArray(1, 2, 3, 4)
        .doOnRequest(new LongConsumer() {
            @Override
            public void accept(long n) throws Throwable {
                requests.add(n);
            }
        })
        .elementAt(2, 100)
        .blockingGet()
        .intValue();
        assertEquals(Arrays.asList(3L), requests);
    }
    @Test(expected = IndexOutOfBoundsException.class)
    public void elementAtWithMinusIndex() {
        Flowable.fromArray(1, 2).elementAt(-1);
    }
    @Test
    public void elementAtWithIndexOutOfBounds() {
        // Maybe variant: out-of-range index completes empty, blockingGet() is null
        assertNull(Flowable.fromArray(1, 2).elementAt(2).blockingGet());
    }
    @Test
    public void elementAtOrDefault() {
        assertEquals(2, Flowable.fromArray(1, 2).elementAt(1, 0).blockingGet().intValue());
    }
    @Test
    public void elementAtOrDefaultWithIndexOutOfBounds() {
        assertEquals(0, Flowable.fromArray(1, 2).elementAt(2, 0).blockingGet().intValue());
    }
    @Test(expected = IndexOutOfBoundsException.class)
    public void elementAtOrDefaultWithMinusIndex() {
        Flowable.fromArray(1, 2).elementAt(-1, 0);
    }
    // ---- elementAtOrError: a missing element is an error, not empty ----
    @Test(expected = IndexOutOfBoundsException.class)
    public void elementAtOrErrorNegativeIndex() {
        Flowable.empty()
            .elementAtOrError(-1);
    }
    @Test
    public void elementAtOrErrorNoElement() {
        Flowable.empty()
            .elementAtOrError(0)
            .test()
            .assertNoValues()
            .assertError(NoSuchElementException.class);
    }
    @Test
    public void elementAtOrErrorOneElement() {
        Flowable.just(1)
            .elementAtOrError(0)
            .test()
            .assertNoErrors()
            .assertValue(1);
    }
    @Test
    public void elementAtOrErrorMultipleElements() {
        Flowable.just(1, 2, 3)
            .elementAtOrError(1)
            .test()
            .assertNoErrors()
            .assertValue(2);
    }
    @Test
    public void elementAtOrErrorInvalidIndex() {
        Flowable.just(1, 2, 3)
            .elementAtOrError(3)
            .test()
            .assertNoValues()
            .assertError(NoSuchElementException.class);
    }
    @Test
    public void elementAtOrErrorError() {
        // upstream errors pass through unchanged
        Flowable.error(new RuntimeException("error"))
            .elementAtOrError(0)
            .to(TestHelper.testConsumer())
            .assertNoValues()
            .assertErrorMessage("error")
            .assertError(RuntimeException.class);
    }
    // ---- empty-source behavior for each index/default combination ----
    @Test
    public void elementAtIndex0OnEmptySource() {
        Flowable.empty()
            .elementAt(0)
            .test()
            .assertResult();
    }
    @Test
    public void elementAtIndex0WithDefaultOnEmptySource() {
        Flowable.empty()
            .elementAt(0, 5)
            .test()
            .assertResult(5);
    }
    @Test
    public void elementAtIndex1OnEmptySource() {
        Flowable.empty()
            .elementAt(1)
            .test()
            .assertResult();
    }
    @Test
    public void elementAtIndex1WithDefaultOnEmptySource() {
        Flowable.empty()
            .elementAt(1, 10)
            .test()
            .assertResult(10);
    }
    @Test
    public void elementAtOrErrorIndex1OnEmptySource() {
        Flowable.empty()
            .elementAtOrError(1)
            .test()
            .assertFailure(NoSuchElementException.class);
    }
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f) throws Exception {
return f.elementAt(0).toFlowable();
}
});
TestHelper.checkDoubleOnSubscribeFlowableToMaybe(new Function<Flowable<Object>, Maybe<Object>>() {
@Override
public Maybe<Object> apply(Flowable<Object> f) throws Exception {
return f.elementAt(0);
}
});
TestHelper.checkDoubleOnSubscribeFlowableToSingle(new Function<Flowable<Object>, Single<Object>>() {
@Override
public Single<Object> apply(Flowable<Object> f) throws Exception {
return f.elementAt(0, 1);
}
});
}
@Test
public void elementAtIndex1WithDefaultOnEmptySourceObservable() {
Flowable.empty()
.elementAt(1, 10)
.toFlowable()
.test()
.assertResult(10);
}
@Test
public void errorFlowable() {
Flowable.error(new TestException())
.elementAt(1, 10)
.toFlowable()
.test()
.assertFailure(TestException.class);
}
@Test
public void error() {
Flowable.error(new TestException())
.elementAt(1, 10)
.test()
.assertFailure(TestException.class);
Flowable.error(new TestException())
.elementAt(1)
.test()
.assertFailure(TestException.class);
}
@Test
public void badSource() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onNext(1);
subscriber.onNext(2);
subscriber.onError(new TestException());
subscriber.onComplete();
}
}
.elementAt(0)
.toFlowable()
.test()
.assertResult(1);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
TestHelper.checkBadSourceFlowable(new Function<Flowable<Integer>, Object>() {
@Override
public Object apply(Flowable<Integer> f) throws Exception {
return f.elementAt(0);
}
}, false, null, 1);
TestHelper.checkBadSourceFlowable(new Function<Flowable<Integer>, Object>() {
@Override
public Object apply(Flowable<Integer> f) throws Exception {
return f.elementAt(0, 1);
}
}, false, null, 1, 1);
TestHelper.checkBadSourceFlowable(new Function<Flowable<Integer>, Object>() {
@Override
public Object apply(Flowable<Integer> f) throws Exception {
return f.elementAt(0).toFlowable();
}
}, false, null, 1);
TestHelper.checkBadSourceFlowable(new Function<Flowable<Integer>, Object>() {
@Override
public Object apply(Flowable<Integer> f) throws Exception {
return f.elementAt(0, 1).toFlowable();
}
}, false, null, 1, 1);
}
@Test
public void dispose() {
TestHelper.checkDisposed(PublishProcessor.create().elementAt(0).toFlowable());
TestHelper.checkDisposed(PublishProcessor.create().elementAt(0, 1).toFlowable());
TestHelper.checkDisposed(PublishProcessor.create().elementAt(0));
TestHelper.checkDisposed(PublishProcessor.create().elementAt(0, 1));
}
@Test
public void badSourceObservable() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Observable<Integer>() {
@Override
protected void subscribeActual(Observer<? super Integer> observer) {
observer.onSubscribe(Disposable.empty());
observer.onNext(1);
observer.onNext(2);
observer.onError(new TestException());
observer.onComplete();
}
}
.elementAt(0)
.toFlowable()
.test()
.assertResult(1);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void badSource2() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onNext(1);
subscriber.onNext(2);
subscriber.onError(new TestException());
subscriber.onComplete();
}
}
.elementAt(0, 1)
.test()
.assertResult(1);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
}
|
FlowableElementAtTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
|
{
"start": 2103,
"end": 13595
}
|
class ____ {
private static final Random rnd = new Random(12345l);
//test on NullPointerException in {@code compressor.setInput()}
@Test
public void testCompressorSetInputNullPointerException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
compressor.setInput(null, 0, 10);
fail("testCompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testCompressorSetInputNullPointerException ex error !!!");
}
}
//test on NullPointerException in {@code decompressor.setInput()}
@Test
public void testDecompressorSetInputNullPointerException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
decompressor.setInput(null, 0, 10);
fail("testDecompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorSetInputNullPointerException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code compressor.setInput()}
@Test
public void testCompressorSetInputAIOBException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
compressor.setInput(new byte[] {}, -5, 10);
fail("testCompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception ex) {
fail("testCompressorSetInputAIOBException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code decompressor.setInput()}
@Test
public void testDecompressorSetInputAIOUBException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
decompressor.setInput(new byte[] {}, -5, 10);
fail("testDecompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorSetInputAIOBException ex error !!!");
}
}
//test on NullPointerException in {@code compressor.compress()}
@Test
public void testCompressorCompressNullPointerException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(null, 0, 0);
fail("testCompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testCompressorCompressNullPointerException ex error !!!");
}
}
//test on NullPointerException in {@code decompressor.decompress()}
@Test
public void testDecompressorCompressNullPointerException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
byte[] bytes = generate(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(null, 0, 0);
fail("testDecompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorCompressNullPointerException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code compressor.compress()}
@Test
public void testCompressorCompressAIOBException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(new byte[] {}, 0, -1);
fail("testCompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testCompressorCompressAIOBException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in decompressor.decompress()
@Test
public void testDecompressorCompressAIOBException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
byte[] bytes = generate(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(new byte[] {}, 0, -1);
fail("testDecompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorCompressAIOBException ex error !!!");
}
}
// test Lz4Compressor compressor.compress()
@Test
public void testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize() {
int BYTES_SIZE = 1024 * 64 + 1;
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(BYTES_SIZE);
assertTrue(compressor.needsInput(), "needsInput error !!!");
compressor.setInput(bytes, 0, bytes.length);
byte[] emptyBytes = new byte[BYTES_SIZE];
int csize = compressor.compress(emptyBytes, 0, bytes.length);
assertTrue(csize != 0,
"testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize error !!!");
} catch (Exception ex) {
fail("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize ex error !!!");
}
}
// test compress/decompress process
@Test
public void testCompressDecompress() {
int BYTE_SIZE = 1024 * 54;
byte[] bytes = generate(BYTE_SIZE);
Lz4Compressor compressor = new Lz4Compressor();
try {
compressor.setInput(bytes, 0, bytes.length);
assertTrue(compressor.getBytesRead() > 0,
"Lz4CompressDecompress getBytesRead error !!!");
assertTrue(compressor.getBytesWritten() == 0,
"Lz4CompressDecompress getBytesWritten before compress error !!!");
byte[] compressed = new byte[BYTE_SIZE];
int cSize = compressor.compress(compressed, 0, compressed.length);
assertTrue(compressor.getBytesWritten() > 0,
"Lz4CompressDecompress getBytesWritten after compress error !!!");
Lz4Decompressor decompressor = new Lz4Decompressor();
// set as input for decompressor only compressed data indicated with cSize
decompressor.setInput(compressed, 0, cSize);
byte[] decompressed = new byte[BYTE_SIZE];
decompressor.decompress(decompressed, 0, decompressed.length);
assertTrue(decompressor.finished(), "testLz4CompressDecompress finished error !!!");
assertArrayEquals(bytes, decompressed);
compressor.reset();
decompressor.reset();
assertTrue(decompressor.getRemaining() == 0,
"decompressor getRemaining error !!!");
} catch (Exception e) {
fail("testLz4CompressDecompress ex error!!!");
}
}
// test compress/decompress with empty stream
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
ByteArrayInputStream bytesIn = null;
ByteArrayOutputStream bytesOut = null;
byte[] buf = null;
BlockDecompressorStream blockDecompressorStream = null;
try {
// compress empty stream
bytesOut = new ByteArrayOutputStream();
BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
bytesOut, new Lz4Compressor(), 1024, 0);
// close without write
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
assertEquals(4, buf.length, "empty stream compressed output size != 4");
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// create decompression stream
blockDecompressorStream = new BlockDecompressorStream(bytesIn,
new Lz4Decompressor(), 1024);
// no byte is available because stream was closed
assertEquals(-1, blockDecompressorStream.read(), "return value is not -1");
} catch (Exception e) {
fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
+ e.getMessage());
} finally {
if (blockDecompressorStream != null)
try {
bytesIn.close();
bytesOut.close();
blockDecompressorStream.close();
} catch (IOException e) {
}
}
}
// test compress/decompress process through CompressionOutputStream/CompressionInputStream api
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
DataOutputStream deflateOut = null;
DataInputStream inflateIn = null;
int BYTE_SIZE = 1024 * 100;
byte[] bytes = generate(BYTE_SIZE);
int bufferSize = 262144;
int compressionOverhead = (bufferSize / 6) + 32;
try {
DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
CompressionOutputStream deflateFilter = new BlockCompressorStream(
compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize,
compressionOverhead);
deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
deflateOut.write(bytes, 0, bytes.length);
deflateOut.flush();
deflateFilter.finish();
DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
CompressionInputStream inflateFilter = new BlockDecompressorStream(
deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
byte[] result = new byte[BYTE_SIZE];
inflateIn.read(result);
assertArrayEquals(result,
bytes, "original array not equals compress/decompressed array");
} catch (IOException e) {
fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
} finally {
try {
if (deflateOut != null)
deflateOut.close();
if (inflateIn != null)
inflateIn.close();
} catch (Exception e) {
}
}
}
public static byte[] generate(int size) {
byte[] array = new byte[size];
for (int i = 0; i < size; i++)
array[i] = (byte)rnd.nextInt(16);
return array;
}
@Test
public void testLz4CompressDecompressInMultiThreads() throws Exception {
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
for(int i=0;i<10;i++) {
ctx.addThread( new MultithreadedTestUtil.TestingThread(ctx) {
@Override
public void doWork() throws Exception {
testCompressDecompress();
}
});
}
ctx.startThreads();
ctx.waitFor(60000);
}
@Test
public void testLz4Compatibility() throws Exception {
// The sequence file was created using native Lz4 codec before HADOOP-17292.
// After we use lz4-java for lz4 compression, this test makes sure we can
// decompress the sequence file correctly.
Path filePath = new Path(TestLz4CompressorDecompressor.class
.getResource("/lz4/sequencefile").toURI());
Configuration conf = new Configuration();
conf.setInt("io.seqfile.compress.blocksize", 1000);
FileSystem fs = FileSystem.get(conf);
int lines = 2000;
SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, conf);
Writable key = (Writable)reader.getKeyClass().newInstance();
Writable value = (Writable)reader.getValueClass().newInstance();
int lc = 0;
try {
while (reader.next(key, value)) {
assertEquals("key" + lc, key.toString());
assertEquals("value" + lc, value.toString());
lc++;
}
} finally {
reader.close();
}
assertEquals(lines, lc);
}
}
|
TestLz4CompressorDecompressor
|
java
|
apache__flink
|
flink-table/flink-table-api-java/src/test/java/org/apache/flink/table/expressions/resolver/ExpressionResolverTest.java
|
{
"start": 19876,
"end": 20253
}
|
class ____ extends ScalarFunction {
public int eval(Object... any) {
return 0;
}
@Override
public int hashCode() {
return 0;
}
@Override
public boolean equals(Object obj) {
return obj instanceof ScalarFunc;
}
}
/** Legacy scalar function. */
public static
|
ScalarFunc
|
java
|
apache__camel
|
catalog/camel-route-parser/src/test/java/org/apache/camel/parser/java/MyPartialRoute.java
|
{
"start": 896,
"end": 1020
}
|
class ____ extends RouteBuilder {
@Override
public void configure() {
from("timer:foo");
}
}
|
MyPartialRoute
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.