language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicCodecBuilder.java | {
"start": 1070,
"end": 1188
} | class ____ {@code QUIC} codec builders.
*
* @param <B> the type of the {@link QuicCodecBuilder}.
*/
public abstract | for |
java | playframework__playframework | dev-mode/sbt-plugin/src/sbt-test/play-sbt-plugin/routes-compiler-routes-compilation-java/app/controllers/Application.java | {
"start": 823,
"end": 2950
} | class ____ extends Controller {
private final ClassLoaderExecutionContext clExecutionContext;
@Inject
public Application(ClassLoaderExecutionContext ec) {
this.clExecutionContext = ec;
}
public CompletionStage<Result> async(Request request, Integer x) {
return CompletableFuture.supplyAsync(() -> 2 * x, clExecutionContext.current())
.thenApply(answer -> ok(String.format("Answer: " + answer)));
}
public WebSocket webSocket(String x) {
return WebSocket.Text.accept(
request -> Flow.fromSinkAndSource(Sink.ignore(), Source.single("Hello, " + x)));
}
public Result uriPattern(String x) {
return ok(x);
}
public Result onlyRequestParam(Request request) {
return ok();
}
public Result multiParams(
Boolean a,
Character b,
String c,
Short d,
Integer e,
Long f,
Float g,
Double h,
UUID i,
OptionalInt j,
OptionalLong k,
OptionalDouble l,
String m,
String n,
String o,
String p,
String q,
String r,
String s,
String t,
String u,
String v,
String w,
String x,
String y,
String z) {
return ok(
Stream.of(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z)
.map(Object::toString)
.collect(Collectors.joining(",")));
}
public Result urlcoding(String dynamic, String _static, String query) {
return ok(String.format("dynamic=%s static=%s query=%s", dynamic, _static, query));
}
public Result keyword(String keyword) {
return ok(keyword);
}
public Result keywordWithRequest(Request request, String keyword) {
return ok(keyword);
}
public Result reverse(
Boolean b,
Character c,
Short s,
Integer i,
Long l,
Float f,
Double d,
UUID uuid,
OptionalInt oi,
OptionalLong ol,
OptionalDouble od,
String str,
Optional<String> ostr) {
return ok();
}
public Result interpolatorWarning(String parameter) {
return ok(parameter);
}
}
| Application |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java | {
"start": 1977,
"end": 6895
} | class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(TestAddBlockRetry.class);
private static final short REPLICATION = 3;
private Configuration conf;
private MiniDFSCluster cluster;
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Retry addBlock() while another thread is in chooseTarget().
* See HDFS-4452.
*/
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
final String src = "/testRetryAddBlockWhileInChooseTarget";
final FSNamesystem ns = cluster.getNamesystem();
final NamenodeProtocols nn = cluster.getNameNodeRpc();
// create file
nn.create(src, FsPermission.getFileDefault(),
"clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short) 3, 1024, null, null, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
ns.readLock(RwLockMode.GLOBAL);
FSDirWriteFileOp.ValidateAddBlockResult r;
FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
try {
r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
HdfsConstants.GRANDFATHER_INODE_ID,
"clientName", null, onRetryBlock);
} finally {
ns.readUnlock(RwLockMode.GLOBAL, "validateAddBlock");
}
DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
ns.getBlockManager(), src, null, null, null, r);
assertNotNull(targets, "Targets must be generated");
// run second addBlock()
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertTrue(checkFileProgress(src, false), "Penultimate block must be complete");
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
LocatedBlock lb2 = lbs.get(0);
assertEquals(REPLICATION, lb2.getLocations().length, "Wrong replication");
// continue first addBlock()
ns.writeLock(RwLockMode.GLOBAL);
LocatedBlock newBlock;
try {
newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
} finally {
ns.writeUnlock(RwLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
}
assertEquals(lb2.getBlock(), newBlock.getBlock(), "Blocks are not equal");
// check locations
lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
LocatedBlock lb1 = lbs.get(0);
assertEquals(REPLICATION, lb1.getLocations().length, "Wrong replication");
assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
}
boolean checkFileProgress(String src, boolean checkall) throws IOException {
final FSNamesystem ns = cluster.getNamesystem();
ns.readLock(RwLockMode.GLOBAL);
try {
return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
} finally {
ns.readUnlock(RwLockMode.GLOBAL, "checkFileProgress");
}
}
/*
* Since NameNode will not persist any locations of the block, addBlock()
* retry call after restart NN should re-select the locations and return to
* client. refer HDFS-5257
*/
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
throws Exception {
final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
// create file
nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 3, 1024, null, null, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertTrue(lb1.getLocations().length > 0,
"Block locations should be present");
cluster.restartNameNode();
nameNodeRpc = cluster.getNameNodeRpc();
LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
assertTrue(lb2.getLocations().length > 0, "Wrong locations with retry");
}
}
| TestAddBlockRetry |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/config/ConfigDataLocationNotFoundExceptionTests.java | {
"start": 1034,
"end": 2169
} | class ____ {
private final Origin origin = mock(Origin.class);
private final ConfigDataLocation location = ConfigDataLocation.of("optional:test").withOrigin(this.origin);
private final ConfigDataLocationNotFoundException exception = new ConfigDataLocationNotFoundException(
this.location);
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenLocationIsNullThrowsException() {
assertThatIllegalArgumentException().isThrownBy(() -> new ConfigDataLocationNotFoundException(null))
.withMessage("'location' must not be null");
}
@Test
void getLocationReturnsLocation() {
assertThat(this.exception.getLocation()).isSameAs(this.location);
}
@Test
void getOriginReturnsLocationOrigin() {
assertThat(this.exception.getOrigin()).isSameAs(this.origin);
}
@Test
void getReferenceDescriptionReturnsLocationString() {
assertThat(this.exception.getReferenceDescription()).isEqualTo("location 'optional:test'");
}
@Test
void getMessageReturnsMessage() {
assertThat(this.exception).hasMessage("Config data location 'optional:test' cannot be found");
}
}
| ConfigDataLocationNotFoundExceptionTests |
java | google__auto | value/src/main/java/com/google/auto/value/processor/TypeEncoder.java | {
"start": 9386,
"end": 9670
} | class ____ they represent,
* spelled appropriately given the import statements.
*
* @param text the text to be decoded.
* @param packageName the package of the generated class. Other classes in the same package do not
* need to be imported.
* @param baseType a | names |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/consumer/group/share/ShareGroupStateMessageFormatterTest.java | {
"start": 2290,
"end": 10869
} | class ____ extends CoordinatorRecordMessageFormatterTest {
private static final SharePartitionKey KEY_1 = SharePartitionKey.getInstance("gs1", Uuid.fromString("gtb2stGYRk-vWZ2zAozmoA"), 0);
private static final ShareGroupOffset SHARE_GROUP_OFFSET_1 = new ShareGroupOffset.Builder()
.setSnapshotEpoch(0)
.setStateEpoch(1)
.setLeaderEpoch(20)
.setStartOffset(50)
.setStateBatches(
List.of(
new PersisterStateBatch(
100,
200,
(byte) 1,
(short) 10
),
new PersisterStateBatch(
201,
210,
(byte) 2,
(short) 10
)
)
).build();
private static final SharePartitionKey KEY_2 = SharePartitionKey.getInstance("gs2", Uuid.fromString("r9Nq4xGAQf28jvu36t7gQQ"), 0);
private static final ShareGroupOffset SHARE_GROUP_OFFSET_2 = new ShareGroupOffset.Builder()
.setSnapshotEpoch(1)
.setStateEpoch(3)
.setLeaderEpoch(25)
.setStartOffset(55)
.setStateBatches(
List.of(
new PersisterStateBatch(
100,
150,
(byte) 1,
(short) 12
),
new PersisterStateBatch(
151,
200,
(byte) 2,
(short) 15
)
)
).build();
private static final ShareSnapshotKey SHARE_SNAPSHOT_KEY = new ShareSnapshotKey()
.setGroupId(KEY_1.groupId())
.setTopicId(KEY_1.topicId())
.setPartition(KEY_1.partition());
private static final ShareSnapshotValue SHARE_SNAPSHOT_VALUE = new ShareSnapshotValue()
.setSnapshotEpoch(SHARE_GROUP_OFFSET_1.snapshotEpoch())
.setStateEpoch(SHARE_GROUP_OFFSET_1.stateEpoch())
.setLeaderEpoch(SHARE_GROUP_OFFSET_1.leaderEpoch())
.setStartOffset(SHARE_GROUP_OFFSET_1.startOffset())
.setCreateTimestamp(1744279603)
.setWriteTimestamp(1744279603)
.setStateBatches(
SHARE_GROUP_OFFSET_1.stateBatches().stream()
.map(batch -> new ShareSnapshotValue.StateBatch()
.setFirstOffset(batch.firstOffset())
.setLastOffset(batch.lastOffset())
.setDeliveryState(batch.deliveryState())
.setDeliveryCount(batch.deliveryCount()))
.toList()
);
private static final ShareUpdateKey SHARE_UPDATE_KEY = new ShareUpdateKey()
.setGroupId(KEY_2.groupId())
.setTopicId(KEY_2.topicId())
.setPartition(KEY_2.partition());
private static final ShareUpdateValue SHARE_UPDATE_VALUE = new ShareUpdateValue()
.setSnapshotEpoch(SHARE_GROUP_OFFSET_2.snapshotEpoch())
.setLeaderEpoch(SHARE_GROUP_OFFSET_2.leaderEpoch())
.setStartOffset(SHARE_GROUP_OFFSET_2.startOffset())
.setStateBatches(
SHARE_GROUP_OFFSET_2.stateBatches().stream()
.map(batch -> new ShareUpdateValue.StateBatch()
.setFirstOffset(batch.firstOffset())
.setLastOffset(batch.lastOffset())
.setDeliveryState(batch.deliveryState())
.setDeliveryCount(batch.deliveryCount()))
.toList()
);
@Override
protected CoordinatorRecordMessageFormatter formatter() {
return new ShareGroupStateMessageFormatter();
}
@Override
protected Stream<Arguments> parameters() {
return Stream.of(
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_VALUE).array(),
"""
{"key":{"type":0,"data":{"groupId":"gs1","topicId":"gtb2stGYRk-vWZ2zAozmoA","partition":0}},
"value":{"version":0,
"data":{"snapshotEpoch":0,
"stateEpoch":1,
"leaderEpoch":20,
"startOffset":50,
"createTimestamp": 1744279603,
"writeTimestamp": 1744279603,
"stateBatches":[{"firstOffset":100,"lastOffset":200,"deliveryState":1,"deliveryCount":10},
{"firstOffset":201,"lastOffset":210,"deliveryState":2,"deliveryCount":10}]}}}
"""
),
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_KEY).array(),
null,
"""
{"key":{"type":0,"data":{"groupId":"gs1","topicId":"gtb2stGYRk-vWZ2zAozmoA","partition":0}},"value":null}
"""
),
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 1, SHARE_UPDATE_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_UPDATE_VALUE).array(),
"""
{"key":{"type":1,"data":{"groupId":"gs2","topicId":"r9Nq4xGAQf28jvu36t7gQQ","partition":0}},
"value":{"version":0,
"data":{"snapshotEpoch":1,
"leaderEpoch":25,
"startOffset":55,
"stateBatches":[{"firstOffset":100,"lastOffset":150,"deliveryState":1,"deliveryCount":12},
{"firstOffset":151,"lastOffset":200,"deliveryState":2,"deliveryCount":15}]}}}
"""
),
// wrong versions
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 10, SHARE_SNAPSHOT_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_VALUE).array(),
""
),
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 15, SHARE_UPDATE_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_UPDATE_VALUE).array(),
""
)
);
}
private static Stream<Arguments> exceptions() {
return Stream.of(
// wrong types
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_UPDATE_VALUE).array(),
new RuntimeException("""
Could not read record at offset 0 due to: \
Could not read record with version 0 from value's buffer due to: \
non-nullable field stateBatches was serialized as null.""")
),
Arguments.of(
MessageUtil.toVersionPrefixedByteBuffer((short) 1, SHARE_UPDATE_KEY).array(),
MessageUtil.toVersionPrefixedByteBuffer((short) 0, SHARE_SNAPSHOT_VALUE).array(),
new RuntimeException("""
Could not read record at offset 0 due to: \
Could not read record with version 0 from value's buffer due to: \
non-nullable field stateBatches was serialized as null.""")
)
);
}
@ParameterizedTest
@MethodSource("exceptions")
public void testShareGroupStateMessageFormatterException(
byte[] keyBuffer,
byte[] valueBuffer,
RuntimeException expectedOutput
) {
ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0, 0,
0L, TimestampType.CREATE_TIME, 0,
0, keyBuffer, valueBuffer,
new RecordHeaders(), Optional.empty());
try (MessageFormatter formatter = new ShareGroupStateMessageFormatter()) {
formatter.configure(Map.of());
ByteArrayOutputStream out = new ByteArrayOutputStream();
RuntimeException re = assertThrows(RuntimeException.class, () -> formatter.writeTo(record, new PrintStream(out)));
assertEquals(expectedOutput.getMessage(), re.getMessage());
}
}
}
| ShareGroupStateMessageFormatterTest |
java | apache__kafka | storage/src/test/java/org/apache/kafka/server/log/remote/quota/RLMQuotaManagerTest.java | {
"start": 1558,
"end": 5608
} | class ____ {
private final MockTime time = new MockTime();
private final Metrics metrics = new Metrics(new MetricConfig(), List.of(), time);
private static final QuotaType QUOTA_TYPE = QuotaType.RLM_FETCH;
private static final String DESCRIPTION = "Tracking byte rate";
@Test
public void testQuotaExceeded() {
RLMQuotaManager quotaManager = new RLMQuotaManager(
new RLMQuotaManagerConfig(50, 11, 1), metrics, QUOTA_TYPE, DESCRIPTION, time);
assertEquals(0L, quotaManager.getThrottleTimeMs());
quotaManager.record(500);
// Move clock by 1 sec, quota is violated
moveClock(1);
assertEquals(9_000L, quotaManager.getThrottleTimeMs());
// Move clock by another 8 secs, quota is still violated for the window
moveClock(8);
assertEquals(1_000L, quotaManager.getThrottleTimeMs());
// Move clock by 1 sec, quota is no more violated
moveClock(1);
assertEquals(0L, quotaManager.getThrottleTimeMs());
}
@Test
public void testQuotaUpdate() {
RLMQuotaManager quotaManager = new RLMQuotaManager(
new RLMQuotaManagerConfig(50, 11, 1), metrics, QUOTA_TYPE, DESCRIPTION, time);
assertFalse(quotaManager.getThrottleTimeMs() > 0);
quotaManager.record(51);
assertTrue(quotaManager.getThrottleTimeMs() > 0);
Map<MetricName, KafkaMetric> fetchQuotaMetrics = metrics.metrics().entrySet().stream()
.filter(entry -> entry.getKey().name().equals("byte-rate") && entry.getKey().group().equals(QUOTA_TYPE.toString()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<MetricName, KafkaMetric> nonQuotaMetrics = metrics.metrics().entrySet().stream()
.filter(entry -> !entry.getKey().name().equals("byte-rate") || !entry.getKey().group().equals(QUOTA_TYPE.toString()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
assertEquals(1, fetchQuotaMetrics.size());
assertFalse(nonQuotaMetrics.isEmpty());
Map<MetricName, MetricConfig> configForQuotaMetricsBeforeUpdate = extractMetricConfig(fetchQuotaMetrics);
Map<MetricName, MetricConfig> configForNonQuotaMetricsBeforeUpdate = extractMetricConfig(nonQuotaMetrics);
// Update quota to 60, quota is no more violated
Quota quota60Bytes = new Quota(60, true);
quotaManager.updateQuota(quota60Bytes);
assertFalse(quotaManager.getThrottleTimeMs() > 0);
// Verify quota metrics were updated
Map<MetricName, MetricConfig> configForQuotaMetricsAfterFirstUpdate = extractMetricConfig(fetchQuotaMetrics);
assertNotEquals(configForQuotaMetricsBeforeUpdate, configForQuotaMetricsAfterFirstUpdate);
fetchQuotaMetrics.values().forEach(metric -> assertEquals(quota60Bytes, metric.config().quota()));
// Verify non quota metrics are unchanged
assertEquals(configForNonQuotaMetricsBeforeUpdate, extractMetricConfig(nonQuotaMetrics));
// Update quota to 40, quota is violated again
Quota quota40Bytes = new Quota(40, true);
quotaManager.updateQuota(quota40Bytes);
assertTrue(quotaManager.getThrottleTimeMs() > 0);
// Verify quota metrics were updated
assertNotEquals(configForQuotaMetricsAfterFirstUpdate, extractMetricConfig(fetchQuotaMetrics));
fetchQuotaMetrics.values().forEach(metric -> assertEquals(quota40Bytes, metric.config().quota()));
// Verify non quota metrics are unchanged
assertEquals(configForNonQuotaMetricsBeforeUpdate, extractMetricConfig(nonQuotaMetrics));
}
private void moveClock(int secs) {
time.setCurrentTimeMs(time.milliseconds() + secs * 1000L);
}
private Map<MetricName, MetricConfig> extractMetricConfig(Map<MetricName, KafkaMetric> metrics) {
return metrics.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().config()));
}
} | RLMQuotaManagerTest |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/AutoConfigurationSorterTests.java | {
"start": 13732,
"end": 13897
} | class ____ to mislead the sort by names done in
// AutoConfigurationSorter class.
@AutoConfigureBefore(OrderAutoConfigureA.class)
@AutoConfigureOrder(1)
static | names |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java | {
"start": 1091,
"end": 2853
} | class ____ extends EntityRowKey implements
RowKeyPrefix<EntityRowKey> {
/**
* Creates a prefix which generates the following rowKeyPrefixes for the
* entity table:
* {@code userName!clusterId!flowName!flowRunId!AppId!entityType!}.
* @param clusterId identifying the cluster
* @param userId identifying the user
* @param flowName identifying the flow
* @param flowRunId identifying the individual run of this flow
* @param appId identifying the application
* @param entityType which entity type
* @param entityIdPrefix for entityId
* @param entityId for an entity
*/
public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
Long flowRunId, String appId, String entityType, Long entityIdPrefix,
String entityId) {
super(clusterId, userId, flowName, flowRunId, appId, entityType,
entityIdPrefix, entityId);
}
/**
* Creates a prefix which generates the following rowKeyPrefixes for the
* entity table:
* {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
*
* @param clusterId identifying the cluster
* @param userId identifying the user
* @param flowName identifying the flow
* @param flowRunId identifying the individual run of this flow
* @param appId identifying the application
*/
public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
Long flowRunId, String appId) {
this(clusterId, userId, flowName, flowRunId, appId, null, null, null);
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.application.
* RowKeyPrefix#getRowKeyPrefix()
*/
public byte[] getRowKeyPrefix() {
return super.getRowKey();
}
}
| EntityRowKeyPrefix |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-uses-wagon/src/main/java/org/apache/maven/plugin/coreit/DumpVersionMojo.java | {
"start": 1579,
"end": 4343
} | class ____ extends AbstractMojo {
/**
* Project base directory used for manual path alignment.
*/
@Parameter(defaultValue = "${basedir}", readonly = true)
private File basedir;
/**
* The Wagon manager used to look up the wagon of interest.
*/
@Component
private WagonManager wagonManager;
/**
* The path to the properties file used to dump the auth infos.
*/
@Parameter(property = "wagon.propertiesFile")
private File propertiesFile;
/**
* The role hint for the provider of interest.
*/
@Parameter(property = "wagon.providerHint")
private String providerHint;
/**
* The group id for the provider of interest.
*/
@Parameter(property = "wagon.providerGroupId")
private String providerGroupId;
/**
* The artifact id for the provider of interest.
*/
@Parameter(property = "wagon.providerArtifactId")
private String providerArtifactId;
/**
* Runs this mojo.
*
* @throws MojoExecutionException If the output file could not be created.
*/
public void execute() throws MojoExecutionException {
Properties wagonProperties = new Properties();
Object wagon;
try {
wagon = wagonManager.getWagon(providerHint);
String resource = "/META-INF/maven/" + providerGroupId + "/" + providerArtifactId + "/pom.properties";
InputStream is = wagon.getClass().getResourceAsStream(resource);
wagonProperties.load(is);
} catch (IOException e) {
throw new MojoExecutionException("Wagon properties could not be read: " + e.getMessage(), e);
} catch (Exception e) {
getLog().info("[MAVEN-CORE-IT-LOG] No wagon available for " + providerHint);
wagonProperties.setProperty("missing", "true");
}
if (!propertiesFile.isAbsolute()) {
propertiesFile = new File(basedir, propertiesFile.getPath());
}
getLog().info("[MAVEN-CORE-IT-LOG] Creating output file " + propertiesFile);
OutputStream out = null;
try {
propertiesFile.getParentFile().mkdirs();
out = new FileOutputStream(propertiesFile);
wagonProperties.store(out, "MAVEN-CORE-IT-LOG");
} catch (IOException e) {
throw new MojoExecutionException("Output file could not be created: " + propertiesFile, e);
} finally {
if (out != null) {
try {
out.close();
} catch (IOException e) {
// just ignore
}
}
}
getLog().info("[MAVEN-CORE-IT-LOG] Created output file " + propertiesFile);
}
}
| DumpVersionMojo |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/dialect/function/HANAFunctionsTest.java | {
"start": 934,
"end": 3269
} | class ____ {
@BeforeEach
public void setup(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Product product = new Product();
product.setLength( 100 );
product.setPrice( new BigDecimal( "1.298" ) );
session.persist( product );
} );
}
@AfterEach
public void cleanupData(SessionFactoryScope scope) {
scope.dropData();
}
@Test
@JiraKey(value = "HHH-12546")
public void testLocateFunction(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where locate('.', cast(p.price as string)) > 0", Product.class ).uniqueResult();
assertNotNull( p );
assertEquals( 100, p.getLength() );
assertEquals( BigDecimal.valueOf( 1.29 ), p.getPrice() );
} );
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where locate('.', cast(p.price as string)) = 0", Product.class ).uniqueResult();
assertNull( p );
} );
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where locate('.', cast(p.price as string), 3) > 0", Product.class ).uniqueResult();
assertNull( p );
} );
}
@Test
public void testSubstringFunction(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where substring(cast(p.price as string), 1, 2) = '1.'", Product.class ).uniqueResult();
assertNotNull( p );
assertEquals( 100, p.getLength() );
assertEquals( BigDecimal.valueOf( 1.29 ), p.getPrice() );
} );
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where substring(cast(p.price as string), 1, 2) = '.1'", Product.class ).uniqueResult();
assertNull( p );
} );
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where substring(cast(p.price as string), 1) = '1.29'", Product.class ).uniqueResult();
assertNotNull( p );
assertEquals( 100, p.getLength() );
assertEquals( BigDecimal.valueOf( 1.29 ), p.getPrice() );
} );
scope.inTransaction( session -> {
Product p = session.createQuery( "select p from Product p where substring(cast(p.price as string), 1) = '1.'", Product.class ).uniqueResult();
assertNull( p );
} );
}
}
| HANAFunctionsTest |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/MethodCanBeStatic.java | {
"start": 10964,
"end": 11659
} | class ____ {
private final MethodTree tree;
private boolean couldPossiblyBeStatic;
private final Set<MethodSymbol> methodsReferenced;
private final Set<MethodSymbol> referencedBy = new HashSet<>();
private MethodDetails(
MethodTree tree, boolean couldPossiblyBeStatic, Set<MethodSymbol> methodsReferenced) {
this.tree = tree;
this.couldPossiblyBeStatic = couldPossiblyBeStatic;
this.methodsReferenced = new HashSet<>(methodsReferenced);
}
}
/**
* Encapsulates how we should report findings. We support reporting a finding on either every
* affected (can be static) method, or just the first one in the file.
*/
private | MethodDetails |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/annotation/MyAnnotation2Aliases.java | {
"start": 377,
"end": 1950
} | interface ____ {
int[] intArray1() default {};
int[] intArray2() default {};
@AliasFor(member = "intArray1")
int[] intArray1Alias();
@AliasFor(member = "intArray2")
int[] intArray2Alias();
String[] stringArray1() default {};
String[] stringArray2() default {};
String[] stringArray3() default {};
@AliasFor(member = "stringArray1")
String[] stringArray1Alias();
@AliasFor(member = "stringArray2")
String[] stringArray2Alias();
@AliasFor(member = "stringArray3")
String[] stringArray3Alias();
MyEnum2[] myEnumArray1() default {};
MyEnum2[] myEnumArray2() default {};
MyEnum2[] myEnumArray3() default {};
@AliasFor(member = "myEnumArray1")
MyEnum2[] myEnumArray1Alias();
@AliasFor(member = "myEnumArray2")
MyEnum2[] myEnumArray2Alias();
@AliasFor(member = "myEnumArray3")
MyEnum2[] myEnumArray3Alias();
Class[] classesArray1() default {};
Class[] classesArray2() default {};
@AliasFor(member = "classesArray1")
Class[] classesArray1Alias();
@AliasFor(member = "classesArray2")
Class[] classesArray2Alias();
MyAnnotation3 ann() default @MyAnnotation3("default");
@AliasFor(member = "ann")
MyAnnotation3 annAlias();
MyAnnotation3[] annotationsArray1() default {};
MyAnnotation3[] annotationsArray2() default {};
@AliasFor(member = "annotationsArray1")
MyAnnotation3[] annotationsArray1Alias();
@AliasFor(member = "annotationsArray2")
MyAnnotation3[] annotationsArray2Alias();
}
| MyAnnotation2Aliases |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/handler/AbstractUrlHandlerMapping.java | {
"start": 1366,
"end": 2181
} | class ____ URL-mapped
* {@link org.springframework.web.reactive.HandlerMapping} implementations.
*
* <p>Supports direct matches, for example, a registered "/test" matches "/test", and
* various path pattern matches, for example, a registered "/t*" pattern matches
* both "/test" and "/team", "/test/*" matches all paths under "/test",
* "/test/**" matches all paths below "/test". For details, see the
* {@link org.springframework.web.util.pattern.PathPattern} javadoc.
*
* <p>Will search all path patterns to find the most specific match for the
* current request path. The most specific pattern is defined as the longest
* path pattern with the fewest captured variables and wildcards.
*
* @author Rossen Stoyanchev
* @author Juergen Hoeller
* @author Brian Clozel
* @since 5.0
*/
public abstract | for |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/attribute/RequestProtocolAttribute.java | {
"start": 1018,
"end": 1308
} | enum ____
return switch (version) {
case HTTP_1_0 -> "HTTP/1.0";
case HTTP_1_1 -> "HTTP/1.1";
case HTTP_2 -> "HTTP/2";
default ->
// best effort to try and infer the HTTP version from
// any "unknown" | value |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/autoconfigure/actuate/web/WebFluxEndpointAccessIntegrationTests.java | {
"start": 2684,
"end": 7894
} | class ____ {
private final ReactiveWebApplicationContextRunner contextRunner = new ReactiveWebApplicationContextRunner(
AnnotationConfigReactiveWebServerApplicationContext::new)
.withConfiguration(AutoConfigurations.of(NettyReactiveWebServerAutoConfiguration.class,
HttpHandlerAutoConfiguration.class, JacksonAutoConfiguration.class, CodecsAutoConfiguration.class,
WebFluxAutoConfiguration.class, EndpointAutoConfiguration.class, WebEndpointAutoConfiguration.class,
ManagementContextAutoConfiguration.class))
.withConfiguration(AutoConfigurations.of(BeansEndpointAutoConfiguration.class))
.withUserConfiguration(CustomWebFluxEndpoint.class)
.withPropertyValues("server.port:0");
// With every web endpoint exposed and no access property set, both the
// read-only built-in endpoint (beans) and the custom endpoint's GET and POST
// operations must be reachable.
@Test
void accessIsUnrestrictedByDefault() {
    this.contextRunner.withPropertyValues("management.endpoints.web.exposure.include=*").run((context) -> {
        WebTestClient client = createClient(context);
        assertThat(isAccessible(client, HttpMethod.GET, "beans")).isTrue();
        assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isTrue();
        assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isTrue();
    });
}

// READ_ONLY as the default access level: GET operations stay reachable while
// the custom endpoint's POST (write) operation is rejected.
@Test
void accessCanBeReadOnlyByDefault() {
    this.contextRunner
        .withPropertyValues("management.endpoints.web.exposure.include=*",
                "management.endpoints.access.default=READ_ONLY")
        .run((context) -> {
            WebTestClient client = createClient(context);
            assertThat(isAccessible(client, HttpMethod.GET, "beans")).isTrue();
            assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isTrue();
            assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isFalse();
        });
}

// NONE as the default access level blocks every operation on every endpoint,
// even though they are all exposed.
@Test
void accessCanBeNoneByDefault() {
    this.contextRunner
        .withPropertyValues("management.endpoints.web.exposure.include=*",
                "management.endpoints.access.default=NONE")
        .run((context) -> {
            WebTestClient client = createClient(context);
            assertThat(isAccessible(client, HttpMethod.GET, "beans")).isFalse();
            assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isFalse();
            assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isFalse();
        });
}

// A per-endpoint property (management.endpoint.<id>.access) overrides the
// global default: only the custom endpoint becomes accessible again.
@Test
void accessForOneEndpointCanOverrideTheDefaultAccess() {
    this.contextRunner
        .withPropertyValues("management.endpoints.web.exposure.include=*",
                "management.endpoints.access.default=NONE", "management.endpoint.customwebflux.access=UNRESTRICTED")
        .run((context) -> {
            WebTestClient client = createClient(context);
            assertThat(isAccessible(client, HttpMethod.GET, "beans")).isFalse();
            assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isTrue();
            assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isTrue();
        });
}

// max-permitted=READ_ONLY caps the UNRESTRICTED default: reads still work but
// the write operation is clamped down to inaccessible.
@Test
void accessCanBeCappedAtReadOnly() {
    this.contextRunner
        .withPropertyValues("management.endpoints.web.exposure.include=*",
                "management.endpoints.access.default=UNRESTRICTED",
                "management.endpoints.access.max-permitted=READ_ONLY")
        .run((context) -> {
            WebTestClient client = createClient(context);
            assertThat(isAccessible(client, HttpMethod.GET, "beans")).isTrue();
            assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isTrue();
            assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isFalse();
        });
}

// max-permitted=NONE wins over an UNRESTRICTED default: nothing is reachable.
@Test
void accessCanBeCappedAtNone() {
    this.contextRunner.withPropertyValues("management.endpoints.web.exposure.include=*",
            "management.endpoints.access.default=UNRESTRICTED", "management.endpoints.access.max-permitted=NONE")
        .run((context) -> {
            WebTestClient client = createClient(context);
            assertThat(isAccessible(client, HttpMethod.GET, "beans")).isFalse();
            assertThat(isAccessible(client, HttpMethod.GET, "customwebflux")).isFalse();
            assertThat(isAccessible(client, HttpMethod.POST, "customwebflux")).isFalse();
        });
}
// Builds a WebTestClient bound to the port of the running reactive web server.
// The in-memory codec limit is disabled (-1) so large actuator responses
// (e.g. the beans report) are not truncated, and the generous response
// timeout keeps slow CI machines from producing flaky failures.
private WebTestClient createClient(AssertableReactiveWebApplicationContext context) {
    WebServer webServer = context.getSourceApplicationContext(ReactiveWebServerApplicationContext.class)
        .getWebServer();
    assertThat(webServer).isNotNull();
    int port = webServer.getPort();
    ExchangeStrategies exchangeStrategies = ExchangeStrategies.builder()
        .codecs((configurer) -> configurer.defaultCodecs().maxInMemorySize(-1))
        .build();
    return WebTestClient.bindToServer()
        .baseUrl("http://localhost:" + port)
        .exchangeStrategies(exchangeStrategies)
        .responseTimeout(Duration.ofMinutes(5))
        .build();
}
/**
 * Performs the given request against {@code /actuator/<path>} and maps the
 * response status to an accessibility verdict: 200 means accessible,
 * 404/405 mean not accessible, anything else fails the test.
 */
private boolean isAccessible(WebTestClient client, HttpMethod method, String path) {
    String endpointPath = "/actuator/" + path;
    EntityExchangeResult<byte[]> result = client.method(method)
        .uri(endpointPath)
        .exchange()
        .expectBody()
        .returnResult();
    if (result.getStatus() == HttpStatus.NOT_FOUND || result.getStatus() == HttpStatus.METHOD_NOT_ALLOWED) {
        return false;
    }
    if (result.getStatus() == HttpStatus.OK) {
        return true;
    }
    // Any other status indicates a broken test setup rather than a verdict.
    throw new IllegalStateException(
            String.format("Unexpected %s HTTP status for endpoint %s", result.getStatus(), endpointPath));
}
@org.springframework.boot.actuate.endpoint.web.annotation.RestControllerEndpoint(id = "customwebflux")
@SuppressWarnings("removal")
static | WebFluxEndpointAccessIntegrationTests |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/TaskAssignorIntegrationTest.java | {
"start": 3235,
"end": 8681
} | class ____ extends HighAvailabilityTaskAssignor implements
LegacyTaskAssignor { }
@SuppressWarnings("unchecked")
@Test
public void shouldProperlyConfigureTheAssignor(final TestInfo testInfo) throws NoSuchFieldException, IllegalAccessException {
    // This test uses reflection to check and make sure that all the expected configurations really
    // make it all the way to configure the task assignor. There's no other use case for being able
    // to extract all these fields, so reflection is a good choice until we find that the maintenance
    // burden is too high.
    //
    // Also note that this is an integration test because so many components have to come together to
    // ensure these configurations wind up where they belong, and any number of future code changes
    // could break this change.
    final String testId = safeUniqueTestName(testInfo);
    final String appId = "appId_" + testId;
    final String inputTopic = "input" + testId;
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, inputTopic);
    // Maybe I'm paranoid, but I don't want the compiler deciding that my lambdas are equal to the identity
    // function and defeating my identity check
    final AtomicInteger compilerDefeatingReference = new AtomicInteger(0);
    // the implementation doesn't matter, we're just going to verify the reference.
    final AssignmentListener configuredAssignmentListener =
        stable -> compilerDefeatingReference.incrementAndGet();
    // Distinctive, non-default values so that reading them back proves they came from here.
    final Properties properties = mkObjectProperties(
        mkMap(
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, appId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
            mkEntry(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "5"),
            mkEntry(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, "6"),
            mkEntry(StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG, "7"),
            mkEntry(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, "480000"),
            mkEntry(StreamsConfig.InternalConfig.ASSIGNMENT_LISTENER, configuredAssignmentListener),
            mkEntry(StreamsConfig.InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, MyLegacyTaskAssignor.class.getName())
        )
    );
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic);
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), properties)) {
        kafkaStreams.start();
        // Walk the private-field chain:
        // KafkaStreams -> StreamThread -> main consumer -> partition assignor.
        final Field threads = KafkaStreams.class.getDeclaredField("threads");
        threads.setAccessible(true);
        final List<StreamThread> streamThreads = (List<StreamThread>) threads.get(kafkaStreams);
        final StreamThread streamThread = streamThreads.get(0);
        final Field mainConsumer = StreamThread.class.getDeclaredField("mainConsumer");
        mainConsumer.setAccessible(true);
        final KafkaConsumer<?, ?> parentConsumer = (KafkaConsumer<?, ?>) mainConsumer.get(streamThread);
        final Field delegate = KafkaConsumer.class.getDeclaredField("delegate");
        delegate.setAccessible(true);
        final Consumer<?, ?> consumer = (Consumer<?, ?>) delegate.get(parentConsumer);
        assertThat(consumer, instanceOf(ClassicKafkaConsumer.class));
        final Field assignors = ClassicKafkaConsumer.class.getDeclaredField("assignors");
        assignors.setAccessible(true);
        final List<ConsumerPartitionAssignor> consumerPartitionAssignors = (List<ConsumerPartitionAssignor>) assignors.get(consumer);
        final StreamsPartitionAssignor streamsPartitionAssignor = (StreamsPartitionAssignor) consumerPartitionAssignors.get(0);
        // Extract the configuration objects the assignor actually received.
        final Field assignmentConfigs = StreamsPartitionAssignor.class.getDeclaredField("assignmentConfigs");
        assignmentConfigs.setAccessible(true);
        final AssignmentConfigs configs = (AssignmentConfigs) assignmentConfigs.get(streamsPartitionAssignor);
        final Field assignmentListenerField = StreamsPartitionAssignor.class.getDeclaredField("assignmentListener");
        assignmentListenerField.setAccessible(true);
        final AssignmentListener actualAssignmentListener = (AssignmentListener) assignmentListenerField.get(streamsPartitionAssignor);
        final Field taskAssignorSupplierField = StreamsPartitionAssignor.class.getDeclaredField("legacyTaskAssignorSupplier");
        taskAssignorSupplierField.setAccessible(true);
        final Supplier<LegacyTaskAssignor> taskAssignorSupplier =
            (Supplier<LegacyTaskAssignor>) taskAssignorSupplierField.get(streamsPartitionAssignor);
        final LegacyTaskAssignor taskAssignor = taskAssignorSupplier.get();
        // Every value set above must have arrived unchanged; the listener is
        // compared by identity (sameInstance), not equality.
        assertThat(configs.numStandbyReplicas(), is(5));
        assertThat(configs.acceptableRecoveryLag(), is(6L));
        assertThat(configs.maxWarmupReplicas(), is(7));
        assertThat(configs.probingRebalanceIntervalMs(), is(480000L));
        assertThat(actualAssignmentListener, sameInstance(configuredAssignmentListener));
        assertThat(taskAssignor, instanceOf(MyLegacyTaskAssignor.class));
    }
}
}
| MyLegacyTaskAssignor |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/SourceOperator.java | {
"start": 36187,
"end": 37217
} | class ____ {
// Completed by forceStop(); once done, the combined future is permanently available.
private final CompletableFuture<Void> forcedStopFuture = new CompletableFuture<>();
// Tracks two futures: slot 0 = forced stop, slot 1 = the source reader's availability.
private final MultipleFuturesAvailabilityHelper availabilityHelper;
private SourceOperatorAvailabilityHelper() {
    availabilityHelper = new MultipleFuturesAvailabilityHelper(2);
    availabilityHelper.anyOf(0, forcedStopFuture);
}
/**
 * Combines the reader's availability future with the forced-stop future so
 * the caller becomes available when either completes.
 *
 * @param sourceReaderFuture the source reader's current availability future
 * @return a future completing when data is available or a stop was forced
 */
public CompletableFuture<?> update(CompletableFuture<Void> sourceReaderFuture) {
    // Fast path: already-available readers short-circuit without resetting state.
    if (sourceReaderFuture == AvailabilityProvider.AVAILABLE
            || sourceReaderFuture.isDone()) {
        return AvailabilityProvider.AVAILABLE;
    }
    // Order matters: reset first, then re-register both futures on the fresh state.
    availabilityHelper.resetToUnAvailable();
    availabilityHelper.anyOf(0, forcedStopFuture);
    availabilityHelper.anyOf(1, sourceReaderFuture);
    return availabilityHelper.getAvailableFuture();
}
// Marks the operator as permanently available so it can observe the stop request.
public void forceStop() {
    forcedStopFuture.complete(null);
}
}
}
| SourceOperatorAvailabilityHelper |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java | {
"start": 1372,
"end": 1584
} | interface ____ the user can implement to trigger custom actions when an acknowledgement completes.
* The callback may be executed in any thread calling {@link ShareConsumer#poll(java.time.Duration)}.
*/
public | that |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java | {
"start": 5697,
"end": 25409
} | interface ____<T, U, R> {
// Applies this function to the two arguments; may propagate any checked exception.
R apply(T t, U u) throws Exception;
}
/**
 * Resolves the ResourceManager web app address (host:port, no scheme) from
 * configuration, honouring the configured HTTP policy and, when HA is
 * enabled, the RM instance selected by {@code haIdIndex}.
 *
 * @param conf configuration to read addresses from
 * @param isHAEnabled whether ResourceManager HA is enabled
 * @param haIdIndex index into the configured RM HA ids to use when no
 *                  explicit {@code yarn.resourcemanager.ha.id} is set
 * @return the RM web app address without a scheme prefix
 */
public static String getRMWebAppURLWithoutScheme(Configuration conf,
    boolean isHAEnabled, int haIdIndex) {
  YarnConfiguration yarnConfig = new YarnConfiguration(conf);
  // When HA is enabled but no RM_ID is configured, pick one of the HA ids so
  // the per-instance address lookup below can resolve.
  if (isHAEnabled) {
    String rmId = yarnConfig.get(YarnConfiguration.RM_HA_ID);
    if (rmId == null || rmId.isEmpty()) {
      // getRMHAIds never yields a null collection here; the copy is only to
      // get indexed access, so just guard against an empty id list.
      List<String> rmIds = new ArrayList<>(HAUtil.getRMHAIds(conf));
      if (!rmIds.isEmpty()) {
        yarnConfig.set(YarnConfiguration.RM_HA_ID, rmIds.get(haIdIndex));
      }
    }
  }
  if (YarnConfiguration.useHttps(yarnConfig)) {
    if (isHAEnabled) {
      return HAUtil.getConfValueForRMInstance(
          YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, yarnConfig);
    }
    return yarnConfig.get(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS);
  } else {
    if (isHAEnabled) {
      return HAUtil.getConfValueForRMInstance(
          YarnConfiguration.RM_WEBAPP_ADDRESS, yarnConfig);
    }
    return yarnConfig.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS);
  }
}
// RM web app URL with http/https scheme for a specific HA instance index.
public static String getRMWebAppURLWithScheme(Configuration conf,
    int haIdIndex) {
  return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme(
      conf, HAUtil.isHAEnabled(conf), haIdIndex);
}
// RM web app URL with scheme; defaults to the first HA instance (index 0).
public static String getRMWebAppURLWithScheme(Configuration conf) {
  return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme(
      conf, HAUtil.isHAEnabled(conf), 0);
}
// RM web app address without scheme, ignoring HA (single-RM lookup).
public static String getRMWebAppURLWithoutScheme(Configuration conf) {
  return getRMWebAppURLWithoutScheme(conf, false, 0);
}
// Router web app URL with the scheme chosen by the configured HTTP policy.
public static String getRouterWebAppURLWithScheme(Configuration conf) {
  return getHttpSchemePrefix(conf) + getRouterWebAppURLWithoutScheme(conf);
}
// Router web app address (host:port) — https or http variant depending on policy.
public static String getRouterWebAppURLWithoutScheme(Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf.get(YarnConfiguration.ROUTER_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_ROUTER_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf.get(YarnConfiguration.ROUTER_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_ROUTER_WEBAPP_ADDRESS);
  }
}
// Global Policy Generator web app address — https or http variant depending on policy.
public static String getGPGWebAppURLWithoutScheme(Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf.get(YarnConfiguration.GPG_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_GPG_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf.get(YarnConfiguration.GPG_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_GPG_WEBAPP_ADDRESS);
  }
}
/**
 * Collects the proxy host:port entries the AM filter should trust.
 * Uses PROXY_ADDRESS when set; otherwise falls back to the RM web app
 * address(es) — all HA instances when HA is enabled.
 *
 * @param conf configuration to read addresses from
 * @return a non-empty list of host:port strings
 */
public static List<String> getProxyHostsAndPortsForAmFilter(
    Configuration conf) {
  List<String> addrs = new ArrayList<String>();
  String proxyAddr = conf.get(YarnConfiguration.PROXY_ADDRESS);
  // If PROXY_ADDRESS isn't set, fallback to RM_WEBAPP(_HTTPS)_ADDRESS
  // There could be multiple if using RM HA
  if (proxyAddr == null || proxyAddr.isEmpty()) {
    // If RM HA is enabled, try getting those addresses
    if (HAUtil.isHAEnabled(conf)) {
      List<String> haAddrs =
          RMHAUtils.getRMHAWebappAddresses(new YarnConfiguration(conf));
      for (String addr : haAddrs) {
        try {
          InetSocketAddress socketAddr = NetUtils.createSocketAddr(addr);
          addrs.add(getResolvedAddress(socketAddr));
        } catch(IllegalArgumentException e) {
          // skip if can't resolve
        }
      }
    }
    // If couldn't resolve any of the addresses or not using RM HA, fallback
    if (addrs.isEmpty()) {
      addrs.add(getResolvedRMWebAppURLWithoutScheme(conf));
    }
  } else {
    addrs.add(proxyAddr);
  }
  return addrs;
}
// Single proxy host:port — PROXY_ADDRESS if configured, else the resolved RM address.
public static String getProxyHostAndPort(Configuration conf) {
  String addr = conf.get(YarnConfiguration.PROXY_ADDRESS);
  if(addr == null || addr.isEmpty()) {
    addr = getResolvedRMWebAppURLWithoutScheme(conf);
  }
  return addr;
}
// Remote RM web app URL with scheme; resolution picks an HA instance when applicable.
public static String getResolvedRemoteRMWebAppURLWithScheme(
    Configuration conf) {
  return getHttpSchemePrefix(conf)
      + getResolvedRemoteRMWebAppURLWithoutScheme(conf);
}
// Local RM web app URL with scheme, host resolved to a concrete address.
public static String getResolvedRMWebAppURLWithScheme(Configuration conf) {
  return getHttpSchemePrefix(conf)
      + getResolvedRMWebAppURLWithoutScheme(conf);
}
// Remote variant without scheme; HTTP policy derived from configuration.
public static String getResolvedRemoteRMWebAppURLWithoutScheme(
    Configuration conf) {
  return getResolvedRemoteRMWebAppURLWithoutScheme(conf,
      YarnConfiguration.useHttps(conf) ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
}
// Local variant without scheme; HTTP policy derived from configuration.
public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf) {
  return getResolvedRMWebAppURLWithoutScheme(conf,
      YarnConfiguration.useHttps(conf) ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
}
/**
 * Reads the RM web app socket address for the given policy (https vs http
 * key/default pair) and resolves it to a concrete host:port string.
 */
public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf,
    Policy httpPolicy) {
  InetSocketAddress address = null;
  if (httpPolicy == Policy.HTTPS_ONLY) {
    address =
        conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT);
  } else {
    address =
        conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
  }
  return getResolvedAddress(address);
}
// Remote RM address for a policy; when HA is enabled, picks the first RM id
// and relies on the RM's redirect behaviour to reach the active instance.
public static String getResolvedRemoteRMWebAppURLWithoutScheme(Configuration conf,
    Policy httpPolicy) {
  String rmId = null;
  if (HAUtil.isHAEnabled(conf)) {
    // If HA enabled, pick one of the RM-IDs and rely on redirect to go to
    // the Active RM
    rmId = (String) HAUtil.getRMHAIds(conf).toArray()[0];
  }
  return getResolvedRemoteRMWebAppURLWithoutScheme(conf, httpPolicy, rmId);
}
/**
 * Remote RM address for a policy and a specific RM id (or null for the
 * non-HA key); the id is appended as a suffix to the per-instance config key.
 */
public static String getResolvedRemoteRMWebAppURLWithoutScheme(
    Configuration conf, Policy httpPolicy, String rmId) {
  InetSocketAddress address = null;
  if (httpPolicy == Policy.HTTPS_ONLY) {
    address = conf.getSocketAddr(
        rmId == null ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
            : HAUtil.addSuffix(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
                rmId),
        YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT);
  } else {
    address = conf.getSocketAddr(
        rmId == null ? YarnConfiguration.RM_WEBAPP_ADDRESS
            : HAUtil.addSuffix(YarnConfiguration.RM_WEBAPP_ADDRESS, rmId),
        YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
  }
  return getResolvedAddress(address);
}
/**
 * Turns a socket address into a "host:port" string, replacing wildcard or
 * loopback hosts with this machine's canonical host name so the result is
 * usable from other hosts.
 */
public static String getResolvedAddress(InetSocketAddress address) {
  address = NetUtils.getConnectAddress(address);
  InetAddress ip = address.getAddress();
  String host = address.getHostName();
  if (ip == null || ip.isAnyLocalAddress() || ip.isLoopbackAddress()) {
    try {
      host = InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException e) {
      // Keep the unresolved host name as a best-effort fallback.
    }
  }
  return host + ":" + address.getPort();
}
/**
 * Computes the URL a web app should bind to. When the bind-host property is
 * set it replaces the host portion of {@code webAppURLWithoutScheme}, whose
 * port part is always kept.
 *
 * @param conf the configuration
 * @param hostProperty bind host property name
 * @param webAppURLWithoutScheme web app URL without scheme String
 * @return String representing bind URL
 */
public static String getWebAppBindURL(
    Configuration conf,
    String hostProperty,
    String webAppURLWithoutScheme) {
  String bindHost = conf.getTrimmed(hostProperty);
  // No override configured: use the address as-is.
  if (bindHost == null || bindHost.isEmpty()) {
    return webAppURLWithoutScheme;
  }
  if (!webAppURLWithoutScheme.contains(":")) {
    throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +
        webAppURLWithoutScheme);
  }
  return bindHost + ":" + webAppURLWithoutScheme.split(":")[1];
}
// NodeManager web app address — https or http key depending on policy.
public static String getNMWebAppURLWithoutScheme(Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_NM_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf.get(YarnConfiguration.NM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS);
  }
}
// Application History Server (timeline v1) web app address.
public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS);
  }
}
// Timeline reader (ATS v2) web app address.
public static String getTimelineReaderWebAppURLWithoutScheme(
    Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf
        .get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS,
            YarnConfiguration.
                DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf.get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
        YarnConfiguration.
            DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS);
  }
}
// Timeline collector (ATS v2) web app address.
public static String getTimelineCollectorWebAppURLWithoutScheme(
    Configuration conf) {
  if (YarnConfiguration.useHttps(conf)) {
    return conf.get(
        YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS,
        YarnConfiguration.
            DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS);
  } else {
    return conf
        .get(YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS,
            YarnConfiguration.
                DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS);
  }
}
/**
 * Returns the url unchanged when it already carries a scheme, otherwise
 * prefixes it with the supplied scheme.
 *
 * @param schemePrefix eg. http:// or https://
 * @param url url, with or without a scheme
 * @return url with scheme
 */
public static String getURLWithScheme(String schemePrefix, String url) {
  boolean hasScheme = url.indexOf("://") > 0;
  return hasScheme ? url : schemePrefix + url;
}
/**
 * Builds the NM container-log URL for a running container, or returns null
 * when any component is missing or empty.
 */
public static String getRunningLogURL(
    String nodeHttpAddress, String containerId, String user) {
  if (nodeHttpAddress != null && !nodeHttpAddress.isEmpty()
      && containerId != null && !containerId.isEmpty()
      && user != null && !user.isEmpty()) {
    return PATH_JOINER.join(
        nodeHttpAddress, "node", "containerlogs", containerId, user);
  }
  return null;
}
/**
 * Builds the application-history aggregated-log URL, or returns null when
 * any component is missing or empty.
 */
public static String getAggregatedLogURL(String serverHttpAddress,
    String allocatedNode, String containerId, String entity, String user) {
  if (serverHttpAddress != null && !serverHttpAddress.isEmpty()
      && allocatedNode != null && !allocatedNode.isEmpty()
      && containerId != null && !containerId.isEmpty()
      && entity != null && !entity.isEmpty()
      && user != null && !user.isEmpty()) {
    return PATH_JOINER.join(serverHttpAddress, "applicationhistory", "logs",
        allocatedNode, containerId, entity, user);
  }
  return null;
}
/**
 * Choose which scheme (HTTP or HTTPS) to use when generating a URL based on
 * the configuration.
 *
 * @param conf configuration.
 * @return the scheme prefix ("http://" or "https://")
 */
public static String getHttpSchemePrefix(Configuration conf) {
  return YarnConfiguration.useHttps(conf) ? HTTPS_PREFIX : HTTP_PREFIX;
}
/**
 * Load the SSL keystore / truststore into the HttpServer builder using only
 * the default ssl-server resource (no extra configuration overlay).
 * @param builder the HttpServer2.Builder to populate with ssl config
 * @return HttpServer2.Builder instance (passed in as the first parameter)
 * after loading SSL stores
 */
public static HttpServer2.Builder loadSslConfiguration(
    HttpServer2.Builder builder) {
  return loadSslConfiguration(builder, null);
}
/**
 * Load the SSL keystore / truststore into the HttpServer builder.
 * Reads the default ssl-server resource first; entries from {@code conf},
 * when supplied, are layered on top and take precedence.
 * @param builder the HttpServer2.Builder to populate with ssl config
 * @param conf the Configuration instance to load custom SSL config from
 *
 * @return HttpServer2.Builder instance (passed in as the first parameter)
 * after loading SSL stores
 */
public static HttpServer2.Builder loadSslConfiguration(
    HttpServer2.Builder builder, Configuration conf) {
  Configuration sslConf = new Configuration(false);
  sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
  if (conf != null) {
    sslConf.addResource(conf);
  }
  // Client-auth requirement is fixed to the YARN default, not read from conf.
  boolean needsClientAuth = YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
  return builder
      .needsClientAuth(needsClientAuth)
      .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))
      .keyStore(sslConf.get("ssl.server.keystore.location"),
          getPassword(sslConf, WEB_APP_KEYSTORE_PASSWORD_KEY),
          sslConf.get("ssl.server.keystore.type", "jks"))
      .trustStore(sslConf.get("ssl.server.truststore.location"),
          getPassword(sslConf, WEB_APP_TRUSTSTORE_PASSWORD_KEY),
          sslConf.get("ssl.server.truststore.type", "jks"))
      .excludeCiphers(
          sslConf.get("ssl.server.exclude.cipher.list"))
      .includeCiphers(
          sslConf.get("ssl.server.include.cipher.list"));
}
/**
 * Leverages the Configuration.getPassword method to attempt to get
 * passwords from the CredentialProvider API before falling back to
 * clear text in config - if falling back is allowed.
 * @param conf Configuration instance
 * @param alias name of the credential to retrieve
 * @return String credential value or null
 */
static String getPassword(Configuration conf, String alias) {
  try {
    char[] chars = conf.getPassword(alias);
    return chars == null ? null : String.valueOf(chars);
  } catch (IOException ioe) {
    // Provider lookup failed — treat the credential as absent.
    return null;
  }
}
/**
 * Parses an application id string for web endpoints, translating failures
 * into the appropriate web exceptions (400 for malformed, 404 for missing).
 *
 * @param recordFactory unused here; kept for interface compatibility
 * @param appId the raw application id string from the request
 * @return the parsed ApplicationId
 */
public static ApplicationId parseApplicationId(RecordFactory recordFactory,
    String appId) {
  if (appId == null || appId.isEmpty()) {
    throw new NotFoundException("appId, " + appId + ", is empty or null");
  }
  ApplicationId aid = null;
  try {
    aid = ApplicationId.fromString(appId);
  } catch (Exception e) {
    throw new BadRequestException(e);
  }
  // NOTE(review): fromString appears to either return a value or throw, so
  // this null check looks defensive/unreachable — confirm before removing.
  if (aid == null) {
    throw new NotFoundException("app with id " + appId + " not found");
  }
  return aid;
}
/**
 * Maps a requested log format name (case-insensitive) to its MIME type, or
 * null when the format is unsupported.
 */
public static String getSupportedLogContentType(String format) {
  if ("text".equalsIgnoreCase(format)) {
    return "text/plain";
  }
  if ("octet-stream".equalsIgnoreCase(format)) {
    return "application/octet-stream";
  }
  return null;
}
// MIME type used when the client does not ask for a specific log format.
public static String getDefaultLogContentType() {
  return "text/plain";
}
// Format names accepted by getSupportedLogContentType.
public static List<String> listSupportedLogContentType() {
  return Arrays.asList("text", "octet-stream");
}
/**
 * Re-encodes the request's query string using its character encoding
 * (defaulting to ISO-8859-1), optionally dropping every occurrence of one
 * parameter. Returns null when the request has no query string.
 */
private static String getURLEncodedQueryString(HttpServletRequest request,
    String parameterToRemove) {
  String queryString = request.getQueryString();
  if (queryString != null && !queryString.isEmpty()) {
    String reqEncoding = request.getCharacterEncoding();
    if (reqEncoding == null || reqEncoding.isEmpty()) {
      reqEncoding = "ISO-8859-1";
    }
    Charset encoding = Charset.forName(reqEncoding);
    List<NameValuePair> params = URLEncodedUtils.parse(queryString,
        encoding);
    if (parameterToRemove != null && !parameterToRemove.isEmpty()) {
      // Remove all pairs with the given name, not just the first match.
      Iterator<NameValuePair> paramIterator = params.iterator();
      while(paramIterator.hasNext()) {
        NameValuePair current = paramIterator.next();
        if (current.getName().equals(parameterToRemove)) {
          paramIterator.remove();
        }
      }
    }
    return URLEncodedUtils.format(params, encoding);
  }
  return null;
}
/**
 * Parses the request's query string into name/value pairs using the
 * request's character encoding (defaulting to ISO-8859-1).
 * @param request HttpServletRequest with the request details
 * @return the parsed query parameters, or null when there is no query string
 */
public static List<NameValuePair> getURLEncodedQueryParam(
    HttpServletRequest request) {
  String queryString = request.getQueryString();
  if (queryString != null && !queryString.isEmpty()) {
    String reqEncoding = request.getCharacterEncoding();
    if (reqEncoding == null || reqEncoding.isEmpty()) {
      reqEncoding = "ISO-8859-1";
    }
    Charset encoding = Charset.forName(reqEncoding);
    List<NameValuePair> params = URLEncodedUtils.parse(queryString,
        encoding);
    return params;
  }
  return null;
}
/**
 * Parses query parameters from a JAX-RS request context.
 * NOTE(review): this reads getUriInfo().getPath(), i.e. the path rather than
 * the query string — confirm against callers whether that is intentional.
 * @param request ContainerRequestContext with the request details
 * @return the parsed pairs, or null when the path is empty
 */
public static List<NameValuePair> getURLEncodedQueryParam(
    ContainerRequestContext request) {
  String queryString = request.getUriInfo().getPath();
  if (queryString != null && !queryString.isEmpty()) {
    return URLEncodedUtils.parse(queryString, StandardCharsets.ISO_8859_1);
  }
  return null;
}
/**
 * Get a query string which removes the passed parameter.
 * @param httpRequest HttpServletRequest with the request details
 * @param parameterName the query parameter to be removed
 * @return the re-encoded query string, or null when there is none
 */
public static String removeQueryParams(HttpServletRequest httpRequest,
    String parameterName) {
  return getURLEncodedQueryString(httpRequest, parameterName);
}
/**
 * Get a HTML escaped uri with the query parameters of the request.
 * The escaping guards against reflected-XSS when echoing the URI into HTML.
 * @param request HttpServletRequest with the request details
 * @return HTML escaped uri with the query parameters
 */
public static String getHtmlEscapedURIWithQueryString(
    HttpServletRequest request) {
  String urlEncodedQueryString = getURLEncodedQueryString(request, null);
  if (urlEncodedQueryString != null) {
    return HtmlQuoting.quoteHtmlChars(
        request.getRequestURI() + "?" + urlEncodedQueryString);
  }
  return HtmlQuoting.quoteHtmlChars(request.getRequestURI());
}
/**
 * Add the query params from a HttpServletRequest to the target uri passed.
 * @param request HttpServletRequest with the request details
 * @param targetUri the uri to which the query params must be added
 * @return URL encoded string containing the targetUri + "?" + query string
 */
public static String appendQueryParams(HttpServletRequest request,
    String targetUri) {
  String query = getURLEncodedQueryString(request, null);
  if (query == null) {
    return targetUri;
  }
  return targetUri + "?" + query;
}
}
| ThrowingBiFunction |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GitEndpointBuilderFactory.java | {
"start": 38725,
"end": 40573
} | interface ____ {
/**
 * Git (camel-git)
 * Perform operations on git repositories.
 *
 * Category: file
 * Since: 2.16
 * Maven coordinates: org.apache.camel:camel-git
 *
 * Entry point into the type-safe header-name builder (generated DSL).
 *
 * @return the dsl builder for the headers' name.
 */
default GitHeaderNameBuilder git() {
    return GitHeaderNameBuilder.INSTANCE;
}
/**
 * Git (camel-git)
 * Perform operations on git repositories.
 *
 * Category: file
 * Since: 2.16
 * Maven coordinates: org.apache.camel:camel-git
 *
 * Syntax: <code>git:localPath</code>
 *
 * Path parameter: localPath (required)
 * Local repository path
 *
 * @param path localPath
 * @return the dsl builder
 */
default GitEndpointBuilder git(String path) {
    return GitEndpointBuilderFactory.endpointBuilder("git", path);
}
/**
 * Git (camel-git)
 * Perform operations on git repositories.
 *
 * Category: file
 * Since: 2.16
 * Maven coordinates: org.apache.camel:camel-git
 *
 * Syntax: <code>git:localPath</code>
 *
 * Path parameter: localPath (required)
 * Local repository path
 *
 * @param componentName to use a custom component name for the endpoint
 * instead of the default name
 * @param path localPath
 * @return the dsl builder
 */
default GitEndpointBuilder git(String componentName, String path) {
    return GitEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Git component.
*/
public static | GitBuilders |
java | netty__netty | example/src/main/java/io/netty/example/socksproxy/RelayHandler.java | {
"start": 896,
"end": 1811
} | class ____ extends ChannelInboundHandlerAdapter {
// The peer channel that inbound traffic on this channel is forwarded to.
private final Channel relayChannel;
public RelayHandler(Channel relayChannel) {
    this.relayChannel = relayChannel;
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
    // Flush an empty buffer to kick off the pipeline once the relay is live.
    ctx.writeAndFlush(Unpooled.EMPTY_BUFFER);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
    if (relayChannel.isActive()) {
        relayChannel.writeAndFlush(msg);
    } else {
        // Peer is gone: release the message to avoid a buffer leak.
        ReferenceCountUtil.release(msg);
    }
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
    // Flush pending writes on the peer, then close it.
    if (relayChannel.isActive()) {
        SocksServerUtils.closeOnFlush(relayChannel);
    }
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    // NOTE(review): printStackTrace is acceptable for example code only;
    // production handlers should use a logger.
    cause.printStackTrace();
    ctx.close();
}
}
| RelayHandler |
java | spring-projects__spring-boot | buildSrc/src/main/java/org/springframework/boot/build/JavaConventions.java | {
"start": 5104,
"end": 16088
} | class ____ {
// Java language level used for source/target compatibility and manifests.
private static final String SOURCE_AND_TARGET_COMPATIBILITY = "17";
// Applies all Java conventions; they only activate once the java-base plugin
// is present, so non-Java projects are unaffected.
void apply(Project project) {
    project.getPlugins().withType(JavaBasePlugin.class, (java) -> {
        project.getPlugins().apply(TestFailuresPlugin.class);
        project.getPlugins().apply(ArchitecturePlugin.class);
        configureSpringJavaFormat(project);
        configureJavaConventions(project);
        configureJavadocConventions(project);
        configureTestConventions(project);
        configureJarManifestConventions(project);
        configureDependencyManagement(project);
        configureToolchain(project);
        configureProhibitedDependencyChecks(project);
        configureFactoriesFilesChecks(project);
        configureNullability(project);
    });
}
// Adds LICENSE/NOTICE to META-INF and standard manifest attributes to every
// jar; the Implementation-Title varies for sources/javadoc jars.
private void configureJarManifestConventions(Project project) {
    TaskProvider<ExtractResources> extractLegalResources = project.getTasks()
        .register("extractLegalResources", ExtractResources.class, (task) -> {
            task.getDestinationDirectory().set(project.getLayout().getBuildDirectory().dir("legal"));
            task.getResourceNames().set(Arrays.asList("LICENSE.txt", "NOTICE.txt"));
            task.getProperties().put("version", project.getVersion().toString());
        });
    SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
    Set<String> sourceJarTaskNames = sourceSets.stream()
        .map(SourceSet::getSourcesJarTaskName)
        .collect(Collectors.toSet());
    Set<String> javadocJarTaskNames = sourceSets.stream()
        .map(SourceSet::getJavadocJarTaskName)
        .collect(Collectors.toSet());
    // afterEvaluate so version/description reflect final build-script values.
    project.getTasks().withType(Jar.class, (jar) -> project.afterEvaluate((evaluated) -> {
        jar.metaInf((metaInf) -> metaInf.from(extractLegalResources));
        jar.manifest((manifest) -> {
            Map<String, Object> attributes = new TreeMap<>();
            attributes.put("Automatic-Module-Name", project.getName().replace("-", "."));
            attributes.put("Build-Jdk-Spec", SOURCE_AND_TARGET_COMPATIBILITY);
            attributes.put("Built-By", "Spring");
            attributes.put("Implementation-Title",
                    determineImplementationTitle(project, sourceJarTaskNames, javadocJarTaskNames, jar));
            attributes.put("Implementation-Version", project.getVersion());
            manifest.attributes(attributes);
        });
    }));
}
// Picks the manifest Implementation-Title: sources and javadoc jars get a
// descriptive prefix, every other jar uses the project description.
private String determineImplementationTitle(Project project, Set<String> sourceJarTaskNames,
        Set<String> javadocJarTaskNames, Jar jar) {
    String taskName = jar.getName();
    if (sourceJarTaskNames.contains(taskName)) {
        return "Source for " + project.getName();
    }
    return javadocJarTaskNames.contains(taskName) ? "Javadoc for " + project.getName()
            : project.getDescription();
}
// Standardises Test tasks: JUnit Platform, heap size, ordering after style
// checks, retries and predictive selection; also wires in the JUnit launcher.
private void configureTestConventions(Project project) {
    project.getTasks().withType(Test.class, (test) -> {
        test.useJUnitPlatform();
        test.setMaxHeapSize("1536M");
        project.getTasks().withType(Checkstyle.class, test::mustRunAfter);
        project.getTasks().withType(CheckFormat.class, test::mustRunAfter);
        configureTestRetries(test);
        configurePredictiveTestSelection(test);
    });
    project.getPlugins()
        .withType(JavaPlugin.class, (javaPlugin) -> project.getDependencies()
            .add(JavaPlugin.TEST_RUNTIME_ONLY_CONFIGURATION_NAME, "org.junit.platform:junit-platform-launcher"));
}
// Retries flaky tests on CI only (3 attempts); a pass-after-retry still counts.
private void configureTestRetries(Test test) {
    TestRetryConfiguration testRetry = test.getExtensions()
        .getByType(DevelocityTestConfiguration.class)
        .getTestRetry();
    testRetry.getFailOnPassedAfterRetry().set(false);
    testRetry.getMaxRetries().set(isCi() ? 3 : 0);
}
// True when running on CI, signalled by the conventional CI env variable.
private boolean isCi() {
    return Boolean.parseBoolean(System.getenv("CI"));
}
// Enables Develocity predictive test selection, but only when opted in via
// the environment; convention(true) still allows per-task overrides.
private void configurePredictiveTestSelection(Test test) {
    if (isPredictiveTestSelectionEnabled()) {
        PredictiveTestSelectionConfiguration predictiveTestSelection = test.getExtensions()
            .getByType(DevelocityTestConfiguration.class)
            .getPredictiveTestSelection();
        predictiveTestSelection.getEnabled().convention(true);
    }
}
// Opt-in flag for predictive test selection.
private boolean isPredictiveTestSelectionEnabled() {
    return Boolean.parseBoolean(System.getenv("ENABLE_PREDICTIVE_TEST_SELECTION"));
}
private void configureJavadocConventions(Project project) {
project.getTasks().withType(Javadoc.class, (javadoc) -> {
CoreJavadocOptions options = (CoreJavadocOptions) javadoc.getOptions();
options.source("17");
options.encoding("UTF-8");
addValuelessOption(options, "Xdoclint:none");
addValuelessOption(options, "quiet");
if (!javadoc.getName().contains("aggregated")) {
addValuelessOption(options, "-no-fonts");
}
});
}
private void addValuelessOption(CoreJavadocOptions options, String option) {
options.addMultilineMultiValueOption(option).setValue(List.of(Collections.emptyList()));
}
private void configureJavaConventions(Project project) {
if (!project.hasProperty("toolchainVersion")) {
JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class);
javaPluginExtension.setSourceCompatibility(JavaVersion.toVersion(SOURCE_AND_TARGET_COMPATIBILITY));
javaPluginExtension.setTargetCompatibility(JavaVersion.toVersion(SOURCE_AND_TARGET_COMPATIBILITY));
}
project.getTasks().withType(JavaCompile.class, (compile) -> {
compile.getOptions().setEncoding("UTF-8");
compile.getOptions().getRelease().set(17);
List<String> args = compile.getOptions().getCompilerArgs();
if (!args.contains("-parameters")) {
args.add("-parameters");
}
args.addAll(Arrays.asList("-Werror", "-Xlint:unchecked", "-Xlint:deprecation", "-Xlint:rawtypes",
"-Xlint:varargs"));
});
}
private void configureSpringJavaFormat(Project project) {
project.getPlugins().apply(SpringJavaFormatPlugin.class);
project.getTasks().withType(Format.class, (Format) -> Format.setEncoding("UTF-8"));
project.getPlugins().apply(CheckstylePlugin.class);
CheckstyleExtension checkstyle = project.getExtensions().getByType(CheckstyleExtension.class);
String checkstyleToolVersion = (String) project.findProperty("checkstyleToolVersion");
checkstyle.setToolVersion(checkstyleToolVersion);
checkstyle.getConfigDirectory().set(project.getRootProject().file("config/checkstyle"));
String version = SpringJavaFormatPlugin.class.getPackage().getImplementationVersion();
DependencySet checkstyleDependencies = project.getConfigurations().getByName("checkstyle").getDependencies();
checkstyleDependencies
.add(project.getDependencies().create("com.puppycrawl.tools:checkstyle:" + checkstyle.getToolVersion()));
checkstyleDependencies
.add(project.getDependencies().create("io.spring.javaformat:spring-javaformat-checkstyle:" + version));
}
private void configureDependencyManagement(Project project) {
ConfigurationContainer configurations = project.getConfigurations();
Configuration dependencyManagement = configurations.create("dependencyManagement", (configuration) -> {
configuration.setVisible(false);
configuration.setCanBeConsumed(false);
configuration.setCanBeResolved(false);
});
configurations
.matching((configuration) -> (configuration.getName().endsWith("Classpath")
|| JavaPlugin.ANNOTATION_PROCESSOR_CONFIGURATION_NAME.equals(configuration.getName()))
&& (!configuration.getName().contains("dokka")))
.all((configuration) -> configuration.extendsFrom(dependencyManagement));
Dependency springBootParent = project.getDependencies()
.enforcedPlatform(project.getDependencies()
.project(Collections.singletonMap("path", ":platform:spring-boot-internal-dependencies")));
dependencyManagement.getDependencies().add(springBootParent);
project.getPlugins()
.withType(OptionalDependenciesPlugin.class,
(optionalDependencies) -> configurations
.getByName(OptionalDependenciesPlugin.OPTIONAL_CONFIGURATION_NAME)
.extendsFrom(dependencyManagement));
}
private void configureToolchain(Project project) {
project.getPlugins().apply(ToolchainPlugin.class);
}
private void configureProhibitedDependencyChecks(Project project) {
SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
sourceSets.all((sourceSet) -> createProhibitedDependenciesChecks(project,
sourceSet.getCompileClasspathConfigurationName(), sourceSet.getRuntimeClasspathConfigurationName()));
}
private void createProhibitedDependenciesChecks(Project project, String... configurationNames) {
ConfigurationContainer configurations = project.getConfigurations();
for (String configurationName : configurationNames) {
Configuration configuration = configurations.getByName(configurationName);
createProhibitedDependenciesCheck(configuration, project);
}
}
private void createProhibitedDependenciesCheck(Configuration classpath, Project project) {
TaskProvider<CheckClasspathForProhibitedDependencies> checkClasspathForProhibitedDependencies = project
.getTasks()
.register("check" + StringUtils.capitalize(classpath.getName() + "ForProhibitedDependencies"),
CheckClasspathForProhibitedDependencies.class, (task) -> task.setClasspath(classpath));
project.getTasks().getByName(JavaBasePlugin.CHECK_TASK_NAME).dependsOn(checkClasspathForProhibitedDependencies);
}
private void configureFactoriesFilesChecks(Project project) {
SourceSetContainer sourceSets = project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets();
sourceSets.matching((sourceSet) -> SourceSet.MAIN_SOURCE_SET_NAME.equals(sourceSet.getName()))
.configureEach((main) -> {
TaskProvider<Task> check = project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME);
TaskProvider<CheckAotFactories> checkAotFactories = project.getTasks()
.register("checkAotFactories", CheckAotFactories.class, (task) -> {
task.setSource(main.getResources());
task.setClasspath(main.getOutput().getClassesDirs());
task.setDescription("Checks the META-INF/spring/aot.factories file of the main source set.");
});
check.configure((task) -> task.dependsOn(checkAotFactories));
TaskProvider<CheckSpringFactories> checkSpringFactories = project.getTasks()
.register("checkSpringFactories", CheckSpringFactories.class, (task) -> {
task.setSource(main.getResources());
task.setClasspath(main.getOutput().getClassesDirs());
task.setDescription("Checks the META-INF/spring.factories file of the main source set.");
});
check.configure((task) -> task.dependsOn(checkSpringFactories));
});
}
private void configureNullability(Project project) {
project.getPlugins().apply(NullabilityPlugin.class);
NullabilityPluginExtension extension = project.getExtensions().getByType(NullabilityPluginExtension.class);
String nullAwayVersion = (String) project.findProperty("nullAwayVersion");
if (nullAwayVersion != null) {
extension.getNullAwayVersion().set(nullAwayVersion);
}
String errorProneVersion = (String) project.findProperty("errorProneVersion");
if (errorProneVersion != null) {
extension.getErrorProneVersion().set(errorProneVersion);
}
}
}
| JavaConventions |
java | netty__netty | buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java | {
"start": 2292,
"end": 21188
} | class ____ extends AbstractByteBufAllocatorTest<PooledByteBufAllocator> {
@Override
protected PooledByteBufAllocator newAllocator(boolean preferDirect) {
return new PooledByteBufAllocator(preferDirect);
}
@Override
protected PooledByteBufAllocator newUnpooledAllocator() {
return new PooledByteBufAllocator(0, 0, 8192, 1);
}
@Override
protected long expectedUsedMemory(PooledByteBufAllocator allocator, int capacity) {
return allocator.metric().chunkSize();
}
@Override
protected long expectedUsedMemoryAfterRelease(PooledByteBufAllocator allocator, int capacity) {
// This is the case as allocations will start in qInit and chunks in qInit will never be released until
// these are moved to q000.
// See https://www.bsdcan.org/2006/papers/jemalloc.pdf
return allocator.metric().chunkSize();
}
@Override
protected void trimCaches(PooledByteBufAllocator allocator) {
allocator.trimCurrentThreadCache();
}
@Test
public void testTrim() {
PooledByteBufAllocator allocator = newAllocator(true);
// Should return false as we never allocated from this thread yet.
assertFalse(allocator.trimCurrentThreadCache());
ByteBuf directBuffer = allocator.directBuffer();
assertTrue(directBuffer.release());
// Should return true now a cache exists for the calling thread.
assertTrue(allocator.trimCurrentThreadCache());
}
@Test
public void testPooledUnsafeHeapBufferAndUnsafeDirectBuffer() {
PooledByteBufAllocator allocator = newAllocator(true);
ByteBuf directBuffer = allocator.directBuffer();
assertInstanceOf(directBuffer,
PlatformDependent.hasUnsafe() ? PooledUnsafeDirectByteBuf.class : PooledDirectByteBuf.class);
directBuffer.release();
ByteBuf heapBuffer = allocator.heapBuffer();
assertInstanceOf(heapBuffer,
PlatformDependent.hasUnsafe() ? PooledUnsafeHeapByteBuf.class : PooledHeapByteBuf.class);
heapBuffer.release();
}
@Test
public void testIOBuffersAreDirectWhenCleanerAvailableOrDirectBuffersPooled() {
PooledByteBufAllocator allocator = newAllocator(true);
ByteBuf ioBuffer = allocator.ioBuffer();
assertTrue(ioBuffer.isDirect());
ioBuffer.release();
PooledByteBufAllocator unpooledAllocator = newUnpooledAllocator();
ioBuffer = unpooledAllocator.ioBuffer();
if (PlatformDependent.canReliabilyFreeDirectBuffers()) {
assertTrue(ioBuffer.isDirect());
} else {
assertFalse(ioBuffer.isDirect());
}
ioBuffer.release();
}
@Test
public void testWithoutUseCacheForAllThreads() {
assertThat(Thread.currentThread()).isNotInstanceOf(FastThreadLocalThread.class);
PooledByteBufAllocator pool = new PooledByteBufAllocator(
/*preferDirect=*/ false,
/*nHeapArena=*/ 1,
/*nDirectArena=*/ 1,
/*pageSize=*/8192,
/*maxOrder=*/ 9,
/*tinyCacheSize=*/ 0,
/*smallCacheSize=*/ 0,
/*normalCacheSize=*/ 0,
/*useCacheForAllThreads=*/ false);
ByteBuf buf = pool.buffer(1);
buf.release();
}
@Test
public void testArenaMetricsNoCache() {
testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 0, 0, 0), 100, 0, 100, 100);
}
@Test
public void testArenaMetricsCache() {
testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 1000, 1000, 1000, true, 0), 100, 1, 1, 0);
}
@Test
public void testArenaMetricsNoCacheAlign() {
assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported());
testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 0, 0, 0, true, 64), 100, 0, 100, 100);
}
@Test
public void testArenaMetricsCacheAlign() {
assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported());
testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 1000, 1000, 1000, true, 64), 100, 1, 1, 0);
}
private static void testArenaMetrics0(
PooledByteBufAllocator allocator, int num, int expectedActive, int expectedAlloc, int expectedDealloc) {
for (int i = 0; i < num; i++) {
assertTrue(allocator.directBuffer().release());
assertTrue(allocator.heapBuffer().release());
}
assertArenaMetrics(allocator.metric().directArenas(), expectedActive, expectedAlloc, expectedDealloc);
assertArenaMetrics(allocator.metric().heapArenas(), expectedActive, expectedAlloc, expectedDealloc);
}
private static void assertArenaMetrics(
List<PoolArenaMetric> arenaMetrics, int expectedActive, int expectedAlloc, int expectedDealloc) {
long active = 0;
long alloc = 0;
long dealloc = 0;
for (PoolArenaMetric arena : arenaMetrics) {
active += arena.numActiveAllocations();
alloc += arena.numAllocations();
dealloc += arena.numDeallocations();
}
assertEquals(expectedActive, active);
assertEquals(expectedAlloc, alloc);
assertEquals(expectedDealloc, dealloc);
}
@Test
public void testPoolChunkListMetric() {
for (PoolArenaMetric arenaMetric: PooledByteBufAllocator.DEFAULT.metric().heapArenas()) {
assertPoolChunkListMetric(arenaMetric);
}
}
private static void assertPoolChunkListMetric(PoolArenaMetric arenaMetric) {
List<PoolChunkListMetric> lists = arenaMetric.chunkLists();
assertEquals(6, lists.size());
assertPoolChunkListMetric(lists.get(0), 1, 25);
assertPoolChunkListMetric(lists.get(1), 1, 50);
assertPoolChunkListMetric(lists.get(2), 25, 75);
assertPoolChunkListMetric(lists.get(4), 75, 100);
assertPoolChunkListMetric(lists.get(5), 100, 100);
}
private static void assertPoolChunkListMetric(PoolChunkListMetric m, int min, int max) {
assertEquals(min, m.minUsage());
assertEquals(max, m.maxUsage());
}
@Test
public void testSmallSubpageMetric() {
PooledByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 9, 0, 0, 0);
ByteBuf buffer = allocator.heapBuffer(500);
try {
PoolArenaMetric metric = allocator.metric().heapArenas().get(0);
PoolSubpageMetric subpageMetric = metric.smallSubpages().get(0);
assertEquals(1, subpageMetric.maxNumElements() - subpageMetric.numAvailable());
} finally {
buffer.release();
}
}
@Test
public void testAllocNotNull() {
PooledByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 9, 0, 0, 0);
// Huge allocation
testAllocNotNull(allocator, allocator.metric().chunkSize() + 1);
// Normal allocation
testAllocNotNull(allocator, 1024);
// Small allocation
testAllocNotNull(allocator, 512);
testAllocNotNull(allocator, 1);
}
private static void testAllocNotNull(PooledByteBufAllocator allocator, int capacity) {
ByteBuf buffer = allocator.heapBuffer(capacity);
assertNotNull(buffer.alloc());
assertTrue(buffer.release());
assertNotNull(buffer.alloc());
}
@Test
public void testFreePoolChunk() {
int chunkSize = 16 * 1024 * 1024;
PooledByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 0, 8192, 11, 0, 0, 0);
ByteBuf buffer = allocator.heapBuffer(chunkSize);
List<PoolArenaMetric> arenas = allocator.metric().heapArenas();
assertEquals(1, arenas.size());
List<PoolChunkListMetric> lists = arenas.get(0).chunkLists();
assertEquals(6, lists.size());
assertFalse(lists.get(0).iterator().hasNext());
assertFalse(lists.get(1).iterator().hasNext());
assertFalse(lists.get(2).iterator().hasNext());
assertFalse(lists.get(3).iterator().hasNext());
assertFalse(lists.get(4).iterator().hasNext());
// Must end up in the 6th PoolChunkList
assertTrue(lists.get(5).iterator().hasNext());
assertTrue(buffer.release());
// Should be completely removed and so all PoolChunkLists must be empty
assertFalse(lists.get(0).iterator().hasNext());
assertFalse(lists.get(1).iterator().hasNext());
assertFalse(lists.get(2).iterator().hasNext());
assertFalse(lists.get(3).iterator().hasNext());
assertFalse(lists.get(4).iterator().hasNext());
assertFalse(lists.get(5).iterator().hasNext());
}
@Test
public void testCollapse() {
int pageSize = 8192;
//no cache
ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 9, 0, 0, 0);
ByteBuf b1 = allocator.buffer(pageSize * 4);
ByteBuf b2 = allocator.buffer(pageSize * 5);
ByteBuf b3 = allocator.buffer(pageSize * 6);
b2.release();
b3.release();
ByteBuf b4 = allocator.buffer(pageSize * 10);
PooledByteBuf<ByteBuffer> b = unwrapIfNeeded(b4);
//b2 and b3 are collapsed, b4 should start at offset 4
assertEquals(4, runOffset(b.handle));
assertEquals(10, runPages(b.handle));
b1.release();
b4.release();
//all ByteBuf are collapsed, b5 should start at offset 0
ByteBuf b5 = allocator.buffer(pageSize * 20);
b = unwrapIfNeeded(b5);
assertEquals(0, runOffset(b.handle));
assertEquals(20, runPages(b.handle));
b5.release();
}
@Test
public void testAllocateSmallOffset() {
int pageSize = 8192;
ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 9, 0, 0, 0);
int size = pageSize * 5;
ByteBuf[] bufs = new ByteBuf[10];
for (int i = 0; i < 10; i++) {
bufs[i] = allocator.buffer(size);
}
for (int i = 0; i < 5; i++) {
bufs[i].release();
}
//make sure we always allocate runs with small offset
for (int i = 0; i < 5; i++) {
ByteBuf buf = allocator.buffer(size);
PooledByteBuf<ByteBuffer> unwrapedBuf = unwrapIfNeeded(buf);
assertEquals(runOffset(unwrapedBuf.handle), i * 5);
bufs[i] = buf;
}
//release at reverse order
for (int i = 10 - 1; i >= 5; i--) {
bufs[i].release();
}
for (int i = 5; i < 10; i++) {
ByteBuf buf = allocator.buffer(size);
PooledByteBuf<ByteBuffer> unwrapedBuf = unwrapIfNeeded(buf);
assertEquals(runOffset(unwrapedBuf.handle), i * 5);
bufs[i] = buf;
}
for (int i = 0; i < 10; i++) {
bufs[i].release();
}
}
@Test
@Timeout(value = 10, threadMode = Timeout.ThreadMode.SEPARATE_THREAD)
public void testThreadCacheDestroyedByThreadCleaner() throws InterruptedException {
testThreadCacheDestroyed(false);
}
@Test
@Timeout(value = 10, threadMode = Timeout.ThreadMode.SEPARATE_THREAD)
public void testThreadCacheDestroyedAfterExitRun() throws InterruptedException {
testThreadCacheDestroyed(true);
}
private static void testThreadCacheDestroyed(boolean useRunnable) throws InterruptedException {
int numArenas = 11;
final PooledByteBufAllocator allocator =
new PooledByteBufAllocator(numArenas, numArenas, 8192, 1);
final AtomicBoolean threadCachesCreated = new AtomicBoolean(true);
final Runnable task = new Runnable() {
@Override
public void run() {
ByteBuf buf = allocator.newHeapBuffer(1024, 1024);
for (int i = 0; i < buf.capacity(); i++) {
buf.writeByte(0);
}
// Make sure that thread caches are actually created,
// so that down below we are not testing for zero
// thread caches without any of them ever having been initialized.
if (allocator.metric().numThreadLocalCaches() == 0) {
threadCachesCreated.set(false);
}
buf.release();
}
};
for (int i = 0; i < numArenas; i++) {
final FastThreadLocalThread thread;
if (useRunnable) {
thread = new FastThreadLocalThread(task);
assertTrue(thread.willCleanupFastThreadLocals());
} else {
thread = new FastThreadLocalThread() {
@Override
public void run() {
task.run();
}
};
assertFalse(thread.willCleanupFastThreadLocals());
}
thread.start();
thread.join();
}
// Wait for the ThreadDeathWatcher to have destroyed all thread caches
while (allocator.metric().numThreadLocalCaches() > 0) {
// Signal we want to have a GC run to ensure we can process our ThreadCleanerReference
System.gc();
System.runFinalization();
LockSupport.parkNanos(MILLISECONDS.toNanos(100));
}
assertTrue(threadCachesCreated.get());
}
@Test
@Timeout(value = 3000, unit = MILLISECONDS)
public void testNumThreadCachesWithNoDirectArenas() throws InterruptedException {
int numHeapArenas = 1;
final PooledByteBufAllocator allocator =
new PooledByteBufAllocator(numHeapArenas, 0, 8192, 1);
ThreadCache tcache0 = createNewThreadCache(allocator, false);
assertEquals(1, allocator.metric().numThreadLocalCaches());
ThreadCache tcache1 = createNewThreadCache(allocator, false);
assertEquals(2, allocator.metric().numThreadLocalCaches());
tcache0.destroy();
assertEquals(1, allocator.metric().numThreadLocalCaches());
tcache1.destroy();
assertEquals(0, allocator.metric().numThreadLocalCaches());
}
@Test
@Timeout(value = 3000, unit = MILLISECONDS)
public void testNumThreadCachesAccountForDirectAndHeapArenas() throws InterruptedException {
int numHeapArenas = 1;
final PooledByteBufAllocator allocator =
new PooledByteBufAllocator(numHeapArenas, 0, 8192, 1);
ThreadCache tcache0 = createNewThreadCache(allocator, false);
assertEquals(1, allocator.metric().numThreadLocalCaches());
ThreadCache tcache1 = createNewThreadCache(allocator, true);
assertEquals(2, allocator.metric().numThreadLocalCaches());
tcache0.destroy();
assertEquals(1, allocator.metric().numThreadLocalCaches());
tcache1.destroy();
assertEquals(0, allocator.metric().numThreadLocalCaches());
}
@Test
@Timeout(value = 3000, unit = MILLISECONDS)
public void testThreadCacheToArenaMappings() throws InterruptedException {
int numArenas = 2;
final PooledByteBufAllocator allocator =
new PooledByteBufAllocator(numArenas, numArenas, 8192, 1);
ThreadCache tcache0 = createNewThreadCache(allocator, false);
ThreadCache tcache1 = createNewThreadCache(allocator, false);
assertEquals(2, allocator.metric().numThreadLocalCaches());
assertEquals(1, allocator.metric().heapArenas().get(0).numThreadCaches());
assertEquals(1, allocator.metric().heapArenas().get(1).numThreadCaches());
assertEquals(1, allocator.metric().directArenas().get(0).numThreadCaches());
assertEquals(1, allocator.metric().directArenas().get(0).numThreadCaches());
tcache1.destroy();
assertEquals(1, allocator.metric().numThreadLocalCaches());
assertEquals(1, allocator.metric().heapArenas().get(0).numThreadCaches());
assertEquals(0, allocator.metric().heapArenas().get(1).numThreadCaches());
assertEquals(1, allocator.metric().directArenas().get(0).numThreadCaches());
assertEquals(0, allocator.metric().directArenas().get(1).numThreadCaches());
ThreadCache tcache2 = createNewThreadCache(allocator, false);
assertEquals(2, allocator.metric().numThreadLocalCaches());
assertEquals(1, allocator.metric().heapArenas().get(0).numThreadCaches());
assertEquals(1, allocator.metric().heapArenas().get(1).numThreadCaches());
assertEquals(1, allocator.metric().directArenas().get(0).numThreadCaches());
assertEquals(1, allocator.metric().directArenas().get(1).numThreadCaches());
tcache0.destroy();
assertEquals(1, allocator.metric().numThreadLocalCaches());
tcache2.destroy();
assertEquals(0, allocator.metric().numThreadLocalCaches());
assertEquals(0, allocator.metric().heapArenas().get(0).numThreadCaches());
assertEquals(0, allocator.metric().heapArenas().get(1).numThreadCaches());
assertEquals(0, allocator.metric().directArenas().get(0).numThreadCaches());
assertEquals(0, allocator.metric().directArenas().get(1).numThreadCaches());
}
private static ThreadCache createNewThreadCache(final PooledByteBufAllocator allocator, final boolean direct)
throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch cacheLatch = new CountDownLatch(1);
final Thread t = new FastThreadLocalThread(new Runnable() {
@Override
public void run() {
final ByteBuf buf;
if (direct) {
buf = allocator.newDirectBuffer(1024, 1024);
} else {
buf = allocator.newHeapBuffer(1024, 1024);
}
// Countdown the latch after we allocated a buffer. At this point the cache must exists.
cacheLatch.countDown();
buf.writeZero(buf.capacity());
try {
latch.await();
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
buf.release();
FastThreadLocal.removeAll();
}
});
t.start();
// Wait until we allocated a buffer and so be sure the thread was started and the cache exists.
cacheLatch.await();
return new ThreadCache() {
@Override
public void destroy() throws InterruptedException {
latch.countDown();
t.join();
}
};
}
private | PooledByteBufAllocatorTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/ConcreteIterableAssert.java | {
"start": 685,
"end": 1126
} | class ____<ELEMENT> extends
FactoryBasedNavigableIterableAssert<ConcreteIterableAssert<ELEMENT>, Iterable<ELEMENT>, ELEMENT, ObjectAssert<ELEMENT>> {
public ConcreteIterableAssert(Collection<ELEMENT> actual) {
super(actual, ConcreteIterableAssert.class, ObjectAssert::new);
}
@Override
public ObjectAssert<ELEMENT> toAssert(ELEMENT value, String description) {
return new ObjectAssert<>(value);
}
}
| ConcreteIterableAssert |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/internal/CustomMutabilityConvertedBasicTypeImpl.java | {
"start": 410,
"end": 1282
} | class ____<J> extends ConvertedBasicTypeImpl<J> {
private final MutabilityPlan<J> mutabilityPlan;
public CustomMutabilityConvertedBasicTypeImpl(
String name,
JdbcType jdbcType,
BasicValueConverter<J, ?> converter,
MutabilityPlan<J> mutabilityPlan) {
super( name, jdbcType, converter );
this.mutabilityPlan = mutabilityPlan;
}
public CustomMutabilityConvertedBasicTypeImpl(
String name,
String description,
JdbcType jdbcType,
BasicValueConverter<J, ?> converter,
MutabilityPlan<J> mutabilityPlan) {
super( name, description, jdbcType, converter );
this.mutabilityPlan = mutabilityPlan;
}
@Override
protected MutabilityPlan<J> getMutabilityPlan() {
return mutabilityPlan;
}
@Override
public JavaType<?> getRelationalJavaType() {
return getValueConverter().getRelationalJavaType();
}
}
| CustomMutabilityConvertedBasicTypeImpl |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/store/AsyncDirectIODirectoryTests.java | {
"start": 1951,
"end": 8237
} | class ____ extends BaseDirectoryTestCase {
static {
LogConfigurator.loadLog4jPlugins();
LogConfigurator.configureESLogging(); // native access requires logging to be initialized
}
@BeforeClass
public static void checkSupported() throws IOException {
assumeTrue(
"This test required a JDK version that has support for ExtendedOpenOption.DIRECT",
AsyncDirectIOIndexInput.ExtendedOpenOption_DIRECT != null
);
// jdk supports it, let's check that the filesystem does too
Path path = createTempDir("directIOProbe");
try (Directory dir = open(path); IndexOutput out = dir.createOutput("out", IOContext.DEFAULT)) {
out.writeString("test");
} catch (IOException e) {
assumeNoException("test requires filesystem that supports Direct IO", e);
}
}
@SuppressForbidden(reason = "requires Files.getFileStore")
private static int getBlockSize(Path path) throws IOException {
return Math.toIntExact(Files.getFileStore(path).getBlockSize());
}
@Override
protected Directory getDirectory(Path path) throws IOException {
return new FsDirectoryFactory.AlwaysDirectIODirectory(open(path), 8192, 8192, randomIntBetween(0, 32));
}
public void testIndexWriteRead() throws IOException {
try (Directory dir = getDirectory(createTempDir("testDirectIODirectory"))) {
try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir)) {
Document doc = new Document();
Field field = newField("field", "foo bar", TextField.TYPE_STORED);
doc.add(field);
iw.addDocument(doc);
iw.commit();
}
try (IndexReader ir = DirectoryReader.open(dir)) {
IndexSearcher s = newSearcher(ir);
assertEquals(1, s.count(new PhraseQuery("field", "foo", "bar")));
}
}
}
public void testIllegalEOFWithFileSizeMultipleOfBlockSize() throws Exception {
Path path = createTempDir("testIllegalEOF");
final int fileSize = getBlockSize(path) * 2;
try (Directory dir = getDirectory(path)) {
IndexOutput o = dir.createOutput("out", newIOContext(random()));
byte[] b = new byte[fileSize];
o.writeBytes(b, 0, fileSize);
o.close();
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
i.seek(fileSize);
// Seeking past EOF should always throw EOFException
expectThrows(EOFException.class, () -> i.seek(fileSize + RandomizedTest.randomIntBetween(1, 2048)));
// Reading immediately after seeking past EOF should throw EOFException
expectThrows(EOFException.class, () -> i.readByte());
}
}
}
public void testReadPastEOFShouldThrowEOFExceptionWithEmptyFile() throws Exception {
// fileSize needs to be 0 to test this condition. Do not randomize.
final int fileSize = 0;
try (Directory dir = getDirectory(createTempDir("testReadPastEOF"))) {
try (IndexOutput o = dir.createOutput("out", newIOContext(random()))) {
o.writeBytes(new byte[fileSize], 0, fileSize);
}
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
i.seek(fileSize);
expectThrows(EOFException.class, () -> i.readByte());
expectThrows(EOFException.class, () -> i.readBytes(new byte[1], 0, 1));
}
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
expectThrows(EOFException.class, () -> i.seek(fileSize + RandomizedTest.randomIntBetween(1, 2048)));
expectThrows(EOFException.class, () -> i.readByte());
expectThrows(EOFException.class, () -> i.readBytes(new byte[1], 0, 1));
}
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
expectThrows(EOFException.class, () -> i.readByte());
}
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
expectThrows(EOFException.class, () -> i.readBytes(new byte[1], 0, 1));
}
}
}
public void testSeekPastEOFAndRead() throws Exception {
try (Directory dir = getDirectory(createTempDir("testSeekPastEOF"))) {
final int len = random().nextInt(2048);
try (IndexOutput o = dir.createOutput("out", newIOContext(random()))) {
byte[] b = new byte[len];
o.writeBytes(b, 0, len);
}
try (IndexInput i = dir.openInput("out", newIOContext(random()))) {
// Seeking past EOF should always throw EOFException
expectThrows(EOFException.class, () -> i.seek(len + RandomizedTest.randomIntBetween(1, 2048)));
// Reading immediately after seeking past EOF should throw EOFException
expectThrows(EOFException.class, () -> i.readByte());
}
}
}
// Ping-pong seeks should be really fast, since the position should be within buffer.
// The test should complete within sub-second times, not minutes.
public void testSeekSmall() throws IOException {
Path tmpDir = createTempDir("testSeekSmall");
try (Directory dir = getDirectory(tmpDir)) {
int len = atLeast(100);
try (IndexOutput o = dir.createOutput("out", newIOContext(random()))) {
byte[] b = new byte[len];
for (int i = 0; i < len; i++) {
b[i] = (byte) i;
}
o.writeBytes(b, 0, len);
}
try (IndexInput in = dir.openInput("out", newIOContext(random()))) {
for (int i = 0; i < 100_000; i++) {
in.seek(2);
assertEquals(2, in.readByte());
in.seek(1);
assertEquals(1, in.readByte());
in.seek(0);
assertEquals(0, in.readByte());
}
}
}
}
}
| AsyncDirectIODirectoryTests |
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/WildcardQuery.java | {
"start": 511,
"end": 2339
} | class ____ extends Query {
private final String field, query;
private final boolean caseInsensitive;
private final boolean forceStringMatch;
public WildcardQuery(Source source, String field, String query, boolean caseInsensitive, boolean forceStringMatch) {
super(source);
this.field = field;
this.query = query;
this.caseInsensitive = caseInsensitive;
this.forceStringMatch = forceStringMatch;
}
public String field() {
return field;
}
public String query() {
return query;
}
public Boolean caseInsensitive() {
return caseInsensitive;
}
@Override
protected QueryBuilder asBuilder() {
WildcardQueryBuilder wb = new WildcardQueryBuilder(field, query, forceStringMatch);
// ES does not allow case_insensitive to be set to "false", it should be either "true" or not specified
return caseInsensitive == false ? wb : wb.caseInsensitive(caseInsensitive);
}
@Override
public int hashCode() {
return Objects.hash(field, query, caseInsensitive, forceStringMatch);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
WildcardQuery other = (WildcardQuery) obj;
return Objects.equals(field, other.field)
&& Objects.equals(query, other.query)
&& Objects.equals(caseInsensitive, other.caseInsensitive)
&& Objects.equals(forceStringMatch, other.forceStringMatch);
}
@Override
protected String innerToString() {
return field + ":" + query;
}
@Override
public boolean containsPlan() {
return false;
}
}
| WildcardQuery |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Issue784.java | {
"start": 152,
"end": 402
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.parse("[{\"args\":[\"150\",\"change\",{\"timeStamp\":1471595047319,\"value\":\"\"},{\"attrs\":{\"value\":\"\"}}],\"method\":\"fireEvent\"}]");
}
}
| Issue784 |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2KinesisComponentBuilderFactory.java | {
"start": 1910,
"end": 25417
} | interface ____ extends ComponentBuilder<Kinesis2Component> {
/**
* This option will set the CBOR_ENABLED property during the execution.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param cborEnabled the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder cborEnabled(boolean cborEnabled) {
doSetProperty("cborEnabled", cborEnabled);
return this;
}
/**
* Component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.kinesis.Kinesis2Configuration</code> type.
*
* Group: common
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder configuration(org.apache.camel.component.aws2.kinesis.Kinesis2Configuration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* The region in which Kinesis Firehose client needs to work. When using
* this parameter, the configuration will expect the lowercase name of
* the region (for example ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Defines where in the Kinesis stream to start getting records.
*
* The option is a:
* <code>software.amazon.awssdk.services.kinesis.model.ShardIteratorType</code> type.
*
* Default: TRIM_HORIZON
* Group: consumer
*
* @param iteratorType the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder iteratorType(software.amazon.awssdk.services.kinesis.model.ShardIteratorType iteratorType) {
doSetProperty("iteratorType", iteratorType);
return this;
}
/**
* Maximum number of records that will be fetched in each poll.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param maxResultsPerRequest the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder maxResultsPerRequest(int maxResultsPerRequest) {
doSetProperty("maxResultsPerRequest", maxResultsPerRequest);
return this;
}
/**
* The message timestamp to start polling from. Required if iteratorType
* is set to AT_TIMESTAMP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param messageTimestamp the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder messageTimestamp(java.lang.String messageTimestamp) {
doSetProperty("messageTimestamp", messageTimestamp);
return this;
}
/**
* The sequence number to start polling from. Required if iteratorType
* is set to AFTER_SEQUENCE_NUMBER or AT_SEQUENCE_NUMBER.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param sequenceNumber the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder sequenceNumber(java.lang.String sequenceNumber) {
doSetProperty("sequenceNumber", sequenceNumber);
return this;
}
/**
* Define what will be the behavior in case of shard closed. Possible
* value are ignore, silent and fail. In case of ignore a WARN message
* will be logged once and the consumer will not process new messages
* until restarted,in case of silent there will be no logging and the
* consumer will not process new messages until restarted,in case of
* fail a ReachedClosedStateException will be thrown.
*
* The option is a:
* <code>org.apache.camel.component.aws2.kinesis.Kinesis2ShardClosedStrategyEnum</code> type.
*
* Default: ignore
* Group: consumer
*
* @param shardClosed the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder shardClosed(org.apache.camel.component.aws2.kinesis.Kinesis2ShardClosedStrategyEnum shardClosed) {
doSetProperty("shardClosed", shardClosed);
return this;
}
/**
* Defines which shardId in the Kinesis stream to get records from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param shardId the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder shardId(java.lang.String shardId) {
doSetProperty("shardId", shardId);
return this;
}
/**
* The interval in milliseconds to wait between shard polling.
*
* The option is a: <code>long</code> type.
*
* Default: 10000
* Group: consumer (advanced)
*
* @param shardMonitorInterval the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder shardMonitorInterval(long shardMonitorInterval) {
doSetProperty("shardMonitorInterval", shardMonitorInterval);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Supply a pre-constructed Amazon Kinesis async client to use for the
* KCL Consumer.
*
* The option is a:
* <code>software.amazon.awssdk.services.kinesis.KinesisAsyncClient</code> type.
*
* Group: advanced
*
* @param amazonKinesisAsyncClient the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder amazonKinesisAsyncClient(software.amazon.awssdk.services.kinesis.KinesisAsyncClient amazonKinesisAsyncClient) {
doSetProperty("amazonKinesisAsyncClient", amazonKinesisAsyncClient);
return this;
}
/**
* Amazon Kinesis client to use for all requests for this endpoint.
*
* The option is a:
* <code>software.amazon.awssdk.services.kinesis.KinesisClient</code> type.
*
* Group: advanced
*
* @param amazonKinesisClient the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder amazonKinesisClient(software.amazon.awssdk.services.kinesis.KinesisClient amazonKinesisClient) {
doSetProperty("amazonKinesisClient", amazonKinesisClient);
return this;
}
/**
* Name of the KCL application. This defaults to the stream name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param applicationName the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder applicationName(java.lang.String applicationName) {
doSetProperty("applicationName", applicationName);
return this;
}
/**
* If we want to a KinesisAsyncClient instance set it to true.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param asyncClient the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder asyncClient(boolean asyncClient) {
doSetProperty("asyncClient", asyncClient);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* If we want to a KCL Consumer, we can pass an instance of
* CloudWatchAsyncClient.
*
* The option is a:
* <code>software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient</code> type.
*
* Group: advanced
*
* @param cloudWatchAsyncClient the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder cloudWatchAsyncClient(software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient cloudWatchAsyncClient) {
doSetProperty("cloudWatchAsyncClient", cloudWatchAsyncClient);
return this;
}
/**
* If we want to a KCL Consumer, we can pass an instance of
* DynamoDbAsyncClient.
*
* The option is a:
* <code>software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient</code> type.
*
* Group: advanced
*
* @param dynamoDbAsyncClient the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder dynamoDbAsyncClient(software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient dynamoDbAsyncClient) {
doSetProperty("dynamoDbAsyncClient", dynamoDbAsyncClient);
return this;
}
/**
* If we want to use a KCL Consumer and disable the CloudWatch Metrics
* Export.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param kclDisableCloudwatchMetricsExport the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder kclDisableCloudwatchMetricsExport(boolean kclDisableCloudwatchMetricsExport) {
doSetProperty("kclDisableCloudwatchMetricsExport", kclDisableCloudwatchMetricsExport);
return this;
}
/**
* If we want to a KCL Consumer set it to true.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param useKclConsumers the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder useKclConsumers(boolean useKclConsumers) {
doSetProperty("useKclConsumers", useKclConsumers);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* To define a proxy host when instantiating the Kinesis client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the Kinesis client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the Kinesis client.
*
* The option is a:
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder accessKey(java.lang.String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* If using a profile credentials provider this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder profileCredentialsName(java.lang.String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder secretKey(java.lang.String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume a IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder sessionToken(java.lang.String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the Kinesis client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Kinesis client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Kinesis client should expect to use Session
* Credentials. This is useful in situation in which the user needs to
* assume a IAM role for doing operations in Kinesis.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Aws2KinesisComponentBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
| Aws2KinesisComponentBuilder |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/type/DefaultArgumentValue.java | {
"start": 1055,
"end": 3165
} | class ____<V> implements ArgumentValue<V> {
private final Argument<V> argument;
private final V value;
/**
* @param argument The argument
* @param value The value
*/
DefaultArgumentValue(Argument<V> argument, V value) {
this.argument = argument;
this.value = value;
}
@Override
public String getName() {
return argument.getName();
}
@Override
public Class<V> getType() {
return argument.getType();
}
@Override
public Optional<Argument<?>> getFirstTypeVariable() {
return argument.getFirstTypeVariable();
}
@Override
public Argument[] getTypeParameters() {
return argument.getTypeParameters();
}
@Override
public Map<String, Argument<?>> getTypeVariables() {
return argument.getTypeVariables();
}
@Override
public V getValue() {
return value;
}
@Override
public <T extends Annotation> T synthesize(Class<T> annotationClass) {
return argument.synthesize(annotationClass);
}
@Nullable
@Override
public <T extends Annotation> T synthesize(@NonNull Class<T> annotationClass, @NonNull String sourceAnnotation) {
return argument.synthesize(annotationClass, sourceAnnotation);
}
@Nullable
@Override
public <T extends Annotation> T synthesizeDeclared(@NonNull Class<T> annotationClass, @NonNull String sourceAnnotation) {
return argument.synthesizeDeclared(annotationClass, sourceAnnotation);
}
@Override
public Annotation[] synthesizeAll() {
return argument.synthesizeAll();
}
@Override
public Annotation[] synthesizeDeclared() {
return argument.synthesizeDeclared();
}
@Override
public boolean equalsType(@Nullable Argument<?> o) {
return argument.equalsType(o);
}
@Override
public int typeHashCode() {
return argument.typeHashCode();
}
@Override
public AnnotationMetadata getAnnotationMetadata() {
return argument.getAnnotationMetadata();
}
}
| DefaultArgumentValue |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java | {
"start": 905,
"end": 1012
} | class ____ select a {@link PasswdAuthenticationProvider} for a given {@code AuthMethod}.
*/
public final | helps |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builder/simple/staticfactorymethod/SimpleImmutablePersonWithStaticFactoryMethodBuilder.java | {
"start": 274,
"end": 1298
} | class ____ {
private final String name;
private final int age;
private final String job;
private final String city;
private final String address;
private final List<String> children;
SimpleImmutablePersonWithStaticFactoryMethodBuilder(Builder builder) {
this.name = builder.name;
this.age = builder.age;
this.job = builder.job;
this.city = builder.city;
this.address = builder.address;
this.children = new ArrayList<>( builder.children );
}
public static Builder builder() {
return new Builder();
}
public int getAge() {
return age;
}
public String getName() {
return name;
}
public String getJob() {
return job;
}
public String getCity() {
return city;
}
public String getAddress() {
return address;
}
public List<String> getChildren() {
return children;
}
public static | SimpleImmutablePersonWithStaticFactoryMethodBuilder |
java | google__guice | core/test/com/google/inject/spi/InjectionPointTest.java | {
"start": 13530,
"end": 14224
} | class ____<T> {
@Inject Set<T> setOfTees;
@Inject
public ParameterizedInjections(Map<T, T> map) {}
}
public void testSignature() throws Exception {
Signature fooA = new Signature(Foo.class.getDeclaredMethod("a", String.class, int.class));
Signature fooB = new Signature(Foo.class.getDeclaredMethod("b"));
Signature barA = new Signature(Bar.class.getDeclaredMethod("a", String.class, int.class));
Signature barB = new Signature(Bar.class.getDeclaredMethod("b"));
assertEquals(fooA.hashCode(), barA.hashCode());
assertEquals(fooB.hashCode(), barB.hashCode());
assertEquals(fooA, barA);
assertEquals(fooB, barB);
}
static | ParameterizedInjections |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/UAnyOf.java | {
"start": 1074,
"end": 2350
} | class ____ extends UExpression {
public static UAnyOf create(UExpression... expressions) {
return create(ImmutableList.copyOf(expressions));
}
public static UAnyOf create(Iterable<? extends UExpression> expressions) {
return new AutoValue_UAnyOf(ImmutableList.copyOf(expressions));
}
abstract ImmutableList<UExpression> expressions();
@Override
public UExpression negate() {
ImmutableList.Builder<UExpression> negations = ImmutableList.builder();
for (UExpression expression : expressions()) {
negations.add(expression.negate());
}
return create(negations.build());
}
@Override
protected Choice<Unifier> defaultAction(Tree tree, Unifier unifier) {
return Choice.from(expressions())
.flatMap(
(UExpression expression) ->
expression.unify(ASTHelpers.stripParentheses(tree), unifier.fork()));
}
@Override
public JCExpression inline(Inliner inliner) throws CouldNotResolveImportException {
throw new UnsupportedOperationException("anyOf should not appear in an @AfterTemplate");
}
@Override
public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
return visitor.visitOther(this, data);
}
@Override
public Kind getKind() {
return Kind.OTHER;
}
}
| UAnyOf |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/packages/ConfigEntityPUAssigmentUsingInterfaceTest.java | {
"start": 822,
"end": 2244
} | class ____ {
private static final Formatter LOG_FORMATTER = new PatternFormatter("%s");
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(EntityImplementingInterface.class, INamedEntity.class))
// In a real-world scenario, this would be used only if there are multiple PUs,
// but this is simpler and enough to reproduce the issue.
.overrideConfigKey("quarkus.hibernate-orm.packages", EntityImplementingInterface.class.getPackageName())
.setLogRecordPredicate(record -> record.getLevel().intValue() >= Level.WARNING.intValue())
// We don't expect any warning, in particular not:
// "Could not find a suitable persistence unit for model classes:"
.assertLogRecords(records -> assertThat(records).extracting(LOG_FORMATTER::format).isEmpty());
@Inject
Session session;
@Test
@ActivateRequestContext
void smoke() {
// We just want to check the lack of warnings... but let's at least check the entity works correctly.
assertThatCode(() -> session.createSelectionQuery("select count(*) from EntityImplementingInterface", Long.class))
.doesNotThrowAnyException();
}
@Entity(name = "EntityImplementingInterface")
public static | ConfigEntityPUAssigmentUsingInterfaceTest |
java | apache__avro | lang/java/trevni/core/src/main/java/org/apache/trevni/BZip2Codec.java | {
"start": 1138,
"end": 2617
} | class ____ extends Codec {
private ByteArrayOutputStream outputBuffer;
public static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
@Override
ByteBuffer compress(ByteBuffer uncompressedData) throws IOException {
ByteArrayOutputStream baos = getOutputBuffer(uncompressedData.remaining());
try (BZip2CompressorOutputStream outputStream = new BZip2CompressorOutputStream(baos)) {
outputStream.write(uncompressedData.array(), computeOffset(uncompressedData), uncompressedData.remaining());
}
return ByteBuffer.wrap(baos.toByteArray());
}
@Override
ByteBuffer decompress(ByteBuffer compressedData) throws IOException {
ByteArrayInputStream bais = new ByteArrayInputStream(compressedData.array(), computeOffset(compressedData),
compressedData.remaining());
try (BZip2CompressorInputStream inputStream = new BZip2CompressorInputStream(bais)) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] buffer = new byte[DEFAULT_BUFFER_SIZE];
int readCount = -1;
while ((readCount = inputStream.read(buffer, compressedData.position(), buffer.length)) > 0) {
baos.write(buffer, 0, readCount);
}
return ByteBuffer.wrap(baos.toByteArray());
}
}
private ByteArrayOutputStream getOutputBuffer(int suggestedLength) {
if (null == outputBuffer)
outputBuffer = new ByteArrayOutputStream(suggestedLength);
outputBuffer.reset();
return outputBuffer;
}
}
| BZip2Codec |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/MetaType.java | {
"start": 647,
"end": 4561
} | class ____ extends AbstractType {
public static final String[] REGISTRATION_KEYS = ArrayHelper.EMPTY_STRING_ARRAY;
private final Type valueType;
private final ImplicitDiscriminatorStrategy implicitValueStrategy;
private final Map<Object,String> discriminatorValuesToEntityNameMap;
private final Map<String,Object> entityNameToDiscriminatorValueMap;
public MetaType(
Type valueType,
ImplicitDiscriminatorStrategy implicitValueStrategy,
Map<Object,String> explicitValueMappings) {
this.valueType = valueType;
this.implicitValueStrategy = implicitValueStrategy;
if ( explicitValueMappings == null || explicitValueMappings.isEmpty() ) {
discriminatorValuesToEntityNameMap = new HashMap<>();
entityNameToDiscriminatorValueMap = new HashMap<>();
}
else {
discriminatorValuesToEntityNameMap = explicitValueMappings;
entityNameToDiscriminatorValueMap = new HashMap<>();
for ( var entry : discriminatorValuesToEntityNameMap.entrySet() ) {
entityNameToDiscriminatorValueMap.put( entry.getValue(), entry.getKey() );
}
}
}
public Type getBaseType() {
return valueType;
}
public ImplicitDiscriminatorStrategy getImplicitValueStrategy() {
return implicitValueStrategy;
}
public String[] getRegistrationKeys() {
return REGISTRATION_KEYS;
}
public Map<Object, String> getDiscriminatorValuesToEntityNameMap() {
return discriminatorValuesToEntityNameMap;
}
public Map<String,Object> getEntityNameToDiscriminatorValueMap(){
return entityNameToDiscriminatorValueMap;
}
public int[] getSqlTypeCodes(MappingContext mappingContext) throws MappingException {
return valueType.getSqlTypeCodes( mappingContext );
}
@Override
public int getColumnSpan(MappingContext mapping) throws MappingException {
return valueType.getColumnSpan(mapping);
}
@Override
public Class<?> getReturnedClass() {
return String.class;
}
@Override
public int compare(Object x, Object y, SessionFactoryImplementor sessionFactory) {
return compare( x, y );
}
@Override
public void nullSafeSet(
PreparedStatement st,
Object value,
int index,
SharedSessionContractImplementor session) throws HibernateException, SQLException {
throw new UnsupportedOperationException();
// baseType.nullSafeSet(st, value==null ? null : entityNameToDiscriminatorValueMap.get(value), index, session);
}
@Override
public void nullSafeSet(
PreparedStatement st,
Object value,
int index,
boolean[] settable,
SharedSessionContractImplementor session) throws HibernateException, SQLException {
if ( settable[0] ) {
nullSafeSet(st, value, index, session);
}
}
@Override
public String toLoggableString(Object value, SessionFactoryImplementor factory) throws HibernateException {
return toXMLString(value, factory);
}
public String toXMLString(Object value, SessionFactoryImplementor factory) throws HibernateException {
return (String) value; //value is the entity name
}
public Object fromXMLString(String xml, MappingContext mappingContext) throws HibernateException {
return xml; //xml is the entity name
}
@Override
public String getName() {
return valueType.getName(); //TODO!
}
@Override
public Object deepCopy(Object value, SessionFactoryImplementor factory) throws HibernateException {
return value;
}
@Override
public Object replace(
Object original,
Object target,
SharedSessionContractImplementor session,
Object owner,
Map<Object, Object> copyCache) {
return original;
}
@Override
public boolean isMutable() {
return false;
}
@Override
public boolean[] toColumnNullness(Object value, MappingContext mapping) {
throw new UnsupportedOperationException();
}
@Override
public boolean isDirty(Object old, Object current, boolean[] checkable, SharedSessionContractImplementor session) throws HibernateException {
return checkable[0] && isDirty(old, current, session);
}
}
| MetaType |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/nosql/NoSqlProvider.java | {
"start": 889,
"end": 1104
} | class ____ plugins for configuring the {@link NoSqlAppender} with the proper provider
* (MongoDB, etc.).
*
* @param <C> Specifies which implementation of {@link NoSqlConnection} this provider provides.
*/
public | are |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/HttpsComponentBuilderFactory.java | {
"start": 1869,
"end": 31718
} | interface ____ extends ComponentBuilder<HttpComponent> {
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* To enable logging HTTP request and response. You can use a custom
* LoggingHttpActivityListener as httpActivityListener to control
* logging options.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param logHttpActivity the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder logHttpActivity(boolean logHttpActivity) {
doSetProperty("logHttpActivity", logHttpActivity);
return this;
}
/**
* Whether to skip Camel control headers (CamelHttp... headers) to
* influence this endpoint. Control headers from previous HTTP
* components can influence how this Camel component behaves such as
* CamelHttpPath, CamelHttpQuery, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param skipControlHeaders the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder skipControlHeaders(boolean skipControlHeaders) {
doSetProperty("skipControlHeaders", skipControlHeaders);
return this;
}
/**
* Whether to skip mapping all the Camel headers as HTTP request
* headers. This is useful when you know that calling the HTTP service
* should not include any custom headers.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param skipRequestHeaders the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder skipRequestHeaders(boolean skipRequestHeaders) {
doSetProperty("skipRequestHeaders", skipRequestHeaders);
return this;
}
/**
* Whether to skip mapping all the HTTP response headers to Camel
* headers.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param skipResponseHeaders the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder skipResponseHeaders(boolean skipResponseHeaders) {
doSetProperty("skipResponseHeaders", skipResponseHeaders);
return this;
}
/**
* Whether the Content-Type header should automatic include charset for
* string based content.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param contentTypeCharsetEnabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder contentTypeCharsetEnabled(boolean contentTypeCharsetEnabled) {
doSetProperty("contentTypeCharsetEnabled", contentTypeCharsetEnabled);
return this;
}
/**
* To use a custom org.apache.hc.client5.http.cookie.CookieStore. By
* default the org.apache.hc.client5.http.cookie.BasicCookieStore is
* used which is an in-memory only cookie store. Notice if
* bridgeEndpoint=true then the cookie store is forced to be a noop
* cookie store as cookie shouldn't be stored as we are just bridging
* (eg acting as a proxy).
*
* The option is a:
* <code>org.apache.hc.client5.http.cookie.CookieStore</code> type.
*
* Group: producer (advanced)
*
* @param cookieStore the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder cookieStore(org.apache.hc.client5.http.cookie.CookieStore cookieStore) {
doSetProperty("cookieStore", cookieStore);
return this;
}
/**
* If this option is true then IN exchange headers will be copied to OUT
* exchange headers according to copy strategy. Setting this to false,
* allows to only include the headers from the HTTP response (not
* propagating IN headers).
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param copyHeaders the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder copyHeaders(boolean copyHeaders) {
doSetProperty("copyHeaders", copyHeaders);
return this;
}
/**
* Whether to the HTTP request should follow redirects. By default the
* HTTP request does not follow redirects.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param followRedirects the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder followRedirects(boolean followRedirects) {
doSetProperty("followRedirects", followRedirects);
return this;
}
/**
* To use a custom activity listener.
*
* The option is a:
* <code>org.apache.camel.component.http.HttpActivityListener</code> type.
*
* Group: producer (advanced)
*
* @param httpActivityListener the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder httpActivityListener(org.apache.camel.component.http.HttpActivityListener httpActivityListener) {
doSetProperty("httpActivityListener", httpActivityListener);
return this;
}
/**
* This threshold in bytes controls whether the response payload should
* be stored in memory as a byte array or be streaming based. Set this
* to -1 to always use streaming mode.
*
* The option is a: <code>int</code> type.
*
* Default: 8192
* Group: producer (advanced)
*
* @param responsePayloadStreamingThreshold the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder responsePayloadStreamingThreshold(int responsePayloadStreamingThreshold) {
doSetProperty("responsePayloadStreamingThreshold", responsePayloadStreamingThreshold);
return this;
}
/**
* To set a custom HTTP User-Agent request header.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param userAgent the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder userAgent(java.lang.String userAgent) {
doSetProperty("userAgent", userAgent);
return this;
}
/**
* Whether to allow java serialization when a request uses
* context-type=application/x-java-serialized-object. This is by default
* turned off. If you enable this then be aware that Java will
* deserialize the incoming data from the request to Java and that can
* be a potential security risk.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param allowJavaSerializedObject the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder allowJavaSerializedObject(boolean allowJavaSerializedObject) {
doSetProperty("allowJavaSerializedObject", allowJavaSerializedObject);
return this;
}
/**
* Disables authentication scheme caching.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param authCachingDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder authCachingDisabled(boolean authCachingDisabled) {
doSetProperty("authCachingDisabled", authCachingDisabled);
return this;
}
/**
* Disables automatic request recovery and re-execution.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param automaticRetriesDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder automaticRetriesDisabled(boolean automaticRetriesDisabled) {
doSetProperty("automaticRetriesDisabled", automaticRetriesDisabled);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* To use a custom and shared HttpClientConnectionManager to manage
* connections. If this has been configured then this is always used for
* all endpoints created by this component.
*
* The option is a:
* <code>org.apache.hc.client5.http.io.HttpClientConnectionManager</code> type.
*
* Group: advanced
*
* @param clientConnectionManager the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder clientConnectionManager(org.apache.hc.client5.http.io.HttpClientConnectionManager clientConnectionManager) {
doSetProperty("clientConnectionManager", clientConnectionManager);
return this;
}
/**
* The maximum number of connections per route.
*
* The option is a: <code>int</code> type.
*
* Default: 20
* Group: advanced
*
* @param connectionsPerRoute the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder connectionsPerRoute(int connectionsPerRoute) {
doSetProperty("connectionsPerRoute", connectionsPerRoute);
return this;
}
/**
* Disables connection state tracking.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param connectionStateDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder connectionStateDisabled(boolean connectionStateDisabled) {
doSetProperty("connectionStateDisabled", connectionStateDisabled);
return this;
}
/**
* The time for connection to live, the time unit is millisecond, the
* default value is always keepAlive.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*
* @param connectionTimeToLive the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder connectionTimeToLive(long connectionTimeToLive) {
doSetProperty("connectionTimeToLive", connectionTimeToLive);
return this;
}
/**
* Disables automatic content decompression.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param contentCompressionDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder contentCompressionDisabled(boolean contentCompressionDisabled) {
doSetProperty("contentCompressionDisabled", contentCompressionDisabled);
return this;
}
/**
* Disables state (cookie) management.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param cookieManagementDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder cookieManagementDisabled(boolean cookieManagementDisabled) {
doSetProperty("cookieManagementDisabled", cookieManagementDisabled);
return this;
}
/**
* Disables the default user agent set by this builder if none has been
* provided by the user.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param defaultUserAgentDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder defaultUserAgentDisabled(boolean defaultUserAgentDisabled) {
doSetProperty("defaultUserAgentDisabled", defaultUserAgentDisabled);
return this;
}
/**
* To use a custom HttpBinding to control the mapping between Camel
* message and HttpClient.
*
* The option is a:
* <code>org.apache.camel.http.common.HttpBinding</code>
* type.
*
* Group: advanced
*
* @param httpBinding the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder httpBinding(org.apache.camel.http.common.HttpBinding httpBinding) {
doSetProperty("httpBinding", httpBinding);
return this;
}
/**
* To use the custom HttpClientConfigurer to perform configuration of
* the HttpClient that will be used.
*
* The option is a:
* <code>org.apache.camel.component.http.HttpClientConfigurer</code> type.
*
* Group: advanced
*
* @param httpClientConfigurer the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder httpClientConfigurer(org.apache.camel.component.http.HttpClientConfigurer httpClientConfigurer) {
doSetProperty("httpClientConfigurer", httpClientConfigurer);
return this;
}
/**
* To use the shared HttpConfiguration as base configuration.
*
* The option is a:
* <code>org.apache.camel.http.common.HttpConfiguration</code> type.
*
* Group: advanced
*
* @param httpConfiguration the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder httpConfiguration(org.apache.camel.http.common.HttpConfiguration httpConfiguration) {
doSetProperty("httpConfiguration", httpConfiguration);
return this;
}
/**
* To use a custom org.apache.hc.core5.http.protocol.HttpContext when
* executing requests.
*
* The option is a:
* <code>org.apache.hc.core5.http.protocol.HttpContext</code> type.
*
* Group: advanced
*
* @param httpContext the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder httpContext(org.apache.hc.core5.http.protocol.HttpContext httpContext) {
doSetProperty("httpContext", httpContext);
return this;
}
/**
* The maximum number of connections.
*
* The option is a: <code>int</code> type.
*
* Default: 200
* Group: advanced
*
* @param maxTotalConnections the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder maxTotalConnections(int maxTotalConnections) {
doSetProperty("maxTotalConnections", maxTotalConnections);
return this;
}
/**
* Disables automatic redirect handling.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param redirectHandlingDisabled the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder redirectHandlingDisabled(boolean redirectHandlingDisabled) {
doSetProperty("redirectHandlingDisabled", redirectHandlingDisabled);
return this;
}
/**
* To use System Properties as fallback for configuration for
* configuring HTTP Client.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param useSystemProperties the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder useSystemProperties(boolean useSystemProperties) {
doSetProperty("useSystemProperties", useSystemProperties);
return this;
}
/**
* To use a custom org.apache.camel.spi.HeaderFilterStrategy to filter
* header to and from Camel message.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code>
* type.
*
* Group: filter
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* Proxy authentication domain to use with NTLM.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthDomain the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthDomain(java.lang.String proxyAuthDomain) {
doSetProperty("proxyAuthDomain", proxyAuthDomain);
return this;
}
/**
* Proxy server host.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthHost the value to set
* @return the dsl builder
*/
@Deprecated
default HttpsComponentBuilder proxyAuthHost(java.lang.String proxyAuthHost) {
doSetProperty("proxyAuthHost", proxyAuthHost);
return this;
}
/**
* Proxy authentication method to use (NTLM is deprecated).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthMethod the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthMethod(java.lang.String proxyAuthMethod) {
doSetProperty("proxyAuthMethod", proxyAuthMethod);
return this;
}
/**
* Proxy authentication domain (workstation name) to use with NTLM (NTLM
* is deprecated).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthNtHost the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthNtHost(java.lang.String proxyAuthNtHost) {
doSetProperty("proxyAuthNtHost", proxyAuthNtHost);
return this;
}
/**
* Proxy server password.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthPassword the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthPassword(java.lang.String proxyAuthPassword) {
doSetProperty("proxyAuthPassword", proxyAuthPassword);
return this;
}
/**
* Proxy server port.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyAuthPort the value to set
* @return the dsl builder
*/
@Deprecated
default HttpsComponentBuilder proxyAuthPort(java.lang.Integer proxyAuthPort) {
doSetProperty("proxyAuthPort", proxyAuthPort);
return this;
}
/**
* Proxy server authentication protocol scheme to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthScheme the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthScheme(java.lang.String proxyAuthScheme) {
doSetProperty("proxyAuthScheme", proxyAuthScheme);
return this;
}
/**
* Proxy server username.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyAuthUsername the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyAuthUsername(java.lang.String proxyAuthUsername) {
doSetProperty("proxyAuthUsername", proxyAuthUsername);
return this;
}
/**
* Proxy server host.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* Proxy server port.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To configure security using SSLContextParameters. Important: Only one
* instance of org.apache.camel.support.jsse.SSLContextParameters is
* supported per HttpComponent. If you need to use 2 or more different
* instances, you need to define a new HttpComponent per instance you
* need.
*
* The option is a:
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Enable usage of global SSL context parameters.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useGlobalSslContextParameters the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder useGlobalSslContextParameters(boolean useGlobalSslContextParameters) {
doSetProperty("useGlobalSslContextParameters", useGlobalSslContextParameters);
return this;
}
/**
* To use a custom X509HostnameVerifier such as DefaultHostnameVerifier
* or NoopHostnameVerifier.
*
* The option is a:
* <code>javax.net.ssl.HostnameVerifier</code> type.
*
* Group: security
*
* @param x509HostnameVerifier the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder x509HostnameVerifier(javax.net.ssl.HostnameVerifier x509HostnameVerifier) {
doSetProperty("x509HostnameVerifier", x509HostnameVerifier);
return this;
}
/**
* Returns the connection lease request timeout (in millis) used when
* requesting a connection from the connection manager. A timeout value
* of zero is interpreted as a disabled timeout.
*
* The option is a: <code>long</code> type.
*
* Default: 180000
* Group: timeout
*
* @param connectionRequestTimeout the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder connectionRequestTimeout(long connectionRequestTimeout) {
doSetProperty("connectionRequestTimeout", connectionRequestTimeout);
return this;
}
/**
* Determines the timeout (in millis) until a new connection is fully
* established. A timeout value of zero is interpreted as an infinite
* timeout.
*
* The option is a: <code>long</code> type.
*
* Default: 180000
* Group: timeout
*
* @param connectTimeout the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder connectTimeout(long connectTimeout) {
doSetProperty("connectTimeout", connectTimeout);
return this;
}
/**
* Determines the timeout (in millis) until arrival of a response from
* the opposite endpoint. A timeout value of zero is interpreted as an
* infinite timeout. Please note that response timeout may be
* unsupported by HTTP transports with message multiplexing.
*
* The option is a: <code>long</code> type.
*
* Group: timeout
*
* @param responseTimeout the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder responseTimeout(long responseTimeout) {
doSetProperty("responseTimeout", responseTimeout);
return this;
}
/**
* Determines the default socket timeout (in millis) value for blocking
* I/O operations.
*
* The option is a: <code>long</code> type.
*
* Default: 180000
* Group: timeout
*
* @param soTimeout the value to set
* @return the dsl builder
*/
default HttpsComponentBuilder soTimeout(long soTimeout) {
doSetProperty("soTimeout", soTimeout);
return this;
}
}
| HttpsComponentBuilder |
java | google__auto | factory/src/main/java/com/google/auto/factory/processor/FactoryDescriptorGenerator.java | {
"start": 4667,
"end": 7371
} | interface
____.printMessage(
ERROR, "Auto-factory doesn't support being applied to interfaces.", type, mirror);
return ImmutableSet.of();
}
@Override
public ImmutableSet<FactoryMethodDescriptor> visitExecutableAsConstructor(
ExecutableElement e, Void p) {
// applied to a constructor of a type to be created
return ImmutableSet.of(generateDescriptorForConstructor(declaration.get(), e));
}
},
null);
}
FactoryMethodDescriptor generateDescriptorForConstructor(
final AutoFactoryDeclaration declaration, ExecutableElement constructor) {
checkNotNull(constructor);
checkArgument(constructor.getKind() == ElementKind.CONSTRUCTOR);
TypeElement classElement = MoreElements.asType(constructor.getEnclosingElement());
Map<Boolean, List<VariableElement>> parameterMap =
constructor.getParameters().stream()
.collect(partitioningBy(parameter -> isAnnotationPresent(parameter, Provided.class)));
// The map returned by partitioningBy always has entries for both key values but our
// null-checker isn't yet smart enough to know that.
ImmutableSet<Parameter> providedParameters =
Parameter.forParameterList(requireNonNull(parameterMap.get(true)), types, injectApi);
ImmutableSet<Parameter> passedParameters =
Parameter.forParameterList(requireNonNull(parameterMap.get(false)), types, injectApi);
return FactoryMethodDescriptor.builder(declaration)
.name("create")
.returnType(classElement.asType())
.publicMethod(classElement.getModifiers().contains(PUBLIC))
.providedParameters(providedParameters)
.passedParameters(passedParameters)
.creationParameters(
Parameter.forParameterList(constructor.getParameters(), types, injectApi))
.isVarArgs(constructor.isVarArgs())
.exceptions(constructor.getThrownTypes())
.overridingMethod(false)
.build();
}
private ImmutableSet<FactoryMethodDescriptor> generateDescriptorForDefaultConstructor(
AutoFactoryDeclaration declaration, TypeElement type) {
return ImmutableSet.of(
FactoryMethodDescriptor.builder(declaration)
.name("create")
.returnType(type.asType())
.publicMethod(type.getModifiers().contains(PUBLIC))
.providedParameters(ImmutableSet.of())
.passedParameters(ImmutableSet.of())
.creationParameters(ImmutableSet.of())
.isVarArgs(false)
.exceptions(ImmutableSet.of())
.overridingMethod(false)
.build());
}
}
| messager |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1247/InternalDto.java | {
"start": 232,
"end": 646
} | class ____ {
private String data2;
private InternalData internalData;
public String getData2() {
return data2;
}
public void setData2(String data2) {
this.data2 = data2;
}
public InternalData getInternalData() {
return internalData;
}
public void setInternalData(InternalData internalData) {
this.internalData = internalData;
}
}
| InternalDto |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/common/Tasks.java | {
"start": 2213,
"end": 2306
} | interface ____<I, E extends Exception> {
void run(I item) throws E;
}
public static | Task |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/RedundantNullCheckTest.java | {
"start": 13307,
"end": 13939
} | class ____ {
void process() {
if (UnannotatedLib.getString() == null) {
/* This check should NOT be redundant */
}
}
}
""")
.doTest();
}
@Test
public void negative_methodCall_fromMapGet_inNullMarkedScope() {
// This test is similar to variableInitializedFromMapGet, but checks direct method call
compilationHelper
.addSourceLines(
"Test.java",
"""
import org.jspecify.annotations.NullMarked;
import java.util.Map;
@NullMarked
| Test |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConfiguration.java | {
"start": 49032,
"end": 71077
} | class ____ keys (defaults to the same as for messages if nothing is given).
*/
public void setKeySerializer(String keySerializer) {
this.keySerializer = keySerializer;
}
public String getKerberosInitCmd() {
return kerberosInitCmd;
}
/**
* Kerberos kinit command path. Default is /usr/bin/kinit
*/
public void setKerberosInitCmd(String kerberosInitCmd) {
this.kerberosInitCmd = kerberosInitCmd;
}
public Integer getKerberosBeforeReloginMinTime() {
return kerberosBeforeReloginMinTime;
}
/**
* Login thread sleep time between refresh attempts.
*/
public void setKerberosBeforeReloginMinTime(Integer kerberosBeforeReloginMinTime) {
this.kerberosBeforeReloginMinTime = kerberosBeforeReloginMinTime;
}
public Double getKerberosRenewJitter() {
return kerberosRenewJitter;
}
/**
* Percentage of random jitter added to the renewal time.
*/
public void setKerberosRenewJitter(Double kerberosRenewJitter) {
this.kerberosRenewJitter = kerberosRenewJitter;
}
public Double getKerberosRenewWindowFactor() {
return kerberosRenewWindowFactor;
}
/**
* Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been
* reached, at which time it will try to renew the ticket.
*/
public void setKerberosRenewWindowFactor(Double kerberosRenewWindowFactor) {
this.kerberosRenewWindowFactor = kerberosRenewWindowFactor;
}
public String getKerberosPrincipalToLocalRules() {
return kerberosPrincipalToLocalRules;
}
/**
* A list of rules for mapping from principal names to short names (typically operating system usernames). The rules
* are evaluated in order, and the first rule that matches a principal name is used to map it to a short name. Any
* later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are
* mapped to {username}. For more details on the format, please see the Security Authorization and ACLs
* documentation (at the Apache Kafka project website).
*
* Multiple values can be separated by comma
*/
public void setKerberosPrincipalToLocalRules(String kerberosPrincipalToLocalRules) {
this.kerberosPrincipalToLocalRules = kerberosPrincipalToLocalRules;
}
public String getSslCipherSuites() {
return sslCipherSuites;
}
/**
* A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange
* algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By
* default, all the available cipher suites are supported.
*/
public void setSslCipherSuites(String sslCipherSuites) {
this.sslCipherSuites = sslCipherSuites;
}
public String getSslEndpointAlgorithm() {
return sslEndpointAlgorithm;
}
/**
* The endpoint identification algorithm to validate server hostname using server certificate. Use none or false to
* disable server hostname verification.
*/
public void setSslEndpointAlgorithm(String sslEndpointAlgorithm) {
this.sslEndpointAlgorithm = sslEndpointAlgorithm;
}
public String getSslKeymanagerAlgorithm() {
return sslKeymanagerAlgorithm;
}
/**
* The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm
* configured for the Java Virtual Machine.
*/
public void setSslKeymanagerAlgorithm(String sslKeymanagerAlgorithm) {
this.sslKeymanagerAlgorithm = sslKeymanagerAlgorithm;
}
public String getSslTrustmanagerAlgorithm() {
return sslTrustmanagerAlgorithm;
}
/**
* The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory
* algorithm configured for the Java Virtual Machine.
*/
public void setSslTrustmanagerAlgorithm(String sslTrustmanagerAlgorithm) {
this.sslTrustmanagerAlgorithm = sslTrustmanagerAlgorithm;
}
public String getSslEnabledProtocols() {
return sslEnabledProtocols;
}
/**
* The list of protocols enabled for SSL connections. The default is TLSv1.2,TLSv1.3 when running with Java 11 or
* newer, TLSv1.2 otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both
* support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be
* fine for most cases. Also see the config documentation for SslProtocol.
*/
public void setSslEnabledProtocols(String sslEnabledProtocols) {
this.sslEnabledProtocols = sslEnabledProtocols;
}
public String getSslKeystoreType() {
return sslKeystoreType;
}
/**
* The file format of the key store file. This is optional for the client. The default value is JKS
*/
public void setSslKeystoreType(String sslKeystoreType) {
this.sslKeystoreType = sslKeystoreType;
}
public String getSslProtocol() {
return sslProtocol;
}
/**
* The SSL protocol used to generate the SSLContext. The default is TLSv1.3 when running with Java 11 or newer,
* TLSv1.2 otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are TLSv1.2 and
* TLSv1.3. TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to
* known security vulnerabilities. With the default value for this config and sslEnabledProtocols, clients will
* downgrade to TLSv1.2 if the server does not support TLSv1.3. If this config is set to TLSv1.2, clients will not
* use TLSv1.3 even if it is one of the values in sslEnabledProtocols and the server only supports TLSv1.3.
*/
public void setSslProtocol(String sslProtocol) {
this.sslProtocol = sslProtocol;
}
public String getSslProvider() {
return sslProvider;
}
/**
* The name of the security provider used for SSL connections. Default value is the default security provider of the
* JVM.
*/
public void setSslProvider(String sslProvider) {
this.sslProvider = sslProvider;
}
public String getSslTruststoreType() {
return sslTruststoreType;
}
/**
* The file format of the trust store file. The default value is JKS.
*/
public void setSslTruststoreType(String sslTruststoreType) {
this.sslTruststoreType = sslTruststoreType;
}
public String getSaslKerberosServiceName() {
return saslKerberosServiceName;
}
/**
* The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's
* config.
*/
public void setSaslKerberosServiceName(String saslKerberosServiceName) {
this.saslKerberosServiceName = saslKerberosServiceName;
}
public String getSaslMechanism() {
return saslMechanism;
}
/**
* The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see <a href=
* "http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml">http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml</a>
*/
public void setSaslMechanism(String saslMechanism) {
this.saslMechanism = saslMechanism;
}
public String getSaslJaasConfig() {
return saslJaasConfig;
}
/**
* Expose the kafka sasl.jaas.config parameter Example: org.apache.kafka.common.security.plain.PlainLoginModule
* required username="USERNAME" password="PASSWORD";
*/
public void setSaslJaasConfig(String saslJaasConfig) {
this.saslJaasConfig = saslJaasConfig;
}
public String getSecurityProtocol() {
return securityProtocol;
}
/**
* Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported
*/
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
public SSLContextParameters getSslContextParameters() {
return sslContextParameters;
}
/**
* SSL configuration using a Camel {@link SSLContextParameters} object. If configured, it's applied before the other
* SSL endpoint parameters.
*
* NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the
* KeyStoreParameters.resource option.
*/
public void setSslContextParameters(SSLContextParameters sslContextParameters) {
this.sslContextParameters = sslContextParameters;
}
public String getSslKeyPassword() {
return sslKeyPassword;
}
/**
* The password of the private key in the key store file or the PEM key specified in sslKeystoreKey. This is
* required for clients only if two-way authentication is configured.
*/
public void setSslKeyPassword(String sslKeyPassword) {
this.sslKeyPassword = sslKeyPassword;
}
public String getSslKeystoreLocation() {
return sslKeystoreLocation;
}
/**
* The location of the key store file. This is optional for the client and can be used for two-way authentication
* for the client.
*/
public void setSslKeystoreLocation(String sslKeystoreLocation) {
this.sslKeystoreLocation = sslKeystoreLocation;
}
public String getSslKeystorePassword() {
return sslKeystorePassword;
}
/**
* The store password for the key store file. This is optional for the client and only needed if sslKeystoreLocation
* is configured. Key store password is not supported for PEM format.
*/
public void setSslKeystorePassword(String sslKeystorePassword) {
this.sslKeystorePassword = sslKeystorePassword;
}
public String getSslTruststoreLocation() {
return sslTruststoreLocation;
}
/**
* The location of the trust store file.
*/
public void setSslTruststoreLocation(String sslTruststoreLocation) {
this.sslTruststoreLocation = sslTruststoreLocation;
}
public String getSslTruststorePassword() {
return sslTruststorePassword;
}
/**
* The password for the trust store file. If a password is not set, trust store file configured will still be used,
* but integrity checking is disabled. Trust store password is not supported for PEM format.
*/
public void setSslTruststorePassword(String sslTruststorePassword) {
this.sslTruststorePassword = sslTruststorePassword;
}
    /** Gets the total memory (in bytes) the producer may use to buffer pending records; see the setter for details. */
    public Integer getBufferMemorySize() {
        return bufferMemorySize;
    }
    /**
     * The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are
     * sent faster than they can be delivered to the server, the producer will either block or throw an exception based
     * on the preference specified by block.on.buffer.full.This setting should correspond roughly to the total memory
     * the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some
     * additional memory will be used for compression (if compression is enabled) as well as for maintaining in-flight
     * requests.
     */
    public void setBufferMemorySize(Integer bufferMemorySize) {
        this.bufferMemorySize = bufferMemorySize;
    }
public String getKey() {
return key;
}
/**
* The record key (or null if no key is specified). If this option has been configured then it take precedence over
* header {@link KafkaConstants#KEY}
*/
public void setKey(String key) {
this.key = key;
}
public Integer getPartitionKey() {
return partitionKey;
}
/**
* The partition to which the record will be sent (or null if no partition was specified). If this option has been
* configured then it take precedence over header {@link KafkaConstants#PARTITION_KEY}
*/
public void setPartitionKey(Integer partitionKey) {
this.partitionKey = partitionKey;
}
public boolean isUseIterator() {
return useIterator;
}
/**
* Sets whether sending to kafka should send the message body as a single record, or use a java.util.Iterator to
* send multiple records to kafka (if the message body can be iterated).
*/
public void setUseIterator(boolean useIterator) {
this.useIterator = useIterator;
}
    /** Gets the configured producer acks setting (0, 1, all or -1); see the setter for semantics. */
    public String getRequestRequiredAcks() {
        return requestRequiredAcks;
    }
    /**
     * The number of acknowledgments the producer requires the leader to have received before considering a request
     * complete. This controls the durability of records that are sent. The following settings are allowed:
     *
     * acks=0 If set to zero, then the producer will not wait for any acknowledgment from the server at all. The record
     * will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has
     * received the record in this case, and the retry configuration will not take effect (as the client won't generally
     * know of any failures). The offset given back for each record will always be set to -1. acks=1 This will mean the
     * leader will write the record to its local log but will respond without awaiting full acknowledgment from all
     * followers. In this case should the leader fail immediately after acknowledging the record, but before the
     * followers have replicated it, then the record will be lost. acks=all This means the leader will wait for the full
     * set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost as long as at
     * least one in-sync replica remains alive. This is the strongest available guarantee. This is equivalent to the
     * acks=-1 setting. Note that enabling idempotence requires this config value to be 'all'. If conflicting
     * configurations are set and idempotence is not explicitly enabled, idempotence is disabled.
     */
    public void setRequestRequiredAcks(String requestRequiredAcks) {
        this.requestRequiredAcks = requestRequiredAcks;
    }
    /** Gets the configured number of producer send retries; see the setter for details and caveats. */
    public Integer getRetries() {
        return retries;
    }
    /**
     * Setting a value greater than zero will cause the client to resend any record that has failed to be sent due to a
     * potentially transient error. Note that this retry is no different from if the client re-sending the record upon
     * receiving the error. Produce requests will be failed before the number of retries has been exhausted if the
     * timeout configured by delivery.timeout.ms expires first before successful acknowledgement. Users should generally
     * prefer to leave this config unset and instead use delivery.timeout.ms to control retry behavior.
     *
     * Enabling idempotence requires this config value to be greater than 0. If conflicting configurations are set and
     * idempotence is not explicitly enabled, idempotence is disabled.
     *
     * Allowing retries while setting enable.idempotence to false and max.in.flight.requests.per.connection to 1 will
     * potentially change the ordering of records, because if two batches are sent to a single partition, and the first
     * fails and is retried but the second succeeds; then the records in the second batch may appear first.
     */
    public void setRetries(Integer retries) {
        this.retries = retries;
    }
    /** Gets the configured producer batch size in bytes; see the setter for details. */
    public Integer getProducerBatchSize() {
        return producerBatchSize;
    }
    /**
     * The producer will attempt to batch records together into fewer requests whenever multiple records are being sent
     * to the same partition. This helps performance on both the client and the server. This configuration controls the
     * default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to
     * brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size
     * will make batching less common and may reduce throughput (a batch size of zero will disable batching entirely). A
     * very large batch size may use memory a bit more wastefully as we will always allocate a buffer of the specified
     * batch size in anticipation of additional records.
     */
    public void setProducerBatchSize(Integer producerBatchSize) {
        this.producerBatchSize = producerBatchSize;
    }
public boolean isBatchWithIndividualHeaders() {
return batchWithIndividualHeaders;
}
/**
* If this feature is enabled and a single element of a batch is an Exchange or Message, the producer will generate
* individual kafka header values for it by using the batch Message to determine the values. Normal behavior
* consists of always using the same header values (which are determined by the parent Exchange which contains the
* Iterable or Iterator).
*/
public void setBatchWithIndividualHeaders(boolean batchWithIndividualHeaders) {
this.batchWithIndividualHeaders = batchWithIndividualHeaders;
}
public Integer getConnectionMaxIdleMs() {
return connectionMaxIdleMs;
}
/**
* Close idle connections after the number of milliseconds specified by this config.
*/
public void setConnectionMaxIdleMs(Integer connectionMaxIdleMs) {
this.connectionMaxIdleMs = connectionMaxIdleMs;
}
    /** Gets the configured producer linger delay in milliseconds; see the setter for details. */
    public Integer getLingerMs() {
        return lingerMs;
    }
    /**
     * The producer groups together any records that arrive in between request transmissions into a single, batched,
     * request. Normally, this occurs only under load when records arrive faster than they can be sent out. However, in
     * some circumstances, the client may want to reduce the number of requests even under a moderate load. This setting
     * achieves this by adding a small amount of artificial delay. That is, rather than immediately sending out a
     * record, the producer will wait for up to the given delay to allow other records to be sent so that they can be
     * batched together. This can be thought of as analogous to Nagle's algorithm in TCP. This setting gives the upper
     * bound on the delay for batching: once we get batch.size worth of records for a partition, it will be sent
     * immediately regardless of this setting, however, if we have fewer than this many bytes accumulated for this
     * partition, we will 'linger' for the specified time waiting for more records to show up. This setting defaults to
     * 0 (i.e., no delay). Setting linger.ms=5, for example, would have the effect of reducing the number of requests
     * sent but would add up to 5ms of latency to records sent in the absence of load.
     */
    public void setLingerMs(Integer lingerMs) {
        this.lingerMs = lingerMs;
    }
    /** Gets the configured maximum blocking time (ms) for KafkaProducer operations; see the setter for details. */
    public Integer getMaxBlockMs() {
        return maxBlockMs;
    }
    /**
     * The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(),
     * sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block. For send() this
     * timeout bounds the total time waiting for both metadata fetch and buffer allocation (blocking in the
     * user-supplied serializers or partitioner is not counted against this timeout). For partitionsFor() this time out
     * bounds the time spent waiting for metadata if it is unavailable. The transaction-related methods always block,
     * but may time out if the transaction coordinator could not be discovered or did not respond within the timeout.
     */
    public void setMaxBlockMs(Integer maxBlockMs) {
        this.maxBlockMs = maxBlockMs;
    }
public Integer getMaxRequestSize() {
return maxRequestSize;
}
/**
* The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server
* has its own cap on record size which may be different from this. This setting will limit the number of record
* batches the producer will send in a single request to avoid sending huge requests.
*/
public void setMaxRequestSize(Integer maxRequestSize) {
this.maxRequestSize = maxRequestSize;
}
public Integer getReceiveBufferBytes() {
return receiveBufferBytes;
}
/**
* The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
*/
public void setReceiveBufferBytes(Integer receiveBufferBytes) {
this.receiveBufferBytes = receiveBufferBytes;
}
public Integer getMaxInFlightRequest() {
return maxInFlightRequest;
}
/**
* The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note
* that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message
* re-ordering due to retries (i.e., if retries are enabled).
*/
public void setMaxInFlightRequest(Integer maxInFlightRequest) {
this.maxInFlightRequest = maxInFlightRequest;
}
public Integer getMetadataMaxAgeMs() {
return metadataMaxAgeMs;
}
/**
* The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any
* partition leadership changes to proactively discover any new brokers or partitions.
*/
public void setMetadataMaxAgeMs(Integer metadataMaxAgeMs) {
this.metadataMaxAgeMs = metadataMaxAgeMs;
}
public String getMetricReporters() {
return metricReporters;
}
/**
* A list of classes to use as metrics reporters. Implementing the MetricReporter | for |
java | spring-projects__spring-framework | spring-web/src/testFixtures/java/org/springframework/web/testfixture/method/MvcAnnotationPredicates.java | {
"start": 5905,
"end": 6612
} | class ____ implements Predicate<MethodParameter> {
private String name;
private boolean required = true;
public RequestPartPredicate name(String name) {
this.name = name;
return this;
}
public RequestPartPredicate noName() {
this.name = "";
return this;
}
public RequestPartPredicate notRequired() {
this.required = false;
return this;
}
@Override
public boolean test(MethodParameter parameter) {
RequestPart annotation = parameter.getParameterAnnotation(RequestPart.class);
return annotation != null &&
(this.name == null || annotation.name().equals(this.name)) &&
annotation.required() == this.required;
}
}
public static | RequestPartPredicate |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/path/JSONPath_paths_test4.java | {
"start": 246,
"end": 938
} | class ____ extends TestCase {
public void test_map() throws Exception {
List<Object> list = new ArrayList<Object>();
list.add(1001);
list.add("wenshao");
list.add(Collections.singletonMap("type", "emp"));
Map<String, Object> paths = JSONPath.paths(list);
Assert.assertEquals(5, paths.size());
Assert.assertSame(list, paths.get("/"));
Assert.assertEquals(1001, paths.get("/0"));
Assert.assertEquals("wenshao", paths.get("/1"));
Assert.assertSame(list.get(2), paths.get("/2"));
Assert.assertSame(((Map)list.get(2)).get("type"), paths.get("/2/type"));
}
}
| JSONPath_paths_test4 |
java | FasterXML__jackson-core | src/test/java/perf/MediaItem.java | {
"start": 1937,
"end": 3367
} | class ____
{
private String _uri;
private String _title;
private int _width;
private int _height;
private Size _size;
public Photo() {}
public Photo(String uri, String title, int w, int h, Size s)
{
_uri = uri;
_title = title;
_width = w;
_height = h;
_size = s;
}
public String getUri() { return _uri; }
public String getTitle() { return _title; }
public int getWidth() { return _width; }
public int getHeight() { return _height; }
public Size getSize() { return _size; }
public void setUri(String u) { _uri = u; }
public void setTitle(String t) { _title = t; }
public void setWidth(int w) { _width = w; }
public void setHeight(int h) { _height = h; }
public void setSize(Size s) { _size = s; }
public void write(JsonGenerator gen) throws IOException
{
gen.writeStartObject();
gen.writeStringProperty("uri", _uri);
gen.writeStringProperty("title", _title);
gen.writeNumberProperty("width", _width);
gen.writeNumberProperty("height", _height);
if (_size == null) {
gen.writeNullProperty("size");
} else {
gen.writeStringProperty("size", _size.name());
}
gen.writeEndObject();
}
}
public static | Photo |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/util/reflection/MemberAccessorTest.java | {
"start": 602,
"end": 4695
} | class ____ {
@Parameterized.Parameters
public static Collection<Object[]> data() {
List<Object[]> data = new ArrayList<>();
data.add(new Object[] {new ReflectionMemberAccessor()});
data.add(new Object[] {new ModuleMemberAccessor()});
return data;
}
private final MemberAccessor accessor;
public MemberAccessorTest(MemberAccessor accessor) {
this.accessor = accessor;
}
@Test
public void test_read_field() throws Exception {
assertThat(accessor.get(Sample.class.getDeclaredField("field"), new Sample("foo")))
.isEqualTo("foo");
}
@Test
public void test_read_static_field() throws Exception {
Sample.staticField = "foo";
assertThat(accessor.get(Sample.class.getDeclaredField("staticField"), null))
.isEqualTo("foo");
}
@Test
public void test_write_field() throws Exception {
Sample sample = new Sample("foo");
accessor.set(Sample.class.getDeclaredField("field"), sample, "bar");
assertThat(sample.field).isEqualTo("bar");
}
@Test
public void test_write_static_field() throws Exception {
Sample.staticField = "foo";
accessor.set(Sample.class.getDeclaredField("staticField"), null, "bar");
assertThat(Sample.staticField).isEqualTo("bar");
}
@Test
public void test_invoke() throws Exception {
assertThat(
accessor.invoke(
Sample.class.getDeclaredMethod("test", String.class),
new Sample(null),
"foo"))
.isEqualTo("foo");
}
@Test
public void test_invoke_invocation_exception() {
assertThatThrownBy(
() ->
accessor.invoke(
Sample.class.getDeclaredMethod("test", String.class),
new Sample(null),
"exception"))
.isInstanceOf(InvocationTargetException.class);
}
@Test
public void test_invoke_illegal_arguments() {
assertThatThrownBy(
() ->
accessor.invoke(
Sample.class.getDeclaredMethod("test", String.class),
new Sample(null),
42))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void test_new_instance() throws Exception {
assertThat(accessor.newInstance(Sample.class.getDeclaredConstructor(String.class), "foo"))
.isInstanceOf(Sample.class);
}
@Test
public void test_new_instance_illegal_arguments() {
assertThatThrownBy(
() ->
accessor.newInstance(
Sample.class.getDeclaredConstructor(String.class), 42))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void test_new_instance_invocation_exception() {
assertThatThrownBy(
() ->
accessor.newInstance(
Sample.class.getDeclaredConstructor(String.class),
"exception"))
.isInstanceOf(InvocationTargetException.class);
}
@Test
public void test_new_instance_instantiation_exception() {
assertThatThrownBy(
() -> accessor.newInstance(AbstractSample.class.getDeclaredConstructor()))
.isInstanceOf(InstantiationException.class);
}
@Test
public void test_set_final_field() throws Exception {
Sample sample = new Sample("foo");
accessor.set(Sample.class.getDeclaredField("finalField"), sample, "foo");
assertThat(sample.finalField).isEqualTo("foo");
}
private static | MemberAccessorTest |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/autoconfigure/actuate/endpoint/web/WebFluxHealthEndpointAdditionalPathIntegrationTests.java | {
"start": 2844,
"end": 4070
} | class ____ extends
AbstractHealthEndpointAdditionalPathIntegrationTests<ReactiveWebApplicationContextRunner, ConfigurableReactiveWebApplicationContext, AssertableReactiveWebApplicationContext> {
WebFluxHealthEndpointAdditionalPathIntegrationTests() {
super(new ReactiveWebApplicationContextRunner(AnnotationConfigReactiveWebServerApplicationContext::new)
.withConfiguration(AutoConfigurations.of(JacksonAutoConfiguration.class, CodecsAutoConfiguration.class,
WebFluxAutoConfiguration.class, HealthContributorAutoConfiguration.class,
HealthContributorRegistryAutoConfiguration.class, HttpHandlerAutoConfiguration.class,
EndpointAutoConfiguration.class, HealthEndpointAutoConfiguration.class,
WebFluxHealthEndpointExtensionAutoConfiguration.class,
DiskSpaceHealthContributorAutoConfiguration.class, WebEndpointAutoConfiguration.class,
ManagementContextAutoConfiguration.class, NettyReactiveWebServerAutoConfiguration.class,
NettyReactiveManagementContextAutoConfiguration.class, BeansEndpointAutoConfiguration.class))
.withInitializer(new ServerPortInfoApplicationContextInitializer())
.withPropertyValues("server.port=0"));
}
}
| WebFluxHealthEndpointAdditionalPathIntegrationTests |
java | spring-projects__spring-security | access/src/test/java/org/springframework/security/access/annotation/Jsr250MethodSecurityMetadataSourceTests.java | {
"start": 7908,
"end": 8170
} | class ____ implements IParent {
@Override
public void interfaceMethod() {
}
public void notOverriden() {
}
public void overriden() {
}
@RolesAllowed("OVERRIDENIGNORED")
public void overridenIgnored() {
}
}
@RolesAllowed("DERIVED")
| Parent |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/annotation/FunctionHint.java | {
"start": 4978,
"end": 9944
} | interface ____ {
// Note to implementers:
// Because "null" is not supported as an annotation value. Every annotation parameter *must*
// have some representation for unknown values in order to merge multi-level annotations.
/**
* Explicitly lists the argument types that a function takes as input.
*
* <p>By default, explicit input types are undefined and the reflection-based extraction is
* used.
*
* <p>Note: Specifying the input arguments manually disables the entire reflection-based
* extraction around arguments. This means that also {@link #isVarArgs()} needs to be specified
* manually if required.
*
* <p>Use {@link #arguments()} for more control about argument names and argument kinds.
*/
DataTypeHint[] input() default @DataTypeHint();
/**
* Defines that the last argument type defined in {@link #input()} should be treated as a
* variable-length argument.
*
* <p>By default, if {@link #input()} is defined, the last argument type is not a var-arg. If
* {@link #input()} is not defined, the reflection-based extraction is used to decide about the
* var-arg flag, thus, this parameter is ignored.
*/
boolean isVarArgs() default false;
/**
* Explicitly lists the arguments that a function takes as input. Including their names, data
* types, kinds, and whether they are optional.
*
* <p>It is recommended to use this parameter instead of {@link #input()}. Using both {@link
* #input()} and this parameter is not allowed. Specifying the list of arguments manually
* disables the entire reflection-based extraction around arguments.
*/
ArgumentHint[] arguments() default {};
/**
* Explicitly defines the intermediate result type (i.e. state entry) that an aggregating
* function uses as its accumulator. The entry is managed by the framework (usually via Flink's
* managed state).
*
* <p>By default, an explicit accumulator type is undefined and the reflection-based extraction
* is used.
*
* <p>This parameter is primarily intended for aggregating functions (i.e. {@link
* AggregateFunction} and {@link TableAggregateFunction}). It is recommended to use {@link
* #state()} for {@link ProcessTableFunction}.
*/
DataTypeHint accumulator() default @DataTypeHint();
/**
* Explicitly lists the intermediate results (i.e. state entries) of a function that is managed
* by the framework (i.e. Flink managed state). Including their names and data types.
*
* <p>State hints are primarily intended for {@link ProcessTableFunction}. A PTF supports
* multiple state entries at the beginning of an eval()/onTimer() method (after an optional
* context parameter).
*
* <p>Aggregating functions (i.e. {@link AggregateFunction} and {@link TableAggregateFunction})
* support a single state entry at the beginning of an accumulate()/retract() method (i.e. the
* accumulator).
*
* <p>By default, explicit state is undefined and the reflection-based extraction is used where
* {@link StateHint} is present.
*
* <p>Using both {@link #accumulator()} and this parameter is not allowed. Specifying the list
* of state entries manually disables the entire reflection-based extraction around {@link
* StateHint} and accumulators for aggregating functions.
*/
StateHint[] state() default {};
/**
* Explicitly defines the result type that a function uses as output.
*
* <p>By default, an explicit output type is undefined and the reflection-based extraction is
* used.
*/
DataTypeHint output() default @DataTypeHint();
// --------------------------------------------------------------------------------------------
// Legacy
// --------------------------------------------------------------------------------------------
/**
* Explicitly lists the argument names that a function takes as input.
*
* <p>By default, if {@link #input()} is defined, explicit argument names are undefined and this
* parameter can be used to provide argument names. If {@link #input()} is not defined, the
* reflection-based extraction is used, thus, this parameter is ignored.
*
* @deprecated Use {@link #arguments()} instead.
*/
@Deprecated
String[] argumentNames() default {""};
/**
* Explicitly lists the arguments that a function takes as input. Including their names, data
* types, kinds, and whether they are optional.
*
* <p>It is recommended to use this parameter instead of {@link #input()}. Specifying the list
* of arguments manually disables the entire reflection-based extraction around arguments.
*
* @deprecated Use {@link #arguments()} instead.
*/
@Deprecated
ArgumentHint[] argument() default {};
}
| FunctionHint |
java | google__guice | core/src/com/google/inject/spi/Elements.java | {
"start": 3230,
"end": 6269
} | class ____ {
private static final BindingTargetVisitor<Object, Object> GET_INSTANCE_VISITOR =
new DefaultBindingTargetVisitor<Object, Object>() {
@Override
public Object visit(InstanceBinding<?> binding) {
return binding.getInstance();
}
@Override
protected Object visitOther(Binding<?> binding) {
throw new IllegalArgumentException();
}
};
/** Records the elements executed by {@code modules}. */
public static List<Element> getElements(Module... modules) {
return getElements(Stage.DEVELOPMENT, Arrays.asList(modules));
}
/** Records the elements executed by {@code modules}. */
public static List<Element> getElements(Stage stage, Module... modules) {
return getElements(stage, Arrays.asList(modules));
}
/** Records the elements executed by {@code modules}. */
public static List<Element> getElements(Iterable<? extends Module> modules) {
return getElements(Stage.DEVELOPMENT, modules);
}
/** Records the elements executed by {@code modules}. */
public static List<Element> getElements(Stage stage, Iterable<? extends Module> modules) {
RecordingBinder binder = new RecordingBinder(stage);
for (Module module : modules) {
binder.install(module);
}
binder.scanForAnnotatedMethods();
for (RecordingBinder child : binder.privateBindersForScanning) {
child.scanForAnnotatedMethods();
}
binder.permitMapConstruction.finish();
// Free the memory consumed by the stack trace elements cache
StackTraceElements.clearCache();
return Collections.unmodifiableList(binder.elements);
}
/** Returns a list of the top-level modules installed by the input {@code module}. */
public static List<Module> getInstalledModules(Stage stage, Module module) {
if (module.equals(Modules.EMPTY_MODULE)) {
return ImmutableList.of();
}
RecordTopLevelModulesBinder binder = new RecordTopLevelModulesBinder(stage);
binder.record(module);
return ImmutableList.copyOf(binder.topLevelModules);
}
// TODO(user): Consider moving the RecordingBinder to com.google.inject.internal and removing these
// internal 'friend' methods.
/**
* Internal version of Binder.withSource for establishing a trusted ElementSource chain for
* source-restricting bindings that are re-written using {@link Element#applyTo}.
*
* <p>Using Binder.withSource is not trustworthy because it's a public API that external users can
* use to spoof the original ElementSource of a binding by calling withSource(bogusElementSource).
*
* @since 5.0
*/
public static Binder withTrustedSource(
GuiceInternal guiceInternal, Binder binder, Object source) {
checkNotNull(guiceInternal);
if (binder instanceof RecordingBinder) {
return ((RecordingBinder) binder).withTrustedSource(source);
}
// Preserve existing (untrusted) behavior for non-standard Binder implementations.
return binder.withSource(source);
}
private static | Elements |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/assignability/generics/RawTypeAssignabilityTest.java | {
"start": 482,
"end": 1186
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(MyProducer.class, MyConsumer.class, Foo.class);
@Test
public void testAssignabilityWithRawType() {
ArcContainer container = Arc.container();
MyConsumer consumer = container.instance(MyConsumer.class).get();
Assertions.assertEquals(String.class.toString(), consumer.pingRaw());
Assertions.assertEquals(String.class.toString(), consumer.pingObject());
Assertions.assertEquals(Long.class.toString(), consumer.pingLong());
Assertions.assertEquals(Long.class.toString(), consumer.pingWild());
}
@ApplicationScoped
static | RawTypeAssignabilityTest |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConversionServiceDeducerTests.java | {
"start": 6890,
"end": 7177
} | class ____ {
@Bean
@ConfigurationPropertiesBinding
static Printer<InputStream> inputStreamPrinter() {
return (source, locale) -> ThrowingSupplier
.of(() -> StreamUtils.copyToString(source, StandardCharsets.UTF_8))
.get();
}
}
private static final | PrinterConfiguration |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/pattern/NoConsoleNoAnsiTest.java | {
"start": 1520,
"end": 2458
} | class ____ {
private static final String EXPECTED =
"ERROR LoggerTest o.a.l.l.c.p.NoConsoleNoAnsiTest org.apache.logging.log4j.core.pattern.NoConsoleNoAnsiTest"
+ Strings.LINE_SEPARATOR;
private Logger logger;
private ListAppender app;
@BeforeEach
void setUp(final LoggerContext context, @Named("List") final ListAppender app) {
this.logger = context.getLogger("LoggerTest");
this.app = app.clear();
}
@Test
void testReplacement() {
logger.error(this.getClass().getName());
final List<String> msgs = app.getMessages();
assertNotNull(msgs);
assertEquals(1, msgs.size(), "Incorrect number of messages. Should be 1 is " + msgs.size());
assertTrue(
msgs.get(0).endsWith(EXPECTED),
"Replacement failed - expected ending " + EXPECTED + ", actual " + msgs.get(0));
}
}
| NoConsoleNoAnsiTest |
java | apache__maven | impl/maven-di/src/test/java/org/apache/maven/di/impl/InjectorImplTest.java | {
"start": 15448,
"end": 16339
} | class ____ implements MyService {}
}
@Test
void testDisposeClearsBindingsAndCache() {
final Injector injector = Injector.create()
// bind two simple beans
.bindImplicit(DisposeTest.Foo.class)
.bindImplicit(DisposeTest.Bar.class);
// make sure they really get created
assertNotNull(injector.getInstance(DisposeTest.Foo.class));
assertNotNull(injector.getInstance(DisposeTest.Bar.class));
// now dispose
injector.dispose();
// after dispose, bindings should be gone => DIException on lookup
assertThrows(DIException.class, () -> injector.getInstance(DisposeTest.Foo.class));
assertThrows(DIException.class, () -> injector.getInstance(DisposeTest.Bar.class));
}
/**
* Simple test classes for dispose().
*/
static | DefaultPriorityServiceImpl |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/ListaggFunction.java | {
"start": 1336,
"end": 4555
} | class ____ extends AbstractSqmSelfRenderingFunctionDescriptor {
private final String emptyWithinReplacement;
public ListaggFunction(String emptyWithinReplacement, TypeConfiguration typeConfiguration) {
super(
"listagg",
FunctionKind.ORDERED_SET_AGGREGATE,
new ArgumentTypesValidator( StandardArgumentsValidators.exactly( 2 ), STRING, STRING ),
StandardFunctionReturnTypeResolvers.invariant(
typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.STRING )
),
StandardFunctionArgumentTypeResolvers.invariant( typeConfiguration, STRING, STRING )
);
this.emptyWithinReplacement = emptyWithinReplacement;
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
render( sqlAppender, sqlAstArguments, null, Collections.emptyList(), returnType, walker );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
Predicate filter,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
render( sqlAppender, sqlAstArguments, filter, Collections.emptyList(), returnType, walker );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
Predicate filter,
List<SortSpecification> withinGroup,
ReturnableType<?> returnType,
SqlAstTranslator<?> translator) {
final boolean caseWrapper = filter != null && !filterClauseSupported( translator );
sqlAppender.appendSql( "listagg(" );
final SqlAstNode firstArg = sqlAstArguments.get( 0 );
final Expression arg;
if ( firstArg instanceof Distinct distinct ) {
sqlAppender.appendSql( "distinct " );
arg = distinct.getExpression();
}
else {
arg = (Expression) firstArg;
}
if ( caseWrapper ) {
sqlAppender.appendSql( "case when " );
translator.getCurrentClauseStack().push( Clause.WHERE );
filter.accept( translator );
translator.getCurrentClauseStack().pop();
sqlAppender.appendSql( " then " );
arg.accept( translator );
sqlAppender.appendSql( " else null end" );
}
else {
arg.accept( translator );
}
if ( sqlAstArguments.size() != 1 ) {
sqlAppender.appendSql( ',' );
sqlAstArguments.get( 1 ).accept( translator );
}
sqlAppender.appendSql( ')' );
if ( withinGroup != null && !withinGroup.isEmpty() ) {
translator.getCurrentClauseStack().push( Clause.WITHIN_GROUP );
sqlAppender.appendSql( " within group (order by " );
withinGroup.get( 0 ).accept( translator );
for ( int i = 1; i < withinGroup.size(); i++ ) {
sqlAppender.appendSql( ',' );
withinGroup.get( i ).accept( translator );
}
sqlAppender.appendSql( ')' );
translator.getCurrentClauseStack().pop();
}
else if ( emptyWithinReplacement != null ) {
sqlAppender.appendSql( ' ' );
sqlAppender.appendSql( emptyWithinReplacement );
}
if ( !caseWrapper && filter != null ) {
translator.getCurrentClauseStack().push( Clause.WHERE );
sqlAppender.appendSql( " filter (where " );
filter.accept( translator );
sqlAppender.appendSql( ')' );
translator.getCurrentClauseStack().pop();
}
}
}
| ListaggFunction |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2913/Issue2913Mapper.java | {
"start": 368,
"end": 889
} | interface ____ {
Issue2913Mapper INSTANCE = Mappers.getMapper( Issue2913Mapper.class );
@Mapping(target = "doublePrimitiveValue", source = "rounding")
@Mapping(target = "doubleValue", source = "rounding")
@Mapping(target = "longPrimitiveValue", source = "rounding")
@Mapping(target = "longValue", source = "rounding")
Target map(Source source);
default Long mapAmount(BigDecimal amount) {
return amount != null ? amount.movePointRight( 2 ).longValue() : null;
}
| Issue2913Mapper |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/body/ReactiveByteBufferByteBody.java | {
"start": 1705,
"end": 4370
} | class ____ extends BaseStreamingByteBody<ReactiveByteBufferByteBody.SharedBuffer> implements CloseableByteBody {
public ReactiveByteBufferByteBody(SharedBuffer sharedBuffer) {
this(sharedBuffer, sharedBuffer.getRootUpstream());
}
private ReactiveByteBufferByteBody(SharedBuffer sharedBuffer, BufferConsumer.Upstream upstream) {
super(sharedBuffer, upstream);
}
@Override
public BufferConsumer.Upstream primary(BufferConsumer primary) {
BufferConsumer.Upstream upstream = this.upstream;
if (upstream == null) {
failClaim();
}
recordPrimaryOp();
this.upstream = null;
BaseSharedBuffer.logClaim();
sharedBuffer.subscribe(primary, upstream);
return upstream;
}
@Override
protected ReactiveByteBufferByteBody derive(BufferConsumer.Upstream upstream) {
return new ReactiveByteBufferByteBody(sharedBuffer, upstream);
}
@Override
public @NonNull CloseableByteBody split(@NonNull SplitBackpressureMode backpressureMode) {
BufferConsumer.Upstream upstream = this.upstream;
if (upstream == null) {
failClaim();
}
UpstreamBalancer.UpstreamPair pair = UpstreamBalancer.balancer(upstream, backpressureMode);
this.upstream = pair.left();
this.sharedBuffer.reserve();
return new ReactiveByteBufferByteBody(sharedBuffer, pair.right());
}
@Override
public @NonNull ExecutionFlow<? extends CloseableAvailableByteBody> bufferFlow() {
BufferConsumer.Upstream upstream = this.upstream;
if (upstream == null) {
failClaim();
}
recordPrimaryOp();
this.upstream = null;
BaseSharedBuffer.logClaim();
upstream.start();
upstream.onBytesConsumed(Long.MAX_VALUE);
return sharedBuffer.subscribeFull(upstream).map(AvailableByteArrayBody::create);
}
@Override
public void close() {
BufferConsumer.Upstream upstream = this.upstream;
if (upstream == null) {
return;
}
recordClosed();
this.upstream = null;
BaseSharedBuffer.logClaim();
upstream.allowDiscard();
upstream.disregardBackpressure();
upstream.start();
sharedBuffer.subscribe(null, upstream);
}
/**
* Simple implementation of {@link BaseSharedBuffer} that consumes {@link ByteBuffer}s.<br>
* Buffering is done using a {@link ByteArrayOutputStream}. Concurrency control is done through
* a non-reentrant lock based on {@link AtomicReference}.
*/
public static final | ReactiveByteBufferByteBody |
java | apache__camel | components/camel-mllp/src/test/java/org/apache/camel/test/stub/camel/MllpEndpointStub.java | {
"start": 1019,
"end": 1451
} | class ____ extends MllpEndpoint {
public MllpEndpointStub() {
this("mllp://endpoint-stub:1234", new MllpComponent());
}
public MllpEndpointStub(String uri, MllpComponent component) {
this(uri, component, new MllpConfiguration());
}
public MllpEndpointStub(String uri, MllpComponent component, MllpConfiguration configuration) {
super(uri, component, configuration);
}
}
| MllpEndpointStub |
java | quarkusio__quarkus | extensions/security-webauthn/runtime/src/main/java/io/quarkus/security/webauthn/WebAuthnRunTimeConfig.java | {
"start": 5011,
"end": 5532
} | enum ____ {
DISCOURAGED,
PREFERRED,
REQUIRED;
ResidentKeyRequirement toWebAuthn4J() {
switch (this) {
case DISCOURAGED:
return ResidentKeyRequirement.DISCOURAGED;
case PREFERRED:
return ResidentKeyRequirement.PREFERRED;
case REQUIRED:
return ResidentKeyRequirement.REQUIRED;
default:
throw new IllegalStateException("Illegal | ResidentKey |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/schedulers/Schedulers.java | {
"start": 10029,
"end": 14095
} | class ____ been initialized, you can override the returned {@code Scheduler} instance
* via the {@link RxJavaPlugins#setIoSchedulerHandler(io.reactivex.rxjava3.functions.Function)} method.
* <p>
* It is possible to create a fresh instance of this scheduler with a custom {@link ThreadFactory}, via the
* {@link RxJavaPlugins#createIoScheduler(ThreadFactory)} method. Note that such custom
* instances require a manual call to {@link Scheduler#shutdown()} to allow the JVM to exit or the
* (J2EE) container to unload properly.
* <p>Operators on the base reactive classes that use this scheduler are marked with the
* @{@link io.reactivex.rxjava3.annotations.SchedulerSupport SchedulerSupport}({@link io.reactivex.rxjava3.annotations.SchedulerSupport#IO IO})
* annotation.
* <p>
* When the {@link io.reactivex.rxjava3.core.Scheduler.Worker Scheduler.Worker} is disposed,
* the underlying worker can be released to the cached worker pool in two modes:
* <ul>
* <li>In <em>eager</em> mode (default), the underlying worker is returned immediately to the cached worker pool
* and can be reused much quicker by operators. The drawback is that if the currently running task doesn't
* respond to interruption in time or at all, this may lead to delays or deadlock with the reuse use of the
* underlying worker.
* </li>
* <li>In <em>scheduled</em> mode (enabled via the system parameter {@code rx3.io-scheduled-release}
* set to {@code true}), the underlying worker is returned to the cached worker pool only after the currently running task
* has finished. This can help prevent premature reuse of the underlying worker and likely won't lead to delays or
* deadlock with such reuses. The drawback is that the delay in release may lead to an excess amount of underlying
* workers being created.
* </li>
* </ul>
* @return a {@code Scheduler} meant for IO-bound work
*/
@NonNull
public static Scheduler io() {
return RxJavaPlugins.onIoScheduler(IO);
}
/**
* Returns a default, shared {@link Scheduler} instance whose {@link io.reactivex.rxjava3.core.Scheduler.Worker}
* instances queue work and execute them in a FIFO manner on one of the participating threads.
* <p>
* The default implementation's {@link Scheduler#scheduleDirect(Runnable)} methods execute the tasks on the current thread
* without any queueing and the timed overloads use blocking sleep as well.
* <p>
* Note that this scheduler can't be reliably used to return the execution of
* tasks to the "main" thread. Such behavior requires a blocking-queueing scheduler currently not provided
* by RxJava itself but may be found in external libraries.
* <p>
* This scheduler can't be overridden via an {@link RxJavaPlugins} method.
* @return a {@code Scheduler} that queues work on the current thread
*/
@NonNull
public static Scheduler trampoline() {
return TRAMPOLINE;
}
/**
* Returns a default, shared {@link Scheduler} instance that creates a new {@link Thread} for each unit of work.
* <p>
* The default implementation of this scheduler creates a new, single-threaded {@link ScheduledExecutorService} for
* each invocation of the {@link Scheduler#scheduleDirect(Runnable)} (plus its overloads) and {@link Scheduler#createWorker()}
* methods, thus an unbounded number of worker threads may be created that can
* result in system slowdowns or {@link OutOfMemoryError}. Therefore, for casual uses or when implementing an operator,
* the Worker instances must be disposed via {@link io.reactivex.rxjava3.core.Scheduler.Worker#dispose()}.
* <p>
* Unhandled errors will be delivered to the scheduler Thread's {@link java.lang.Thread.UncaughtExceptionHandler}.
* <p>
* You can control certain properties of this standard scheduler via system properties that have to be set
* before the {@code Schedulers} | has |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3200/Issue3266.java | {
"start": 219,
"end": 531
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
VO vo = new VO();
vo.type = Color.Black;
String str = JSON.toJSONString(vo);
assertEquals("{\"type\":1003}", str);
VO vo2 = JSON.parseObject(str, VO.class);
}
public static | Issue3266 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/property/FieldMappingWithGetterAndIsTest.java | {
"start": 713,
"end": 1054
} | class ____ {
@Test
public void testResolution(DomainModelScope modelScope, SessionFactoryScope factoryScope) {
final PersistentClass entityBinding = modelScope.getEntityBinding( Tester.class );
factoryScope.getCollectingStatementInspector();
}
@Entity(name="Tester")
@Table(name="Tester")
public static | FieldMappingWithGetterAndIsTest |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/util/ConcurrentLruCacheTests.java | {
"start": 858,
"end": 3939
} | class ____ {
private final ConcurrentLruCache<String, String> cache = new ConcurrentLruCache<>(2, key -> key + "value");
@Test
void zeroCapacity() {
ConcurrentLruCache<String, String> cache = new ConcurrentLruCache<>(0, key -> key + "value");
assertThat(cache.capacity()).isZero();
assertThat(cache.size()).isZero();
assertThat(cache.get("k1")).isEqualTo("k1value");
assertThat(cache.size()).isZero();
assertThat(cache.contains("k1")).isFalse();
assertThat(cache.get("k2")).isEqualTo("k2value");
assertThat(cache.size()).isZero();
assertThat(cache.contains("k1")).isFalse();
assertThat(cache.contains("k2")).isFalse();
assertThat(cache.get("k3")).isEqualTo("k3value");
assertThat(cache.size()).isZero();
assertThat(cache.contains("k1")).isFalse();
assertThat(cache.contains("k2")).isFalse();
assertThat(cache.contains("k3")).isFalse();
}
@Test
void getAndSize() {
assertThat(this.cache.capacity()).isEqualTo(2);
assertThat(this.cache.size()).isEqualTo(0);
assertThat(this.cache.get("k1")).isEqualTo("k1value");
assertThat(this.cache.size()).isEqualTo(1);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.get("k2")).isEqualTo("k2value");
assertThat(this.cache.size()).isEqualTo(2);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.contains("k2")).isTrue();
assertThat(this.cache.get("k3")).isEqualTo("k3value");
assertThat(this.cache.size()).isEqualTo(2);
assertThat(this.cache.contains("k1")).isFalse();
assertThat(this.cache.contains("k2")).isTrue();
assertThat(this.cache.contains("k3")).isTrue();
}
@Test
void removeAndSize() {
assertThat(this.cache.get("k1")).isEqualTo("k1value");
assertThat(this.cache.get("k2")).isEqualTo("k2value");
assertThat(this.cache.size()).isEqualTo(2);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.contains("k2")).isTrue();
this.cache.remove("k2");
assertThat(this.cache.size()).isEqualTo(1);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.contains("k2")).isFalse();
assertThat(this.cache.get("k3")).isEqualTo("k3value");
assertThat(this.cache.size()).isEqualTo(2);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.contains("k2")).isFalse();
assertThat(this.cache.contains("k3")).isTrue();
}
@Test
void clearAndSize() {
assertThat(this.cache.get("k1")).isEqualTo("k1value");
assertThat(this.cache.get("k2")).isEqualTo("k2value");
assertThat(this.cache.size()).isEqualTo(2);
assertThat(this.cache.contains("k1")).isTrue();
assertThat(this.cache.contains("k2")).isTrue();
this.cache.clear();
assertThat(this.cache.size()).isEqualTo(0);
assertThat(this.cache.contains("k1")).isFalse();
assertThat(this.cache.contains("k2")).isFalse();
assertThat(this.cache.get("k3")).isEqualTo("k3value");
assertThat(this.cache.size()).isEqualTo(1);
assertThat(this.cache.contains("k1")).isFalse();
assertThat(this.cache.contains("k2")).isFalse();
assertThat(this.cache.contains("k3")).isTrue();
}
}
| ConcurrentLruCacheTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/SealedTypesWithPolymorphicDeductionTest.java | {
"start": 1609,
"end": 1741
} | class ____ {
@JsonValue
public String ser = "value";
}
@JsonTypeInfo(use = JsonTypeInfo.Id.DEDUCTION)
static | Bean3711 |
java | apache__camel | components/camel-undertow/src/test/java/org/apache/camel/component/undertow/UndertowHttpProducerSessionTest.java | {
"start": 1405,
"end": 5431
} | class ____ extends CamelTestSupport {
private static volatile int port;
@BindToRegistry("instanceCookieHandler")
private InstanceCookieHandler instanceCookieHandler = new InstanceCookieHandler();
@BindToRegistry("exchangeCookieHandler")
private ExchangeCookieHandler exchangeCookieHandler = new ExchangeCookieHandler();
@BeforeAll
public static void initPort() {
port = AvailablePortFinder.getNextAvailable();
}
@Test
public void testNoSession() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("New New World", "New New World");
template.sendBody("direct:start", "World");
template.sendBody("direct:start", "World");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testInstanceSession() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Old New World", "Old Old World");
template.sendBody("direct:instance", "World");
template.sendBody("direct:instance", "World");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testExchangeSession() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Old New World", "Old New World");
template.sendBody("direct:exchange", "World");
template.sendBody("direct:exchange", "World");
MockEndpoint.assertIsSatisfied(context);
}
private String getTestServerEndpointSessionUrl() {
// session handling will not work for localhost
return "http://127.0.0.1:" + port + "/session";
}
private String getTestServerEndpointSessionUri() {
return "undertow:" + getTestServerEndpointSessionUrl() + "?sessionSupport=true";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.to("undertow:" + getTestServerEndpointSessionUrl())
.to("undertow:" + getTestServerEndpointSessionUrl())
.to("mock:result");
from("direct:instance")
.to("undertow:" + getTestServerEndpointSessionUrl() + "?cookieHandler=#instanceCookieHandler")
.to("undertow:" + getTestServerEndpointSessionUrl() + "?cookieHandler=#instanceCookieHandler")
.to("mock:result");
from("direct:exchange")
.to("undertow:" + getTestServerEndpointSessionUrl() + "?cookieHandler=#exchangeCookieHandler")
.to("undertow:" + getTestServerEndpointSessionUrl() + "?cookieHandler=#exchangeCookieHandler")
.to("mock:result");
from(getTestServerEndpointSessionUri())
.process(new Processor() {
@Override
public void process(Exchange exchange) {
Message message = exchange.getIn();
String body = message.getBody(String.class);
// Undertow servers do not support sessions or
// cookies, so we fake them
if (message.getHeader("Cookie") != null
&& message.getHeader("Cookie", String.class).contains("JSESSIONID")) {
message.setBody("Old " + body);
} else {
message.setHeader("Set-Cookie", "JSESSIONID=nxojb3aum8i5100j6lyvxdpn6;Path=/");
message.setHeader("Expires", "Thu, 01 Jan 1970 00:00:00 GMT");
message.setBody("New " + body);
}
}
});
}
};
}
}
| UndertowHttpProducerSessionTest |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/OuterStreamJoinStoreFactory.java | {
"start": 2108,
"end": 2443
} | class ____<K, V1, V2> extends AbstractConfigurableStoreFactory {
private final String name;
private final StreamJoinedInternal<K, V1, V2> streamJoined;
private final JoinWindows windows;
private final DslStoreSuppliers passedInDslStoreSuppliers;
private boolean loggingEnabled;
public | OuterStreamJoinStoreFactory |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLSomeExpr.java | {
"start": 1089,
"end": 3304
} | class ____ extends SQLExprImpl {
public SQLSelect subQuery;
public SQLSomeExpr() {
}
public SQLSomeExpr(SQLSelect select) {
this.setSubQuery(select);
}
public SQLSomeExpr clone() {
SQLSomeExpr x = new SQLSomeExpr();
if (subQuery != null) {
x.setSubQuery(subQuery.clone());
}
return x;
}
public SQLSelect getSubQuery() {
return this.subQuery;
}
public void setSubQuery(SQLSelect subQuery) {
if (subQuery != null) {
subQuery.setParent(this);
}
this.subQuery = subQuery;
}
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
if (this.subQuery != null) {
this.subQuery.accept(visitor);
}
}
visitor.endVisit(this);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((subQuery == null) ? 0 : subQuery.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SQLSomeExpr other = (SQLSomeExpr) obj;
if (subQuery == null) {
if (other.subQuery != null) {
return false;
}
} else if (!subQuery.equals(other.subQuery)) {
return false;
}
return true;
}
public SQLDataType computeDataType() {
if (subQuery == null) {
return null;
}
SQLSelectQueryBlock queryBlock = subQuery.getFirstQueryBlock();
if (queryBlock == null) {
return null;
}
List<SQLSelectItem> selectList = queryBlock.getSelectList();
if (selectList.size() == 1) {
return selectList.get(0).computeDataType();
}
return null;
}
@Override
public List<SQLObject> getChildren() {
return Collections.<SQLObject>singletonList(this.subQuery);
}
}
| SQLSomeExpr |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 33315,
"end": 34115
} | class ____ {",
" @Inject InnerClass() {}",
" }",
" }",
"}");
daggerCompiler(file)
.withProcessingOptions(ImmutableMap.of("dagger.privateMemberValidation", "WARNING"))
.compile(
subject -> {
subject.hasErrorCount(0);
subject.hasWarningCount(1);
subject.hasWarningContaining("Dagger does not support injection into private classes")
.onSource(file)
.onLine(8);
});
}
@Test public void finalInjectField() {
Source file =
CompilerTests.javaSource(
"test.FinalInjectField",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | InnerClass |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureResult.java | {
"start": 1047,
"end": 2585
} | class ____ extends SearchPhaseResult {
private RankFeatureShardResult rankShardResult;
public RankFeatureResult() {}
@SuppressWarnings("this-escape")
public RankFeatureResult(ShardSearchContextId id, SearchShardTarget shardTarget, ShardSearchRequest request) {
this.contextId = id;
setSearchShardTarget(shardTarget);
setShardSearchRequest(request);
}
@SuppressWarnings("this-escape")
public RankFeatureResult(StreamInput in) throws IOException {
contextId = new ShardSearchContextId(in);
rankShardResult = in.readOptionalWriteable(RankFeatureShardResult::new);
setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new));
setSearchShardTarget(in.readOptionalWriteable(SearchShardTarget::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
assert hasReferences();
contextId.writeTo(out);
out.writeOptionalWriteable(rankShardResult);
out.writeOptionalWriteable(getShardSearchRequest());
out.writeOptionalWriteable(getSearchShardTarget());
}
@Override
public RankFeatureResult rankFeatureResult() {
return this;
}
public void shardResult(RankFeatureShardResult shardResult) {
this.rankShardResult = shardResult;
}
public RankFeatureShardResult shardResult() {
return rankShardResult;
}
@Override
public boolean hasSearchContext() {
return rankShardResult != null;
}
}
| RankFeatureResult |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/TaskExecutionStateTransition.java | {
"start": 1209,
"end": 2794
} | class ____ {
private final TaskExecutionState taskExecutionState;
/**
* Indicating whether to send a RPC call to remove task from TaskManager. True if the failure is
* fired by JobManager and the execution is already deployed. Otherwise it should be false.
*/
private final boolean cancelTask;
private final boolean releasePartitions;
public TaskExecutionStateTransition(final TaskExecutionState taskExecutionState) {
this(taskExecutionState, false, false);
}
public TaskExecutionStateTransition(
final TaskExecutionState taskExecutionState,
final boolean cancelTask,
final boolean releasePartitions) {
this.taskExecutionState = checkNotNull(taskExecutionState);
this.cancelTask = cancelTask;
this.releasePartitions = releasePartitions;
}
public Throwable getError(ClassLoader userCodeClassloader) {
return taskExecutionState.getError(userCodeClassloader);
}
public ExecutionAttemptID getID() {
return taskExecutionState.getID();
}
public ExecutionState getExecutionState() {
return taskExecutionState.getExecutionState();
}
public AccumulatorSnapshot getAccumulators() {
return taskExecutionState.getAccumulators();
}
public IOMetrics getIOMetrics() {
return taskExecutionState.getIOMetrics();
}
public boolean getCancelTask() {
return cancelTask;
}
public boolean getReleasePartitions() {
return releasePartitions;
}
}
| TaskExecutionStateTransition |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/SortedSetOrdinalsBuilder.java | {
"start": 955,
"end": 6594
} | class ____ implements BlockLoader.SortedSetOrdinalsBuilder, Releasable, Block.Builder {
private final BlockFactory blockFactory;
private final SortedSetDocValues docValues;
private int minOrd = Integer.MAX_VALUE;
private int maxOrd = Integer.MIN_VALUE;
private int totalValueCount;
private final IntBlock.Builder ordsBuilder;
public SortedSetOrdinalsBuilder(BlockFactory blockFactory, SortedSetDocValues docValues, int count) {
this.blockFactory = blockFactory;
this.docValues = docValues;
this.ordsBuilder = blockFactory.newIntBlockBuilder(count).mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING);
}
@Override
public SortedSetOrdinalsBuilder appendNull() {
ordsBuilder.appendNull();
return this;
}
@Override
public SortedSetOrdinalsBuilder appendOrd(int ord) {
minOrd = Math.min(minOrd, ord);
maxOrd = Math.max(maxOrd, ord);
ordsBuilder.appendInt(ord);
totalValueCount++;
return this;
}
@Override
public SortedSetOrdinalsBuilder beginPositionEntry() {
ordsBuilder.beginPositionEntry();
return this;
}
@Override
public SortedSetOrdinalsBuilder endPositionEntry() {
ordsBuilder.endPositionEntry();
return this;
}
private BytesRefBlock buildBlock(IntBlock ordinals) {
final int numOrds = maxOrd - minOrd + 1;
final long breakerSize = arraySize(numOrds);
blockFactory.adjustBreaker(breakerSize);
BytesRefVector dict = null;
IntBlock mappedOrds = null;
try {
final int[] newOrds = new int[numOrds];
Arrays.fill(newOrds, -1);
for (int p = 0; p < ordinals.getPositionCount(); p++) {
int count = ordinals.getValueCount(p);
if (count > 0) {
int first = ordinals.getFirstValueIndex(p);
for (int i = 0; i < count; i++) {
int oldOrd = ordinals.getInt(first + i);
newOrds[oldOrd - minOrd] = 0;
}
}
}
int nextOrd = -1;
try (BytesRefVector.Builder dictBuilder = blockFactory.newBytesRefVectorBuilder(Math.min(newOrds.length, totalValueCount))) {
for (int i = 0; i < newOrds.length; i++) {
if (newOrds[i] != -1) {
newOrds[i] = ++nextOrd;
dictBuilder.appendBytesRef(docValues.lookupOrd(i + minOrd));
}
}
dict = dictBuilder.build();
} catch (IOException e) {
throw new UncheckedIOException("error resolving ordinals", e);
}
mappedOrds = remapOrdinals(ordinals, newOrds, minOrd);
final OrdinalBytesRefBlock result = new OrdinalBytesRefBlock(mappedOrds, dict);
dict = null;
mappedOrds = null;
return result;
} finally {
Releasables.close(() -> blockFactory.adjustBreaker(-breakerSize), mappedOrds, dict);
}
}
private IntBlock remapOrdinals(IntBlock ordinals, int[] newOrds, int shiftOrd) {
try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(totalValueCount)) {
for (int p = 0; p < ordinals.getPositionCount(); p++) {
int valueCount = ordinals.getValueCount(p);
switch (valueCount) {
case 0 -> builder.appendNull();
case 1 -> {
int ord = ordinals.getInt(ordinals.getFirstValueIndex(p));
builder.appendInt(newOrds[ord - shiftOrd]);
}
default -> {
int first = ordinals.getFirstValueIndex(p);
builder.beginPositionEntry();
int last = first + valueCount;
for (int i = first; i < last; i++) {
int ord = ordinals.getInt(i);
builder.appendInt(newOrds[ord - shiftOrd]);
}
builder.endPositionEntry();
}
}
}
builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING);
return builder.build();
}
}
@Override
public long estimatedBytes() {
/*
* This is a *terrible* estimate because we have no idea how big the
* values in the ordinals are.
*/
final int numOrds = minOrd <= maxOrd ? maxOrd - minOrd + 1 : 0;
return totalValueCount * 4L + Math.min(numOrds, totalValueCount) * 20L;
}
@Override
public BytesRefBlock build() {
try (IntBlock ordinals = ordsBuilder.build()) {
if (ordinals.areAllValuesNull()) {
return (BytesRefBlock) blockFactory.newConstantNullBlock(ordinals.getPositionCount());
}
return buildBlock(ordinals);
}
}
@Override
public void close() {
ordsBuilder.close();
}
@Override
public Block.Builder copyFrom(Block block, int beginInclusive, int endExclusive) {
throw new UnsupportedOperationException();
}
@Override
public Block.Builder mvOrdering(Block.MvOrdering mvOrdering) {
throw new UnsupportedOperationException();
}
private static long arraySize(int ordsCount) {
return RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) ordsCount * Integer.BYTES;
}
}
| SortedSetOrdinalsBuilder |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java | {
"start": 5235,
"end": 5535
} | class ____.
*
* @param <U> The type of the classes.
* @return An empty array of type <tt>Class<U></tt>.
*/
protected static <U> Class<U>[] emptyClassArray() {
@SuppressWarnings("unchecked")
Class<U>[] array = new Class[0];
return array;
}
}
| array |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1400/Issue1487.java | {
"start": 167,
"end": 589
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
Model model = new Model();
model._id = 1001L;
model.id = 1002L;
String json = JSON.toJSONString(model);
assertEquals("{\"_id\":1001,\"id\":1002}", json);
Model model1 = JSON.parseObject(json, Model.class);
assertEquals(json, JSON.toJSONString(model1));
}
public static | Issue1487 |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/devmode/RouteMethodDescription.java | {
"start": 55,
"end": 1979
} | class ____ {
private String javaMethod;
private String httpMethod;
private String fullPath;
private String produces;
private String consumes;
public RouteMethodDescription() {
super();
}
public RouteMethodDescription(String httpMethod, String fullPath, String produces, String consumes) {
super();
this.javaMethod = null;
this.httpMethod = httpMethod;
this.fullPath = fullPath;
this.produces = produces;
this.consumes = consumes;
}
public RouteMethodDescription(String javaMethod, String httpMethod, String fullPath, String produces, String consumes) {
super();
this.javaMethod = javaMethod;
this.httpMethod = httpMethod;
this.fullPath = fullPath;
this.produces = produces;
this.consumes = consumes;
}
public String getJavaMethod() {
return javaMethod;
}
public void setJavaMethod(String javaMethod) {
this.javaMethod = javaMethod;
}
public String getHttpMethod() {
return httpMethod;
}
public void setHttpMethod(String httpMethod) {
this.httpMethod = httpMethod;
}
public String getFullPath() {
return fullPath;
}
public void setFullPath(String fullPath) {
this.fullPath = fullPath;
}
public String getProduces() {
return produces;
}
public void setProduces(String produces) {
this.produces = produces;
}
public String getConsumes() {
return consumes;
}
public void setConsumes(String consumes) {
this.consumes = consumes;
}
@Override
public String toString() {
return "RouteMethodDescription{" + "javaMethod=" + javaMethod + ", httpMethod=" + httpMethod + ", fullPath=" + fullPath
+ ", produces=" + produces + ", consumes=" + consumes + '}';
}
}
| RouteMethodDescription |
java | elastic__elasticsearch | libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/policy/Java23ElasticsearchEntitlementChecker.java | {
"start": 602,
"end": 1146
} | class ____ extends ElasticsearchEntitlementChecker implements Java23EntitlementChecker {
public Java23ElasticsearchEntitlementChecker(PolicyChecker policyChecker) {
super(policyChecker);
}
@Override
public void check$java_lang_Runtime$exit(Class<?> callerClass, Runtime runtime, int status) {
// TODO: this is just an example, we shouldn't really override a method implemented in the superclass
super.check$java_lang_Runtime$exit(callerClass, runtime, status);
}
}
| Java23ElasticsearchEntitlementChecker |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/OracleIbatisSelectTest1.java | {
"start": 971,
"end": 2080
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "SELECT * FROM T WHERE F1 = $id$";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("T")));
assertEquals(1, visitor.getTables().size());
//assertEquals(2, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("bonuses", "employee_id")));
}
}
| OracleIbatisSelectTest1 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1005/Issue1005Test.java | {
"start": 2309,
"end": 2814
} | class ____ interface.")
})
public void shouldFailDueToInterfaceResultType() {
}
@WithClasses(Issue1005ErroneousInterfaceReturnTypeMapper.class)
@ProcessorTest
@ExpectedCompilationOutcome(value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = Issue1005ErroneousInterfaceReturnTypeMapper.class,
kind = javax.tools.Diagnostic.Kind.ERROR,
line = 16,
message = "The return type HasKey is an abstract | nor |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/util/introspection/FieldSupport.java | {
"start": 1352,
"end": 10927
} | enum ____ {
EXTRACTION(true), EXTRACTION_OF_PUBLIC_FIELD_ONLY(false), COMPARISON(true);
private static final String CHAR = "char";
private static final String BOOLEAN = "boolean";
private static final String DOUBLE = "double";
private static final String FLOAT = "float";
private static final String LONG = "long";
private static final String INT = "int";
private static final String SHORT = "short";
private static final String BYTE = "byte";
private static final String SEPARATOR = ".";
private boolean allowUsingPrivateFields;
/**
* Returns the instance dedicated to extraction of fields.
*
* @return the instance dedicated to extraction of fields.
*/
public static FieldSupport extraction() {
return EXTRACTION;
}
/**
* Returns the instance dedicated to comparison of fields.
*
* @return the instance dedicated to comparison of fields.
*/
public static FieldSupport comparison() {
return COMPARISON;
}
/**
* Build a new {@link FieldSupport}
*
* @param allowUsingPrivateFields whether to read private fields or not.
*/
FieldSupport(boolean allowUsingPrivateFields) {
this.allowUsingPrivateFields = allowUsingPrivateFields;
}
// TODO reduce the visibility of the fields annotated with @VisibleForTesting
public boolean isAllowedToUsePrivateFields() {
return allowUsingPrivateFields;
}
/**
* Sets whether the use of private fields is allowed.
* If a method tries to extract/compare private fields and is not allowed to, it will fail with an exception.
*
* @param allowUsingPrivateFields allow private fields extraction and comparison. Default {@code true}.
*/
public void setAllowUsingPrivateFields(boolean allowUsingPrivateFields) {
ConfigurationProvider.loadRegisteredConfiguration();
this.allowUsingPrivateFields = allowUsingPrivateFields;
}
/**
* Returns a <code>{@link List}</code> containing the values of the given field name, from the elements of the given
* <code>{@link Iterable}</code>. If the given {@code Iterable} is empty or {@code null}, this method will return an
* empty {@code List}. This method supports nested fields (e.g. "address.street.number").
*
* @param <T> the type of the extracted elements.
* @param fieldName the name of the field. It may be a nested field. It is left to the clients to validate for
* {@code null} or empty.
* @param fieldClass the expected type of the given field.
* @param target the given {@code Iterable}.
* @return an {@code Iterable} containing the values of the given field name, from the elements of the given
* {@code Iterable}.
* @throws IntrospectionError if an element in the given {@code Iterable} does not have a field with a matching name.
*/
public <T> List<T> fieldValues(String fieldName, Class<T> fieldClass, Iterable<?> target) {
if (isNullOrEmpty(target)) return emptyList();
if (isNestedField(fieldName)) {
String firstFieldName = popFieldNameFrom(fieldName);
Iterable<Object> fieldValues = fieldValues(firstFieldName, Object.class, target);
// extract next sub-field values until reaching the last sub-field
return fieldValues(nextFieldNameFrom(fieldName), fieldClass, fieldValues);
}
return simpleFieldValues(fieldName, fieldClass, target);
}
public List<Object> fieldValues(String fieldName, Iterable<?> target) {
return fieldValues(fieldName, Object.class, target);
}
/**
* Returns a <code>{@link List}</code> containing the values of the given field name, from the elements of the given
* <code>{@link Iterable}</code>. If the given {@code Iterable} is empty or {@code null}, this method will return an
* empty {@code List}. This method supports nested fields (e.g. "address.street.number").
*
* @param <T> the type of the extracted elements.
* @param fieldName the name of the field. It may be a nested field. It is left to the clients to validate for
* {@code null} or empty.
* @param fieldClass the expected type of the given field.
* @param target the given {@code Iterable}.
* @return an {@code Iterable} containing the values of the given field name, from the elements of the given
* {@code Iterable}.
* @throws IntrospectionError if an element in the given {@code Iterable} does not have a field with a matching name.
*/
public <T> List<T> fieldValues(String fieldName, Class<T> fieldClass, Object[] target) {
return fieldValues(fieldName, fieldClass, wrap(target));
}
private <T> List<T> simpleFieldValues(String fieldName, Class<T> clazz, Iterable<?> target) {
return stream(target).map(e -> e == null ? null : fieldValue(fieldName, clazz, e))
.collect(collectingAndThen(toList(), Collections::unmodifiableList));
}
private String popFieldNameFrom(String fieldNameChain) {
return isNestedField(fieldNameChain)
? fieldNameChain.substring(0, fieldNameChain.indexOf(SEPARATOR))
: fieldNameChain;
}
private String nextFieldNameFrom(String fieldNameChain) {
return isNestedField(fieldNameChain)
? fieldNameChain.substring(fieldNameChain.indexOf(SEPARATOR) + 1)
: "";
}
/*
* <pre><code class='java'> isNestedField("address.street"); // true
* isNestedField("address.street.name"); // true
* isNestedField("person"); // false
* isNestedField(".name"); // false
* isNestedField("person."); // false
* isNestedField("person.name."); // false
* isNestedField(".person.name"); // false
* isNestedField("."); // false
* isNestedField(""); // false</code></pre>
*/
private boolean isNestedField(String fieldName) {
return fieldName.contains(SEPARATOR) && !fieldName.startsWith(SEPARATOR) && !fieldName.endsWith(SEPARATOR);
}
/**
* Return the value of field from a target object. The field must not be static or synthetic (since 3.19.0).
* <p>
* Return null if field is nested and one of the nested value is null, ex :
* <pre><code class='java'> fieldValue(race.name, String.class, frodo); // will return null if frodo.race is null</code></pre>
*
* @param <T> the type of the extracted value.
* @param fieldName the name of the field. It may be a nested field. It is left to the clients to validate for
* {@code null} or empty.
* @param target the given object
* @param fieldClass type of field
* @return the value of the given field name
* @throws IntrospectionError if the given target does not have a field with a matching name.
*/
public <T> T fieldValue(String fieldName, Class<T> fieldClass, Object target) {
if (target == null) return null;
if (isNestedField(fieldName)) {
String outerFieldName = popFieldNameFrom(fieldName);
Object outerFieldValue = readSimpleField(outerFieldName, Object.class, target);
// extract next sub-field values until reaching the last sub-field
return fieldValue(nextFieldNameFrom(fieldName), fieldClass, outerFieldValue);
}
return readSimpleField(fieldName, fieldClass, target);
}
@SuppressWarnings("unchecked")
private <T> T readSimpleField(String fieldName, Class<T> clazz, Object target) {
try {
Object fieldValue = readField(target, fieldName, allowUsingPrivateFields);
if (clazz.isPrimitive()) {
switch (clazz.getSimpleName()) {
case BYTE:
Byte byteValue = (byte) fieldValue;
return (T) byteValue;
case SHORT:
Short shortValue = (short) fieldValue;
return (T) shortValue;
case INT:
Integer intValue = (int) fieldValue;
return (T) intValue;
case LONG:
Long longValue = (long) fieldValue;
return (T) longValue;
case FLOAT:
Float floatValue = (float) fieldValue;
return (T) floatValue;
case DOUBLE:
Double doubleValue = (double) fieldValue;
return (T) doubleValue;
case BOOLEAN:
Boolean booleanValue = (boolean) fieldValue;
return (T) booleanValue;
case CHAR:
Character charValue = (char) fieldValue;
return (T) charValue;
}
}
return clazz.cast(fieldValue);
} catch (ClassCastException e) {
String msg = "Unable to obtain the value of the field <'%s'> from <%s> - wrong field type specified <%s>".formatted(fieldName,
target,
clazz);
throw new IntrospectionError(msg, e);
} catch (IllegalAccessException iae) {
String msg = "Unable to obtain the value of the field <'%s'> from <%s>, check that field is public.".formatted(fieldName,
target);
throw new IntrospectionError(msg, iae);
} catch (Throwable unexpected) {
String msg = "Unable to obtain the value of the field <'%s'> from <%s>".formatted(fieldName, target);
throw new IntrospectionError(msg, unexpected);
}
}
public boolean isAllowedToRead(Field field) {
if (allowUsingPrivateFields) return true;
// only read public field
return isPublic(field.getModifiers());
}
}
| FieldSupport |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/single/SingleZipIterableTest.java | {
"start": 1224,
"end": 8311
} | class ____ extends RxJavaTest {
final Function<Object[], Object> addString = new Function<Object[], Object>() {
@Override
public Object apply(Object[] a) throws Exception {
return Arrays.toString(a);
}
};
@Test
public void firstError() {
Single.zip(Arrays.asList(Single.error(new TestException()), Single.just(1)), addString)
.test()
.assertFailure(TestException.class);
}
@Test
public void secondError() {
Single.zip(Arrays.asList(Single.just(1), Single.<Integer>error(new TestException())), addString)
.test()
.assertFailure(TestException.class);
}
@Test
public void dispose() {
PublishProcessor<Integer> pp = PublishProcessor.create();
TestObserver<Object> to = Single.zip(Arrays.asList(pp.single(0), pp.single(0)), addString)
.test();
assertTrue(pp.hasSubscribers());
to.dispose();
assertFalse(pp.hasSubscribers());
}
@Test
public void zipperThrows() {
Single.zip(Arrays.asList(Single.just(1), Single.just(2)), new Function<Object[], Object>() {
@Override
public Object apply(Object[] b) throws Exception {
throw new TestException();
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void zipperReturnsNull() {
Single.zip(Arrays.asList(Single.just(1), Single.just(2)), new Function<Object[], Object>() {
@Override
public Object apply(Object[] a) throws Exception {
return null;
}
})
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void middleError() {
PublishProcessor<Integer> pp0 = PublishProcessor.create();
PublishProcessor<Integer> pp1 = PublishProcessor.create();
TestObserver<Object> to = Single.zip(
Arrays.asList(pp0.single(0), pp1.single(0), pp0.single(0)), addString)
.test();
pp1.onError(new TestException());
assertFalse(pp0.hasSubscribers());
to.assertFailure(TestException.class);
}
@Test
public void innerErrorRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final PublishProcessor<Integer> pp0 = PublishProcessor.create();
final PublishProcessor<Integer> pp1 = PublishProcessor.create();
final TestObserver<Object> to = Single.zip(
Arrays.asList(pp0.single(0), pp1.single(0)), addString)
.test();
final TestException ex = new TestException();
Runnable r1 = new Runnable() {
@Override
public void run() {
pp0.onError(ex);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
pp1.onError(ex);
}
};
TestHelper.race(r1, r2);
to.assertFailure(TestException.class);
if (!errors.isEmpty()) {
TestHelper.assertUndeliverable(errors, 0, TestException.class);
}
} finally {
RxJavaPlugins.reset();
}
}
}
@Test
public void iteratorThrows() {
Single.zip(new CrashingMappedIterable<>(1, 100, 100, new Function<Integer, Single<Integer>>() {
@Override
public Single<Integer> apply(Integer v) throws Exception {
return Single.just(v);
}
}), addString)
.to(TestHelper.<Object>testConsumer())
.assertFailureAndMessage(TestException.class, "iterator()");
}
@Test
public void hasNextThrows() {
Single.zip(new CrashingMappedIterable<>(100, 20, 100, new Function<Integer, Single<Integer>>() {
@Override
public Single<Integer> apply(Integer v) throws Exception {
return Single.just(v);
}
}), addString)
.to(TestHelper.<Object>testConsumer())
.assertFailureAndMessage(TestException.class, "hasNext()");
}
@Test
public void nextThrows() {
Single.zip(new CrashingMappedIterable<>(100, 100, 5, new Function<Integer, Single<Integer>>() {
@Override
public Single<Integer> apply(Integer v) throws Exception {
return Single.just(v);
}
}), addString)
.to(TestHelper.<Object>testConsumer())
.assertFailureAndMessage(TestException.class, "next()");
}
@Test(expected = NullPointerException.class)
public void zipIterableOneIsNull() {
Single.zip(Arrays.asList(null, Single.just(1)), new Function<Object[], Object>() {
@Override
public Object apply(Object[] v) {
return 1;
}
})
.blockingGet();
}
@Test(expected = NullPointerException.class)
public void zipIterableTwoIsNull() {
Single.zip(Arrays.asList(Single.just(1), null), new Function<Object[], Object>() {
@Override
public Object apply(Object[] v) {
return 1;
}
})
.blockingGet();
}
@Test
public void emptyIterable() {
Single.zip(Collections.<SingleSource<Integer>>emptyList(), new Function<Object[], Object[]>() {
@Override
public Object[] apply(Object[] a) throws Exception {
return a;
}
})
.test()
.assertFailure(NoSuchElementException.class);
}
@Test
public void oneIterable() {
Single.zip(Collections.singleton(Single.just(1)), new Function<Object[], Object>() {
@Override
public Object apply(Object[] a) throws Exception {
return (Integer)a[0] + 1;
}
})
.test()
.assertResult(2);
}
@Test
public void singleSourceZipperReturnsNull() {
Single.zip(Arrays.asList(Single.just(1)), Functions.justFunction(null))
.to(TestHelper.<Object>testConsumer())
.assertFailureAndMessage(NullPointerException.class, "The zipper returned a null value");
}
@Test
public void singleSourcesInIterable() {
SingleSource<Integer> source = new SingleSource<Integer>() {
@Override
public void subscribe(SingleObserver<? super Integer> observer) {
Single.just(1).subscribe(observer);
}
};
Single.zip(Arrays.asList(source, source), new Function<Object[], Integer>() {
@Override
public Integer apply(Object[] t) throws Throwable {
return 2;
}
})
.test()
.assertResult(2);
}
}
| SingleZipIterableTest |
java | resilience4j__resilience4j | resilience4j-feign/src/test/java/io/github/resilience4j/feign/test/TestServiceFallbackThrowingException.java | {
"start": 96,
"end": 288
} | class ____ implements TestService {
@Override
public String greeting() {
throw new RuntimeException("Exception in greeting fallback");
}
}
| TestServiceFallbackThrowingException |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java | {
"start": 1131,
"end": 5193
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MulDoublesEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator lhs;
private final EvalOperator.ExpressionEvaluator rhs;
private final DriverContext driverContext;
private Warnings warnings;
public MulDoublesEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
EvalOperator.ExpressionEvaluator rhs, DriverContext driverContext) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) {
try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) {
DoubleVector lhsVector = lhsBlock.asVector();
if (lhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
DoubleVector rhsVector = rhsBlock.asVector();
if (rhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
return eval(page.getPositionCount(), lhsVector, rhsVector);
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += lhs.baseRamBytesUsed();
baseRamBytesUsed += rhs.baseRamBytesUsed();
return baseRamBytesUsed;
}
public DoubleBlock eval(int positionCount, DoubleBlock lhsBlock, DoubleBlock rhsBlock) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (lhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (rhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
double lhs = lhsBlock.getDouble(lhsBlock.getFirstValueIndex(p));
double rhs = rhsBlock.getDouble(rhsBlock.getFirstValueIndex(p));
try {
result.appendDouble(Mul.processDoubles(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
public DoubleBlock eval(int positionCount, DoubleVector lhsVector, DoubleVector rhsVector) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
double lhs = lhsVector.getDouble(p);
double rhs = rhsVector.getDouble(p);
try {
result.appendDouble(Mul.processDoubles(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "MulDoublesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(lhs, rhs);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | MulDoublesEvaluator |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java | {
"start": 1413,
"end": 1522
} | class ____
* responsibility for determining the set roles that an authenticated user should have.
*/
public | the |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/EncryptionS3ClientFactory.java | {
"start": 11131,
"end": 11536
} | class ____.
*
* <p>This method attempts to instantiate a Keyring provider using reflection. It first tries
* to create an instance using the standard ReflectionUtils.newInstance method. If that fails,
* it falls back to an alternative instantiation method, which is primarily used for testing
* purposes (specifically for CustomKeyring.java).
*
* @param className The fully qualified | name |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/resource/basic/BasicGenericTypesHandlingTest.java | {
"start": 1302,
"end": 2177
} | class ____ {
@RegisterExtension
static ResteasyReactiveUnitTest testExtension = new ResteasyReactiveUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
JavaArchive archive = ShrinkWrap.create(JavaArchive.class);
archive.addClasses(AbstractResource.class, TestResource.class, Input.class, Output.class,
TestMessageBodyReader.class, TestMessageBodyWriter.class);
return archive;
}
});
@Test
public void test() {
RestAssured.with().body("hello").contentType("text/test").post("/test")
.then()
.statusCode(200)
.body(Matchers.equalTo("out / hello"));
}
public static abstract | BasicGenericTypesHandlingTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 83484,
"end": 83946
} | class ____<T> {
private GenericWithImmutableParam<Container<T>> container;
}
""")
.doTest();
}
@Test
public void containerOfAsImmutableTypeParameterInSameClass_noViolation() {
withImmutableTypeParameterGeneric()
.addSourceLines(
"Container.java",
"""
import com.google.errorprone.annotations.Immutable;
@Immutable(containerOf = {"T"})
| Clazz |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/filter/tps/TpsLimitFilterTest.java | {
"start": 1467,
"end": 2732
} | class ____ {
private TpsLimitFilter filter = new TpsLimitFilter();
@Test
void testWithoutCount() throws Exception {
URL url = URL.valueOf("test://test");
url = url.addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService");
url = url.addParameter(TPS_LIMIT_RATE_KEY, 5);
Invoker<TpsLimitFilterTest> invoker = new MyInvoker<TpsLimitFilterTest>(url);
Invocation invocation = new MockInvocation();
filter.invoke(invoker, invocation);
}
@Test
void testFail() throws Exception {
Assertions.assertThrows(RpcException.class, () -> {
URL url = URL.valueOf("test://test");
url = url.addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService");
url = url.addParameter(TPS_LIMIT_RATE_KEY, 5);
Invoker<TpsLimitFilterTest> invoker = new MyInvoker<TpsLimitFilterTest>(url);
Invocation invocation = new MockInvocation();
for (int i = 0; i < 10; i++) {
Result re = filter.invoke(invoker, invocation);
if (i >= 5) {
assertTrue(re.hasException());
throw re.getException();
}
}
});
}
}
| TpsLimitFilterTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/jaxrs/AbstractInterceptorContext.java | {
"start": 423,
"end": 2621
} | class ____ implements InterceptorContext {
protected final ResteasyReactiveRequestContext context;
protected Annotation[] annotations;
protected Class<?> type;
protected Type genericType;
protected MediaType mediaType;
protected final ServerSerialisers serialisers;
// as the interceptors can change the type or mediaType, when that happens we need to find a new reader/writer
protected boolean rediscoveryNeeded = false;
public AbstractInterceptorContext(ResteasyReactiveRequestContext context, Annotation[] annotations,
Class<?> type,
Type genericType, MediaType mediaType, ServerSerialisers serialisers) {
this.context = context;
this.annotations = annotations;
this.type = type;
this.genericType = genericType;
this.mediaType = mediaType;
this.serialisers = serialisers;
}
public Object getProperty(String name) {
return context.getProperty(name);
}
public Collection<String> getPropertyNames() {
return context.getPropertyNames();
}
public void setProperty(String name, Object object) {
context.setProperty(name, object);
}
public void removeProperty(String name) {
context.removeProperty(name);
}
public Annotation[] getAnnotations() {
return annotations;
}
public void setAnnotations(Annotation[] annotations) {
Objects.requireNonNull(annotations);
this.annotations = annotations;
}
public Class<?> getType() {
return type;
}
public void setType(Class<?> type) {
if ((this.type != type) && (type != null)) {
rediscoveryNeeded = true;
}
this.type = type;
}
public Type getGenericType() {
return genericType;
}
public void setGenericType(Type genericType) {
this.genericType = genericType;
}
public MediaType getMediaType() {
return mediaType;
}
public void setMediaType(MediaType mediaType) {
if (this.mediaType != mediaType) {
rediscoveryNeeded = true;
}
this.mediaType = mediaType;
}
}
| AbstractInterceptorContext |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/support/BeanFactoryPostProcessorTests.java | {
"start": 2172,
"end": 7255
} | class ____ {
@Test
void registeredBeanFactoryPostProcessor() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
TestBeanFactoryPostProcessor bfpp = new TestBeanFactoryPostProcessor();
ac.addBeanFactoryPostProcessor(bfpp);
assertThat(bfpp.wasCalled).isFalse();
ac.refresh();
assertThat(bfpp.wasCalled).isTrue();
ac.close();
}
@Test
void definedBeanFactoryPostProcessor() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
ac.registerSingleton("bfpp", TestBeanFactoryPostProcessor.class);
ac.refresh();
TestBeanFactoryPostProcessor bfpp = (TestBeanFactoryPostProcessor) ac.getBean("bfpp");
assertThat(bfpp.wasCalled).isTrue();
ac.close();
}
@Test
@SuppressWarnings({"deprecation", "removal"})
void multipleDefinedBeanFactoryPostProcessors() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
MutablePropertyValues pvs1 = new MutablePropertyValues();
pvs1.add("initValue", "${key}");
ac.registerSingleton("bfpp1", TestBeanFactoryPostProcessor.class, pvs1);
MutablePropertyValues pvs2 = new MutablePropertyValues();
pvs2.add("properties", "key=value");
ac.registerSingleton("bfpp2", org.springframework.beans.factory.config.PropertyPlaceholderConfigurer.class, pvs2);
ac.refresh();
TestBeanFactoryPostProcessor bfpp = (TestBeanFactoryPostProcessor) ac.getBean("bfpp1");
assertThat(bfpp.initValue).isEqualTo("value");
assertThat(bfpp.wasCalled).isTrue();
ac.close();
}
@Test
void beanFactoryPostProcessorNotExecutedByBeanFactory() {
DefaultListableBeanFactory bf = new DefaultListableBeanFactory();
bf.registerBeanDefinition("tb1", new RootBeanDefinition(TestBean.class));
bf.registerBeanDefinition("tb2", new RootBeanDefinition(TestBean.class));
bf.registerBeanDefinition("bfpp", new RootBeanDefinition(TestBeanFactoryPostProcessor.class));
TestBeanFactoryPostProcessor bfpp = (TestBeanFactoryPostProcessor) bf.getBean("bfpp");
assertThat(bfpp.wasCalled).isFalse();
}
@Test
void beanDefinitionRegistryPostProcessor() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
ac.addBeanFactoryPostProcessor(new PrioritizedBeanDefinitionRegistryPostProcessor());
TestBeanDefinitionRegistryPostProcessor bdrpp = new TestBeanDefinitionRegistryPostProcessor();
ac.addBeanFactoryPostProcessor(bdrpp);
assertThat(bdrpp.wasCalled).isFalse();
ac.refresh();
assertThat(bdrpp.wasCalled).isTrue();
assertThat(ac.getBean("bfpp1", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
assertThat(ac.getBean("bfpp2", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
ac.close();
}
@Test
void beanDefinitionRegistryPostProcessorRegisteringAnother() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
ac.registerBeanDefinition("bdrpp2", new RootBeanDefinition(OuterBeanDefinitionRegistryPostProcessor.class));
ac.refresh();
assertThat(ac.getBean("bfpp1", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
assertThat(ac.getBean("bfpp2", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
ac.close();
}
@Test
void prioritizedBeanDefinitionRegistryPostProcessorRegisteringAnother() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerSingleton("tb1", TestBean.class);
ac.registerSingleton("tb2", TestBean.class);
ac.registerBeanDefinition("bdrpp2", new RootBeanDefinition(PrioritizedOuterBeanDefinitionRegistryPostProcessor.class));
ac.refresh();
assertThat(ac.getBean("bfpp1", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
assertThat(ac.getBean("bfpp2", TestBeanFactoryPostProcessor.class).wasCalled).isTrue();
ac.close();
}
@Test
void beanFactoryPostProcessorAsApplicationListener() {
StaticApplicationContext ac = new StaticApplicationContext();
ac.registerBeanDefinition("bfpp", new RootBeanDefinition(ListeningBeanFactoryPostProcessor.class));
ac.refresh();
assertThat(ac.getBean(ListeningBeanFactoryPostProcessor.class).received).isInstanceOf(ContextRefreshedEvent.class);
ac.close();
}
@Test
void beanFactoryPostProcessorWithInnerBeanAsApplicationListener() {
StaticApplicationContext ac = new StaticApplicationContext();
RootBeanDefinition rbd = new RootBeanDefinition(NestingBeanFactoryPostProcessor.class);
rbd.getPropertyValues().add("listeningBean", new RootBeanDefinition(ListeningBean.class));
ac.registerBeanDefinition("bfpp", rbd);
ac.refresh();
assertThat(ac.getBean(NestingBeanFactoryPostProcessor.class).getListeningBean().received).isInstanceOf(ContextRefreshedEvent.class);
ac.close();
}
public static | BeanFactoryPostProcessorTests |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlUsageTransportAction.java | {
"start": 1406,
"end": 2876
} | class ____ extends XPackUsageFeatureTransportAction {
private final Client client;
@Inject
public SqlUsageTransportAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
Client client
) {
super(XPackUsageFeatureAction.SQL.name(), transportService, clusterService, threadPool, actionFilters);
this.client = client;
}
@Override
protected void localClusterStateOperation(
Task task,
XPackUsageRequest request,
ClusterState state,
ActionListener<XPackUsageFeatureResponse> listener
) {
SqlStatsRequest sqlRequest = new SqlStatsRequest();
sqlRequest.includeStats(true);
sqlRequest.setParentTask(clusterService.localNode().getId(), task.getId());
client.execute(SqlStatsAction.INSTANCE, sqlRequest, listener.delegateFailureAndWrap((l, r) -> {
List<Counters> countersPerNode = r.getNodes()
.stream()
.map(SqlStatsResponse.NodeStatsResponse::getStats)
.filter(Objects::nonNull)
.collect(Collectors.toList());
Counters mergedCounters = Counters.merge(countersPerNode);
SqlFeatureSetUsage usage = new SqlFeatureSetUsage(mergedCounters.toNestedMap());
l.onResponse(new XPackUsageFeatureResponse(usage));
}));
}
}
| SqlUsageTransportAction |
java | google__dagger | hilt-android-testing/main/java/dagger/hilt/testing/TestInstallIn.java | {
"start": 996,
"end": 1330
} | class ____ also be annotated with {@link dagger.Module}.
*
* <p>Example:
*
* <pre><code>
* // Replaces FooModule with FakeFooModule, and installs it into the same component as FooModule.
* {@literal @}Module
* {@literal @}TestInstallIn(components = SingletonComponent.class, replaces = FooModule.class)
* public final | must |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/objectid/ObjectId3838Test.java | {
"start": 3857,
"end": 4202
} | class ____ extends BaseType3838 {
public String location;
protected Concrete3838() {}
public Concrete3838(String id, String loc) {
this.id = id;
location = loc;
}
}
@JsonIdentityInfo(generator = ObjectIdGenerators.IntSequenceGenerator.class, property = "id")
static | Concrete3838 |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlStopAsyncAction.java | {
"start": 919,
"end": 1635
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(POST, "/_query/async/{id}/stop"));
}
@Override
public String getName() {
return "esql_async_stop";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
AsyncStopRequest stopReq = new AsyncStopRequest(request.param("id"));
return channel -> client.execute(EsqlAsyncStopAction.INSTANCE, stopReq, new EsqlResponseListener(channel, request));
}
@Override
protected Set<String> responseParams() {
return Set.of(URL_PARAM_DELIMITER, DROP_NULL_COLUMNS_OPTION);
}
}
| RestEsqlStopAsyncAction |
java | quarkusio__quarkus | integration-tests/infinispan-cache-jpa/src/main/java/io/quarkus/it/infinispan/cache/jpa/Item.java | {
"start": 460,
"end": 1629
} | class ____ {
private long id;
private String name;
private String description;
private int version;
public Item() {
}
public Item(String name, String description) {
this.name = name;
this.description = description;
}
@Id
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "itemSeq")
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Version
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public void describeFully(StringBuilder sb) {
sb.append("Item with id=").append(id)
.append(", name='").append(name).append("'")
.append(", description='").append(description).append("'");
}
}
| Item |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/future/ShouldNotBeCancelled.java | {
"start": 809,
"end": 1222
} | class ____ extends BasicErrorMessageFactory {
private static final String SHOULD_NOT_BE_CANCELLED = "%nExpecting%n <%s>%nnot to be cancelled.%n" + Warning.WARNING;
public static ErrorMessageFactory shouldNotBeCancelled(Future<?> actual) {
return new ShouldNotBeCancelled(actual);
}
private ShouldNotBeCancelled(Future<?> actual) {
super(SHOULD_NOT_BE_CANCELLED, actual);
}
}
| ShouldNotBeCancelled |
java | apache__logging-log4j2 | log4j-jdbc-dbcp2/src/test/java/org/apache/logging/log4j/core/appender/db/jdbc/PoolingDriverConnectionSourceTest.java | {
"start": 1264,
"end": 5345
} | class ____ {
@Test
void testH2Properties() throws SQLException {
final Property[] properties = new Property[] {
// @formatter:off
Property.createProperty("username", JdbcH2TestHelper.USER_NAME),
Property.createProperty("password", JdbcH2TestHelper.PASSWORD),
// @formatter:on
};
// @formatter:off
final PoolingDriverConnectionSource source =
PoolingDriverConnectionSource.newPoolingDriverConnectionSourceBuilder()
.setConnectionString(JdbcH2TestHelper.CONNECTION_STRING_IN_MEMORY)
.setProperties(properties)
.build();
// @formatter:on
openAndClose(source);
}
@Test
void testH2PropertiesAndPoolName() throws SQLException {
final Property[] properties = new Property[] {
// @formatter:off
Property.createProperty("username", JdbcH2TestHelper.USER_NAME),
Property.createProperty("password", JdbcH2TestHelper.PASSWORD),
// @formatter:on
};
// @formatter:off
final PoolingDriverConnectionSource source =
PoolingDriverConnectionSource.newPoolingDriverConnectionSourceBuilder()
.setConnectionString(JdbcH2TestHelper.CONNECTION_STRING_IN_MEMORY)
.setProperties(properties)
.setPoolName("MyPoolName")
.build();
openAndClose(source);
}
@Test
void testH2UserAndPassword() throws SQLException {
// @formatter:off
final PoolingDriverConnectionSource source =
PoolingDriverConnectionSource.newPoolingDriverConnectionSourceBuilder()
.setConnectionString(JdbcH2TestHelper.CONNECTION_STRING_IN_MEMORY)
.setUserName(JdbcH2TestHelper.USER_NAME.toCharArray())
.setPassword(JdbcH2TestHelper.PASSWORD.toCharArray())
.build();
// @formatter:on
openAndClose(source);
}
private void openAndClose(final PoolingDriverConnectionSource source) throws SQLException {
assertNotNull(source, "PoolingDriverConnectionSource is null");
try (final Connection conn = source.getConnection()) {
assertFalse(conn.isClosed());
} finally {
source.stop();
}
}
@Test
void testH2UserPasswordAndPoolName() throws SQLException {
// @formatter:off
final PoolingDriverConnectionSource source =
PoolingDriverConnectionSource.newPoolingDriverConnectionSourceBuilder()
.setConnectionString(JdbcH2TestHelper.CONNECTION_STRING_IN_MEMORY)
.setUserName(JdbcH2TestHelper.USER_NAME.toCharArray())
.setPassword(JdbcH2TestHelper.PASSWORD.toCharArray())
.setPoolName("MyPoolName")
.build();
// @formatter:on
openAndClose(source);
}
@Test
void testPoolableConnectionFactoryConfig() throws SQLException {
final PoolableConnectionFactoryConfig poolableConnectionFactoryConfig =
PoolableConnectionFactoryConfig.newBuilder()
.setMaxConnLifetimeMillis(30000)
.build();
// @formatter:off
final PoolingDriverConnectionSource source =
PoolingDriverConnectionSource.newPoolingDriverConnectionSourceBuilder()
.setConnectionString(JdbcH2TestHelper.CONNECTION_STRING_IN_MEMORY)
.setUserName(JdbcH2TestHelper.USER_NAME.toCharArray())
.setPassword(JdbcH2TestHelper.PASSWORD.toCharArray())
.setPoolName("MyPoolName")
.setPoolableConnectionFactoryConfig(poolableConnectionFactoryConfig)
.build();
// @formatter:on
openAndClose(source);
}
}
| PoolingDriverConnectionSourceTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java | {
"start": 1657,
"end": 9288
} | class ____ extends EsqlBinaryComparison implements Negatable<EsqlBinaryComparison> {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"Equals",
EsqlBinaryComparison::readFrom
);
private static final Map<DataType, EsqlArithmeticOperation.BinaryEvaluator> evaluatorMap = Map.ofEntries(
Map.entry(DataType.BOOLEAN, EqualsBoolsEvaluator.Factory::new),
Map.entry(DataType.INTEGER, EqualsIntsEvaluator.Factory::new),
Map.entry(DataType.DOUBLE, EqualsDoublesEvaluator.Factory::new),
Map.entry(DataType.LONG, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.UNSIGNED_LONG, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.DATETIME, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.DATE_NANOS, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.GEO_POINT, EqualsGeometriesEvaluator.Factory::new),
Map.entry(DataType.CARTESIAN_POINT, EqualsGeometriesEvaluator.Factory::new),
Map.entry(DataType.GEO_SHAPE, EqualsGeometriesEvaluator.Factory::new),
Map.entry(DataType.CARTESIAN_SHAPE, EqualsGeometriesEvaluator.Factory::new),
Map.entry(DataType.GEOHASH, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.GEOTILE, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.GEOHEX, EqualsLongsEvaluator.Factory::new),
Map.entry(DataType.KEYWORD, EqualsKeywordsEvaluator.Factory::new),
Map.entry(DataType.TEXT, EqualsKeywordsEvaluator.Factory::new),
Map.entry(DataType.VERSION, EqualsKeywordsEvaluator.Factory::new),
Map.entry(DataType.IP, EqualsKeywordsEvaluator.Factory::new)
);
@FunctionInfo(
operator = "==",
returnType = { "boolean" },
description = "Check if two fields are equal. "
+ "If either field is <<esql-multivalued-fields,multivalued>> then the result is `null`.",
note = "This is pushed to the underlying search index if one side of the comparison is constant "
+ "and the other side is a field in the index that has both an <<mapping-index>> and <<doc-values>>."
)
public Equals(
Source source,
@Param(
name = "lhs",
type = {
"boolean",
"cartesian_point",
"cartesian_shape",
"date",
"double",
"geo_point",
"geo_shape",
"geohash",
"geotile",
"geohex",
"integer",
"ip",
"keyword",
"long",
"text",
"unsigned_long",
"version" },
description = "An expression."
) Expression left,
@Param(
name = "rhs",
type = {
"boolean",
"cartesian_point",
"cartesian_shape",
"date",
"double",
"geo_point",
"geo_shape",
"geohash",
"geotile",
"geohex",
"integer",
"ip",
"keyword",
"long",
"text",
"unsigned_long",
"version" },
description = "An expression."
) Expression right
) {
super(
source,
left,
right,
BinaryComparisonOperation.EQ,
evaluatorMap,
EqualsNanosMillisEvaluator.Factory::new,
EqualsMillisNanosEvaluator.Factory::new
);
}
public Equals(Source source, Expression left, Expression right, ZoneId zoneId) {
super(
source,
left,
right,
BinaryComparisonOperation.EQ,
zoneId,
evaluatorMap,
EqualsNanosMillisEvaluator.Factory::new,
EqualsMillisNanosEvaluator.Factory::new
);
}
@Override
public Translatable translatable(LucenePushdownPredicates pushdownPredicates) {
if (right() instanceof Literal lit) {
// Multi-valued literals are not supported going further. This also makes sure that we are handling multi-valued literals with
// a "warning" header, as well (see EqualsKeywordsEvaluator, for example, where lhs and rhs are both dealt with equally when
// it comes to multi-value handling).
if (lit.value() instanceof Collection<?>) {
return Translatable.NO;
}
if (left().dataType() == DataType.TEXT && left() instanceof FieldAttribute fa) {
if (pushdownPredicates.canUseEqualityOnSyntheticSourceDelegate(fa, ((BytesRef) lit.value()).utf8ToString())) {
return Translatable.YES_BUT_RECHECK_NEGATED;
}
}
}
return super.translatable(pushdownPredicates);
}
@Override
public Query asQuery(LucenePushdownPredicates pushdownPredicates, TranslatorHandler handler) {
if (right() instanceof Literal lit) {
if (left().dataType() == DataType.TEXT && left() instanceof FieldAttribute fa) {
String value = ((BytesRef) lit.value()).utf8ToString();
if (pushdownPredicates.canUseEqualityOnSyntheticSourceDelegate(fa, value)) {
String name = handler.nameOf(fa);
return new SingleValueQuery(new EqualsSyntheticSourceDelegate(source(), name, value), name, true);
}
}
}
return super.asQuery(pushdownPredicates, handler);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected NodeInfo<Equals> info() {
return NodeInfo.create(this, Equals::new, left(), right(), zoneId());
}
@Override
protected Equals replaceChildren(Expression newLeft, Expression newRight) {
return new Equals(source(), newLeft, newRight, zoneId());
}
@Override
public Equals swapLeftAndRight() {
return new Equals(source(), right(), left(), zoneId());
}
@Override
public EsqlBinaryComparison reverse() {
return this;
}
@Override
public EsqlBinaryComparison negate() {
return new NotEquals(source(), left(), right(), zoneId());
}
@Evaluator(extraName = "Ints")
static boolean processInts(int lhs, int rhs) {
return lhs == rhs;
}
@Evaluator(extraName = "Longs")
static boolean processLongs(long lhs, long rhs) {
return lhs == rhs;
}
@Evaluator(extraName = "MillisNanos")
static boolean processMillisNanos(long lhs, long rhs) {
return DateUtils.compareNanosToMillis(rhs, lhs) == 0;
}
@Evaluator(extraName = "NanosMillis")
static boolean processNanosMillis(long lhs, long rhs) {
return DateUtils.compareNanosToMillis(lhs, rhs) == 0;
}
@Evaluator(extraName = "Doubles")
static boolean processDoubles(double lhs, double rhs) {
return lhs == rhs;
}
@Evaluator(extraName = "Keywords")
static boolean processKeywords(BytesRef lhs, BytesRef rhs) {
return lhs.equals(rhs);
}
@Evaluator(extraName = "Bools")
static boolean processBools(boolean lhs, boolean rhs) {
return lhs == rhs;
}
@Evaluator(extraName = "Geometries")
static boolean processGeometries(BytesRef lhs, BytesRef rhs) {
return lhs.equals(rhs);
}
}
| Equals |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/db/jdbc/ConnectionSource.java | {
"start": 1191,
"end": 1786
} | interface ____ extends LifeCycle {
/**
* This should return a new connection every time it is called.
*
* @return the SQL connection object.
* @throws SQLException if a database error occurs.
*/
Connection getConnection() throws SQLException;
/**
* All implementations must override {@link Object#toString()} to provide information about the connection
* configuration (obscuring passwords with one-way hashes).
*
* @return the string representation of this connection source.
*/
@Override
String toString();
}
| ConnectionSource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.