language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | quarkusio__quarkus | test-framework/maven/src/main/java/io/quarkus/maven/it/verifier/RunningInvoker.java | {
"start": 1072,
"end": 6095
} | class ____ extends MavenProcessInvoker {
private final boolean parallel;
private final boolean debug;
private MavenProcessInvocationResult result;
private final File log;
private final PrintStreamHandler outStreamHandler;
private final PrintStreamHandler errStreamHandler;
public RunningInvoker(File basedir, boolean debug) {
this(basedir, debug, false);
}
public RunningInvoker(File basedir, boolean debug, boolean parallel) {
this.parallel = parallel;
this.debug = debug;
setWorkingDirectory(basedir);
String repo = System.getProperty("maven.repo.local");
if (repo == null) {
repo = new File(System.getProperty("user.home"), ".m2/repository").getAbsolutePath();
}
setLocalRepositoryDirectory(new File(repo));
log = new File(basedir, "build-" + basedir.getName() + ".log");
PrintStream outStream;
try {
outStream = createTeePrintStream(System.out, Files.newOutputStream(log.toPath()));
} catch (IOException ioe) {
outStream = System.out;
}
this.outStreamHandler = new PrintStreamHandler(outStream, true);
setOutputHandler(this.outStreamHandler);
PrintStream errStream;
try {
errStream = createTeePrintStream(System.err, Files.newOutputStream(log.toPath()));
} catch (IOException ioe) {
errStream = System.err;
}
this.errStreamHandler = new PrintStreamHandler(errStream, true);
setErrorHandler(this.errStreamHandler);
setLogger(new PrintStreamLogger(outStream, debug ? InvokerLogger.DEBUG : InvokerLogger.INFO));
}
/**
* Creates a {@link PrintStream} with an underlying {@link TeeOutputStream} composed of {@code one}
* and {@code two} outputstreams
*
* @param one
* @param two
* @return
*/
private static PrintStream createTeePrintStream(final OutputStream one, final OutputStream two) {
final OutputStream tee = new TeeOutputStream(one, two);
PrintStream stream;
try {
stream = new PrintStream(tee, true, "UTF-8");
} catch (UnsupportedEncodingException e) {
stream = new PrintStream(tee, true);
}
return stream;
}
public void stop() {
if (result == null) {
return;
}
// Kill all processes that were (indirectly) spawned by the current process.
// It's important to do it this way (instead of calling result.destroy() first)
// because otherwise children of that process can become orphaned zombies.
DevModeClient.killDescendingProcesses();
// This is now more or less "symbolic" since the previous call should have also killed that result's process.
result.destroy();
}
public MavenProcessInvocationResult execute(List<String> goals, Map<String, String> envVars)
throws MavenInvocationException {
return execute(goals, envVars, new Properties());
}
public MavenProcessInvocationResult execute(List<String> goals, Map<String, String> envVars, Properties properties)
throws MavenInvocationException {
DefaultInvocationRequest request = new DefaultInvocationRequest();
request.setGoals(goals);
request.setShowErrors(true);
request.setDebug(debug);
if (parallel) {
request.setThreads("1C");
}
request.setLocalRepositoryDirectory(getLocalRepositoryDirectory());
request.setBaseDirectory(getWorkingDirectory());
request.setPomFile(new File(getWorkingDirectory(), "pom.xml"));
request.setProperties(properties);
if (System.getProperty("mavenOpts") != null) {
request.setMavenOpts(System.getProperty("mavenOpts"));
} else {
//we need to limit the memory consumption, as we can have a lot of these processes
//running at once, if they add default to 75% of total mem we can easily run out
//of physical memory as they will consume way more than what they need instead of
//just running GC
request.setMavenOpts("-Xmx192m");
}
request.setShellEnvironmentInherited(true);
envVars.forEach(request::addShellEnvironment);
request.setOutputHandler(outStreamHandler);
request.setErrorHandler(errStreamHandler);
this.result = (MavenProcessInvocationResult) execute(request);
return result;
}
@Override
public InvocationResult execute(InvocationRequest request) throws MavenInvocationException {
MojoTestBase.passUserSettings(request);
return super.execute(request);
}
public String log() throws IOException {
if (log == null) {
return null;
}
return FileUtils.readFileToString(log, "UTF-8");
}
public MavenProcessInvocationResult getResult() {
return result;
}
}
| RunningInvoker |
java | google__gson | gson/src/test/java/com/google/gson/functional/ReflectionAccessFilterTest.java | {
"start": 11326,
"end": 12596
} | class ____ {
private ClassWithPrivateNoArgsConstructor() {}
}
@Test
public void testInaccessibleNoArgsConstructor() {
Gson gson =
new GsonBuilder()
.addReflectionAccessFilter(
new ReflectionAccessFilter() {
@Override
public FilterResult check(Class<?> rawClass) {
return FilterResult.BLOCK_INACCESSIBLE;
}
})
.create();
var e =
assertThrows(
"Expected exception; test needs to be run with Java >= 9",
JsonIOException.class,
() -> gson.fromJson("{}", ClassWithPrivateNoArgsConstructor.class));
assertThat(e)
.hasMessageThat()
.isEqualTo(
"Unable to invoke no-args constructor of class"
+ " com.google.gson.functional.ReflectionAccessFilterTest$ClassWithPrivateNoArgsConstructor;"
+ " constructor is not accessible and ReflectionAccessFilter does not permit making"
+ " it accessible. Register an InstanceCreator or a TypeAdapter for this type,"
+ " change the visibility of the constructor or adjust the access filter.");
}
private static | ClassWithPrivateNoArgsConstructor |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleXmlColumnProperties.java | {
"start": 245,
"end": 1476
} | class ____ extends OracleSQLObjectImpl {
private SQLName column;
private OracleXMLTypeStorage storage;
private Boolean allowNonSchema;
private Boolean allowAnySchema;
@Override
public void accept0(OracleASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, storage);
}
visitor.endVisit(this);
}
public SQLName getColumn() {
return column;
}
public void setColumn(SQLName x) {
if (x != null) {
x.setParent(this);
}
this.column = x;
}
public OracleXMLTypeStorage getStorage() {
return storage;
}
public void setStorage(OracleXMLTypeStorage x) {
if (x != null) {
x.setParent(this);
}
this.storage = x;
}
public Boolean getAllowNonSchema() {
return allowNonSchema;
}
public void setAllowNonSchema(Boolean allowNonSchema) {
this.allowNonSchema = allowNonSchema;
}
public Boolean getAllowAnySchema() {
return allowAnySchema;
}
public void setAllowAnySchema(Boolean allowAnySchema) {
this.allowAnySchema = allowAnySchema;
}
public static | OracleXmlColumnProperties |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/spi/ParameterMetadataImplementor.java | {
"start": 430,
"end": 1329
} | interface ____ extends ParameterMetadata {
void visitParameters(Consumer<QueryParameter<?>> consumer);
default void collectAllParameters(Consumer<QueryParameter<?>> collector) {
visitParameters( collector );
}
@Override
default void visitRegistrations(Consumer<QueryParameter<?>> action) {
visitParameters( action );
}
boolean hasAnyMatching(Predicate<QueryParameterImplementor<?>> filter);
@Override
QueryParameterImplementor<?> findQueryParameter(String name);
@Override
QueryParameterImplementor<?> getQueryParameter(String name);
@Override
QueryParameterImplementor<?> findQueryParameter(int positionLabel);
@Override
QueryParameterImplementor<?> getQueryParameter(int positionLabel);
@Override
<P> QueryParameterImplementor<P> resolve(Parameter<P> param);
QueryParameterBindings createBindings(SessionFactoryImplementor sessionFactory);
}
| ParameterMetadataImplementor |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeinfo/descriptor/ListTypeDescriptorImpl.java | {
"start": 1390,
"end": 2217
} | class ____<T> implements TypeDescriptor<List<T>> {
private final ListTypeInfo<T> listTypeInfo;
public ListTypeDescriptorImpl(Class<T> elementClass) {
listTypeInfo = new ListTypeInfo<>(elementClass);
}
public ListTypeDescriptorImpl(TypeDescriptor<T> typeDescriptor) {
listTypeInfo = new ListTypeInfo<>(typeDescriptor.getTypeClass());
}
public ListTypeInfo<?> getListTypeInfo() {
return listTypeInfo;
}
@Override
public Class<List<T>> getTypeClass() {
return listTypeInfo.getTypeClass();
}
public Class<T> getComponentType() {
return listTypeInfo.getElementTypeInfo().getTypeClass();
}
@Override
public String toString() {
return "ListTypeDescriptorImpl [listTypeInfo=" + listTypeInfo + "]";
}
}
| ListTypeDescriptorImpl |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java | {
"start": 11348,
"end": 13929
} | class ____ extends TypeSafeMatcher<GeoPoint> {
private final Point[] encodedPositions;
private RectangleLabelPosition(Rectangle... rectangles) {
encodedPositions = new Point[rectangles.length * 4];
for (int i = 0; i < rectangles.length; i++) {
Rectangle rectangle = rectangles[i];
GeoPoint a = new GeoPoint(rectangle.getMinY(), rectangle.getMinX());
GeoPoint b = new GeoPoint(rectangle.getMinY(), rectangle.getMaxX());
GeoPoint c = new GeoPoint(rectangle.getMaxY(), rectangle.getMaxX());
GeoPoint d = new GeoPoint(rectangle.getMaxY(), rectangle.getMinX());
encodedPositions[i * 4 + 0] = average(a, b, c);
encodedPositions[i * 4 + 1] = average(b, c, d);
encodedPositions[i * 4 + 2] = average(c, d, a);
encodedPositions[i * 4 + 3] = average(d, a, b);
}
}
private Point average(GeoPoint... points) {
double lon = 0;
double lat = 0;
for (GeoPoint point : points) {
lon += point.lon();
lat += point.lat();
}
int x = CoordinateEncoder.GEO.encodeX(lon / points.length);
int y = CoordinateEncoder.GEO.encodeY(lat / points.length);
return new Point(x, y);
}
@Override
public boolean matchesSafely(GeoPoint point) {
int x = CoordinateEncoder.GEO.encodeX(point.lon());
int y = CoordinateEncoder.GEO.encodeY(point.lat());
return is(oneOf(encodedPositions)).matches(new Point(x, y));
}
@Override
public void describeTo(Description description) {
description.appendText("is(oneOf(" + Arrays.toString(encodedPositions) + ")");
}
}
private static Extent getExtentFromBox(double bottomLeftX, double bottomLeftY, double topRightX, double topRightY) {
return Extent.fromPoints(
CoordinateEncoder.GEO.encodeX(bottomLeftX),
CoordinateEncoder.GEO.encodeY(bottomLeftY),
CoordinateEncoder.GEO.encodeX(topRightX),
CoordinateEncoder.GEO.encodeY(topRightY)
);
}
private static void assertDimensionalShapeType(Geometry geometry, DimensionalShapeType expected) throws IOException {
GeometryDocValueReader reader = GeoTestUtils.geometryDocValueReader(geometry, CoordinateEncoder.GEO);
assertThat(reader.getDimensionalShapeType(), equalTo(expected));
}
}
| RectangleLabelPosition |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/builder/BuilderWithCreatorTest.java | {
"start": 2350,
"end": 2478
} | class ____
{
final int value;
protected IntCreatorValue(int v) { value = v; }
}
static | IntCreatorValue |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileProducerChecksumFileAlgorithmTest.java | {
"start": 1087,
"end": 2366
} | class ____ extends ContextTestSupport {
@Test
public void testProducerChecksumFileMd5() throws Exception {
template.sendBodyAndHeader(fileUri("?checksumFileAlgorithm=md5"), "Hello World", Exchange.FILE_NAME, "hello.txt");
assertFileExists(testFile("hello.txt"));
assertFileExists(testFile("hello.txt.md5"), "b10a8db164e0754105b7a99be72e3fe5");
}
@Test
public void testProducerChecksumFileSha256() throws Exception {
template.sendBodyAndHeader(fileUri("?checksumFileAlgorithm=sha256"), "Hello World", Exchange.FILE_NAME, "hello.txt");
assertFileExists(testFile("hello.txt"));
assertFileExists(testFile("hello.txt.sha256"), "a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e");
}
@Test
public void testProducerChecksumFileSha256WithStreamCaching() throws Exception {
InputStreamCache cache = new InputStreamCache("Hello World".getBytes());
template.sendBodyAndHeader(fileUri("?checksumFileAlgorithm=sha256"), cache, Exchange.FILE_NAME, "hello.txt");
assertFileExists(testFile("hello.txt"));
assertFileExists(testFile("hello.txt.sha256"), "a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e");
}
}
| FileProducerChecksumFileAlgorithmTest |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/RowTimeUnboundedPrecedingOverFunctionV2.java | {
"start": 3442,
"end": 10734
} | class ____<K>
extends KeyedProcessFunctionWithCleanupState<K, RowData, RowData> {
public static final int SECOND_OVER_VERSION = 2;
private static final long serialVersionUID = 1L;
private static final Logger LOG =
LoggerFactory.getLogger(RowTimeUnboundedPrecedingOverFunctionV2.class);
// whether this is a ROWS or RANGE operation
private final boolean isRowsWindow;
private final GeneratedAggsHandleFunction genAggsHandler;
private final LogicalType[] accTypes;
private final LogicalType[] inputFieldTypes;
private final int rowTimeIdx;
protected transient JoinedRowData output;
// state to hold the accumulators of the aggregations
private transient ValueState<RowData> accState;
// state to hold rows until the next watermark arrives
private transient MapState<Long, List<RowData>> inputState;
protected transient AggsHandleFunction function;
private transient Counter numLateRecordsDropped;
@VisibleForTesting
protected Counter getCounter() {
return numLateRecordsDropped;
}
public RowTimeUnboundedPrecedingOverFunctionV2(
boolean isRowsWindow,
long minRetentionTime,
long maxRetentionTime,
GeneratedAggsHandleFunction genAggsHandler,
LogicalType[] accTypes,
LogicalType[] inputFieldTypes,
int rowTimeIdx) {
super(minRetentionTime, maxRetentionTime);
this.isRowsWindow = isRowsWindow;
this.genAggsHandler = genAggsHandler;
this.accTypes = accTypes;
this.inputFieldTypes = inputFieldTypes;
this.rowTimeIdx = rowTimeIdx;
}
@Override
public void open(OpenContext openContext) throws Exception {
function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
function.open(new PerKeyStateDataViewStore(getRuntimeContext()));
output = new JoinedRowData();
// initialize accumulator state
InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
ValueStateDescriptor<RowData> accStateDesc =
new ValueStateDescriptor<>(ACCUMULATOR_STATE_NAME, accTypeInfo);
accState = getRuntimeContext().getState(accStateDesc);
// input element are all binary row as they are came from network
InternalTypeInfo<RowData> inputType = InternalTypeInfo.ofFields(inputFieldTypes);
ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<>(inputType);
MapStateDescriptor<Long, List<RowData>> inputStateDesc =
new MapStateDescriptor<>(INPUT_STATE_NAME, Types.LONG, rowListTypeInfo);
inputState = getRuntimeContext().getMapState(inputStateDesc);
initCleanupTimeState(CLEANUP_STATE_NAME);
// metrics
this.numLateRecordsDropped =
getRuntimeContext().getMetricGroup().counter(LATE_ELEMENTS_DROPPED_METRIC_NAME);
}
/**
* Puts an element from the input stream into state if it is not late. Registers a timer for the
* next watermark.
*
* @param input The input value.
* @param ctx A {@link Context} that allows querying the timestamp of the element and getting
* TimerService for registering timers and querying the time. The context is only valid
* during the invocation of this method, do not store it.
* @param out The collector for returning result values.
* @throws Exception
*/
@Override
public void processElement(
RowData input,
KeyedProcessFunction<K, RowData, RowData>.Context ctx,
Collector<RowData> out)
throws Exception {
// register state-cleanup timer
registerProcessingCleanupTimer(ctx, ctx.timerService().currentProcessingTime());
long timestamp = input.getLong(rowTimeIdx);
long curWatermark = ctx.timerService().currentWatermark();
if (timestamp <= curWatermark) {
// discard late record
numLateRecordsDropped.inc();
return;
}
// put row into state
List<RowData> rowList = inputState.get(timestamp);
if (rowList == null) {
rowList = new ArrayList<>();
// if that's the first timestamp for the given key, register the timer to process
// those records.
ctx.timerService().registerEventTimeTimer(timestamp);
}
rowList.add(input);
inputState.put(timestamp, rowList);
}
@Override
public void onTimer(
long timestamp,
KeyedProcessFunction<K, RowData, RowData>.OnTimerContext ctx,
Collector<RowData> out)
throws Exception {
if (isProcessingTimeTimer(ctx)) {
cleanupState(ctx);
return;
}
RowData lastAccumulator = accState.value();
if (lastAccumulator == null) {
lastAccumulator = function.createAccumulators();
}
function.setAccumulators(lastAccumulator);
processElementsWithSameTimestamp(timestamp, out);
lastAccumulator = function.getAccumulators();
accState.update(lastAccumulator);
registerProcessingCleanupTimer(ctx, ctx.timerService().currentProcessingTime());
}
/**
* Process the same timestamp datas, the mechanism is different between rows and range window.
*/
private void processElementsWithSameTimestamp(long timestamp, Collector<RowData> out)
throws Exception {
List<RowData> curRowList = inputState.get(timestamp);
if (curRowList == null) {
// Ignore the same timestamp datas if the state is cleared already.
LOG.warn(
"The state is cleared because of state ttl. "
+ "This will result in incorrect result. "
+ "You can increase the state ttl to avoid this.");
} else {
if (isRowsWindow) {
processElementsWithSameTimestampRows(function, output, curRowList, out);
} else {
processElementsWithSameTimestampRange(function, output, curRowList, out);
}
}
inputState.remove(timestamp);
}
private void cleanupState(OnTimerContext ctx) throws Exception {
if (stateCleaningEnabled) {
// we check whether there are still records which have not been processed yet
if (inputState.isEmpty()) {
// we clean the state
cleanupState(inputState, accState);
function.cleanup();
} else {
// There are records left to process because a watermark has not been received
// yet.
// This would only happen if the input stream has stopped. So we don't need to
// clean up.
// We leave the state as it is and schedule a new cleanup timer
registerProcessingCleanupTimer(ctx, ctx.timerService().currentProcessingTime());
}
}
}
@Override
public void close() throws Exception {
if (null != function) {
function.close();
}
}
}
| RowTimeUnboundedPrecedingOverFunctionV2 |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/context/SupplierDeferredSecurityContext.java | {
"start": 1167,
"end": 2280
} | class ____ implements DeferredSecurityContext {
private static final Log logger = LogFactory.getLog(SupplierDeferredSecurityContext.class);
private final Supplier<SecurityContext> supplier;
private final SecurityContextHolderStrategy strategy;
private @Nullable SecurityContext securityContext;
private boolean missingContext;
SupplierDeferredSecurityContext(Supplier<SecurityContext> supplier, SecurityContextHolderStrategy strategy) {
this.supplier = supplier;
this.strategy = strategy;
}
@Override
public @Nullable SecurityContext get() {
init();
return this.securityContext;
}
@Override
public boolean isGenerated() {
init();
return this.missingContext;
}
private void init() {
if (this.securityContext != null) {
return;
}
this.securityContext = this.supplier.get();
this.missingContext = (this.securityContext == null);
if (this.missingContext) {
this.securityContext = this.strategy.createEmptyContext();
if (logger.isTraceEnabled()) {
logger.trace(LogMessage.format("Created %s", this.securityContext));
}
}
}
}
| SupplierDeferredSecurityContext |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authentication/rememberme/RememberMeAuthenticationProviderTests.java | {
"start": 1478,
"end": 3360
} | class ____ {
@Test
public void testDetectsAnInvalidKey() {
RememberMeAuthenticationProvider aap = new RememberMeAuthenticationProvider("qwerty");
RememberMeAuthenticationToken token = new RememberMeAuthenticationToken("WRONG_KEY", "Test",
AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"));
assertThatExceptionOfType(BadCredentialsException.class).isThrownBy(() -> aap.authenticate(token));
}
@Test
public void testDetectsMissingKey() {
assertThatIllegalArgumentException().isThrownBy(() -> new RememberMeAuthenticationProvider(null));
}
@Test
public void testGettersSetters() throws Exception {
RememberMeAuthenticationProvider aap = new RememberMeAuthenticationProvider("qwerty");
aap.afterPropertiesSet();
assertThat(aap.getKey()).isEqualTo("qwerty");
}
@Test
public void testIgnoresClassesItDoesNotSupport() {
RememberMeAuthenticationProvider aap = new RememberMeAuthenticationProvider("qwerty");
TestingAuthenticationToken token = new TestingAuthenticationToken("user", "password", "ROLE_A");
assertThat(aap.supports(TestingAuthenticationToken.class)).isFalse();
// Try it anyway
assertThat(aap.authenticate(token)).isNull();
}
@Test
public void testNormalOperation() {
RememberMeAuthenticationProvider aap = new RememberMeAuthenticationProvider("qwerty");
RememberMeAuthenticationToken token = new RememberMeAuthenticationToken("qwerty", "Test",
AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"));
Authentication result = aap.authenticate(token);
assertThat(token).isEqualTo(result);
}
@Test
public void testSupports() {
RememberMeAuthenticationProvider aap = new RememberMeAuthenticationProvider("qwerty");
assertThat(aap.supports(RememberMeAuthenticationToken.class)).isTrue();
assertThat(aap.supports(TestingAuthenticationToken.class)).isFalse();
}
}
| RememberMeAuthenticationProviderTests |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/authentication/OAuth2AccessTokenAuthenticationToken.java | {
"start": 1580,
"end": 5150
} | class ____ extends AbstractAuthenticationToken {
@Serial
private static final long serialVersionUID = 2773767853287774441L;
private final RegisteredClient registeredClient;
private final Authentication clientPrincipal;
private final OAuth2AccessToken accessToken;
private final OAuth2RefreshToken refreshToken;
private final Map<String, Object> additionalParameters;
/**
* Constructs an {@code OAuth2AccessTokenAuthenticationToken} using the provided
* parameters.
* @param registeredClient the registered client
* @param clientPrincipal the authenticated client principal
* @param accessToken the access token
*/
public OAuth2AccessTokenAuthenticationToken(RegisteredClient registeredClient, Authentication clientPrincipal,
OAuth2AccessToken accessToken) {
this(registeredClient, clientPrincipal, accessToken, null);
}
/**
* Constructs an {@code OAuth2AccessTokenAuthenticationToken} using the provided
* parameters.
* @param registeredClient the registered client
* @param clientPrincipal the authenticated client principal
* @param accessToken the access token
* @param refreshToken the refresh token
*/
public OAuth2AccessTokenAuthenticationToken(RegisteredClient registeredClient, Authentication clientPrincipal,
OAuth2AccessToken accessToken, @Nullable OAuth2RefreshToken refreshToken) {
this(registeredClient, clientPrincipal, accessToken, refreshToken, Collections.emptyMap());
}
/**
* Constructs an {@code OAuth2AccessTokenAuthenticationToken} using the provided
* parameters.
* @param registeredClient the registered client
* @param clientPrincipal the authenticated client principal
* @param accessToken the access token
* @param refreshToken the refresh token
* @param additionalParameters the additional parameters
*/
public OAuth2AccessTokenAuthenticationToken(RegisteredClient registeredClient, Authentication clientPrincipal,
OAuth2AccessToken accessToken, @Nullable OAuth2RefreshToken refreshToken,
Map<String, Object> additionalParameters) {
super(Collections.emptyList());
Assert.notNull(registeredClient, "registeredClient cannot be null");
Assert.notNull(clientPrincipal, "clientPrincipal cannot be null");
Assert.notNull(accessToken, "accessToken cannot be null");
Assert.notNull(additionalParameters, "additionalParameters cannot be null");
this.registeredClient = registeredClient;
this.clientPrincipal = clientPrincipal;
this.accessToken = accessToken;
this.refreshToken = refreshToken;
this.additionalParameters = additionalParameters;
}
@Override
public Object getPrincipal() {
return this.clientPrincipal;
}
@Override
public Object getCredentials() {
return "";
}
/**
* Returns the {@link RegisteredClient registered client}.
* @return the {@link RegisteredClient}
*/
public RegisteredClient getRegisteredClient() {
return this.registeredClient;
}
/**
* Returns the {@link OAuth2AccessToken access token}.
* @return the {@link OAuth2AccessToken}
*/
public OAuth2AccessToken getAccessToken() {
return this.accessToken;
}
/**
* Returns the {@link OAuth2RefreshToken refresh token}.
* @return the {@link OAuth2RefreshToken} or {@code null} if not available
*/
@Nullable
public OAuth2RefreshToken getRefreshToken() {
return this.refreshToken;
}
/**
* Returns the additional parameters.
* @return a {@code Map} of the additional parameters, may be empty
*/
public Map<String, Object> getAdditionalParameters() {
return this.additionalParameters;
}
}
| OAuth2AccessTokenAuthenticationToken |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/testing/LambdaWithHierarchyTest.java | {
"start": 1528,
"end": 1728
} | class ____ extends AbstractRequestHandler<Person, String> {
public String getName(Person input) {
return "Hello " + input.getName();
}
}
public static | GreetingLambda |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/search/query/GeoFilterParams.java | {
"start": 738,
"end": 1802
} | class ____ implements GeoFilter,
GeoFilterRadius,
QueryFilter {
private String fieldName;
private double longitude;
private double latitude;
private double radius;
private GeoUnit unit;
GeoFilterParams(String fieldName) {
this.fieldName = fieldName;
}
@Override
public GeoFilterRadius from(double longitude, double latitude) {
this.longitude = longitude;
this.latitude = latitude;
return this;
}
@Override
public QueryFilter radius(double radius, GeoUnit geoUnit) {
this.radius = radius;
this.unit = geoUnit;
return this;
}
public String getFieldName() {
return fieldName;
}
public double getLongitude() {
return longitude;
}
public double getLatitude() {
return latitude;
}
public double getRadius() {
return radius;
}
public GeoUnit getUnit() {
return unit;
}
}
| GeoFilterParams |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableOnBackpressureBuffer.java | {
"start": 1940,
"end": 8238
} | class ____<T> extends BasicIntQueueSubscription<T> implements FlowableSubscriber<T> {
private static final long serialVersionUID = -2514538129242366402L;
final Subscriber<? super T> downstream;
final SimplePlainQueue<T> queue;
final boolean delayError;
final Action onOverflow;
final Consumer<? super T> onDropped;
Subscription upstream;
volatile boolean cancelled;
volatile boolean done;
Throwable error;
final AtomicLong requested = new AtomicLong();
boolean outputFused;
BackpressureBufferSubscriber(Subscriber<? super T> actual, int bufferSize,
boolean unbounded, boolean delayError, Action onOverflow, Consumer<? super T> onDropped) {
this.downstream = actual;
this.onOverflow = onOverflow;
this.delayError = delayError;
this.onDropped = onDropped;
SimplePlainQueue<T> q;
if (unbounded) {
q = new SpscLinkedArrayQueue<>(bufferSize);
} else {
q = new SpscArrayQueue<>(bufferSize);
}
this.queue = q;
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(T t) {
if (!queue.offer(t)) {
upstream.cancel();
MissingBackpressureException ex = new MissingBackpressureException("Buffer is full");
try {
onOverflow.run();
onDropped.accept(t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
ex.initCause(e);
}
onError(ex);
return;
}
if (outputFused) {
downstream.onNext(null);
} else {
drain();
}
}
@Override
public void onError(Throwable t) {
error = t;
done = true;
if (outputFused) {
downstream.onError(t);
} else {
drain();
}
}
@Override
public void onComplete() {
done = true;
if (outputFused) {
downstream.onComplete();
} else {
drain();
}
}
@Override
public void request(long n) {
if (!outputFused) {
if (SubscriptionHelper.validate(n)) {
BackpressureHelper.add(requested, n);
drain();
}
}
}
@Override
public void cancel() {
if (!cancelled) {
cancelled = true;
upstream.cancel();
if (!outputFused && getAndIncrement() == 0) {
queue.clear();
}
}
}
void drain() {
if (getAndIncrement() == 0) {
int missed = 1;
final SimplePlainQueue<T> q = queue;
final Subscriber<? super T> a = downstream;
for (;;) {
if (checkTerminated(done, q.isEmpty(), a)) {
return;
}
long r = requested.get();
long e = 0L;
while (e != r) {
boolean d = done;
T v = q.poll();
boolean empty = v == null;
if (checkTerminated(d, empty, a)) {
return;
}
if (empty) {
break;
}
a.onNext(v);
e++;
}
if (e == r) {
boolean d = done;
boolean empty = q.isEmpty();
if (checkTerminated(d, empty, a)) {
return;
}
}
if (e != 0L) {
if (r != Long.MAX_VALUE) {
requested.addAndGet(-e);
}
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
}
boolean checkTerminated(boolean d, boolean empty, Subscriber<? super T> a) {
if (cancelled) {
queue.clear();
return true;
}
if (d) {
if (delayError) {
if (empty) {
Throwable e = error;
if (e != null) {
a.onError(e);
} else {
a.onComplete();
}
return true;
}
} else {
Throwable e = error;
if (e != null) {
queue.clear();
a.onError(e);
return true;
} else
if (empty) {
a.onComplete();
return true;
}
}
}
return false;
}
@Override
public int requestFusion(int mode) {
if ((mode & ASYNC) != 0) {
outputFused = true;
return ASYNC;
}
return NONE;
}
@Nullable
@Override
public T poll() {
return queue.poll();
}
@Override
public void clear() {
queue.clear();
}
@Override
public boolean isEmpty() {
return queue.isEmpty();
}
}
}
| BackpressureBufferSubscriber |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedClassIntegrationTests.java | {
"start": 55503,
"end": 55717
} | class ____ {
@Parameter(0)
ArgumentsAccessor accessor;
@Test
void test() {
fail("should not be called");
}
}
@ParameterizedClass
@ValueSource(ints = 1)
static | InvalidAggregatorFieldWithIndexTestCase |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/util/KotlinUtils.java | {
"start": 1001,
"end": 2164
} | class ____ {
/**
* Constant indicating whether coroutines are supported.
*/
public static final boolean KOTLIN_COROUTINES_SUPPORTED;
public static final Object COROUTINE_SUSPENDED;
static {
boolean areKotlinCoroutinesSupportedCandidate;
Object coroutineSuspendedCandidate;
try {
coroutineSuspendedCandidate = IntrinsicsKt.getCOROUTINE_SUSPENDED();
areKotlinCoroutinesSupportedCandidate = true;
} catch (NoClassDefFoundError e) {
coroutineSuspendedCandidate = null;
areKotlinCoroutinesSupportedCandidate = false;
}
KOTLIN_COROUTINES_SUPPORTED = areKotlinCoroutinesSupportedCandidate;
COROUTINE_SUSPENDED = coroutineSuspendedCandidate;
}
/**
* Kotlin <code>suspend</code> function result check.
*
* @param obj object to be checked
* @return True if given object is an indicating that a <code>suspend</code> function suspended.
*/
public static boolean isKotlinCoroutineSuspended(@Nullable Object obj) {
return KOTLIN_COROUTINES_SUPPORTED && obj == COROUTINE_SUSPENDED;
}
}
| KotlinUtils |
java | quarkusio__quarkus | core/launcher/src/main/java/io/quarkus/launcher/RuntimeLaunchClassLoader.java | {
"start": 145,
"end": 1578
} | class ____ extends ClassLoader {
static {
registerAsParallelCapable();
}
public RuntimeLaunchClassLoader(ClassLoader parent) {
super(parent);
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
String resourceName = "META-INF/ide-deps/" + name.replace(".", "/") + ".class.ide-launcher-res";
try {
try (InputStream is = getParent().getResourceAsStream(resourceName)) {
if (is == null) {
throw new ClassNotFoundException(name);
}
definePackage(name);
byte[] bytes = is.readAllBytes();
return defineClass(name, bytes, 0, bytes.length);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private void definePackage(String name) {
var pkgName = getPackageNameFromClassName(name);
if (pkgName == null) {
return;
}
if (getDefinedPackage(pkgName) != null) {
return;
}
try {
// this could certainly be improved to use the actual manifest
definePackage(pkgName, null, null, null, null, null, null, null);
} catch (IllegalArgumentException e) {
// retry, thrown by definePackage(), if a package for the same name is already defines by this | RuntimeLaunchClassLoader |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/SingletonBeanRegistry.java | {
"start": 1047,
"end": 1317
} | interface ____ this interface.
*
* @author Juergen Hoeller
* @since 2.0
* @see ConfigurableBeanFactory
* @see org.springframework.beans.factory.support.DefaultSingletonBeanRegistry
* @see org.springframework.beans.factory.support.AbstractBeanFactory
*/
public | extends |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/convertor/LongNumberConvertor.java | {
"start": 738,
"end": 1832
} | class ____ implements Convertor<Object> {
private Class<?> resultClass;
public LongNumberConvertor(Class<?> resultClass) {
super();
this.resultClass = resultClass;
}
@Override
public Object convert(Object result) {
if (result instanceof Long) {
Long res = (Long) result;
if (resultClass.isAssignableFrom(Long.class)) {
return res;
}
if (resultClass.isAssignableFrom(Integer.class)) {
return res.intValue();
}
if (resultClass.isAssignableFrom(BigDecimal.class)) {
return new BigDecimal(res);
}
}
if (result instanceof Double) {
Double res = (Double) result;
if (resultClass.isAssignableFrom(Float.class)) {
return ((Double) result).floatValue();
}
if (resultClass.isAssignableFrom(Double.class)) {
return res;
}
}
throw new IllegalStateException("Wrong value type!");
}
}
| LongNumberConvertor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/joinfetch/User.java | {
"start": 199,
"end": 556
} | class ____ {
private String name;
private Map groups = new HashMap();
public User(String name) {
this.name = name;
}
User() {}
public Map getGroups() {
return groups;
}
public void setGroups(Map groups) {
this.groups = groups;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| User |
java | apache__camel | components/camel-guava-eventbus/src/generated/java/org/apache/camel/component/guava/eventbus/GuavaEventBusComponentConfigurer.java | {
"start": 741,
"end": 3714
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
GuavaEventBusComponent target = (GuavaEventBusComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "eventbus":
case "eventBus": target.setEventBus(property(camelContext, com.google.common.eventbus.EventBus.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "listenerinterface":
case "listenerInterface": target.setListenerInterface(property(camelContext, java.lang.Class.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "eventbus":
case "eventBus": return com.google.common.eventbus.EventBus.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "listenerinterface":
case "listenerInterface": return java.lang.Class.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
GuavaEventBusComponent target = (GuavaEventBusComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "eventbus":
case "eventBus": return target.getEventBus();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "listenerinterface":
case "listenerInterface": return target.getListenerInterface();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "listenerinterface":
case "listenerInterface": return java.lang.Object.class;
default: return null;
}
}
}
| GuavaEventBusComponentConfigurer |
java | quarkusio__quarkus | independent-projects/bootstrap/app-model/src/test/java/io/quarkus/bootstrap/model/AppArtifactCoordsTest.java | {
"start": 237,
"end": 2337
} | class ____ {
@Test
void testFails() {
String message = assertThrows(IllegalArgumentException.class, () -> AppArtifactCoords.fromString("test-artifact"))
.getMessage();
Assertions.assertTrue(message.contains("Invalid AppArtifactCoords string without any separator"));
}
@Test
void testGAFails() {
String message = assertThrows(IllegalArgumentException.class,
() -> AppArtifactCoords.fromString("io.quarkus:test-artifact")).getMessage();
Assertions.assertTrue(message.contains("Use AppArtifactKey instead of AppArtifactCoords"));
}
@Test
void testGAV() {
final AppArtifactCoords appArtifactCoords = AppArtifactCoords.fromString("io.quarkus:test-artifact:1.1");
assertEquals("io.quarkus", appArtifactCoords.getGroupId());
assertEquals("test-artifact", appArtifactCoords.getArtifactId());
assertEquals("1.1", appArtifactCoords.getVersion());
assertEquals("", appArtifactCoords.getClassifier());
assertEquals("jar", appArtifactCoords.getType());
}
@Test
void testGACV() {
final AppArtifactCoords appArtifactCoords = AppArtifactCoords.fromString("io.quarkus:test-artifact:classif:1.1");
assertEquals("io.quarkus", appArtifactCoords.getGroupId());
assertEquals("test-artifact", appArtifactCoords.getArtifactId());
assertEquals("1.1", appArtifactCoords.getVersion());
assertEquals("classif", appArtifactCoords.getClassifier());
assertEquals("jar", appArtifactCoords.getType());
}
@Test
void testGACTV() {
final AppArtifactCoords appArtifactCoords = AppArtifactCoords.fromString("io.quarkus:test-artifact:classif:json:1.1");
assertEquals("io.quarkus", appArtifactCoords.getGroupId());
assertEquals("test-artifact", appArtifactCoords.getArtifactId());
assertEquals("1.1", appArtifactCoords.getVersion());
assertEquals("classif", appArtifactCoords.getClassifier());
assertEquals("json", appArtifactCoords.getType());
}
}
| AppArtifactCoordsTest |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/SingleValueAggFunction.java | {
"start": 7019,
"end": 7360
} | class ____ extends SingleValueAggFunction {
private static final long serialVersionUID = 320495723666949978L;
@Override
public DataType getResultType() {
return DataTypes.BIGINT();
}
}
/** Built-in float single value aggregate function. */
public static final | LongSingleValueAggFunction |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 1965,
"end": 3217
} | interface ____ {}");
private static final XTypeSpec GENERATED_INJECT_TYPE =
XTypeSpecs.classBuilder("GeneratedInjectType")
.addFunction(
constructorBuilder()
.addAnnotation(XClassName.get("javax.inject", "Inject"))
.build())
.build();
private static final XTypeSpec GENERATED_QUALIFIER =
XTypeSpecs.annotationBuilder("GeneratedQualifier")
.addAnnotation(XClassName.get("javax.inject", "Qualifier"))
.build();
private static final XTypeSpec GENERATED_MODULE =
XTypeSpecs.classBuilder("GeneratedModule")
.addAnnotation(XTypeNames.MODULE)
.build();
@Rule public GoldenFileRule goldenFileRule = new GoldenFileRule();
private final CompilerMode compilerMode;
public ComponentProcessorTest(CompilerMode compilerMode) {
this.compilerMode = compilerMode;
}
@Test
public void doubleBindingFromResolvedModules() {
Source parent = CompilerTests.javaSource("test.ParentModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import java.util.List;",
"",
"@Module",
"abstract | Nullable |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/protocol/StreamRequest.java | {
"start": 1111,
"end": 2069
} | class ____ extends AbstractMessage implements RequestMessage {
public final String streamId;
public StreamRequest(String streamId) {
this.streamId = streamId;
}
@Override
public Message.Type type() { return Type.StreamRequest; }
@Override
public int encodedLength() {
return Encoders.Strings.encodedLength(streamId);
}
@Override
public void encode(ByteBuf buf) {
Encoders.Strings.encode(buf, streamId);
}
public static StreamRequest decode(ByteBuf buf) {
String streamId = Encoders.Strings.decode(buf);
return new StreamRequest(streamId);
}
@Override
public int hashCode() {
return Objects.hashCode(streamId);
}
@Override
public boolean equals(Object other) {
if (other instanceof StreamRequest o) {
return streamId.equals(o.streamId);
}
return false;
}
@Override
public String toString() {
return "StreamRequest[streamId=" + streamId + "]";
}
}
| StreamRequest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ResourceUtils.java | {
"start": 8062,
"end": 9651
} | class ____ location)
* @return a corresponding File object
* @throws FileNotFoundException if the URL cannot be resolved to
* a file in the file system
*/
public static File getFile(URL resourceUrl, String description) throws FileNotFoundException {
Assert.notNull(resourceUrl, "Resource URL must not be null");
if (!URL_PROTOCOL_FILE.equals(resourceUrl.getProtocol())) {
throw new FileNotFoundException(
description + " cannot be resolved to absolute file path " +
"because it does not reside in the file system: " + resourceUrl);
}
try {
// URI decoding for special characters such as spaces.
return new File(toURI(resourceUrl).getSchemeSpecificPart());
}
catch (URISyntaxException ex) {
// Fallback for URLs that are not valid URIs (should hardly ever happen).
return new File(resourceUrl.getFile());
}
}
/**
* Resolve the given resource URI to a {@code java.io.File},
* i.e. to a file in the file system.
* @param resourceUri the resource URI to resolve
* @return a corresponding File object
* @throws FileNotFoundException if the URL cannot be resolved to
* a file in the file system
* @since 2.5
* @see #getFile(URI, String)
*/
public static File getFile(URI resourceUri) throws FileNotFoundException {
return getFile(resourceUri, "URI");
}
/**
* Resolve the given resource URI to a {@code java.io.File},
* i.e. to a file in the file system.
* @param resourceUri the resource URI to resolve
* @param description a description of the original resource that
* the URI was created for (for example, a | path |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java | {
"start": 2321,
"end": 3075
} | class ____ two types of ML metrics to the meter registry, such that they can be collected by Elastic APM.
* <p>
* 1. Per-node ML native memory statistics for ML nodes
* 2. Cluster-wide job/model statuses for master-eligible nodes
* <p>
* The memory metrics relate solely to the ML node they are collected from.
* <p>
* The job/model metrics are cluster-wide because a key problem we want to be able to detect is when there are
* jobs or models that are not assigned to any node. The consumer of the data needs to account for the fact that
* multiple master-eligible nodes are reporting the same information. The es.ml.is_master attribute in the records
* indicates which one was actually master, so can be used to deduplicate.
*/
public final | adds |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/InternalClusterInfoServiceSchedulingTests.java | {
"start": 2705,
"end": 11756
} | class ____ extends ESTestCase {
public void testScheduling() {
final DiscoveryNode discoveryNode = DiscoveryNodeUtils.create("test");
final DiscoveryNodes noMaster = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build();
final DiscoveryNodes localMaster = noMaster.withMasterNodeId(discoveryNode.getId());
final DiscoveryNode joiner = DiscoveryNodeUtils.create("joiner");
final DiscoveryNodes withJoiner = DiscoveryNodes.builder(localMaster).add(joiner).build();
final Settings.Builder settingsBuilder = Settings.builder()
.put(Node.NODE_NAME_SETTING.getKey(), discoveryNode.getName())
.put(InternalClusterInfoService.CLUSTER_ROUTING_ALLOCATION_ESTIMATED_HEAP_THRESHOLD_DECIDER_ENABLED.getKey(), true)
.put(ClusterApplierService.CLUSTER_APPLIER_THREAD_WATCHDOG_INTERVAL.getKey(), TimeValue.ZERO)
.put(
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_ENABLED_SETTING.getKey(),
randomBoolean()
? WriteLoadConstraintSettings.WriteLoadDeciderStatus.ENABLED
: WriteLoadConstraintSettings.WriteLoadDeciderStatus.LOW_THRESHOLD_ONLY
);
if (randomBoolean()) {
settingsBuilder.put(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), randomIntBetween(10000, 60000) + "ms");
}
final Settings settings = settingsBuilder.build();
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue();
final ThreadPool threadPool = deterministicTaskQueue.getThreadPool();
final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) {
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
return deterministicTaskQueue.getPrioritizedEsThreadPoolExecutor();
}
};
final MasterService masterService = new FakeThreadPoolMasterService("test", threadPool, r -> {
fail("master service should not run any tasks");
});
final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService);
final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool);
final EstimatedHeapUsageCollector mockEstimatedHeapUsageCollector = spy(new StubEstimatedEstimatedHeapUsageCollector());
final NodeUsageStatsForThreadPoolsCollector nodeUsageStatsForThreadPoolsCollector = spy(
new NodeUsageStatsForThreadPoolsCollector()
);
final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(
settings,
clusterService,
threadPool,
client,
mockEstimatedHeapUsageCollector,
nodeUsageStatsForThreadPoolsCollector
);
final WriteLoadConstraintMonitor usageMonitor = spy(
new WriteLoadConstraintMonitor(
clusterService.getClusterSettings(),
threadPool.relativeTimeInMillisSupplier(),
clusterService::state,
new RerouteService() {
@Override
public void reroute(String reason, Priority priority, ActionListener<Void> listener) {}
}
)
);
clusterInfoService.addListener(usageMonitor::onNewInfo);
clusterService.addListener(clusterInfoService);
clusterInfoService.addListener(ignored -> {});
clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService());
clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build());
masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish"));
masterService.setClusterStateSupplier(clusterApplierService::state);
clusterService.start();
final AtomicBoolean becameMaster1 = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"become master 1",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(),
setFlagOnSuccess(becameMaster1)
);
runUntilFlag(deterministicTaskQueue, becameMaster1);
// A node joins the cluster
{
Mockito.clearInvocations(mockEstimatedHeapUsageCollector, nodeUsageStatsForThreadPoolsCollector);
final int initialRequestCount = client.requestCount;
final AtomicBoolean nodeJoined = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"node joins",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(withJoiner).build(),
setFlagOnSuccess(nodeJoined)
);
// Don't use runUntilFlag because we don't want the scheduled task to run
deterministicTaskQueue.runAllRunnableTasks();
assertTrue(nodeJoined.get());
// Addition of node should have triggered refresh
// should have run two client requests: nodes stats request and indices stats request
assertThat(client.requestCount, equalTo(initialRequestCount + 2));
verify(mockEstimatedHeapUsageCollector).collectClusterHeapUsage(any()); // Should have polled for heap usage
verify(nodeUsageStatsForThreadPoolsCollector).collectUsageStats(any(), any(), any());
}
// ... then leaves
{
Mockito.clearInvocations(mockEstimatedHeapUsageCollector, nodeUsageStatsForThreadPoolsCollector);
final int initialRequestCount = client.requestCount;
final AtomicBoolean nodeLeft = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"node leaves",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(),
setFlagOnSuccess(nodeLeft)
);
// Don't use runUntilFlag because we don't want the scheduled task to run
deterministicTaskQueue.runAllRunnableTasks();
assertTrue(nodeLeft.get());
// departing nodes don't trigger refreshes
assertThat(client.requestCount, equalTo(initialRequestCount));
verifyNoInteractions(mockEstimatedHeapUsageCollector);
verifyNoInteractions(nodeUsageStatsForThreadPoolsCollector);
}
final AtomicBoolean failMaster1 = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"fail master 1",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(),
setFlagOnSuccess(failMaster1)
);
runUntilFlag(deterministicTaskQueue, failMaster1);
final AtomicBoolean becameMaster2 = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"become master 2",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(),
setFlagOnSuccess(becameMaster2)
);
runUntilFlag(deterministicTaskQueue, becameMaster2);
deterministicTaskQueue.runAllRunnableTasks();
for (int i = 0; i < 3; i++) {
Mockito.clearInvocations(mockEstimatedHeapUsageCollector);
Mockito.clearInvocations(nodeUsageStatsForThreadPoolsCollector);
final int initialRequestCount = client.requestCount;
final long duration = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis();
runFor(deterministicTaskQueue, duration);
deterministicTaskQueue.runAllRunnableTasks();
assertThat(client.requestCount, equalTo(initialRequestCount + 2)); // should have run two client requests per interval
verify(mockEstimatedHeapUsageCollector).collectClusterHeapUsage(any()); // Should poll for heap usage once per interval
verify(nodeUsageStatsForThreadPoolsCollector).collectUsageStats(any(), any(), any());
}
final AtomicBoolean failMaster2 = new AtomicBoolean();
clusterApplierService.onNewClusterState(
"fail master 2",
() -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(),
setFlagOnSuccess(failMaster2)
);
runUntilFlag(deterministicTaskQueue, failMaster2);
runFor(deterministicTaskQueue, INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis());
deterministicTaskQueue.runAllRunnableTasks();
assertFalse(deterministicTaskQueue.hasRunnableTasks());
assertFalse(deterministicTaskQueue.hasDeferredTasks());
}
private static | InternalClusterInfoServiceSchedulingTests |
java | google__error-prone | test_helpers/src/test/java/com/google/errorprone/BugCheckerRefactoringTestHelperTest.java | {
"start": 7743,
"end": 8301
} | class ____ extends NoSuch {}")
.expectUnchanged()
.doTest();
} catch (AssertionError e) {
assertThat(e).hasMessageThat().contains("error: cannot find symbol");
return;
}
fail("compilation succeeded unexpectedly");
}
@Test
public void staticLastImportOrder() {
BugCheckerRefactoringTestHelper.newInstance(ImportArrayList.class, getClass())
.setImportOrder("static-last")
.addInputLines(
"pkg/A.java",
"""
import static java.lang.Math.min;
| Test |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java | {
"start": 2002,
"end": 21287
} | class ____ extends Plugin {
@Override
public List<Setting<?>> getSettings() {
return List.of(
Setting.timeSetting("telemetry.agent.metrics_interval", TimeValue.timeValueSeconds(0), Setting.Property.NodeScope)
);
}
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class, FailingFieldPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put("telemetry.agent.metrics_interval", TimeValue.timeValueSeconds(0)) // disable metrics cache refresh delay
.build();
}
static final String STANDARD_INDEX_COUNT = "es.indices.standard.total";
static final String STANDARD_BYTES_SIZE = "es.indices.standard.size";
static final String STANDARD_DOCS_COUNT = "es.indices.standard.docs.total";
static final String STANDARD_QUERY_COUNT = "es.indices.standard.query.total";
static final String STANDARD_QUERY_TIME = "es.indices.standard.query.time";
static final String STANDARD_QUERY_FAILURE = "es.indices.standard.query.failure.total";
static final String STANDARD_FETCH_COUNT = "es.indices.standard.fetch.total";
static final String STANDARD_FETCH_TIME = "es.indices.standard.fetch.time";
static final String STANDARD_FETCH_FAILURE = "es.indices.standard.fetch.failure.total";
static final String STANDARD_INDEXING_COUNT = "es.indices.standard.indexing.total";
static final String STANDARD_INDEXING_TIME = "es.indices.standard.indexing.time";
static final String STANDARD_INDEXING_FAILURE = "es.indices.standard.indexing.failure.total";
static final String STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.standard.indexing.failure.version_conflict.total";
static final String TIME_SERIES_INDEX_COUNT = "es.indices.time_series.total";
static final String TIME_SERIES_BYTES_SIZE = "es.indices.time_series.size";
static final String TIME_SERIES_DOCS_COUNT = "es.indices.time_series.docs.total";
static final String TIME_SERIES_QUERY_COUNT = "es.indices.time_series.query.total";
static final String TIME_SERIES_QUERY_TIME = "es.indices.time_series.query.time";
static final String TIME_SERIES_QUERY_FAILURE = "es.indices.time_series.query.failure.total";
static final String TIME_SERIES_FETCH_COUNT = "es.indices.time_series.fetch.total";
static final String TIME_SERIES_FETCH_TIME = "es.indices.time_series.fetch.time";
static final String TIME_SERIES_FETCH_FAILURE = "es.indices.time_series.fetch.failure.total";
static final String TIME_SERIES_INDEXING_COUNT = "es.indices.time_series.indexing.total";
static final String TIME_SERIES_INDEXING_TIME = "es.indices.time_series.indexing.time";
static final String TIME_SERIES_INDEXING_FAILURE = "es.indices.time_series.indexing.failure.total";
static final String TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT =
"es.indices.time_series.indexing.failure.version_conflict.total";
static final String LOGSDB_INDEX_COUNT = "es.indices.logsdb.total";
static final String LOGSDB_BYTES_SIZE = "es.indices.logsdb.size";
static final String LOGSDB_DOCS_COUNT = "es.indices.logsdb.docs.total";
static final String LOGSDB_QUERY_COUNT = "es.indices.logsdb.query.total";
static final String LOGSDB_QUERY_TIME = "es.indices.logsdb.query.time";
static final String LOGSDB_QUERY_FAILURE = "es.indices.logsdb.query.failure.total";
static final String LOGSDB_FETCH_COUNT = "es.indices.logsdb.fetch.total";
static final String LOGSDB_FETCH_TIME = "es.indices.logsdb.fetch.time";
static final String LOGSDB_FETCH_FAILURE = "es.indices.logsdb.fetch.failure.total";
static final String LOGSDB_INDEXING_COUNT = "es.indices.logsdb.indexing.total";
static final String LOGSDB_INDEXING_TIME = "es.indices.logsdb.indexing.time";
static final String LOGSDB_INDEXING_FAILURE = "es.indices.logsdb.indexing.failure.total";
static final String LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.logsdb.indexing.failure.version_conflict.total";
public void testIndicesMetrics() {
String indexNode = internalCluster().startNode();
ensureStableCluster(1);
TestTelemetryPlugin telemetry = internalCluster().getInstance(PluginsService.class, indexNode)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, indexNode);
var indexing0 = indicesService.stats(CommonStatsFlags.ALL, false).getIndexing().getTotal();
telemetry.resetMeter();
long numStandardIndices = randomIntBetween(1, 5);
long numStandardDocs = populateStandardIndices(numStandardIndices);
var indexing1 = indicesService.stats(CommonStatsFlags.ALL, false).getIndexing().getTotal();
collectThenAssertMetrics(
telemetry,
1,
Map.of(
STANDARD_INDEX_COUNT,
equalTo(numStandardIndices),
STANDARD_DOCS_COUNT,
equalTo(numStandardDocs),
STANDARD_BYTES_SIZE,
greaterThan(0L),
STANDARD_INDEXING_COUNT,
equalTo(numStandardDocs),
STANDARD_INDEXING_TIME,
greaterThanOrEqualTo(0L),
STANDARD_INDEXING_FAILURE,
equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()),
STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount())
)
);
long numTimeSeriesIndices = randomIntBetween(1, 5);
long numTimeSeriesDocs = populateTimeSeriesIndices(numTimeSeriesIndices);
var indexing2 = indicesService.stats(CommonStatsFlags.ALL, false).getIndexing().getTotal();
collectThenAssertMetrics(
telemetry,
2,
Map.of(
TIME_SERIES_INDEX_COUNT,
equalTo(numTimeSeriesIndices),
TIME_SERIES_DOCS_COUNT,
equalTo(numTimeSeriesDocs),
TIME_SERIES_BYTES_SIZE,
greaterThan(20L),
TIME_SERIES_INDEXING_COUNT,
equalTo(numTimeSeriesDocs),
TIME_SERIES_INDEXING_TIME,
greaterThanOrEqualTo(0L),
TIME_SERIES_INDEXING_FAILURE,
equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()),
TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount())
)
);
long numLogsdbIndices = randomIntBetween(1, 5);
long numLogsdbDocs = populateLogsdbIndices(numLogsdbIndices);
var indexing3 = indicesService.stats(CommonStatsFlags.ALL, false).getIndexing().getTotal();
collectThenAssertMetrics(
telemetry,
3,
Map.of(
LOGSDB_INDEX_COUNT,
equalTo(numLogsdbIndices),
LOGSDB_DOCS_COUNT,
equalTo(numLogsdbDocs),
LOGSDB_BYTES_SIZE,
greaterThan(0L),
LOGSDB_INDEXING_COUNT,
equalTo(numLogsdbDocs),
LOGSDB_INDEXING_TIME,
greaterThanOrEqualTo(0L),
LOGSDB_INDEXING_FAILURE,
equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()),
LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(indexing3.getIndexFailedDueToVersionConflictCount() - indexing2.getIndexFailedDueToVersionConflictCount())
)
);
// already collected indexing stats
Map<String, Matcher<Long>> zeroMatchers = new HashMap<>();
zeroMatchers.putAll(
Map.of(
STANDARD_INDEXING_COUNT,
equalTo(0L),
STANDARD_INDEXING_TIME,
equalTo(0L),
STANDARD_INDEXING_FAILURE,
equalTo(0L),
STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(0L)
)
);
zeroMatchers.putAll(
Map.of(
TIME_SERIES_INDEXING_COUNT,
equalTo(0L),
TIME_SERIES_INDEXING_TIME,
equalTo(0L),
TIME_SERIES_INDEXING_FAILURE,
equalTo(0L),
TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(0L)
)
);
zeroMatchers.putAll(
Map.of(
LOGSDB_INDEXING_COUNT,
equalTo(0L),
LOGSDB_INDEXING_TIME,
equalTo(0L),
LOGSDB_INDEXING_FAILURE,
equalTo(0L),
LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
equalTo(0L)
)
);
collectThenAssertMetrics(telemetry, 4, zeroMatchers);
String searchNode = internalCluster().startDataOnlyNode();
indicesService = internalCluster().getInstance(IndicesService.class, searchNode);
telemetry = internalCluster().getInstance(PluginsService.class, searchNode)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
ensureGreen("st*", "log*", "time*");
// search and fetch
String preference = "_only_local";
client(searchNode).prepareSearch("standard*").setPreference(preference).setSize(100).get().decRef();
var search1 = indicesService.stats(CommonStatsFlags.ALL, false).getSearch().getTotal();
collectThenAssertMetrics(
telemetry,
1,
Map.of(
STANDARD_QUERY_COUNT,
equalTo(numStandardIndices),
STANDARD_QUERY_TIME,
equalTo(search1.getQueryTimeInMillis()),
STANDARD_FETCH_COUNT,
equalTo(search1.getFetchCount()),
STANDARD_FETCH_TIME,
equalTo(search1.getFetchTimeInMillis()),
TIME_SERIES_QUERY_COUNT,
equalTo(0L),
TIME_SERIES_QUERY_TIME,
equalTo(0L),
LOGSDB_QUERY_COUNT,
equalTo(0L),
LOGSDB_QUERY_TIME,
equalTo(0L)
)
);
client(searchNode).prepareSearch("time*").setPreference(preference).setSize(100).get().decRef();
var search2 = indicesService.stats(CommonStatsFlags.ALL, false).getSearch().getTotal();
collectThenAssertMetrics(
telemetry,
2,
Map.of(
STANDARD_QUERY_COUNT,
equalTo(0L),
STANDARD_QUERY_TIME,
equalTo(0L),
TIME_SERIES_QUERY_COUNT,
equalTo(numTimeSeriesIndices),
TIME_SERIES_QUERY_TIME,
equalTo(search2.getQueryTimeInMillis() - search1.getQueryTimeInMillis()),
TIME_SERIES_FETCH_COUNT,
equalTo(search2.getFetchCount() - search1.getFetchCount()),
TIME_SERIES_FETCH_TIME,
equalTo(search2.getFetchTimeInMillis() - search1.getFetchTimeInMillis()),
LOGSDB_QUERY_COUNT,
equalTo(0L),
LOGSDB_QUERY_TIME,
equalTo(0L)
)
);
client(searchNode).prepareSearch("logs*").setPreference(preference).setSize(100).get().decRef();
var search3 = indicesService.stats(CommonStatsFlags.ALL, false).getSearch().getTotal();
collectThenAssertMetrics(
telemetry,
3,
Map.of(
STANDARD_QUERY_COUNT,
equalTo(0L),
STANDARD_QUERY_TIME,
equalTo(0L),
TIME_SERIES_QUERY_COUNT,
equalTo(0L),
TIME_SERIES_QUERY_TIME,
equalTo(0L),
LOGSDB_QUERY_COUNT,
equalTo(numLogsdbIndices),
LOGSDB_QUERY_TIME,
equalTo(search3.getQueryTimeInMillis() - search2.getQueryTimeInMillis()),
LOGSDB_FETCH_COUNT,
equalTo(search3.getFetchCount() - search2.getFetchCount()),
LOGSDB_FETCH_TIME,
equalTo(search3.getFetchTimeInMillis() - search2.getFetchTimeInMillis())
)
);
// search failures
expectThrows(
Exception.class,
() -> { client(searchNode).prepareSearch("logs*").setPreference(preference).setRuntimeMappings(parseMapping("""
{
"fail_me": {
"type": "long",
"script": {"source": "<>", "lang": "failing_field"}
}
}
""")).setQuery(new RangeQueryBuilder("fail_me").gte(0)).setAllowPartialSearchResults(true).get(); }
);
collectThenAssertMetrics(
telemetry,
4,
Map.of(
STANDARD_QUERY_FAILURE,
equalTo(0L),
STANDARD_FETCH_FAILURE,
equalTo(0L),
TIME_SERIES_QUERY_FAILURE,
equalTo(0L),
TIME_SERIES_FETCH_FAILURE,
equalTo(0L),
LOGSDB_QUERY_FAILURE,
equalTo(numLogsdbIndices),
LOGSDB_FETCH_FAILURE,
equalTo(0L)
)
);
verifyStatsPerIndexMode(
Map.of(IndexMode.STANDARD, numStandardDocs, IndexMode.LOGSDB, numLogsdbDocs, IndexMode.TIME_SERIES, numTimeSeriesDocs)
);
}
void collectThenAssertMetrics(TestTelemetryPlugin telemetry, int times, Map<String, Matcher<Long>> matchers) {
telemetry.collect();
for (Map.Entry<String, Matcher<Long>> e : matchers.entrySet()) {
String name = e.getKey();
List<Measurement> measurements = telemetry.getLongGaugeMeasurement(name);
assertThat(name, measurements, hasSize(times));
assertThat(name, measurements.getLast().getLong(), e.getValue());
}
}
int populateStandardIndices(long numIndices) {
int totalDocs = 0;
for (int i = 0; i < numIndices; i++) {
String indexName = "standard-" + i;
createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build());
int numDocs = between(1, 5);
for (int d = 0; d < numDocs; d++) {
indexDoc(indexName, Integer.toString(d), "f", Integer.toString(d));
}
totalDocs += numDocs;
flush(indexName);
}
return totalDocs;
}
int populateTimeSeriesIndices(long numIndices) {
int totalDocs = 0;
for (int i = 0; i < numIndices; i++) {
String indexName = "time_series-" + i;
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put("mode", "time_series")
.putList("routing_path", List.of("host"))
.build();
client().admin()
.indices()
.prepareCreate(indexName)
.setSettings(settings)
.setMapping(
"@timestamp",
"type=date",
"host",
"type=keyword,time_series_dimension=true",
"cpu",
"type=long,time_series_metric=gauge"
)
.get();
long timestamp = DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-04-15T00:00:00Z");
int numDocs = between(1, 5);
for (int d = 0; d < numDocs; d++) {
timestamp += between(1, 5) * 1000L;
client().prepareIndex(indexName)
.setSource("@timestamp", timestamp, "host", randomFrom("prod", "qa"), "cpu", randomIntBetween(1, 100))
.get();
}
totalDocs += numDocs;
flush(indexName);
refresh(indexName);
}
return totalDocs;
}
int populateLogsdbIndices(long numIndices) {
int totalDocs = 0;
for (int i = 0; i < numIndices; i++) {
String indexName = "logsdb-" + i;
Settings settings = Settings.builder().put("mode", "logsdb").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build();
client().admin()
.indices()
.prepareCreate(indexName)
.setSettings(settings)
.setMapping("@timestamp", "type=date", "host.name", "type=keyword", "cpu", "type=long")
.get();
long timestamp = DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-04-15T00:00:00Z");
int numDocs = between(1, 5);
for (int d = 0; d < numDocs; d++) {
timestamp += between(1, 5) * 1000L;
client().prepareIndex(indexName)
.setSource("@timestamp", timestamp, "host.name", randomFrom("prod", "qa"), "cpu", randomIntBetween(1, 100))
.get();
}
int numFailures = between(0, 2);
for (int d = 0; d < numFailures; d++) {
expectThrows(Exception.class, () -> {
client().prepareIndex(indexName)
.setSource(
"@timestamp",
"malformed-timestamp",
"host.name",
randomFrom("prod", "qa"),
"cpu",
randomIntBetween(1, 100)
)
.get();
});
}
totalDocs += numDocs;
flush(indexName);
refresh(indexName);
}
return totalDocs;
}
private void verifyStatsPerIndexMode(Map<IndexMode, Long> expectedDocs) {
var nodes = clusterService().state().nodes().stream().toArray(DiscoveryNode[]::new);
var request = new IndexModeStatsActionType.StatsRequest(nodes);
var resp = client().execute(IndexModeStatsActionType.TYPE, request).actionGet();
var stats = resp.stats();
for (Map.Entry<IndexMode, Long> e : expectedDocs.entrySet()) {
assertThat(stats.get(e.getKey()).numDocs(), equalTo(e.getValue()));
}
}
private Map<String, Object> parseMapping(String mapping) throws IOException {
try (XContentParser parser = createParser(JsonXContent.jsonXContent, mapping)) {
return parser.map();
}
}
}
| TestAPMInternalSettings |
java | google__guava | android/guava/src/com/google/common/collect/Streams.java | {
"start": 31616,
"end": 33206
} | interface ____<R extends @Nullable Object> {
/** Applies this function to the given argument and its index within a stream. */
@ParametricNullness
R apply(double from, long index);
}
/**
* Returns the last element of the specified stream, or {@link java.util.Optional#empty} if the
* stream is empty.
*
* <p>Equivalent to {@code stream.reduce((a, b) -> b)}, but may perform significantly better. This
* method's runtime will be between O(log n) and O(n), performing better on <a
* href="http://gee.cs.oswego.edu/dl/html/StreamParallelGuidance.html">efficiently splittable</a>
* streams.
*
* <p>If the stream has nondeterministic order, this has equivalent semantics to {@link
* Stream#findAny} (which you might as well use).
*
* @see Stream#findFirst()
* @throws NullPointerException if the last element of the stream is null
*/
/*
* By declaring <T> instead of <T extends @Nullable Object>, we declare this method as requiring a
* stream whose elements are non-null. However, the method goes out of its way to still handle
* nulls in the stream. This means that the method can safely be used with a stream that contains
* nulls as long as the *last* element is *not* null.
*
* (To "go out of its way," the method tracks a `set` bit so that it can distinguish "the final
* split has a last element of null, so throw NPE" from "the final split was empty, so look for an
* element in the prior one.")
*/
public static <T> java.util.Optional<T> findLast(Stream<T> stream) {
final | DoubleFunctionWithIndex |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/ObjectSerializerTest.java | {
"start": 372,
"end": 811
} | class ____ extends TestCase {
public void test_serialize() throws Exception {
SerializeConfig config = new SerializeConfig();
config.put(ResultCode.class, new ResultCodeSerilaizer());
Result result = new Result();
result.code = ResultCode.SIGN_ERROR;
String json = JSON.toJSONString(result, config);
Assert.assertEquals("{\"code\":17}", json);
}
public static | ObjectSerializerTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLMatchAgainstExpr.java | {
"start": 950,
"end": 2667
} | class ____ extends SQLExprImpl implements SQLReplaceable {
private List<SQLExpr> columns = new ArrayList<SQLExpr>();
private SQLExpr against;
private SearchModifier searchModifier;
public SQLMatchAgainstExpr() {
}
public SQLMatchAgainstExpr clone() {
SQLMatchAgainstExpr x = new SQLMatchAgainstExpr();
for (SQLExpr column : columns) {
SQLExpr column2 = column.clone();
column2.setParent(x);
x.columns.add(column2);
}
if (against != null) {
x.setAgainst(against.clone());
}
x.searchModifier = searchModifier;
return x;
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (this.against == expr) {
setAgainst(target);
return true;
}
for (int i = 0; i < columns.size(); i++) {
if (columns.get(i) == expr) {
target.setParent(this);
columns.set(i, target);
return true;
}
}
return false;
}
public List<SQLExpr> getColumns() {
return columns;
}
public void setColumns(List<SQLExpr> columns) {
this.columns = columns;
}
public SQLExpr getAgainst() {
return against;
}
public void setAgainst(SQLExpr against) {
if (against != null) {
against.setParent(this);
}
this.against = against;
}
public SearchModifier getSearchModifier() {
return searchModifier;
}
public void setSearchModifier(SearchModifier searchModifier) {
this.searchModifier = searchModifier;
}
public static | SQLMatchAgainstExpr |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/UniqueIdSelector.java | {
"start": 1135,
"end": 2348
} | class ____ implements DiscoverySelector {
private final UniqueId uniqueId;
UniqueIdSelector(UniqueId uniqueId) {
this.uniqueId = uniqueId;
}
/**
* Get the selected {@link UniqueId}.
*/
public UniqueId getUniqueId() {
return this.uniqueId;
}
/**
* @since 1.3
*/
@API(status = STABLE, since = "1.3")
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UniqueIdSelector that = (UniqueIdSelector) o;
return Objects.equals(this.uniqueId, that.uniqueId);
}
/**
* @since 1.3
*/
@API(status = STABLE, since = "1.3")
@Override
public int hashCode() {
return this.uniqueId.hashCode();
}
@Override
public String toString() {
return new ToStringBuilder(this).append("uniqueId", this.uniqueId).toString();
}
@Override
public Optional<DiscoverySelectorIdentifier> toIdentifier() {
return Optional.of(DiscoverySelectorIdentifier.create(IdentifierParser.PREFIX, this.uniqueId.toString()));
}
/**
* The {@link DiscoverySelectorIdentifierParser} for
* {@link UniqueIdSelector UniqueIdSelectors}.
*/
@API(status = INTERNAL, since = "1.11")
public static | UniqueIdSelector |
java | elastic__elasticsearch | libs/lz4/src/test/java/org/elasticsearch/lz4/AbstractLZ4TestCase.java | {
"start": 9686,
"end": 10008
} | interface ____<T> extends TesterBase<T> {
int compress(LZ4Compressor compressor, T src, T dest);
int decompress(LZ4FastDecompressor decompressor, T src, T dest);
int decompress(LZ4SafeDecompressor decompressor, T src, T dest);
// Modified to remove redundant modifiers
| SrcDestTester |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/injection/guice/Guice.java | {
"start": 1694,
"end": 2415
} | class ____ {
private Guice() {}
/**
* Creates an injector for the given set of modules.
*
* @throws CreationException if one or more errors occur during Injector
* construction
*/
public static Injector createInjector(Module... modules) {
return createInjector(Arrays.asList(modules));
}
/**
* Creates an injector for the given set of modules.
*
* @throws CreationException if one or more errors occur during Injector
* creation
*/
public static Injector createInjector(Iterable<? extends Module> modules) {
return new InjectorBuilder().addModules(modules).build();
}
}
| Guice |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/TypeExtractorTest.java | {
"start": 44347,
"end": 45255
} | class ____<T> extends RichMapFunction<T, T> {
private static final long serialVersionUID = 1L;
@Override
public T map(T value) throws Exception {
return null;
}
}
@Test
void testFunctionDependingOnInputFromInput() {
IdentityMapper<Boolean> function = new IdentityMapper<Boolean>();
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(function, BasicTypeInfo.BOOLEAN_TYPE_INFO);
assertThat(ti.isBasicType()).isTrue();
assertThat(ti).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
}
@Test
void testFunctionDependingOnInputWithMissingInput() {
IdentityMapper<Boolean> function = new IdentityMapper<Boolean>();
assertThatThrownBy(() -> TypeExtractor.getMapReturnTypes(function, null))
.isInstanceOf(InvalidTypesException.class);
}
public | IdentityMapper |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/metrics/MetricsTrackingListState.java | {
"start": 8048,
"end": 10975
} | class ____ extends StateMetricBase {
private static final String LIST_STATE_GET_LATENCY = "listStateGetLatency";
private static final String LIST_STATE_ADD_LATENCY = "listStateAddLatency";
private static final String LIST_STATE_ADD_ALL_LATENCY = "listStateAddAllLatency";
private static final String LIST_STATE_UPDATE_LATENCY = "listStateUpdateLatency";
private static final String LIST_STATE_MERGE_NAMESPACES_LATENCY =
"listStateMergeNamespacesLatency";
private static final String LIST_STATE_GET_KEY_SIZE = "listStateGetKeySize";
private static final String LIST_STATE_GET_VALUE_SIZE = "listStateGetValueSize";
private static final String LIST_STATE_ADD_KEY_SIZE = "listStateAddKeySize";
private static final String LIST_STATE_ADD_VALUE_SIZE = "listStateAddValueSize";
private static final String LIST_STATE_ADD_ALL_KEY_SIZE = "listStateAddAllKeySize";
private static final String LIST_STATE_ADD_ALL_VALUE_SIZE = "listStateAddAllValueSize";
private static final String LIST_STATE_UPDATE_KEY_SIZE = "listStateUpdateKeySize";
private static final String LIST_STATE_UPDATE_VALUE_SIZE = "listStateUpdateValueSize";
private int getCount = 0;
private int addCount = 0;
private int addAllCount = 0;
private int updateCount = 0;
private int mergeNamespaceCount = 0;
private ListStateMetrics(
String stateName,
MetricGroup metricGroup,
int sampleInterval,
int historySize,
boolean stateNameAsVariable) {
super(stateName, metricGroup, sampleInterval, historySize, stateNameAsVariable);
}
int getGetCount() {
return getCount;
}
int getAddCount() {
return addCount;
}
int getAddAllCount() {
return addAllCount;
}
int getUpdateCount() {
return updateCount;
}
int getMergeNamespaceCount() {
return mergeNamespaceCount;
}
private boolean trackMetricsOnGet() {
getCount = loopUpdateCounter(getCount);
return getCount == 1;
}
private boolean trackMetricsOnAdd() {
addCount = loopUpdateCounter(addCount);
return addCount == 1;
}
private boolean trackMetricsOnAddAll() {
addAllCount = loopUpdateCounter(addAllCount);
return addAllCount == 1;
}
private boolean trackMetricsOnUpdate() {
updateCount = loopUpdateCounter(updateCount);
return updateCount == 1;
}
private boolean trackMetricsOnMergeNamespace() {
mergeNamespaceCount = loopUpdateCounter(mergeNamespaceCount);
return mergeNamespaceCount == 1;
}
}
}
| ListStateMetrics |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/serializer/EnumerationSerializer.java | {
"start": 184,
"end": 1806
} | class ____ implements ObjectSerializer {
public static EnumerationSerializer instance = new EnumerationSerializer();
public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType, int features) throws IOException {
SerializeWriter out = serializer.out;
if (object == null) {
out.writeNull(SerializerFeature.WriteNullListAsEmpty);
return;
}
Type elementType = null;
if (out.isEnabled(SerializerFeature.WriteClassName)) {
if (fieldType instanceof ParameterizedType) {
ParameterizedType param = (ParameterizedType) fieldType;
elementType = param.getActualTypeArguments()[0];
}
}
Enumeration<?> e = (Enumeration<?>) object;
SerialContext context = serializer.context;
serializer.setContext(context, object, fieldName, 0);
try {
int i = 0;
out.append('[');
while (e.hasMoreElements()) {
Object item = e.nextElement();
if (i++ != 0) {
out.append(',');
}
if (item == null) {
out.writeNull();
continue;
}
ObjectSerializer itemSerializer = serializer.getObjectWriter(item.getClass());
itemSerializer.write(serializer, item, i - 1, elementType, 0);
}
out.append(']');
} finally {
serializer.context = context;
}
}
}
| EnumerationSerializer |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/internal/impl/SisuDiBridgeModule.java | {
"start": 3859,
"end": 5113
} | class ____ extends InjectorImpl {
final Provider<BeanLocator> locator;
final Binder binder;
BridgeInjectorImpl(Provider<BeanLocator> locator, Binder binder) {
this.locator = locator;
this.binder = binder;
}
@Override
protected <U> Injector bind(Key<U> key, Binding<U> binding) {
super.bind(key, binding);
if (key.getQualifier() != null) {
com.google.inject.Key<U> k = toGuiceKey(key);
this.binder.bind(k).toProvider(new BridgeProvider<>(binding));
}
return this;
}
@SuppressWarnings("unchecked")
private static <U> com.google.inject.Key<U> toGuiceKey(Key<U> key) {
if (key.getQualifier() instanceof String s) {
return (com.google.inject.Key<U>) com.google.inject.Key.get(key.getType(), Names.named(s));
} else if (key.getQualifier() instanceof Annotation a) {
return (com.google.inject.Key<U>) com.google.inject.Key.get(key.getType(), a);
} else {
return (com.google.inject.Key<U>) com.google.inject.Key.get(key.getType(), Named.class);
}
}
static | BridgeInjectorImpl |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/OverridePropertiesTest.java | {
"start": 2531,
"end": 2759
} | class ____ {
@Test
void shouldSupportNestedTest() throws Exception {
assertEquals("John", name);
assertEquals("Willow", name2);
}
}
}
}
| SuperNestedTest |
java | micronaut-projects__micronaut-core | inject-java/src/main/java/io/micronaut/annotation/processing/visitor/JavaPackageElement.java | {
"start": 967,
"end": 2151
} | class ____ extends AbstractJavaElement implements io.micronaut.inject.ast.PackageElement {
private final PackageElement element;
/**
* @param element The {@link PackageElement}
* @param annotationMetadataFactory The annotation metadata factory
* @param visitorContext The Java visitor context
*/
public JavaPackageElement(PackageElement element,
ElementAnnotationMetadataFactory annotationMetadataFactory,
JavaVisitorContext visitorContext) {
super(new JavaNativeElement.Package(element), annotationMetadataFactory, visitorContext);
this.element = element;
}
@Override
protected AbstractJavaElement copyThis() {
return new JavaPackageElement(element, elementAnnotationMetadataFactory, visitorContext);
}
@NonNull
@Override
public String getName() {
return element.getQualifiedName().toString();
}
@NonNull
@Override
public String getSimpleName() {
return element.getSimpleName().toString();
}
@Override
public boolean isUnnamed() {
return element.isUnnamed();
}
}
| JavaPackageElement |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/Logger.java | {
"start": 1728,
"end": 1955
} | class ____ {
* private static final Logger LOGGER = LogManager.getLogger();
* // ...
* }
* </pre>
* <p>
* For ease of filtering, searching, sorting, etc., it is generally a good idea to create Loggers for each | MyClass |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/config/ConfigMappingInvalidTest.java | {
"start": 5784,
"end": 5954
} | interface ____ {
@Size(min = 5)
String host();
@Min(8000)
int port();
}
}
| Origin |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/DataOutputStream.java | {
"start": 1048,
"end": 1438
} | class ____ extends OutputStream implements DataOutput {
/**
* Check whether this buffer has enough space to store length of bytes
*
* @param length length of bytes
*/
public abstract boolean shortOfSpace(int length) throws IOException;
/**
* Check whether there is unflushed data stored in the stream
*/
public abstract boolean hasUnFlushedData();
}
| DataOutputStream |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/bcextensions/AnnotationInfoImpl.java | {
"start": 357,
"end": 2837
} | class ____ implements AnnotationInfo {
final org.jboss.jandex.IndexView jandexIndex;
final org.jboss.jandex.MutableAnnotationOverlay annotationOverlay;
final org.jboss.jandex.AnnotationInstance jandexAnnotation;
AnnotationInfoImpl(org.jboss.jandex.IndexView jandexIndex, org.jboss.jandex.MutableAnnotationOverlay annotationOverlay,
org.jboss.jandex.AnnotationInstance jandexAnnotation) {
this.jandexIndex = jandexIndex;
this.annotationOverlay = annotationOverlay;
this.jandexAnnotation = jandexAnnotation;
}
@Override
public ClassInfo declaration() {
DotName annotationClassName = jandexAnnotation.name();
org.jboss.jandex.ClassInfo annotationClass = jandexIndex.getClassByName(annotationClassName);
if (annotationClass == null) {
throw new IllegalStateException("Class " + annotationClassName + " not found in Jandex");
}
return new ClassInfoImpl(jandexIndex, annotationOverlay, annotationClass);
}
@Override
public boolean hasMember(String name) {
return jandexAnnotation.valueWithDefault(jandexIndex, name) != null;
}
@Override
public AnnotationMember member(String name) {
return new AnnotationMemberImpl(jandexIndex, annotationOverlay,
jandexAnnotation.valueWithDefault(jandexIndex, name));
}
@Override
public Map<String, AnnotationMember> members() {
Map<String, AnnotationMember> result = new HashMap<>();
for (org.jboss.jandex.AnnotationValue jandexAnnotationMember : jandexAnnotation.valuesWithDefaults(jandexIndex)) {
result.put(jandexAnnotationMember.name(),
new AnnotationMemberImpl(jandexIndex, annotationOverlay, jandexAnnotationMember));
}
return Collections.unmodifiableMap(result);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
AnnotationInfoImpl that = (AnnotationInfoImpl) o;
return Objects.equals(jandexAnnotation.name(), that.jandexAnnotation.name())
&& Objects.equals(members(), that.members());
}
@Override
public int hashCode() {
return Objects.hash(jandexAnnotation.name(), members());
}
@Override
public String toString() {
return jandexAnnotation.toString(false);
}
}
| AnnotationInfoImpl |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/RequestSlotMatchingStrategy.java | {
"start": 1766,
"end": 2438
} | class ____ {
private final PendingRequest pendingRequest;
private final PhysicalSlot matchedSlot;
private RequestSlotMatch(PendingRequest pendingRequest, PhysicalSlot matchedSlot) {
this.pendingRequest = pendingRequest;
this.matchedSlot = matchedSlot;
}
PhysicalSlot getSlot() {
return matchedSlot;
}
PendingRequest getPendingRequest() {
return pendingRequest;
}
static RequestSlotMatch createFor(PendingRequest pendingRequest, PhysicalSlot newSlot) {
return new RequestSlotMatch(pendingRequest, newSlot);
}
}
}
| RequestSlotMatch |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeoutType.java | {
"start": 1051,
"end": 1445
} | enum ____ {
/**
* <p>
* Timeout imposed on overall application life time. It includes actual
* run-time plus non-runtime. Non-runtime delays are time elapsed by scheduler
* to allocate container, time taken to store in RMStateStore and etc.
* </p>
* If this is set, then timeout monitoring start from application submission
* time.
*/
LIFETIME;
}
| ApplicationTimeoutType |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/reservedstate/ReservedStateHandlerProvider.java | {
"start": 582,
"end": 711
} | interface ____ supplying {@link ReservedClusterStateHandler} implementations to Elasticsearch
* from plugins/modules.
*/
public | for |
java | resilience4j__resilience4j | resilience4j-spring-cloud2/src/main/java/io/github/resilience4j/retry/autoconfigure/RefreshScopedRetryAutoConfiguration.java | {
"start": 1400,
"end": 2598
} | class ____ {
private final RetryConfiguration retryConfiguration;
protected RefreshScopedRetryAutoConfiguration() {
this.retryConfiguration = new RetryConfiguration();
}
/**
* @param retryConfigurationProperties retry spring configuration properties
* @param retryEventConsumerRegistry the retry event consumer registry
* @return the RefreshScoped RetryRegistry
*/
@Bean
@org.springframework.cloud.context.config.annotation.RefreshScope
@ConditionalOnMissingBean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
RegistryEventConsumer<Retry> retryRegistryEventConsumer,
@Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
return retryConfiguration
.retryRegistry(retryConfigurationProperties, retryEventConsumerRegistry,
retryRegistryEventConsumer, compositeRetryCustomizer);
}
}
| RefreshScopedRetryAutoConfiguration |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/artifact/resolver/filter/AndArtifactFilter.java | {
"start": 1082,
"end": 2360
} | class ____ implements ArtifactFilter {
private Set<ArtifactFilter> filters;
public AndArtifactFilter() {
this.filters = new LinkedHashSet<>();
}
public AndArtifactFilter(List<ArtifactFilter> filters) {
this.filters = new LinkedHashSet<>(filters);
}
@Override
public boolean include(Artifact artifact) {
boolean include = true;
for (Iterator<ArtifactFilter> i = filters.iterator(); i.hasNext() && include; ) {
ArtifactFilter filter = i.next();
if (!filter.include(artifact)) {
include = false;
}
}
return include;
}
public void add(ArtifactFilter artifactFilter) {
filters.add(artifactFilter);
}
public List<ArtifactFilter> getFilters() {
return new ArrayList<>(filters);
}
@Override
public int hashCode() {
int hash = 17;
hash = hash * 31 + filters.hashCode();
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof AndArtifactFilter other) {
return filters.equals(other.filters);
} else {
return false;
}
}
}
| AndArtifactFilter |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/convert/ConvertingAbstractSerializer795Test.java | {
"start": 1398,
"end": 1568
} | class ____ {
final String value;
public NonAbstractCustomType(String v) {
this.value = v;
}
}
public static | NonAbstractCustomType |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/DefaultCreatorDetection4584Test.java | {
"start": 2820,
"end": 9582
} | class ____ extends ImplicitNameIntrospector
{
private static final long serialVersionUID = 1L;
private final Class<?>[] _argTypes;
private JsonCreator.Mode _mode;
private final String _factoryName;
public PrimaryCreatorFindingIntrospector(JsonCreator.Mode mode,
Class<?>... argTypes) {
_mode = mode;
_factoryName = null;
_argTypes = argTypes;
}
public PrimaryCreatorFindingIntrospector(JsonCreator.Mode mode,
String factoryName) {
_mode = mode;
_factoryName = factoryName;
_argTypes = new Class<?>[0];
}
@Override
public PotentialCreator findPreferredCreator(MapperConfig<?> config,
AnnotatedClass valueClass,
List<PotentialCreator> declaredConstructors,
List<PotentialCreator> declaredFactories,
Optional<PotentialCreator> zeroParamsConstructor)
{
// Apply to all test POJOs here but nothing else
if (!valueClass.getRawType().toString().contains("4584")) {
return null;
}
if (_factoryName != null) {
for (PotentialCreator ctor : declaredFactories) {
if (ctor.creator().getName().equals(_factoryName)) {
return ctor;
}
}
return null;
}
List<PotentialCreator> combo = new ArrayList<>(declaredConstructors);
combo.addAll(declaredFactories);
final int argCount = _argTypes.length;
for (PotentialCreator ctor : combo) {
if (ctor.paramCount() == argCount) {
int i = 0;
for (; i < argCount; ++i) {
if (_argTypes[i] != ctor.param(i).getRawType()) {
break;
}
}
if (i == argCount) {
ctor.overrideMode(_mode);
return ctor;
}
}
}
return null;
}
}
/*
/**********************************************************************
/* Test methods; simple properties-based Creators
/**********************************************************************
*/
@Test
public void testCanonicalConstructor1ArgPropertiesCreator() throws Exception
{
// Instead of delegating, try denoting List-taking 1-arg one:
assertEquals(POJO4584.factoryString("List[2]"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.PROPERTIES,
List.class))
.readValue(a2q("{'list':[ 1, 2]}")));
// ok to map from empty Object too
assertEquals(POJO4584.factoryString("List[-1]"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.PROPERTIES,
List.class))
.readValue(a2q("{}")));
}
@Test
public void testCanonicalConstructor2ArgPropertiesCreator() throws Exception
{
// Mark the "true" canonical
assertEquals(POJO4584.factoryString("abc"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.PROPERTIES,
String.class, Integer.TYPE))
.readValue(a2q("{'bogus':12, 'v':'abc' }")));
// ok to map from empty Object too
assertEquals(POJO4584.factoryString(null),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.PROPERTIES,
String.class, Integer.TYPE))
.without(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES)
.readValue(a2q("{}")));
}
/*
/**********************************************************************
/* Test methods; simple delegation-based Creators
/**********************************************************************
*/
@Test
public void testCanonicalConstructorDelegatingIntCreator() throws Exception
{
assertEquals(POJO4584.factoryString("int[42]"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.DELEGATING,
Integer.TYPE))
.readValue(a2q("42")));
}
@Test
public void testCanonicalConstructorDelegatingListCreator() throws Exception
{
assertEquals(POJO4584.factoryString("List[3]"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.DELEGATING,
List.class))
.readValue(a2q("[1, 2, 3]")));
}
@Test
public void testCanonicalConstructorDelegatingArrayCreator() throws Exception
{
assertEquals(POJO4584.factoryString("Array[1]"),
readerWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.DELEGATING,
Object[].class))
.readValue(a2q("[true]")));
}
/*
/**********************************************************************
/* Test methods; deal with explicitly annotated types
/**********************************************************************
*/
// Here we test to ensure that
@Test
public void testDelegatingVsExplicit() throws Exception
{
assertEquals(POJO4584Annotated.factoryString("abc"),
mapperWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.DELEGATING,
"wrongInt"))
.readerFor(POJO4584Annotated.class)
.readValue(a2q("{'v':'abc','bogus':3}")));
}
@Test
public void testPropertiesBasedVsExplicit() throws Exception
{
assertEquals(POJO4584Annotated.factoryString("abc"),
mapperWith(new PrimaryCreatorFindingIntrospector(JsonCreator.Mode.PROPERTIES,
Integer.TYPE, String.class))
.readerFor(POJO4584Annotated.class)
.readValue(a2q("{'v':'abc','bogus':3}")));
}
/*
/**********************************************************************
/* Helper methods
/**********************************************************************
*/
private ObjectReader readerWith(AnnotationIntrospector intr) {
return mapperWith(intr).readerFor(POJO4584.class);
}
private ObjectMapper mapperWith(AnnotationIntrospector intr) {
return JsonMapper.builder()
.annotationIntrospector(intr)
.build();
}
}
| PrimaryCreatorFindingIntrospector |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/ZipDeflaterDataFormat.java | {
"start": 1546,
"end": 2737
} | class ____ extends DataFormatDefinition {
@XmlAttribute
@Metadata(javaType = "java.lang.Integer", defaultValue = "-1", enums = "-1,0,1,2,3,4,5,6,7,8,9")
private String compressionLevel;
public ZipDeflaterDataFormat() {
super("zipDeflater");
}
protected ZipDeflaterDataFormat(ZipDeflaterDataFormat source) {
super(source);
this.compressionLevel = source.compressionLevel;
}
private ZipDeflaterDataFormat(Builder builder) {
this();
this.compressionLevel = builder.compressionLevel;
}
@Override
public ZipDeflaterDataFormat copyDefinition() {
return new ZipDeflaterDataFormat(this);
}
public String getCompressionLevel() {
return compressionLevel;
}
/**
* To specify a specific compression between 0-9. -1 is default compression, 0 is no compression, and 9 is the best
* compression.
*/
public void setCompressionLevel(String compressionLevel) {
this.compressionLevel = compressionLevel;
}
/**
* {@code Builder} is a specific builder for {@link ZipDeflaterDataFormat}.
*/
@XmlTransient
public static | ZipDeflaterDataFormat |
java | quarkusio__quarkus | devtools/gradle/gradle-application-plugin/src/main/java/io/quarkus/gradle/tasks/worker/BuildWorkerParams.java | {
"start": 48,
"end": 102
} | interface ____ extends QuarkusParams {
}
| BuildWorkerParams |
java | apache__camel | components/camel-cxf/camel-cxf-spring-soap/src/test/java/org/apache/camel/component/cxf/mtom/HelloImpl12.java | {
"start": 1246,
"end": 1304
} | class ____ extends HelloImpl implements Hello {
}
| HelloImpl12 |
java | google__guava | android/guava-tests/test/com/google/common/eventbus/SubscriberRegistryTest.java | {
"start": 4949,
"end": 5396
} | class ____ {
@Subscribe
public void handle(Object o) {}
}
public void testFlattenHierarchy() {
assertEquals(
ImmutableSet.of(
Object.class,
HierarchyFixtureInterface.class,
HierarchyFixtureSubinterface.class,
HierarchyFixtureParent.class,
HierarchyFixture.class),
SubscriberRegistry.flattenHierarchy(HierarchyFixture.class));
}
private | ObjectSubscriber |
java | quarkusio__quarkus | integration-tests/test-extension/extension/runtime/src/main/java/io/quarkus/extest/runtime/RuntimeXmlConfigService.java | {
"start": 2755,
"end": 3364
} | class ____ {
private InputStream is;
private OutputStream os;
CommandHandler(InputStream inputStream, OutputStream outputStream) {
this.is = inputStream;
this.os = outputStream;
}
void run() throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
String command = reader.readLine();
log.infof("Received command: %s", command);
String reply = command + "-ack";
os.write(reply.getBytes("UTF-8"));
}
}
}
| CommandHandler |
java | apache__camel | components/camel-infinispan/camel-infinispan/src/test/java/org/apache/camel/component/infinispan/remote/InfinispanRemoteProducerIT.java | {
"start": 1498,
"end": 4234
} | class ____ extends InfinispanRemoteTestSupport implements InfinispanProducerTestSupport {
@BindToRegistry("mappingFunction")
public BiFunction<String, String, String> mappingFunction() {
return (k, v) -> v + "replay";
}
@Test
public void statsOperation() {
fluentTemplate()
.to("direct:start")
.withHeader(InfinispanConstants.KEY, InfinispanProducerTestSupport.KEY_ONE)
.withHeader(InfinispanConstants.VALUE, InfinispanProducerTestSupport.VALUE_ONE)
.withHeader(InfinispanConstants.OPERATION, InfinispanOperation.PUT)
.send();
assertEquals(InfinispanProducerTestSupport.VALUE_ONE, getCache().get(InfinispanProducerTestSupport.KEY_ONE));
fluentTemplate()
.to("direct:start")
.withHeader(InfinispanConstants.KEY, InfinispanProducerTestSupport.KEY_TWO)
.withHeader(InfinispanConstants.VALUE, InfinispanProducerTestSupport.VALUE_TWO)
.withHeader(InfinispanConstants.OPERATION, InfinispanOperation.PUT)
.send();
assertEquals(InfinispanProducerTestSupport.VALUE_TWO, getCache().get(InfinispanProducerTestSupport.KEY_TWO));
assertEquals(
-1,
fluentTemplate()
.to("direct:start")
.withHeader(InfinispanConstants.OPERATION, InfinispanOperation.STATS)
.request(ServerStatistics.class)
.getIntStatistic(ServerStatistics.APPROXIMATE_ENTRIES));
}
// *****************************
//
// *****************************
@BeforeEach
protected void beforeEach() {
// cleanup the default test cache before each run
getCache().clear();
Awaitility.await().atMost(Duration.ofSeconds(1)).until(() -> cacheContainer.isStarted());
}
@Override
public BasicCache<Object, Object> getCache() {
return super.getCache();
}
@Override
public BasicCache<Object, Object> getCache(String name) {
return super.getCache(name);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.toF("infinispan:%s", getCacheName());
from("direct:compute")
.toF("infinispan:%s?remappingFunction=#mappingFunction", getCacheName());
from("direct:explicitput")
.toF("infinispan:%s?operation=PUT&key=a&value=3", getCacheName());
}
};
}
}
| InfinispanRemoteProducerIT |
java | alibaba__nacos | test/core-test/src/test/java/com/alibaba/nacos/test/core/auth/AuthBase.java | {
"start": 1225,
"end": 1317
} | class ____ authentication tests in Nacos.`
*
* @author nkorange
* @since 1.2.0
*/
public | for |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/ScheduledAnnotationBeanPostProcessorTests.java | {
"start": 40816,
"end": 40987
} | class ____ {
@Scheduled(fixedDelayString = "${fixedDelay}", initialDelayString = "${initialDelay}")
void fixedDelay() {
}
}
static | PropertyPlaceholderWithFixedDelay |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 20002,
"end": 20176
} | class ____ to use to load the class, or {@code null}
* to signal that the default {@code ClassLoader} should be used
* @param className the fully qualified name of the | loader |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/functions/Functions.java | {
"start": 9263,
"end": 9708
} | class ____<T> implements Consumer<Throwable> {
final Consumer<? super Notification<T>> onNotification;
NotificationOnError(Consumer<? super Notification<T>> onNotification) {
this.onNotification = onNotification;
}
@Override
public void accept(Throwable v) throws Throwable {
onNotification.accept(Notification.createOnError(v));
}
}
static final | NotificationOnError |
java | elastic__elasticsearch | x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4TransportCloseNotifyIT.java | {
"start": 2066,
"end": 7575
} | class ____ extends SecurityIntegTestCase {
@Override
protected boolean addMockHttpTransport() {
return false;
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
final Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
addSSLSettingsForNodePEMFiles(builder, "xpack.security.http.", randomBoolean());
return builder.put("xpack.security.http.ssl.enabled", true).build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), CancellableActionTestPlugin.class);
}
private static Bootstrap setupNettyClient(String node, Consumer<FullHttpResponse> responseHandler) throws Exception {
var sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
var httpServer = internalCluster().getInstance(HttpServerTransport.class, node);
var remoteAddr = randomFrom(httpServer.boundAddress().boundAddresses());
return new Bootstrap().group(new NioEventLoopGroup(1))
.channel(NioSocketChannel.class)
.remoteAddress(remoteAddr.getAddress(), remoteAddr.getPort())
.handler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) {
var p = ch.pipeline();
p.addLast(sslCtx.newHandler(ch.alloc()));
p.addLast(new HttpRequestEncoder());
p.addLast(new HttpResponseDecoder());
p.addLast(new HttpObjectAggregator(4096));
p.addLast(new SimpleChannelInboundHandler<FullHttpResponse>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) {
responseHandler.accept(msg);
}
});
}
});
}
/**
* Ensures that receiving close_notify on server will close connection.
* Simulates normal connection flow where client and server exchange a few requests and responses.
* After an exchange client sends close_notify and expects the server to close connection.
*/
public void testSendCloseNotifyAfterHttpGetRequests() throws Exception {
final var nReq = randomIntBetween(0, 10); // nothing particular about number 10
final var responsesReceivedLatch = new CountDownLatch(nReq);
final var client = setupNettyClient(internalCluster().getRandomNodeName(), response -> {
assertEquals(200, response.status().code());
responsesReceivedLatch.countDown();
});
try {
var channel = client.connect().sync().channel();
// send some HTTP GET requests before closing a channel
for (int i = 0; i < nReq; i++) {
channel.write(newHttpGetReq("/"));
if (randomBoolean()) {
channel.flush();
}
}
channel.flush();
safeAwait(responsesReceivedLatch);
// send close_notify alert and wait for channel closure
var sslHandler = channel.pipeline().get(SslHandler.class);
sslHandler.closeOutbound();
try {
assertTrue("server must close connection", channel.closeFuture().await(SAFE_AWAIT_TIMEOUT.millis()));
} finally {
channel.close().sync();
}
} finally {
client.config().group().shutdownGracefully(0, 0, TimeUnit.SECONDS).sync();
}
}
/**
* Ensures that receiving close_notify will close connection and cancel running action.
*/
public void testSendCloseNotifyCancelAction() throws Exception {
var node = internalCluster().getRandomNodeName();
var indexName = "close-notify-cancel";
createIndex(indexName);
ensureGreen(indexName);
var gotResponse = new AtomicBoolean(false);
var client = setupNettyClient(node, resp -> gotResponse.set(true));
var actionName = ClusterStateAction.NAME;
try (var capturingAction = CancellableActionTestPlugin.capturingActionOnNode(actionName, node)) {
var channel = client.connect().sync().channel();
var req = newHttpGetReq("/_cluster/state");
channel.writeAndFlush(req);
var ssl = channel.pipeline().get(SslHandler.class);
capturingAction.captureAndCancel(ssl::closeOutbound);
try {
assertTrue("server must close connection", channel.closeFuture().await(SAFE_AWAIT_TIMEOUT.millis()));
assertAllTasksHaveFinished(actionName);
assertFalse("must cancel action before http response", gotResponse.get());
} finally {
channel.close().sync();
}
} finally {
client.config().group().shutdownGracefully(0, 0, TimeUnit.SECONDS).sync();
}
}
private DefaultFullHttpRequest newHttpGetReq(String uri) {
var req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri);
req.headers().add(HttpHeaderNames.AUTHORIZATION, basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword()));
return req;
}
}
| SecurityNetty4TransportCloseNotifyIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java | {
"start": 64809,
"end": 67002
} | class ____<R extends TransportRequest> extends ConcreteShardRequest<R> {
private final long globalCheckpoint;
private final long maxSeqNoOfUpdatesOrDeletes;
public ConcreteReplicaRequest(Reader<R> requestReader, StreamInput in) throws IOException {
super(requestReader, in);
globalCheckpoint = in.readZLong();
maxSeqNoOfUpdatesOrDeletes = in.readZLong();
}
public ConcreteReplicaRequest(
final R request,
final String targetAllocationID,
final long primaryTerm,
final long globalCheckpoint,
final long maxSeqNoOfUpdatesOrDeletes
) {
super(request, targetAllocationID, primaryTerm);
this.globalCheckpoint = globalCheckpoint;
this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeZLong(globalCheckpoint);
out.writeZLong(maxSeqNoOfUpdatesOrDeletes);
}
public long getGlobalCheckpoint() {
return globalCheckpoint;
}
public long getMaxSeqNoOfUpdatesOrDeletes() {
return maxSeqNoOfUpdatesOrDeletes;
}
@Override
public String toString() {
return "ConcreteReplicaRequest{"
+ "targetAllocationID='"
+ getTargetAllocationID()
+ '\''
+ ", primaryTerm='"
+ getPrimaryTerm()
+ '\''
+ ", request="
+ getRequest()
+ ", globalCheckpoint="
+ globalCheckpoint
+ ", maxSeqNoOfUpdatesOrDeletes="
+ maxSeqNoOfUpdatesOrDeletes
+ '}';
}
}
/**
* Sets the current phase on the task if it isn't null. Pulled into its own
* method because its more convenient that way.
*/
static void setPhase(ReplicationTask task, String phase) {
if (task != null) {
task.setPhase(phase);
}
}
}
| ConcreteReplicaRequest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/joinfetch/Bid.java | {
"start": 208,
"end": 923
} | class ____ {
private float amount;
private Item item;
private Calendar timestamp;
private Long id;
public float getAmount() {
return amount;
}
public void setAmount(float amount) {
this.amount = amount;
}
public Item getItem() {
return item;
}
public void setItem(Item item) {
this.item = item;
}
public Calendar getTimestamp() {
return timestamp;
}
public void setTimestamp(Calendar timestamp) {
this.timestamp = timestamp;
}
Bid() {}
public Bid(Item item, float amount) {
this.amount = amount;
this.item = item;
item.getBids().add(this);
this.timestamp = Calendar.getInstance();
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
}
| Bid |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/framework/ObjenesisProxyTests.java | {
"start": 1044,
"end": 1547
} | class ____ {
@Test
void appliesAspectToClassWithComplexConstructor() {
ApplicationContext context = new ClassPathXmlApplicationContext("ObjenesisProxyTests-context.xml", getClass());
ClassWithComplexConstructor bean = context.getBean(ClassWithComplexConstructor.class);
bean.method();
DebugInterceptor interceptor = context.getBean(DebugInterceptor.class);
assertThat(interceptor.getCount()).isEqualTo(1L);
assertThat(bean.getDependency().getValue()).isEqualTo(1);
}
}
| ObjenesisProxyTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegDoublesEvaluator.java | {
"start": 3830,
"end": 4391
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory v;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory v) {
this.source = source;
this.v = v;
}
@Override
public NegDoublesEvaluator get(DriverContext context) {
return new NegDoublesEvaluator(source, v.get(context), context);
}
@Override
public String toString() {
return "NegDoublesEvaluator[" + "v=" + v + "]";
}
}
}
| Factory |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/CheckedProvidersTest.java | {
"start": 3936,
"end": 4574
} | class ____ extends Exception {}
public void testCheckThrowable_checkedExceptionNotDeclared_throwsIllegalArgumentException()
throws Exception {
String message =
String.format(
"Thrown exception <%s> is not declared to be thrown by <%s>",
BarException.class.getName(), FooCheckedProvider.class.getMethod("get"));
try {
CheckedProviders.throwing(FooCheckedProvider.class, BarException.class);
fail("Expected an exception to be thrown");
} catch (IllegalArgumentException e) {
assertThat(e).hasMessageThat().isEqualTo(message);
}
}
private static final | BarException |
java | quarkusio__quarkus | extensions/reactive-oracle-client/deployment/src/main/java/io/quarkus/reactive/oracle/client/deployment/ReactiveOracleClientProcessor.java | {
"start": 3322,
"end": 14163
} | class ____ {
private static final Type ORACLE_POOL_CREATOR = ClassType.create(DotName.createSimple(OraclePoolCreator.class.getName()));
private static final ParameterizedType POOL_CREATOR_INJECTION_TYPE = ParameterizedType.create(INJECT_INSTANCE,
new Type[] { ORACLE_POOL_CREATOR }, null);
private static final DotName VERTX_ORACLE_POOL = DotName.createSimple(OraclePool.class);
private static final Type VERTX_ORACLE_POOL_TYPE = ClassType.create(VERTX_ORACLE_POOL);
@BuildStep
@Record(ExecutionTime.RUNTIME_INIT)
ServiceStartBuildItem build(BuildProducer<FeatureBuildItem> feature,
BuildProducer<OraclePoolBuildItem> oraclePool,
BuildProducer<VertxPoolBuildItem> vertxPool,
OraclePoolRecorder recorder,
VertxBuildItem vertx,
EventLoopCountBuildItem eventLoopCount,
ShutdownContextBuildItem shutdown,
BuildProducer<SyntheticBeanBuildItem> syntheticBeans,
BuildProducer<ExtensionSslNativeSupportBuildItem> sslNativeSupport,
DataSourcesBuildTimeConfig dataSourcesBuildTimeConfig,
DataSourcesReactiveBuildTimeConfig dataSourcesReactiveBuildTimeConfig,
List<DefaultDataSourceDbKindBuildItem> defaultDataSourceDbKindBuildItems,
CurateOutcomeBuildItem curateOutcomeBuildItem) {
feature.produce(new FeatureBuildItem(Feature.REACTIVE_ORACLE_CLIENT));
Stream.Builder<String> oraclePoolNamesBuilder = Stream.builder();
for (String dataSourceName : dataSourcesBuildTimeConfig.dataSources().keySet()) {
if (!isReactiveOraclePoolDefined(dataSourcesBuildTimeConfig, dataSourcesReactiveBuildTimeConfig, dataSourceName,
defaultDataSourceDbKindBuildItems, curateOutcomeBuildItem)) {
continue;
}
createPool(recorder, vertx, eventLoopCount, shutdown, oraclePool, syntheticBeans, dataSourceName);
oraclePoolNamesBuilder.add(dataSourceName);
}
Set<String> oraclePoolNames = oraclePoolNamesBuilder.build().collect(toSet());
if (!oraclePoolNames.isEmpty()) {
syntheticBeans.produce(SyntheticBeanBuildItem.configure(OraclePoolSupport.class)
.scope(Singleton.class)
.unremovable()
.runtimeValue(recorder.createOraclePoolSupport(oraclePoolNames))
.setRuntimeInit()
.done());
}
// Enable SSL support by default
sslNativeSupport.produce(new ExtensionSslNativeSupportBuildItem(Feature.REACTIVE_ORACLE_CLIENT));
vertxPool.produce(new VertxPoolBuildItem());
return new ServiceStartBuildItem("reactive-oracle-client");
}
@BuildStep
DevServicesDatasourceConfigurationHandlerBuildItem devDbHandler() {
return DevServicesDatasourceConfigurationHandlerBuildItem.reactive(DatabaseKind.ORACLE);
}
@BuildStep
void unremoveableBeans(BuildProducer<UnremovableBeanBuildItem> producer) {
producer.produce(UnremovableBeanBuildItem.beanTypes(OraclePoolCreator.class));
}
@BuildStep
void validateBeans(ValidationPhaseBuildItem validationPhase,
BuildProducer<ValidationPhaseBuildItem.ValidationErrorBuildItem> errors) {
// no two OraclePoolCreator beans can be associated with the same datasource
Map<String, Boolean> seen = new HashMap<>();
for (BeanInfo beanInfo : validationPhase.getContext().beans()
.matchBeanTypes(new OraclePoolCreatorBeanClassPredicate())) {
Set<Name> qualifiers = new TreeSet<>(); // use a TreeSet in order to get a predictable iteration order
for (AnnotationInstance qualifier : beanInfo.getQualifiers()) {
qualifiers.add(Name.from(qualifier));
}
String qualifiersStr = qualifiers.stream().map(Name::toString).collect(Collectors.joining("_"));
if (seen.getOrDefault(qualifiersStr, false)) {
errors.produce(new ValidationPhaseBuildItem.ValidationErrorBuildItem(
new IllegalStateException(
"There can be at most one bean of type '" + OraclePoolCreator.class.getName()
+ "' for each datasource.")));
} else {
seen.put(qualifiersStr, true);
}
}
}
@BuildStep
void registerDriver(BuildProducer<ServiceProviderBuildItem> serviceProvider) {
serviceProvider.produce(new ServiceProviderBuildItem("io.vertx.sqlclient.spi.Driver", OracleDriver.class.getName()));
}
@BuildStep
void registerServiceBinding(Capabilities capabilities, BuildProducer<ServiceProviderBuildItem> serviceProvider,
BuildProducer<DefaultDataSourceDbKindBuildItem> dbKind) {
if (capabilities.isPresent(Capability.KUBERNETES_SERVICE_BINDING)) {
serviceProvider.produce(
new ServiceProviderBuildItem("io.quarkus.kubernetes.service.binding.runtime.ServiceBindingConverter",
OracleServiceBindingConverter.class.getName()));
}
dbKind.produce(new DefaultDataSourceDbKindBuildItem(DatabaseKind.ORACLE));
}
/**
* The health check needs to be produced in a separate method to avoid a circular dependency (the Vert.x instance creation
* consumes the AdditionalBeanBuildItems).
*/
@BuildStep
void addHealthCheck(
Capabilities capabilities,
BuildProducer<HealthBuildItem> healthChecks,
DataSourcesBuildTimeConfig dataSourcesBuildTimeConfig,
DataSourcesReactiveBuildTimeConfig dataSourcesReactiveBuildTimeConfig,
List<DefaultDataSourceDbKindBuildItem> defaultDataSourceDbKindBuildItems,
CurateOutcomeBuildItem curateOutcomeBuildItem) {
if (!capabilities.isPresent(Capability.SMALLRYE_HEALTH)) {
return;
}
if (!hasPools(dataSourcesBuildTimeConfig, dataSourcesReactiveBuildTimeConfig, defaultDataSourceDbKindBuildItems,
curateOutcomeBuildItem)) {
return;
}
healthChecks.produce(
new HealthBuildItem("io.quarkus.reactive.oracle.client.runtime.health.ReactiveOracleDataSourcesHealthCheck",
dataSourcesBuildTimeConfig.healthEnabled()));
}
private void createPool(OraclePoolRecorder recorder,
VertxBuildItem vertx,
EventLoopCountBuildItem eventLoopCount,
ShutdownContextBuildItem shutdown,
BuildProducer<OraclePoolBuildItem> oraclePool,
BuildProducer<SyntheticBeanBuildItem> syntheticBeans,
String dataSourceName) {
Function<SyntheticCreationalContext<OraclePool>, OraclePool> poolFunction = recorder
.configureOraclePool(vertx.getVertx(), eventLoopCount.getEventLoopCount(), dataSourceName, shutdown);
oraclePool.produce(new OraclePoolBuildItem(dataSourceName, poolFunction));
ExtendedBeanConfigurator oraclePoolBeanConfigurator = SyntheticBeanBuildItem.configure(OraclePool.class)
.defaultBean()
.addType(Pool.class)
.scope(ApplicationScoped.class)
.qualifiers(qualifiers(dataSourceName))
.addInjectionPoint(POOL_CREATOR_INJECTION_TYPE, qualifier(dataSourceName))
.checkActive(recorder.poolCheckActiveSupplier(dataSourceName))
.createWith(poolFunction)
.unremovable()
.setRuntimeInit()
.startup();
syntheticBeans.produce(oraclePoolBeanConfigurator.done());
ExtendedBeanConfigurator mutinyOraclePoolConfigurator = SyntheticBeanBuildItem
.configure(io.vertx.mutiny.oracleclient.OraclePool.class)
.defaultBean()
.addType(io.vertx.mutiny.sqlclient.Pool.class)
.scope(ApplicationScoped.class)
.qualifiers(qualifiers(dataSourceName))
.addInjectionPoint(VERTX_ORACLE_POOL_TYPE, qualifier(dataSourceName))
.checkActive(recorder.poolCheckActiveSupplier(dataSourceName))
.createWith(recorder.mutinyOraclePool(dataSourceName))
.unremovable()
.setRuntimeInit()
.startup();
syntheticBeans.produce(mutinyOraclePoolConfigurator.done());
}
private static boolean isReactiveOraclePoolDefined(DataSourcesBuildTimeConfig dataSourcesBuildTimeConfig,
DataSourcesReactiveBuildTimeConfig dataSourcesReactiveBuildTimeConfig, String dataSourceName,
List<DefaultDataSourceDbKindBuildItem> defaultDataSourceDbKindBuildItems,
CurateOutcomeBuildItem curateOutcomeBuildItem) {
DataSourceBuildTimeConfig dataSourceBuildTimeConfig = dataSourcesBuildTimeConfig
.dataSources().get(dataSourceName);
DataSourceReactiveBuildTimeConfig dataSourceReactiveBuildTimeConfig = dataSourcesReactiveBuildTimeConfig
.dataSources().get(dataSourceName).reactive();
Optional<String> dbKind = DefaultDataSourceDbKindBuildItem.resolve(dataSourceBuildTimeConfig.dbKind(),
defaultDataSourceDbKindBuildItems,
!DataSourceUtil.isDefault(dataSourceName) || dataSourceBuildTimeConfig.devservices().enabled()
.orElse(!dataSourcesBuildTimeConfig.hasNamedDataSources()),
curateOutcomeBuildItem);
if (!dbKind.isPresent()) {
return false;
}
if (!DatabaseKind.isOracle(dbKind.get())
|| !dataSourceReactiveBuildTimeConfig.enabled()) {
return false;
}
return true;
}
private boolean hasPools(DataSourcesBuildTimeConfig dataSourcesBuildTimeConfig,
DataSourcesReactiveBuildTimeConfig dataSourcesReactiveBuildTimeConfig,
List<DefaultDataSourceDbKindBuildItem> defaultDataSourceDbKindBuildItems,
CurateOutcomeBuildItem curateOutcomeBuildItem) {
if (isReactiveOraclePoolDefined(dataSourcesBuildTimeConfig, dataSourcesReactiveBuildTimeConfig,
DataSourceUtil.DEFAULT_DATASOURCE_NAME, defaultDataSourceDbKindBuildItems, curateOutcomeBuildItem)) {
return true;
}
for (String dataSourceName : dataSourcesBuildTimeConfig.dataSources().keySet()) {
if (isReactiveOraclePoolDefined(dataSourcesBuildTimeConfig, dataSourcesReactiveBuildTimeConfig,
dataSourceName, defaultDataSourceDbKindBuildItems, curateOutcomeBuildItem)) {
return true;
}
}
return false;
}
private static | ReactiveOracleClientProcessor |
java | apache__maven | impl/maven-executor/src/main/java/org/apache/maven/cling/executor/embedded/EmbeddedMavenExecutor.java | {
"start": 2760,
"end": 10937
} | class ____ {
private final URLClassLoader bootClassLoader;
private final String version;
private final Object classWorld;
private final Set<String> originalClassRealmIds;
private final ClassLoader tccl;
private final Map<String, Function<ExecutorRequest, Integer>> commands; // the commands
private final Collection<Object> keepAlive; // refs things to make sure no GC takes it away
private Context(
URLClassLoader bootClassLoader,
String version,
Object classWorld,
Set<String> originalClassRealmIds,
ClassLoader tccl,
Map<String, Function<ExecutorRequest, Integer>> commands,
Collection<Object> keepAlive) {
this.bootClassLoader = bootClassLoader;
this.version = version;
this.classWorld = classWorld;
this.originalClassRealmIds = originalClassRealmIds;
this.tccl = tccl;
this.commands = commands;
this.keepAlive = keepAlive;
}
}
protected final boolean cacheContexts;
protected final boolean useMavenArgsEnv;
protected final AtomicBoolean closed;
protected final InputStream originalStdin;
protected final PrintStream originalStdout;
protected final PrintStream originalStderr;
protected final Properties originalProperties;
protected final ClassLoader originalClassLoader;
protected final ConcurrentHashMap<Path, Context> contexts;
public EmbeddedMavenExecutor() {
this(true, true);
}
public EmbeddedMavenExecutor(boolean cacheContexts, boolean useMavenArgsEnv) {
this.cacheContexts = cacheContexts;
this.useMavenArgsEnv = useMavenArgsEnv;
this.closed = new AtomicBoolean(false);
this.originalStdin = System.in;
this.originalStdout = System.out;
this.originalStderr = System.err;
this.originalClassLoader = Thread.currentThread().getContextClassLoader();
this.contexts = new ConcurrentHashMap<>();
this.originalProperties = new Properties();
this.originalProperties.putAll(System.getProperties());
}
@Override
public int execute(ExecutorRequest executorRequest) throws ExecutorException {
requireNonNull(executorRequest);
if (closed.get()) {
throw new ExecutorException("Executor is closed");
}
validate(executorRequest);
Context context = mayCreate(executorRequest);
String command = executorRequest.command();
Function<ExecutorRequest, Integer> exec = context.commands.get(command);
if (exec == null) {
throw new IllegalArgumentException(
"Unknown command: '" + command + "' for '" + executorRequest.installationDirectory() + "'");
}
Thread.currentThread().setContextClassLoader(context.tccl);
try {
return exec.apply(executorRequest);
} catch (Exception e) {
throw new ExecutorException("Failed to execute", e);
} finally {
try {
disposeRuntimeCreatedRealms(context);
} finally {
System.setIn(originalStdin);
System.setOut(originalStdout);
System.setErr(originalStderr);
Thread.currentThread().setContextClassLoader(originalClassLoader);
System.setProperties(originalProperties);
if (!cacheContexts) {
doClose(context);
}
}
}
}
/**
* Unloads dynamically loaded things, like extensions created realms. Makes sure we go back to "initial state".
*/
protected void disposeRuntimeCreatedRealms(Context context) {
try {
Method getRealms = context.classWorld.getClass().getMethod("getRealms");
Method disposeRealm = context.classWorld.getClass().getMethod("disposeRealm", String.class);
List<Object> realms = (List<Object>) getRealms.invoke(context.classWorld);
for (Object realm : realms) {
String realmId = (String) realm.getClass().getMethod("getId").invoke(realm);
if (!context.originalClassRealmIds.contains(realmId)) {
disposeRealm.invoke(context.classWorld, realmId);
}
}
} catch (Exception e) {
throw new ExecutorException("Failed to dispose runtime created realms", e);
}
}
@Override
public String mavenVersion(ExecutorRequest executorRequest) throws ExecutorException {
requireNonNull(executorRequest);
if (closed.get()) {
throw new ExecutorException("Executor is closed");
}
validate(executorRequest);
return mayCreate(executorRequest).version;
}
protected Context mayCreate(ExecutorRequest executorRequest) {
Path mavenHome = ExecutorRequest.getCanonicalPath(executorRequest.installationDirectory());
if (cacheContexts) {
return contexts.computeIfAbsent(mavenHome, k -> doCreate(mavenHome, executorRequest));
} else {
return doCreate(mavenHome, executorRequest);
}
}
protected Context doCreate(Path mavenHome, ExecutorRequest executorRequest) {
if (!Files.isDirectory(mavenHome)) {
throw new IllegalArgumentException("Installation directory must point to existing directory: " + mavenHome);
}
if (!MVN4_MAIN_CLASSES.containsKey(executorRequest.command())) {
throw new IllegalArgumentException(
getClass().getSimpleName() + " does not support command " + executorRequest.command());
}
if (executorRequest.environmentVariables().isPresent()) {
throw new IllegalArgumentException(getClass().getSimpleName() + " does not support environment variables: "
+ executorRequest.environmentVariables().get());
}
if (executorRequest.jvmArguments().isPresent()) {
throw new IllegalArgumentException(getClass().getSimpleName() + " does not support jvmArguments: "
+ executorRequest.jvmArguments().get());
}
Path boot = mavenHome.resolve("boot");
Path m2conf = mavenHome.resolve("bin/m2.conf");
if (!Files.isDirectory(boot) || !Files.isRegularFile(m2conf)) {
throw new IllegalArgumentException(
"Installation directory does not point to Maven installation: " + mavenHome);
}
ArrayList<String> mavenArgs = new ArrayList<>();
String mavenArgsEnv = System.getenv("MAVEN_ARGS");
if (useMavenArgsEnv && mavenArgsEnv != null && !mavenArgsEnv.isEmpty()) {
Arrays.stream(mavenArgsEnv.split(" "))
.filter(s -> !s.trim().isEmpty())
.forEach(s -> mavenArgs.add(0, s));
}
Properties properties = prepareProperties(executorRequest);
// set ahead of time, if the mavenHome points to Maven4, as ClassWorld Launcher needs this property
properties.setProperty(
"maven.mainClass", requireNonNull(MVN4_MAIN_CLASSES.get(ExecutorRequest.MVN), "mainClass"));
System.setProperties(properties);
URLClassLoader bootClassLoader = createMavenBootClassLoader(boot, Collections.emptyList());
Thread.currentThread().setContextClassLoader(bootClassLoader);
try {
Class<?> launcherClass = bootClassLoader.loadClass("org.codehaus.plexus.classworlds.launcher.Launcher");
Object launcher = launcherClass.getDeclaredConstructor().newInstance();
Method configure = launcherClass.getMethod("configure", InputStream.class);
try (InputStream inputStream = Files.newInputStream(m2conf)) {
configure.invoke(launcher, inputStream);
}
Object classWorld = launcherClass.getMethod("getWorld").invoke(launcher);
Set<String> originalClassRealmIds = new HashSet<>();
// collect pre-created (in m2.conf) | Context |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 51461,
"end": 51627
} | class ____ extends ImplementsInterfaceWithAnnotatedMethod {
@Override
public void foo() {
}
}
public abstract static | SubOfImplementsInterfaceWithAnnotatedMethod |
java | apache__camel | components/camel-telemetry/src/main/java/org/apache/camel/telemetry/decorators/Sjms2SpanDecorator.java | {
"start": 927,
"end": 1623
} | class ____ extends AbstractMessagingSpanDecorator {
@Override
public String getComponent() {
return "sjms2";
}
@Override
protected String getDestination(Exchange exchange, Endpoint endpoint) {
// when using toD for dynamic destination then extract from header
String destination = exchange.getMessage().getHeader("CamelJMSDestinationName", String.class);
if (destination == null) {
destination = super.getDestination(exchange, endpoint);
}
return destination;
}
@Override
public String getComponentClassName() {
return "org.apache.camel.component.sjms2.Sjms2Component";
}
}
| Sjms2SpanDecorator |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/log/LogFilterTest2.java | {
"start": 263,
"end": 1016
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:derby:classpath:petstore-db");
dataSource.setFilters("log4j");
}
public void test_select() throws Exception {
Connection conn = dataSource.getConnection();
PreparedStatement stmt = conn.prepareStatement("SELECT * FROM ITEM WHERE LISTPRICE > 10");
for (int i = 0; i < 10; ++i) {
ResultSet rs = stmt.executeQuery();
rs.close();
}
stmt.close();
conn.close();
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
}
| LogFilterTest2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryAssignmentTest.java | {
"start": 2677,
"end": 3062
} | class ____ {
@Mock Object mockObject;
}
""")
.doTest();
}
@Test
public void initializedViaInitMocks() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
| Test |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/DisplayNameGenerationTests.java | {
"start": 14642,
"end": 14763
} | class ____ {
@Test
void is_instantiated_using_its_noarg_constructor() {
new Stack<>();
}
@Nested
| StackTestCase |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/CaseStatementWithTypeTest.java | {
"start": 5219,
"end": 5433
} | class ____ {
@Id
private Long id;
public JoinedParent() {
}
public JoinedParent(Long id) {
this.id = id;
}
}
@SuppressWarnings("unused")
@Entity( name = "JoinedChildA" )
public static | JoinedParent |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldNotBeNull.java | {
"start": 802,
"end": 1508
} | class ____ extends BasicErrorMessageFactory {
private static final ShouldNotBeNull INSTANCE = new ShouldNotBeNull("%nExpecting actual not to be null");
/**
* Returns the default instance of this class.
* @return the default instance of this class.
*/
public static ErrorMessageFactory shouldNotBeNull() {
return INSTANCE;
}
/**
* Create a instance specifying a label
* @param label of what should not be null
* @return the new instance
*/
public static ShouldNotBeNull shouldNotBeNull(String label) {
return new ShouldNotBeNull("%nExpecting %s not to be null".formatted(label));
}
private ShouldNotBeNull(String label) {
super(label);
}
}
| ShouldNotBeNull |
java | google__guice | core/test/com/google/inject/RestrictedBindingSourceTest.java | {
"start": 9786,
"end": 11766
} | class ____ extends AbstractModule {
@Override
protected void configure() {
install(
new PrivateModule() {
@Override
protected void configure() {
// Non-exempt module.
install(new BazRogueDnsModule());
}
});
}
}
@Test
public void exemptModulesCanCreateRestrictedBinding() {
Guice.createInjector(new FooRogueDnsModule());
Guice.createInjector(new BarRogueDnsModule());
}
@Test
public void nonExemptModuleCantCreateRestrictedBinding() {
CreationException expected = assertThatInjectorCreationFails(new BazRogueDnsModule());
assertThat(expected).hasMessageThat().contains(BINDING_PERMISSION_ERROR);
assertThat(expected).hasMessageThat().contains(USE_DNS_MODULE);
}
@Test
public void parentModuleExeptionAppliesToChildPrivateModule() {
Guice.createInjector(new TopLevelModulePrivatelyBindingDnsAddress());
}
@Test
public void exemptModuleCanBeOverridenIfRestrictedBindingIsNotOverriden() {
// This tests that we check for exemptions on the module stack of the original element source.
Guice.createInjector(
Modules.override(
new AbstractModule() {
@Override
protected void configure() {
install(new BarRogueDnsModule());
}
@Provides
String random() {
return "foo";
}
})
.with(
new AbstractModule() {
@Provides
String random() {
return "bar";
}
}));
}
// --------------------------------------------------------------------------
// Binder.withSource Tests
// --------------------------------------------------------------------------
@NetworkLibrary
private static | TopLevelModulePrivatelyBindingDnsAddress |
java | spring-projects__spring-boot | module/spring-boot-r2dbc/src/main/java/org/springframework/boot/r2dbc/testcontainers/MySqlR2dbcContainerConnectionDetailsFactory.java | {
"start": 1459,
"end": 2029
} | class ____
extends ContainerConnectionDetailsFactory<MySQLContainer, R2dbcConnectionDetails> {
MySqlR2dbcContainerConnectionDetailsFactory() {
super(ANY_CONNECTION_NAME, "io.r2dbc.spi.ConnectionFactoryOptions");
}
@Override
public R2dbcConnectionDetails getContainerConnectionDetails(ContainerConnectionSource<MySQLContainer> source) {
return new MySqlR2dbcDatabaseContainerConnectionDetails(source);
}
/**
* {@link R2dbcConnectionDetails} backed by a {@link ContainerConnectionSource}.
*/
private static final | MySqlR2dbcContainerConnectionDetailsFactory |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/IOHelper.java | {
"start": 29025,
"end": 33281
} | class ____ extends OutputStreamWriter {
private final FileOutputStream out;
/**
* @param out file to write
* @param charset character set to use
*/
public EncodingFileWriter(FileOutputStream out, String charset) throws UnsupportedEncodingException {
super(out, charset);
this.out = out;
}
/**
* @param out file to write
* @param charset character set to use
*/
public EncodingFileWriter(FileOutputStream out, Charset charset) {
super(out, charset);
this.out = out;
}
@Override
public void close() throws IOException {
try {
super.close();
} finally {
out.close();
}
}
}
/**
* Converts the given {@link File} with the given charset to {@link InputStream} with the JVM default charset
*
* @param file the file to be converted
* @param charset the charset the file is read with
* @return the input stream with the JVM default charset
*/
public static InputStream toInputStream(File file, String charset) throws IOException {
return toInputStream(file.toPath(), charset);
}
/**
* Converts the given {@link File} with the given charset to {@link InputStream} with the JVM default charset
*
* @param file the file to be converted
* @param charset the charset the file is read with
* @return the input stream with the JVM default charset
*/
public static InputStream toInputStream(Path file, String charset) throws IOException {
if (charset != null) {
return new EncodingInputStream(file, charset);
} else {
return buffered(Files.newInputStream(file));
}
}
public static BufferedReader toReader(Path file, String charset) throws IOException {
return toReader(file, charset != null ? Charset.forName(charset) : null);
}
public static BufferedReader toReader(File file, String charset) throws IOException {
return toReader(file, charset != null ? Charset.forName(charset) : null);
}
public static BufferedReader toReader(File file, Charset charset) throws IOException {
return toReader(file.toPath(), charset);
}
public static BufferedReader toReader(Path file, Charset charset) throws IOException {
if (charset != null) {
return Files.newBufferedReader(file, charset);
} else {
return Files.newBufferedReader(file);
}
}
public static BufferedWriter toWriter(FileOutputStream os, String charset) throws IOException {
return IOHelper.buffered(new EncodingFileWriter(os, charset));
}
public static BufferedWriter toWriter(FileOutputStream os, Charset charset) {
return IOHelper.buffered(new EncodingFileWriter(os, charset));
}
/**
* Reads the file under the given {@code path}, strips lines starting with {@code commentPrefix} and optionally also
* strips blank lines (the ones for which {@link String#isBlank()} returns {@code true}. Normalizes EOL characters
* to {@code '\n'}.
*
* @param path the path of the file to read
* @param commentPrefix the leading character sequence of comment lines.
* @param stripBlankLines if true {@code true} the lines matching {@link String#isBlank()} will not appear in the
* result
* @return the filtered content of the file
*/
public static String stripLineComments(Path path, String commentPrefix, boolean stripBlankLines) {
StringBuilder result = new StringBuilder(2048);
try (Stream<String> lines = Files.lines(path)) {
lines
.filter(l -> !stripBlankLines || !l.isBlank())
.filter(line -> !line.startsWith(commentPrefix))
.forEach(line -> result.append(line).append('\n'));
} catch (IOException e) {
throw new RuntimeException("Cannot read file: " + path, e);
}
return result.toString();
}
}
| EncodingFileWriter |
java | spring-projects__spring-boot | module/spring-boot-health/src/test/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/HealthEndpointAutoConfigurationTests.java | {
"start": 4208,
"end": 17291
} | class ____ {
private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner()
.withUserConfiguration(HealthIndicatorsConfiguration.class)
.withConfiguration(AutoConfigurations.of(HealthEndpointAutoConfiguration.class,
HealthContributorRegistryAutoConfiguration.class, HealthContributorAutoConfiguration.class));
private final ReactiveWebApplicationContextRunner reactiveContextRunner = new ReactiveWebApplicationContextRunner()
.withUserConfiguration(HealthIndicatorsConfiguration.class)
.withConfiguration(AutoConfigurations.of(HealthEndpointAutoConfiguration.class,
HealthContributorRegistryAutoConfiguration.class, HealthContributorAutoConfiguration.class,
WebEndpointAutoConfiguration.class, EndpointAutoConfiguration.class));
@Test
void runWhenHealthEndpointIsDisabledDoesNotCreateBeans() {
this.contextRunner.withPropertyValues("management.endpoint.health.enabled=false").run((context) -> {
assertThat(context).doesNotHaveBean(StatusAggregator.class);
assertThat(context).doesNotHaveBean(HttpCodeStatusMapper.class);
assertThat(context).doesNotHaveBean(HealthEndpointGroups.class);
assertThat(context).doesNotHaveBean(HealthEndpoint.class);
assertThat(context).doesNotHaveBean(HealthEndpointWebExtension.class);
assertThat(context).doesNotHaveBean(ReactiveHealthEndpointWebExtension.class);
});
}
@Test
void runCreatesStatusAggregatorFromProperties() {
this.contextRunner.withPropertyValues("management.endpoint.health.status.order=up,down").run((context) -> {
StatusAggregator aggregator = context.getBean(StatusAggregator.class);
assertThat(aggregator.getAggregateStatus(Status.UP, Status.DOWN)).isEqualTo(Status.UP);
});
}
@Test
void runWhenHasStatusAggregatorBeanIgnoresProperties() {
this.contextRunner.withUserConfiguration(StatusAggregatorConfiguration.class)
.withPropertyValues("management.endpoint.health.status.order=up,down")
.run((context) -> {
StatusAggregator aggregator = context.getBean(StatusAggregator.class);
assertThat(aggregator.getAggregateStatus(Status.UP, Status.DOWN)).isEqualTo(Status.UNKNOWN);
});
}
@Test
void runCreatesHttpCodeStatusMapperFromProperties() {
this.contextRunner.withPropertyValues("management.endpoint.health.status.http-mapping.up=123")
.run((context) -> {
HttpCodeStatusMapper mapper = context.getBean(HttpCodeStatusMapper.class);
assertThat(mapper.getStatusCode(Status.UP)).isEqualTo(123);
});
}
@Test
void runWhenHasHttpCodeStatusMapperBeanIgnoresProperties() {
this.contextRunner.withUserConfiguration(HttpCodeStatusMapperConfiguration.class)
.withPropertyValues("management.endpoint.health.status.http-mapping.up=123")
.run((context) -> {
HttpCodeStatusMapper mapper = context.getBean(HttpCodeStatusMapper.class);
assertThat(mapper.getStatusCode(Status.UP)).isEqualTo(456);
});
}
@Test
void runCreatesHealthEndpointGroups() {
this.contextRunner.withPropertyValues("management.endpoint.health.group.ready.include=*").run((context) -> {
HealthEndpointGroups groups = context.getBean(HealthEndpointGroups.class);
assertThat(groups).isInstanceOf(AutoConfiguredHealthEndpointGroups.class);
assertThat(groups.getNames()).containsOnly("ready");
});
}
@Test
void runFailsWhenHealthEndpointGroupIncludesContributorThatDoesNotExist() {
this.contextRunner.withUserConfiguration(CompositeHealthIndicatorConfiguration.class)
.withPropertyValues("management.endpoint.health.group.ready.include=composite/b/c,nope")
.run((context) -> {
assertThat(context).hasFailed();
assertThat(context.getStartupFailure()).isInstanceOf(NoSuchHealthContributorException.class)
.hasMessage("Included health contributor 'nope' in group 'ready' does not exist");
});
}
@Test
void runFailsWhenHealthEndpointGroupExcludesContributorThatDoesNotExist() {
this.contextRunner
.withPropertyValues("management.endpoint.health.group.ready.exclude=composite/b/d",
"management.endpoint.health.group.ready.include=*")
.run((context) -> {
assertThat(context).hasFailed();
assertThat(context.getStartupFailure()).isInstanceOf(NoSuchHealthContributorException.class)
.hasMessage("Excluded health contributor 'composite/b/d' in group 'ready' does not exist");
});
}
@Test
void runCreatesHealthEndpointGroupThatIncludesContributorThatDoesNotExistWhenValidationIsDisabled() {
this.contextRunner
.withPropertyValues("management.endpoint.health.validate-group-membership=false",
"management.endpoint.health.group.ready.include=nope")
.run((context) -> {
HealthEndpointGroups groups = context.getBean(HealthEndpointGroups.class);
assertThat(groups).isInstanceOf(AutoConfiguredHealthEndpointGroups.class);
assertThat(groups.getNames()).containsOnly("ready");
});
}
@Test
void runWhenHasHealthEndpointGroupsBeanDoesNotCreateAdditionalHealthEndpointGroups() {
this.contextRunner.withUserConfiguration(HealthEndpointGroupsConfiguration.class)
.withPropertyValues("management.endpoint.health.group.ready.include=*")
.run((context) -> {
HealthEndpointGroups groups = context.getBean(HealthEndpointGroups.class);
assertThat(groups.getNames()).containsOnly("mock");
});
}
@Test
void runCreatesHealthContributorRegistryContainingHealthBeans() {
this.contextRunner.run((context) -> {
HealthContributorRegistry registry = context.getBean(HealthContributorRegistry.class);
Object[] names = registry.stream().map(HealthContributors.Entry::name).toArray();
assertThat(names).containsExactlyInAnyOrder("simple", "additional", "ping");
});
}
@Test
void runWhenNoReactorCreatesHealthContributorRegistryContainingHealthBeans() {
ClassLoader classLoader = new FilteredClassLoader(Mono.class, Flux.class);
this.contextRunner.withClassLoader(classLoader).run((context) -> {
HealthContributorRegistry registry = context.getBean(HealthContributorRegistry.class);
Object[] names = registry.stream().map(HealthContributors.Entry::name).toArray();
assertThat(names).containsExactlyInAnyOrder("simple", "additional", "ping");
});
}
@Test
void runWhenHasHealthContributorRegistryBeanDoesNotCreateAdditionalRegistry() {
this.contextRunner.withUserConfiguration(HealthContributorRegistryConfiguration.class).run((context) -> {
HealthContributorRegistry registry = context.getBean(HealthContributorRegistry.class);
Object[] names = registry.stream().map(HealthContributors.Entry::name).toArray();
assertThat(names).isEmpty();
});
}
@Test
void runCreatesHealthEndpoint() {
this.contextRunner.withPropertyValues("management.endpoint.health.show-details=always").run((context) -> {
HealthEndpoint endpoint = context.getBean(HealthEndpoint.class);
IndicatedHealthDescriptor descriptor = (IndicatedHealthDescriptor) endpoint.healthForPath("simple");
assertThat(descriptor).isNotNull();
assertThat(descriptor.getDetails()).containsEntry("counter", 42);
});
}
@Test
void runWhenHasHealthEndpointBeanDoesNotCreateAdditionalHealthEndpoint() {
this.contextRunner.withUserConfiguration(HealthEndpointConfiguration.class).run((context) -> {
HealthEndpoint endpoint = context.getBean(HealthEndpoint.class);
assertThat(endpoint.health()).isNull();
});
}
@Test
void runCreatesReactiveHealthContributorRegistryContainingReactiveHealthBeans() {
this.reactiveContextRunner.run((context) -> {
ReactiveHealthContributorRegistry reactiveRegistry = context
.getBean(ReactiveHealthContributorRegistry.class);
Object[] names = reactiveRegistry.stream().map(ReactiveHealthContributors.Entry::name).toArray();
assertThat(names).containsExactlyInAnyOrder("reactive");
});
}
@Test
void runWhenHasReactiveHealthContributorRegistryBeanDoesNotCreateAdditionalReactiveHealthContributorRegistry() {
this.reactiveContextRunner.withUserConfiguration(ReactiveHealthContributorRegistryConfiguration.class)
.run((context) -> {
ReactiveHealthContributorRegistry registry = context.getBean(ReactiveHealthContributorRegistry.class);
Object[] names = registry.stream().map(ReactiveHealthContributors.Entry::name).toArray();
assertThat(names).isEmpty();
});
}
@Test
void runCreatesHealthEndpointWebExtension() {
this.contextRunner.run((context) -> {
HealthEndpointWebExtension webExtension = context.getBean(HealthEndpointWebExtension.class);
WebEndpointResponse<HealthDescriptor> response = webExtension.health(ApiVersion.V3,
WebServerNamespace.SERVER, SecurityContext.NONE, true, "simple");
IndicatedHealthDescriptor descriptor = (IndicatedHealthDescriptor) response.getBody();
assertThat(response.getStatus()).isEqualTo(200);
assertThat(descriptor).isNotNull();
assertThat(descriptor.getDetails()).containsEntry("counter", 42);
});
}
@Test
void runWhenHasHealthEndpointWebExtensionBeanDoesNotCreateExtraHealthEndpointWebExtension() {
this.contextRunner.withUserConfiguration(HealthEndpointWebExtensionConfiguration.class).run((context) -> {
HealthEndpointWebExtension webExtension = context.getBean(HealthEndpointWebExtension.class);
WebEndpointResponse<HealthDescriptor> response = webExtension.health(ApiVersion.V3,
WebServerNamespace.SERVER, SecurityContext.NONE, true, "simple");
assertThat(response).isNull();
});
}
@Test
void runCreatesReactiveHealthEndpointWebExtension() {
this.reactiveContextRunner.run((context) -> {
ReactiveHealthEndpointWebExtension webExtension = context.getBean(ReactiveHealthEndpointWebExtension.class);
Mono<WebEndpointResponse<? extends HealthDescriptor>> responseMono = webExtension.health(ApiVersion.V3,
WebServerNamespace.SERVER, SecurityContext.NONE, true, "simple");
WebEndpointResponse<? extends HealthDescriptor> response = responseMono.block();
assertThat(response).isNotNull();
IndicatedHealthDescriptor descriptor = (IndicatedHealthDescriptor) (response.getBody());
assertThat(descriptor).isNotNull();
assertThat(descriptor.getDetails()).containsEntry("counter", 42);
});
}
@Test
void runWhenHasReactiveHealthEndpointWebExtensionBeanDoesNotCreateExtraReactiveHealthEndpointWebExtension() {
this.reactiveContextRunner.withUserConfiguration(ReactiveHealthEndpointWebExtensionConfiguration.class)
.run((context) -> {
ReactiveHealthEndpointWebExtension webExtension = context
.getBean(ReactiveHealthEndpointWebExtension.class);
Mono<WebEndpointResponse<? extends HealthDescriptor>> response = webExtension.health(ApiVersion.V3,
WebServerNamespace.SERVER, SecurityContext.NONE, true, "simple");
assertThat(response).isNull();
});
}
@Test
void runWhenHasHealthEndpointGroupsPostProcessorPerformsProcessing() {
this.contextRunner.withPropertyValues("management.endpoint.health.group.ready.include=*")
.withUserConfiguration(HealthEndpointGroupsConfiguration.class, TestHealthEndpointGroupsPostProcessor.class)
.run((context) -> {
HealthEndpointGroups groups = context.getBean(HealthEndpointGroups.class);
assertThatExceptionOfType(RuntimeException.class).isThrownBy(() -> groups.get("test"))
.withMessage("postprocessed");
});
}
@Test
void runWithIndicatorsInParentContextFindsIndicators() {
new ApplicationContextRunner().withUserConfiguration(HealthIndicatorsConfiguration.class)
.run((parent) -> new WebApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(HealthEndpointAutoConfiguration.class,
HealthContributorRegistryAutoConfiguration.class, HealthContributorAutoConfiguration.class))
.withParent(parent)
.run((context) -> {
HealthDescriptor descriptor = context.getBean(HealthEndpoint.class).health();
Map<String, HealthDescriptor> components = ((CompositeHealthDescriptor) descriptor).getComponents();
assertThat(components).containsKeys("additional", "ping", "simple");
}));
}
@Test
void runWithReactiveContextAndIndicatorsInParentContextFindsIndicators() {
new ApplicationContextRunner().withUserConfiguration(HealthIndicatorsConfiguration.class)
.run((parent) -> new ReactiveWebApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(HealthEndpointAutoConfiguration.class,
HealthContributorRegistryAutoConfiguration.class, HealthContributorAutoConfiguration.class,
WebEndpointAutoConfiguration.class, EndpointAutoConfiguration.class))
.withParent(parent)
.run((context) -> {
HealthDescriptor descriptor = context.getBean(HealthEndpoint.class).health();
Map<String, HealthDescriptor> components = ((CompositeHealthDescriptor) descriptor).getComponents();
assertThat(components).containsKeys("additional", "ping", "simple");
}));
}
@Test
void runWithClashingGroupNameThrowsException() {
this.contextRunner.withPropertyValues("management.endpoint.health.group.ping.include=*")
.run((context) -> assertThat(context).getFailure()
.hasMessageContaining("HealthContributor with name \"ping\" clashes with group"));
}
@Configuration(proxyBeanMethods = false)
static | HealthEndpointAutoConfigurationTests |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/function/FailableLongFunction.java | {
"start": 910,
"end": 1111
} | interface ____ {@link LongFunction} that declares a {@link Throwable}.
*
* @param <R> Return type.
* @param <E> The kind of thrown exception or error.
* @since 3.11
*/
@FunctionalInterface
public | like |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/EnumUtils.java | {
"start": 14531,
"end": 14832
} | enum ____ not found.
* @since 3.13.0
*/
public static <E extends Enum<E>> E getEnumSystemProperty(final Class<E> enumClass, final String propName, final E defaultEnum) {
return getEnum(enumClass, SystemProperties.getProperty(propName), defaultEnum);
}
/**
* Gets the | if |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/stubbing/defaultanswers/ReturnsGenericDeepStubsTest.java | {
"start": 6638,
"end": 8098
} | class ____ implements MiddleInterface<T> {}
}
@Test
public void
cannot_handle_deep_stubs_with_generics_declared_upper_bounds_at_end_of_deep_invocation()
throws Exception {
OwningClassWithDeclaredUpperBounds.AbstractInner mock =
mock(OwningClassWithDeclaredUpperBounds.AbstractInner.class, RETURNS_DEEP_STUBS);
// It seems that while the syntax used on OwningClassWithDeclaredUpperBounds.AbstractInner
// appear to be legal, the javac compiler does not follow through
// hence we need casting, this may also explain why GenericMetadataSupport has trouble to
// extract matching data as well.
assertThat(mock.generic())
.describedAs("mock should implement first bound : 'Iterable'")
.isInstanceOf(Iterable.class);
assertThat(((Iterable<Article>) mock.generic()).iterator())
.describedAs("Iterable returns Iterator")
.isInstanceOf(Iterator.class);
assertThat(((Iterable<Article>) mock.generic()).iterator().next())
.describedAs(
"Cannot yet extract Type argument 'Article' so return null instead of a mock "
+ "of type Object (which would raise CCE on the call-site)")
.isNull();
assertThat(mock.generic())
.describedAs("mock should implement second | AbstractInner |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/mapping/CompositeIdentifierMapping.java | {
"start": 269,
"end": 966
} | interface ____ extends EntityIdentifierMapping, EmbeddableValuedModelPart {
@Override
default int getFetchableKey() {
return -1;
}
@Override
default IdentifierValue getUnsavedStrategy() {
return IdentifierValue.UNDEFINED;
}
/**
* Does the identifier have a corresponding EmbeddableId or IdClass?
*
* @return false if there is not an IdCass or an EmbeddableId
*/
boolean hasContainingClass();
EmbeddableMappingType getPartMappingType();
/**
* Returns the embeddable type descriptor of the id-class, if there is one,
* otherwise the one of the virtual embeddable mapping type.
*/
EmbeddableMappingType getMappedIdEmbeddableTypeDescriptor();
}
| CompositeIdentifierMapping |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java | {
"start": 1047,
"end": 2813
} | class ____ extends Configured implements Tool {
private PrintStream out = System.out;
private PrintStream err = System.err;
/** The subcommand instance for this shell command, if any. */
private SubCommand subcommand = null;
/**
* Return usage string for the command including any summary of subcommands.
* @return command usage.
*/
public abstract String getCommandUsage();
public void setSubCommand(SubCommand cmd) {
subcommand = cmd;
}
public void setOut(PrintStream p) {
out = p;
}
public PrintStream getOut() {
return out;
}
public void setErr(PrintStream p) {
err = p;
}
public PrintStream getErr() {
return err;
}
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
try {
exitCode = init(args);
if (exitCode != 0 || subcommand == null) {
printShellUsage();
return exitCode;
}
if (subcommand.validate()) {
subcommand.execute();
} else {
printShellUsage();
exitCode = 1;
}
} catch (Exception e) {
printShellUsage();
printException(e);
return 1;
}
return exitCode;
}
/**
* Parse the command line arguments and initialize subcommand instance.
* @param args arguments.
* @return 0 if the argument(s) were recognized, 1 otherwise
* @throws Exception init exception.
*/
protected abstract int init(String[] args) throws Exception;
protected final void printShellUsage() {
if (subcommand != null) {
out.println(subcommand.getUsage());
} else {
out.println(getCommandUsage());
}
out.flush();
}
protected void printException(Exception ex){
ex.printStackTrace(err);
}
/**
* Base | CommandShell |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2245/Issue2245Test.java | {
"start": 601,
"end": 1088
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource()
.addComparisonToFixtureFor( TestMapper.class );
@ProcessorTest
public void shouldGenerateSourceGetMethodOnce() {
TestMapper.Tenant tenant =
TestMapper.INSTANCE.map( new TestMapper.TenantDTO( new TestMapper.Inner( "acme" ) ) );
assertThat( tenant ).isNotNull();
assertThat( tenant.getId() ).isEqualTo( "acme" );
}
}
| Issue2245Test |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/authzpolicy/ProactiveAuthAuthorizationPolicyTest.java | {
"start": 214,
"end": 580
} | class ____ extends AbstractAuthorizationPolicyTest {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(TEST_CLASSES)
.addAsResource(new StringAsset(APPLICATION_PROPERTIES), "application.properties"));
}
| ProactiveAuthAuthorizationPolicyTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java | {
"start": 2215,
"end": 2578
} | class ____<T extends DBWritable>
extends InputFormat<LongWritable, T> implements Configurable {
private static final Logger LOG =
LoggerFactory.getLogger(DBInputFormat.class);
protected String dbProductName = "DEFAULT";
/**
* A Class that does nothing, implementing DBWritable
*/
@InterfaceStability.Evolving
public static | DBInputFormat |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameListenerDecorator.java | {
"start": 918,
"end": 4122
} | class ____ implements Http2FrameListener {
protected final Http2FrameListener listener;
public Http2FrameListenerDecorator(Http2FrameListener listener) {
this.listener = checkNotNull(listener, "listener");
}
@Override
public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream)
throws Http2Exception {
return listener.onDataRead(ctx, streamId, data, padding, endOfStream);
}
@Override
public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
boolean endStream) throws Http2Exception {
listener.onHeadersRead(ctx, streamId, headers, padding, endStream);
}
@Override
public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency,
short weight, boolean exclusive, int padding, boolean endStream) throws Http2Exception {
listener.onHeadersRead(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endStream);
}
@Override
public void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight,
boolean exclusive) throws Http2Exception {
listener.onPriorityRead(ctx, streamId, streamDependency, weight, exclusive);
}
@Override
public void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception {
listener.onRstStreamRead(ctx, streamId, errorCode);
}
@Override
public void onSettingsAckRead(ChannelHandlerContext ctx) throws Http2Exception {
listener.onSettingsAckRead(ctx);
}
@Override
public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception {
listener.onSettingsRead(ctx, settings);
}
@Override
public void onPingRead(ChannelHandlerContext ctx, long data) throws Http2Exception {
listener.onPingRead(ctx, data);
}
@Override
public void onPingAckRead(ChannelHandlerContext ctx, long data) throws Http2Exception {
listener.onPingAckRead(ctx, data);
}
@Override
public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId, Http2Headers headers,
int padding) throws Http2Exception {
listener.onPushPromiseRead(ctx, streamId, promisedStreamId, headers, padding);
}
@Override
public void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData)
throws Http2Exception {
listener.onGoAwayRead(ctx, lastStreamId, errorCode, debugData);
}
@Override
public void onWindowUpdateRead(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement)
throws Http2Exception {
listener.onWindowUpdateRead(ctx, streamId, windowSizeIncrement);
}
@Override
public void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags,
ByteBuf payload) throws Http2Exception {
listener.onUnknownFrame(ctx, frameType, streamId, flags, payload);
}
}
| Http2FrameListenerDecorator |
java | quarkusio__quarkus | extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/asynchronous/noncompat/NoncompatNonblockingTest.java | {
"start": 351,
"end": 1627
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(NoncompatNonblockingService.class));
@Inject
NoncompatNonblockingService service;
@Test
public void noThreadOffloadAndFallback() throws Exception {
Thread mainThread = Thread.currentThread();
CompletionStage<String> future = service.hello();
assertThat(future.toCompletableFuture().get()).isEqualTo("hello");
// no delay between retries, all executions happen on the same thread
// if there _was_ a delay, subsequent retries would be offloaded to another thread
assertThat(service.getHelloThreads()).allSatisfy(thread -> {
assertThat(thread).isSameAs(mainThread);
});
assertThat(service.getHelloStackTraces()).allSatisfy(stackTrace -> {
assertThat(stackTrace).anySatisfy(frame -> {
assertThat(frame.getClassName()).contains("io.smallrye.faulttolerance.core");
});
});
// 1 initial execution + 3 retries
assertThat(service.getInvocationCounter()).hasValue(4);
assertThat(service.getFallbackThread()).isSameAs(mainThread);
}
}
| NoncompatNonblockingTest |
java | google__guice | core/test/com/google/inject/spi/InjectionPointTest.java | {
"start": 18875,
"end": 19411
} | class ____ extends Super {
@Override
@SuppressWarnings("OverridesJavaxInjectableMethod")
public void atInject() {}
@Override
@SuppressWarnings("OverridesGuiceInjectableMethod")
public void gInject() {}
@com.google.inject.Inject
public void privateAtAndPublicG() {}
@jakarta.inject.Inject
public void privateGAndPublicAt() {}
@com.google.inject.Inject
@Override
public void atFirstThenG() {}
@jakarta.inject.Inject
@Override
public void gFirstThenAt() {}
}
static | Sub |
java | apache__camel | components/camel-solr/src/main/java/org/apache/camel/component/solr/converter/SolrRequestConverter.java | {
"start": 2155,
"end": 14276
/**
 * Camel type converters that adapt exchange bodies into SolrJ request objects
 * (ping, query, content-stream update and regular update requests).
 *
 * <p>All converters read their {@code SolrProducer.ActionContext} from the exchange property
 * {@code SolrConstants.PROPERTY_ACTION_CONTEXT}; callers are expected to have stored it there
 * beforehand (presumably {@code SolrProducer} — confirm against the producer code).
 */
class ____ {
// Handler path used for update requests when the action context supplies none.
public static final String DEFAULT_UPDATE_REQUEST_HANDLER = "/update";
// Static converter holder: never instantiated.
private SolrRequestConverter() {
}
/** Creates a Solr ping request; the exchange body is ignored. */
@Converter
public static SolrPing createSolrPing(Object body, Exchange exchange) {
return new SolrPing();
}
/**
 * Builds a {@link QueryRequest}: pass-through for a {@code QueryRequest} or {@code SolrQuery}
 * body, otherwise the query string is taken from the {@code PARAM_QUERY_STRING} header or,
 * failing that, from the body converted to String. Paging, the request handler ("qt") and
 * any extra Solr params from the action context are then applied.
 */
@Converter
public static QueryRequest createQueryRequest(Object body, Exchange exchange) {
if (body instanceof QueryRequest queryRequest) {
return queryRequest;
}
SolrQuery solrQuery;
// set query
if (body instanceof SolrQuery solrQuery1) {
solrQuery = solrQuery1;
} else {
// Header takes precedence over the body when both carry a query string.
String queryString = exchange.getMessage().getHeader(SolrConstants.PARAM_QUERY_STRING, String.class);
if (ObjectHelper.isEmpty(queryString)) {
queryString = exchange.getMessage().getBody(String.class);
}
solrQuery = new SolrQuery(queryString);
}
SolrProducer.ActionContext ctx
= exchange.getProperty(SolrConstants.PROPERTY_ACTION_CONTEXT, SolrProducer.ActionContext.class);
SolrConfiguration configuration = ctx.configuration();
// Set size parameter and from parameter for search
// (header value wins over the endpoint configuration default).
Integer from = exchange.getMessage().getHeader(SolrConstants.PARAM_FROM, configuration.getFrom(), Integer.class);
if (from != null) {
solrQuery.setStart(from);
}
Integer size = exchange.getMessage().getHeader(SolrConstants.PARAM_SIZE, configuration.getSize(), Integer.class);
if (size != null) {
solrQuery.setRows(size);
}
// Set requestHandler parameter as solr param qt (search only)
String requestHandler = ctx.requestHandler();
if (requestHandler != null) {
solrQuery.add("qt", requestHandler);
}
solrQuery.add(ctx.solrParams());
return new QueryRequest(solrQuery);
}
/**
 * Decides whether an INSERT should be sent as a {@code ContentStreamUpdateRequest}:
 * true for file/wrapped-file bodies, and for string bodies whose content type is supplied
 * via header or detectable from the content. Side effect: XML string bodies not already
 * rooted in {@code <add>} get wrapped (see {@link #appendAddCommandToXML}).
 */
public static boolean isUseContentStreamUpdateRequest(SolrProducer.ActionContext ctx) {
if (!SolrOperation.INSERT.equals(ctx.operation())) {
return false;
}
Object body = ctx.exchange().getMessage().getBody();
if (body instanceof String bodyAsString) {
// string body --> determine content type to use for the string
// if not detected, use regular update request
String contentType = ctx.exchange().getMessage().getHeader(SolrConstants.PARAM_CONTENT_TYPE, String.class);
if (ObjectHelper.isEmpty(contentType)) {
contentType = ContentStreamBase.StringStream.detect(bodyAsString);
}
if (ObjectHelper.isEmpty(contentType)) {
// couldn't detect -> use regular update request
return false;
}
appendAddCommandToXML(ctx, bodyAsString, contentType);
return true;
}
return (body instanceof File || body instanceof WrappedFile<?>);
}
// Creates a stream update request targeting the context's handler (or /update) with its params.
private static ContentStreamUpdateRequest createNewContentStreamUpdateRequest(SolrProducer.ActionContext ctx) {
ContentStreamUpdateRequest updateRequest = ctx.requestHandler() != null
? new ContentStreamUpdateRequest(ctx.requestHandler())
: new ContentStreamUpdateRequest(DEFAULT_UPDATE_REQUEST_HANDLER);
updateRequest.setParams(ctx.solrParams());
return updateRequest;
}
/**
 * Builds a {@link ContentStreamUpdateRequest} from a file, wrapped file or string body.
 *
 * @throws NoTypeConversionAvailableException for any other body type
 */
@Converter
public static ContentStreamUpdateRequest createContentStreamUpdateRequest(Object body, Exchange exchange)
throws NoTypeConversionAvailableException {
SolrProducer.ActionContext ctx
= exchange.getProperty(SolrConstants.PROPERTY_ACTION_CONTEXT, SolrProducer.ActionContext.class);
String contentType = ctx.exchange().getMessage().getHeader(SolrConstants.PARAM_CONTENT_TYPE, String.class);
ContentStreamUpdateRequest streamUpdateRequest = createNewContentStreamUpdateRequest(ctx);
// Unwrap Camel file wrappers to the underlying java.io.File first.
if (body instanceof WrappedFile<?> wrappedFile) {
body = wrappedFile.getFile();
}
if (body instanceof File file) {
ContentStreamBase.FileStream stream = new ContentStreamBase.FileStream(file);
// Fall back to the content type the stream derives from the file itself.
if (ObjectHelper.isEmpty(contentType)) {
contentType = stream.getContentType();
}
stream.setContentType(contentType);
streamUpdateRequest.addContentStream(stream);
return streamUpdateRequest;
}
if (body instanceof String string) {
ContentStreamBase.StringStream stream;
if (ObjectHelper.isEmpty(contentType)) {
stream = new ContentStreamBase.StringStream(string);
} else {
stream = new ContentStreamBase.StringStream(string, contentType);
}
streamUpdateRequest.addContentStream(stream);
return streamUpdateRequest;
}
throw new NoTypeConversionAvailableException(body, SolrRequestConverter.class);
}
// Creates a plain update request targeting the context's handler (if any) with its params.
private static UpdateRequest createNewUpdateRequest(SolrProducer.ActionContext ctx) {
UpdateRequest updateRequest = ctx.requestHandler() != null
? new UpdateRequest(ctx.requestHandler())
: new UpdateRequest();
updateRequest.setParams(ctx.solrParams());
return updateRequest;
}
/**
 * Builds an {@link UpdateRequest} for the context's operation (DELETE or INSERT).
 *
 * @throws IllegalArgumentException for any other operation
 * @throws NoTypeConversionAvailableException if an INSERT body cannot be mapped to documents
 */
@Converter
public static UpdateRequest createUpdateRequest(Object body, Exchange exchange) throws NoTypeConversionAvailableException {
SolrProducer.ActionContext ctx
= exchange.getProperty(SolrConstants.PROPERTY_ACTION_CONTEXT, SolrProducer.ActionContext.class);
switch (ctx.operation()) {
case DELETE -> {
return createUpdateRequestForDelete(body, exchange, ctx);
}
case INSERT -> {
return createUpdateRequestForInsert(body, exchange, ctx);
}
default -> throw new IllegalArgumentException(
SolrConstants.PARAM_OPERATION + " value '" + ctx.operation() + "' is not implemented");
}
}
/**
 * DELETE handling: deletes by query when the header/configuration requests it (or the
 * deprecated legacy operation header is used), otherwise deletes by id. String collections
 * are passed through as multiple queries/ids; any other body is stringified.
 */
private static UpdateRequest createUpdateRequestForDelete(Object body, Exchange exchange, SolrProducer.ActionContext ctx) {
UpdateRequest updateRequest = createNewUpdateRequest(ctx);
boolean deleteByQuery = ctx.exchange().getMessage()
.getHeader(SolrConstants.PARAM_DELETE_BY_QUERY, ctx.configuration().isDeleteByQuery(), Boolean.class);
// for now, keep old operation supported until deprecation
deleteByQuery = deleteByQuery
|| SolrConstants.OPERATION_DELETE_BY_QUERY
.equalsIgnoreCase(exchange.getMessage()
.getHeader(SolrConstants.PARAM_OPERATION, "", String.class));
if (deleteByQuery) {
// Cast is unchecked at compile time; element type was verified by isCollectionOfType.
if (SolrUtils.isCollectionOfType(body, String.class)) {
updateRequest.setDeleteQuery(SolrUtils.convertToList((Collection<String>) body));
return updateRequest;
} else {
return updateRequest.deleteByQuery(String.valueOf(body));
}
}
if (SolrUtils.isCollectionOfType(body, String.class)) {
return updateRequest.deleteById(SolrUtils.convertToList((Collection<String>) body));
}
return updateRequest.deleteById(String.valueOf(body));
}
/**
 * INSERT handling, tried in order: SolrInputDocument, collection of SolrInputDocument,
 * collection of Map, single Map merged with SolrField.* headers, arbitrary bean(s) via
 * {@link DocumentObjectBinder}. As a last resort an empty request is returned when the
 * context carries Solr params (supports e.g. a bare commit); otherwise conversion fails.
 */
private static UpdateRequest createUpdateRequestForInsert(Object body, Exchange exchange, SolrProducer.ActionContext ctx)
throws NoTypeConversionAvailableException {
UpdateRequest updateRequest = createNewUpdateRequest(ctx);
// SolrInputDocument
if (body instanceof SolrInputDocument solrInputDocument) {
updateRequest.add(solrInputDocument);
return updateRequest;
}
// Collection<SolrInputDocument>
if (SolrUtils.isCollectionOfType(body, SolrInputDocument.class)) {
updateRequest.add((Collection<SolrInputDocument>) body);
return updateRequest;
}
// Collection<Map>
if (SolrUtils.isCollectionOfType(body, Map.class)) {
Optional<Collection<SolrInputDocument>> docs
= getOptionalCollectionOfSolrInputDocument((Collection<Map<?, ?>>) body, exchange);
docs.ifPresent(updateRequest::add);
return updateRequest;
}
// Map: gather solr fields from body and merge with solr fields from headers (gathered from SolrField.xxx headers)
// The header solr fields have priority
Map<String, Object> map = new LinkedHashMap<>(getMapFromBody(body));
map.putAll(getMapFromHeaderSolrFields(exchange));
if (!map.isEmpty()) {
body = map;
}
// Map: translate to SolrInputDocument (possibly gathered from SolrField.xxx headers
Optional<SolrInputDocument> doc = getOptionalSolrInputDocumentFromMap(body, exchange);
if (doc.isPresent()) {
updateRequest.add(doc.get());
return updateRequest;
}
// beans
try {
DocumentObjectBinder binder = new DocumentObjectBinder();
if (SolrUtils.isCollectionOfType(body, Object.class)) {
Collection<?> objects = (Collection<?>) body;
objects.forEach(o -> updateRequest.add(binder.toSolrInputDocument(o)));
return updateRequest;
}
updateRequest.add(binder.toSolrInputDocument(body));
return updateRequest;
} catch (BindingException ignored) {
// Body is not a bindable bean; fall through to the params-only case below.
}
// when "invalid" body with solr params, allow processing (e.g. commit) without "body"
if (ctx.solrParams().size() > 0) {
return updateRequest;
}
throw new NoTypeConversionAvailableException(body, SolrRequestConverter.class);
}
// Wraps a bare XML string body in an <add> root so Solr treats it as an add command.
private static void appendAddCommandToXML(SolrProducer.ActionContext ctx, String bodyAsString, String contentType) {
if ((contentType.startsWith(ContentStreamBase.TEXT_XML)
|| contentType.startsWith(ContentStreamBase.APPLICATION_XML))
&& !(bodyAsString.startsWith("<add"))) {
ctx.exchange().getMessage().setBody("<add>" + bodyAsString + "</add>");
}
}
// Map body -> field map with stringified keys; empty map for any other body type.
private static Map<String, Object> getMapFromBody(Object body) {
if (body instanceof Map) {
return ((Map<?, ?>) body).entrySet().stream()
.collect(
Collectors.toMap(
entry -> String.valueOf(entry.getKey()),
Map.Entry::getValue));
}
return Collections.emptyMap();
}
// Extracts Solr fields from exchange headers prefixed with HEADER_FIELD_PREFIX (prefix stripped).
private static Map<String, Object> getMapFromHeaderSolrFields(Exchange exchange) {
return exchange.getMessage().getHeaders().entrySet().stream()
.filter(entry -> entry.getKey().startsWith(SolrConstants.HEADER_FIELD_PREFIX))
.collect(
Collectors.toMap(
entry -> entry.getKey().substring(SolrConstants.HEADER_FIELD_PREFIX.length()),
Map.Entry::getValue));
}
// Converts each map in the collection to a document; empty Optional when none converted.
private static Optional<Collection<SolrInputDocument>> getOptionalCollectionOfSolrInputDocument(
Collection<Map<?, ?>> maps, Exchange exchange) {
Collection<SolrInputDocument> docs = new ArrayList<>();
for (Map<?, ?> map : maps) {
Optional<SolrInputDocument> doc = getOptionalSolrInputDocumentFromMap(map, exchange);
doc.ifPresent(docs::add);
}
return docs.isEmpty() ? Optional.empty() : Optional.of(docs);
}
// Converts a Map body into a SolrInputDocument; empty Optional for non-map/empty bodies.
// NOTE(review): the 'exchange' parameter is unused here — candidate for removal.
private static Optional<SolrInputDocument> getOptionalSolrInputDocumentFromMap(Object body, Exchange exchange) {
Map<String, Object> map = getMapFromBody(body);
if (!map.isEmpty()) {
SolrInputDocument doc = new SolrInputDocument();
map.forEach(doc::setField);
return Optional.of(doc);
}
return Optional.empty();
}
}
| SolrRequestConverter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java | {
"start": 2383,
"end": 9346
} | class ____<K extends OperationKey, R extends Serializable>
implements AutoCloseableAsync {
private static final Logger LOGGER = LoggerFactory.getLogger(CompletedOperationCache.class);
/** In-progress asynchronous operations. */
private final Map<K, ResultAccessTracker<R>> registeredOperationTriggers =
new ConcurrentHashMap<>();
/** Caches the result of completed operations. */
private final Cache<K, ResultAccessTracker<R>> completedOperations;
private final Object lock = new Object();
@Nullable private CompletableFuture<Void> terminationFuture;
private Duration cacheDuration;
/** Creates the cache with the given result TTL, using the system clock. */
public CompletedOperationCache(final Duration cacheDuration) {
this(cacheDuration, Ticker.systemTicker());
}
// Test-only convenience: default TTL from RestOptions, custom time source.
@VisibleForTesting
CompletedOperationCache(final Ticker ticker) {
this(RestOptions.ASYNC_OPERATION_STORE_DURATION.defaultValue(), ticker);
}
/**
 * Creates the cache with the given result TTL and clock source.
 *
 * @param cacheDuration how long completed results are retained; 0 seconds disables eviction
 * @param ticker time source driving expiry, injectable for tests
 */
@VisibleForTesting
CompletedOperationCache(final Duration cacheDuration, final Ticker ticker) {
this.cacheDuration = Preconditions.checkNotNull(cacheDuration);
CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
// A TTL of 0 seconds means "keep forever": no expireAfterWrite is configured.
if (cacheDuration.getSeconds() != 0) {
cacheBuilder = cacheBuilder.expireAfterWrite(cacheDuration);
}
completedOperations =
        cacheBuilder
                .removalListener(
                        (RemovalListener<K, ResultAccessTracker<R>>)
                                removalNotification -> {
                                    if (removalNotification.wasEvicted()) {
                                        Preconditions.checkState(
                                                removalNotification.getKey() != null);
                                        Preconditions.checkState(
                                                removalNotification.getValue() != null);
                                        // When shutting down the cache, we wait until all
                                        // results are accessed.
                                        // When a result gets evicted from the cache, it
                                        // will not be possible to access
                                        // it any longer, and we might be in the process of
                                        // shutting down, so we mark
                                        // the result as accessed to avoid waiting
                                        // indefinitely.
                                        removalNotification.getValue().markAccessed();
                                        LOGGER.info(
                                                "Evicted result with trigger id {} because its TTL of {}s has expired.",
                                                removalNotification.getKey().getTriggerId(),
                                                cacheDuration.getSeconds());
                                    }
                                })
                .ticker(ticker)
                .build();
}
/**
 * Registers an ongoing operation with the cache. Once the future completes (successfully or
 * exceptionally), the tracker is moved from the in-progress map into the completed-results
 * cache, where it becomes subject to the TTL.
 *
 * @param operationKey key under which the operation is tracked
 * @param operationResultFuture A future containing the operation result.
 * @throws IllegalStateException if the cache is already shutting down
 */
public void registerOngoingOperation(
final K operationKey, final CompletableFuture<R> operationResultFuture) {
final ResultAccessTracker<R> inProgress = ResultAccessTracker.inProgress();
synchronized (lock) {
// Reject registrations after closeAsync() has been initiated.
checkState(isRunning(), "The CompletedOperationCache has already been closed.");
registeredOperationTriggers.put(operationKey, inProgress);
}
operationResultFuture.whenComplete(
(result, error) -> {
if (error == null) {
completedOperations.put(
operationKey,
inProgress.finishOperation(OperationResult.success(result)));
} else {
completedOperations.put(
operationKey,
inProgress.finishOperation(OperationResult.failure(error)));
}
// Put into completedOperations BEFORE removing from the in-progress map so
// lookups never observe a gap where the key is in neither structure.
registeredOperationTriggers.remove(operationKey);
});
}
// A null termination future means closeAsync() has not been initiated yet.
@GuardedBy("lock")
private boolean isRunning() {
return terminationFuture == null;
}
/**
 * Returns whether this cache contains an operation under the given operation key,
 * checking in-flight operations first and the completed-results cache second.
 */
public boolean containsOperation(final K operationKey) {
return registeredOperationTriggers.containsKey(operationKey)
|| completedOperations.getIfPresent(operationKey) != null;
}
/**
 * Returns an optional containing the {@link OperationResult} for the specified key, or an empty
 * optional if no operation is registered under the key. A hit marks the result as accessed,
 * which {@code closeAsync()} waits for before completing.
 */
public Optional<OperationResult<R>> get(final K operationKey) {
ResultAccessTracker<R> resultAccessTracker;
// Check in-flight operations first, then fall back to the completed-results cache.
if ((resultAccessTracker = registeredOperationTriggers.get(operationKey)) == null
&& (resultAccessTracker = completedOperations.getIfPresent(operationKey)) == null) {
return Optional.empty();
}
return Optional.of(resultAccessTracker.accessOperationResultOrError());
}
/**
 * Initiates shutdown: completes once every tracked result has been accessed, bounded by a
 * timeout equal to the cache TTL. Idempotent — repeated calls return the same future.
 */
@Override
public CompletableFuture<Void> closeAsync() {
synchronized (lock) {
if (isRunning()) {
terminationFuture =
FutureUtils.orTimeout(
asyncWaitForResultsToBeAccessed(),
cacheDuration.getSeconds(),
TimeUnit.SECONDS,
String.format(
"Waiting for results to be accessed timed out after %s seconds.",
cacheDuration.getSeconds()));
}
return terminationFuture;
}
}
// Future that completes once every tracked result — in-flight and completed — was accessed.
private CompletableFuture<Void> asyncWaitForResultsToBeAccessed() {
return FutureUtils.waitForAll(
Stream.concat(
registeredOperationTriggers.values().stream(),
completedOperations.asMap().values().stream())
.map(ResultAccessTracker::getAccessedFuture)
.collect(Collectors.toList()));
}
// Test hook: forces Guava's otherwise lazy eviction housekeeping to run now.
@VisibleForTesting
void cleanUp() {
completedOperations.cleanUp();
}
/** Stores the result of an asynchronous operation, and tracks accesses to it. */
private static | CompletedOperationCache |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java | {
"start": 11453,
"end": 14593
} | class ____ a package
if (currentClasses.isEmpty()) {
splitPackages.remove(packageName);
}
}
}
if (filterErrorsFound) {
throw new GradleException("Unnecessary split package ignores found");
}
}
// TODO: want to read packages the same for src dirs and jars, but src dirs we also want the files in the src package dir
/**
 * Collects the Java package names of all classes in the given classpath element, which must
 * be either a directory of compiled classes or a jar file.
 *
 * @throws GradleException if the element is neither a directory nor a jar
 * @throws UncheckedIOException if reading the element fails
 */
private static Set<String> readPackages(File classpathElement) {
Set<String> packages = new HashSet<>();
Consumer<Path> addClassPackage = p -> packages.add(getPackageName(p));
try {
if (classpathElement.isDirectory()) {
walkJavaFiles(classpathElement.toPath(), ".class", addClassPackage);
} else if (classpathElement.getName().endsWith(".jar")) {
// Mount the jar as an NIO file system so the same walking code serves dirs and jars.
try (FileSystem jar = FileSystems.newFileSystem(classpathElement.toPath(), Map.of())) {
for (Path root : jar.getRootDirectories()) {
walkJavaFiles(root, ".class", addClassPackage);
}
}
} else {
throw new GradleException("Unsupported classpath element: " + classpathElement);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return packages;
}
/**
 * Walks {@code root} and feeds every file ending in {@code suffix} — relativized to root —
 * to {@code classConsumer}. Entries directly under the root (e.g. module-info) and anything
 * under META-INF are skipped. A missing root is treated as empty.
 */
private static void walkJavaFiles(Path root, String suffix, Consumer<Path> classConsumer) throws IOException {
if (Files.exists(root) == false) {
return;
}
// Files.walk returns a lazy stream that must be closed to release directory handles.
try (var paths = Files.walk(root)) {
paths.filter(p -> p.toString().endsWith(suffix))
.map(root::relativize)
.filter(p -> p.getNameCount() > 1) // module-info or other things without a package can be skipped
.filter(p -> p.toString().startsWith("META-INF") == false)
.forEach(classConsumer);
}
}
/**
 * Derives the dotted package name from a relative class-file path: all name elements
 * except the trailing file name, joined with '.'. A path with a single element
 * (no directory component) yields the empty string.
 */
private static String getPackageName(Path path) {
StringBuilder packageName = new StringBuilder();
for (int i = 0; i < path.getNameCount() - 1; ++i) {
if (i > 0) {
packageName.append('.');
}
packageName.append(path.getName(i));
}
return packageName.toString();
}
/**
 * Renders a classpath element for error messages: the Gradle project name for build-output
 * directories, the bare file name for jars.
 *
 * @throws IllegalStateException if a directory's owning project is not registered
 */
private String formatDependency(File dependencyFile) {
if (dependencyFile.isDirectory()) {
// Walk up to the enclosing "build" directory, which keys the project lookup map.
// NOTE(review): assumes every directory element lives under some "build" dir —
// otherwise getParentFile() eventually returns null and this NPEs; confirm with callers.
while (dependencyFile.getName().equals("build") == false) {
dependencyFile = dependencyFile.getParentFile();
}
String projectName = getParameters().getProjectBuildDirs().get().get(dependencyFile);
if (projectName == null) {
throw new IllegalStateException("Build directory unknown to gradle: " + dependencyFile);
}
return "project " + projectName;
}
return dependencyFile.getName(); // just the jar filename
}
}
| in |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/classrealm/ClassRealmManager.java | {
"start": 3630,
"end": 4069
} | class ____, may be {@code null}. Unresolved artifacts (i.e. with a
* missing file) will automatically be excluded from the realm.
* @return The new plugin realm, never {@code null}.
*/
ClassRealm createPluginRealm(
Plugin plugin,
ClassLoader parent,
List<String> parentImports,
Map<String, ClassLoader> foreignImports,
List<Artifact> artifacts);
}
| realm |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureCosmosdbComponentBuilderFactory.java | {
"start": 1967,
"end": 26826
} | interface ____ extends ComponentBuilder<CosmosDbComponent> {
/**
* Sets the flag to enable client telemetry which will periodically
* collect database operations aggregation statistics, system
* information like cpu/memory and send it to cosmos monitoring service,
* which will be helpful during debugging. DEFAULT value is false
* indicating this is an opt-in feature, by default no telemetry
* collection.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param clientTelemetryEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
doSetProperty("clientTelemetryEnabled", clientTelemetryEnabled);
return this;
}
/**
* The component configurations.
*
* The option is a:
* <code>org.apache.camel.component.azure.cosmosdb.CosmosDbConfiguration</code> type.
*
* Group: common
*
* @param configuration the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder configuration(org.apache.camel.component.azure.cosmosdb.CosmosDbConfiguration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Enables connections sharing across multiple Cosmos Clients. The
* default is false. When you have multiple instances of Cosmos Client
* in the same JVM interacting with multiple Cosmos accounts, enabling
* this allows connection sharing in Direct mode if possible between
* instances of Cosmos Client. Please note, when setting this option,
* the connection configuration (e.g., socket timeout config, idle
* timeout config) of the first instantiated client will be used for all
* other client instances.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param connectionSharingAcrossClientsEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
doSetProperty("connectionSharingAcrossClientsEnabled", connectionSharingAcrossClientsEnabled);
return this;
}
/**
* Sets the consistency levels supported for Azure Cosmos DB client
* operations in the Azure Cosmos DB service. The requested
* ConsistencyLevel must match or be weaker than that provisioned for
* the database account. Consistency levels by order of strength are
* STRONG, BOUNDED_STALENESS, SESSION and EVENTUAL. Refer to consistency
* level documentation for additional details:
* https://docs.microsoft.com/en-us/azure/cosmos-db/consistency-levels.
*
* The option is a:
* <code>com.azure.cosmos.ConsistencyLevel</code> type.
*
* Default: SESSION
* Group: common
*
* @param consistencyLevel the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder consistencyLevel(com.azure.cosmos.ConsistencyLevel consistencyLevel) {
doSetProperty("consistencyLevel", consistencyLevel);
return this;
}
/**
* Sets the container partition key path.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param containerPartitionKeyPath the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder containerPartitionKeyPath(java.lang.String containerPartitionKeyPath) {
doSetProperty("containerPartitionKeyPath", containerPartitionKeyPath);
return this;
}
/**
* Sets the boolean to only return the headers and status code in Cosmos
* DB response in case of Create, Update and Delete operations on
* CosmosItem. In Consumer, it is enabled by default because of the
* ChangeFeed in the consumer that needs this flag to be enabled, and
* thus it shouldn't be overridden. In Producer, it is advised to
* disable it since it reduces the network overhead.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param contentResponseOnWriteEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
doSetProperty("contentResponseOnWriteEnabled", contentResponseOnWriteEnabled);
return this;
}
/**
* Inject an external CosmosAsyncClient into the component which
* provides a client-side logical representation of the Azure Cosmos DB
* service. This asynchronous client is used to configure and execute
* requests against the service.
*
* The option is a:
* <code>com.azure.cosmos.CosmosAsyncClient</code> type.
*
* Group: common
*
* @param cosmosAsyncClient the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder cosmosAsyncClient(com.azure.cosmos.CosmosAsyncClient cosmosAsyncClient) {
doSetProperty("cosmosAsyncClient", cosmosAsyncClient);
return this;
}
/**
* Sets if the component should create the Cosmos container
* automatically in case it doesn't exist in the Cosmos database.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param createContainerIfNotExists the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder createContainerIfNotExists(boolean createContainerIfNotExists) {
doSetProperty("createContainerIfNotExists", createContainerIfNotExists);
return this;
}
/**
* Sets if the component should create the Cosmos database automatically
* in case it doesn't exist in the Cosmos account.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param createDatabaseIfNotExists the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder createDatabaseIfNotExists(boolean createDatabaseIfNotExists) {
doSetProperty("createDatabaseIfNotExists", createDatabaseIfNotExists);
return this;
}
/**
* Sets the Azure Cosmos database endpoint the component will connect
* to.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param databaseEndpoint the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder databaseEndpoint(java.lang.String databaseEndpoint) {
doSetProperty("databaseEndpoint", databaseEndpoint);
return this;
}
/**
* Sets the flag to enable writes on any regions for geo-replicated
* database accounts in the Azure Cosmos DB service. When the value of
* this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account.
* Writable regions are ordered by PreferredRegions property. Setting
* the property value to true has no effect until
* EnableMultipleWriteRegions in DatabaseAccount is also set to true.
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param multipleWriteRegionsEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
doSetProperty("multipleWriteRegionsEnabled", multipleWriteRegionsEnabled);
return this;
}
/**
* Sets the comma separated preferred regions for geo-replicated
* database accounts. For example, East US as the preferred region. When
* EnableEndpointDiscovery is true and PreferredRegions is non-empty,
* the SDK will prefer to use the regions in the container in the order
* they are specified to perform operations.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param preferredRegions the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder preferredRegions(java.lang.String preferredRegions) {
doSetProperty("preferredRegions", preferredRegions);
return this;
}
/**
* Sets whether to allow for reads to go to multiple regions configured
* on an account of Azure Cosmos DB service. DEFAULT value is true. If
* this property is not set, the default is true for all Consistency
* Levels other than Bounded Staleness, The default is false for Bounded
* Staleness. 1. endpointDiscoveryEnabled is true 2. the Azure Cosmos DB
* account has more than one region.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param readRequestsFallbackEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
doSetProperty("readRequestsFallbackEnabled", readRequestsFallbackEnabled);
return this;
}
/**
* Sets throughput of the resources in the Azure Cosmos DB service.
*
* The option is a:
* <code>com.azure.cosmos.models.ThroughputProperties</code>
* type.
*
* Group: common
*
* @param throughputProperties the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder throughputProperties(com.azure.cosmos.models.ThroughputProperties throughputProperties) {
doSetProperty("throughputProperties", throughputProperties);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Sets the ChangeFeedProcessorOptions to be used. Unless specifically
* set the default values that will be used are: maximum items per page
* or FeedResponse: 100 lease renew interval: 17 seconds lease acquire
* interval: 13 seconds lease expiration interval: 60 seconds feed poll
* delay: 5 seconds maximum scale count: unlimited.
*
* The option is a:
* <code>com.azure.cosmos.models.ChangeFeedProcessorOptions</code> type.
*
* Group: consumer
*
* @param changeFeedProcessorOptions the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder changeFeedProcessorOptions(com.azure.cosmos.models.ChangeFeedProcessorOptions changeFeedProcessorOptions) {
doSetProperty("changeFeedProcessorOptions", changeFeedProcessorOptions);
return this;
}
/**
* Sets if the component should create Cosmos lease container for the
* consumer automatically in case it doesn't exist in Cosmos database.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param createLeaseContainerIfNotExists the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder createLeaseContainerIfNotExists(boolean createLeaseContainerIfNotExists) {
doSetProperty("createLeaseContainerIfNotExists", createLeaseContainerIfNotExists);
return this;
}
/**
* Sets if the component should create the Cosmos lease database for the
* consumer automatically in case it doesn't exist in the Cosmos
* account.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param createLeaseDatabaseIfNotExists the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder createLeaseDatabaseIfNotExists(boolean createLeaseDatabaseIfNotExists) {
doSetProperty("createLeaseDatabaseIfNotExists", createLeaseDatabaseIfNotExists);
return this;
}
/**
* Sets the hostname. The host: a host is an application instance that
* uses the change feed processor to listen for changes. Multiple
* instances with the same lease configuration can run in parallel, but
* each instance should have a different instance name. If not
* specified, this will be a generated random hostname.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param hostName the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder hostName(java.lang.String hostName) {
doSetProperty("hostName", hostName);
return this;
}
/**
* Sets the lease container which acts as a state storage and
* coordinates processing the change feed across multiple workers. The
* lease container can be stored in the same account as the monitored
* container or in a separate account. It will be auto-created if
* createLeaseContainerIfNotExists is set to true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: camel-lease
* Group: consumer
*
* @param leaseContainerName the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder leaseContainerName(java.lang.String leaseContainerName) {
doSetProperty("leaseContainerName", leaseContainerName);
return this;
}
/**
* Sets the lease database where the leaseContainerName will be stored.
* If it is not specified, this component will store the lease container
* in the same database that is specified in databaseName. It will be
* auto-created if createLeaseDatabaseIfNotExists is set to true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param leaseDatabaseName the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder leaseDatabaseName(java.lang.String leaseDatabaseName) {
doSetProperty("leaseDatabaseName", leaseDatabaseName);
return this;
}
/**
* Sets the itemId in case needed for operation on item like delete,
* replace.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param itemId the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder itemId(java.lang.String itemId) {
doSetProperty("itemId", itemId);
return this;
}
/**
* Sets partition key. Represents a partition key value in the Azure
* Cosmos DB database service. A partition key identifies the partition
* where the item is stored in.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param itemPartitionKey the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder itemPartitionKey(java.lang.String itemPartitionKey) {
doSetProperty("itemPartitionKey", itemPartitionKey);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The CosmosDB operation that can be used with this component on the
* producer.
*
* The option is a:
* <code>org.apache.camel.component.azure.cosmosdb.CosmosDbOperationsDefinition</code> type.
*
* Default: listDatabases
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder operation(org.apache.camel.component.azure.cosmosdb.CosmosDbOperationsDefinition operation) {
doSetProperty("operation", operation);
return this;
}
        /**
         * An SQL query to execute on a given resources. To learn more about
         * Cosmos SQL API, check this link:
         * https://docs.microsoft.com/en-us/azure/cosmos-db/sql-query-getting-started
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: producer
         *
         * @param query the value to set
         * @return the dsl builder
         */
        default AzureCosmosdbComponentBuilder query(java.lang.String query) {
            doSetProperty("query", query);
            return this;
        }
/**
* Set additional QueryRequestOptions that can be used with queryItems,
* queryContainers, queryDatabases, listDatabases, listItems,
* listContainers operations.
*
* The option is a:
* <code>com.azure.cosmos.models.CosmosQueryRequestOptions</code> type.
*
* Group: producer
*
* @param queryRequestOptions the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder queryRequestOptions(com.azure.cosmos.models.CosmosQueryRequestOptions queryRequestOptions) {
doSetProperty("queryRequestOptions", queryRequestOptions);
return this;
}
/**
* The CosmosDB Indexing Policy that will be set in case of container
* creation, this option is related to createLeaseContainerIfNotExists
* and it will be taken into account when the latter is true.
*
* The option is a:
* <code>com.azure.cosmos.models.IndexingPolicy</code> type.
*
* Group: advanced
*
* @param indexingPolicy the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder indexingPolicy(com.azure.cosmos.models.IndexingPolicy indexingPolicy) {
doSetProperty("indexingPolicy", indexingPolicy);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* Sets either a master or readonly key used to perform authentication
* for accessing resource.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accountKey the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder accountKey(java.lang.String accountKey) {
doSetProperty("accountKey", accountKey);
return this;
}
/**
* Determines the credential strategy to adopt.
*
* The option is a:
* <code>org.apache.camel.component.azure.cosmosdb.CredentialType</code> type.
*
* Default: SHARED_ACCOUNT_KEY
* Group: security
*
* @param credentialType the value to set
* @return the dsl builder
*/
default AzureCosmosdbComponentBuilder credentialType(org.apache.camel.component.azure.cosmosdb.CredentialType credentialType) {
doSetProperty("credentialType", credentialType);
return this;
}
}
| AzureCosmosdbComponentBuilder |
java | apache__camel | core/camel-management-api/src/main/java/org/apache/camel/api/management/mbean/ManagedProcessMBean.java | {
"start": 1141,
"end": 1251
} | class ____ of the Processor in use (may be null if not resolved yet)")
String getProcessorClassName();
}
| name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.