language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilter.java | {
"start": 876,
"end": 1794
} | interface ____ {
/**
* A {@link WebSocketExtensionFilter} that never skip the evaluation of an
* any given extensions {@link WebSocketExtension}.
*/
WebSocketExtensionFilter NEVER_SKIP = new WebSocketExtensionFilter() {
@Override
public boolean mustSkip(WebSocketFrame frame) {
return false;
}
};
/**
* A {@link WebSocketExtensionFilter} that always skip the evaluation of an
* any given extensions {@link WebSocketExtension}.
*/
WebSocketExtensionFilter ALWAYS_SKIP = new WebSocketExtensionFilter() {
@Override
public boolean mustSkip(WebSocketFrame frame) {
return true;
}
};
/**
* Returns {@code true} if the evaluation of the extension must skipped
* for the given frame otherwise {@code false}.
*/
boolean mustSkip(WebSocketFrame frame);
}
| WebSocketExtensionFilter |
java | quarkusio__quarkus | integration-tests/grpc-external-proto-test/src/main/java/io/quarkus/grpc/external/proto/MyGrpcService.java | {
"start": 193,
"end": 469
} | class ____ implements MyTest {
@Override
public Uni<TextContainer> doTest(TextContainer request) {
String response = "reply_to:" + request.getText();
return Uni.createFrom().item(TextContainer.newBuilder().setText(response).build());
}
}
| MyGrpcService |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/function/FailableDoubleBinaryOperator.java | {
"start": 1100,
"end": 1453
} | interface ____<E extends Throwable> {
/**
* Applies this operator to the given operands.
*
* @param left the first operand
* @param right the second operand
* @return the operator result
* @throws E if the operation fails
*/
double applyAsDouble(double left, double right) throws E;
}
| FailableDoubleBinaryOperator |
java | junit-team__junit5 | junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/ArgumentsProvider.java | {
"start": 1775,
"end": 2718
} | interface ____ {
/**
* Provide a {@link Stream} of {@link Arguments} to be passed to a
* {@code @ParameterizedTest} method.
*
* @param context the current extension context; never {@code null}
* @return a stream of arguments; never {@code null}
* @deprecated Please implement
* {@link #provideArguments(ParameterDeclarations, ExtensionContext)} instead.
*/
@Deprecated(since = "5.13")
@API(status = DEPRECATED, since = "5.13")
default Stream<? extends Arguments> provideArguments(@SuppressWarnings("unused") ExtensionContext context)
throws Exception {
throw new UnsupportedOperationException(
"Please implement provideArguments(ParameterDeclarations, ExtensionContext) instead.");
}
/**
* Provide a {@link Stream} of {@link Arguments} to be passed to a
* {@code @ParameterizedClass} or {@code @ParameterizedTest}.
*
* @param parameters the parameter declarations for the parameterized
* | ArgumentsProvider |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/CustomNormalScopeTest.java | {
"start": 4423,
"end": 5312
} | class ____ implements BuildCompatibleExtension {
@Discovery
public void discovery(MetaAnnotations meta, ScannedClasses scan) {
meta.addContext(CommandScoped.class, CommandContext.class);
scan.add(CommandExecutor.class.getName());
scan.add(CommandDecorator.class.getName());
}
@Synthesis
public void synthesis(SyntheticComponents syn) {
syn.addBean(CommandContextController.class)
.type(CommandContextController.class)
.scope(Dependent.class)
.createWith(CommandContextControllerCreator.class);
syn.addBean(CommandExecution.class)
.type(CommandExecution.class)
.scope(CommandScoped.class)
.createWith(CommandExecutionCreator.class);
}
}
static | MyExtension |
java | netty__netty | codec-compression/src/test/java/io/netty/handler/codec/compression/Lz4FrameIntegrationTest.java | {
"start": 739,
"end": 1061
} | class ____ extends AbstractIntegrationTest {
@Override
protected EmbeddedChannel createEncoder() {
return new EmbeddedChannel(new Lz4FrameEncoder());
}
@Override
protected EmbeddedChannel createDecoder() {
return new EmbeddedChannel(new Lz4FrameDecoder());
}
}
| Lz4FrameIntegrationTest |
java | dropwizard__dropwizard | dropwizard-request-logging/src/test/java/io/dropwizard/request/logging/old/LogbackClassicRequestLogFactoryTest.java | {
"start": 1633,
"end": 5773
} | class ____ {
static {
BootstrapLogging.bootstrap();
}
private static RequestLogFactory<?> requestLog;
@BeforeAll
static void setUp() throws Exception {
final ObjectMapper objectMapper = Jackson.newObjectMapper();
objectMapper.getSubtypeResolver().registerSubtypes(ConsoleAppenderFactory.class, FileAppenderFactory.class,
SyslogAppenderFactory.class);
requestLog = new YamlConfigurationFactory<>(RequestLogFactory.class,
BaseValidator.newValidator(), objectMapper, "dw")
.build(new ResourceConfigurationSourceProvider(), "yaml/logbackClassicRequestLog.yml");
}
@Test
void testDeserialized() {
assertThat(requestLog)
.isInstanceOfSatisfying(LogbackClassicRequestLogFactory.class, logFactory -> assertThat(logFactory)
.satisfies(classicRequestLogFactory -> assertThat(classicRequestLogFactory.getTimeZone()).isEqualTo(TimeZone.getTimeZone("Europe/Amsterdam")))
.satisfies(classicRequestLogFactory -> assertThat(classicRequestLogFactory.getAppenders()).hasSize(3).extractingResultOf("getClass")
.containsOnly(ConsoleAppenderFactory.class, FileAppenderFactory.class, SyslogAppenderFactory.class
)));
}
@Test
void testLogFormat() throws Exception {
final LogbackClassicRequestLogFactory factory = new LogbackClassicRequestLogFactory();
@SuppressWarnings("unchecked")
final Appender<ILoggingEvent> appender = mock(Appender.class);
final Request request = mock(Request.class);
final Response response = mock(Response.class, RETURNS_DEEP_STUBS);
final HttpChannelState channelState = mock(HttpChannelState.class);
factory.setAppenders(Collections.singletonList(
(context, applicationName, layoutFactory, levelFilterFactory, asyncAppenderFactory) -> appender));
final String tz = TimeZone.getAvailableIDs((int)TimeUnit.HOURS.toMillis(-5))[0];
factory.setTimeZone(TimeZone.getTimeZone(tz));
CustomRequestLog logger = null;
try (MockedStatic<Request> staticRequest = mockStatic(Request.class);
MockedStatic<Response> staticResponse = mockStatic(Response.class)) {
staticRequest.when(() -> Request.getRemoteAddr(request)).thenReturn("10.0.0.1");
// Jetty log format compares against System.currentTimeMillis, so there
// isn't a way for us to set our own clock
when(request.getHeadersNanoTime()).thenReturn(0L);
when(request.getMethod()).thenReturn("GET");
HttpURI httpURI = mock(HttpURI.class);
when(httpURI.getPath()).thenReturn("/test/things");
when(request.getHttpURI()).thenReturn(httpURI);
ConnectionMetaData connectionMetaData = mock(ConnectionMetaData.class);
when(connectionMetaData.getProtocol()).thenReturn("HTTP/1.1");
when(request.getConnectionMetaData()).thenReturn(connectionMetaData);
when(request.getHeaders()).thenReturn(HttpFields.build());
staticResponse.when(() -> Response.getContentBytesWritten(response)).thenReturn(8290L);
final ArgumentCaptor<ILoggingEvent> captor = ArgumentCaptor.forClass(ILoggingEvent.class);
logger = (CustomRequestLog)factory.build("my-app");
logger.log(request, response);
verify(appender, timeout(1000)).doAppend(captor.capture());
final ILoggingEvent event = captor.getValue();
assertThat(event.getFormattedMessage())
.startsWith("10.0.0.1")
.doesNotContain("%")
.contains("\"GET /test/things HTTP/1.1\"")
.contains("-0500");
} catch (Exception e) {
if (logger != null) {
logger.stop();
}
throw e;
}
}
@Test
void isDiscoverable() {
assertThat(new DiscoverableSubtypeResolver().getDiscoveredSubtypes())
.contains(LogbackClassicRequestLogFactory.class);
}
}
| LogbackClassicRequestLogFactoryTest |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/SampleElements.java | {
"start": 1671,
"end": 2194
} | class ____ extends SampleElements<String> {
public Strings() {
// elements aren't sorted, to better test SortedSet iteration ordering
super("b", "a", "c", "d", "e");
}
// for testing SortedSet and SortedMap methods
public static final String BEFORE_FIRST = "\0";
public static final String BEFORE_FIRST_2 = "\0\0";
public static final String MIN_ELEMENT = "a";
public static final String AFTER_LAST = "z";
public static final String AFTER_LAST_2 = "zz";
}
public static | Strings |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/visitor/OracleRowNumToLimit.java | {
"start": 458,
"end": 17329
} | class ____ extends OracleASTVisitorAdapter {
private Context context;
private boolean removeSelectListRownum = true;
@Override
public boolean visit(SQLSelect x) {
if (x.getWithSubQuery() != null) {
x.getWithSubQuery().accept(this);
}
if (x.getQuery() != null) {
x.getQuery().accept(this);
}
SQLSelectQueryBlock queryBlock = x.getQueryBlock();
if (queryBlock != null && queryBlock.getLimit() != null) {
SQLExpr rowCount = queryBlock.getLimit().getRowCount();
if (rowCount instanceof SQLIntegerExpr && SQLIntegerExpr.isZero((SQLIntegerExpr) rowCount)) {
x.setOrderBy(null);
}
}
return false;
}
@Override
public boolean visit(OracleSelectQueryBlock x) {
context = new Context(context);
context.queryBlock = x;
SQLExpr where = x.getWhere();
if (where != null) {
where.accept(this);
}
SQLTableSource from = x.getFrom();
if (from != null) {
from.accept(this);
}
removeSelectListRowNum(x);
List<SQLSelectItem> selectList = x.getSelectList();
for (SQLSelectItem selectItem : selectList) {
selectItem.accept(this);
}
SQLExpr startWith = x.getStartWith();
if (startWith != null) {
startWith.accept(this);
}
boolean allColumn = false;
if (selectList.size() == 1) {
SQLExpr expr = selectList.get(0).getExpr();
if (expr instanceof SQLAllColumnExpr) {
allColumn = true;
} else if (expr instanceof SQLPropertyExpr && ((SQLPropertyExpr) expr).getName().equals("*")) {
allColumn = true;
}
}
if ((!allColumn)
&& x.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock subQuery = ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQueryBlock();
List<SQLSelectItem> subSelectList = subQuery.getSelectList();
if (subSelectList.size() >= selectList.size()) {
boolean match = true;
for (int i = 0; i < selectList.size(); i++) {
if (!selectList.get(i).equals(subSelectList.get(i))) {
match = false;
break;
}
}
if (match) {
allColumn = true;
}
}
}
if (x.getParent() instanceof SQLSelect
&& x.getWhere() == null
&& x.getOrderBy() == null
&& allColumn
&& x.getLimit() != null
&& x.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLSelect select = (SQLSelect) x.getParent();
SQLSelectQueryBlock subQuery = ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQueryBlock();
subQuery.mergeLimit(x.getLimit());
x.setLimit(null);
select.setQuery(subQuery);
context.queryBlock = subQuery;
context.fixLimit();
subQuery.accept(this);
}
if (x.getParent() instanceof SQLUnionQuery
&& x.getWhere() == null
&& x.getOrderBy() == null
&& allColumn
&& x.getLimit() != null
&& x.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLUnionQuery union = (SQLUnionQuery) x.getParent();
SQLSelectQueryBlock subQuery = ((SQLSubqueryTableSource) x.getFrom()).getSelect().getQueryBlock();
subQuery.mergeLimit(x.getLimit());
x.setLimit(null);
if (union.getLeft() == x) {
union.setLeft(subQuery);
} else {
union.setRight(subQuery);
}
context.queryBlock = subQuery;
context.fixLimit();
subQuery.accept(this);
}
context = context.parent;
return false;
}
@Override
public boolean visit(SQLUnionQuery x) {
if (x.getLeft() != null) {
x.getLeft().accept(this);
}
if (x.getRight() != null) {
x.getRight().accept(this);
}
if (x.getLeft() instanceof SQLSelectQueryBlock && x.getRight() instanceof SQLSelectQueryBlock) {
if (x.getOperator() == SQLUnionOperator.MINUS) {
boolean eqNonLimit;
{
SQLSelectQueryBlock left = (SQLSelectQueryBlock) x.getLeft().clone();
SQLSelectQueryBlock right = (SQLSelectQueryBlock) x.getRight().clone();
left.setLimit(null);
right.setLimit(null);
eqNonLimit = left.toString().equals(right.toString());
}
if (eqNonLimit) {
SQLSelectQueryBlock merged = (SQLSelectQueryBlock) x.getLeft().clone();
SQLSelectQueryBlock right = (SQLSelectQueryBlock) x.getRight();
SQLLimit leftLimit = merged.getLimit();
SQLLimit rightLimit = right.getLimit();
if ((leftLimit == null && rightLimit == null)
|| (leftLimit != null && leftLimit.equals(rightLimit))) {
merged.setLimit(new SQLLimit(0));
} else if (leftLimit == null) {
SQLExpr rightOffset = rightLimit.getOffset();
if (rightOffset != null && !SQLIntegerExpr.isZero(rightOffset)) {
return false; // can not merge
}
SQLLimit limit = new SQLLimit();
limit.setOffset(rightLimit.getRowCount());
merged.setLimit(limit);
} else {
SQLExpr rightOffset = rightLimit.getOffset();
if (rightOffset != null && !SQLIntegerExpr.isZero(rightOffset)) {
return false; // can not merge
}
SQLExpr leftOffset = leftLimit.getOffset();
if (leftOffset != null && !SQLIntegerExpr.isZero(leftOffset)) {
return false; // todo
}
SQLExpr rightRowCount = rightLimit.getRowCount();
SQLExpr leftRowCount = leftLimit.getRowCount();
SQLLimit limit = new SQLLimit();
limit.setOffset(rightRowCount);
limit.setRowCount(substract(leftRowCount, rightRowCount));
if (SQLIntegerExpr.isZero(limit.getRowCount())) {
limit.setRowCount(0);
limit.setOffset(null);
if (merged.getOrderBy() != null) {
merged.setOrderBy(null);
}
}
merged.setLimit(limit);
}
SQLObject parent = x.getParent();
if (parent instanceof SQLSelect) {
SQLSelect select = (SQLSelect) parent;
select.setQuery(merged);
} else if (parent instanceof SQLUnionQuery) {
SQLUnionQuery union = (SQLUnionQuery) parent;
if (union.getLeft() == x) {
union.setLeft(merged);
} else {
union.setRight(merged);
}
}
}
} else if (x.getOperator() == SQLUnionOperator.INTERSECT) {
boolean eqNonLimit;
{
SQLSelectQueryBlock left = (SQLSelectQueryBlock) x.getLeft().clone();
SQLSelectQueryBlock right = (SQLSelectQueryBlock) x.getRight().clone();
left.setLimit(null);
right.setLimit(null);
eqNonLimit = left.toString().equals(right.toString());
}
if (eqNonLimit) {
SQLSelectQueryBlock merged = (SQLSelectQueryBlock) x.getLeft().clone();
SQLSelectQueryBlock right = (SQLSelectQueryBlock) x.getRight();
SQLLimit leftLimit = merged.getLimit();
SQLLimit rightLimit = right.getLimit();
if (rightLimit == null
|| (rightLimit.equals(leftLimit))) {
// skip
} else if (leftLimit == null) {
merged.setLimit(rightLimit.clone());
} else {
SQLLimit limit = new SQLLimit();
SQLExpr rightOffset = rightLimit.getOffset();
SQLExpr leftOffset = leftLimit.getOffset();
if (leftOffset == null) {
limit.setOffset(rightOffset);
} else if (rightOffset == null) {
limit.setOffset(leftOffset);
} else if (rightOffset.equals(leftOffset)) {
limit.setOffset(leftOffset);
} else {
if ((!(leftOffset instanceof SQLIntegerExpr)) || !(rightOffset instanceof SQLIntegerExpr)) {
return false; // can not merged
}
limit.setOffset(SQLIntegerExpr.greatst((SQLIntegerExpr) leftOffset, (SQLIntegerExpr) rightOffset));
}
SQLExpr rightRowCount = rightLimit.getRowCount();
SQLExpr leftRowCount = leftLimit.getRowCount();
SQLExpr leftEnd = leftOffset == null ? leftRowCount : substract(leftRowCount, leftOffset);
SQLExpr rightEnd = rightOffset == null ? rightRowCount : substract(rightRowCount, rightOffset);
if ((leftEnd != null && !(leftEnd instanceof SQLIntegerExpr)) || (rightEnd != null && !(rightEnd instanceof SQLIntegerExpr))) {
return false; // can not merged
}
SQLIntegerExpr end = SQLIntegerExpr.least((SQLIntegerExpr) leftEnd, (SQLIntegerExpr) rightEnd);
if (limit.getOffset() == null) {
limit.setRowCount(end);
} else {
limit.setRowCount(substract(end, limit.getOffset()));
}
merged.setLimit(limit);
}
SQLObject parent = x.getParent();
if (parent instanceof SQLSelect) {
SQLSelect select = (SQLSelect) parent;
select.setQuery(merged);
} else if (parent instanceof SQLUnionQuery) {
SQLUnionQuery union = (SQLUnionQuery) parent;
if (union.getLeft() == x) {
union.setLeft(merged);
} else {
union.setRight(merged);
}
}
}
}
}
return false;
}
private void removeSelectListRowNum(SQLSelectQueryBlock x) {
SQLTableSource from = x.getFrom();
SQLLimit limit = x.getLimit();
if (limit == null
&& from instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) from).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
limit = ((SQLSubqueryTableSource) from).getSelect().getQueryBlock().getLimit();
}
if (!removeSelectListRownum) {
return;
}
List<SQLSelectItem> selectList = x.getSelectList();
for (int i = selectList.size() - 1; i >= 0; i--) {
SQLSelectItem selectItem = selectList.get(i);
SQLExpr expr = selectItem.getExpr();
if (isRowNum(expr)
&& limit != null) {
selectList.remove(i);
}
}
}
@Override
public boolean visit(SQLBinaryOpExpr x) {
SQLExpr left = x.getLeft();
SQLExpr right = x.getRight();
SQLBinaryOperator op = x.getOperator();
if (context == null || context.queryBlock == null) {
return false;
}
boolean isRowNum = isRowNum(left);
if (isRowNum) {
if (op == SQLBinaryOperator.LessThan) {
if (SQLUtils.replaceInParent(x, null)) {
context.setLimit(decrement(right));
// 如果存在 offset, 重新计算 rowCount
context.fixLimit();
}
return false;
} else if (op == SQLBinaryOperator.LessThanOrEqual) {
if (SQLUtils.replaceInParent(x, null)) {
context.setLimit(right);
// 如果存在 offset, 重新计算 rowCount
context.fixLimit();
}
return false;
} else if (op == SQLBinaryOperator.Equality) {
if (SQLUtils.replaceInParent(x, null)) {
context.setLimit(right);
// 如果存在 offset, 重新计算 rowCount
context.fixLimit();
}
return false;
} else if (op == SQLBinaryOperator.GreaterThanOrEqual) {
if (SQLUtils.replaceInParent(x, null)) {
context.setOffset(decrement(right));
// 如果存在 offset, 重新计算 rowCount
context.fixLimit();
}
return false;
} else if (op == SQLBinaryOperator.GreaterThan) {
if (SQLUtils.replaceInParent(x, null)) {
context.setOffset(right);
// 如果存在 offset, 重新计算 rowCount
context.fixLimit();
}
return false;
}
}
return true;
}
@Override
public boolean visit(SQLBetweenExpr x) {
if (!isRowNum(x.getTestExpr())) {
return true;
}
if (SQLUtils.replaceInParent(x, null)) {
SQLExpr offset = decrement(x.getBeginExpr());
context.setOffset(offset);
if (offset instanceof SQLIntegerExpr) {
int val = ((SQLIntegerExpr) offset).getNumber().intValue();
if (val < 0) {
offset = new SQLIntegerExpr(0);
}
}
context.setLimit(substract(x.getEndExpr(), offset));
SQLLimit limit = context.queryBlock.getLimit();
if (limit != null) {
limit.putAttribute("oracle.isFixLimit", Boolean.TRUE);
}
}
return false;
}
public boolean isRowNum(SQLExpr x) {
if (x instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) x;
long nameHashCode64 = identifierExpr.nameHashCode64();
if (nameHashCode64 == FnvHash.Constants.ROWNUM) {
return true;
}
if (context != null
&& context.queryBlock != null
&& context.queryBlock.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) context.queryBlock.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock subQueryBlock = ((SQLSubqueryTableSource) context.queryBlock.getFrom()).getSelect().getQueryBlock();
SQLSelectItem selectItem = subQueryBlock.findSelectItem(nameHashCode64);
this.context = new Context(this.context);
this.context.queryBlock = subQueryBlock;
try {
if (selectItem != null && isRowNum(selectItem.getExpr())) {
return true;
}
} finally {
this.context = this.context.parent;
}
}
}
return false;
}
public static | OracleRowNumToLimit |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene80/BWCLucene80Codec.java | {
"start": 1631,
"end": 3322
} | class ____ extends BWCCodec {
private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat();
private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() {
@Override
public DocValuesFormat getDocValuesFormatForField(String field) {
return defaultDVFormat;
}
};
private final DocValuesFormat defaultDVFormat = new Lucene80DocValuesFormat();
private final StoredFieldsFormat storedFieldsFormat;
private final PointsFormat pointsFormat = new Lucene60MetadataOnlyPointsFormat();
// Needed for SPI loading
@SuppressWarnings("unused")
public BWCLucene80Codec() {
super("BWCLucene80Codec");
this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Lucene50StoredFieldsFormat.Mode.BEST_SPEED);
}
@Override
protected FieldInfosFormat originalFieldInfosFormat() {
return new Lucene60FieldInfosFormat();
}
@Override
protected SegmentInfoFormat originalSegmentInfoFormat() {
return new Lucene70SegmentInfoFormat();
}
@Override
public final StoredFieldsFormat storedFieldsFormat() {
return storedFieldsFormat;
}
@Override
public final LiveDocsFormat liveDocsFormat() {
return liveDocsFormat;
}
@Override
public final CompoundFormat compoundFormat() {
return compoundFormat;
}
@Override
public final PointsFormat pointsFormat() {
return pointsFormat;
}
@Override
public final DocValuesFormat docValuesFormat() {
return docValuesFormat;
}
}
| BWCLucene80Codec |
java | spring-projects__spring-framework | buildSrc/src/main/java/org/springframework/build/CheckstyleConventions.java | {
"start": 1307,
"end": 3283
} | class ____ {
/**
* Applies the Spring Java Format and Checkstyle plugins with the project conventions.
* @param project the current project
*/
public void apply(Project project) {
project.getPlugins().withType(JavaBasePlugin.class, (java) -> {
if (project.getRootProject() == project) {
configureNoHttpPlugin(project);
}
project.getPlugins().apply(CheckstylePlugin.class);
project.getTasks().withType(Checkstyle.class).forEach(checkstyle -> checkstyle.getMaxHeapSize().set("1g"));
CheckstyleExtension checkstyle = project.getExtensions().getByType(CheckstyleExtension.class);
checkstyle.setToolVersion("12.1.2");
checkstyle.getConfigDirectory().set(project.getRootProject().file("src/checkstyle"));
String version = SpringJavaFormatPlugin.class.getPackage().getImplementationVersion();
DependencySet checkstyleDependencies = project.getConfigurations().getByName("checkstyle").getDependencies();
checkstyleDependencies.add(
project.getDependencies().create("io.spring.javaformat:spring-javaformat-checkstyle:" + version));
});
}
private static void configureNoHttpPlugin(Project project) {
project.getPlugins().apply(NoHttpPlugin.class);
NoHttpExtension noHttp = project.getExtensions().getByType(NoHttpExtension.class);
noHttp.setAllowlistFile(project.file("src/nohttp/allowlist.lines"));
noHttp.getSource().exclude("**/test-output/**", "**/.settings/**", "**/.classpath",
"**/.project", "**/.gradle/**", "**/node_modules/**", "**/spring-jcl/**", "buildSrc/build/**");
List<String> buildFolders = List.of("bin", "build", "out");
project.allprojects(subproject -> {
Path rootPath = project.getRootDir().toPath();
Path projectPath = rootPath.relativize(subproject.getProjectDir().toPath());
for (String buildFolder : buildFolders) {
Path innerBuildDir = projectPath.resolve(buildFolder);
noHttp.getSource().exclude(innerBuildDir + File.separator + "**");
}
});
}
}
| CheckstyleConventions |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/ValidationException.java | {
"start": 846,
"end": 1113
} | class ____ any validation exception, such as
* {@link org.apache.camel.support.processor.validation.SchemaValidationException} so that it is easy to treat all
* validation errors in a similar way irrespective of the particular validation technology used.
*/
public | for |
java | apache__avro | lang/java/protobuf/src/test/java/org/apache/avro/protobuf/multiplefiles/MOrBuilder.java | {
"start": 205,
"end": 371
} | interface ____ extends
// @@protoc_insertion_point(interface_extends:org.apache.avro.protobuf.multiplefiles.M)
com.google.protobuf.MessageOrBuilder {
}
| MOrBuilder |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/Http2ProtocolDetector.java | {
"start": 1179,
"end": 1939
} | class ____ implements ProtocolDetector {
private final ChannelBuffer clientPrefaceString = new ByteBufferBackedChannelBuffer(
Http2CodecUtil.connectionPrefaceBuf().nioBuffer());
@Override
public Result detect(ChannelBuffer in) {
int prefaceLen = clientPrefaceString.readableBytes();
int bytesRead = min(in.readableBytes(), prefaceLen);
// If the input so far doesn't match the preface, break the connection.
if (bytesRead == 0 || !ChannelBuffers.prefixEquals(in, clientPrefaceString, bytesRead)) {
return Result.unrecognized();
}
if (bytesRead == prefaceLen) {
return Result.recognized();
}
return Result.needMoreData();
}
}
| Http2ProtocolDetector |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/LeakTracker.java | {
"start": 5620,
"end": 10350
} | class ____ implements Runnable {
private static final AtomicReferenceFieldUpdater<Leak, Record> headUpdater = AtomicReferenceFieldUpdater.newUpdater(
Leak.class,
Record.class,
"head"
);
private static final AtomicIntegerFieldUpdater<Leak> droppedRecordsUpdater = AtomicIntegerFieldUpdater.newUpdater(
Leak.class,
"droppedRecords"
);
@SuppressWarnings("unused")
private volatile Record head;
@SuppressWarnings("unused")
private volatile int droppedRecords;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Cleaner.Cleanable cleanable;
@SuppressWarnings("this-escape")
private Leak(Object referent) {
this.cleanable = cleaner.register(referent, this);
headUpdater.set(this, new Record(Record.BOTTOM));
}
@Override
public void run() {
if (closed.compareAndSet(false, true) == false || logger.isErrorEnabled() == false) {
return;
}
String records = toString();
if (reportedLeaks.putIfAbsent(records, Boolean.TRUE) == null) {
logger.error("LEAK: resource was not cleaned up before it was garbage-collected.{}", records);
}
}
/**
* Adds an access record that includes the current stack trace to the leak.
*/
public void record() {
Record oldHead;
Record newHead;
boolean dropped;
do {
Record prevHead;
if ((prevHead = oldHead = headUpdater.get(this)) == null) {
// already closed.
return;
}
final int numElements = oldHead.pos + 1;
if (numElements >= TARGET_RECORDS) {
final int backOffFactor = Math.min(numElements - TARGET_RECORDS, 30);
if (dropped = Randomness.get().nextInt(1 << backOffFactor) != 0) {
prevHead = oldHead.next;
}
} else {
dropped = false;
}
newHead = new Record(prevHead);
} while (headUpdater.compareAndSet(this, oldHead, newHead) == false);
if (dropped) {
droppedRecordsUpdater.incrementAndGet(this);
}
}
/**
* Stop tracking the object that this leak was created for.
*
* @return true if the leak was released by this call, false if the leak had already been released
*/
public boolean close() {
if (closed.compareAndSet(false, true)) {
cleanable.clean();
headUpdater.set(this, null);
return true;
}
return false;
}
@Override
public String toString() {
Record oldHead = headUpdater.get(this);
if (oldHead == null) {
// Already closed
return "";
}
final int dropped = droppedRecordsUpdater.get(this);
int duped = 0;
int present = oldHead.pos + 1;
// Guess about 2 kilobytes per stack trace
StringBuilder buf = new StringBuilder(present * 2048).append('\n');
buf.append("Recent access records: ").append('\n');
int i = 1;
Set<String> seen = Sets.newHashSetWithExpectedSize(present);
for (; oldHead != Record.BOTTOM; oldHead = oldHead.next) {
String s = oldHead.toString();
if (seen.add(s)) {
if (oldHead.next == Record.BOTTOM) {
buf.append("Created at:").append('\n').append(s);
} else {
buf.append('#').append(i++).append(':').append('\n').append(s);
}
} else {
duped++;
}
}
if (duped > 0) {
buf.append(": ").append(duped).append(" leak records were discarded because they were duplicates").append('\n');
}
if (dropped > 0) {
buf.append(": ")
.append(dropped)
.append(" leak records were discarded because the leak record count is targeted to ")
.append(TARGET_RECORDS)
.append('.')
.append('\n');
}
buf.setLength(buf.length() - "\n".length());
return buf.toString();
}
}
private static final | Leak |
java | apache__camel | components/camel-openstack/src/main/java/org/apache/camel/component/openstack/nova/NovaComponent.java | {
"start": 1050,
"end": 1407
} | class ____ extends DefaultComponent {
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
NovaEndpoint endpoint = new NovaEndpoint(uri, this);
setProperties(endpoint, parameters);
endpoint.setHost(remaining);
return endpoint;
}
}
| NovaComponent |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/aggregate/UseLatestAggregationStrategy.java | {
"start": 1841,
"end": 3843
} | class ____ implements AggregationStrategy {
@Override
public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
if (newExchange == null) {
return oldExchange;
}
if (oldExchange == null) {
return newExchange;
}
Exchange answer = null;
// propagate exception first
propagateException(oldExchange, newExchange);
if (newExchange.getException() != null) {
answer = newExchange;
}
if (answer == null) {
// the propagate failures
answer = propagateFailure(oldExchange, newExchange);
}
return answer;
}
protected void propagateException(Exchange oldExchange, Exchange newExchange) {
if (oldExchange == null) {
return;
}
// propagate exception from old exchange if there isn't already an exception
if (newExchange.getException() == null) {
newExchange.setException(oldExchange.getException());
newExchange.setProperty(ExchangePropertyKey.FAILURE_ENDPOINT,
oldExchange.getProperty(ExchangePropertyKey.FAILURE_ENDPOINT));
}
}
protected Exchange propagateFailure(Exchange oldExchange, Exchange newExchange) {
if (oldExchange == null) {
return newExchange;
}
// propagate exception from old exchange if there isn't already an exception
if (oldExchange.isFailed() || oldExchange.isRollbackOnly() || oldExchange.isRollbackOnlyLast()
|| oldExchange.getExchangeExtension().isErrorHandlerHandledSet()
&& oldExchange.getExchangeExtension().isErrorHandlerHandled()) {
// propagate failure by using old exchange as the answer
return oldExchange;
}
return newExchange;
}
@Override
public String toString() {
return "UseLatestAggregationStrategy";
}
}
| UseLatestAggregationStrategy |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/secrets/KubernetesSecretsEndpointConfigurer.java | {
"start": 745,
"end": 9070
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KubernetesSecretsEndpoint target = (KubernetesSecretsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": target.getConfiguration().setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
case "cacertdata":
case "caCertData": target.getConfiguration().setCaCertData(property(camelContext, java.lang.String.class, value)); return true;
case "cacertfile":
case "caCertFile": target.getConfiguration().setCaCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertdata":
case "clientCertData": target.getConfiguration().setClientCertData(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertfile":
case "clientCertFile": target.getConfiguration().setClientCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyalgo":
case "clientKeyAlgo": target.getConfiguration().setClientKeyAlgo(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeydata":
case "clientKeyData": target.getConfiguration().setClientKeyData(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyfile":
case "clientKeyFile": target.getConfiguration().setClientKeyFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeypassphrase":
case "clientKeyPassphrase": target.getConfiguration().setClientKeyPassphrase(property(camelContext, java.lang.String.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.getConfiguration().setConnectionTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "dnsdomain":
case "dnsDomain": target.getConfiguration().setDnsDomain(property(camelContext, java.lang.String.class, value)); return true;
case "kubernetesclient":
case "kubernetesClient": target.getConfiguration().setKubernetesClient(property(camelContext, io.fabric8.kubernetes.client.KubernetesClient.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "namespace": target.getConfiguration().setNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "oauthtoken":
case "oauthToken": target.getConfiguration().setOauthToken(property(camelContext, java.lang.String.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.getConfiguration().setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "portname":
case "portName": target.getConfiguration().setPortName(property(camelContext, java.lang.String.class, value)); return true;
case "portprotocol":
case "portProtocol": target.getConfiguration().setPortProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "trustcerts":
case "trustCerts": target.getConfiguration().setTrustCerts(property(camelContext, java.lang.Boolean.class, value)); return true;
case "username": target.getConfiguration().setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return java.lang.String.class;
case "cacertdata":
case "caCertData": return java.lang.String.class;
case "cacertfile":
case "caCertFile": return java.lang.String.class;
case "clientcertdata":
case "clientCertData": return java.lang.String.class;
case "clientcertfile":
case "clientCertFile": return java.lang.String.class;
case "clientkeyalgo":
case "clientKeyAlgo": return java.lang.String.class;
case "clientkeydata":
case "clientKeyData": return java.lang.String.class;
case "clientkeyfile":
case "clientKeyFile": return java.lang.String.class;
case "clientkeypassphrase":
case "clientKeyPassphrase": return java.lang.String.class;
case "connectiontimeout":
case "connectionTimeout": return java.lang.Integer.class;
case "dnsdomain":
case "dnsDomain": return java.lang.String.class;
case "kubernetesclient":
case "kubernetesClient": return io.fabric8.kubernetes.client.KubernetesClient.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "namespace": return java.lang.String.class;
case "oauthtoken":
case "oauthToken": return java.lang.String.class;
case "operation": return java.lang.String.class;
case "password": return java.lang.String.class;
case "portname":
case "portName": return java.lang.String.class;
case "portprotocol":
case "portProtocol": return java.lang.String.class;
case "trustcerts":
case "trustCerts": return java.lang.Boolean.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KubernetesSecretsEndpoint target = (KubernetesSecretsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return target.getConfiguration().getApiVersion();
case "cacertdata":
case "caCertData": return target.getConfiguration().getCaCertData();
case "cacertfile":
case "caCertFile": return target.getConfiguration().getCaCertFile();
case "clientcertdata":
case "clientCertData": return target.getConfiguration().getClientCertData();
case "clientcertfile":
case "clientCertFile": return target.getConfiguration().getClientCertFile();
case "clientkeyalgo":
case "clientKeyAlgo": return target.getConfiguration().getClientKeyAlgo();
case "clientkeydata":
case "clientKeyData": return target.getConfiguration().getClientKeyData();
case "clientkeyfile":
case "clientKeyFile": return target.getConfiguration().getClientKeyFile();
case "clientkeypassphrase":
case "clientKeyPassphrase": return target.getConfiguration().getClientKeyPassphrase();
case "connectiontimeout":
case "connectionTimeout": return target.getConfiguration().getConnectionTimeout();
case "dnsdomain":
case "dnsDomain": return target.getConfiguration().getDnsDomain();
case "kubernetesclient":
case "kubernetesClient": return target.getConfiguration().getKubernetesClient();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "namespace": return target.getConfiguration().getNamespace();
case "oauthtoken":
case "oauthToken": return target.getConfiguration().getOauthToken();
case "operation": return target.getConfiguration().getOperation();
case "password": return target.getConfiguration().getPassword();
case "portname":
case "portName": return target.getConfiguration().getPortName();
case "portprotocol":
case "portProtocol": return target.getConfiguration().getPortProtocol();
case "trustcerts":
case "trustCerts": return target.getConfiguration().getTrustCerts();
case "username": return target.getConfiguration().getUsername();
default: return null;
}
}
}
| KubernetesSecretsEndpointConfigurer |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/runtime/src/main/java/io/quarkus/smallrye/reactivemessaging/runtime/SmallRyeReactiveMessagingRecorder.java | {
"start": 322,
"end": 1630
} | class ____ {
public Supplier<Object> createContext(List<QuarkusMediatorConfiguration> mediatorConfigurations,
List<WorkerConfiguration> workerConfigurations, List<EmitterConfiguration> emitterConfigurations,
List<ChannelConfiguration> channelConfigurations) {
return new Supplier<Object>() {
@Override
public Object get() {
return new SmallRyeReactiveMessagingContext() {
@Override
public List<WorkerConfiguration> getWorkerConfigurations() {
return workerConfigurations;
}
@Override
public List<QuarkusMediatorConfiguration> getMediatorConfigurations() {
return mediatorConfigurations;
}
@Override
public List<EmitterConfiguration> getEmitterConfigurations() {
return emitterConfigurations;
}
@Override
public List<ChannelConfiguration> getChannelConfigurations() {
return channelConfigurations;
}
};
}
};
}
public | SmallRyeReactiveMessagingRecorder |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/annotation/JsonDeserialize.java | {
"start": 1584,
"end": 1712
} | interface ____
{
// // // Annotations for explicitly specifying deserialize/builder
/**
* Deserializer | JsonDeserialize |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/connections/Other.java | {
"start": 181,
"end": 508
} | class ____ {
private Long id;
private String name;
public Other() {
}
public Other(String name) {
this.name = name;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| Other |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SnmpEndpointBuilderFactory.java | {
"start": 1476,
"end": 1605
} | interface ____ {
/**
* Builder for endpoint consumers for the SNMP component.
*/
public | SnmpEndpointBuilderFactory |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java | {
"start": 1206,
"end": 3609
} | class ____ extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), TimeValue.ZERO)
.build();
}
private static void assertClusterUuid(boolean expectCommitted, String expectedValue) {
for (String nodeName : internalCluster().getNodeNames()) {
final Metadata metadata = client(nodeName).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata();
assertEquals(expectCommitted, metadata.clusterUUIDCommitted());
assertEquals(expectedValue, metadata.clusterUUID());
final ClusterStatsResponse response = safeAwait(
listener -> client(nodeName).execute(TransportClusterStatsAction.TYPE, new ClusterStatsRequest(), listener)
);
assertEquals(expectedValue, response.getClusterUUID());
}
}
public void testClusterUuidInInitialClusterState() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
try {
internalCluster().startDataOnlyNode();
assertClusterUuid(false, Metadata.UNKNOWN_CLUSTER_UUID);
internalCluster().startMasterOnlyNode();
internalCluster().validateClusterFormed();
final var clusterUUID = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state().metadata().clusterUUID();
assertNotEquals(Metadata.UNKNOWN_CLUSTER_UUID, clusterUUID);
assertClusterUuid(true, clusterUUID);
internalCluster().stopCurrentMasterNode();
assertClusterUuid(true, clusterUUID);
internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
@Override
public boolean validateClusterForming() {
return false;
}
});
assertClusterUuid(true, clusterUUID);
} finally {
while (true) {
var node = internalCluster().getRandomNodeName();
if (node == null) {
break;
}
assertTrue(internalCluster().stopNode(node));
}
}
}
}
| InitialClusterStateIT |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 140720,
"end": 140953
} | interface ____<E extends Entity, P extends EntityPredicate<?>> {
Collection<E> find(String pageable, P predicate) throws IOException;
List<E> find(boolean sort, P predicate) throws IOException;
}
abstract static | ResourceEndpoint |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scripting/support/StaticScriptSourceTests.java | {
"start": 948,
"end": 2632
} | class ____ {
private static final String SCRIPT_TEXT = "print($hello) if $true;";
private final StaticScriptSource source = new StaticScriptSource(SCRIPT_TEXT);
@Test
void createWithNullScript() {
assertThatIllegalArgumentException().isThrownBy(() ->
new StaticScriptSource(null));
}
@Test
void createWithEmptyScript() {
assertThatIllegalArgumentException().isThrownBy(() ->
new StaticScriptSource(""));
}
@Test
void createWithWhitespaceOnlyScript() {
assertThatIllegalArgumentException().isThrownBy(() ->
new StaticScriptSource(" \n\n\t \t\n"));
}
@Test
void isModifiedIsTrueByDefault() {
assertThat(source.isModified()).as("Script must be flagged as 'modified' when first created.").isTrue();
}
@Test
void gettingScriptTogglesIsModified() {
source.getScriptAsString();
assertThat(source.isModified()).as("Script must be flagged as 'not modified' after script is read.").isFalse();
}
@Test
void gettingScriptViaToStringDoesNotToggleIsModified() {
boolean isModifiedState = source.isModified();
source.toString();
assertThat(source.isModified()).as("Script's 'modified' flag must not change after script is read via toString().").isEqualTo(isModifiedState);
}
@Test
void isModifiedToggledWhenDifferentScriptIsSet() {
source.setScript("use warnings;");
assertThat(source.isModified()).as("Script must be flagged as 'modified' when different script is passed in.").isTrue();
}
@Test
void isModifiedNotToggledWhenSameScriptIsSet() {
source.setScript(SCRIPT_TEXT);
assertThat(source.isModified()).as("Script must not be flagged as 'modified' when same script is passed in.").isFalse();
}
}
| StaticScriptSourceTests |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestReadBufferManager.java | {
"start": 2133,
"end": 7497
} | class ____ extends AbstractAbfsIntegrationTest {
/**
* Time before the JUnit test times out for eventually() clauses
* to fail. This copes with slow network connections and debugging
* sessions, yet still allows for tests to fail with meaningful
* messages.
*/
public static final int TIMEOUT_OFFSET = 5 * 60_000;
/**
* Interval between eventually preobes.
*/
public static final int PROBE_INTERVAL_MILLIS = 1_000;
public ITestReadBufferManager() throws Exception {
}
@Test
public void testPurgeBufferManagerForParallelStreams() throws Exception {
describe("Testing purging of buffers from ReadBufferManagerV1 for "
+ "parallel input streams");
final int numBuffers = 16;
final LinkedList<Integer> freeList = new LinkedList<>();
for (int i=0; i < numBuffers; i++) {
freeList.add(i);
}
ExecutorService executorService = Executors.newFixedThreadPool(4);
AzureBlobFileSystem fs = getABFSWithReadAheadConfig();
// verify that the fs has the capability to validate the fix
Assertions.assertThat(fs.hasPathCapability(new Path("/"), CAPABILITY_SAFE_READAHEAD))
.describedAs("path capability %s in %s", CAPABILITY_SAFE_READAHEAD, fs)
.isTrue();
try {
for (int i = 0; i < 4; i++) {
final String fileName = methodName.getMethodName() + i;
executorService.submit((Callable<Void>) () -> {
byte[] fileContent = getRandomBytesArray(ONE_MB);
Path testFilePath = createFileWithContent(fs, fileName, fileContent);
try (FSDataInputStream iStream = fs.open(testFilePath)) {
iStream.read();
}
return null;
});
}
} finally {
executorService.shutdown();
// wait for all tasks to finish
executorService.awaitTermination(1, TimeUnit.MINUTES);
}
ReadBufferManager bufferManager = getBufferManager(fs);
// readahead queue is empty
assertListEmpty("ReadAheadQueue", bufferManager.getReadAheadQueueCopy());
// verify the in progress list eventually empties out.
eventually(getTestTimeoutMillis() - TIMEOUT_OFFSET, PROBE_INTERVAL_MILLIS, () ->
assertListEmpty("InProgressList", bufferManager.getInProgressListCopy()));
}
private void assertListEmpty(String listName, List<ReadBuffer> list) {
Assertions.assertThat(list)
.describedAs("After closing all streams %s should be empty", listName)
.hasSize(0);
}
@Test
public void testPurgeBufferManagerForSequentialStream() throws Exception {
describe("Testing purging of buffers in ReadBufferManagerV1 for "
+ "sequential input streams");
AzureBlobFileSystem fs = getABFSWithReadAheadConfig();
final String fileName = methodName.getMethodName();
byte[] fileContent = getRandomBytesArray(ONE_MB);
Path testFilePath = createFileWithContent(fs, fileName, fileContent);
AbfsInputStream iStream1 = null;
// stream1 will be closed right away.
try {
iStream1 = (AbfsInputStream) fs.open(testFilePath).getWrappedStream();
// Just reading one byte will trigger all read ahead calls.
iStream1.read();
} finally {
IOUtils.closeStream(iStream1);
}
ReadBufferManager bufferManager = getBufferManager(fs);
AbfsInputStream iStream2 = null;
try {
iStream2 = (AbfsInputStream) fs.open(testFilePath).getWrappedStream();
iStream2.read();
// After closing stream1, no queued buffers of stream1 should be present
// assertions can't be made about the state of the other lists as it is
// too prone to race conditions.
assertListDoesnotContainBuffersForIstream(bufferManager.getReadAheadQueueCopy(), iStream1);
} finally {
// closing the stream later.
IOUtils.closeStream(iStream2);
}
// After closing stream2, no queued buffers of stream2 should be present.
assertListDoesnotContainBuffersForIstream(bufferManager.getReadAheadQueueCopy(), iStream2);
// After closing both the streams, read queue should be empty.
assertListEmpty("ReadAheadQueue", bufferManager.getReadAheadQueueCopy());
}
private void assertListDoesnotContainBuffersForIstream(List<ReadBuffer> list,
AbfsInputStream inputStream) {
for (ReadBuffer buffer : list) {
Assertions.assertThat(buffer.getStream())
.describedAs("Buffers associated with closed input streams shouldn't be present")
.isNotEqualTo(inputStream);
}
}
private AzureBlobFileSystem getABFSWithReadAheadConfig() throws Exception {
Configuration conf = getRawConfiguration();
conf.setLong(FS_AZURE_READ_AHEAD_QUEUE_DEPTH, 8);
conf.setInt(AZURE_READ_BUFFER_SIZE, MIN_BUFFER_SIZE);
conf.setInt(FS_AZURE_READ_AHEAD_BLOCK_SIZE, MIN_BUFFER_SIZE);
return (AzureBlobFileSystem) FileSystem.newInstance(conf);
}
private ReadBufferManager getBufferManager(AzureBlobFileSystem fs) {
int blockSize = fs.getAbfsStore().getAbfsConfiguration().getReadAheadBlockSize();
if (getConfiguration().isReadAheadV2Enabled()) {
ReadBufferManagerV2.setReadBufferManagerConfigs(blockSize,
getConfiguration());
return ReadBufferManagerV2.getBufferManager();
}
ReadBufferManagerV1.setReadBufferManagerConfigs(blockSize);
return ReadBufferManagerV1.getBufferManager();
}
}
| ITestReadBufferManager |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java | {
"start": 3683,
"end": 13510
} | class ____ {
public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]";
public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]";
public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]";
/**
* Part of DFS_QUERY_THEN_FETCH, which fetches distributed term frequencies and executes KNN.
*/
public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]";
public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]";
/**
* Part of DFS_QUERY_THEN_FETCH, which fetches distributed term frequencies and executes KNN.
*/
public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]";
public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]";
public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]";
public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]";
public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";
public static final String RANK_FEATURE_SHARD_ACTION_NAME = "indices:data/read/search[phase/rank/feature]";
/**
* The Can-Match phase. It is executed to pre-filter shards that a search request hits. It rewrites the query on
* the shard and checks whether the result of the rewrite matches no documents, in which case the shard can be
* filtered out.
*/
public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]";
private static final Logger logger = LogManager.getLogger(SearchTransportService.class);
private final TransportService transportService;
private final NodeClient client;
private final BiFunction<
Transport.Connection,
ActionListener<? super SearchPhaseResult>,
ActionListener<? super SearchPhaseResult>> responseWrapper;
private final Map<String, Long> clientConnections = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
public SearchTransportService(
TransportService transportService,
NodeClient client,
BiFunction<
Transport.Connection,
ActionListener<? super SearchPhaseResult>,
ActionListener<? super SearchPhaseResult>> responseWrapper
) {
this.transportService = transportService;
this.client = client;
this.responseWrapper = responseWrapper;
}
public TransportService transportService() {
return transportService;
}
public void sendFreeContext(
Transport.Connection connection,
ShardSearchContextId contextId,
ActionListener<SearchFreeContextResponse> listener
) {
transportService.sendRequest(
connection,
FREE_CONTEXT_SCROLL_ACTION_NAME,
new ScrollFreeContextRequest(contextId),
TransportRequestOptions.EMPTY,
new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::readFrom, TransportResponseHandler.TRANSPORT_WORKER)
);
}
public void sendCanMatch(
Transport.Connection connection,
final CanMatchNodeRequest request,
SearchTask task,
final ActionListener<CanMatchNodeResponse> listener
) {
transportService.sendChildRequest(
connection,
QUERY_CAN_MATCH_NODE_NAME,
request,
task,
new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER)
);
}
public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener<TransportResponse> listener) {
transportService.sendRequest(
connection,
CLEAR_SCROLL_CONTEXTS_ACTION_NAME,
new ClearScrollContextsRequest(),
TransportRequestOptions.EMPTY,
new ActionListenerResponseHandler<>(listener, in -> ActionResponse.Empty.INSTANCE, TransportResponseHandler.TRANSPORT_WORKER)
);
}
public void sendExecuteDfs(
Transport.Connection connection,
final ShardSearchRequest request,
SearchTask task,
final ActionListener<DfsSearchResult> listener
) {
transportService.sendChildRequest(
connection,
DFS_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(listener, DfsSearchResult::new, connection)
);
}
public void sendExecuteQuery(
Transport.Connection connection,
final ShardSearchRequest request,
SearchTask task,
final ActionListener<SearchPhaseResult> listener
) {
// we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request
// this used to be the QUERY_AND_FETCH which doesn't exist anymore.
final boolean fetchDocuments = request.numberOfShards() == 1
&& (request.source() == null || request.source().rankBuilder() == null);
Writeable.Reader<SearchPhaseResult> reader = fetchDocuments ? QueryFetchSearchResult::new : in -> new QuerySearchResult(in, true);
final ActionListener<? super SearchPhaseResult> handler = responseWrapper.apply(connection, listener);
transportService.sendChildRequest(
connection,
QUERY_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(handler, reader, connection)
);
}
public void sendExecuteQuery(
Transport.Connection connection,
final QuerySearchRequest request,
SearchTask task,
final ActionListener<QuerySearchResult> listener
) {
transportService.sendChildRequest(
connection,
QUERY_ID_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(listener, QuerySearchResult::new, connection)
);
}
public void sendExecuteScrollQuery(
Transport.Connection connection,
final InternalScrollSearchRequest request,
SearchTask task,
final ActionListener<ScrollQuerySearchResult> listener
) {
transportService.sendChildRequest(
connection,
QUERY_SCROLL_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(listener, ScrollQuerySearchResult::new, connection)
);
}
public void sendExecuteRankFeature(
Transport.Connection connection,
final RankFeatureShardRequest request,
SearchTask task,
final ActionListener<RankFeatureResult> listener
) {
transportService.sendChildRequest(
connection,
RANK_FEATURE_SHARD_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(listener, RankFeatureResult::new, connection)
);
}
public void sendExecuteScrollFetch(
Transport.Connection connection,
final InternalScrollSearchRequest request,
SearchTask task,
final ActionListener<ScrollQueryFetchSearchResult> listener
) {
transportService.sendChildRequest(
connection,
QUERY_FETCH_SCROLL_ACTION_NAME,
request,
task,
new ConnectionCountingHandler<>(listener, ScrollQueryFetchSearchResult::new, connection)
);
}
public void sendExecuteFetch(
Transport.Connection connection,
final ShardFetchSearchRequest request,
SearchTask task,
final ActionListener<FetchSearchResult> listener
) {
sendExecuteFetch(connection, FETCH_ID_ACTION_NAME, request, task, listener);
}
public void sendExecuteFetchScroll(
Transport.Connection connection,
final ShardFetchRequest request,
SearchTask task,
final ActionListener<FetchSearchResult> listener
) {
sendExecuteFetch(connection, FETCH_ID_SCROLL_ACTION_NAME, request, task, listener);
}
private void sendExecuteFetch(
Transport.Connection connection,
String action,
final ShardFetchRequest request,
SearchTask task,
final ActionListener<FetchSearchResult> listener
) {
transportService.sendChildRequest(
connection,
action,
request,
task,
new ConnectionCountingHandler<>(listener, FetchSearchResult::new, connection)
);
}
/**
* Used by {@link TransportSearchAction} to send the expand queries (field collapsing).
*/
void sendExecuteMultiSearch(final MultiSearchRequest request, SearchTask task, final ActionListener<MultiSearchResponse> listener) {
final Transport.Connection connection = transportService.getConnection(transportService.getLocalNode());
transportService.sendChildRequest(
connection,
TransportMultiSearchAction.TYPE.name(),
request,
task,
new ConnectionCountingHandler<>(listener, MultiSearchResponse::new, connection)
);
}
public RemoteClusterService getRemoteClusterService() {
return transportService.getRemoteClusterService();
}
/**
* Return a map of nodeId to pending number of search requests.
* This is a snapshot of the current pending search and not a live map.
*/
public Map<String, Long> getPendingSearchRequests() {
return new HashMap<>(clientConnections);
}
static | SearchTransportService |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/client/RestClientObservationTests.java | {
"start": 12805,
"end": 13336
} | class ____ implements ResponseErrorHandler {
final TestObservationRegistry observationRegistry;
ObservationErrorHandler(TestObservationRegistry observationRegistry) {
this.observationRegistry = observationRegistry;
}
@Override
public boolean hasError(ClientHttpResponse response) {
return true;
}
@Override
public void handleError(URI uri, HttpMethod httpMethod, ClientHttpResponse response) {
assertThat(this.observationRegistry.getCurrentObservationScope()).isNotNull();
}
}
}
| ObservationErrorHandler |
java | quarkusio__quarkus | independent-projects/tools/codestarts/src/test/java/io/quarkus/devtools/codestarts/core/CodestartProcessorTest.java | {
"start": 387,
"end": 2058
} | class ____ {
@Test
void checkSelectedDefaultStrategy() {
Map<String, String> spec = new HashMap<>();
spec.put("test/foo.tt", "forbidden");
spec.put("*", "replace");
spec.put("docker-compose-include-test.yml", "docker-compose-includes");
final CodestartProcessor processor = new CodestartProcessor(
MessageWriter.info(),
"a",
Paths.get("test"),
CodestartProcessor.buildStrategies(spec),
Collections.emptyMap());
assertThat(processor.getSelectedDefaultStrategy()).isEqualTo(CodestartFileStrategyHandler.BY_NAME.get("replace"));
assertThat(processor.getStrategy("test/foo.tt")).hasValue(CodestartFileStrategyHandler.BY_NAME.get("forbidden"));
assertThat(processor.getStrategy("docker-compose-include-test.yml"))
.hasValue(CodestartFileStrategyHandler.BY_NAME.get("docker-compose-includes"));
}
@Test
void checkDefaultStrategy() {
Map<String, String> spec = new HashMap<>();
spec.put("test/foo.tt", "forbidden");
final CodestartProcessor processor = new CodestartProcessor(
MessageWriter.info(),
"a",
Paths.get("test"),
CodestartProcessor.buildStrategies(spec),
Collections.emptyMap());
assertThat(processor.getSelectedDefaultStrategy())
.isEqualTo(CodestartFileStrategyHandler.BY_NAME.get("fail-on-duplicate"));
assertThat(processor.getStrategy("test/foo.tt")).hasValue(CodestartFileStrategyHandler.BY_NAME.get("forbidden"));
}
}
| CodestartProcessorTest |
java | junit-team__junit5 | documentation/src/test/java/example/extensions/ParameterResolverCustomAnnotationDemo.java | {
"start": 855,
"end": 1163
} | class ____ {
@Test
void testInt(@FirstInteger Integer first, @SecondInteger Integer second) {
assertEquals(1, first);
assertEquals(2, second);
}
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
@ExtendWith(FirstInteger.Extension.class)
public @ | ParameterResolverCustomAnnotationDemo |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InitializeInlineTest.java | {
"start": 2618,
"end": 3078
} | class ____ {
int test() {
int c;
try {
c = 1;
} catch (Exception e) {
throw e;
}
return c;
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void unstylishBlocks() {
compilationHelper
.addInputLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorsFormatProvider.java | {
"start": 738,
"end": 792
} | interface ____ provide custom vector formats
*/
public | to |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/rolesallowed/RolesAllowedExpressionTest.java | {
"start": 6779,
"end": 7055
} | class ____ {
private SecuredUtils() {
// UTIL CLASS
}
@RolesAllowed("${sudo}")
public static String staticSecuredMethod() {
return ConfigProvider.getConfig().getValue("sudo", String.class);
}
}
}
| SecuredUtils |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoFlatMap.java | {
"start": 1371,
"end": 2221
} | class ____<T, R> extends InternalMonoOperator<T, R> implements Fuseable {
final Function<? super T, ? extends Mono<? extends R>> mapper;
MonoFlatMap(Mono<? extends T> source,
Function<? super T, ? extends Mono<? extends R>> mapper) {
super(source);
this.mapper = Objects.requireNonNull(mapper, "mapper");
}
@Override
public @Nullable CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super R> actual) {
//for now Mono in general doesn't support onErrorContinue, so the scalar version shouldn't either
if (FluxFlatMap.trySubscribeScalarMap(source, actual, mapper, true, false)) {
return null;
}
return new FlatMapMain<>(actual, mapper);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
static final | MonoFlatMap |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java | {
"start": 2705,
"end": 16490
} | class ____ extends ESAllocationTestCase {
private static final TransportVersion PROJECT_ID_IN_SNAPSHOTS_DELETIONS_AND_REPO_CLEANUP = TransportVersion.fromName(
"project_id_in_snapshots_deletions_and_repo_cleanup"
);
public void testClusterStateSerialization() throws Exception {
IndexLongFieldRange eventIngestedRangeInput = randomFrom(
IndexLongFieldRange.UNKNOWN,
IndexLongFieldRange.NO_SHARDS,
IndexLongFieldRange.EMPTY,
IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(100000, 200000))
);
IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(10)
.numberOfReplicas(1)
.eventIngestedRange(eventIngestedRangeInput);
ClusterStateTestRecord result = createAndSerializeClusterState(indexMetadataBuilder, TransportVersion.current());
assertThat(result.serializedClusterState().getClusterName().value(), equalTo(result.clusterState().getClusterName().value()));
assertThat(result.serializedClusterState().routingTable().toString(), equalTo(result.clusterState().routingTable().toString()));
IndexLongFieldRange eventIngestedRangeOutput = result.serializedClusterState()
.getMetadata()
.getProject()
.index("test")
.getEventIngestedRange();
assertThat(eventIngestedRangeInput, equalTo(eventIngestedRangeOutput));
if (eventIngestedRangeInput.containsAllShardRanges() && eventIngestedRangeInput != IndexLongFieldRange.EMPTY) {
assertThat(eventIngestedRangeOutput.getMin(), equalTo(100000L));
assertThat(eventIngestedRangeOutput.getMax(), equalTo(200000L));
}
}
/**
* @param clusterState original ClusterState created by helper method
* @param serializedClusterState serialized version of the clusterState
*/
private record ClusterStateTestRecord(ClusterState clusterState, ClusterState serializedClusterState) {}
private static ClusterStateTestRecord createAndSerializeClusterState(
IndexMetadata.Builder indexMetadataBuilder,
TransportVersion transportVersion
) throws IOException {
final ProjectId projectId = Metadata.DEFAULT_PROJECT_ID;
final ProjectMetadata projectMetadata = ProjectMetadata.builder(projectId).put(indexMetadataBuilder).build();
final Metadata metadata = Metadata.builder().put(projectMetadata).build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test"))
.build();
DiscoveryNodes nodes = DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
.localNodeId("node1")
.masterNodeId("node2")
.build();
ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1"))
.nodes(nodes)
.metadata(metadata)
.routingTable(projectId, routingTable)
.build();
AllocationService strategy = createAllocationService();
clusterState = ClusterState.builder(clusterState)
.routingTable(strategy.reroute(clusterState, "reroute", ActionListener.noop()).routingTable())
.build();
BytesStreamOutput outStream = new BytesStreamOutput();
outStream.setTransportVersion(transportVersion);
clusterState.writeTo(outStream);
StreamInput inStream = new NamedWriteableAwareStreamInput(
outStream.bytes().streamInput(),
new NamedWriteableRegistry(ClusterModule.getNamedWriteables())
);
inStream.setTransportVersion(transportVersion);
ClusterState serializedClusterState = ClusterState.readFrom(inStream, null);
return new ClusterStateTestRecord(clusterState, serializedClusterState);
}
public void testRoutingTableSerialization() throws Exception {
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(10).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test"))
.build();
DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(nodes)
.metadata(metadata)
.routingTable(routingTable)
.build();
AllocationService strategy = createAllocationService();
RoutingTable source = strategy.reroute(clusterState, "reroute", ActionListener.noop()).routingTable();
BytesStreamOutput outStream = new BytesStreamOutput();
source.writeTo(outStream);
StreamInput inStream = outStream.bytes().streamInput();
RoutingTable target = RoutingTable.readFrom(inStream);
assertThat(target.toString(), equalTo(source.toString()));
}
public void testSnapshotDeletionsInProgressSerialization() throws Exception {
TransportVersion version = TransportVersionUtils.randomVersionBetween(
random(),
TransportVersion.minimumCompatible(),
TransportVersion.current()
);
boolean includeRestore = randomBoolean();
ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE)
.putCustom(
SnapshotDeletionsInProgress.TYPE,
SnapshotDeletionsInProgress.of(
List.of(
new SnapshotDeletionsInProgress.Entry(
version.supports(PROJECT_ID_IN_SNAPSHOTS_DELETIONS_AND_REPO_CLEANUP)
? randomProjectIdOrDefault()
: ProjectId.DEFAULT,
"repo1",
Collections.singletonList(new SnapshotId("snap1", UUIDs.randomBase64UUID())),
randomNonNegativeLong(),
randomNonNegativeLong(),
SnapshotDeletionsInProgress.State.STARTED
)
)
)
);
if (includeRestore) {
builder.putCustom(
RestoreInProgress.TYPE,
new RestoreInProgress.Builder().add(
new RestoreInProgress.Entry(
UUIDs.randomBase64UUID(),
new Snapshot("repo2", new SnapshotId("snap2", UUIDs.randomBase64UUID())),
RestoreInProgress.State.STARTED,
false,
Collections.singletonList("index_name"),
Map.of()
)
).build()
);
}
ClusterState clusterState = builder.incrementVersion().build();
Diff<ClusterState> diffs = clusterState.diff(ClusterState.EMPTY_STATE);
// serialize with current version
BytesStreamOutput outStream = new BytesStreamOutput();
outStream.setTransportVersion(version);
diffs.writeTo(outStream);
StreamInput inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
inStream.setTransportVersion(version);
Diff<ClusterState> serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
// remove the custom and try serializing again
clusterState = ClusterState.builder(clusterState).removeCustom(SnapshotDeletionsInProgress.TYPE).incrementVersion().build();
outStream = new BytesStreamOutput();
outStream.setTransportVersion(version);
diffs.writeTo(outStream);
inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
inStream.setTransportVersion(version);
serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
stateAfterDiffs = serializedDiffs.apply(stateAfterDiffs);
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
}
private ClusterState updateUsingSerialisedDiff(ClusterState original, Diff<ClusterState> diff) throws IOException {
BytesStreamOutput outStream = new BytesStreamOutput();
outStream.setTransportVersion(TransportVersion.current());
diff.writeTo(outStream);
StreamInput inStream = new NamedWriteableAwareStreamInput(
outStream.bytes().streamInput(),
new NamedWriteableRegistry(ClusterModule.getNamedWriteables())
);
diff = ClusterState.readDiffFrom(inStream, newNode("node-name"));
return diff.apply(original);
}
public void testObjectReuseWhenApplyingClusterStateDiff() throws Exception {
IndexMetadata indexMetadata = IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(10)
.numberOfReplicas(1)
.build();
IndexTemplateMetadata indexTemplateMetadata = IndexTemplateMetadata.builder("test-template")
.patterns(Arrays.asList(generateRandomStringArray(10, 100, false, false)))
.build();
Metadata metadata = Metadata.builder().put(indexMetadata, true).put(indexTemplateMetadata).build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test"))
.build();
ClusterState clusterState1 = ClusterState.builder(new ClusterName("clusterName1"))
.metadata(metadata)
.routingTable(routingTable)
.build();
BytesStreamOutput outStream = new BytesStreamOutput();
outStream.setTransportVersion(TransportVersion.current());
clusterState1.writeTo(outStream);
StreamInput inStream = new NamedWriteableAwareStreamInput(
outStream.bytes().streamInput(),
new NamedWriteableRegistry(ClusterModule.getNamedWriteables())
);
ClusterState serializedClusterState1 = ClusterState.readFrom(inStream, null);
// Create a new, albeit equal, IndexMetadata object
ClusterState clusterState2 = ClusterState.builder(clusterState1)
.incrementVersion()
.metadata(Metadata.builder().put(IndexMetadata.builder(indexMetadata).numberOfReplicas(1).build(), true))
.build();
assertNotSame(
"Should have created a new, equivalent, IndexMetadata object in clusterState2",
clusterState1.metadata().getProject().index("test"),
clusterState2.metadata().getProject().index("test")
);
ClusterState serializedClusterState2 = updateUsingSerialisedDiff(serializedClusterState1, clusterState2.diff(clusterState1));
assertSame(
"Unchanged metadata should not create new IndexMetadata objects",
serializedClusterState1.metadata().getProject().index("test"),
serializedClusterState2.metadata().getProject().index("test")
);
assertSame(
"Unchanged routing table should not create new IndexRoutingTable objects",
serializedClusterState1.routingTable().index("test"),
serializedClusterState2.routingTable().index("test")
);
// Create a new and different IndexMetadata object
ClusterState clusterState3 = ClusterState.builder(clusterState1)
.incrementVersion()
.metadata(Metadata.builder().put(IndexMetadata.builder(indexMetadata).numberOfReplicas(2).build(), true))
.build();
ClusterState serializedClusterState3 = updateUsingSerialisedDiff(serializedClusterState2, clusterState3.diff(clusterState2));
assertNotEquals(
"Should have a new IndexMetadata object",
serializedClusterState2.metadata().getProject().index("test"),
serializedClusterState3.metadata().getProject().index("test")
);
assertSame(
"Unchanged routing table should not create new IndexRoutingTable objects",
serializedClusterState2.routingTable().index("test"),
serializedClusterState3.routingTable().index("test")
);
assertSame("nodes", serializedClusterState2.nodes(), serializedClusterState3.nodes());
assertSame("blocks", serializedClusterState2.blocks(), serializedClusterState3.blocks());
assertSame(
"template",
serializedClusterState2.metadata().getProject().templates().get("test-template"),
serializedClusterState3.metadata().getProject().templates().get("test-template")
);
}
public static | ClusterSerializationTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MicrometerEndpointBuilderFactory.java | {
"start": 10691,
"end": 13629
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final MicrometerHeaderNameBuilder INSTANCE = new MicrometerHeaderNameBuilder();
/**
* Override timer action in URI.
*
* The option is a: {@code
* org.apache.camel.component.micrometer.MicrometerTimerAction} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsTimerAction}.
*/
public String metricsTimerAction() {
return "CamelMetricsTimerAction";
}
/**
* Override histogram value in URI.
*
* The option is a: {@code long} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsHistogramValue}.
*/
public String metricsHistogramValue() {
return "CamelMetricsHistogramValue";
}
/**
* Override decrement value in URI.
*
* The option is a: {@code Double} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsCounterDecrement}.
*/
public String metricsCounterDecrement() {
return "CamelMetricsCounterDecrement";
}
/**
* Override increment value in URI.
*
* The option is a: {@code Double} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsCounterIncrement}.
*/
public String metricsCounterIncrement() {
return "CamelMetricsCounterIncrement";
}
/**
* Override name value in URI.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsName}.
*/
public String metricsName() {
return "CamelMetricsName";
}
/**
* Override description value in URI.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsDescription}.
*/
public String metricsDescription() {
return "CamelMetricsDescription";
}
/**
* To augment meter tags defined as URI parameters.
*
* The option is a: {@code java.lang.Iterable<Tag>} type.
*
* Group: producer
*
* @return the name of the header {@code MetricsTags}.
*/
public String metricsTags() {
return "CamelMetricsTags";
}
}
static MicrometerEndpointBuilder endpointBuilder(String componentName, String path) {
| MicrometerHeaderNameBuilder |
java | ReactiveX__RxJava | src/jmh/java/io/reactivex/rxjava3/xmapz/FlowableSwitchMapSinglePerf.java | {
"start": 1111,
"end": 2743
} | class ____ {
@Param({ "1", "10", "100", "1000", "10000", "100000", "1000000" })
public int count;
Flowable<Integer> flowableConvert;
Flowable<Integer> flowableDedicated;
Flowable<Integer> flowablePlain;
@Setup
public void setup() {
Integer[] sourceArray = new Integer[count];
Arrays.fill(sourceArray, 777);
Flowable<Integer> source = Flowable.fromArray(sourceArray);
flowablePlain = source.switchMap(new Function<Integer, Publisher<? extends Integer>>() {
@Override
public Publisher<? extends Integer> apply(Integer v) {
return Flowable.just(v);
}
});
flowableConvert = source.switchMap(new Function<Integer, Publisher<? extends Integer>>() {
@Override
public Publisher<? extends Integer> apply(Integer v) {
return Single.just(v).toFlowable();
}
});
flowableDedicated = source.switchMapSingle(new Function<Integer, Single<Integer>>() {
@Override
public Single<Integer> apply(Integer v) {
return Single.just(v);
}
});
}
@Benchmark
public Object flowablePlain(Blackhole bh) {
return flowablePlain.subscribeWith(new PerfConsumer(bh));
}
@Benchmark
public Object flowableConvert(Blackhole bh) {
return flowableConvert.subscribeWith(new PerfConsumer(bh));
}
@Benchmark
public Object flowableDedicated(Blackhole bh) {
return flowableDedicated.subscribeWith(new PerfConsumer(bh));
}
}
| FlowableSwitchMapSinglePerf |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/StringBuilderFieldTest.java | {
"start": 2337,
"end": 2569
} | class ____ {
private StringBuilder value;
public StringBuilder getValue() {
return value;
}
public void setValue(StringBuilder value) {
this.value = value;
}
}
}
| V0 |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/MergeExec.java | {
"start": 692,
"end": 2220
} | class ____ extends PhysicalPlan {
private final List<Attribute> output;
public MergeExec(Source source, List<PhysicalPlan> children, List<Attribute> output) {
super(source, children);
this.output = output;
}
/**
* Extracts the children as a list of suppliers. All children must be LocalSourceExec.
*/
public List<LocalSupplier> suppliers() {
return children().stream().map(LocalSourceExec.class::cast).map(LocalSourceExec::supplier).toList();
}
@Override
public String getWriteableName() {
throw new UnsupportedOperationException("not serialized");
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException("not serialized");
}
@Override
public PhysicalPlan replaceChildren(List<PhysicalPlan> newChildren) {
return new MergeExec(source(), newChildren, output());
}
@Override
protected NodeInfo<MergeExec> info() {
return NodeInfo.create(this, MergeExec::new, children(), output);
}
@Override
public List<Attribute> output() {
return output;
}
@Override
public int hashCode() {
return Objects.hash(children());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MergeExec other = (MergeExec) o;
return Objects.equals(this.children(), other.children());
}
}
| MergeExec |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/qualifiers/multiple/MultipleCompositeQualifierSpec.java | {
"start": 397,
"end": 1344
} | class ____ {
@Test
void testQualifiers() {
try (ApplicationContext context = ApplicationContext.run()) {
final XMyBean bean = context.getBean(XMyBean.class);
assertTrue(bean.creditCartProcessor1 instanceof XCreditCardProcessor);
assertTrue(bean.creditCartProcessor2 instanceof XCreditCardProcessor);
assertTrue(bean.creditCartProcessor3 instanceof XCreditCardProcessor);
assertTrue(bean.bankTransferProcessor1 instanceof XBankTransferProcessor);
assertTrue(bean.fromCtorCreditCartProcessor1 instanceof XCreditCardProcessor);
assertTrue(bean.fromCtorCreditCartProcessor2 instanceof XCreditCardProcessor);
assertTrue(bean.fromCtorCreditCartProcessor3 instanceof XCreditCardProcessor);
assertTrue(bean.fromCtorBankTransferProcessor1 instanceof XBankTransferProcessor);
}
}
}
@Singleton
| MultipleCompositeQualifierSpec |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/DenyAllJaxRsTest.java | {
"start": 8530,
"end": 8728
} | class ____ {
@GET
public String explicit() {
return "explicit";
}
public String implicit() {
return "implicit";
}
}
}
| SpecialResource |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/emops/GetReferenceTest.java | {
"start": 673,
"end": 2002
} | class ____ {
@Test
public void testWrongIdType(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
try {
entityManager.getReference( Competitor.class, "30" );
fail("Expected IllegalArgumentException");
}
catch (IllegalArgumentException e) {
//success
}
catch ( Exception e ) {
fail("Wrong exception: " + e );
}
try {
entityManager.getReference( Mail.class, 1 );
fail("Expected IllegalArgumentException");
}
catch (IllegalArgumentException e) {
//success
}
catch ( Exception e ) {
fail("Wrong exception: " + e );
}
}
);
}
@Test
public void testWrongIdTypeFind(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
try {
entityManager.find( Competitor.class, "30" );
fail("Expected IllegalArgumentException");
}
catch (IllegalArgumentException e) {
//success
}
catch ( Exception e ) {
fail("Wrong exception: " + e );
}
try {
entityManager.find( Mail.class, 1 );
fail("Expected IllegalArgumentException");
}
catch (IllegalArgumentException e) {
//success
}
catch ( Exception e ) {
fail("Wrong exception: " + e );
}
}
);
}
}
| GetReferenceTest |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/client/JdbcRegisteredClientRepository.java | {
"start": 3916,
"end": 10695
} | class ____ implements RegisteredClientRepository {
// @formatter:off
private static final String COLUMN_NAMES = "id, "
+ "client_id, "
+ "client_id_issued_at, "
+ "client_secret, "
+ "client_secret_expires_at, "
+ "client_name, "
+ "client_authentication_methods, "
+ "authorization_grant_types, "
+ "redirect_uris, "
+ "post_logout_redirect_uris, "
+ "scopes, "
+ "client_settings,"
+ "token_settings";
// @formatter:on
private static final String TABLE_NAME = "oauth2_registered_client";
private static final String PK_FILTER = "id = ?";
private static final String LOAD_REGISTERED_CLIENT_SQL = "SELECT " + COLUMN_NAMES + " FROM " + TABLE_NAME
+ " WHERE ";
// @formatter:off
private static final String INSERT_REGISTERED_CLIENT_SQL = "INSERT INTO " + TABLE_NAME
+ "(" + COLUMN_NAMES + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
// @formatter:on
// @formatter:off
private static final String UPDATE_REGISTERED_CLIENT_SQL = "UPDATE " + TABLE_NAME
+ " SET client_secret = ?, client_secret_expires_at = ?, client_name = ?, client_authentication_methods = ?,"
+ " authorization_grant_types = ?, redirect_uris = ?, post_logout_redirect_uris = ?, scopes = ?,"
+ " client_settings = ?, token_settings = ?"
+ " WHERE " + PK_FILTER;
// @formatter:on
private static final String COUNT_REGISTERED_CLIENT_SQL = "SELECT COUNT(*) FROM " + TABLE_NAME + " WHERE ";
private final JdbcOperations jdbcOperations;
private RowMapper<RegisteredClient> registeredClientRowMapper;
private Function<RegisteredClient, List<SqlParameterValue>> registeredClientParametersMapper;
/**
* Constructs a {@code JdbcRegisteredClientRepository} using the provided parameters.
* @param jdbcOperations the JDBC operations
*/
public JdbcRegisteredClientRepository(JdbcOperations jdbcOperations) {
Assert.notNull(jdbcOperations, "jdbcOperations cannot be null");
this.jdbcOperations = jdbcOperations;
this.registeredClientRowMapper = new JsonMapperRegisteredClientRowMapper();
this.registeredClientParametersMapper = new JsonMapperRegisteredClientParametersMapper();
}
@Override
public void save(RegisteredClient registeredClient) {
Assert.notNull(registeredClient, "registeredClient cannot be null");
RegisteredClient existingRegisteredClient = findBy(PK_FILTER, registeredClient.getId());
if (existingRegisteredClient != null) {
updateRegisteredClient(registeredClient);
}
else {
insertRegisteredClient(registeredClient);
}
}
private void updateRegisteredClient(RegisteredClient registeredClient) {
List<SqlParameterValue> parameters = new ArrayList<>(
this.registeredClientParametersMapper.apply(registeredClient));
SqlParameterValue id = parameters.remove(0);
parameters.remove(0); // remove client_id
parameters.remove(0); // remove client_id_issued_at
parameters.add(id);
PreparedStatementSetter pss = new ArgumentPreparedStatementSetter(parameters.toArray());
this.jdbcOperations.update(UPDATE_REGISTERED_CLIENT_SQL, pss);
}
private void insertRegisteredClient(RegisteredClient registeredClient) {
assertUniqueIdentifiers(registeredClient);
List<SqlParameterValue> parameters = this.registeredClientParametersMapper.apply(registeredClient);
PreparedStatementSetter pss = new ArgumentPreparedStatementSetter(parameters.toArray());
this.jdbcOperations.update(INSERT_REGISTERED_CLIENT_SQL, pss);
}
private void assertUniqueIdentifiers(RegisteredClient registeredClient) {
Integer count = this.jdbcOperations.queryForObject(COUNT_REGISTERED_CLIENT_SQL + "client_id = ?", Integer.class,
registeredClient.getClientId());
if (count != null && count > 0) {
throw new IllegalArgumentException("Registered client must be unique. "
+ "Found duplicate client identifier: " + registeredClient.getClientId());
}
if (StringUtils.hasText(registeredClient.getClientSecret())) {
count = this.jdbcOperations.queryForObject(COUNT_REGISTERED_CLIENT_SQL + "client_secret = ?", Integer.class,
registeredClient.getClientSecret());
if (count != null && count > 0) {
throw new IllegalArgumentException("Registered client must be unique. "
+ "Found duplicate client secret for identifier: " + registeredClient.getId());
}
}
}
@Override
public RegisteredClient findById(String id) {
Assert.hasText(id, "id cannot be empty");
return findBy("id = ?", id);
}
@Override
public RegisteredClient findByClientId(String clientId) {
Assert.hasText(clientId, "clientId cannot be empty");
return findBy("client_id = ?", clientId);
}
private RegisteredClient findBy(String filter, Object... args) {
List<RegisteredClient> result = this.jdbcOperations.query(LOAD_REGISTERED_CLIENT_SQL + filter,
this.registeredClientRowMapper, args);
return !result.isEmpty() ? result.get(0) : null;
}
/**
* Sets the {@link RowMapper} used for mapping the current row in
* {@code java.sql.ResultSet} to {@link RegisteredClient}. The default is
* {@link JsonMapperRegisteredClientRowMapper}.
* @param registeredClientRowMapper the {@link RowMapper} used for mapping the current
* row in {@code ResultSet} to {@link RegisteredClient}
*/
public final void setRegisteredClientRowMapper(RowMapper<RegisteredClient> registeredClientRowMapper) {
Assert.notNull(registeredClientRowMapper, "registeredClientRowMapper cannot be null");
this.registeredClientRowMapper = registeredClientRowMapper;
}
/**
* Sets the {@code Function} used for mapping {@link RegisteredClient} to a
* {@code List} of {@link SqlParameterValue}. The default is
* {@link JsonMapperRegisteredClientParametersMapper}.
* @param registeredClientParametersMapper the {@code Function} used for mapping
* {@link RegisteredClient} to a {@code List} of {@link SqlParameterValue}
*/
public final void setRegisteredClientParametersMapper(
Function<RegisteredClient, List<SqlParameterValue>> registeredClientParametersMapper) {
Assert.notNull(registeredClientParametersMapper, "registeredClientParametersMapper cannot be null");
this.registeredClientParametersMapper = registeredClientParametersMapper;
}
protected final JdbcOperations getJdbcOperations() {
return this.jdbcOperations;
}
protected final RowMapper<RegisteredClient> getRegisteredClientRowMapper() {
return this.registeredClientRowMapper;
}
protected final Function<RegisteredClient, List<SqlParameterValue>> getRegisteredClientParametersMapper() {
return this.registeredClientParametersMapper;
}
/**
* The default {@link RowMapper} that maps the current row in
* {@code java.sql.ResultSet} to {@link RegisteredClient} using Jackson 3's
* {@link JsonMapper}.
*
* @author Joe Grandja
* @since 7.0
*/
public static | JdbcRegisteredClientRepository |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/async/AbstractAsyncThreadContextTestBase.java | {
"start": 4089,
"end": 10277
} | enum ____ {
WEBAPP("WebApp", "org.apache.logging.log4j.spi.DefaultThreadContextMap"),
GARBAGE_FREE(
"GarbageFree", "org.apache.logging.log4j.core.context.internal.GarbageFreeSortedArrayThreadContextMap");
private final String threadContextMap;
private final String implClass;
ContextImpl(final String threadContextMap, final String implClass) {
this.threadContextMap = threadContextMap;
this.implClass = implClass;
}
void init() {
props.setProperty("log4j2.threadContextMap", threadContextMap);
ThreadContextTestAccess.init();
}
public String getImplClassSimpleName() {
return StringUtils.substringAfterLast(implClass, '.');
}
public String getImplClass() {
return implClass;
}
}
private void init(final ContextImpl contextImpl, final Mode asyncMode) {
asyncMode.initSelector();
asyncMode.initConfigFile();
// Verify that we are using the requested context map
contextImpl.init();
final ThreadContextMap threadContextMap = ProviderUtil.getProvider().getThreadContextMapInstance();
assertThat(threadContextMap.getClass().getName())
.as("Check `ThreadContextMap` implementation")
.isEqualTo(contextImpl.getImplClass());
}
private LongSupplier remainingCapacity(final LoggerContext loggerContext, final LoggerConfig loggerConfig) {
final LongSupplier contextSupplier;
if (loggerContext instanceof AsyncLoggerContext) {
final RingBufferAdmin ringBufferAdmin = ((AsyncLoggerContext) loggerContext).createRingBufferAdmin();
contextSupplier = ringBufferAdmin::getRemainingCapacity;
} else {
contextSupplier = null;
}
if (loggerConfig instanceof AsyncLoggerConfig) {
final RingBufferAdmin ringBufferAdmin = ((AsyncLoggerConfig) loggerConfig)
.createRingBufferAdmin(((org.apache.logging.log4j.core.LoggerContext) loggerContext).getName());
return contextSupplier == null
? ringBufferAdmin::getRemainingCapacity
: () -> Math.min(contextSupplier.getAsLong(), ringBufferAdmin.getRemainingCapacity());
}
return contextSupplier != null ? contextSupplier : () -> Long.MAX_VALUE;
}
protected void testAsyncLogWritesToLog(final ContextImpl contextImpl, final Mode asyncMode, final Path loggingPath)
throws Exception {
final Path testLoggingPath = loggingPath.resolve(asyncMode.toString());
props.setProperty("logging.path", testLoggingPath.toString());
init(contextImpl, asyncMode);
final Path[] files = new Path[] {
testLoggingPath.resolve("AsyncLoggerTest.log"),
testLoggingPath.resolve("SynchronousContextTest.log"),
testLoggingPath.resolve("AsyncLoggerAndAsyncAppenderTest.log"),
testLoggingPath.resolve("AsyncAppenderContextTest.log"),
};
ThreadContext.push("stackvalue");
ThreadContext.put("KEY", "mapvalue");
final Logger log = LogManager.getLogger("com.foo.Bar");
final LoggerConfig loggerConfig = ((org.apache.logging.log4j.core.Logger) log).get();
final LoggerContext loggerContext = LogManager.getContext(false);
final String loggerContextName = loggerContext.getClass().getSimpleName();
final LongSupplier remainingCapacity = remainingCapacity(loggerContext, loggerConfig);
for (int i = 0; i < LINE_COUNT; i++) {
// buffer may be full
if (i >= 128) {
waitAtMost(5, TimeUnit.SECONDS)
.pollDelay(10, TimeUnit.MILLISECONDS)
.until(() -> remainingCapacity.getAsLong() > 0);
}
if ((i & 1) == 1) {
ThreadContext.put("count", String.valueOf(i));
} else {
ThreadContext.remove("count");
}
log.info("{} {} {} i={}", contextImpl, contextMap(), loggerContextName, Unbox.box(i));
}
ThreadContext.pop();
CoreLoggerContexts.stopLoggerContext(false, files[0].toFile()); // stop async thread
checkResult(files[0], loggerContextName, contextImpl);
if (asyncMode == Mode.MIXED || asyncMode == Mode.BOTH_ALL_ASYNC_AND_MIXED) {
for (int i = 1; i < files.length; i++) {
checkResult(files[i], loggerContextName, contextImpl);
}
}
LogManager.shutdown();
FileUtils.deleteDirectory(testLoggingPath.toFile());
}
private static String contextMap() {
final ReadOnlyThreadContextMap impl = ThreadContext.getThreadContextMap();
return impl == null
? ContextImpl.WEBAPP.getImplClassSimpleName()
: impl.getClass().getSimpleName();
}
private void checkResult(final Path file, final String loggerContextName, final ContextImpl contextImpl)
throws IOException {
final String contextDesc = contextImpl + " " + contextImpl.getImplClassSimpleName() + " " + loggerContextName;
try (final BufferedReader reader = Files.newBufferedReader(file)) {
String expect;
for (int i = 0; i < LINE_COUNT; i++) {
final String line = reader.readLine();
if ((i & 1) == 1) {
expect = "INFO c.f.Bar mapvalue [stackvalue] {KEY=mapvalue, configProp=configValue,"
+ " configProp2=configValue2, count="
+ i + "} " + contextDesc + " i=" + i;
} else {
expect = "INFO c.f.Bar mapvalue [stackvalue] {KEY=mapvalue, configProp=configValue,"
+ " configProp2=configValue2} "
+ contextDesc + " i=" + i;
}
assertThat(line).as("Log file '%s'", file.getFileName()).isEqualTo(expect);
}
assertThat(reader.readLine()).as("Last line").isNull();
}
}
}
| ContextImpl |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableDistinctUntilChanged.java | {
"start": 1865,
"end": 4275
} | class ____<T, K> extends BasicFuseableSubscriber<T, T>
implements ConditionalSubscriber<T> {
final Function<? super T, K> keySelector;
final BiPredicate<? super K, ? super K> comparer;
K last;
boolean hasValue;
DistinctUntilChangedSubscriber(Subscriber<? super T> actual,
Function<? super T, K> keySelector,
BiPredicate<? super K, ? super K> comparer) {
super(actual);
this.keySelector = keySelector;
this.comparer = comparer;
}
@Override
public void onNext(T t) {
if (!tryOnNext(t)) {
upstream.request(1);
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
return false;
}
if (sourceMode != NONE) {
downstream.onNext(t);
return true;
}
K key;
try {
key = keySelector.apply(t);
if (hasValue) {
boolean equal = comparer.test(last, key);
last = key;
if (equal) {
return false;
}
} else {
hasValue = true;
last = key;
}
} catch (Throwable ex) {
fail(ex);
return true;
}
downstream.onNext(t);
return true;
}
@Override
public int requestFusion(int mode) {
return transitiveBoundaryFusion(mode);
}
@Nullable
@Override
public T poll() throws Throwable {
for (;;) {
T v = qs.poll();
if (v == null) {
return null;
}
K key = keySelector.apply(v);
if (!hasValue) {
hasValue = true;
last = key;
return v;
}
if (!comparer.test(last, key)) {
last = key;
return v;
}
last = key;
if (sourceMode != SYNC) {
upstream.request(1);
}
}
}
}
static final | DistinctUntilChangedSubscriber |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/AbstractMutableHashTable.java | {
"start": 1216,
"end": 3478
} | class ____<T> {
/** The utilities to serialize the build side data types. */
protected final TypeSerializer<T> buildSideSerializer;
/** The utilities to hash and compare the build side data types. */
protected final TypeComparator<T> buildSideComparator;
/** The lock to synchronize state changes (open / close) on */
protected final Object stateLock = new Object();
/**
* Flag to mark the table as open / closed. Because we allow to open and close multiple times,
* the state is initially closed.
*/
protected boolean closed = true;
public AbstractMutableHashTable(
TypeSerializer<T> buildSideSerializer, TypeComparator<T> buildSideComparator) {
this.buildSideSerializer = buildSideSerializer;
this.buildSideComparator = buildSideComparator;
}
public TypeSerializer<T> getBuildSideSerializer() {
return this.buildSideSerializer;
}
public TypeComparator<T> getBuildSideComparator() {
return this.buildSideComparator;
}
// ------------- Life-cycle functions -------------
/** Initialize the hash table */
public abstract void open();
/**
* Closes the hash table. This effectively releases all internal structures and closes all open
* files and removes them. The call to this method is valid both as a cleanup after the complete
* inputs were properly processed, and as a cancellation call, which cleans up all resources
* that are currently held by the hash table. If another process still accesses the hash table
* after close has been called, no operations will be performed.
*/
public abstract void close();
public abstract void abort();
public abstract List<MemorySegment> getFreeMemory();
// ------------- Modifier -------------
public abstract void insert(T record) throws IOException;
public abstract void insertOrReplaceRecord(T record) throws IOException;
// ------------- Accessors -------------
public abstract MutableObjectIterator<T> getEntryIterator();
public abstract <PT> AbstractHashTableProber<PT, T> getProber(
TypeComparator<PT> probeSideComparator, TypePairComparator<PT, T> pairComparator);
}
| AbstractMutableHashTable |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java | {
"start": 1533,
"end": 5609
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(ZooKeeperTestUtils.class);
/**
* Creates a new {@link TestingServer}, setting additional configuration properties for
* stability purposes.
*/
public static TestingServer createAndStartZookeeperTestingServer() throws Exception {
return new TestingServer(getZookeeperInstanceSpecWithIncreasedSessionTimeout(), true);
}
private static InstanceSpec getZookeeperInstanceSpecWithIncreasedSessionTimeout() {
// this gives us the default settings
final InstanceSpec instanceSpec = InstanceSpec.newInstanceSpec();
final Map<String, Object> properties = new HashMap<>();
properties.put("maxSessionTimeout", "60000");
final boolean deleteDataDirectoryOnClose = true;
return new InstanceSpec(
instanceSpec.getDataDirectory(),
instanceSpec.getPort(),
instanceSpec.getElectionPort(),
instanceSpec.getQuorumPort(),
deleteDataDirectoryOnClose,
instanceSpec.getServerId(),
instanceSpec.getTickTime(),
instanceSpec.getMaxClientCnxns(),
properties,
instanceSpec.getHostname());
}
/**
* Creates a configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
*
* @param zooKeeperQuorum ZooKeeper quorum to connect to
* @param fsStateHandlePath Base path for file system state backend (for checkpoints and
* recovery)
* @return A new configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
*/
public static Configuration createZooKeeperHAConfig(
String zooKeeperQuorum, String fsStateHandlePath) {
return configureZooKeeperHA(new Configuration(), zooKeeperQuorum, fsStateHandlePath);
}
/**
* Sets all necessary configuration keys to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
*
* @param config Configuration to use
* @param zooKeeperQuorum ZooKeeper quorum to connect to
* @param fsStateHandlePath Base path for file system state backend (for checkpoints and
* recovery)
* @return The modified configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
*/
public static Configuration configureZooKeeperHA(
Configuration config, String zooKeeperQuorum, String fsStateHandlePath) {
checkNotNull(config, "Configuration");
checkNotNull(zooKeeperQuorum, "ZooKeeper quorum");
checkNotNull(fsStateHandlePath, "File state handle backend path");
// ZooKeeper recovery mode
config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
config.set(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperQuorum);
int connTimeout = 5000;
if (runsOnCIInfrastructure()) {
// The regular timeout is to aggressive for Travis and connections are often lost.
LOG.info(
"Detected CI environment: Configuring connection and session timeout of 30 seconds");
connTimeout = 30000;
}
config.set(
HighAvailabilityOptions.ZOOKEEPER_CONNECTION_TIMEOUT,
Duration.ofMillis(connTimeout));
config.set(
HighAvailabilityOptions.ZOOKEEPER_SESSION_TIMEOUT, Duration.ofMillis(connTimeout));
// File system state backend
config.set(StateBackendOptions.STATE_BACKEND, "hashmap");
config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, fsStateHandlePath + "/checkpoints");
config.set(HighAvailabilityOptions.HA_STORAGE_PATH, fsStateHandlePath + "/recovery");
config.set(RpcOptions.ASK_TIMEOUT_DURATION, Duration.ofSeconds(100));
return config;
}
/**
* @return true, if a CI environment is detected.
*/
public static boolean runsOnCIInfrastructure() {
return System.getenv().containsKey("CI") || System.getenv().containsKey("TF_BUILD");
}
}
| ZooKeeperTestUtils |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/basic/PreparedStatementKeyTest.java | {
"start": 1152,
"end": 6810
} | class ____ extends TestCase {
public void test_equals_0() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", "c1", MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x1", "c2", MethodType.M1);
PreparedStatementKey k3 = new PreparedStatementKey("x1", "c3", MethodType.M1);
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_2() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", "c1", MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x2", "c1", MethodType.M1);
PreparedStatementKey k3 = new PreparedStatementKey("x3", "c1", MethodType.M1);
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_3() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", "c1", MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x1", "c1", MethodType.M2);
PreparedStatementKey k3 = new PreparedStatementKey("x1", "c1", MethodType.M3);
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_4() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", "c1", MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x1", "c2", MethodType.M1);
PreparedStatementKey k3 = new PreparedStatementKey("x1", null, MethodType.M1);
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_5() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", null, MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x1", null, MethodType.M2);
PreparedStatementKey k3 = new PreparedStatementKey("x1", null, MethodType.M3);
k1.hashCode();
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_6() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", null, MethodType.M1);
PreparedStatementKey k2 = new PreparedStatementKey("x2", null, MethodType.M1);
PreparedStatementKey k3 = new PreparedStatementKey("x3", null, MethodType.M1);
k1.hashCode();
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_7() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", null, MethodType.M1, 0, 0);
PreparedStatementKey k2 = new PreparedStatementKey("x1", null, MethodType.M1, 1, 0);
PreparedStatementKey k3 = new PreparedStatementKey("x2", null, MethodType.M1, 0, 1);
k1.hashCode();
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_8() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", null, MethodType.M1, 0, 0, 0);
PreparedStatementKey k2 = new PreparedStatementKey("x1", null, MethodType.M1, 0, 0, 1);
PreparedStatementKey k3 = new PreparedStatementKey("x2", null, MethodType.M1, 0, 1, 0);
k1.hashCode();
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_equals_9() throws Exception {
PreparedStatementKey k1 = new PreparedStatementKey("x1", null, MethodType.M1, 2);
PreparedStatementKey k2 = new PreparedStatementKey("x1", null, MethodType.M1, new int[]{});
PreparedStatementKey k3 = new PreparedStatementKey("x2", null, MethodType.M1, new String[]{});
k1.hashCode();
assertFalse(k1.equals(k2));
assertFalse(k1.equals(k3));
assertFalse(k2.equals(k1));
assertFalse(k2.equals(k3));
assertFalse(k3.equals(k2));
assertFalse(k3.equals(k1));
}
public void test_contains() throws Exception {
DruidDataSource dataSource = new DruidDataSource();
MockConnection conn = new MockConnection();
PreparedStatementKey k1 = new PreparedStatementKey("x1", "c1", MethodType.M1);
PreparedStatementPool pool = new PreparedStatementPool(new DruidConnectionHolder(dataSource, conn, 0));
MockPreparedStatement raw = new MockPreparedStatement(null, null);
pool.put(new PreparedStatementHolder(k1, raw));
assertTrue(pool.get(k1) != null);
assertTrue(pool.get(k1) != null);
}
}
| PreparedStatementKeyTest |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2Ec2ComponentBuilderFactory.java | {
"start": 1852,
"end": 15863
} | interface ____ extends ComponentBuilder<AWS2EC2Component> {
/**
* To use an existing configured AmazonEC2Client client.
*
* The option is a:
* <code>software.amazon.awssdk.services.ec2.Ec2Client</code> type.
*
* Group: producer
*
* @param amazonEc2Client the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder amazonEc2Client(software.amazon.awssdk.services.ec2.Ec2Client amazonEc2Client) {
doSetProperty("amazonEc2Client", amazonEc2Client);
return this;
}
/**
* The component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.ec2.AWS2EC2Configuration</code> type.
*
* Group: producer
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder configuration(org.apache.camel.component.aws2.ec2.AWS2EC2Configuration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The operation to perform. It can be createAndRunInstances,
* startInstances, stopInstances, terminateInstances, describeInstances,
* describeInstancesStatus, rebootInstances, monitorInstances,
* unmonitorInstances, createTags or deleteTags.
*
* The option is a:
* <code>org.apache.camel.component.aws2.ec2.AWS2EC2Operations</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder operation(org.apache.camel.component.aws2.ec2.AWS2EC2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* The region in which EC2 client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example, ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* To define a proxy host when instantiating the EC2 client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the EC2 client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the EC2 client.
*
* The option is a:
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder accessKey(java.lang.String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* If using a profile credentials provider, this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder profileCredentialsName(java.lang.String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder secretKey(java.lang.String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder sessionToken(java.lang.String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the EC2 client should expect to load credentials through
* a default credentials provider or to expect static credentials to be
* passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the EC2 client should expect to load credentials through
* a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the EC2 client should expect to use Session Credentials.
* This is useful in a situation in which the user needs to assume an
* IAM role for doing operations in EC2.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Aws2Ec2ComponentBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
| Aws2Ec2ComponentBuilder |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java | {
"start": 8830,
"end": 11201
} | class ____ implements IndexedSortable {
private static Random r = new Random();
private final int eob;
private final int[] indices;
private final int[] offsets;
private final byte[] bytes;
private final WritableComparator comparator;
private final String[] check;
private final long seed;
public WritableSortable() throws IOException {
this(100);
}
public WritableSortable(int j) throws IOException {
seed = r.nextLong();
r.setSeed(seed);
Text t = new Text();
StringBuilder sb = new StringBuilder();
indices = new int[j];
offsets = new int[j];
check = new String[j];
DataOutputBuffer dob = new DataOutputBuffer();
for (int i = 0; i < j; ++i) {
indices[i] = i;
offsets[i] = dob.getLength();
genRandom(t, r.nextInt(15) + 1, sb);
t.write(dob);
check[i] = t.toString();
}
eob = dob.getLength();
bytes = dob.getData();
comparator = WritableComparator.get(Text.class);
}
public long getSeed() {
return seed;
}
private static void genRandom(Text t, int len, StringBuilder sb) {
sb.setLength(0);
for (int i = 0; i < len; ++i) {
sb.append(Integer.toString(r.nextInt(26) + 10, 36));
}
t.set(sb.toString());
}
@Override
public int compare(int i, int j) {
final int ii = indices[i];
final int ij = indices[j];
return comparator.compare(bytes, offsets[ii],
((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii],
bytes, offsets[ij],
((ij + 1 == indices.length) ? eob : offsets[ij + 1]) - offsets[ij]);
}
@Override
public void swap(int i, int j) {
int tmp = indices[i];
indices[i] = indices[j];
indices[j] = tmp;
}
public String[] getValues() {
return check;
}
public String[] getSorted() throws IOException {
String[] ret = new String[indices.length];
Text t = new Text();
DataInputBuffer dib = new DataInputBuffer();
for (int i = 0; i < ret.length; ++i) {
int ii = indices[i];
dib.reset(bytes, offsets[ii],
((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii]);
t.readFields(dib);
ret[i] = t.toString();
}
return ret;
}
}
}
| WritableSortable |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/time/TimeUnitConversionChecker.java | {
"start": 2363,
"end": 6557
} | class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
// TODO(kak): We should probably also extend this to recognize TimeUnit.convertTo() invocations
private static final Matcher<ExpressionTree> MATCHER =
instanceMethod()
.onExactClass("java.util.concurrent.TimeUnit")
.namedAnyOf(
"toDays", "toHours", "toMinutes", "toSeconds", "toMillis", "toMicros", "toNanos");
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!MATCHER.matches(tree, state)) {
return Description.NO_MATCH;
}
Tree receiverOfConversion = ASTHelpers.getReceiver(tree);
if (receiverOfConversion == null) {
// Usage inside TimeUnit itself, no changes we can make here.
return Description.NO_MATCH;
}
// This trips up on code like:
// TimeUnit SECONDS = TimeUnit.MINUTES;
// long about2500 = SECONDS.toSeconds(42);
// but... I think that's bad enough to ignore here :)
Symbol receiverOfConversionSymbol = ASTHelpers.getSymbol(receiverOfConversion);
if (receiverOfConversionSymbol == null) {
return Description.NO_MATCH;
}
String timeUnitName = receiverOfConversionSymbol.getSimpleName().toString();
Optional<TimeUnit> receiver = Enums.getIfPresent(TimeUnit.class, timeUnitName);
if (!receiver.isPresent()) {
return Description.NO_MATCH;
}
String methodName = ASTHelpers.getSymbol(tree).getSimpleName().toString();
TimeUnit convertTo = methodNameToTimeUnit(methodName);
ExpressionTree arg0 = tree.getArguments().getFirst();
// if we have a constant and can Long-parse it...
Long constant = Longs.tryParse(String.valueOf(state.getSourceForNode(arg0)));
if (constant != null) {
long converted = invokeConversion(receiver.get(), methodName, constant);
// ... and the conversion results in 0 or 1, just inline it!
if (converted == 0 || converted == 1 || constant == converted) {
SuggestedFix fix = replaceTreeWith(tree, convertTo, converted + "L");
return describeMatch(tree, fix);
}
// otherwise we have a suspect case: SMALLER_UNIT.toLargerUnit(constantValue)
// because: "people usually don't like to have constants like 60_000_000 and use"
// "libraries to turn them into smaller numbers"
if (receiver.get().compareTo(convertTo) < 0) {
// We can't suggest a replacement here, so we just have to error out.
return describeMatch(tree);
}
}
// if we're trying to convert the unit to itself, just return the arg
if (receiver.get().equals(convertTo)) {
SuggestedFix fix = replaceTreeWith(tree, convertTo, state.getSourceForNode(arg0));
return describeMatch(tree, fix);
}
return Description.NO_MATCH;
}
private static SuggestedFix replaceTreeWith(
MethodInvocationTree tree, TimeUnit units, String replacement) {
return SuggestedFix.builder()
.postfixWith(tree, " /* " + units.toString().toLowerCase() + " */")
.replace(tree, replacement)
.build();
}
private static long invokeConversion(TimeUnit timeUnit, String methodName, long duration) {
return switch (methodName) {
case "toDays" -> timeUnit.toDays(duration);
case "toHours" -> timeUnit.toHours(duration);
case "toMinutes" -> timeUnit.toMinutes(duration);
case "toSeconds" -> timeUnit.toSeconds(duration);
case "toMillis" -> timeUnit.toMillis(duration);
case "toMicros" -> timeUnit.toMicros(duration);
case "toNanos" -> timeUnit.toNanos(duration);
default -> throw new IllegalArgumentException();
};
}
private static TimeUnit methodNameToTimeUnit(String methodName) {
return switch (methodName) {
case "toDays" -> TimeUnit.DAYS;
case "toHours" -> TimeUnit.HOURS;
case "toMinutes" -> TimeUnit.MINUTES;
case "toSeconds" -> TimeUnit.SECONDS;
case "toMillis" -> TimeUnit.MILLISECONDS;
case "toMicros" -> TimeUnit.MICROSECONDS;
case "toNanos" -> TimeUnit.NANOSECONDS;
default -> throw new IllegalArgumentException();
};
}
}
| TimeUnitConversionChecker |
java | apache__camel | components/camel-velocity/src/generated/java/org/apache/camel/component/velocity/VelocityEndpointUriFactory.java | {
"start": 518,
"end": 2359
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":resourceUri";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(8);
props.add("allowContextMapAll");
props.add("allowTemplateFromHeader");
props.add("contentCache");
props.add("encoding");
props.add("lazyStartProducer");
props.add("loaderCache");
props.add("propertiesFile");
props.add("resourceUri");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "velocity".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "resourceUri", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| VelocityEndpointUriFactory |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/concurrent/AsyncCompletionService.java | {
"start": 1249,
"end": 4714
} | class ____<V> {
private final Executor executor;
private final boolean ordered;
private final PriorityQueue<Task> queue;
private final AtomicInteger nextId = new AtomicInteger();
private final AtomicInteger index = new AtomicInteger();
private final ReentrantLock lock;
private final Condition available;
public AsyncCompletionService(Executor executor, boolean ordered) {
this(executor, ordered, null, 0);
}
public AsyncCompletionService(Executor executor, boolean ordered, ReentrantLock lock) {
this(executor, ordered, lock, 0);
}
public AsyncCompletionService(Executor executor, boolean ordered, ReentrantLock lock, int capacity) {
this.executor = executor;
this.ordered = ordered;
this.lock = lock != null ? lock : new ReentrantLock();
this.available = this.lock.newCondition();
if (capacity > 0) {
queue = new PriorityQueue<>(capacity);
} else {
queue = new PriorityQueue<>();
}
}
public ReentrantLock getLock() {
return lock;
}
public void submit(Consumer<Consumer<V>> runner) {
Task f = new Task(nextId.getAndIncrement(), runner);
this.executor.execute(f);
}
public void skip() {
index.incrementAndGet();
}
public V pollUnordered() {
final ReentrantLock lock = this.lock;
lock.lock();
try {
Task t = queue.poll();
return t != null ? t.result : null;
} finally {
lock.unlock();
}
}
public V poll() {
final ReentrantLock lock = this.lock;
lock.lock();
try {
Task t = queue.peek();
if (t != null && (!ordered || index.compareAndSet(t.id, t.id + 1))) {
queue.poll();
return t.result;
} else {
return null;
}
} finally {
lock.unlock();
}
}
public V poll(long timeout, TimeUnit unit) throws InterruptedException {
long nanos = unit.toNanos(timeout);
final ReentrantLock lock = this.lock;
lock.lockInterruptibly();
try {
for (;;) {
Task t = queue.peek();
if (t != null && (!ordered || index.compareAndSet(t.id, t.id + 1))) {
queue.poll();
return t.result;
}
if (nanos <= 0) {
return null;
} else {
nanos = available.awaitNanos(nanos);
}
}
} finally {
lock.unlock();
}
}
public V take() throws InterruptedException {
final ReentrantLock lock = this.lock;
lock.lockInterruptibly();
try {
for (;;) {
Task t = queue.peek();
if (t != null && (!ordered || index.compareAndSet(t.id, t.id + 1))) {
queue.poll();
return t.result;
}
available.await();
}
} finally {
lock.unlock();
}
}
private void complete(Task task) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
queue.add(task);
available.signalAll();
} finally {
lock.unlock();
}
}
private | AsyncCompletionService |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/testdata/TargetTypeTest.java | {
"start": 16447,
"end": 16477
} | class ____<T> extends B<T> {}
| C |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4874UpdateLatestPluginVersionTest.java | {
"start": 1128,
"end": 2074
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that deployment of a plugin updates the metadata's "latest" field.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4874");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng4874");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
File metadataFile = new File(testDir, "target/repo/org/apache/maven/its/mng4874/test/maven-metadata.xml");
String xml = Files.readString(metadataFile.toPath());
assertTrue(xml.matches("(?s).*<latest>0\\.1-SNAPSHOT</latest>.*"), xml);
}
}
| MavenITmng4874UpdateLatestPluginVersionTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/StopRouteFromRouteTest.java | {
"start": 1332,
"end": 4542
} | class ____ {
final CountDownLatch latch = new CountDownLatch(1);
// START SNIPPET: e1
@Test
public void testStopRouteFromRoute() throws Exception {
// create camel, add routes, and start camel
CamelContext context = new DefaultCamelContext();
context.addRoutes(createMyRoutes());
context.start();
assertTrue(context.getRouteController().getRouteStatus("myRoute").isStarted(), "Route myRoute should be started");
assertTrue(context.getRouteController().getRouteStatus("bar").isStarted(), "Route bar should be started");
// setup mock expectations for unit test
MockEndpoint start = context.getEndpoint("mock:start", MockEndpoint.class);
start.expectedMessageCount(1);
MockEndpoint done = context.getEndpoint("mock:done", MockEndpoint.class);
done.expectedMessageCount(1);
// send a message to the route
ProducerTemplate template = context.createProducerTemplate();
template.sendBody("direct:start", "Hello Camel");
// just wait a bit for the thread to stop the route
latch.await(5, TimeUnit.SECONDS);
// the route should now be stopped
assertTrue(context.getRouteController().getRouteStatus("myRoute").isStopped(), "Route myRoute should be stopped");
assertTrue(context.getRouteController().getRouteStatus("bar").isStarted(), "Route bar should be started");
// stop camel
context.stop();
// unit test assertions
start.assertIsSatisfied();
done.assertIsSatisfied();
}
// END SNIPPET: e1
// START SNIPPET: e2
public RouteBuilder createMyRoutes() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("myRoute").to("mock:start").process(new Processor() {
Thread stop;
@Override
public void process(final Exchange exchange) {
// stop this route using a thread that will stop
// this route gracefully while we are still running
if (stop == null) {
stop = new Thread() {
@Override
public void run() {
try {
exchange.getContext().getRouteController().stopRoute("myRoute");
} catch (Exception e) {
// ignore
} finally {
// signal we stopped the route
latch.countDown();
}
}
};
}
// start the thread that stops this route
stop.start();
}
}).to("mock:done");
from("direct:bar").routeId("bar").to("mock:bar");
}
};
}
// END SNIPPET: e2
}
| StopRouteFromRouteTest |
java | apache__camel | components/camel-csimple-joor/src/test/java/org/apache/camel/language/csimple/joor/OriginalSimpleTest.java | {
"start": 92765,
"end": 93116
} | class ____ {
private List<OrderLine> lines;
public Order(List<OrderLine> lines) {
this.lines = lines;
}
public List<OrderLine> getLines() {
return lines;
}
public void setLines(List<OrderLine> lines) {
this.lines = lines;
}
}
public static final | Order |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/properties/RecursiveComparisonAssert_isEqualTo_withIntrospectionStrategy_Test.java | {
"start": 6127,
"end": 7181
} | class ____ implements Message {
String template;
boolean empty;
public GenericMessage(String template) {
this.template = template;
this.empty = template == null || template.isEmpty();
}
@Override
public String getTemplate() {
return template;
}
@Override
public boolean isEmpty() {
return empty;
}
}
// https://github.com/assertj/assertj/issues/2108
@Test
void should_detect_badly_set_optional_2108() {
// GIVEN
Bean actual = new Bean();
Bean expected = new Bean();
// WHEN
Throwable throwable = catchThrowable(() -> then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withIntrospectionStrategy(COMPARING_PROPERTIES)
.isEqualTo(expected));
// THEN fails due to getString failing as it tries to build an optional for a null value.
then(throwable).isInstanceOf(IntrospectionError.class);
}
static | GenericMessage |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/spi/CascadeStyle.java | {
"start": 333,
"end": 1510
} | interface ____ extends Serializable {
/**
* For this style, should the given action be cascaded?
*
* @param action The action to be checked for cascade-ability.
*
* @return True if the action should be cascaded under this style; false otherwise.
*/
boolean doCascade(CascadingAction<?> action);
/**
* Probably more aptly named something like doCascadeToCollectionElements(); it is
* however used from both the collection and to-one logic branches...
* <p>
* For this style, should the given action really be cascaded? The default
* implementation is simply to return {@link #doCascade}; for certain
* styles (currently only delete-orphan), however, we need to be able to
* control this separately.
*
* @param action The action to be checked for cascade-ability.
*
* @return True if the action should be really cascaded under this style;
* false otherwise.
*/
boolean reallyDoCascade(CascadingAction<?> action);
/**
* Do we need to delete orphaned collection elements?
*
* @return True if this style need to account for orphan delete
* operations; false otherwise.
*/
boolean hasOrphanDelete();
}
| CascadeStyle |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/upgrade/HttpUpgradeCheckPathParamsTest.java | {
"start": 2531,
"end": 2869
} | class ____ implements HttpUpgradeCheck {
@Override
public Uni<CheckResult> perform(HttpUpgradeContext context) {
if ("reject".equals(context.pathParam("action"))) {
return CheckResult.rejectUpgrade(404);
}
return CheckResult.permitUpgrade();
}
}
}
| UpgradeCheck |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSRedirectException.java | {
"start": 1008,
"end": 1308
} | class ____ extends AWSServiceIOException {
/**
* Instantiate.
* @param operation operation which triggered this
* @param cause the underlying cause
*/
public AWSRedirectException(String operation,
AwsServiceException cause) {
super(operation, cause);
}
}
| AWSRedirectException |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/ReturnMissingNullableTest.java | {
"start": 70823,
"end": 71493
} | class ____ {
@SuppressWarnings("nullness:argument")
public String getMessage(boolean b) {
if (b) {
doSomethingElse(null);
return "negative";
} else {
return "negative";
}
}
public void doSomethingElse(Object c) {
return;
}
}
""")
.addOutputLines(
"com/google/errorprone/bugpatterns/nullness/LiteralNullReturnTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
public | LiteralNullReturnTest |
java | playframework__playframework | documentation/manual/working/javaGuide/main/pekko/code/javaguide/pekko/JavaPekko.java | {
"start": 761,
"end": 845
} | class ____ {
private static volatile CountDownLatch latch;
public static | JavaPekko |
java | apache__kafka | jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/ShareGroupAssignorBenchmark.java | {
"start": 3121,
"end": 3627
} | enum ____ {
SIMPLE(new SimpleAssignor());
private final PartitionAssignor assignor;
AssignorType(PartitionAssignor assignor) {
this.assignor = assignor;
}
public PartitionAssignor assignor() {
return assignor;
}
}
/**
* The assignment type is decided based on whether all the members are assigned partitions
* for the first time (full), or incrementally when a rebalance is triggered.
*/
public | AssignorType |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/function/StreamsTest.java | {
"start": 1395,
"end": 1965
} | class ____ {
@Test
void testFilterStream() {
Stream<Integer> stream = filterStream(asList(1, 2, 3, 4, 5), i -> i % 2 == 0);
assertEquals(asList(2, 4), stream.collect(toList()));
}
@Test
void testFilterList() {
List<Integer> list = filterList(asList(1, 2, 3, 4, 5), i -> i % 2 == 0);
assertEquals(asList(2, 4), list);
}
@Test
void testFilterSet() {
Set<Integer> set = filterSet(asList(1, 2, 3, 4, 5), i -> i % 2 == 0);
assertEquals(new LinkedHashSet<>(asList(2, 4)), set);
}
}
| StreamsTest |
java | apache__rocketmq | broker/src/main/java/org/apache/rocketmq/broker/offset/ConsumerOrderInfoLockManager.java | {
"start": 1501,
"end": 5524
} | class ____ {
private static final Logger POP_LOGGER = LoggerFactory.getLogger(LoggerName.ROCKETMQ_POP_LOGGER_NAME);
private final BrokerController brokerController;
private final Map<Key, Timeout> timeoutMap = new ConcurrentHashMap<>();
private final Timer timer;
private static final int TIMER_TICK_MS = 100;
public ConsumerOrderInfoLockManager(BrokerController brokerController) {
this.brokerController = brokerController;
this.timer = new HashedWheelTimer(
new ThreadFactoryImpl("ConsumerOrderInfoLockManager_"),
TIMER_TICK_MS, TimeUnit.MILLISECONDS);
}
/**
* when ConsumerOrderInfoManager load from disk, recover data
*/
public void recover(Map<String/* topic@group*/, ConcurrentHashMap<Integer/*queueId*/, ConsumerOrderInfoManager.OrderInfo>> table) {
if (!this.brokerController.getBrokerConfig().isEnableNotifyAfterPopOrderLockRelease()) {
return;
}
for (Map.Entry<String, ConcurrentHashMap<Integer, ConsumerOrderInfoManager.OrderInfo>> entry : table.entrySet()) {
String topicAtGroup = entry.getKey();
ConcurrentHashMap<Integer/*queueId*/, ConsumerOrderInfoManager.OrderInfo> qs = entry.getValue();
String[] arrays = ConsumerOrderInfoManager.decodeKey(topicAtGroup);
if (arrays.length != 2) {
continue;
}
String topic = arrays[0];
String group = arrays[1];
for (Map.Entry<Integer, ConsumerOrderInfoManager.OrderInfo> qsEntry : qs.entrySet()) {
Long lockFreeTimestamp = qsEntry.getValue().getLockFreeTimestamp();
if (lockFreeTimestamp == null || lockFreeTimestamp <= System.currentTimeMillis()) {
continue;
}
this.updateLockFreeTimestamp(topic, group, qsEntry.getKey(), lockFreeTimestamp);
}
}
}
public void updateLockFreeTimestamp(String topic, String group, int queueId, ConsumerOrderInfoManager.OrderInfo orderInfo) {
this.updateLockFreeTimestamp(topic, group, queueId, orderInfo.getLockFreeTimestamp());
}
public void updateLockFreeTimestamp(String topic, String group, int queueId, Long lockFreeTimestamp) {
if (!this.brokerController.getBrokerConfig().isEnableNotifyAfterPopOrderLockRelease()) {
return;
}
if (lockFreeTimestamp == null) {
return;
}
try {
this.timeoutMap.compute(new Key(topic, group, queueId), (key, oldTimeout) -> {
try {
long delay = lockFreeTimestamp - System.currentTimeMillis();
Timeout newTimeout = this.timer.newTimeout(new NotifyLockFreeTimerTask(key), delay, TimeUnit.MILLISECONDS);
if (oldTimeout != null) {
// cancel prev timerTask
oldTimeout.cancel();
}
return newTimeout;
} catch (Exception e) {
POP_LOGGER.warn("add timeout task failed. key:{}, lockFreeTimestamp:{}", key, lockFreeTimestamp, e);
return oldTimeout;
}
});
} catch (Exception e) {
POP_LOGGER.error("unexpect error when updateLockFreeTimestamp. topic:{}, group:{}, queueId:{}, lockFreeTimestamp:{}",
topic, group, queueId, lockFreeTimestamp, e);
}
}
protected void notifyLockIsFree(Key key) {
try {
this.brokerController.getPopMessageProcessor().notifyLongPollingRequestIfNeed(key.topic, key.group, key.queueId);
} catch (Exception e) {
POP_LOGGER.error("unexpect error when notifyLockIsFree. key:{}", key, e);
}
}
public void shutdown() {
this.timer.stop();
}
@VisibleForTesting
protected Map<Key, Timeout> getTimeoutMap() {
return timeoutMap;
}
private | ConsumerOrderInfoLockManager |
java | apache__camel | components/camel-digitalocean/src/main/java/org/apache/camel/component/digitalocean/producer/DigitalOceanTagsProducer.java | {
"start": 1486,
"end": 4306
} | class ____ extends DigitalOceanProducer {
public DigitalOceanTagsProducer(DigitalOceanEndpoint endpoint, DigitalOceanConfiguration configuration) {
super(endpoint, configuration);
}
@Override
public void process(Exchange exchange) throws RequestUnsuccessfulException, DigitalOceanException {
switch (determineOperation(exchange)) {
case list:
getTags(exchange);
break;
case create:
createTag(exchange);
break;
case get:
getTag(exchange);
break;
case delete:
deleteTag(exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation");
}
}
private void createTag(Exchange exchange) throws RequestUnsuccessfulException, DigitalOceanException {
String name = exchange.getIn().getHeader(DigitalOceanHeaders.NAME, String.class);
if (ObjectHelper.isEmpty(name)) {
throw new IllegalArgumentException(DigitalOceanHeaders.NAME + " must be specified");
}
Tag tag = getEndpoint().getDigitalOceanClient().createTag(name);
LOG.trace("Create Tag [{}] ", tag);
exchange.getMessage().setBody(tag);
}
private void getTag(Exchange exchange) throws RequestUnsuccessfulException, DigitalOceanException {
String name = exchange.getIn().getHeader(DigitalOceanHeaders.NAME, String.class);
if (ObjectHelper.isEmpty(name)) {
throw new IllegalArgumentException(DigitalOceanHeaders.NAME + " must be specified");
}
Tag tag = getEndpoint().getDigitalOceanClient().getTag(name);
LOG.trace("Tag [{}] ", tag);
exchange.getMessage().setBody(tag);
}
private void getTags(Exchange exchange) throws RequestUnsuccessfulException, DigitalOceanException {
Tags tags = getEndpoint().getDigitalOceanClient().getAvailableTags(configuration.getPage(), configuration.getPerPage());
LOG.trace("All Tags : page {} / {} per page [{}] ", configuration.getPage(), configuration.getPerPage(),
tags.getTags());
exchange.getMessage().setBody(tags.getTags());
}
private void deleteTag(Exchange exchange) throws RequestUnsuccessfulException, DigitalOceanException {
String name = exchange.getIn().getHeader(DigitalOceanHeaders.NAME, String.class);
if (ObjectHelper.isEmpty(name)) {
throw new IllegalArgumentException(DigitalOceanHeaders.NAME + " must be specified");
}
Delete delete = getEndpoint().getDigitalOceanClient().deleteTag(name);
LOG.trace("Delete Tag [{}] ", delete);
exchange.getMessage().setBody(delete);
}
}
| DigitalOceanTagsProducer |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/FutureContextTest.java | {
"start": 958,
"end": 1930
} | class ____ {
@Test
void testFutureContext() throws Exception {
Thread thread1 = new Thread(() -> {
FutureContext.getContext().setFuture(CompletableFuture.completedFuture("future from thread1"));
try {
Thread.sleep(500);
Assertions.assertEquals(
"future from thread1",
FutureContext.getContext().getCompletableFuture().get());
} catch (Exception e) {
e.printStackTrace();
}
});
thread1.start();
Thread.sleep(100);
Thread thread2 = new Thread(() -> {
CompletableFuture future = FutureContext.getContext().getCompletableFuture();
Assertions.assertNull(future);
FutureContext.getContext().setFuture(CompletableFuture.completedFuture("future from thread2"));
});
thread2.start();
Thread.sleep(1000);
}
}
| FutureContextTest |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/annotation/ProfileValueUtils.java | {
"start": 5835,
"end": 8152
} | class ____. Class-level usage overrides
* method-level usage.
* <p>Defaults to {@code true} if no {@link IfProfileValue
* @IfProfileValue} annotation is declared.
* @param profileValueSource the ProfileValueSource to use to determine if
* the test is enabled
* @param testMethod the test method
* @param testClass the test class
* @return {@code true} if the test is <em>enabled</em> in the current
* environment
*/
public static boolean isTestEnabledInThisEnvironment(ProfileValueSource profileValueSource, Method testMethod,
Class<?> testClass) {
IfProfileValue ifProfileValue = AnnotatedElementUtils.findMergedAnnotation(testClass, IfProfileValue.class);
boolean classLevelEnabled = isTestEnabledInThisEnvironment(profileValueSource, ifProfileValue);
if (classLevelEnabled) {
ifProfileValue = AnnotatedElementUtils.findMergedAnnotation(testMethod, IfProfileValue.class);
return isTestEnabledInThisEnvironment(profileValueSource, ifProfileValue);
}
return false;
}
/**
* Determine if the {@code value} (or one of the {@code values})
* in the supplied {@link IfProfileValue @IfProfileValue} annotation is
* <em>enabled</em> in the current environment.
* @param profileValueSource the ProfileValueSource to use to determine if
* the test is enabled
* @param ifProfileValue the annotation to introspect; may be
* {@code null}
* @return {@code true} if the test is <em>enabled</em> in the current
* environment or if the supplied {@code ifProfileValue} is
* {@code null}
*/
private static boolean isTestEnabledInThisEnvironment(ProfileValueSource profileValueSource,
@Nullable IfProfileValue ifProfileValue) {
if (ifProfileValue == null) {
return true;
}
String environmentValue = profileValueSource.get(ifProfileValue.name());
String[] annotatedValues = ifProfileValue.values();
if (StringUtils.hasLength(ifProfileValue.value())) {
Assert.isTrue(annotatedValues.length == 0, "Setting both the 'value' and 'values' attributes " +
"of @IfProfileValue is not allowed: choose one or the other.");
annotatedValues = new String[] { ifProfileValue.value() };
}
for (String value : annotatedValues) {
if (ObjectUtils.nullSafeEquals(value, environmentValue)) {
return true;
}
}
return false;
}
}
| level |
java | apache__camel | components/camel-sjms/src/test/java/org/apache/camel/component/sjms/consumer/InOnlyConsumerQueueTest.java | {
"start": 1059,
"end": 2250
} | class ____ extends JmsTestSupport {
private static final String SJMS_QUEUE_NAME = "sjms:queue:in.only.consumer.queue.InOnlyConsumerQueueTest";
private static final String MOCK_RESULT = "mock:result";
@Test
public void testSynchronous() throws Exception {
final String expectedBody = "Hello World";
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedMessageCount(1);
mock.expectedBodiesReceived(expectedBody);
template.sendBody(SJMS_QUEUE_NAME, expectedBody);
mock.assertIsSatisfied();
}
@Test
public void testTwoSynchronous() throws Exception {
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedBodiesReceived("Hello World", "Bye World");
template.sendBody(SJMS_QUEUE_NAME, "Hello World");
template.sendBody(SJMS_QUEUE_NAME, "Bye World");
mock.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(SJMS_QUEUE_NAME)
.to(MOCK_RESULT);
}
};
}
}
| InOnlyConsumerQueueTest |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/load/resource/gif/GifFrameResourceDecoderTest.java | {
"start": 793,
"end": 1679
} | class ____ {
private GifDecoder gifDecoder;
private GifFrameResourceDecoder resourceDecoder;
private Options options;
@Before
public void setUp() {
gifDecoder = mock(GifDecoder.class);
resourceDecoder = new GifFrameResourceDecoder(mock(BitmapPool.class));
options = new Options();
}
@Test
public void testReturnsFrameFromGifDecoder() throws IOException {
Bitmap expected = Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_4444);
when(gifDecoder.getNextFrame()).thenReturn(expected);
assertEquals(
expected,
Preconditions.checkNotNull(resourceDecoder.decode(gifDecoder, 100, 100, options)).get());
}
@Test
public void testReturnsNullIfGifDecoderReturnsNullFrame() {
when(gifDecoder.getNextFrame()).thenReturn(null);
assertNull(resourceDecoder.decode(gifDecoder, 100, 100, options));
}
}
| GifFrameResourceDecoderTest |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/context/properties/ConfigurationPropertiesReportEndpointTests.java | {
"start": 29618,
"end": 29938
} | class ____ {
@Bean
SanitizingFunction testSanitizingFunction() {
return (data) -> {
if (data.getKey().contains("custom") || data.getKey().contains("test")) {
return data.withValue("$$$");
}
return data;
};
}
}
@Configuration(proxyBeanMethods = false)
static | SanitizingFunctionConfiguration |
java | apache__camel | components/camel-openapi-java/src/test/java/org/apache/camel/openapi/model/SampleComplexResponseType.java | {
"start": 1632,
"end": 1771
} | class ____ {
double doubleField;
public double getDoubleField() {
return doubleField;
}
}
}
| InnerClass |
java | apache__camel | components/camel-schematron/src/main/java/org/apache/camel/component/schematron/constant/Constants.java | {
"start": 1784,
"end": 1829
} | class ____ not be instantiated");
}
}
| should |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandlerTests.java | {
"start": 871,
"end": 4904
} | class ____ extends ESTestCase {
private static Map<Class<? extends Error>, Integer> EXPECTED_STATUS = Map.of(
InternalError.class,
128,
OutOfMemoryError.class,
127,
StackOverflowError.class,
126,
UnknownError.class,
125,
IOError.class,
124
);
public void testUncaughtError() throws InterruptedException {
final Error error = randomFrom(
new InternalError(),
new OutOfMemoryError(),
new StackOverflowError(),
new UnknownError(),
new IOError(new IOException("fatal")),
new Error() {
}
);
final Thread thread = new Thread(() -> { throw error; });
final String name = randomAlphaOfLength(10);
thread.setName(name);
final AtomicBoolean halt = new AtomicBoolean();
final AtomicInteger observedStatus = new AtomicInteger();
final AtomicReference<String> threadNameReference = new AtomicReference<>();
final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler() {
@Override
void halt(int status) {
halt.set(true);
observedStatus.set(status);
}
@Override
void onFatalUncaught(String threadName, Throwable t) {
threadNameReference.set(threadName);
throwableReference.set(t);
}
@Override
void onNonFatalUncaught(String threadName, Throwable t) {
fail();
}
});
thread.start();
thread.join();
assertTrue(halt.get());
final int status;
if (EXPECTED_STATUS.containsKey(error.getClass())) {
status = EXPECTED_STATUS.get(error.getClass());
} else {
status = 1;
}
assertThat(observedStatus.get(), equalTo(status));
assertThat(threadNameReference.get(), equalTo(name));
assertThat(throwableReference.get(), equalTo(error));
}
public void testUncaughtException() throws InterruptedException {
final RuntimeException e = new RuntimeException("boom");
final Thread thread = new Thread(() -> { throw e; });
final String name = randomAlphaOfLength(10);
thread.setName(name);
final AtomicReference<String> threadNameReference = new AtomicReference<>();
final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler() {
@Override
void halt(int status) {
fail();
}
@Override
void onFatalUncaught(String threadName, Throwable t) {
fail();
}
@Override
void onNonFatalUncaught(String threadName, Throwable t) {
threadNameReference.set(threadName);
throwableReference.set(t);
}
});
thread.start();
thread.join();
assertThat(threadNameReference.get(), equalTo(name));
assertThat(throwableReference.get(), equalTo(e));
}
public void testIsFatalCause() {
assertFatal(new OutOfMemoryError());
assertFatal(new StackOverflowError());
assertFatal(new InternalError());
assertFatal(new UnknownError());
assertFatal(new IOError(new IOException()));
assertNonFatal(new RuntimeException());
assertNonFatal(new UncheckedIOException(new IOException()));
}
private void assertFatal(Throwable cause) {
assertTrue(ElasticsearchUncaughtExceptionHandler.isFatalUncaught(cause));
}
private void assertNonFatal(Throwable cause) {
assertFalse(ElasticsearchUncaughtExceptionHandler.isFatalUncaught(cause));
}
}
| ElasticsearchUncaughtExceptionHandlerTests |
java | junit-team__junit5 | junit-platform-reporting/src/main/java/org/junit/platform/reporting/legacy/xml/LegacyXmlReportGeneratingListener.java | {
"start": 1618,
"end": 4428
} | class ____ implements TestExecutionListener {
private final Path reportsDir;
private final PrintWriter out;
private final Clock clock;
private @Nullable XmlReportData reportData;
public LegacyXmlReportGeneratingListener(Path reportsDir, PrintWriter out) {
this(reportsDir, out, Clock.system(ZoneId.systemDefault()));
}
// For tests only
LegacyXmlReportGeneratingListener(String reportsDir, PrintWriter out, Clock clock) {
this(Path.of(reportsDir), out, clock);
}
private LegacyXmlReportGeneratingListener(Path reportsDir, PrintWriter out, Clock clock) {
this.reportsDir = reportsDir;
this.out = out;
this.clock = clock;
}
@Override
public void testPlanExecutionStarted(TestPlan testPlan) {
this.reportData = new XmlReportData(testPlan, clock);
try {
Files.createDirectories(this.reportsDir);
}
catch (IOException e) {
printException("Could not create reports directory: " + this.reportsDir, e);
}
}
@Override
public void testPlanExecutionFinished(TestPlan testPlan) {
this.reportData = null;
}
@Override
public void executionSkipped(TestIdentifier testIdentifier, String reason) {
requiredReportData().markSkipped(testIdentifier, reason);
writeXmlReportInCaseOfRoot(testIdentifier);
}
@Override
public void executionStarted(TestIdentifier testIdentifier) {
requiredReportData().markStarted(testIdentifier);
}
@Override
public void reportingEntryPublished(TestIdentifier testIdentifier, ReportEntry entry) {
requiredReportData().addReportEntry(testIdentifier, entry);
}
@Override
public void executionFinished(TestIdentifier testIdentifier, TestExecutionResult result) {
requiredReportData().markFinished(testIdentifier, result);
writeXmlReportInCaseOfRoot(testIdentifier);
}
private void writeXmlReportInCaseOfRoot(TestIdentifier testIdentifier) {
if (isRoot(testIdentifier)) {
String rootName = testIdentifier.getUniqueIdObject().getSegments().get(0).getValue();
writeXmlReportSafely(testIdentifier, rootName);
}
}
private void writeXmlReportSafely(TestIdentifier testIdentifier, String rootName) {
Path xmlFile = this.reportsDir.resolve("TEST-" + rootName + ".xml");
try (Writer fileWriter = Files.newBufferedWriter(xmlFile)) {
new XmlReportWriter(requiredReportData()).writeXmlReport(testIdentifier, fileWriter);
}
catch (XMLStreamException | IOException e) {
printException("Could not write XML report: " + xmlFile, e);
}
}
private XmlReportData requiredReportData() {
return requireNonNull(this.reportData);
}
private boolean isRoot(TestIdentifier testIdentifier) {
return testIdentifier.getParentIdObject().isEmpty();
}
private void printException(String message, Exception exception) {
out.println(message);
exception.printStackTrace(out);
}
}
| LegacyXmlReportGeneratingListener |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ClassInitializationDeadlockTest.java | {
"start": 6475,
"end": 6713
} | interface ____ {}
static final I i = new I() {};
}
""")
.doTest();
}
@Test
public void intermediateNonPrivate() {
testHelper
.addSourceLines(
"A.java",
"""
public | I |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandshaker.java | {
"start": 762,
"end": 1417
} | interface ____ {
/**
* Return extension configuration to submit to the server.
*
* @return the desired extension configuration.
*/
WebSocketExtensionData newRequestData();
/**
* Handshake based on server response. It should always succeed because server response
* should be a request acknowledge.
*
* @param extensionData
* the extension configuration sent by the server.
* @return an initialized extension if handshake phase succeed or null if failed.
*/
WebSocketClientExtension handshakeExtension(WebSocketExtensionData extensionData);
}
| WebSocketClientExtensionHandshaker |
java | apache__camel | components/camel-knative/camel-knative-api/src/main/java/org/apache/camel/component/knative/spi/KnativeEnvironment.java | {
"start": 1572,
"end": 7188
} | class ____ {
private final List<KnativeResource> resources;
public KnativeEnvironment() {
this.resources = new ArrayList<>();
}
public KnativeEnvironment(Collection<KnativeResource> resources) {
this.resources = new ArrayList<>(resources);
}
@JsonAlias("services")
@JsonProperty(value = "resources", required = true)
public List<KnativeResource> getResources() {
return resources;
}
@JsonAlias("services")
@JsonProperty(value = "resources", required = true)
public void setResources(List<KnativeResource> resources) {
this.resources.clear();
this.resources.addAll(resources);
}
public Stream<KnativeResource> stream() {
return resources.stream();
}
public Stream<KnativeResource> lookup(Knative.Type type, String name) {
return stream().filter(definition -> definition.matches(type, name));
}
// ************************
//
// Helpers
//
// ************************
/**
* Construct an instance o a {@link KnativeEnvironment} from a json serialized string.
*
* <pre>
* {@code
* {
* "resources": [
* {
* "type": "channel|endpoint|event",
* "name": "",
* "url": "",
* "path": "",
* "eventType": "",
* "objectKind": "",
* "objectApiVersion": "",
* "endpointKind": "source|sink",
* "filters": {
* "header": "value"
* },
* "ceOverrides": {
* "ce-type": "something"
* }
* },
* ]
* }
* }
* </pre>
*
* @param configuration the serialized representation of the Knative environment
* @return an instance of {@link KnativeEnvironment}
* @throws IOException if an error occur while parsing the file
*/
public static KnativeEnvironment mandatoryLoadFromSerializedString(String configuration) throws IOException {
try (Reader reader = new StringReader(configuration)) {
return Knative.MAPPER.readValue(reader, KnativeEnvironment.class);
}
}
/**
* Construct an instance o a {@link KnativeEnvironment} from a properties.
*
* <pre>
* {@code
* resources[0].name = ...
* resources[0].type = channel|endpoint|event
* resources[0].endpointKind = source|sink
* resources[0].url = ...
* }
* </pre>
*
* @param context the {@link CamelContext}
* @param properties the properties from which to construct the {@link KnativeEnvironment}
* @return an instance of {@link KnativeEnvironment}
* @throws IOException if an error occur while parsing the file
*/
public static KnativeEnvironment mandatoryLoadFromProperties(CamelContext context, Map<String, Object> properties) {
final ExtendedCamelContext econtext = context.getCamelContextExtension();
final KnativeEnvironment environment = new KnativeEnvironment();
PropertyBindingSupport.build()
.withIgnoreCase(true)
.withCamelContext(context)
.withTarget(environment)
.withProperties(properties)
.withRemoveParameters(true)
.withConfigurer(
PluginHelper.getConfigurerResolver(econtext)
.resolvePropertyConfigurer(KnativeEnvironment.class.getName(), context))
.withMandatory(true)
.bind();
return environment;
}
/**
* Construct an instance o a {@link KnativeEnvironment} from a json file.
*
* <pre>
* {@code
* {
* "resources": [
* {
* "type": "channel|endpoint|event",
* "name": "",
* "url": "",
* "path": "",
* "eventType": "",
* "objectKind": "",
* "objectApiVersion": "",
* "endpointKind": "source|sink",
* "filters": {
* "header": "value"
* },
* "ceOverrides": {
* "ce-type": "something"
* }
* },
* ]
* }
* }
* </pre>
*
* @param context the {@link CamelContext}
* @param path URI of the resource
* @return an instance of {@link KnativeEnvironment}
* @throws IOException if an error occur while parsing the file
*/
public static KnativeEnvironment mandatoryLoadFromResource(CamelContext context, String path) throws IOException {
try (InputStream is = ResourceHelper.resolveMandatoryResourceAsInputStream(context, path)) {
return Knative.MAPPER.readValue(is, KnativeEnvironment.class);
}
}
public static KnativeEnvironment on(KnativeResource... definitions) {
KnativeEnvironment env = new KnativeEnvironment();
for (KnativeResource definition : definitions) {
env.getResources().add(definition);
}
return env;
}
public static KnativeServiceBuilder serviceBuilder(Knative.Type type, String name) {
return new KnativeServiceBuilder(type, name);
}
// ************************
//
// Types
//
// ************************
public static final | KnativeEnvironment |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/VoidMissingNullableTest.java | {
"start": 3785,
"end": 4176
} | class ____ {
@Nullable Void v;
@Nullable
Void f() {
return v;
}
}
""")
.doTest();
}
@Test
public void negativeNotVoid() {
aggressiveCompilationHelper
.addSourceLines(
"Test.java",
"""
import javax.annotation.Nullable;
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/spi/TypeNullability.java | {
"start": 299,
"end": 1236
} | enum ____ {
/**
* The data type can accept nulls
* @see DatabaseMetaData#typeNullable
*/
NULLABLE,
/**
* The data type cannot accept nulls
* @see DatabaseMetaData#typeNoNulls
*/
NON_NULLABLE,
/**
* It is unknown if the data type accepts nulls
* @see DatabaseMetaData#typeNullableUnknown
*/
UNKNOWN;
/**
* Based on the code retrieved from {@link DatabaseMetaData#getTypeInfo()} for the {@code NULLABLE}
* column, return the appropriate enum.
*
* @param code The retrieved code value.
*
* @return The corresponding enum.
*/
public static TypeNullability interpret(short code) {
return switch (code) {
case DatabaseMetaData.typeNullable -> NULLABLE;
case DatabaseMetaData.typeNoNulls -> NON_NULLABLE;
case DatabaseMetaData.typeNullableUnknown -> UNKNOWN;
default -> throw new IllegalArgumentException( "Unknown type nullability code [" + code + "] encountered" );
};
}
}
| TypeNullability |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/objectid/TestObjectIdWithPolymorphic.java | {
"start": 942,
"end": 1275
} | class ____ extends Base
{
public int extra;
public Impl() { this(0, 0); }
protected Impl(int v, int e) {
super(v);
extra = e;
}
}
// [JACKSON-811] types
@JsonIdentityInfo(generator=ObjectIdGenerators.PropertyGenerator.class, property="id")
public static | Impl |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/AnnotationsScanner.java | {
"start": 2516,
"end": 7597
} | class ____ the class
* supplied to the predicate
* @param processor the processor that receives the annotations
* @return the result of {@link AnnotationsProcessor#finish(Object)}
*/
static <C, R> @Nullable R scan(C context, AnnotatedElement source, SearchStrategy searchStrategy,
Predicate<Class<?>> searchEnclosingClass, AnnotationsProcessor<C, R> processor) {
R result = process(context, source, searchStrategy, searchEnclosingClass, processor);
return processor.finish(result);
}
private static <C, R> @Nullable R process(C context, AnnotatedElement source,
SearchStrategy searchStrategy, Predicate<Class<?>> searchEnclosingClass,
AnnotationsProcessor<C, R> processor) {
if (source instanceof Class<?> clazz) {
return processClass(context, clazz, searchStrategy, searchEnclosingClass, processor);
}
if (source instanceof Method method) {
return processMethod(context, method, searchStrategy, processor);
}
return processElement(context, source, processor);
}
private static <C, R> @Nullable R processClass(C context, Class<?> source, SearchStrategy searchStrategy,
Predicate<Class<?>> searchEnclosingClass, AnnotationsProcessor<C, R> processor) {
return switch (searchStrategy) {
case DIRECT -> processElement(context, source, processor);
case INHERITED_ANNOTATIONS -> processClassInheritedAnnotations(context, source, processor);
case SUPERCLASS -> processClassHierarchy(context, source, processor, false, Search.never);
case TYPE_HIERARCHY -> processClassHierarchy(context, source, processor, true, searchEnclosingClass);
};
}
private static <C, R> @Nullable R processClassInheritedAnnotations(C context, Class<?> source,
AnnotationsProcessor<C, R> processor) {
try {
if (isWithoutHierarchy(source, Search.never)) {
return processElement(context, source, processor);
}
@Nullable Annotation[] relevant = null;
int remaining = Integer.MAX_VALUE;
int aggregateIndex = 0;
Class<?> root = source;
while (source != null && source != Object.class && remaining > 0 && !hasPlainJavaAnnotationsOnly(source)) {
R result = processor.doWithAggregate(context, aggregateIndex);
if (result != null) {
return result;
}
@Nullable Annotation[] declaredAnns = getDeclaredAnnotations(source, true);
if (declaredAnns.length > 0) {
if (relevant == null) {
relevant = root.getAnnotations();
remaining = relevant.length;
}
for (int i = 0; i < declaredAnns.length; i++) {
if (declaredAnns[i] != null) {
boolean isRelevant = false;
for (int relevantIndex = 0; relevantIndex < relevant.length; relevantIndex++) {
//noinspection DataFlowIssue
if (relevant[relevantIndex] != null &&
declaredAnns[i].annotationType() == relevant[relevantIndex].annotationType()) {
isRelevant = true;
relevant[relevantIndex] = null;
remaining--;
break;
}
}
if (!isRelevant) {
declaredAnns[i] = null;
}
}
}
}
result = processor.doWithAnnotations(context, aggregateIndex, source, declaredAnns);
if (result != null) {
return result;
}
source = source.getSuperclass();
aggregateIndex++;
}
}
catch (Throwable ex) {
AnnotationUtils.handleIntrospectionFailure(source, ex);
}
return null;
}
private static <C, R> @Nullable R processClassHierarchy(C context, Class<?> source,
AnnotationsProcessor<C, R> processor, boolean includeInterfaces,
Predicate<Class<?>> searchEnclosingClass) {
return processClassHierarchy(context, new int[] {0}, source, processor,
includeInterfaces, searchEnclosingClass);
}
private static <C, R> @Nullable R processClassHierarchy(C context, int[] aggregateIndex, Class<?> source,
AnnotationsProcessor<C, R> processor, boolean includeInterfaces,
Predicate<Class<?>> searchEnclosingClass) {
try {
R result = processor.doWithAggregate(context, aggregateIndex[0]);
if (result != null) {
return result;
}
if (hasPlainJavaAnnotationsOnly(source)) {
return null;
}
@Nullable Annotation[] annotations = getDeclaredAnnotations(source, false);
result = processor.doWithAnnotations(context, aggregateIndex[0], source, annotations);
if (result != null) {
return result;
}
aggregateIndex[0]++;
if (includeInterfaces) {
for (Class<?> interfaceType : source.getInterfaces()) {
R interfacesResult = processClassHierarchy(context, aggregateIndex,
interfaceType, processor, true, searchEnclosingClass);
if (interfacesResult != null) {
return interfacesResult;
}
}
}
Class<?> superclass = source.getSuperclass();
if (superclass != Object.class && superclass != null) {
R superclassResult = processClassHierarchy(context, aggregateIndex,
superclass, processor, includeInterfaces, searchEnclosingClass);
if (superclassResult != null) {
return superclassResult;
}
}
if (searchEnclosingClass.test(source)) {
// Since merely attempting to load the enclosing | of |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/annotation/SpyAnnotationTest.java | {
"start": 1347,
"end": 2453
} | class ____ extends TestBase {
@Spy final List<String> spiedList = new ArrayList<String>();
@Spy InnerStaticClassWithNoArgConstructor staticTypeWithNoArgConstructor;
@Spy InnerStaticClassWithoutDefinedConstructor staticTypeWithoutDefinedConstructor;
@Spy MockTranslator translator;
@Rule public final ExpectedException shouldThrow = ExpectedException.none();
@Test
public void should_init_spy_by_instance() throws Exception {
doReturn("foo").when(spiedList).get(10);
assertEquals("foo", spiedList.get(10));
assertTrue(spiedList.isEmpty());
}
@Test
public void should_init_spy_and_automatically_create_instance() throws Exception {
when(staticTypeWithNoArgConstructor.toString()).thenReturn("x");
when(staticTypeWithoutDefinedConstructor.toString()).thenReturn("y");
assertEquals("x", staticTypeWithNoArgConstructor.toString());
assertEquals("y", staticTypeWithoutDefinedConstructor.toString());
}
@Test
public void should_allow_spying_on_interfaces() throws Exception {
| SpyAnnotationTest |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/UnusedNestedClass.java | {
"start": 1984,
"end": 2093
} | class ____ unused, and can be removed.",
severity = WARNING,
documentSuppression = false)
public final | is |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/UMemberSelect.java | {
"start": 1165,
"end": 3425
} | class ____ extends UExpression implements MemberSelectTree {
/**
* Use of this string as an expression in a member select will cause this method select to be
* inlined as an identifier. I.e., "".foo will be inlined as foo.
*/
public static final String CONVERT_TO_IDENT = "";
public static UMemberSelect create(UExpression expression, CharSequence identifier, UType type) {
return new AutoValue_UMemberSelect(expression, StringName.of(identifier), type);
}
@Override
public abstract UExpression getExpression();
@Override
public abstract StringName getIdentifier();
abstract UType type();
@Override
public Choice<Unifier> visitMemberSelect(MemberSelectTree fieldAccess, Unifier unifier) {
if (ASTHelpers.getSymbol(fieldAccess) != null) {
return getIdentifier()
.unify(fieldAccess.getIdentifier(), unifier)
.flatMap(unifications(getExpression(), fieldAccess.getExpression()))
.flatMap(unifications(type(), ASTHelpers.getSymbol(fieldAccess).asType()));
}
return Choice.none();
}
@Override
public Choice<Unifier> visitIdentifier(IdentifierTree ident, Unifier unifier) {
Symbol sym = ASTHelpers.getSymbol(ident);
if (sym != null && sym.owner.type != null && sym.owner.type.isReference()) {
JCExpression thisIdent = unifier.thisExpression(sym.owner.type);
return getIdentifier()
.unify(ident.getName(), unifier)
.flatMap(unifications(getExpression(), thisIdent))
.flatMap(unifications(type(), sym.asType()));
}
return Choice.none();
}
@Override
public Kind getKind() {
return Kind.MEMBER_SELECT;
}
@Override
public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
return visitor.visitMemberSelect(this, data);
}
@Override
public JCExpression inline(Inliner inliner) throws CouldNotResolveImportException {
JCExpression expression = getExpression().inline(inliner);
if (expression.toString().equals(CONVERT_TO_IDENT)) {
return inliner.maker().Ident(getIdentifier().inline(inliner));
}
// TODO(lowasser): consider inlining this.foo() as foo()
return inliner.maker().Select(getExpression().inline(inliner), getIdentifier().inline(inliner));
}
}
| UMemberSelect |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/odps/OdpsFormatCommentTest13.java | {
"start": 255,
"end": 1436
} | class ____ extends TestCase {
public void test_column_comment() throws Exception {
String sql = "select * from t where f0 > 0 -- comment_0"
+ "\n and -- comment_1"
+ "\n f1 > 1 -- comment_2"
+ "\n and -- comment_3"
+ "\n f2 > 2 -- comment_4";
SQLStatement stmt = SQLUtils
.parseSingleStatement(sql, DbType.odps, SQLParserFeature.KeepComments,
SQLParserFeature.EnableSQLBinaryOpExprGroup);
System.out.println("第一次生成的sql===" + stmt.toString());
SQLStatement stmt2 = SQLUtils
.parseSingleStatement(stmt.toString(), DbType.odps, SQLParserFeature.KeepComments,
SQLParserFeature.EnableSQLBinaryOpExprGroup);
System.out.println("第二次生成的sql===" + stmt2.toString());
assertEquals(
"SELECT *\n"
+ "FROM t\n"
+ "WHERE f0 > 0 -- comment_0\n"
+ "\tAND -- comment_1\n"
+ "\tf1 > 1 -- comment_2\n"
+ "\tAND -- comment_3\n"
+ "\tf2 > 2 -- comment_4",
SQLUtils.formatOdps(sql));
}
}
| OdpsFormatCommentTest13 |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/RecursiveComparisonAssert_isEqualTo_withIntrospectionStrategy_Test.java | {
"start": 6970,
"end": 7479
} | class ____ {
String first_name;
String last_name;
int _age;
String phone_number_1;
String phone_number_2;
String profile_url;
AuthorDto(String firstName, String lastName, int age, String phoneNumber1, String phoneNumber2, String profileUrl) {
this.first_name = firstName;
this.last_name = lastName;
this._age = age;
this.phone_number_1 = phoneNumber1;
this.phone_number_2 = phoneNumber2;
this.profile_url = profileUrl;
}
}
static | AuthorDto |
java | apache__camel | components/camel-ai/camel-djl/src/main/java/org/apache/camel/component/djl/DJLProducer.java | {
"start": 1066,
"end": 1602
} | class ____ extends DefaultProducer {
private final AbstractPredictor predictor;
public DJLProducer(DJLEndpoint endpoint) throws Exception {
super(endpoint);
if (endpoint.getArtifactId() != null) {
this.predictor = ModelPredictorProducer.getZooPredictor(endpoint);
} else {
this.predictor = ModelPredictorProducer.getCustomPredictor(endpoint);
}
}
public void process(Exchange exchange) throws Exception {
this.predictor.process(exchange);
}
}
| DJLProducer |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/beans/AbstractInitializableBeanIntrospection.java | {
"start": 36576,
"end": 41044
} | class ____<P> implements UnsafeBeanProperty<B, P> {
private final BeanPropertyRef<P> ref;
private final Class<?> typeOrWrapperType;
private final AnnotationMetadata annotationMetadata;
private BeanPropertyImpl(BeanPropertyRef<P> ref) {
this.ref = ref;
this.typeOrWrapperType = ReflectionUtils.getWrapperType(getType());
this.annotationMetadata = EvaluatedAnnotationMetadata.wrapIfNecessary(ref.argument.getAnnotationMetadata());
}
@NonNull
@Override
public String getName() {
return ref.argument.getName();
}
@NonNull
@Override
public Class<P> getType() {
return ref.argument.getType();
}
@Override
@NonNull
public Argument<P> asArgument() {
return ref.argument;
}
@NonNull
@Override
public BeanIntrospection<B> getDeclaringBean() {
return AbstractInitializableBeanIntrospection.this;
}
@Override
public AnnotationMetadata getAnnotationMetadata() {
return annotationMetadata;
}
@Nullable
@Override
public P get(@NonNull B bean) {
ArgumentUtils.requireNonNull("bean", bean);
if (!beanType.isInstance(bean)) {
throw new IllegalArgumentException("Invalid bean [" + bean + "] for type: " + beanType);
}
if (isWriteOnly()) {
throw new UnsupportedOperationException("Cannot read from a write-only property: " + getName());
}
return dispatchOne(ref.getMethodIndex, bean, null);
}
@Override
public P getUnsafe(B bean) {
return dispatchOne(ref.getMethodIndex, bean, null);
}
@Override
public void set(@NonNull B bean, @Nullable P value) {
ArgumentUtils.requireNonNull("bean", bean);
if (!beanType.isInstance(bean)) {
throw new IllegalArgumentException("Invalid bean [" + bean + "] for type: " + beanType);
}
if (isReadOnly()) {
throw new UnsupportedOperationException("Cannot write a read-only property: " + getName());
}
if (value != null && !typeOrWrapperType.isInstance(value)) {
throw new IllegalArgumentException("Specified value [" + value + "] is not of the correct type: " + getType());
}
dispatchOne(ref.setMethodIndex, bean, value);
}
@Override
public void setUnsafe(B bean, P value) {
dispatchOne(ref.setMethodIndex, bean, value);
}
@Override
public B withValue(@NonNull B bean, @Nullable P value) {
ArgumentUtils.requireNonNull("bean", bean);
if (!beanType.isInstance(bean)) {
throw new IllegalArgumentException("Invalid bean [" + bean + "] for type: " + beanType);
}
return withValueUnsafe(bean, value);
}
@Override
public B withValueUnsafe(B bean, P value) {
if (value == getUnsafe(bean)) {
return bean;
} else if (ref.withMethodIndex == -1) {
if (!ref.readyOnly && ref.setMethodIndex != -1) {
dispatchOne(ref.setMethodIndex, bean, value);
return bean;
}
return UnsafeBeanProperty.super.withValue(bean, value);
} else {
return dispatchOne(ref.withMethodIndex, bean, value);
}
}
@Override
public boolean isReadOnly() {
return ref.readyOnly;
}
@Override
public boolean isWriteOnly() {
return ref.writeOnly;
}
@Override
public boolean hasSetterOrConstructorArgument() {
return ref.mutable;
}
@Override
public String toString() {
return "BeanProperty{" +
"beanType=" + beanType +
", type=" + ref.argument.getType() +
", name='" + ref.argument.getName() + '\'' +
'}';
}
}
/**
* Implementation of {@link UnsafeBeanWriteProperty} that is using {@link BeanPropertyRef} and method dispatch.
*
* @param <P> The property type
*/
private final | BeanPropertyImpl |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java | {
"start": 5927,
"end": 7993
} | class ____ implements RpcInvocationHandler {
private final Map<String, Message> returnTypes =
new ConcurrentHashMap<String, Message>();
private boolean isClosed = false;
private final Client.ConnectionId remoteId;
private final Client client;
private final long clientProtocolVersion;
private final String protocolName;
private AtomicBoolean fallbackToSimpleAuth;
private AlignmentContext alignmentContext;
protected Invoker(Class<?> protocol, InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf, SocketFactory factory,
int rpcTimeout, RetryPolicy connectionRetryPolicy,
AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
throws IOException {
this(protocol, Client.ConnectionId.getConnectionId(
addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
conf, factory, alignmentContext);
this.fallbackToSimpleAuth = fallbackToSimpleAuth;
}
/**
* This constructor takes a connectionId, instead of creating a new one.
*
* @param protocol input protocol.
* @param connId input connId.
* @param conf input Configuration.
* @param factory input factory.
* @param alignmentContext Alignment context
*/
protected Invoker(Class<?> protocol, Client.ConnectionId connId,
Configuration conf, SocketFactory factory, AlignmentContext alignmentContext) {
this.remoteId = connId;
this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
this.protocolName = RPC.getProtocolName(protocol);
this.clientProtocolVersion = RPC
.getProtocolVersion(protocol);
this.alignmentContext = alignmentContext;
}
private RequestHeaderProto constructRpcRequestHeader(Method method) {
RequestHeaderProto.Builder builder = RequestHeaderProto
.newBuilder();
builder.setMethodName(method.getName());
// For protobuf, {@code protocol} used when creating client side proxy is
// the | Invoker |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java | {
"start": 6993,
"end": 11325
} | class ____ implements ActionListener<ShardResponse> {
private final ShardId shardId;
private final Map<String, IndexMetadata> indexMetadataByName;
ReplicationResponseActionListener(ShardId shardId, Map<String, IndexMetadata> indexMetadataByName) {
this.shardId = shardId;
this.indexMetadataByName = indexMetadataByName;
}
@Override
public void onResponse(ShardResponse shardResponse) {
assert shardResponse != null;
logger.trace("{}: got response from {}", actionName, shardId);
addShardResponse(
shardResponse.getShardInfo().getTotal(),
shardResponse.getShardInfo().getSuccessful(),
Arrays.stream(shardResponse.getShardInfo().getFailures())
.map(
f -> new DefaultShardOperationFailedException(
new BroadcastShardOperationFailedException(shardId, f.getCause())
)
)
.toList()
);
}
@Override
public void onFailure(Exception e) {
logger.trace("{}: got failure from {}", actionName, shardId);
final int numCopies = indexMetadataByName.get(shardId.getIndexName()).getNumberOfReplicas() + 1;
final List<DefaultShardOperationFailedException> result;
if (TransportActions.isShardNotAvailableException(e)) {
result = List.of();
} else {
final var failures = new DefaultShardOperationFailedException[numCopies];
Arrays.fill(
failures,
new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(shardId, e))
);
result = Arrays.asList(failures);
}
addShardResponse(numCopies, 0, result);
}
}
};
}
protected void shardExecute(
Task task,
Request request,
ShardId shardId,
SplitShardCountSummary shardCountSummary,
ActionListener<ShardResponse> shardActionListener
) {
assert Transports.assertNotTransportThread("may hit all the shards");
ShardRequest shardRequest = newShardRequest(request, shardId, shardCountSummary);
shardRequest.setParentTask(clusterService.localNode().getId(), task.getId());
client.executeLocally(replicatedBroadcastShardAction, shardRequest, shardActionListener);
}
/**
* @return all shard ids the request should run on
*/
protected List<ShardRecord> shards(Request request, ProjectState projectState) {
assert Transports.assertNotTransportThread("may hit all the shards");
List<ShardRecord> shards = new ArrayList<>();
OperationRouting operationRouting = clusterService.operationRouting();
ProjectMetadata project = projectState.metadata();
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(project, request);
for (String index : concreteIndices) {
Iterator<IndexShardRoutingTable> iterator = operationRouting.allWritableShards(projectState, index);
IndexMetadata indexMetadata = project.index(index);
while (iterator.hasNext()) {
ShardId shardId = iterator.next().shardId();
SplitShardCountSummary splitSummary = SplitShardCountSummary.forIndexing(indexMetadata, shardId.getId());
shards.add(new ShardRecord(shardId, splitSummary));
}
}
return shards;
}
protected abstract ShardRequest newShardRequest(Request request, ShardId shardId, SplitShardCountSummary shardCountSummary);
protected abstract Response newResponse(
int successfulShards,
int failedShards,
int totalNumCopies,
List<DefaultShardOperationFailedException> shardFailures
);
}
| ReplicationResponseActionListener |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonAgainstSchemaTask.java | {
"start": 1723,
"end": 5727
} | class ____ extends DefaultTask {
private File jsonSchema;
private File report;
private FileCollection inputFiles;
@Incremental
@InputFiles
public FileCollection getInputFiles() {
return inputFiles;
}
public void setInputFiles(FileCollection inputFiles) {
this.inputFiles = inputFiles;
}
@InputFile
public File getJsonSchema() {
return jsonSchema;
}
public void setJsonSchema(File jsonSchema) {
this.jsonSchema = jsonSchema;
}
public void setReport(File report) {
this.report = report;
}
@OutputFile
public File getReport() {
return this.report;
}
@Internal
protected ObjectMapper getMapper() {
return new ObjectMapper();
}
@Internal
protected String getFileType() {
return "JSON";
}
@TaskAction
public void validate(InputChanges inputChanges) throws IOException {
final File jsonSchemaOnDisk = getJsonSchema();
final JsonSchema jsonSchema = buildSchemaObject(jsonSchemaOnDisk);
final Map<File, Set<String>> errors = new LinkedHashMap<>();
final ObjectMapper mapper = this.getMapper();
// incrementally evaluate input files
// validate all files and hold on to errors for a complete report if there are failures
StreamSupport.stream(inputChanges.getFileChanges(getInputFiles()).spliterator(), false)
.filter(f -> f.getChangeType() != ChangeType.REMOVED)
.map(FileChange::getFile)
.filter(file -> file.isDirectory() == false)
.forEach(file -> {
try {
Set<ValidationMessage> validationMessages = jsonSchema.validate(mapper.readTree(file));
maybeLogAndCollectError(validationMessages, errors, file);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
if (errors.isEmpty()) {
Files.writeString(getReport().toPath(), "Success! No validation errors found.", StandardOpenOption.CREATE);
} else {
try (PrintWriter printWriter = new PrintWriter(getReport())) {
printWriter.printf("Schema: %s%n", jsonSchemaOnDisk);
printWriter.println("----------Validation Errors-----------");
errors.values().stream().flatMap(Collection::stream).forEach(printWriter::println);
}
StringBuilder sb = new StringBuilder();
sb.append("Verification failed. See the report at: ");
sb.append(getReport().toURI().toASCIIString());
sb.append(System.lineSeparator());
sb.append(
String.format(
"Error validating %s: %d files contained %d violations",
getFileType(),
errors.keySet().size(),
errors.values().size()
)
);
throw new JsonSchemaException(sb.toString());
}
}
private JsonSchema buildSchemaObject(File jsonSchemaOnDisk) throws IOException {
final ObjectMapper jsonMapper = new ObjectMapper();
final SchemaValidatorsConfig config = new SchemaValidatorsConfig();
final JsonSchemaFactory factory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7);
return factory.getSchema(jsonMapper.readTree(jsonSchemaOnDisk), config);
}
private void maybeLogAndCollectError(Set<ValidationMessage> messages, Map<File, Set<String>> errors, File file) {
final String fileType = getFileType();
for (ValidationMessage message : messages) {
getLogger().error("[validate {}][ERROR][{}][{}]", fileType, file.getName(), message.toString());
errors.computeIfAbsent(file, k -> new LinkedHashSet<>())
.add(String.format("%s: %s", file.getAbsolutePath(), message.toString()));
}
}
}
| ValidateJsonAgainstSchemaTask |
java | apache__camel | components/camel-mdc/src/test/java/org/apache/camel/mdc/MDCSelectedPropertiesTest.java | {
"start": 1391,
"end": 3480
} | class ____ extends ExchangeTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(MDCAllPropertiesTest.class);
@Override
protected CamelContext createCamelContext() throws Exception {
MDCService mdcSvc = new MDCService();
mdcSvc.setCustomProperties("prop1,prop2,prop3");
CamelContext context = super.createCamelContext();
CamelContextAware.trySetCamelContext(mdcSvc, context);
mdcSvc.init(context);
return context;
}
@Test
void testRouteSingleRequest() throws IOException {
template.request("direct:start", null);
// We should get no MDC after the route has been executed
assertEquals(0, MDC.getCopyOfContextMap().size());
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.routeId("start")
.log("A message")
.setProperty("prop1", simple("Property1"))
.setProperty("prop2", simple("Property2"))
// prop3 is missing on purpose!
.setProperty("prop4", simple("Property4"))
.process(exchange -> {
LOG.info("A process");
assertNotNull(MDC.get(MDCService.MDC_MESSAGE_ID));
assertNotNull(MDC.get(MDCService.MDC_EXCHANGE_ID));
assertNotNull(MDC.get(MDCService.MDC_ROUTE_ID));
assertNotNull(MDC.get(MDCService.MDC_CAMEL_CONTEXT_ID));
assertEquals("Property1", MDC.get("prop1"));
assertEquals("Property2", MDC.get("prop2"));
assertNull(MDC.get("prop3"));
assertNull(MDC.get("prop4"));
})
.to("log:info");
}
};
}
}
| MDCSelectedPropertiesTest |
java | spring-projects__spring-framework | spring-tx/src/test/java/org/springframework/transaction/annotation/EnableTransactionManagementTests.java | {
"start": 18481,
"end": 18665
} | class ____ extends EnableTxConfig {
}
@Configuration
@EnableTransactionManagement
@Import(PlaceholderConfig.class)
@Conditional(NeverCondition.class)
static | InheritedEnableTxConfig |
java | apache__camel | components/camel-consul/src/main/java/org/apache/camel/component/consul/endpoint/ConsulAgentActions.java | {
"start": 863,
"end": 1092
} | interface ____ {
String CHECKS = "CHECKS";
String SERVICES = "SERVICES";
String MEMBERS = "MEMBERS";
String AGENT = "AGENT";
String REGISTER = "REGISTER";
String DEREGISTER = "DEREGISTER";
}
| ConsulAgentActions |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateTypeTest9.java | {
"start": 1021,
"end": 3584
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = "CREATE OR REPLACE TYPE \"T_EVA_PM_INFO_OBJECT\" \n" +
"under ecc_pm.t_pminfo_loan_object(\n" +
" marster_dept_id number,\n" +
" marster_dept_name varchar2(500), --鎵�灞炲姙浜嬪\uE629\n" +
" area_id number,\n" +
" area_name varchar2(500), --鎵�灞炲浗瀹舵垨鍦板尯\n" +
" cust2_id2 number(10), --鏈�缁堝\uE179鎴� Id\n" +
" cust2_name2 varchar2(256), --鏈�缁堝\uE179鎴� 鍚嶇О\n" +
" finalUsage2 varchar2(256), --鏈�缁堢敤閫�\n" +
" sanctionedParty12 varchar2(256), --椤圭洰瀹㈡埛鍙楀埗瑁佷富浣�\n" +
" sanctionedParty22 varchar2(256) --鏈�缁堝\uE179鎴峰彈鍒惰\uE5C6涓讳綋\n" +
" );";
System.out.println(sql);
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE OR REPLACE TYPE \"T_EVA_PM_INFO_OBJECT\" UNDER ecc_pm.t_pminfo_loan_object (\n" +
"\tmarster_dept_id number, \n" +
"\tmarster_dept_name varchar2(500), \n" +
"\tarea_id number, \n" +
"\tarea_name varchar2(500), \n" +
"\tcust2_id2 number(10), \n" +
"\tcust2_name2 varchar2(256), \n" +
"\tfinalUsage2 varchar2(256), \n" +
"\tsanctionedParty12 varchar2(256), \n" +
"\tsanctionedParty22 varchar2(256)\n" +
");",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("orders", "order_total")));
}
}
| OracleCreateTypeTest9 |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-26/src/main/java/org/redisson/spring/data/connection/RedissonReactiveGeoCommands.java | {
"start": 2242,
"end": 18326
} | class ____ extends RedissonBaseReactive implements ReactiveGeoCommands {

    // Reactive adapter that maps Spring Data Redis geo commands onto Redisson's
    // reactive command executor. Each method consumes a Publisher of command
    // objects and issues one Redis GEO* command per element, wrapping each
    // result in the corresponding Spring Data response type.

    RedissonReactiveGeoCommands(CommandReactiveExecutor executorService) {
        super(executorService);
    }

    /**
     * GEOADD: adds each (longitude, latitude, member) triple of every command
     * to the geo sorted set stored at the command's key.
     * Emits one {@link NumericResponse} per command with the server's reply.
     */
    @Override
    public Flux<NumericResponse<GeoAddCommand, Long>> geoAdd(Publisher<GeoAddCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getGeoLocations(), "Locations must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>();
            args.add(keyBuf);
            // GEOADD argument order: key, then repeated (x = longitude, y = latitude, member)
            for (GeoLocation<ByteBuffer> location : command.getGeoLocations()) {
                args.add(location.getPoint().getX());
                args.add(location.getPoint().getY());
                args.add(toByteArray(location.getName()));
            }

            Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.GEOADD, args.toArray());
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    /**
     * GEODIST: computes the distance between two members of a geo set.
     * Falls back to meters when the command carries no explicit metric.
     */
    @Override
    public Flux<CommandResponse<GeoDistCommand, Distance>> geoDist(Publisher<GeoDistCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getFrom(), "From member must not be null!");
            Assert.notNull(command.getTo(), "To member must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] fromBuf = toByteArray(command.getFrom());
            byte[] toBuf = toByteArray(command.getTo());

            // Default unit when the caller did not specify one.
            Metric metric = RedisGeoCommands.DistanceUnit.METERS;
            if (command.getMetric().isPresent()) {
                metric = command.getMetric().get();
            }

            // NOTE(review): GEODIST is a read-only command but is issued through
            // write(...) here — presumably to pin it to the master node; confirm.
            Mono<Distance> m = write(keyBuf, DoubleCodec.INSTANCE, new RedisCommand<Distance>("GEODIST", new DistanceConvertor(metric)),
                    keyBuf, fromBuf, toBuf, metric.getAbbreviation());
            return m.map(v -> new CommandResponse<>(command, v));
        });
    }

    // GEOHASH reply is a flat list of geohash strings, one per requested member.
    private static final RedisCommand<List<Object>> GEOHASH = new RedisCommand<List<Object>>("GEOHASH", new ObjectListReplayDecoder<Object>());

    /**
     * GEOHASH: returns the Geohash string of each requested member.
     */
    @Override
    public Flux<MultiValueResponse<GeoHashCommand, String>> geoHash(Publisher<GeoHashCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMembers(), "Members must not be null!");

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>(command.getMembers().size() + 1);
            args.add(keyBuf);
            args.addAll(command.getMembers().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));

            Mono<List<String>> m = read(keyBuf, StringCodec.INSTANCE, GEOHASH, args.toArray());
            return m.map(v -> new MultiValueResponse<>(command, v));
        });
    }

    // Decoder chain turning the nested GEOPOS reply arrays into Point instances.
    private final MultiDecoder<Map<Object, Object>> geoDecoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());

    /**
     * GEOPOS: returns the (longitude, latitude) position of each requested member.
     */
    @Override
    public Flux<MultiValueResponse<GeoPosCommand, Point>> geoPos(Publisher<GeoPosCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMembers(), "Members must not be null!");

            RedisCommand<Map<Object, Object>> cmd = new RedisCommand<Map<Object, Object>>("GEOPOS", geoDecoder);

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> args = new ArrayList<Object>(command.getMembers().size() + 1);
            args.add(keyBuf);
            args.addAll(command.getMembers().stream().map(buf -> toByteArray(buf)).collect(Collectors.toList()));

            Mono<List<Point>> m = read(keyBuf, StringCodec.INSTANCE, cmd, args.toArray());
            return m.map(v -> new MultiValueResponse<>(command, v));
        });
    }

    // Decoder chain used for WITHCOORD replies (member + coordinates).
    // NOTE(review): field name "postitionDecoder" is a typo for "positionDecoder";
    // left as-is here since this is a documentation-only pass.
    private final MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> postitionDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(), new CodecDecoder(), new PointDecoder(), new ObjectListReplayDecoder());

    /**
     * GEORADIUS_RO: members within the given radius around an explicit point.
     * Uses WITHCOORD when requested, otherwise WITHDIST, and applies optional
     * COUNT and sort-direction arguments.
     */
    @Override
    public Flux<CommandResponse<GeoRadiusCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoRadius(
            Publisher<GeoRadiusCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getPoint(), "Point must not be null!");
            Assert.notNull(command.getDistance(), "Distance must not be null!");

            GeoRadiusCommandArgs args = command.getArgs().orElse(GeoRadiusCommandArgs.newGeoRadiusArgs());

            byte[] keyBuf = toByteArray(command.getKey());
            List<Object> params = new ArrayList<Object>();
            params.add(keyBuf);
            // toPlainString avoids scientific notation in the wire format.
            params.add(BigDecimal.valueOf(command.getPoint().getX()).toPlainString());
            params.add(BigDecimal.valueOf(command.getPoint().getY()).toPlainString());
            params.add(command.getDistance().getValue());
            params.add(command.getDistance().getMetric().getAbbreviation());

            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEORADIUS_RO", postitionDecoder);
                params.add("WITHCOORD");
            } else {
                // Distance decoder needs the metric to interpret the raw numbers.
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getDistance().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEORADIUS_RO", distanceDecoder);
                params.add("WITHDIST");
            }

            if (args.getLimit() != null) {
                params.add("COUNT");
                params.add(args.getLimit());
            }
            if (args.getSortDirection() != null) {
                params.add(args.getSortDirection().name());
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, params.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    /**
     * GEORADIUSBYMEMBER_RO: same as {@link #geoRadius} but centered on an
     * existing member of the geo set instead of explicit coordinates.
     */
    @Override
    public Flux<CommandResponse<GeoRadiusByMemberCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoRadiusByMember(
            Publisher<GeoRadiusByMemberCommand> commands) {
        return execute(commands, command -> {

            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getMember(), "Member must not be null!");
            Assert.notNull(command.getDistance(), "Distance must not be null!");

            GeoRadiusCommandArgs args = command.getArgs().orElse(GeoRadiusCommandArgs.newGeoRadiusArgs());

            byte[] keyBuf = toByteArray(command.getKey());
            byte[] memberBuf = toByteArray(command.getMember());

            List<Object> params = new ArrayList<Object>();
            params.add(keyBuf);
            params.add(memberBuf);
            params.add(command.getDistance().getValue());
            params.add(command.getDistance().getMetric().getAbbreviation());

            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEORADIUSBYMEMBER_RO", postitionDecoder);
                params.add("WITHCOORD");
            } else {
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getDistance().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEORADIUSBYMEMBER_RO", distanceDecoder);
                params.add("WITHDIST");
            }

            if (args.getLimit() != null) {
                params.add("COUNT");
                params.add(args.getLimit());
            }
            if (args.getSortDirection() != null) {
                params.add(args.getSortDirection().name());
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, params.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    // Renders a coordinate without scientific notation for the Redis protocol.
    private String convert(double longitude) {
        return BigDecimal.valueOf(longitude).toPlainString();
    }

    // Encodes an arbitrary value with the raw byte-array codec.
    private ByteBuf encode(Object value) {
        return executorService.encode(ByteArrayCodec.INSTANCE, value);
    }

    /**
     * GEOSEARCH: searches within a radius or bounding box, centered either on
     * explicit coordinates (FROMLONLAT) or on an existing member (FROMMEMBER).
     */
    @Override
    public Flux<CommandResponse<GeoSearchCommand, Flux<GeoResult<GeoLocation<ByteBuffer>>>>> geoSearch(Publisher<GeoSearchCommand> commands) {
        return execute(commands, command -> {
            Assert.notNull(command.getArgs(), "Args must not be null!");
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getShape(), "Shape must not be null!");
            Assert.notNull(command.getReference(), "Reference must not be null!");

            List<Object> commandParams = new ArrayList<>();
            byte[] keyBuf = toByteArray(command.getKey());
            commandParams.add(keyBuf);

            // Search origin: explicit coordinates or an existing member.
            if (command.getReference() instanceof GeoReference.GeoCoordinateReference) {
                GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) command.getReference();
                commandParams.add("FROMLONLAT");
                commandParams.add(convert(ref.getLongitude()));
                commandParams.add(convert(ref.getLatitude()));
            } else if (command.getReference() instanceof GeoReference.GeoMemberReference) {
                GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) command.getReference();
                commandParams.add("FROMMEMBER");
                commandParams.add(encode(ref.getMember()));
            }

            // Search area: circle (BYRADIUS) or rectangle (BYBOX).
            if (command.getShape() instanceof RadiusShape) {
                commandParams.add("BYRADIUS");
                RadiusShape shape = (RadiusShape) command.getShape();
                commandParams.add(shape.getRadius().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            } else if (command.getShape() instanceof BoxShape) {
                BoxShape shape = (BoxShape) command.getShape();
                commandParams.add("BYBOX");
                commandParams.add(shape.getBoundingBox().getWidth().getValue());
                commandParams.add(shape.getBoundingBox().getHeight().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            }

            RedisGeoCommands.GeoSearchCommandArgs args = command.getArgs()
                    .orElse(RedisGeoCommands.GeoSearchCommandArgs.newGeoSearchArgs());
            if (args.hasSortDirection()) {
                commandParams.add(args.getSortDirection());
            }
            if (args.getLimit() != null) {
                commandParams.add("COUNT");
                commandParams.add(args.getLimit());
                // ANY returns as soon as COUNT matches are found, without sorting all candidates.
                if (args.hasAnyLimit()) {
                    commandParams.add("ANY");
                }
            }

            RedisCommand<GeoResults<GeoLocation<ByteBuffer>>> cmd;
            if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
                cmd = new RedisCommand<>("GEOSEARCH", postitionDecoder);
                commandParams.add("WITHCOORD");
            } else {
                MultiDecoder<GeoResults<GeoLocation<ByteBuffer>>> distanceDecoder = new ListMultiDecoder2(new ByteBufferGeoResultsDecoder(command.getShape().getMetric()), new GeoDistanceDecoder());
                cmd = new RedisCommand<>("GEOSEARCH", distanceDecoder);
                commandParams.add("WITHDIST");
            }

            Mono<GeoResults<GeoLocation<ByteBuffer>>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, commandParams.toArray());
            return m.map(v -> new CommandResponse<>(command, Flux.fromIterable(v.getContent())));
        });
    }

    /**
     * GEOSEARCHSTORE: like {@link #geoSearch} but stores the matches into a
     * destination key and returns the number of stored elements.
     */
    @Override
    public Flux<NumericResponse<GeoSearchStoreCommand, Long>> geoSearchStore(Publisher<GeoSearchStoreCommand> commands) {
        return execute(commands, command -> {
            Assert.notNull(command.getArgs(), "Args must not be null!");
            Assert.notNull(command.getKey(), "Key must not be null!");
            Assert.notNull(command.getDestKey(), "DestKey must not be null!");
            Assert.notNull(command.getShape(), "Shape must not be null!");
            Assert.notNull(command.getReference(), "Reference must not be null!");

            List<Object> commandParams = new ArrayList<>();
            // GEOSEARCHSTORE argument order: destination first, then source.
            byte[] destKeyBuf = toByteArray(command.getDestKey());
            commandParams.add(destKeyBuf);
            byte[] keyBuf = toByteArray(command.getKey());
            commandParams.add(keyBuf);

            if (command.getReference() instanceof GeoReference.GeoCoordinateReference) {
                GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) command.getReference();
                commandParams.add("FROMLONLAT");
                commandParams.add(convert(ref.getLongitude()));
                commandParams.add(convert(ref.getLatitude()));
            } else if (command.getReference() instanceof GeoReference.GeoMemberReference) {
                GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) command.getReference();
                commandParams.add("FROMMEMBER");
                commandParams.add(encode(ref.getMember()));
            }

            if (command.getShape() instanceof RadiusShape) {
                RadiusShape shape = (RadiusShape) command.getShape();
                commandParams.add("BYRADIUS");
                commandParams.add(shape.getRadius().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            } else if (command.getShape() instanceof BoxShape) {
                BoxShape shape = (BoxShape) command.getShape();
                commandParams.add("BYBOX");
                commandParams.add(shape.getBoundingBox().getWidth().getValue());
                commandParams.add(shape.getBoundingBox().getHeight().getValue());
                commandParams.add(convert(shape.getMetric()).getAbbreviation());
            }

            RedisGeoCommands.GeoSearchStoreCommandArgs args = command.getArgs()
                    .orElse(RedisGeoCommands.GeoSearchStoreCommandArgs.newGeoSearchStoreArgs());
            if (args.hasSortDirection()) {
                commandParams.add(args.getSortDirection());
            }
            if (args.getLimit() != null) {
                commandParams.add("COUNT");
                commandParams.add(args.getLimit());
                if (args.hasAnyLimit()) {
                    commandParams.add("ANY");
                }
            }
            if (args.isStoreDistance()) {
                commandParams.add("STOREDIST");
            }

            // NOTE(review): this write is routed by the source key (keyBuf), not the
            // destination key — in a cluster the two may hash to different slots; confirm.
            Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, RedisCommands.GEOSEARCHSTORE_STORE, commandParams.toArray());
            return m.map(v -> new NumericResponse<>(command, v));
        });
    }

    // Maps Spring's NEUTRAL metric onto the Redis default (meters);
    // any other metric is passed through unchanged.
    private Metric convert(Metric metric) {
        if (metric == Metrics.NEUTRAL) {
            return RedisGeoCommands.DistanceUnit.METERS;
        }
        return metric;
    }
}
| RedissonReactiveGeoCommands |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/FuturesTest.java | {
"start": 109323,
"end": 110183
} | class ____ {

    // Simple immutable holder pairing a future under test with a descriptive
    // label and a completion action.

    // The future being exercised by the test.
    final ListenableFuture<String> future;
    // Human-readable label for this future (e.g. for failure messages) —
    // presumably used in test diagnostics; usage is outside this chunk.
    final String name;
    // Action that completes the future when run — presumably by setting a
    // value, failing it, or cancelling it; confirm against the call sites.
    final Runnable finisher;

    TestFuture(ListenableFuture<String> future, String name, Runnable finisher) {
        this.future = future;
        this.name = name;
        this.finisher = finisher;
    }
}
/**
* A collection of several futures, covering cancellation, success, and failure (both {@link
* ExecutionException} and {@link RuntimeException}), both immediate and delayed. We use each
* possible pair of these futures in {@link FuturesTest#runExtensiveMergerTest}.
*
* <p>Each test requires a new {@link TestFutureBatch} because we need new delayed futures each
* time, as the old delayed futures were completed as part of the old test.
*/
@J2ktIncompatible
@GwtIncompatible // used only in GwtIncompatible tests
private static final | TestFuture |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/handler/ConversionServiceExposingInterceptor.java | {
"start": 1409,
"end": 2196
} | class ____ implements HandlerInterceptor {

    /** Service exposed to every intercepted request under the {@link ConversionService} class name. */
    private final ConversionService service;

    /**
     * Creates a new {@link ConversionServiceExposingInterceptor}.
     * @param conversionService the conversion service to export to request scope when this interceptor is invoked
     */
    public ConversionServiceExposingInterceptor(ConversionService conversionService) {
        Assert.notNull(conversionService, "The ConversionService may not be null");
        this.service = conversionService;
    }

    /**
     * Stores the configured {@link ConversionService} as a request attribute,
     * keyed by the fully qualified {@code ConversionService} class name, and
     * always allows handler execution to proceed.
     */
    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)
            throws ServletException, IOException {
        String attributeName = ConversionService.class.getName();
        request.setAttribute(attributeName, this.service);
        return true;
    }
}
| ConversionServiceExposingInterceptor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.