language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java | {
"start": 36488,
"end": 36775
} | class ____ {
public void doTest() {
Client client = new Client();
int x = client.cast(1) * 10;
}
}
""")
.addOutputLines(
"out/Caller.java",
"""
public final | Caller |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/FlameGraphTypeQueryParameter.java | {
"start": 1791,
"end": 2193
} | enum ____ {
/** Type of the Flame Graph that includes threads in all possible states. */
FULL,
/** Type of the Flame Graph that includes threads in states Thread.State.[RUNNABLE, NEW]. */
ON_CPU,
/**
* Type of the Flame Graph that includes threads in states Thread.State.[TIMED_WAITING,
* BLOCKED, WAITING].
*/
OFF_CPU
}
}
| Type |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/producer/illegal/ProducerFieldWithInjectTest.java | {
"start": 530,
"end": 941
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder().beanClasses(IllegalProducer.class).shouldFail()
.build();
@Test
public void testFailure() {
Throwable error = container.getFailure();
assertNotNull(error);
assertTrue(error instanceof DefinitionException);
}
@Dependent
static | ProducerFieldWithInjectTest |
java | apache__kafka | generator/src/main/java/org/apache/kafka/message/ApiMessageTypeGenerator.java | {
"start": 1550,
"end": 5112
} | class ____ {
short apiKey;
MessageSpec requestSpec;
MessageSpec responseSpec;
ApiData(short apiKey) {
this.apiKey = apiKey;
}
String name() {
if (requestSpec != null) {
return MessageGenerator.stripSuffix(requestSpec.name(),
MessageGenerator.REQUEST_SUFFIX);
} else if (responseSpec != null) {
return MessageGenerator.stripSuffix(responseSpec.name(),
MessageGenerator.RESPONSE_SUFFIX);
} else {
throw new RuntimeException("Neither requestSpec nor responseSpec is defined " +
"for API key " + apiKey);
}
}
String requestSchema() {
if (requestSpec == null) {
return "null";
} else if (!requestSpec.hasValidVersion()) {
return "new Schema[0]";
} else {
return String.format("%sData.SCHEMAS", requestSpec.name());
}
}
String responseSchema() {
if (responseSpec == null) {
return "null";
} else if (!requestSpec.hasValidVersion()) {
return "new Schema[0]";
} else {
return String.format("%sData.SCHEMAS", responseSpec.name());
}
}
}
public ApiMessageTypeGenerator(String packageName) {
this.headerGenerator = new HeaderGenerator(packageName);
this.apis = new TreeMap<>();
this.buffer = new CodeBuffer();
}
@Override
public String outputName() {
return MessageGenerator.API_MESSAGE_TYPE_JAVA;
}
@Override
public void registerMessageType(MessageSpec spec) {
switch (spec.type()) {
case REQUEST: {
short apiKey = spec.apiKey().get();
ApiData data = apis.get(apiKey);
if (!apis.containsKey(apiKey)) {
data = new ApiData(apiKey);
apis.put(apiKey, data);
}
if (data.requestSpec != null) {
throw new RuntimeException("Found more than one request with " +
"API key " + spec.apiKey().get());
}
data.requestSpec = spec;
if (spec.listeners() != null) {
for (RequestListenerType listener : spec.listeners()) {
apisByListener.putIfAbsent(listener, new ArrayList<>());
apisByListener.get(listener).add(data);
}
}
break;
}
case RESPONSE: {
short apiKey = spec.apiKey().get();
ApiData data = apis.get(apiKey);
if (!apis.containsKey(apiKey)) {
data = new ApiData(apiKey);
apis.put(apiKey, data);
}
if (data.responseSpec != null) {
throw new RuntimeException("Found more than one response with " +
"API key " + spec.apiKey().get());
}
data.responseSpec = spec;
break;
}
default:
// do nothing
break;
}
}
@Override
public void generateAndWrite(BufferedWriter writer) throws IOException {
generate();
write(writer);
}
private void generate() {
buffer.printf("public | ApiData |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/EnclosingTest.java | {
"start": 5306,
"end": 5895
} | class ____ {
A() {
boolean foo = true;
for (int i = 0; i < 100; i++) {
foo = !foo;
}
}
}
""");
assertCompiles(fooIsUsedUnderLoopCondition(false));
assertCompiles(fooIsChildOfLoopCondition(false));
assertCompiles(fooIsUsedUnderLoopStatement(true));
assertCompiles(fooIsUsedUnderLoopStatementAccordingToBlockOrCase(true));
}
/** Make sure the scanners are doing what we expect. */
@Test
public void usedElsewhereInLoop() {
writeFile(
"A.java",
"""
public | A |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/jta/JtaTransactionObject.java | {
"start": 1386,
"end": 2764
} | class ____ implements SmartTransactionObject {
private final UserTransaction userTransaction;
boolean resetTransactionTimeout = false;
/**
* Create a new JtaTransactionObject for the given JTA UserTransaction.
* @param userTransaction the JTA UserTransaction for the current transaction
* (either a shared object or retrieved through a fresh per-transaction lookup)
*/
public JtaTransactionObject(UserTransaction userTransaction) {
this.userTransaction = userTransaction;
}
/**
* Return the JTA UserTransaction object for the current transaction.
*/
public final UserTransaction getUserTransaction() {
return this.userTransaction;
}
/**
* This implementation checks the UserTransaction's rollback-only flag.
*/
@Override
public boolean isRollbackOnly() {
try {
int jtaStatus = this.userTransaction.getStatus();
return (jtaStatus == Status.STATUS_MARKED_ROLLBACK || jtaStatus == Status.STATUS_ROLLEDBACK);
}
catch (SystemException ex) {
throw new TransactionSystemException("JTA failure on getStatus", ex);
}
}
/**
* This implementation triggers flush callbacks,
* assuming that they will flush all affected ORM sessions.
* @see org.springframework.transaction.support.TransactionSynchronization#flush()
*/
@Override
public void flush() {
TransactionSynchronizationUtils.triggerFlush();
}
}
| JtaTransactionObject |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentStateWrappedLongitudeState.java | {
"start": 1020,
"end": 3666
} | class ____ implements AggregatorState {
// Only geo points support longitude wrapping.
private static final PointType POINT_TYPE = PointType.GEO;
private boolean seen = false;
private int top = Integer.MIN_VALUE;
private int bottom = Integer.MAX_VALUE;
private int negLeft = Integer.MAX_VALUE;
private int negRight = Integer.MIN_VALUE;
private int posLeft = Integer.MAX_VALUE;
private int posRight = Integer.MIN_VALUE;
private final SpatialEnvelopeVisitor.GeoPointVisitor geoPointVisitor = new SpatialEnvelopeVisitor.GeoPointVisitor(
SpatialEnvelopeVisitor.WrapLongitude.WRAP
);
@Override
public void close() {}
@Override
public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
assert blocks.length >= offset + 6;
var blockFactory = driverContext.blockFactory();
blocks[offset + 0] = blockFactory.newConstantIntBlockWith(top, 1);
blocks[offset + 1] = blockFactory.newConstantIntBlockWith(bottom, 1);
blocks[offset + 2] = blockFactory.newConstantIntBlockWith(negLeft, 1);
blocks[offset + 3] = blockFactory.newConstantIntBlockWith(negRight, 1);
blocks[offset + 4] = blockFactory.newConstantIntBlockWith(posLeft, 1);
blocks[offset + 5] = blockFactory.newConstantIntBlockWith(posRight, 1);
}
public void add(Geometry geo) {
geoPointVisitor.reset();
if (geo.visit(new SpatialEnvelopeVisitor(geoPointVisitor))) {
add(
POINT_TYPE.encoder().encodeY(geoPointVisitor.getTop()),
POINT_TYPE.encoder().encodeY(geoPointVisitor.getBottom()),
SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegLeft()),
SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegRight()),
SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosLeft()),
SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosRight())
);
}
}
/**
* This method is used when extents are extracted from the doc-values field by the {@link GeometryDocValueReader}.
* This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation.
*/
public void add(int p, IntBlock values) {
if (values.getValueCount(p) != 6) {
throw new IllegalArgumentException("Expected 6 values, got " + values.getValueCount(p));
}
int i = values.getFirstValueIndex(p);
// Values are stored according to the order defined in the Extent | SpatialExtentStateWrappedLongitudeState |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/stat/JdbcStatementStat.java | {
"start": 946,
"end": 5416
} | class ____ implements JdbcStatementStatMBean {
private final AtomicLong createCount = new AtomicLong(0); // 执行createStatement的计数
private final AtomicLong prepareCount = new AtomicLong(0); // 执行parepareStatement的计数
private final AtomicLong prepareCallCount = new AtomicLong(0); // 执行preCall的计数
private final AtomicLong closeCount = new AtomicLong(0); // Statement关闭的计数
private final AtomicInteger runningCount = new AtomicInteger();
private final AtomicInteger concurrentMax = new AtomicInteger();
private final AtomicLong count = new AtomicLong();
private final AtomicLong errorCount = new AtomicLong();
private final AtomicLong nanoTotal = new AtomicLong();
private Throwable lastError;
private long lastErrorTime;
private long lastSampleTime;
private final Histogram histogram = new Histogram(new long[]{10, 100, 1000, 1000 * 10});
public long[] getHistogramRanges() {
return histogram.getRanges();
}
public long[] getHistogramValues() {
return histogram.toArray();
}
public void reset() {
runningCount.set(0);
concurrentMax.set(0);
count.set(0);
errorCount.set(0);
nanoTotal.set(0);
lastError = null;
lastErrorTime = 0;
lastSampleTime = 0;
createCount.set(0);
prepareCount.set(0);
prepareCallCount.set(0);
closeCount.set(0);
histogram.reset();
}
public void afterExecute(long nanoSpan) {
runningCount.decrementAndGet();
nanoTotal.addAndGet(nanoSpan);
long millis = nanoSpan / (1000 * 1000);
histogram.record(millis);
}
public void beforeExecute() {
int invoking = runningCount.incrementAndGet();
for (; ; ) {
int max = concurrentMax.get();
if (invoking > max) {
if (concurrentMax.compareAndSet(max, invoking)) {
break;
}
} else {
break;
}
}
count.incrementAndGet();
lastSampleTime = System.currentTimeMillis();
}
public long getErrorCount() {
return errorCount.get();
}
public int getRunningCount() {
return runningCount.get();
}
public int getConcurrentMax() {
return concurrentMax.get();
}
public long getExecuteCount() {
return count.get();
}
public Date getExecuteLastTime() {
if (lastSampleTime == 0) {
return null;
}
return new Date(lastSampleTime);
}
public long getNanoTotal() {
return nanoTotal.get();
}
public long getMillisTotal() {
return nanoTotal.get() / (1000 * 1000);
}
public Throwable getLastException() {
return lastError;
}
public Date getLastErrorTime() {
if (lastErrorTime <= 0) {
return null;
}
return new Date(lastErrorTime);
}
public void error(Throwable error) {
errorCount.incrementAndGet();
lastError = error;
lastErrorTime = System.currentTimeMillis();
}
@Override
public long getCloseCount() {
return closeCount.get();
}
@Override
public long getCreateCount() {
return createCount.get();
}
@Override
public long getExecuteMillisTotal() {
return this.getNanoTotal() / (1000 * 1000);
}
@Override
public long getPrepareCallCount() {
return prepareCallCount.get();
}
@Override
public long getPrepareCount() {
return prepareCount.get();
}
@Override
public long getExecuteSuccessCount() {
return this.getExecuteCount() - this.getErrorCount() - this.getRunningCount();
}
@Override
public CompositeData getLastError() throws JMException {
return JMXUtils.getErrorCompositeData(this.getLastException());
}
public void incrementCreateCounter() {
createCount.incrementAndGet();
}
public void incrementPrepareCallCount() {
prepareCallCount.incrementAndGet();
}
public void incrementPrepareCounter() {
prepareCount.incrementAndGet();
}
public void incrementStatementCloseCounter() {
closeCount.incrementAndGet();
}
}
| JdbcStatementStat |
java | google__guice | core/test/com/google/inject/RestrictedBindingSourceTest.java | {
"start": 9362,
"end": 9498
} | class ____ extends AbstractModule {
@Provides
@DnsAddress
int rogueDns() {
return 4;
}
}
static | FooRogueDnsModule |
java | apache__camel | components/camel-freemarker/src/generated/java/org/apache/camel/component/freemarker/FreemarkerEndpointUriFactory.java | {
"start": 520,
"end": 2372
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":resourceUri";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(8);
props.add("allowContextMapAll");
props.add("allowTemplateFromHeader");
props.add("configuration");
props.add("contentCache");
props.add("encoding");
props.add("lazyStartProducer");
props.add("resourceUri");
props.add("templateUpdateDelay");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "freemarker".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "resourceUri", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| FreemarkerEndpointUriFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/SameTableAliasInSubqueryWithEmbeddedTest.java | {
"start": 4465,
"end": 4752
} | class ____ implements Serializable {
private String value;
public PrimaryKey() {
value = SafeRandomUUIDGenerator.safeRandomUUIDAsString();
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
public | PrimaryKey |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-kafka/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/kafka/deployment/dev/PriceResource.java | {
"start": 314,
"end": 698
} | class ____ {
private final Publisher<Double> processedPrices;
public PriceResource(@Channel("processed-prices") Publisher<Double> processedPrices) {
this.processedPrices = processedPrices;
}
@GET
@Path("/stream")
@Produces(MediaType.SERVER_SENT_EVENTS)
public Publisher<Double> ssePrices() {
return this.processedPrices;
}
}
| PriceResource |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/taobao/IntegerAsStringTest.java | {
"start": 147,
"end": 390
} | class ____ extends TestCase {
public void test_0 () throws Exception {
VO vo = JSON.parseObject("{\"value\":\"1001\"}", VO.class);
Assert.assertEquals(1001, vo.value.intValue());
}
public static | IntegerAsStringTest |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/BoringSSLTask.java | {
"start": 1697,
"end": 1775
} | interface ____ {
void onResult(long ssl, int result);
}
}
| TaskCallback |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/array/BooleanPrimitiveArraySerializerTest.java | {
"start": 1056,
"end": 1745
} | class ____ extends SerializerTestBase<boolean[]> {
@Override
protected TypeSerializer<boolean[]> createSerializer() {
return new BooleanPrimitiveArraySerializer();
}
@Override
protected Class<boolean[]> getTypeClass() {
return boolean[].class;
}
@Override
protected int getLength() {
return -1;
}
@Override
protected boolean[][] getTestData() {
return new boolean[][] {
new boolean[] {true, false, true, true, true, true, false, true},
new boolean[] {},
new boolean[] {false, true, false, false, false, false, true, false}
};
}
}
| BooleanPrimitiveArraySerializerTest |
java | apache__flink | flink-table/flink-sql-parser/src/test/java/org/apache/flink/sql/parser/TableApiIdentifierParsingTest.java | {
"start": 1708,
"end": 3740
} | class ____ {
private static final String ANTHROPOS_IN_GREEK_IN_UNICODE =
"#03B1#03BD#03B8#03C1#03C9#03C0#03BF#03C2";
private static final String ANTHROPOS_IN_GREEK = "ανθρωπος";
static Stream<Arguments> parameters() {
return Stream.of(
of("array", singletonList("array")),
of("table", singletonList("table")),
of("cat.db.array", asList("cat", "db", "array")),
of("`cat.db`.table", asList("cat.db", "table")),
of("db.table", asList("db", "table")),
of("`ta``ble`", singletonList("ta`ble")),
of("`c``at`.`d``b`.`ta``ble`", asList("c`at", "d`b", "ta`ble")),
of(
"db.U&\"" + ANTHROPOS_IN_GREEK_IN_UNICODE + "\" UESCAPE '#'",
asList("db", ANTHROPOS_IN_GREEK)),
of("db.ανθρωπος", asList("db", ANTHROPOS_IN_GREEK)));
}
@ParameterizedTest(name = "Parsing: {0}. Expected identifier: {1}")
@MethodSource("parameters")
void testTableApiIdentifierParsing(
String stringIdentifier, List<String> expectedParsedIdentifier) throws ParseException {
FlinkSqlParserImpl parser = createFlinkParser(stringIdentifier);
SqlIdentifier sqlIdentifier = parser.TableApiIdentifier();
assertThat(sqlIdentifier.names).isEqualTo(expectedParsedIdentifier);
}
private FlinkSqlParserImpl createFlinkParser(String expr) {
SourceStringReader reader = new SourceStringReader(expr);
FlinkSqlParserImpl parser =
(FlinkSqlParserImpl) FlinkSqlParserImpl.FACTORY.getParser(reader);
parser.setTabSize(1);
parser.setUnquotedCasing(Lex.JAVA.unquotedCasing);
parser.setQuotedCasing(Lex.JAVA.quotedCasing);
parser.setIdentifierMaxLength(256);
parser.setConformance(FlinkSqlConformance.DEFAULT);
parser.switchTo(SqlAbstractParserImpl.LexicalState.BTID);
return parser;
}
}
| TableApiIdentifierParsingTest |
java | apache__camel | components/camel-aws/camel-aws2-s3/src/test/java/org/apache/camel/component/aws2/s3/integration/S3StreamUploadTimeoutIT.java | {
"start": 1453,
"end": 3369
} | class ____ extends Aws2S3Base {
@EndpointInject
private ProducerTemplate template;
@EndpointInject("mock:result")
private MockEndpoint result;
@Test
public void sendIn() throws Exception {
for (int i = 1; i <= 2; i++) {
int count = i * 23;
result.expectedMessageCount(count);
for (int j = 0; j < 23; j++) {
template.sendBody("direct:stream1", "Andrea\n");
}
Awaitility.await().atMost(11, TimeUnit.SECONDS)
.untilAsserted(() -> MockEndpoint.assertIsSatisfied(context));
Awaitility.await().atMost(11, TimeUnit.SECONDS)
.untilAsserted(() -> {
Exchange ex = template.request("direct:listObjects", this::process);
List<S3Object> resp = ex.getMessage().getBody(List.class);
assertEquals(1, resp.size());
});
}
}
private void process(Exchange exchange) {
exchange.getIn().setHeader(AWS2S3Constants.S3_OPERATION, AWS2S3Operations.listObjects);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String awsEndpoint1
= String.format(
"aws2-s3://%s?autoCreateBucket=true&streamingUploadMode=true&keyName=fileTest.txt&batchMessageNumber=25&namingStrategy=random&streamingUploadTimeout=10000",
name.get());
from("direct:stream1").to(awsEndpoint1).to("mock:result");
String awsEndpoint = String.format("aws2-s3://%s?autoCreateBucket=true",
name.get());
from("direct:listObjects").to(awsEndpoint);
}
};
}
}
| S3StreamUploadTimeoutIT |
java | google__dagger | javatests/dagger/functional/producers/monitoring/MonitoredComponent.java | {
"start": 880,
"end": 950
} | interface ____ {
ListenableFuture<String> output();
}
| MonitoredComponent |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_filtered_baseTest.java | {
"start": 1025,
"end": 1827
} | class ____ {
protected Employee yoda;
protected Employee obiwan;
protected Employee luke;
protected Employee noname;
protected List<Employee> employees;
@BeforeEach
public void setUp() {
yoda = new Employee(1L, new Name("Yoda"), 800);
obiwan = new Employee(2L, new Name("Obi"), 800);
luke = new Employee(3L, new Name("Luke", "Skywalker"), 26);
noname = new Employee(4L, null, 10);
employees = newArrayList(yoda, luke, obiwan, noname);
}
public IterableAssert_filtered_baseTest() {
super();
}
protected static Iterable<TolkienCharacter> hobbits() {
TolkienCharacter frodo = TolkienCharacter.of("Frodo", 33, HOBBIT);
TolkienCharacter sam = TolkienCharacter.of("Sam", 35, HOBBIT);
return asList(frodo, sam);
}
}
| IterableAssert_filtered_baseTest |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/descriptor/FileSource.java | {
"start": 1112,
"end": 4492
} | class ____ implements FileSystemSource {
@Serial
private static final long serialVersionUID = 1L;
/**
* Create a new {@code FileSource} using the supplied {@link File file}.
*
* @param file the source file; must not be {@code null}
*/
public static FileSource from(File file) {
return new FileSource(file);
}
/**
* Create a new {@code FileSource} using the supplied {@link File file} and
* {@link FilePosition filePosition}.
*
* @param file the source file; must not be {@code null}
* @param filePosition the position in the source file; may be {@code null}
* @see #withPosition(FilePosition)
*/
public static FileSource from(File file, @Nullable FilePosition filePosition) {
return new FileSource(file, filePosition);
}
private final File file;
private final @Nullable FilePosition filePosition;
private FileSource(File file) {
this(file, null);
}
private FileSource(File file, @Nullable FilePosition filePosition) {
Preconditions.notNull(file, "file must not be null");
try {
this.file = file.getCanonicalFile();
}
catch (IOException ex) {
throw new JUnitException("Failed to retrieve canonical path for file: " + file, ex);
}
this.filePosition = filePosition;
}
private FileSource(FileSource fileSource, @Nullable FilePosition filePosition) {
this.file = fileSource.file;
this.filePosition = filePosition;
}
/**
* Get the {@link URI} for the source {@linkplain #getFile file}.
*
* @return the source {@code URI}; never {@code null}
*/
@Override
public URI getUri() {
return getFile().toURI();
}
/**
* Get the source {@linkplain File file}.
*
* @return the source file; never {@code null}
*/
@Override
public File getFile() {
return this.file;
}
/**
* Get the {@link FilePosition}, if available.
*/
public Optional<FilePosition> getPosition() {
return Optional.ofNullable(this.filePosition);
}
/**
* {@return a {@code FileSource} based on this instance but with the
* supplied {@link FilePosition}}
*
* <p>If the supplied {@code FilePosition}
* {@linkplain Objects#equals(Object, Object) equals} the existing one, this
* method returns {@code this}. Otherwise, a new instance is created and
* returned.
*
* <p>Calling this method rather than creating a new {@code FileSource} via
* {@link #from(File, FilePosition)} avoids the overhead of redundant
* canonical path resolution.
*
* @param filePosition the position in the source file; may be {@code null}
* @since 1.14
*/
@API(status = EXPERIMENTAL, since = "1.14")
public FileSource withPosition(@Nullable FilePosition filePosition) {
if (Objects.equals(this.filePosition, filePosition)) {
return this;
}
return new FileSource(this, filePosition);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FileSource that = (FileSource) o;
return Objects.equals(this.file, that.file) //
&& Objects.equals(this.filePosition, that.filePosition);
}
@Override
public int hashCode() {
return Objects.hash(this.file, this.filePosition);
}
@Override
public String toString() {
// @formatter:off
return new ToStringBuilder(this)
.append("file", this.file)
.append("filePosition", this.filePosition)
.toString();
// @formatter:on
}
}
| FileSource |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterUnusedInFormalsTest.java | {
"start": 4367,
"end": 4691
} | class ____ {
static <T> T noop(T t) {
return t;
}
}
""")
.doTest();
}
@Test
public void okNotMyParam() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.List;
| Test |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/PlaceholderConfigurerSupport.java | {
"start": 3795,
"end": 9946
} | class ____ extends PropertyResourceConfigurer
implements BeanNameAware, BeanFactoryAware {
/** Default placeholder prefix: {@value}. */
public static final String DEFAULT_PLACEHOLDER_PREFIX = SystemPropertyUtils.PLACEHOLDER_PREFIX;
/** Default placeholder suffix: {@value}. */
public static final String DEFAULT_PLACEHOLDER_SUFFIX = SystemPropertyUtils.PLACEHOLDER_SUFFIX;
/** Default value separator: {@value}. */
public static final String DEFAULT_VALUE_SEPARATOR = SystemPropertyUtils.VALUE_SEPARATOR;
/**
* Default escape character: {@code '\'}.
* @since 6.2
* @see AbstractPropertyResolver#getDefaultEscapeCharacter()
*/
public static final Character DEFAULT_ESCAPE_CHARACTER = SystemPropertyUtils.ESCAPE_CHARACTER;
/** Defaults to {@value #DEFAULT_PLACEHOLDER_PREFIX}. */
protected String placeholderPrefix = DEFAULT_PLACEHOLDER_PREFIX;
/** Defaults to {@value #DEFAULT_PLACEHOLDER_SUFFIX}. */
protected String placeholderSuffix = DEFAULT_PLACEHOLDER_SUFFIX;
/** Defaults to {@value #DEFAULT_VALUE_SEPARATOR}. */
protected @Nullable String valueSeparator = DEFAULT_VALUE_SEPARATOR;
/**
* The default is determined by {@link AbstractPropertyResolver#getDefaultEscapeCharacter()}.
*/
protected @Nullable Character escapeCharacter = AbstractPropertyResolver.getDefaultEscapeCharacter();
protected boolean trimValues = false;
protected @Nullable String nullValue;
protected boolean ignoreUnresolvablePlaceholders = false;
private @Nullable String beanName;
private @Nullable BeanFactory beanFactory;
/**
* Set the prefix that a placeholder string starts with.
* <p>The default is {@value #DEFAULT_PLACEHOLDER_PREFIX}.
*/
public void setPlaceholderPrefix(String placeholderPrefix) {
this.placeholderPrefix = placeholderPrefix;
}
/**
* Set the suffix that a placeholder string ends with.
* <p>The default is {@value #DEFAULT_PLACEHOLDER_SUFFIX}.
*/
public void setPlaceholderSuffix(String placeholderSuffix) {
this.placeholderSuffix = placeholderSuffix;
}
/**
* Specify the separating character between the placeholder variable and the
* associated default value, or {@code null} if no such special character
* should be processed as a value separator.
* <p>The default is {@value #DEFAULT_VALUE_SEPARATOR}.
*/
public void setValueSeparator(@Nullable String valueSeparator) {
this.valueSeparator = valueSeparator;
}
/**
* Set the escape character to use to ignore the
* {@linkplain #setPlaceholderPrefix(String) placeholder prefix} and the
* {@linkplain #setValueSeparator(String) value separator}, or {@code null}
* if no escaping should take place.
* <p>The default is determined by {@link AbstractPropertyResolver#getDefaultEscapeCharacter()}.
* @since 6.2
*/
public void setEscapeCharacter(@Nullable Character escapeCharacter) {
this.escapeCharacter = escapeCharacter;
}
/**
* Specify whether to trim resolved values before applying them,
* removing superfluous whitespace from the beginning and end.
* <p>Default is {@code false}.
* @since 4.3
*/
public void setTrimValues(boolean trimValues) {
this.trimValues = trimValues;
}
/**
* Set a value that should be treated as {@code null} when resolved
* as a placeholder value: for example, "" (empty String) or "null".
* <p>Note that this will only apply to full property values,
* not to parts of concatenated values.
* <p>By default, no such null value is defined. This means that
* there is no way to express {@code null} as a property value
* unless you explicitly map a corresponding value here.
*/
public void setNullValue(String nullValue) {
this.nullValue = nullValue;
}
/**
* Set whether to ignore unresolvable placeholders.
* <p>Default is "false": An exception will be thrown if a placeholder fails
* to resolve. Switch this flag to "true" in order to preserve the placeholder
* String as-is in such a case, leaving it up to other placeholder configurers
* to resolve it.
*/
public void setIgnoreUnresolvablePlaceholders(boolean ignoreUnresolvablePlaceholders) {
this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders;
}
/**
* Only necessary to check that we're not parsing our own bean definition,
* to avoid failing on unresolvable placeholders in properties file locations.
* The latter case can happen with placeholders for system properties in
* resource locations.
* @see #setLocations
* @see org.springframework.core.io.ResourceEditor
*/
@Override
public void setBeanName(String beanName) {
this.beanName = beanName;
}
/**
* Only necessary to check that we're not parsing our own bean definition,
* to avoid failing on unresolvable placeholders in properties file locations.
* The latter case can happen with placeholders for system properties in
* resource locations.
* @see #setLocations
* @see org.springframework.core.io.ResourceEditor
*/
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
protected void doProcessProperties(ConfigurableListableBeanFactory beanFactoryToProcess,
StringValueResolver valueResolver) {
BeanDefinitionVisitor visitor = new BeanDefinitionVisitor(valueResolver);
String[] beanNames = beanFactoryToProcess.getBeanDefinitionNames();
for (String curName : beanNames) {
// Check that we're not parsing our own bean definition,
// to avoid failing on unresolvable placeholders in properties file locations.
if (!(curName.equals(this.beanName) && beanFactoryToProcess.equals(this.beanFactory))) {
BeanDefinition bd = beanFactoryToProcess.getBeanDefinition(curName);
try {
visitor.visitBeanDefinition(bd);
}
catch (Exception ex) {
throw new BeanDefinitionStoreException(bd.getResourceDescription(), curName, ex.getMessage(), ex);
}
}
}
// Resolve placeholders in alias target names and aliases as well.
beanFactoryToProcess.resolveAliases(valueResolver);
// Resolve placeholders in embedded values such as annotation attributes.
beanFactoryToProcess.addEmbeddedValueResolver(valueResolver);
}
}
| PlaceholderConfigurerSupport |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContext.java | {
"start": 2521,
"end": 9532
} | class ____ implements Writeable, ToXContentObject {
public static final ParseField INCREMENTAL_METRIC_VALUE_MS = new ParseField("incremental_metric_value_ms");
public static final ParseField LATEST_TIMESTAMP = new ParseField("latest_timestamp");
public static final ParseField PREVIOUS_EXPONENTIAL_AVERAGE_MS = new ParseField("previous_exponential_average_ms");
public static final ConstructingObjectParser<ExponentialAverageCalculationContext, Void> PARSER = new ConstructingObjectParser<>(
"exponential_average_calculation_context",
true,
args -> {
Double incrementalMetricValueMs = (Double) args[0];
Instant latestTimestamp = (Instant) args[1];
Double previousExponentialAverageMs = (Double) args[2];
return new ExponentialAverageCalculationContext(
getOrDefault(incrementalMetricValueMs, 0.0),
latestTimestamp,
previousExponentialAverageMs
);
}
);
static {
PARSER.declareDouble(optionalConstructorArg(), INCREMENTAL_METRIC_VALUE_MS);
PARSER.declareField(
optionalConstructorArg(),
p -> TimeUtils.parseTimeFieldToInstant(p, LATEST_TIMESTAMP.getPreferredName()),
LATEST_TIMESTAMP,
ObjectParser.ValueType.VALUE
);
PARSER.declareDouble(optionalConstructorArg(), PREVIOUS_EXPONENTIAL_AVERAGE_MS);
}
private static final TemporalUnit WINDOW_UNIT = ChronoUnit.HOURS;
private static final Duration WINDOW_SIZE = WINDOW_UNIT.getDuration();
private double incrementalMetricValueMs;
private Instant latestTimestamp;
private Double previousExponentialAverageMs;
public ExponentialAverageCalculationContext() {
this(0.0, null, null);
}
public ExponentialAverageCalculationContext(
double incrementalMetricValueMs,
@Nullable Instant latestTimestamp,
@Nullable Double previousExponentialAverageMs
) {
this.incrementalMetricValueMs = incrementalMetricValueMs;
this.latestTimestamp = latestTimestamp != null ? Instant.ofEpochMilli(latestTimestamp.toEpochMilli()) : null;
this.previousExponentialAverageMs = previousExponentialAverageMs;
}
public ExponentialAverageCalculationContext(ExponentialAverageCalculationContext lhs) {
this(lhs.incrementalMetricValueMs, lhs.latestTimestamp, lhs.previousExponentialAverageMs);
}
public ExponentialAverageCalculationContext(StreamInput in) throws IOException {
this.incrementalMetricValueMs = in.readDouble();
this.latestTimestamp = in.readOptionalInstant();
this.previousExponentialAverageMs = in.readOptionalDouble();
}
// Visible for testing
public double getIncrementalMetricValueMs() {
return incrementalMetricValueMs;
}
// Visible for testing
public Instant getLatestTimestamp() {
return latestTimestamp;
}
// Visible for testing
public Double getPreviousExponentialAverageMs() {
return previousExponentialAverageMs;
}
public Double getCurrentExponentialAverageMs() {
if (previousExponentialAverageMs == null || latestTimestamp == null) return incrementalMetricValueMs;
Instant currentWindowStartTimestamp = latestTimestamp.truncatedTo(WINDOW_UNIT);
double alpha = Math.exp(
-(double) Duration.between(currentWindowStartTimestamp, latestTimestamp).toMillis() / WINDOW_SIZE.toMillis()
);
return alpha * previousExponentialAverageMs + (1 - alpha) * incrementalMetricValueMs;
}
/**
* Increments the current accumulated metric value by the given delta.
*/
public void increment(double metricValueDeltaMs) {
incrementalMetricValueMs += metricValueDeltaMs;
}
/**
* Sets the latest timestamp that serves as an indication of the current point in time.
* Before calling this method make sure all the associated calls to {@link #increment} were already made.
*/
public void setLatestTimestamp(Instant newLatestTimestamp) {
Objects.requireNonNull(newLatestTimestamp);
if (this.latestTimestamp != null) {
Instant nextWindowStartTimestamp = this.latestTimestamp.truncatedTo(WINDOW_UNIT).plus(WINDOW_SIZE);
if (newLatestTimestamp.compareTo(nextWindowStartTimestamp) >= 0) {
// When we cross the boundary between windows, we update the exponential average with metric values accumulated so far in
// incrementalMetricValueMs variable.
this.previousExponentialAverageMs = getCurrentExponentialAverageMs();
this.incrementalMetricValueMs = 0.0;
}
} else {
// This is the first time {@link #setLatestRecordTimestamp} is called on this object.
}
if (this.latestTimestamp == null || newLatestTimestamp.isAfter(this.latestTimestamp)) {
this.latestTimestamp = newLatestTimestamp;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeDouble(incrementalMetricValueMs);
out.writeOptionalInstant(latestTimestamp);
out.writeOptionalDouble(previousExponentialAverageMs);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), incrementalMetricValueMs);
if (latestTimestamp != null) {
builder.timestampFieldsFromUnixEpochMillis(
LATEST_TIMESTAMP.getPreferredName(),
LATEST_TIMESTAMP.getPreferredName() + "_string",
latestTimestamp.toEpochMilli()
);
}
if (previousExponentialAverageMs != null) {
builder.field(PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), previousExponentialAverageMs);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (o == null || getClass() != o.getClass()) return false;
ExponentialAverageCalculationContext that = (ExponentialAverageCalculationContext) o;
return this.incrementalMetricValueMs == that.incrementalMetricValueMs
&& Objects.equals(this.latestTimestamp, that.latestTimestamp)
&& Objects.equals(this.previousExponentialAverageMs, that.previousExponentialAverageMs);
}
@Override
public int hashCode() {
return Objects.hash(incrementalMetricValueMs, latestTimestamp, previousExponentialAverageMs);
}
@Override
public String toString() {
return Strings.toString(this);
}
@SuppressWarnings("unchecked")
private static <T> T getOrDefault(@Nullable T value, T defaultValue) {
return value != null ? value : defaultValue;
}
}
| ExponentialAverageCalculationContext |
java | apache__flink | flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java | {
"start": 25406,
"end": 26808
} | enum ____ implements State {
STOPPED;
@Override
public State start(PekkoRpcActor<?> pekkoRpcActor, ClassLoader flinkClassLoader) {
pekkoRpcActor.mainThreadValidator.enterMainThread();
try {
runWithContextClassLoader(
() -> pekkoRpcActor.rpcEndpoint.internalCallOnStart(), flinkClassLoader);
} catch (Throwable throwable) {
pekkoRpcActor.stop(
RpcEndpointTerminationResult.failure(
new RpcException(
String.format(
"Could not start RpcEndpoint %s.",
pekkoRpcActor.rpcEndpoint.getEndpointId()),
throwable)));
} finally {
pekkoRpcActor.mainThreadValidator.exitMainThread();
}
return StartedState.STARTED;
}
@Override
public State stop() {
return STOPPED;
}
@Override
public State terminate(PekkoRpcActor<?> pekkoRpcActor, ClassLoader flinkClassLoader) {
pekkoRpcActor.stop(RpcEndpointTerminationResult.success());
return TerminatingState.TERMINATING;
}
}
@SuppressWarnings("Singleton")
| StoppedState |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cid/EmbeddedInsideEmbeddedIdTest.java | {
"start": 2214,
"end": 2682
} | class ____ {
private InnerWrappingId artifactId;
private String idType;
public static OuterWrappingId of(InnerWrappingId id) {
return new OuterWrappingId( id );
}
protected OuterWrappingId() {
}
public OuterWrappingId(InnerWrappingId artifactId) {
this();
this.artifactId = artifactId;
this.idType = artifactId.getClass().getSimpleName();
}
public String getIdType() {
return idType;
}
}
@Embeddable
public static | OuterWrappingId |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/TransportContext.java | {
"start": 3541,
"end": 4241
} | class ____ implements Closeable {
private static final SparkLogger logger = SparkLoggerFactory.getLogger(TransportContext.class);
private static final NettyLogger nettyLogger = new NettyLogger();
private final TransportConf conf;
private final RpcHandler rpcHandler;
private final boolean closeIdleConnections;
// Non-null if SSL is enabled, null otherwise.
@Nullable private final SSLFactory sslFactory;
// Number of registered connections to the shuffle service
private Counter registeredConnections = new Counter();
/**
* Force to create MessageEncoder and MessageDecoder so that we can make sure they will be created
* before switching the current context | TransportContext |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java | {
"start": 1487,
"end": 1654
} | interface ____ extends
ZKFCProtocolService.BlockingInterface, VersionedProtocol {
/**
* If any methods need annotation, it can be added here
*/
} | ZKFCProtocolPB |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/type/InstantTypeHandlerTest.java | {
"start": 1017,
"end": 2909
} | class ____ extends BaseTypeHandlerTest {
private static final TypeHandler<Instant> TYPE_HANDLER = new InstantTypeHandler();
private static final Instant INSTANT = Instant.now();
private static final Timestamp TIMESTAMP = Timestamp.from(INSTANT);
@Override
@Test
public void shouldSetParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, INSTANT, null);
verify(ps).setTimestamp(1, TIMESTAMP);
}
@Override
@Test
public void shouldGetResultFromResultSetByName() throws Exception {
when(rs.getTimestamp("column")).thenReturn(TIMESTAMP);
assertEquals(INSTANT, TYPE_HANDLER.getResult(rs, "column"));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromResultSetByName() throws Exception {
when(rs.getTimestamp("column")).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(rs, "column"));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultFromResultSetByPosition() throws Exception {
when(rs.getTimestamp(1)).thenReturn(TIMESTAMP);
assertEquals(INSTANT, TYPE_HANDLER.getResult(rs, 1));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromResultSetByPosition() throws Exception {
when(rs.getTimestamp(1)).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(rs, 1));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultFromCallableStatement() throws Exception {
when(cs.getTimestamp(1)).thenReturn(TIMESTAMP);
assertEquals(INSTANT, TYPE_HANDLER.getResult(cs, 1));
verify(cs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromCallableStatement() throws Exception {
when(cs.getTimestamp(1)).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(cs, 1));
verify(cs, never()).wasNull();
}
}
| InstantTypeHandlerTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldMatchPattern_create_Test.java | {
"start": 1137,
"end": 1911
} | class ____ {
@Test
void should_create_error_message() {
// GIVEN
ErrorMessageFactory factory = shouldMatch("Yoda", "Luke");
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting actual:%n \"Yoda\"%nto match pattern:%n \"Luke\"".formatted());
}
@Test
void should_create_error_message_escaping_percent() {
// GIVEN
ErrorMessageFactory factory = shouldMatch("%%E", "fffff");
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting actual:%n \"%%%%E\"%nto match pattern:%n \"fffff\"".formatted());
}
}
| ShouldMatchPattern_create_Test |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java | {
"start": 2735,
"end": 3128
} | class ____ extends Configured implements Tool {
private static final Logger LOG = LoggerFactory.getLogger(DistSum.class);
private static final String NAME = DistSum.class.getSimpleName();
private static final String N_PARTS = "mapreduce.pi." + NAME + ".nParts";
/////////////////////////////////////////////////////////////////////////////
/** DistSum job parameters */
static | DistSum |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java | {
"start": 25118,
"end": 28813
} | class ____ {
private ClientRequestInterceptor rootInterceptor;
/**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor the first interceptor in the pipeline
*/
public synchronized void init(ClientRequestInterceptor interceptor) {
this.rootInterceptor = interceptor;
}
/**
* Gets the root request interceptor.
*
* @return the root request interceptor
*/
public synchronized ClientRequestInterceptor getRootInterceptor() {
return rootInterceptor;
}
/**
* Shutdown the chain of interceptors when the object is destroyed.
*/
@Override
protected void finalize() {
rootInterceptor.shutdown();
}
}
@VisibleForTesting
public Map<String, RequestInterceptorChainWrapper> getUserPipelineMap() {
return userPipelineMap;
}
/**
* Create RouterRMDelegationTokenSecretManager.
* In the YARN federation, the Router will replace the RM to
* manage the RMDelegationToken (generate, update, cancel),
* so the relevant configuration parameters still obtain the configuration parameters of the RM.
*
* @param conf Configuration
* @return RouterDelegationTokenSecretManager.
*/
protected RouterDelegationTokenSecretManager createRouterRMDelegationTokenSecretManager(
Configuration conf) {
long secretKeyInterval = conf.getLong(
YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
long tokenMaxLifetime = conf.getLong(
YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
long tokenRenewInterval = conf.getLong(
YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
long removeScanInterval = conf.getTimeDuration(
YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
return new RouterDelegationTokenSecretManager(secretKeyInterval,
tokenMaxLifetime, tokenRenewInterval, removeScanInterval, conf);
}
@VisibleForTesting
public RouterDelegationTokenSecretManager getRouterDTSecretManager() {
return routerDTSecretManager;
}
@VisibleForTesting
public void setRouterDTSecretManager(RouterDelegationTokenSecretManager routerDTSecretManager) {
this.routerDTSecretManager = routerDTSecretManager;
}
@VisibleForTesting
public void initUserPipelineMap(Configuration conf) {
int maxCacheSize = conf.getInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE,
YarnConfiguration.DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE);
this.userPipelineMap = Collections.synchronizedMap(new LRUCacheHashMap<>(maxCacheSize, true));
}
private URL getRedirectURL() throws Exception {
Configuration conf = getConfig();
String webAppAddress = WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.ROUTER_BIND_HOST,
WebAppUtils.getRouterWebAppURLWithoutScheme(conf));
String[] hostPort = StringUtils.split(webAppAddress, ':');
if (hostPort.length != 2) {
throw new YarnRuntimeException("Router can't get valid redirect proxy url");
}
String host = hostPort[0];
int port = Integer.parseInt(hostPort[1]);
if (StringUtils.isBlank(host) || host.equals("0.0.0.0")) {
host = InetAddress.getLocalHost().getCanonicalHostName();
}
return new URL(YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http", host, port, "");
}
}
| RequestInterceptorChainWrapper |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/bean/override/BeanOverrideProcessor.java | {
"start": 3211,
"end": 3520
} | class ____ process
* @return the list of {@code BeanOverrideHandlers} for the annotated class
* @since 6.2.2
* @see #createHandler(Annotation, Class, Field)
*/
default List<BeanOverrideHandler> createHandlers(Annotation overrideAnnotation, Class<?> testClass) {
return Collections.emptyList();
}
}
| to |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/CacheMode.java | {
"start": 4534,
"end": 4637
} | enum ____.
*
* @throws MappingException Indicates the external form was not recognized as a valid | value |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionListener.java | {
"start": 1320,
"end": 5864
} | class ____ implements LeaderElectionDriver.Listener {
private final BlockingQueue<LeaderElectionEvent> leaderElectionEvents =
new ArrayBlockingQueue<>(10);
@Override
public void onGrantLeadership(UUID leaderSessionID) {
put(new LeaderElectionEvent.IsLeaderEvent(leaderSessionID));
}
@Override
public void onRevokeLeadership() {
put(new LeaderElectionEvent.NotLeaderEvent());
}
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
put(new LeaderElectionEvent.LeaderInformationChangeEvent(componentId, leaderInformation));
}
@Override
public void onLeaderInformationChange(LeaderInformationRegister leaderInformationRegister) {
put(new LeaderElectionEvent.AllLeaderInformationChangeEvent(leaderInformationRegister));
}
@Override
public void onError(Throwable t) {
put(new LeaderElectionEvent.ErrorEvent(t));
}
private void put(LeaderElectionEvent leaderElectionEvent) {
try {
leaderElectionEvents.put(leaderElectionEvent);
} catch (InterruptedException e) {
ExceptionUtils.rethrow(e);
}
}
public <T> T assertNextEvent(Class<T> expectedEventClass) throws InterruptedException {
final LeaderElectionEvent leaderElectionEvent = leaderElectionEvents.take();
assertThat(leaderElectionEvent)
.as(
"The next event didn't match the expected event type %s.",
expectedEventClass.getSimpleName())
.isInstanceOf(expectedEventClass);
return leaderElectionEvent.as(expectedEventClass);
}
public <T> T await(Class<T> clazz) throws InterruptedException {
while (true) {
final LeaderElectionEvent leaderElectionEvent = leaderElectionEvents.take();
if (clazz.isAssignableFrom(leaderElectionEvent.getClass())) {
return clazz.cast(leaderElectionEvent);
}
}
}
public <T> Optional<T> await(Class<T> clazz, Duration timeout) throws InterruptedException {
final Deadline deadline = Deadline.fromNow(timeout);
while (true) {
final Duration timeLeft = deadline.timeLeft();
if (timeLeft.isNegative()) {
return Optional.empty();
} else {
final Optional<LeaderElectionEvent> optLeaderElectionEvent =
Optional.ofNullable(
leaderElectionEvents.poll(
timeLeft.toMillis(), TimeUnit.MILLISECONDS));
if (optLeaderElectionEvent.isPresent()) {
final LeaderElectionEvent leaderElectionEvent = optLeaderElectionEvent.get();
if (clazz.isAssignableFrom(leaderElectionEvent.getClass())) {
return Optional.of(clazz.cast(optLeaderElectionEvent));
}
} else {
return Optional.empty();
}
}
}
}
/**
* Returns the next {@link
* org.apache.flink.runtime.leaderelection.LeaderElectionEvent.ErrorEvent} or an empty {@code
* Optional} if no such event happened. Any other not-yet processed events that happened before
* the error will be removed from the queue. This method doesn't wait for events but processes
* the queue in its current state.
*/
public Optional<LeaderElectionEvent.ErrorEvent> getNextErrorEvent() {
while (!leaderElectionEvents.isEmpty()) {
final LeaderElectionEvent event = leaderElectionEvents.remove();
if (event.isErrorEvent()) {
return Optional.of(event.as(LeaderElectionEvent.ErrorEvent.class));
}
}
return Optional.empty();
}
/**
* Throws an {@code AssertionError} if an {@link LeaderElectionEvent.ErrorEvent} was observed.
* This method can be used to ensure that any error that was triggered unexpectedly is exposed
* within the test.
*/
public void failIfErrorEventHappened() {
getNextErrorEvent()
.ifPresent(
error -> {
throw new AssertionError(
"An error was reported that wasn't properly handled.",
error.getError());
});
}
}
| TestingLeaderElectionListener |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/Pool40.java | {
"start": 17227,
"end": 17815
} | enum ____ {
/**
* There are no pending changes, and nobody is currently executing {@link #doSomeWork()}.
*/
IDLE,
/**
* Someone is currently executing {@link #doSomeWork()}, but there were further changes
* after {@link #doSomeWork()} was called, so it needs to be called again.
*/
ACTIVE_WITH_PENDING_WORK,
/**
* Someone is currently executing {@link #doSomeWork()}, and there were no other changes
* since then.
*/
ACTIVE_WITHOUT_PENDING_WORK,
}
final | WorkState |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3836PluginConfigInheritanceTest.java | {
"start": 1134,
"end": 2958
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that submodules can *override* inherited plugin configuration.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG3836() throws Exception {
File testDir = extractResources("/mng-3836");
Verifier verifier = newVerifier(new File(testDir, "child").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties props = verifier.loadProperties("target/plugin-config.properties");
assertEquals("4", props.getProperty("stringParams"));
assertEquals("PASSED-1", props.getProperty("stringParams.0"));
assertEquals("PASSED-3", props.getProperty("stringParams.1"));
assertEquals("PASSED-2", props.getProperty("stringParams.2"));
assertEquals("PASSED-4", props.getProperty("stringParams.3"));
assertEquals("4", props.getProperty("listParam"));
assertEquals("PASSED-1", props.getProperty("listParam.0"));
assertEquals("PASSED-3", props.getProperty("listParam.1"));
assertEquals("PASSED-2", props.getProperty("listParam.2"));
assertEquals("PASSED-4", props.getProperty("listParam.3"));
assertEquals("4", props.getProperty("domParam.children"));
assertEquals("PASSED-1", props.getProperty("domParam.children.echo.0.value"));
assertEquals("PASSED-3", props.getProperty("domParam.children.echo.1.value"));
assertEquals("PASSED-2", props.getProperty("domParam.children.echo.2.value"));
assertEquals("PASSED-4", props.getProperty("domParam.children.echo.3.value"));
}
}
| MavenITmng3836PluginConfigInheritanceTest |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/Jsr330ScopeMetadataResolver.java | {
"start": 1690,
"end": 4123
} | class ____ implements ScopeMetadataResolver {
private final Map<String, String> scopeMap = new HashMap<>();
public Jsr330ScopeMetadataResolver() {
registerScope("jakarta.inject.Singleton", BeanDefinition.SCOPE_SINGLETON);
}
/**
* Register an extended JSR-330 scope annotation, mapping it onto a
* specific Spring scope by name.
* @param annotationType the JSR-330 annotation type as a Class
* @param scopeName the Spring scope name
*/
public final void registerScope(Class<?> annotationType, String scopeName) {
this.scopeMap.put(annotationType.getName(), scopeName);
}
/**
* Register an extended JSR-330 scope annotation, mapping it onto a
* specific Spring scope by name.
* @param annotationType the JSR-330 annotation type by name
* @param scopeName the Spring scope name
*/
public final void registerScope(String annotationType, String scopeName) {
this.scopeMap.put(annotationType, scopeName);
}
/**
* Resolve the given annotation type into a named Spring scope.
* <p>The default implementation simply checks against registered scopes.
* Can be overridden for custom mapping rules, for example, naming conventions.
* @param annotationType the JSR-330 annotation type
* @return the Spring scope name
*/
protected @Nullable String resolveScopeName(String annotationType) {
return this.scopeMap.get(annotationType);
}
@Override
public ScopeMetadata resolveScopeMetadata(BeanDefinition definition) {
ScopeMetadata metadata = new ScopeMetadata();
metadata.setScopeName(BeanDefinition.SCOPE_PROTOTYPE);
if (definition instanceof AnnotatedBeanDefinition annDef) {
Set<String> annTypes = annDef.getMetadata().getAnnotationTypes();
String found = null;
for (String annType : annTypes) {
Set<String> metaAnns = annDef.getMetadata().getMetaAnnotationTypes(annType);
if (metaAnns.contains("jakarta.inject.Scope")) {
if (found != null) {
throw new IllegalStateException("Found ambiguous scope annotations on bean class [" +
definition.getBeanClassName() + "]: " + found + ", " + annType);
}
found = annType;
String scopeName = resolveScopeName(annType);
if (scopeName == null) {
throw new IllegalStateException(
"Unsupported scope annotation - not mapped onto Spring scope name: " + annType);
}
metadata.setScopeName(scopeName);
}
}
}
return metadata;
}
}
| Jsr330ScopeMetadataResolver |
java | apache__maven | its/core-it-suite/src/test/resources/mng-2054/project/project-level2/project-level3/project-jar/src/test/java/com/stchome/mavenTest/AppTest.java | {
"start": 988,
"end": 1439
} | class ____ extends TestCase {
/**
* Create the test case
*
* @param testName name of the test case
*/
public AppTest(String testName) {
super(testName);
}
/**
* @return the suite of tests being tested
*/
public static Test suite() {
return new TestSuite(AppTest.class);
}
/**
* Rigourous Test :-)
*/
public void testApp() {
assertTrue(true);
}
}
| AppTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java | {
"start": 1527,
"end": 3311
} | class ____ extends FileSystemContractBaseTest {
private MiniDFSCluster cluster;
private String defaultWorkingDirectory;
@BeforeEach
public void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
FileSystemContractBaseTest.TEST_UMASK);
File basedir = GenericTestUtils.getRandomizedTestDir();
cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2)
.build();
fs = cluster.getFileSystem();
defaultWorkingDirectory = "/user/" +
UserGroupInformation.getCurrentUser().getShortUserName();
}
@AfterEach
public void tearDown() throws Exception {
super.tearDown();
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@Override
protected String getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
@Override
protected int getGlobalTimeout() {
return 60 * 1000;
}
@Test
public void testAppend() throws IOException {
AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
}
@Test
public void testFileSystemCapabilities() throws Exception {
final Path p = new Path("testFileSystemCapabilities");
// ViewFileSystem does not support LeaseRecoverable and SafeMode.
if (fs instanceof DistributedFileSystem) {
final boolean leaseRecovery = fs.hasPathCapability(p, LEASE_RECOVERABLE);
assertThat(leaseRecovery).describedAs("path capabilities %s=%s in %s", LEASE_RECOVERABLE,
leaseRecovery, fs).isTrue();
assertThat(fs).describedAs("filesystem %s", fs).isInstanceOf(LeaseRecoverable.class);
assertThat(fs).describedAs("filesystem %s", fs).isInstanceOf(SafeMode.class);
}
}
}
| TestHDFSFileSystemContract |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldBeEqualWithTimePrecision_create_Test.java | {
"start": 1290,
"end": 4368
} | class ____ {
@Test
void should_create_error_message_ignoring_milliseconds() {
ErrorMessageFactory factory = shouldBeEqual(parseDatetimeWithMs("2011-01-01T05:00:00.000"),
parseDatetimeWithMs("2011-01-01T06:05:17.003"), TimeUnit.MILLISECONDS);
String message = factory.create(new TextDescription("Test"), new StandardRepresentation());
then(message).isEqualTo(format("[Test] %nExpecting actual:%n" +
" 2011-01-01T05:00:00.000 (java.util.Date)%n" +
"to have same year, month, day, hour, minute and second as:%n" +
" 2011-01-01T06:05:17.003 (java.util.Date)%n" +
"but had not."));
}
@Test
void should_create_error_message_ignoring_seconds() {
ErrorMessageFactory factory = shouldBeEqual(parseDatetimeWithMs("2011-01-01T05:00:00.000"),
parseDatetimeWithMs("2011-01-01T06:05:17.003"), TimeUnit.SECONDS);
String message = factory.create(new TextDescription("Test"), new StandardRepresentation());
then(message).isEqualTo(format("[Test] %nExpecting actual:%n" +
" 2011-01-01T05:00:00.000 (java.util.Date)%n" +
"to have same year, month, day, hour and minute as:%n" +
" 2011-01-01T06:05:17.003 (java.util.Date)%n" +
"but had not."));
}
@Test
void should_create_error_message_ignoring_minutes() {
ErrorMessageFactory factory = shouldBeEqual(parseDatetimeWithMs("2011-01-01T05:00:00.000"),
parseDatetimeWithMs("2011-01-01T06:05:17.003"), TimeUnit.MINUTES);
String message = factory.create(new TextDescription("Test"), new StandardRepresentation());
then(message).isEqualTo(format("[Test] %nExpecting actual:%n" +
" 2011-01-01T05:00:00.000 (java.util.Date)%n" +
"to have same year, month, day and hour as:%n" +
" 2011-01-01T06:05:17.003 (java.util.Date)%n" +
"but had not."));
}
@Test
void should_create_error_message_ignoring_hours() {
ErrorMessageFactory factory = shouldBeEqual(parseDatetimeWithMs("2011-01-01T05:00:00.000"),
parseDatetimeWithMs("2011-01-01T06:05:17.003"), TimeUnit.HOURS);
String message = factory.create(new TextDescription("Test"), new StandardRepresentation());
then(message).isEqualTo(format("[Test] %nExpecting actual:%n" +
" 2011-01-01T05:00:00.000 (java.util.Date)%n" +
"to have same year, month and day as:%n" +
" 2011-01-01T06:05:17.003 (java.util.Date)%n" +
"but had not."));
}
}
| ShouldBeEqualWithTimePrecision_create_Test |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/injectionstrategy/spring/_default/SpringDefaultMapperTest.java | {
"start": 1688,
"end": 3130
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource();
@Autowired
private CustomerSpringDefaultMapper customerMapper;
private ConfigurableApplicationContext context;
@BeforeEach
public void springUp() {
context = new AnnotationConfigApplicationContext( getClass() );
context.getAutowireCapableBeanFactory().autowireBean( this );
}
@AfterEach
public void springDown() {
if ( context != null ) {
context.close();
}
}
@ProcessorTest
public void shouldConvertToTarget() {
// given
CustomerEntity customerEntity = new CustomerEntity();
customerEntity.setName( "Samuel" );
customerEntity.setGender( Gender.MALE );
// when
CustomerDto customerDto = customerMapper.asTarget( customerEntity );
// then
assertThat( customerDto ).isNotNull();
assertThat( customerDto.getName() ).isEqualTo( "Samuel" );
assertThat( customerDto.getGender() ).isEqualTo( GenderDto.M );
}
@ProcessorTest
public void shouldHaveFieldInjection() {
generatedSource.forMapper( CustomerSpringDefaultMapper.class )
.content()
.contains( "@Autowired" + lineSeparator() + " private GenderSpringDefaultMapper" )
.doesNotContain( "public CustomerSpringDefaultMapperImpl(" );
}
}
| SpringDefaultMapperTest |
java | spring-projects__spring-boot | core/spring-boot-testcontainers/src/dockerTest/java/org/springframework/boot/testcontainers/ImportTestcontainersTests.java | {
"start": 5997,
"end": 6065
} | interface ____ {
}
@ImportTestcontainers
static | ContainerAnnotation |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/hive/visitor/HiveASTVisitor.java | {
"start": 1284,
"end": 2460
} | interface ____ extends SQLASTVisitor {
default boolean visit(HiveInsert x) {
return true;
}
default void endVisit(HiveInsert x) {
}
default boolean visit(HiveMultiInsertStatement x) {
return true;
}
default void endVisit(HiveMultiInsertStatement x) {
}
default boolean visit(HiveInsertStatement x) {
return true;
}
default void endVisit(HiveInsertStatement x) {
}
default boolean visit(HiveCreateFunctionStatement x) {
return true;
}
default void endVisit(HiveCreateFunctionStatement x) {
}
default boolean visit(HiveLoadDataStatement x) {
return true;
}
default void endVisit(HiveLoadDataStatement x) {
}
default boolean visit(HiveMsckRepairStatement x) {
return true;
}
default void endVisit(HiveMsckRepairStatement x) {
}
default boolean visit(HiveAddJarStatement x) {
return true;
}
default void endVisit(HiveAddJarStatement x) {
}
default boolean visit(HiveCreateTableStatement x) {
return true;
}
default void endVisit(HiveCreateTableStatement x) {
}
}
| HiveASTVisitor |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java | {
"start": 5715,
"end": 59783
} | class ____ extends Receiver implements Runnable {
public static final Logger LOG = DataNode.LOG;
static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
// Connection to the remote peer; set to null once ownership is handed off
// to the DomainSocketWatcher (see releaseSocket()).
private Peer peer;
private final String remoteAddress; // address of remote side
private final String remoteAddressWithoutPort; // only the address, no port
private final String localAddress; // local address of this daemon
private final DataNode datanode;
private final DNConf dnConf;
private final DataXceiverServer dataXceiverServer;
private final boolean connectToDnViaHostname;
private long opStartTime; //the start time of receiving an Op
private final InputStream socketIn;
// May be rewrapped with SASL/encryption streams during the handshake in run().
private OutputStream socketOut;
// Receiver for the write currently in progress, if any. Access only via the
// synchronized setter/getter below.
private BlockReceiver blockReceiver = null;
private final int ioFileBufferSize;
private final int smallBufferSize;
// The thread executing run(); used by stopWriter() to interrupt active writes.
private Thread xceiver = null;
/**
* Client Name used in previous operation. Not available on first request
* on the socket.
*/
private String previousOpClientName;
/**
 * Factory method: build a DataXceiver to serve one accepted connection.
 *
 * @param peer the accepted connection
 * @param dn the owning datanode
 * @param dataXceiverServer the server that accepted the connection
 * @throws IOException if the peer's streams cannot be obtained
 */
public static DataXceiver create(Peer peer, DataNode dn,
DataXceiverServer dataXceiverServer) throws IOException {
return new DataXceiver(peer, dn, dataXceiverServer);
}
// Private: instances are created via create(...) above.
private DataXceiver(Peer peer, DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException {
super(FsTracer.get(null));
this.peer = peer;
this.dnConf = datanode.getDnConf();
this.socketIn = peer.getInputStream();
this.socketOut = peer.getOutputStream();
this.datanode = datanode;
this.dataXceiverServer = dataXceiverServer;
this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
remoteAddress = peer.getRemoteAddressString();
final int colonIdx = remoteAddress.indexOf(':');
// Strip any ":port" suffix so per-host accounting sees only the address.
remoteAddressWithoutPort =
(colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
localAddress = peer.getLocalAddressString();
LOG.debug("Number of active connections is: {}",
datanode.getXceiverCount());
}
/**
 * Update the current thread's name to contain the current status.
 * Use this only after this receiver has started on its thread, i.e.,
 * outside the constructor.
 */
private void updateCurrentThreadName(String status) {
  final StringBuilder name = new StringBuilder("DataXceiver for client ");
  if (previousOpClientName != null) {
    // Include the client from the previous op on this connection, if known.
    name.append(previousOpClientName).append(" at ");
  }
  name.append(remoteAddress);
  if (status != null) {
    name.append(" [").append(status).append("]");
  }
  Thread.currentThread().setName(name.toString());
}
/** Return the datanode object that owns this xceiver. */
DataNode getDataNode() {return datanode;}
/** @return the current (possibly SASL-wrapped) output stream to the peer. */
private OutputStream getOutputStream() {
return socketOut;
}
/**
 * Send an out-of-band message to the peer through the active block
 * receiver; a no-op when no write is currently in progress.
 */
public void sendOOB() throws IOException, InterruptedException {
  final BlockReceiver receiver = getCurrentBlockReceiver();
  if (receiver != null) {
    // This doesn't need to be in a critical section. Although the client
    // can reuse the connection to issue a different request, trying sending
    // an OOB through the recently closed block receiver is harmless.
    LOG.info("Sending OOB to peer: {}", peer);
    receiver.sendOOB();
  }
}
/**
 * Interrupt this xceiver's thread, but only if it is currently serving a
 * write (i.e. a BlockReceiver is active).
 */
public void stopWriter() {
// We want to interrupt the xceiver only when it is serving writes.
synchronized(this) {
if (getCurrentBlockReceiver() == null) {
return;
}
// "xceiver" was recorded under the same lock in run(), so it cannot be
// swapped out from under us here.
xceiver.interrupt();
}
LOG.info("Stopped the writer: {}", peer);
}
/**
* blockReceiver is updated at multiple places. Use the synchronized setter
* and getter.
*/
// Pass null to clear the receiver once a write completes.
private synchronized void setCurrentBlockReceiver(BlockReceiver br) {
blockReceiver = br;
}
/** @return the receiver of the in-progress write, or null if none. */
private synchronized BlockReceiver getCurrentBlockReceiver() {
return blockReceiver;
}
/**
 * Read/write data from/to the DataXceiverServer.
 *
 * Main loop: perform the SASL/encryption handshake with the peer, then
 * read and process operations until the peer closes the connection, the
 * keepalive timeout expires, or an error occurs.
 */
@Override
public void run() {
  int opsProcessed = 0;
  Op op = null;
  Op firstOp = null;
  try {
    synchronized(this) {
      // Record the serving thread so stopWriter() can interrupt it.
      xceiver = Thread.currentThread();
    }
    dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
    peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
    InputStream input = socketIn;
    try {
      IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
        socketIn, datanode.getXferAddress().getPort(),
        datanode.getDatanodeId());
      input = new BufferedInputStream(saslStreams.in,
          smallBufferSize);
      socketOut = saslStreams.out;
    } catch (InvalidMagicNumberException imne) {
      if (imne.isHandshake4Encryption()) {
        LOG.info("Failed to read expected encryption handshake from client " +
            "at {}. Perhaps the client " +
            "is running an older version of Hadoop which does not support " +
            "encryption", peer.getRemoteAddressString(), imne);
      } else {
        LOG.info("Failed to read expected SASL data transfer protection " +
            "handshake from client at {}" +
            ". Perhaps the client is running an older version of Hadoop " +
            "which does not support SASL data transfer protection",
            peer.getRemoteAddressString(), imne);
      }
      return;
    }

    super.initialize(new DataInputStream(input));

    // We process requests in a loop, and stay around for a short timeout.
    // This optimistic behaviour allows the other end to reuse connections.
    // Setting keepalive timeout to 0 disable this behavior.
    do {
      updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

      try {
        if (opsProcessed != 0) {
          assert dnConf.socketKeepaliveTimeout > 0;
          peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
        } else {
          peer.setReadTimeout(dnConf.socketTimeout);
        }
        op = readOp();
      } catch (InterruptedIOException ignored) {
        // Time out while we wait for client rpc
        break;
      } catch (EOFException | ClosedChannelException e) {
        // Since we optimistically expect the next op, it's quite normal to
        // get EOF here.
        LOG.debug("Cached {} closing after {} ops. " +
            "This message is usually benign.", peer, opsProcessed);
        break;
      } catch (IOException err) {
        incrDatanodeNetworkErrors();
        throw err;
      }

      // restore normal timeout
      if (opsProcessed != 0) {
        peer.setReadTimeout(dnConf.socketTimeout);
      }

      opStartTime = monotonicNow();
      // compatible with loop retry requests
      if (firstOp == null) {
        firstOp = op;
        incrReadWriteOpMetrics(op);
      }
      processOp(op);
      ++opsProcessed;
    } while ((peer != null) &&
        (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
  } catch (Throwable t) {
    String s = datanode.getDisplayName() + ":DataXceiver error processing "
        + ((op == null) ? "unknown" : op.name()) + " operation "
        + " src: " + remoteAddress + " dst: " + localAddress;
    if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
      // For WRITE_BLOCK, it is okay if the replica already exists since
      // client and replication may write the same block to the same datanode
      // at the same time.
      if (LOG.isTraceEnabled()) {
        LOG.trace(s, t);
      } else {
        LOG.info("{}; {}", s, t.toString());
      }
    } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
      String s1 =
          "Likely the client has stopped reading, disconnecting it";
      s1 += " (" + s + ")";
      if (LOG.isTraceEnabled()) {
        LOG.trace(s1, t);
      } else {
        LOG.info("{}; {}", s1, t.toString());
      }
    } else if (t instanceof InvalidToken ||
        t.getCause() instanceof InvalidToken) {
      // The InvalidToken exception has already been logged in
      // checkAccess() method and this is not a server error.
      LOG.trace(s, t);
    } else {
      LOG.error(s, t);
    }
  } finally {
    collectThreadLocalStates();
    LOG.debug("{}:Number of active connections is: {}",
        datanode.getDisplayName(), datanode.getXceiverCount());
    updateCurrentThreadName("Cleaning up");
    if (peer != null) {
      if (firstOp != null) {
        // Release the same counter that was incremented above: metrics were
        // bumped for firstOp, so they must be decremented for firstOp, not
        // for whatever op happened to be processed last on this connection.
        decrReadWriteOpMetrics(firstOp);
      }
      dataXceiverServer.closePeer(peer);
      IOUtils.closeStream(in);
    }
  }
}
/**
 * Flush any thread-local peer-metrics state before this short-lived
 * xceiver thread exits, so the data is not lost with the thread.
 */
private void collectThreadLocalStates() {
  final boolean peerStatsOn = datanode.getDnConf().peerStatsEnabled;
  if (peerStatsOn && datanode.getPeerMetrics() != null) {
    datanode.getPeerMetrics().collectThreadLocalStates();
  }
}
/**
 * Serve a short-circuit read request: validate access, optionally register
 * the client's shared-memory slot, then pass the block's open file
 * descriptors to the client over the UNIX domain socket.
 */
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> token,
SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
throws IOException {
updateCurrentThreadName("Passing file descriptors for block " + blk);
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, blk, token,
Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ,
null, null);
BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
FileInputStream fis[] = null;
SlotId registeredSlotId = null;
boolean success = false;
try {
try {
// File descriptors can only be transferred over a domain socket.
if (peer.getDomainSocket() == null) {
throw new IOException("You cannot pass file descriptors over " +
"anything but a UNIX domain socket.");
}
if (slotId != null) {
boolean isCached = datanode.data.
isCached(blk.getBlockPoolId(), blk.getBlockId());
datanode.shortCircuitRegistry.registerSlot(
ExtendedBlockId.fromExtendedBlock(blk), slotId, isCached);
// Remember the slot so it can be unregistered if we fail below.
registeredSlotId = slotId;
}
fis = datanode.requestShortCircuitFdsForRead(blk, token, maxVersion);
Preconditions.checkState(fis != null);
bld.setStatus(SUCCESS);
bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
} catch (ShortCircuitFdsVersionException e) {
bld.setStatus(ERROR_UNSUPPORTED);
bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
bld.setMessage(e.getMessage());
} catch (ShortCircuitFdsUnsupportedException e) {
bld.setStatus(ERROR_UNSUPPORTED);
bld.setMessage(e.getMessage());
} catch (IOException e) {
bld.setStatus(ERROR);
bld.setMessage(e.getMessage());
LOG.error("Request short-circuit read file descriptor" +
" failed with unknown error.", e);
}
// Send the status response first, then (on success) the FDs themselves.
bld.build().writeDelimitedTo(socketOut);
if (fis != null) {
FileDescriptor fds[] = new FileDescriptor[fis.length];
for (int i = 0; i < fds.length; i++) {
fds[i] = fis[i].getFD();
}
// One auxiliary byte accompanies the FDs, signalling whether receipt
// verification is in use.
byte buf[] = new byte[1];
if (supportsReceiptVerification) {
buf[0] = (byte)USE_RECEIPT_VERIFICATION.getNumber();
} else {
buf[0] = (byte)DO_NOT_USE_RECEIPT_VERIFICATION.getNumber();
}
DomainSocket sock = peer.getDomainSocket();
sock.sendFileDescriptors(fds, buf, 0, buf.length);
if (supportsReceiptVerification) {
LOG.trace("Reading receipt verification byte for {}", slotId);
int val = sock.getInputStream().read();
if (val < 0) {
throw new EOFException();
}
} else {
LOG.trace("Receipt verification is not enabled on the DataNode. " +
"Not verifying {}", slotId);
}
success = true;
// update metrics
datanode.metrics.addReadBlockOp(elapsed());
datanode.metrics.incrReadsFromClient(true, blk.getNumBytes());
}
} finally {
if ((!success) && (registeredSlotId != null)) {
LOG.info("Unregistering {} because the " +
"requestShortCircuitFdsForRead operation failed.",
registeredSlotId);
datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
}
if (CLIENT_TRACE_LOG.isInfoEnabled()) {
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
.getBlockPoolId());
BlockSender.CLIENT_TRACE_LOG.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
" blockid: %s, srvID: %s, success: %b",
blk.getBlockId(), dnR.getDatanodeUuid(), success));
}
if (fis != null) {
IOUtils.cleanupWithLogger(null, fis);
}
}
}
/**
 * Unregister a client's short-circuit shared-memory slot and report the
 * outcome (success / unsupported / invalid) back over the socket.
 */
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
boolean success = false;
try {
String error;
Status status;
try {
datanode.shortCircuitRegistry.unregisterSlot(slotId);
error = null;
status = Status.SUCCESS;
} catch (UnsupportedOperationException e) {
error = "unsupported operation";
status = Status.ERROR_UNSUPPORTED;
} catch (Throwable e) {
// Any other failure is reported to the client as an invalid request.
error = e.getMessage();
status = Status.ERROR_INVALID;
}
ReleaseShortCircuitAccessResponseProto.Builder bld =
ReleaseShortCircuitAccessResponseProto.newBuilder();
bld.setStatus(status);
if (error != null) {
bld.setError(error);
}
bld.build().writeDelimitedTo(socketOut);
success = true;
} finally {
if (CLIENT_TRACE_LOG.isInfoEnabled()) {
BlockSender.CLIENT_TRACE_LOG.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
" shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
slotId.getShmId().getHi(), slotId.getShmId().getLo(),
slotId.getSlotIdx(), datanode.getDatanodeUuid(), success));
}
}
}
/** Reply to a shared-memory request with the given error status/message. */
private void sendShmErrorResponse(Status status, String error)
    throws IOException {
  final ShortCircuitShmResponseProto resp = ShortCircuitShmResponseProto
      .newBuilder()
      .setStatus(status)
      .setError(error)
      .build();
  resp.writeDelimitedTo(socketOut);
}
/**
 * Reply to a shared-memory request with SUCCESS plus the new segment id,
 * then pass the segment's file descriptor over the domain socket.
 */
private void sendShmSuccessResponse(DomainSocket sock, NewShmInfo shmInfo)
throws IOException {
DataNodeFaultInjector.get().sendShortCircuitShmResponse();
ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS).
setId(PBHelperClient.convert(shmInfo.getShmId())).build().
writeDelimitedTo(socketOut);
// Send the file descriptor for the shared memory segment.
byte buf[] = new byte[] { (byte)0 };
FileDescriptor shmFdArray[] =
new FileDescriptor[] {shmInfo.getFileStream().getFD()};
sock.sendFileDescriptors(shmFdArray, buf, 0, buf.length);
}
/**
 * Create a new short-circuit shared-memory segment for the named client and
 * hand its file descriptor over the domain socket. On success, ownership of
 * the socket transfers to the DomainSocketWatcher (see releaseSocket()).
 */
@Override
public void requestShortCircuitShm(String clientName) throws IOException {
NewShmInfo shmInfo = null;
boolean success = false;
DomainSocket sock = peer.getDomainSocket();
try {
if (sock == null) {
sendShmErrorResponse(ERROR_INVALID, "Bad request from " +
peer + ": must request a shared " +
"memory segment over a UNIX domain socket.");
return;
}
try {
shmInfo = datanode.shortCircuitRegistry.
createNewMemorySegment(clientName, sock);
// After calling #{ShortCircuitRegistry#createNewMemorySegment}, the
// socket is managed by the DomainSocketWatcher, not the DataXceiver.
releaseSocket();
} catch (UnsupportedOperationException e) {
sendShmErrorResponse(ERROR_UNSUPPORTED,
"This datanode has not been configured to support " +
"short-circuit shared memory segments.");
return;
} catch (IOException e) {
sendShmErrorResponse(ERROR,
"Failed to create shared file descriptor: " + e.getMessage());
return;
}
sendShmSuccessResponse(sock, shmInfo);
success = true;
} finally {
if (CLIENT_TRACE_LOG.isInfoEnabled()) {
if (success) {
BlockSender.CLIENT_TRACE_LOG.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM," +
" shmId: %016x%016x, srvID: %s, success: true",
clientName, shmInfo.getShmId().getHi(),
shmInfo.getShmId().getLo(),
datanode.getDatanodeUuid()));
} else {
BlockSender.CLIENT_TRACE_LOG.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM, " +
"shmId: n/a, srvID: %s, success: false",
clientName, datanode.getDatanodeUuid()));
}
}
// peer == null means releaseSocket() ran, i.e. the segment was created
// but the success response failed to reach the client.
if ((!success) && (peer == null)) {
// The socket is now managed by the DomainSocketWatcher. However,
// we failed to pass it to the client. We call shutdown() on the
// UNIX domain socket now. This will trigger the DomainSocketWatcher
// callback. The callback will close the domain socket.
// We don't want to close the socket here, since that might lead to
// bad behavior inside the poll() call. See HADOOP-11802 for details.
try {
LOG.warn("Failed to send success response back to the client. " +
"Shutting down socket for {}", shmInfo.getShmId());
sock.shutdown();
} catch (IOException e) {
LOG.warn("Failed to shut down socket in error handler", e);
}
}
IOUtils.cleanupWithLogger(null, shmInfo);
}
}
/**
 * Hand ownership of the peer back to the DataXceiverServer and forget it
 * locally; after this call the peer field is null.
 */
void releaseSocket() {
dataXceiverServer.releasePeer(peer);
peer = null;
}
/**
 * Stream a (possibly partial) block to the client: check the block token,
 * send a checksum-info header, then the data, and finally wait for the
 * client's read status when the whole requested range was sent.
 */
@Override
public void readBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
final long length,
final boolean sendChecksum,
final CachingStrategy cachingStrategy) throws IOException {
previousOpClientName = clientName;
long read = 0;
updateCurrentThreadName("Sending block " + block);
OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken, Op.READ_BLOCK,
BlockTokenIdentifier.AccessMode.READ);
// send the block
BlockSender blockSender = null;
DatanodeRegistration dnR =
datanode.getDNRegistrationForBP(block.getBlockPoolId());
final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ?
String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ",
clientName, "%d", dnR.getDatanodeUuid(), block, "%d") :
dnR + " Served block " + block + " to " + remoteAddress;
try {
try {
blockSender = new BlockSender(block, blockOffset, length,
true, false, sendChecksum, datanode, clientTraceFmt,
cachingStrategy);
} catch(IOException e) {
String msg = "opReadBlock " + block + " received exception " + e;
LOG.info(msg);
sendResponse(ERROR, msg);
throw e;
}
// send op status
writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
long beginReadInNS = Time.monotonicNowNanos();
// send data
read = blockSender.sendBlock(out, baseStream, dataXceiverServer.getReadThrottler());
long durationInNS = Time.monotonicNowNanos() - beginReadInNS;
if (blockSender.didSendEntireByteRange()) {
// If we sent the entire range, then we should expect the client
// to respond with a Status enum.
try {
ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
PBHelperClient.vintPrefixed(in));
if (!stat.hasStatus()) {
LOG.warn("Client {} did not send a valid status code " +
"after reading. Will close connection.",
peer.getRemoteAddressString());
IOUtils.closeStream(out);
}
} catch (IOException ioe) {
LOG.debug("Error reading client status response. Will close connection.", ioe);
IOUtils.closeStream(out);
incrDatanodeNetworkErrors();
}
} else {
// Partial read: close the stream so the connection is not reused.
IOUtils.closeStream(out);
}
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
datanode.metrics.incrTotalReadTime(TimeUnit.NANOSECONDS.toMillis(durationInNS));
DFSUtil.addTransferRateMetric(datanode.metrics, read, durationInNS);
} catch ( SocketException ignored ) {
LOG.trace("{}:Ignoring exception while serving {} to {}",
dnR, block, remoteAddress, ignored);
// Its ok for remote side to close the connection anytime.
datanode.metrics.incrBlocksRead();
IOUtils.closeStream(out);
} catch ( IOException ioe ) {
/* What exactly should we do here?
* Earlier version shutdown() datanode if there is disk error.
*/
if (!(ioe instanceof SocketTimeoutException)) {
LOG.warn("{}:Got exception while serving {} to {}",
dnR, block, remoteAddress, ioe);
incrDatanodeNetworkErrors();
}
// Normally the client reports a bad block to the NN. However if the
// meta file is corrupt or an disk error occurs (EIO), then the client
// never gets a chance to do validation, and hence will never report
// the block as bad. For some classes of IO exception, the DN should
// report the block as bad, via the handleBadBlock() method
datanode.handleBadBlock(block, ioe, false);
throw ioe;
} finally {
IOUtils.closeStream(blockSender);
}
//update metrics
datanode.metrics.addReadBlockOp(elapsed());
datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
}
/**
 * Receive a block from the upstream node (client or datanode) and, when
 * targets are given, mirror it to the next datanode in the write pipeline.
 * Handles connect-ack forwarding, pipeline-recovery stages, and close-acks
 * for transfer requests.
 */
@Override
public void writeBlock(final ExtendedBlock block,
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String clientname,
final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes,
final DatanodeInfo srcDataNode,
final BlockConstructionStage stage,
final int pipelineSize,
final long minBytesRcvd,
final long maxBytesRcvd,
final long latestGenerationStamp,
DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
boolean allowLazyPersist,
final boolean pinning,
final boolean[] targetPinnings,
final String storageId,
final String[] targetStorageIds) throws IOException {
previousOpClientName = clientname;
updateCurrentThreadName("Receiving block " + block);
// An empty client name marks a datanode-initiated (replication) write.
final boolean isDatanode = clientname.length() == 0;
final boolean isClient = !isDatanode;
final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
|| stage == BlockConstructionStage.TRANSFER_FINALIZED;
allowLazyPersist = allowLazyPersist &&
(dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
long size = 0;
// reply to upstream datanode or client
final DataOutputStream replyOut = getBufferedOutputStream();
// Prepend this node's storage type/id to the target arrays for the
// access check below.
int nst = targetStorageTypes.length;
StorageType[] storageTypes = new StorageType[nst + 1];
storageTypes[0] = storageType;
if (targetStorageTypes.length > 0) {
System.arraycopy(targetStorageTypes, 0, storageTypes, 1, nst);
}
// To support older clients, we don't pass in empty storageIds
final int nsi = targetStorageIds.length;
final String[] storageIds;
if (nsi > 0) {
storageIds = new String[nsi + 1];
storageIds[0] = storageId;
if (targetStorageTypes.length > 0) {
System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
}
} else {
storageIds = new String[0];
}
checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
BlockTokenIdentifier.AccessMode.WRITE,
storageTypes, storageIds);
// check single target for transfer-RBW/Finalized
if (isTransfer && targets.length > 0) {
throw new IOException(stage + " does not support multiple targets "
+ Arrays.asList(targets));
}
if (LOG.isDebugEnabled()) {
LOG.debug("opWriteBlock: stage={}, clientname={}\n " +
"block ={}, newGs={}, bytesRcvd=[{}, {}]\n " +
"targets={}; pipelineSize={}, srcDataNode={}, pinning={}",
stage, clientname, block, latestGenerationStamp, minBytesRcvd,
maxBytesRcvd, Arrays.asList(targets), pipelineSize, srcDataNode,
pinning);
LOG.debug("isDatanode={}, isClient={}, isTransfer={}",
isDatanode, isClient, isTransfer);
LOG.debug("writeBlock receive buf size {} tcp no delay {}",
peer.getReceiveBufferSize(), peer.getTcpNoDelay());
}
// We later mutate block's generation stamp and length, but we need to
// forward the original version of the block to downstream mirrors, so
// make a copy here.
final ExtendedBlock originalBlock = new ExtendedBlock(block);
if (block.getNumBytes() == 0) {
block.setNumBytes(dataXceiverServer.estimateBlockSize);
}
LOG.info("Receiving {} src: {} dest: {}",
block, remoteAddress, localAddress);
DataOutputStream mirrorOut = null; // stream to next target
DataInputStream mirrorIn = null; // reply from next target
Socket mirrorSock = null; // socket to next target
String mirrorNode = null; // the name:port of next target
String firstBadLink = ""; // first datanode that failed in connection setup
Status mirrorInStatus = SUCCESS;
final String storageUuid;
final boolean isOnTransientStorage;
try {
final Replica replica;
if (isDatanode ||
stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
// open a block receiver
setCurrentBlockReceiver(getBlockReceiver(block, storageType, in,
peer.getRemoteAddressString(),
peer.getLocalAddressString(),
stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd,
clientname, srcDataNode, datanode, requestedChecksum,
cachingStrategy, allowLazyPersist, pinning, storageId));
replica = blockReceiver.getReplica();
} else {
// PIPELINE_CLOSE_RECOVERY for a client: no data to receive, just
// recover the close.
replica = datanode.data.recoverClose(
block, latestGenerationStamp, minBytesRcvd);
}
storageUuid = replica.getStorageUuid();
isOnTransientStorage = replica.isOnTransientStorage();
//
// Connect to downstream machine, if appropriate
//
if (targets.length > 0) {
InetSocketAddress mirrorTarget = null;
// Connect to backup machine
mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
LOG.debug("Connecting to datanode {}", mirrorNode);
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
DataNodeFaultInjector.get().failMirrorConnection();
// Timeouts grow with pipeline depth so downstream nodes get a
// chance to respond.
int timeoutValue = dnConf.socketTimeout +
(HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = dnConf.socketWriteTimeout +
(HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
mirrorSock.setSoTimeout(timeoutValue);
mirrorSock.setKeepAlive(true);
if (dnConf.getTransferSocketSendBufferSize() > 0) {
mirrorSock.setSendBufferSize(
dnConf.getTransferSocketSendBufferSize());
}
OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
writeTimeout);
InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
DataEncryptionKeyFactory keyFactory =
datanode.getDataEncryptionKeyFactoryForBlock(block);
SecretKey secretKey = null;
if (dnConf.overwriteDownstreamDerivedQOP) {
String bpid = block.getBlockPoolId();
BlockKey blockKey = datanode.blockPoolTokenSecretManager
.get(bpid).getCurrentKey();
secretKey = blockKey.getKey();
}
IOStreamPair saslStreams = datanode.saslClient.socketSend(
mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory,
blockToken, targets[0], secretKey);
unbufMirrorOut = saslStreams.out;
unbufMirrorIn = saslStreams.in;
mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut,
smallBufferSize));
mirrorIn = new DataInputStream(unbufMirrorIn);
String targetStorageId = null;
if (targetStorageIds.length > 0) {
// Older clients may not have provided any targetStorageIds
targetStorageId = targetStorageIds[0];
}
// Forward the write to the next node in the pipeline, propagating
// the first target's pinning flag when available.
if (targetPinnings != null && targetPinnings.length > 0) {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes,
srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, targetPinnings[0], targetPinnings,
targetStorageId, targetStorageIds);
} else {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes,
srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, false, targetPinnings,
targetStorageId, targetStorageIds);
}
mirrorOut.flush();
DataNodeFaultInjector.get().writeBlockAfterFlush();
// read connect ack (only for clients, not for replication req)
if (isClient) {
BlockOpResponseProto connectAck =
BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
mirrorInStatus = connectAck.getStatus();
firstBadLink = connectAck.getFirstBadLink();
if (mirrorInStatus != SUCCESS) {
LOG.debug("Datanode {} got response for connect" +
"ack from downstream datanode with firstbadlink as {}",
targets.length, firstBadLink);
}
}
} catch (IOException e) {
// Mirror setup failed: tell the client which link broke, then for
// replication requests continue without the mirror.
if (isClient) {
BlockOpResponseProto.newBuilder()
.setStatus(ERROR)
// NB: Unconditionally using the xfer addr w/o hostname
.setFirstBadLink(targets[0].getXferAddr())
.build()
.writeDelimitedTo(replyOut);
replyOut.flush();
}
IOUtils.closeStream(mirrorOut);
mirrorOut = null;
IOUtils.closeStream(mirrorIn);
mirrorIn = null;
IOUtils.closeSocket(mirrorSock);
mirrorSock = null;
if (isClient) {
LOG.error("{}:Exception transferring block {} to mirror {}",
datanode, block, mirrorNode, e);
throw e;
} else {
LOG.info("{}:Exception transferring {} to mirror {}- continuing " +
"without the mirror", datanode, block, mirrorNode, e);
incrDatanodeNetworkErrors();
}
}
}
// send connect-ack to source for clients and not transfer-RBW/Finalized
if (isClient && !isTransfer) {
if (mirrorInStatus != SUCCESS) {
LOG.debug("Datanode {} forwarding connect ack to upstream " +
"firstbadlink is {}", targets.length, firstBadLink);
}
BlockOpResponseProto.newBuilder()
.setStatus(mirrorInStatus)
.setFirstBadLink(firstBadLink)
.build()
.writeDelimitedTo(replyOut);
replyOut.flush();
}
// receive the block and mirror to the next target
if (blockReceiver != null) {
String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr,
dataXceiverServer.getWriteThrottler(), targets, false);
// send close-ack for transfer-RBW/Finalized
if (isTransfer) {
LOG.trace("TRANSFER: send close-ack");
writeResponse(SUCCESS, null, replyOut);
}
}
// update its generation stamp
if (isClient &&
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
block.setGenerationStamp(latestGenerationStamp);
block.setNumBytes(minBytesRcvd);
}
// if this write is for a replication request or recovering
// a failed close for client, then confirm block. For other client-writes,
// the block is finalized in the PacketResponder.
if (isDatanode ||
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
LOG.info("Received {} src: {} dest: {} volume: {} of size {}",
block, remoteAddress, localAddress, replica.getVolume(),
block.getNumBytes());
}
if(isClient) {
size = block.getNumBytes();
}
} catch (IOException ioe) {
LOG.info("opWriteBlock {} received exception {}",
block, ioe.toString());
incrDatanodeNetworkErrors();
throw ioe;
} finally {
// close all opened streams
IOUtils.closeStream(mirrorOut);
IOUtils.closeStream(mirrorIn);
IOUtils.closeStream(replyOut);
IOUtils.closeSocket(mirrorSock);
if (blockReceiver != null) {
blockReceiver.releaseAnyRemainingReservedSpace();
}
IOUtils.closeStream(blockReceiver);
setCurrentBlockReceiver(null);
}
//update metrics
datanode.getMetrics().addWriteBlockOp(elapsed());
datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
/**
 * Transfer a replica to the given targets for pipeline recovery, then
 * acknowledge SUCCESS to the requester.
 */
@Override
public void transferBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes,
final String[] targetStorageIds) throws IOException {
previousOpClientName = clientName;
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
final DataOutputStream out = new DataOutputStream(
getOutputStream());
checkAccess(out, true, blk, blockToken, Op.TRANSFER_BLOCK,
BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes,
targetStorageIds);
try {
datanode.transferReplicaForPipelineRecovery(blk, targets,
targetStorageTypes, targetStorageIds, clientName);
writeResponse(Status.SUCCESS, null, out);
} catch (IOException ioe) {
LOG.info("transferBlock {} received exception {}",
blk, ioe.toString());
incrDatanodeNetworkErrors();
throw ioe;
} finally {
IOUtils.closeStream(out);
}
}
/**
 * Compute the checksum of a replicated block and send it back to the
 * client, together with bytes-per-CRC, CRCs-per-block and the CRC type.
 */
@Override
public void blockChecksum(ExtendedBlock block,
Token<BlockTokenIdentifier> blockToken,
BlockChecksumOptions blockChecksumOptions)
throws IOException {
updateCurrentThreadName("Getting checksum for block " + block);
final DataOutputStream out = new DataOutputStream(
getOutputStream());
checkAccess(out, true, block, blockToken, Op.BLOCK_CHECKSUM,
BlockTokenIdentifier.AccessMode.READ);
BlockChecksumComputer maker = new ReplicatedBlockChecksumComputer(
datanode, block, blockChecksumOptions);
try {
maker.compute();
//write reply
BlockOpResponseProto.newBuilder()
.setStatus(SUCCESS)
.setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
.setBytesPerCrc(maker.getBytesPerCRC())
.setCrcPerBlock(maker.getCrcPerBlock())
.setBlockChecksum(ByteString.copyFrom(maker.getOutBytes()))
.setCrcType(PBHelperClient.convert(maker.getCrcType()))
.setBlockChecksumOptions(
PBHelperClient.convert(blockChecksumOptions)))
.build()
.writeDelimitedTo(out);
out.flush();
} catch (IOException ioe) {
LOG.info("blockChecksum {} received exception {}",
block, ioe.toString());
incrDatanodeNetworkErrors();
throw ioe;
} finally {
IOUtils.closeStream(out);
}
//update metrics
datanode.metrics.addBlockChecksumOp(elapsed());
}
/**
 * Compute the composite checksum of a striped (erasure-coded) block group
 * and send it back to the client, together with bytes-per-CRC,
 * CRCs-per-block and the CRC type.
 */
@Override
public void blockGroupChecksum(final StripedBlockInfo stripedBlockInfo,
    final Token<BlockTokenIdentifier> blockToken,
    long requestedNumBytes,
    BlockChecksumOptions blockChecksumOptions)
    throws IOException {
  final ExtendedBlock block = stripedBlockInfo.getBlock();
  updateCurrentThreadName("Getting checksum for block group" +
      block);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  checkAccess(out, true, block, blockToken, Op.BLOCK_GROUP_CHECKSUM,
      BlockTokenIdentifier.AccessMode.READ);

  AbstractBlockChecksumComputer maker =
      new BlockGroupNonStripedChecksumComputer(datanode, stripedBlockInfo,
          requestedNumBytes, blockChecksumOptions);

  try {
    maker.compute();

    //write reply
    BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
            .setBytesPerCrc(maker.getBytesPerCRC())
            .setCrcPerBlock(maker.getCrcPerBlock())
            .setBlockChecksum(ByteString.copyFrom(maker.getOutBytes()))
            .setCrcType(PBHelperClient.convert(maker.getCrcType()))
            .setBlockChecksumOptions(
                PBHelperClient.convert(blockChecksumOptions)))
        .build()
        .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    // Log under the correct op name; previously this said "blockChecksum",
    // a copy/paste slip from the non-striped variant that made the two ops
    // indistinguishable in the logs.
    LOG.info("blockGroupChecksum {} received exception {}",
        stripedBlockInfo.getBlock(), ioe.toString());
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
/**
 * Send a full copy of a block to the requesting peer (used by the
 * balancer/mover). Refuses pinned blocks and respects the balancing
 * throttler's concurrent-movers quota.
 */
@Override
public void copyBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken) throws IOException {
updateCurrentThreadName("Copying block " + block);
DataOutputStream reply = getBufferedOutputStream();
checkAccess(reply, true, block, blockToken, Op.COPY_BLOCK,
BlockTokenIdentifier.AccessMode.COPY);
if (datanode.data.getPinning(block)) {
String msg = "Not able to copy block " + block.getBlockId() + " " +
"to " + peer.getRemoteAddressString() + " because it's pinned ";
LOG.info(msg);
sendResponse(Status.ERROR_BLOCK_PINNED, msg);
return;
}
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
String msg = "Not able to copy block " + block.getBlockId() + " " +
"to " + peer.getRemoteAddressString() + " because threads " +
"quota=" + dataXceiverServer.balanceThrottler.getMaxConcurrentMovers() + " is exceeded.";
LOG.info(msg);
sendResponse(ERROR, msg);
return;
}
BlockSender blockSender = null;
boolean isOpSuccess = true;
try {
// check if the block exists or not
blockSender = new BlockSender(block, 0, -1, false, false, true, datanode,
null, CachingStrategy.newDropBehind());
OutputStream baseStream = getOutputStream();
// send status first
writeSuccessWithChecksumInfo(blockSender, reply);
long beginReadInNS = Time.monotonicNowNanos();
// send block content to the target
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
long durationInNS = Time.monotonicNowNanos() - beginReadInNS;
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
datanode.metrics.incrTotalReadTime(TimeUnit.NANOSECONDS.toMillis(durationInNS));
DFSUtil.addTransferRateMetric(datanode.metrics, read, durationInNS);
LOG.info("Copied {} to {}", block, peer.getRemoteAddressString());
} catch (IOException ioe) {
isOpSuccess = false;
LOG.info("opCopyBlock {} received exception {}", block, ioe.toString());
incrDatanodeNetworkErrors();
// Normally the client reports a bad block to the NN. However if the
// meta file is corrupt or an disk error occurs (EIO), then the client
// never gets a chance to do validation, and hence will never report
// the block as bad. For some classes of IO exception, the DN should
// report the block as bad, via the handleBadBlock() method
datanode.handleBadBlock(block, ioe, false);
throw ioe;
} finally {
dataXceiverServer.balanceThrottler.release();
if (isOpSuccess) {
try {
// send one last byte to indicate that the resource is cleaned.
reply.writeChar('d');
} catch (IOException ignored) {
}
}
IOUtils.closeStream(reply);
IOUtils.closeStream(blockSender);
}
//update metrics
datanode.metrics.addCopyBlockOp(elapsed());
}
  /**
   * Serves a REPLACE_BLOCK request (balancer/mover): obtains a replica of
   * {@code block} and stores it locally on {@code storageType}, then asks the
   * namenode to delete the replica identified by {@code delHint}.
   *
   * Two paths: if {@code proxySource} is this datanode, the replica is moved
   * between local storages; otherwise this node connects to the proxy
   * datanode, issues COPY_BLOCK to it, and receives the streamed replica.
   * The response to the original requester is always sent from the finally
   * block, whether the move succeeded or failed.
   *
   * @param block block to receive
   * @param storageType target storage type for the new replica
   * @param blockToken access token, checked with AccessMode.REPLACE
   * @param delHint hint forwarded to the namenode for replica deletion
   * @param proxySource datanode to pull the replica from
   * @param storageId target storage id (may constrain token check)
   * @throws IOException on failure of the move/copy; rethrown after the
   *         error response has been recorded
   */
  @Override
  public void replaceBlock(final ExtendedBlock block,
      final StorageType storageType,
      final Token<BlockTokenIdentifier> blockToken,
      final String delHint,
      final DatanodeInfo proxySource,
      final String storageId) throws IOException {
    updateCurrentThreadName("Replacing block " + block + " from " + delHint);
    DataOutputStream replyOut = new DataOutputStream(getOutputStream());
    checkAccess(replyOut, true, block, blockToken,
        Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE,
        new StorageType[]{storageType},
        new String[]{storageId});
    // Bound the number of concurrent balancing moves; released in finally.
    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
      String msg = "Not able to receive block " + block.getBlockId() +
          " from " + peer.getRemoteAddressString() + " because threads " +
          "quota=" + dataXceiverServer.balanceThrottler.getMaxConcurrentMovers() + " is exceeded.";
      LOG.warn(msg);
      sendResponse(ERROR, msg);
      return;
    }
    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    Status opStatus = SUCCESS;
    String errMsg = null;
    DataInputStream proxyReply = null;
    boolean IoeDuringCopyBlockOperation = false;
    try {
      // Move the block to different storage in the same datanode
      if (proxySource.equals(datanode.getDatanodeId())) {
        ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
            storageType, storageId);
        if (oldReplica != null) {
          LOG.info("Moved {} from StorageType {} to {}",
              block, oldReplica.getVolume().getStorageType(), storageType);
        }
      } else {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
        // get the output stream to the proxy
        final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
        LOG.debug("Connecting to datanode {}", dnAddr);
        InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
        proxySock = datanode.newSocket();
        NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
        proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
        proxySock.setSoTimeout(dnConf.socketTimeout);
        proxySock.setKeepAlive(true);
        OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
            dnConf.socketWriteTimeout);
        InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
        // Wrap the proxy streams in SASL-negotiated streams before buffering.
        DataEncryptionKeyFactory keyFactory =
            datanode.getDataEncryptionKeyFactoryForBlock(block);
        IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock,
            unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
        unbufProxyOut = saslStreams.out;
        unbufProxyIn = saslStreams.in;
        proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut,
            smallBufferSize));
        proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn,
            ioFileBufferSize));
        /* send request to the proxy */
        // Errors inside the copy exchange are already counted by the proxy
        // side, so the flag suppresses double-counting in the catch below.
        IoeDuringCopyBlockOperation = true;
        new Sender(proxyOut).copyBlock(block, blockToken);
        IoeDuringCopyBlockOperation = false;
        // receive the response from the proxy
        BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
            PBHelperClient.vintPrefixed(proxyReply));
        String logInfo = "copy block " + block + " from "
            + proxySock.getRemoteSocketAddress();
        DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo, true);
        // get checksum info about the block we're copying
        ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
        DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(
            checksumInfo.getChecksum());
        // open a block receiver and check if the block does not exist
        setCurrentBlockReceiver(getBlockReceiver(block, storageType,
            proxyReply, proxySock.getRemoteSocketAddress().toString(),
            proxySock.getLocalSocketAddress().toString(),
            null, 0, 0, 0, "", null, datanode, remoteChecksum,
            CachingStrategy.newDropBehind(), false, false, storageId));
        // receive a block
        blockReceiver.receiveBlock(null, null, replyOut, null,
            dataXceiverServer.balanceThrottler, null, true);
        // notify name node
        final Replica r = blockReceiver.getReplica();
        datanode.notifyNamenodeReceivedBlock(
            block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
        LOG.info("Moved {} from {}, delHint={}",
            block, peer.getRemoteAddressString(), delHint);
        datanode.metrics.incrReplaceBlockOpToOtherHost();
      }
    } catch (IOException ioe) {
      opStatus = ERROR;
      if (ioe instanceof BlockPinningException) {
        opStatus = Status.ERROR_BLOCK_PINNED;
      }
      errMsg = "opReplaceBlock " + block + " received exception " + ioe;
      LOG.info(errMsg);
      if (!IoeDuringCopyBlockOperation) {
        // Don't double count IO errors
        incrDatanodeNetworkErrors();
      }
      throw ioe;
    } finally {
      // receive the last byte that indicates the proxy released its thread resource
      if (opStatus == SUCCESS && proxyReply != null) {
        try {
          proxyReply.readChar();
        } catch (IOException ignored) {
        }
      }
      // now release the thread resource
      dataXceiverServer.balanceThrottler.release();
      // send response back
      try {
        sendResponse(opStatus, errMsg);
      } catch (IOException ioe) {
        LOG.warn("Error writing reply back to {}",
            peer.getRemoteAddressString());
        incrDatanodeNetworkErrors();
      }
      IOUtils.closeStream(proxyOut);
      IOUtils.closeStream(blockReceiver);
      IOUtils.closeStream(proxyReply);
      IOUtils.closeStream(replyOut);
    }
    //update metrics
    datanode.metrics.addReplaceBlockOp(elapsed());
  }
  /**
   * Factory for {@link BlockReceiver}; separated so tests can override it and
   * inject a mock/instrumented receiver. All parameters are forwarded to the
   * BlockReceiver constructor unchanged and in the same order.
   */
  @VisibleForTesting
  BlockReceiver getBlockReceiver(
      final ExtendedBlock block, final StorageType storageType,
      final DataInputStream in,
      final String inAddr, final String myAddr,
      final BlockConstructionStage stage,
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
      final String clientname, final DatanodeInfo srcDataNode,
      final DataNode dn, DataChecksum requestedChecksum,
      CachingStrategy cachingStrategy,
      final boolean allowLazyPersist,
      final boolean pinning,
      final String storageId) throws IOException {
    return new BlockReceiver(block, storageType, in,
        inAddr, myAddr, stage, newGs, minBytesRcvd, maxBytesRcvd,
        clientname, srcDataNode, dn, requestedChecksum,
        cachingStrategy, allowLazyPersist, pinning, storageId);
  }
/**
* Separated for testing.
* @return
*/
@VisibleForTesting
DataOutputStream getBufferedOutputStream() {
return new DataOutputStream(
new BufferedOutputStream(getOutputStream(), smallBufferSize));
}
  /** @return milliseconds elapsed since this operation started (monotonic clock). */
  private long elapsed() {
    return monotonicNow() - opStartTime;
  }
  /**
   * Utility function for sending a response on this xceiver's own
   * (unbuffered) connection stream.
   *
   * @param status status message to write
   * @param message message to send to the client or other DN
   * @throws IOException if the response cannot be written to the peer
   */
  private void sendResponse(Status status,
      String message) throws IOException {
    writeResponse(status, message, getOutputStream());
  }
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
.setStatus(status);
if (message != null) {
response.setMessage(message);
}
response.build().writeDelimitedTo(out);
out.flush();
}
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
DataOutputStream out) throws IOException {
ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
.setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
.setChunkOffset(blockSender.getOffset())
.build();
BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
.setStatus(SUCCESS)
.setReadOpChecksumInfo(ckInfo)
.build();
response.writeDelimitedTo(out);
out.flush();
}
  /** Counts one network error against this peer (keyed by address with port stripped). */
  private void incrDatanodeNetworkErrors() {
    datanode.incrDatanodeNetworkErrors(remoteAddressWithoutPort);
  }
/**
* Wait until the BP is registered, upto the configured amount of time.
* Throws an exception if times out, which should fail the client request.
* @param block requested block
*/
void checkAndWaitForBP(final ExtendedBlock block)
throws IOException {
String bpId = block.getBlockPoolId();
// The registration is only missing in relatively short time window.
// Optimistically perform this first.
try {
datanode.getDNRegistrationForBP(bpId);
return;
} catch (IOException ioe) {
// not registered
}
// retry
long bpReadyTimeout = dnConf.getBpReadyTimeout();
StopWatch sw = new StopWatch();
sw.start();
while (sw.now(TimeUnit.SECONDS) <= bpReadyTimeout) {
try {
datanode.getDNRegistrationForBP(bpId);
return;
} catch (IOException ioe) {
// not registered
}
// sleep before trying again
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
throw new IOException("Interrupted while serving request. Aborting.");
}
}
// failed to obtain registration.
throw new IOException("Not ready to serve the block pool, " + bpId + ".");
}
  /**
   * Convenience overload of the full checkAccess with no storage-type or
   * storage-id constraints on the token check.
   */
  private void checkAccess(OutputStream out, final boolean reply,
      ExtendedBlock blk, Token<BlockTokenIdentifier> t, Op op,
      BlockTokenIdentifier.AccessMode mode) throws IOException {
    checkAccess(out, reply, blk, t, op, mode, null, null);
  }
  /**
   * Verifies the block access token (when token auth is enabled) before an
   * op is served. First waits for the block pool to register. On an invalid
   * token, optionally writes an ERROR_ACCESS_TOKEN response to {@code out}
   * (for WRITE ops it includes this node's xfer address as the first bad
   * link so the client can exclude it), closes the stream, and rethrows.
   *
   * @throws IOException including InvalidToken on a failed token check
   */
  private void checkAccess(OutputStream out, final boolean reply,
      final ExtendedBlock blk,
      final Token<BlockTokenIdentifier> t,
      final Op op,
      final BlockTokenIdentifier.AccessMode mode,
      final StorageType[] storageTypes,
      final String[] storageIds) throws IOException {
    checkAndWaitForBP(blk);
    if (datanode.isBlockTokenEnabled) {
      LOG.debug("Checking block access token for block '{}' with mode '{}'",
          blk.getBlockId(), mode);
      try {
        datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
            storageTypes, storageIds);
      } catch(InvalidToken e) {
        try {
          if (reply) {
            BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
                .setStatus(ERROR_ACCESS_TOKEN);
            if (mode == BlockTokenIdentifier.AccessMode.WRITE) {
              DatanodeRegistration dnR =
                  datanode.getDNRegistrationForBP(blk.getBlockPoolId());
              // NB: Unconditionally using the xfer addr w/o hostname
              resp.setFirstBadLink(dnR.getXferAddr());
            }
            resp.build().writeDelimitedTo(out);
            out.flush();
          }
          LOG.warn("Block token verification failed: op={}, " +
              "remoteAddress={}, message={}",
              op, remoteAddress, e.getLocalizedMessage());
          throw e;
        } finally {
          IOUtils.closeStream(out);
        }
      }
    }
  }
private void incrReadWriteOpMetrics(Op op) {
if (Op.READ_BLOCK.equals(op)) {
datanode.getMetrics().incrDataNodeReadActiveXceiversCount();
} else if (Op.WRITE_BLOCK.equals(op)) {
datanode.getMetrics().incrDataNodeWriteActiveXceiversCount();
}
}
private void decrReadWriteOpMetrics(Op op) {
if (Op.READ_BLOCK.equals(op)) {
datanode.getMetrics().decrDataNodeReadActiveXceiversCount();
} else if (Op.WRITE_BLOCK.equals(op)) {
datanode.getMetrics().decrDataNodeWriteActiveXceiversCount();
}
}
}
| DataXceiver |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyConnectionManagerTest.java | {
"start": 1470,
"end": 4225
} | class ____ {
/**
* Tests that the number of arenas and number of threads of the client and server are set to the
* same number, that is the number of configured task slots.
*/
@Test
void testMatchingNumberOfArenasAndThreadsAsDefault() throws Exception {
// Expected number of arenas and threads
int numberOfSlots = 2;
NettyConnectionManager connectionManager;
{
NettyConfig config =
new NettyConfig(
InetAddress.getLocalHost(),
0,
1024,
numberOfSlots,
new Configuration());
connectionManager = createNettyConnectionManager(config);
connectionManager.start();
}
assertThat(connectionManager)
.withFailMessage("connectionManager is null due to fail to get a free port")
.isNotNull();
assertThat(connectionManager.getBufferPool().getNumberOfArenas()).isEqualTo(numberOfSlots);
{
// Client event loop group
Bootstrap boostrap = connectionManager.getClient().getBootstrap();
EventLoopGroup group = boostrap.config().group();
Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
f.setAccessible(true);
Object[] eventExecutors = (Object[]) f.get(group);
assertThat(eventExecutors).hasSize(numberOfSlots);
}
{
// Server event loop group
ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
EventLoopGroup group = bootstrap.config().group();
Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
f.setAccessible(true);
Object[] eventExecutors = (Object[]) f.get(group);
assertThat(eventExecutors).hasSize(numberOfSlots);
}
{
// Server child event loop group
ServerBootstrap bootstrap = connectionManager.getServer().getBootstrap();
EventLoopGroup group = bootstrap.childGroup();
Field f = group.getClass().getSuperclass().getSuperclass().getDeclaredField("children");
f.setAccessible(true);
Object[] eventExecutors = (Object[]) f.get(group);
assertThat(eventExecutors).hasSize(numberOfSlots);
}
}
    /**
     * Builds a connection manager over fresh partition/task-event managers.
     * NOTE(review): the trailing {@code true} flag's meaning is defined by the
     * NettyConnectionManager constructor — confirm against its signature.
     */
    private NettyConnectionManager createNettyConnectionManager(NettyConfig config) {
        return new NettyConnectionManager(
                new ResultPartitionManager(), new TaskEventDispatcher(), config, true);
    }
}
| NettyConnectionManagerTest |
java | hibernate__hibernate-orm | hibernate-vector/src/main/java/org/hibernate/vector/internal/MySQLFunctionContributor.java | {
"start": 331,
"end": 1400
} | class ____ implements FunctionContributor {
	/**
	 * Registers vector functions for MySQL 9.0+ only. MySQL exposes a single
	 * {@code distance(v1, v2, metric)} function, so each distance flavor maps
	 * onto it by metric name; inner product is derived by negating the 'dot'
	 * metric. Also registers {@code vector_dim} with {@code vector_dims} as an
	 * alternate key.
	 */
	@Override
	public void contributeFunctions(FunctionContributions functionContributions) {
		final Dialect dialect = functionContributions.getDialect();
		// Guard: vector support requires MySQL 9.0 or later.
		if ( dialect instanceof MySQLDialect mySQLDialect && mySQLDialect.getMySQLVersion().isSameOrAfter( 9, 0 ) ) {
			final VectorFunctionFactory vectorFunctionFactory = new VectorFunctionFactory( functionContributions );

			vectorFunctionFactory.cosineDistance( "distance(?1,?2,'cosine')" );
			vectorFunctionFactory.euclideanDistance( "distance(?1,?2,'euclidean')" );
			// inner product == -1 * negative inner product ('dot' metric)
			vectorFunctionFactory.innerProduct( "distance(?1,?2,'dot')*-1" );
			vectorFunctionFactory.negativeInnerProduct( "distance(?1,?2,'dot')" );

			vectorFunctionFactory.registerNamedVectorFunction(
					"vector_dim",
					functionContributions.getTypeConfiguration().getBasicTypeForJavaType( Integer.class ),
					1
			);
			functionContributions.getFunctionRegistry().registerAlternateKey( "vector_dims", "vector_dim" );
		}
	}
	/**
	 * Contributor ordering value (200). NOTE(review): presumably places this
	 * contributor after lower-ordinal ones — confirm against the
	 * {@code FunctionContributor#ordinal()} contract.
	 */
	@Override
	public int ordinal() {
		return 200;
	}
}
| MySQLFunctionContributor |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java | {
"start": 10392,
"end": 36640
} | class ____ {
// sample stat in a single line : 3910 (gpm) S 1 3910 3910 0 -1 4194624
// 83 0 0 0 0 0 0 0 16 0 1 0 7852 2408448 88 4294967295 134512640
// 134590050 3220521392 3220520036 10975138 0 0 4096 134234626
// 4294967295 0 0 17 1 0 0
String pid;
String name;
String ppid;
String pgrpId;
String session;
String vmem = "0";
String rssmemPage = "0";
String utime = "0";
String stime = "0";
public ProcessStatInfo(String[] statEntries) {
pid = statEntries[0];
name = statEntries[1];
ppid = statEntries[2];
pgrpId = statEntries[3];
session = statEntries[4];
vmem = statEntries[5];
if (statEntries.length > 6) {
rssmemPage = statEntries[6];
}
if (statEntries.length > 7) {
utime = statEntries[7];
stime = statEntries[8];
}
}
// construct a line that mimics the procfs stat file.
// all unused numerical entries are set to 0.
public String getStatLine() {
return String.format("%s (%s) S %s %s %s 0 0 0"
+ " 0 0 0 0 %s %s 0 0 0 0 0 0 0 %s %s 0 0" + " 0 0 0 0 0 0 0 0"
+ " 0 0 0 0 0", pid, name, ppid, pgrpId, session, utime, stime, vmem,
rssmemPage);
}
}
public ProcessSmapMemoryInfo constructMemoryMappingInfo(String address,
String[] entries) {
ProcessSmapMemoryInfo info = new ProcessSmapMemoryInfo(address);
info.setMemInfo(MemInfo.SIZE.name(), entries[0]);
info.setMemInfo(MemInfo.RSS.name(), entries[1]);
info.setMemInfo(MemInfo.PSS.name(), entries[2]);
info.setMemInfo(MemInfo.SHARED_CLEAN.name(), entries[3]);
info.setMemInfo(MemInfo.SHARED_DIRTY.name(), entries[4]);
info.setMemInfo(MemInfo.PRIVATE_CLEAN.name(), entries[5]);
info.setMemInfo(MemInfo.PRIVATE_DIRTY.name(), entries[6]);
info.setMemInfo(MemInfo.REFERENCED.name(), entries[7]);
info.setMemInfo(MemInfo.ANONYMOUS.name(), entries[8]);
info.setMemInfo(MemInfo.ANON_HUGE_PAGES.name(), entries[9]);
info.setMemInfo(MemInfo.SWAP.name(), entries[10]);
info.setMemInfo(MemInfo.KERNEL_PAGE_SIZE.name(), entries[11]);
info.setMemInfo(MemInfo.MMU_PAGE_SIZE.name(), entries[12]);
return info;
}
  /**
   * Populates each process's smaps record with four synthetic memory
   * mappings. The fixture values are relied upon byte-for-byte by the
   * RSS assertions elsewhere in this test class.
   */
  public void createMemoryMappingInfo(ProcessTreeSmapMemInfo[] procMemInfo) {
    for (int i = 0; i < procMemInfo.length; i++) {
      // Construct 4 memory mappings per process.
      // As per min(Shared_Dirty, Pss) + Private_Clean + Private_Dirty
      // and not including r--s, r-xs, we should get 100 KB per process
      List<ProcessSmapMemoryInfo> memoryMappingList =
          procMemInfo[i].getMemoryInfoList();
      memoryMappingList.add(constructMemoryMappingInfo(
          "7f56c177c000-7f56c177d000 "
              + "rw-p 00010000 08:02 40371558 "
              + "/grid/0/jdk1.7.0_25/jre/lib/amd64/libnio.so",
          // Format: size, rss, pss, shared_clean, shared_dirty, private_clean
          // private_dirty, referenced, anon, anon-huge-pages, swap,
          // kernel_page_size, mmu_page_size
          new String[] {"4", "4", "25", "4", "25", "15", "10", "4", "10", "0",
              "0", "4", "4"}));
      // r--s mapping: excluded from the smaps-based RSS computation
      memoryMappingList.add(constructMemoryMappingInfo(
          "7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
          new String[] {"4", "4", "25", "4", "0", "15", "10", "4", "10", "0",
              "0", "4", "4"}));
      // r-xs mapping: likewise excluded
      memoryMappingList.add(constructMemoryMappingInfo(
          "7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] {"4", "4",
              "25", "4", "0", "15", "10", "4", "10", "0", "0", "4", "4"}));
      memoryMappingList.add(constructMemoryMappingInfo(
          "7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] {"4", "4",
              "25", "4", "50", "15", "10", "4", "10", "0", "0", "4", "4"}));
    }
  }
  /**
   * A basic test that creates a few process directories and writes stat files.
   * Verifies that the cpu time and memory is correctly computed.
   *
   * @throws IOException
   *           if there was a problem setting up the fake procfs directories or
   *           files.
   */
  @Test
  @Timeout(30000)
  void testCpuAndMemoryForProcessTree() throws IOException {

    // test processes
    String[] pids = {"100", "200", "300", "400"};
    ControlledClock testClock = new ControlledClock();
    testClock.setTime(0);
    // create the fake procfs root directory.
    File procfsRootDir = new File(TEST_ROOT_DIR, "proc");

    try {
      setupProcfsRootDir(procfsRootDir);
      setupPidDirs(procfsRootDir, pids);

      // create stat objects.
      // assuming processes 100, 200, 300 are in tree and 400 is not.
      ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
      procInfos[0] =
          new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100",
              "100000", "100", "1000", "200"});
      procInfos[1] =
          new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
              "100", "200000", "200", "2000", "400"});
      procInfos[2] =
          new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
              "100", "300000", "300", "3000", "600"});
      procInfos[3] =
          new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
              "400000", "400", "4000", "800"});

      ProcessTreeSmapMemInfo[] memInfo = new ProcessTreeSmapMemInfo[4];
      memInfo[0] = new ProcessTreeSmapMemInfo("100");
      memInfo[1] = new ProcessTreeSmapMemInfo("200");
      memInfo[2] = new ProcessTreeSmapMemInfo("300");
      memInfo[3] = new ProcessTreeSmapMemInfo("400");
      createMemoryMappingInfo(memInfo);
      writeStatFiles(procfsRootDir, pids, procInfos, memInfo);

      // crank up the process tree class.
      Configuration conf = new Configuration();
      ProcfsBasedProcessTree processTree =
          createProcessTree("100", procfsRootDir.getAbsolutePath(), testClock);
      processTree.setConf(conf);
      // build the process tree.
      processTree.updateProcessTree();

      // verify virtual memory: sum of vmem for pids 100, 200, 300
      assertEquals(600000L, processTree.getVirtualMemorySize(), "Virtual memory does not match");

      // verify rss memory: sum of rss pages (200+400+600=600... see fixture)
      // scaled by the platform page size; UNAVAILABLE when page size unknown.
      long cumuRssMem =
          ProcfsBasedProcessTree.PAGE_SIZE > 0
              ? 600L * ProcfsBasedProcessTree.PAGE_SIZE :
              ResourceCalculatorProcessTree.UNAVAILABLE;
      assertEquals(cumuRssMem,
          processTree.getRssMemorySize(),
          "rss memory does not match");

      // verify cumulative cpu time: (utime + stime) over the in-tree
      // processes, in jiffies, scaled to millis.
      long cumuCpuTime =
          ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0
              ? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
      assertEquals(cumuCpuTime,
          processTree.getCumulativeCpuTime(),
          "Cumulative cpu time does not match");

      // verify CPU usage
      assertEquals(-1.0, processTree.getCpuUsagePercent(),
          0.01,
          "Percent CPU time should be set to -1 initially");

      // Check by enabling smaps
      setSmapsInProceTree(processTree, true);
      // anon (exclude r-xs,r--s)
      assertEquals((20 * KB_TO_BYTES * 3), processTree.getRssMemorySize(),
          "rss memory does not match");

      // test the cpu time again to see if it cumulates
      procInfos[0] =
          new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100",
              "100000", "100", "2000", "300"});
      procInfos[1] =
          new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
              "100", "200000", "200", "3000", "500"});
      writeStatFiles(procfsRootDir, pids, procInfos, memInfo);

      // advance the controlled clock so a usage percentage can be derived
      long elapsedTimeBetweenUpdatesMsec = 200000;
      testClock.setTime(elapsedTimeBetweenUpdatesMsec);
      // build the process tree.
      processTree.updateProcessTree();

      // verify cumulative cpu time again
      long prevCumuCpuTime = cumuCpuTime;
      cumuCpuTime =
          ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0
              ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
      assertEquals(cumuCpuTime,
          processTree.getCumulativeCpuTime(),
          "Cumulative cpu time does not match");

      double expectedCpuUsagePercent =
          (ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0) ?
              (cumuCpuTime - prevCumuCpuTime) * 100.0 /
                  elapsedTimeBetweenUpdatesMsec : 0;
      // expectedCpuUsagePercent is given by (94000L - 72000) * 100/
      // 200000;
      // which in this case is 11. Lets verify that first
      assertEquals(11, expectedCpuUsagePercent, 0.001);
      assertEquals(expectedCpuUsagePercent,
          processTree.getCpuUsagePercent(),
          0.01,
          "Percent CPU time is not correct expected " +
              expectedCpuUsagePercent);
    } finally {
      FileUtil.fullyDelete(procfsRootDir);
    }
  }
private void setSmapsInProceTree(ProcfsBasedProcessTree processTree,
boolean enableFlag) {
Configuration conf = processTree.getConf();
if (conf == null) {
conf = new Configuration();
}
conf.setBoolean(YarnConfiguration.PROCFS_USE_SMAPS_BASED_RSS_ENABLED, enableFlag);
processTree.setConf(conf);
processTree.updateProcessTree();
}
/**
* Tests that cumulative memory is computed only for processes older than a
* given age.
*
* @throws IOException
* if there was a problem setting up the fake procfs directories or
* files.
*/
@Test
@Timeout(30000)
void testMemForOlderProcesses() throws IOException {
testMemForOlderProcesses(false);
testMemForOlderProcesses(true);
}
  /**
   * Scenario body for age-based memory accounting: processes added fewer
   * than {@code age} updateProcessTree() iterations ago must be excluded
   * from getVirtualMemorySize(age) / getRssMemorySize(age).
   *
   * @param smapEnabled whether smaps-based RSS accounting is enabled
   */
  private void testMemForOlderProcesses(boolean smapEnabled) throws IOException {
    // initial list of processes
    String[] pids = { "100", "200", "300", "400" };
    // create the fake procfs root directory.
    File procfsRootDir = new File(TEST_ROOT_DIR, "proc");

    try {
      setupProcfsRootDir(procfsRootDir);
      setupPidDirs(procfsRootDir, pids);

      // create stat objects.
      // assuming 100, 200 and 400 are in tree, 300 is not.
      ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
      procInfos[0] =
          new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100",
              "100000", "100"});
      procInfos[1] =
          new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
              "100", "200000", "200"});
      procInfos[2] =
          new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
              "300000", "300"});
      procInfos[3] =
          new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
              "100", "400000", "400"});
      // write smap information invariably for testing
      ProcessTreeSmapMemInfo[] memInfo = new ProcessTreeSmapMemInfo[4];
      memInfo[0] = new ProcessTreeSmapMemInfo("100");
      memInfo[1] = new ProcessTreeSmapMemInfo("200");
      memInfo[2] = new ProcessTreeSmapMemInfo("300");
      memInfo[3] = new ProcessTreeSmapMemInfo("400");
      createMemoryMappingInfo(memInfo);
      writeStatFiles(procfsRootDir, pids, procInfos, memInfo);

      // crank up the process tree class.
      ProcfsBasedProcessTree processTree =
          createProcessTree("100", procfsRootDir.getAbsolutePath(),
              SystemClock.getInstance());
      setSmapsInProceTree(processTree, smapEnabled);

      // verify virtual memory: pids 100, 200, 400 are in the tree
      assertEquals(700000L, processTree.getVirtualMemorySize(), "Virtual memory does not match");
      // write one more process as child of 100.
      String[] newPids = { "500" };
      setupPidDirs(procfsRootDir, newPids);

      ProcessStatInfo[] newProcInfos = new ProcessStatInfo[1];
      newProcInfos[0] =
          new ProcessStatInfo(new String[] { "500", "proc5", "100", "100",
              "100", "500000", "500" });
      ProcessTreeSmapMemInfo[] newMemInfos = new ProcessTreeSmapMemInfo[1];
      newMemInfos[0] = new ProcessTreeSmapMemInfo("500");
      createMemoryMappingInfo(newMemInfos);
      writeStatFiles(procfsRootDir, newPids, newProcInfos, newMemInfos);

      // check memory includes the new process.
      processTree.updateProcessTree();
      assertEquals(1200000L, processTree.getVirtualMemorySize(),
          "vmem does not include new process");
      if (!smapEnabled) {
        long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
            1200L * ProcfsBasedProcessTree.PAGE_SIZE :
            ResourceCalculatorProcessTree.UNAVAILABLE;
        assertEquals(cumuRssMem, processTree.getRssMemorySize(),
            "rssmem does not include new process");
      } else {
        assertEquals(20 * KB_TO_BYTES * 4, processTree.getRssMemorySize(),
            "rssmem does not include new process");
      }

      // however processes older than 1 iteration will retain the older value
      assertEquals(700000L, processTree.getVirtualMemorySize(1),
          "vmem shouldn't have included new process");
      if (!smapEnabled) {
        long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
            700L * ProcfsBasedProcessTree.PAGE_SIZE :
            ResourceCalculatorProcessTree.UNAVAILABLE;
        assertEquals(cumuRssMem, processTree.getRssMemorySize(1),
            "rssmem shouldn't have included new process");
      } else {
        assertEquals(20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1),
            "rssmem shouldn't have included new process");
      }

      // one more process
      newPids = new String[] { "600" };
      setupPidDirs(procfsRootDir, newPids);

      newProcInfos = new ProcessStatInfo[1];
      newProcInfos[0] =
          new ProcessStatInfo(new String[] { "600", "proc6", "100", "100",
              "100", "600000", "600" });
      newMemInfos = new ProcessTreeSmapMemInfo[1];
      newMemInfos[0] = new ProcessTreeSmapMemInfo("600");
      createMemoryMappingInfo(newMemInfos);
      writeStatFiles(procfsRootDir, newPids, newProcInfos, newMemInfos);

      // refresh process tree
      processTree.updateProcessTree();

      // processes older than 2 iterations should be same as before.
      assertEquals(700000L, processTree.getVirtualMemorySize(2),
          "vmem shouldn't have included new processes");
      if (!smapEnabled) {
        long cumuRssMem =
            ProcfsBasedProcessTree.PAGE_SIZE > 0
                ? 700L * ProcfsBasedProcessTree.PAGE_SIZE :
                ResourceCalculatorProcessTree.UNAVAILABLE;
        assertEquals(cumuRssMem, processTree.getRssMemorySize(2),
            "rssmem shouldn't have included new processes");
      } else {
        assertEquals(20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2),
            "rssmem shouldn't have included new processes");
      }

      // processes older than 1 iteration should not include new process,
      // but include process 500
      assertEquals(1200000L, processTree.getVirtualMemorySize(1),
          "vmem shouldn't have included new processes");
      if (!smapEnabled) {
        long cumuRssMem =
            ProcfsBasedProcessTree.PAGE_SIZE > 0
                ? 1200L * ProcfsBasedProcessTree.PAGE_SIZE :
                ResourceCalculatorProcessTree.UNAVAILABLE;
        assertEquals(cumuRssMem, processTree.getRssMemorySize(1),
            "rssmem shouldn't have included new processes");
      } else {
        assertEquals(20 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1),
            "rssmem shouldn't have included new processes");
      }

      // no processes older than 3 iterations
      assertEquals(0, processTree.getVirtualMemorySize(3),
          "Getting non-zero vmem for processes older than 3 iterations");
      assertEquals(0, processTree.getRssMemorySize(3),
          "Getting non-zero rssmem for processes older than 3 iterations");
    } finally {
      FileUtil.fullyDelete(procfsRootDir);
    }
  }
  /**
   * Verifies ProcfsBasedProcessTree.checkPidPgrpidForMatch() in case of
   * 'constructProcessInfo() returning null' by not writing stat file for the
   * mock process
   *
   * @throws IOException
   *           if there was a problem setting up the fake procfs directories or
   *           files.
   */
  @Test
  @Timeout(30000)
  void testDestroyProcessTree() throws IOException {
    // test process
    String pid = "100";
    // create the fake procfs root directory.
    File procfsRootDir = new File(TEST_ROOT_DIR, "proc");

    try {
      setupProcfsRootDir(procfsRootDir);

      // crank up the process tree class.
      createProcessTree(pid, procfsRootDir.getAbsolutePath(),
          SystemClock.getInstance());

      // Let us not create stat file for pid 100.
      // With no stat file the pid/pgrpid check must treat it as a match.
      assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(pid,
          procfsRootDir.getAbsolutePath()));
    } finally {
      FileUtil.fullyDelete(procfsRootDir);
    }
  }
  /**
   * Test the correctness of process-tree dump: the dump lists exactly the
   * processes rooted at pid 100 (200, 300, 400, 500 are descendants; 600 is
   * not), each with its stat fields and full command line.
   *
   * @throws IOException
   */
  @Test
  @Timeout(30000)
  void testProcessTreeDump() throws IOException {

    String[] pids = {"100", "200", "300", "400", "500", "600"};

    File procfsRootDir = new File(TEST_ROOT_DIR, "proc");

    try {
      setupProcfsRootDir(procfsRootDir);
      setupPidDirs(procfsRootDir, pids);

      int numProcesses = pids.length;
      // Processes 200, 300, 400 and 500 are descendants of 100. 600 is not.
      ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
      procInfos[0] =
          new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100",
              "100000", "100", "1000", "200"});
      procInfos[1] =
          new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
              "100", "200000", "200", "2000", "400"});
      procInfos[2] =
          new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
              "100", "300000", "300", "3000", "600"});
      procInfos[3] =
          new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
              "100", "400000", "400", "4000", "800"});
      procInfos[4] =
          new ProcessStatInfo(new String[]{"500", "proc5", "400", "100",
              "100", "400000", "400", "4000", "800"});
      procInfos[5] =
          new ProcessStatInfo(new String[]{"600", "proc6", "1", "1", "1",
              "400000", "400", "4000", "800"});

      ProcessTreeSmapMemInfo[] memInfos = new ProcessTreeSmapMemInfo[6];
      memInfos[0] = new ProcessTreeSmapMemInfo("100");
      memInfos[1] = new ProcessTreeSmapMemInfo("200");
      memInfos[2] = new ProcessTreeSmapMemInfo("300");
      memInfos[3] = new ProcessTreeSmapMemInfo("400");
      memInfos[4] = new ProcessTreeSmapMemInfo("500");
      memInfos[5] = new ProcessTreeSmapMemInfo("600");

      String[] cmdLines = new String[numProcesses];
      cmdLines[0] = "proc1 arg1 arg2";
      cmdLines[1] = "process two arg3 arg4";
      cmdLines[2] = "proc(3) arg5 arg6";
      cmdLines[3] = "proc4 arg7 arg8";
      cmdLines[4] = "proc5 arg9 arg10";
      cmdLines[5] = "proc6 arg11 arg12";

      createMemoryMappingInfo(memInfos);
      writeStatFiles(procfsRootDir, pids, procInfos, memInfos);
      writeCmdLineFiles(procfsRootDir, pids, cmdLines);

      ProcfsBasedProcessTree processTree =
          createProcessTree("100", procfsRootDir.getAbsolutePath(),
              SystemClock.getInstance());
      // build the process tree.
      processTree.updateProcessTree();

      // Get the process-tree dump
      String processTreeDump = processTree.getProcessTreeDump();

      LOG.info("Process-tree dump follows: \n" + processTreeDump);
      assertTrue(processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME "
          + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
          + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"),
          "Process-tree dump doesn't start with a proper header");
      // every in-tree process (100..500) must appear with its full stat line
      for (int i = 0; i < 5; i++) {
        ProcessStatInfo p = procInfos[i];
        assertTrue(
            processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " "
                + p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime
                + " " + p.stime + " " + p.vmem + " " + p.rssmemPage + " "
                + cmdLines[i]),
            "Process-tree dump doesn't contain the cmdLineDump of process "
                + p.pid);
      }

      // 600 should not be in the dump
      ProcessStatInfo p = procInfos[5];
      assertFalse(
          processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " "
              + p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime + " "
              + p.stime + " " + p.vmem + " " + cmdLines[5]),
          "Process-tree dump shouldn't contain the cmdLineDump of process "
              + p.pid);
    } finally {
      FileUtil.fullyDelete(procfsRootDir);
    }
  }
protected static boolean isSetsidAvailable() {
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
String[] args = { "setsid", "bash", "-c", "echo $$" };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.warn("setsid is not available on this machine. So not using it.");
setsidSupported = false;
} finally { // handle the exit code
LOG.info("setsid exited with exit code " + shexec.getExitCode());
}
return setsidSupported;
}
/**
* Is the root-process alive? Used only in tests.
*
* @return true if the root-process is alive, false otherwise.
*/
private static boolean isAlive(String pid) {
try {
final String sigpid = isSetsidAvailable() ? "-" + pid : pid;
try {
sendSignal(sigpid, 0);
} catch (ExitCodeException e) {
return false;
}
return true;
} catch (IOException ignored) {
}
return false;
}
private static void sendSignal(String pid, int signal) throws IOException {
ShellCommandExecutor shexec = null;
String[] arg = { "kill", "-" + signal, "--", pid };
shexec = new ShellCommandExecutor(arg);
shexec.execute();
}
/**
* Is any of the subprocesses in the process-tree alive? Used only in tests.
*
* @return true if any of the processes in the process-tree is alive, false
* otherwise.
*/
private static boolean isAnyProcessInTreeAlive(
ProcfsBasedProcessTree processTree) {
for (String pId : processTree.getCurrentProcessIDs()) {
if (isAlive(pId)) {
return true;
}
}
return false;
}
/**
* Create a directory to mimic the procfs file system's root.
*
* @param procfsRootDir
* root directory to create.
* @throws IOException
* if could not delete the procfs root directory
*/
public static void setupProcfsRootDir(File procfsRootDir) throws IOException {
// cleanup any existing process root dir.
if (procfsRootDir.exists()) {
assertTrue(FileUtil.fullyDelete(procfsRootDir));
}
// create afresh
assertTrue(procfsRootDir.mkdirs());
}
/**
* Create PID directories under the specified procfs root directory
*
* @param procfsRootDir
* root directory of procfs file system
* @param pids
* the PID directories to create.
* @throws IOException
* If PID dirs could not be created
*/
public static void setupPidDirs(File procfsRootDir, String[] pids)
throws IOException {
for (String pid : pids) {
File pidDir = new File(procfsRootDir, pid);
FileUtils.forceMkdir(pidDir);
LOG.info("created pid dir: " + pidDir);
}
}
/**
* Write stat files under the specified pid directories with data setup in the
* corresponding ProcessStatInfo objects
*
* @param procfsRootDir
* root directory of procfs file system
* @param pids
* the PID directories under which to create the stat file
* @param procs
* corresponding ProcessStatInfo objects whose data should be written
* to the stat files.
* @throws IOException
* if stat files could not be written
*/
public static void writeStatFiles(File procfsRootDir, String[] pids,
ProcessStatInfo[] procs, ProcessTreeSmapMemInfo[] smaps)
throws IOException {
for (int i = 0; i < pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_STAT_FILE);
BufferedWriter bw = null;
try {
FileWriter fw = new FileWriter(statFile);
bw = new BufferedWriter(fw);
bw.write(procs[i].getStatLine());
LOG.info("wrote stat file for " + pids[i] + " with contents: "
+ procs[i].getStatLine());
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
if (smaps != null) {
File smapFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.SMAPS);
bw = null;
try {
FileWriter fw = new FileWriter(smapFile);
bw = new BufferedWriter(fw);
bw.write(smaps[i].toString());
bw.flush();
LOG.info("wrote smap file for " + pids[i] + " with contents: "
+ smaps[i].toString());
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
}
private static void writeCmdLineFiles(File procfsRootDir, String[] pids,
String[] cmdLines) throws IOException {
for (int i = 0; i < pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_CMDLINE_FILE);
BufferedWriter bw = null;
try {
bw = new BufferedWriter(new FileWriter(statFile));
bw.write(cmdLines[i]);
LOG.info("wrote command-line file for " + pids[i] + " with contents: "
+ cmdLines[i]);
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
}
| ProcessStatInfo |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/odps/parser/OdpsStatementParser.java | {
"start": 1963,
"end": 60248
} | class ____ extends HiveStatementParser {
    /** Builds a parser for the given ODPS SQL text. */
    public OdpsStatementParser(String sql) {
        super(new OdpsExprParser(sql));
        dbType = DbType.odps;
    }
    /** Builds a parser for the given ODPS SQL text with extra lexer features. */
    public OdpsStatementParser(String sql, SQLParserFeature... features) {
        super(new OdpsExprParser(sql, features));
        dbType = DbType.odps;
    }
    /** Builds a parser on top of an existing expression parser. */
    public OdpsStatementParser(SQLExprParser exprParser) {
        super(exprParser);
        dbType = DbType.odps;
    }
public SQLSelectStatement parseSelect() {
SQLSelect select = new OdpsSelectParser(this.exprParser)
.select();
// if (select.getWithSubQuery() == null && select.getQuery() instanceof SQLSelectQueryBlock) {
// SQLSelectQueryBlock queryBlock = (SQLSelectQueryBlock) select.getQuery();
// if (queryBlock.getFrom() == null && queryBlock.getWhere() != null) {
// throw new ParserException("none from query not support where clause.");
// }
// }
return new SQLSelectStatement(select, DbType.odps);
}
public SQLCreateTableStatement parseCreateTable() {
SQLCreateTableParser parser = new OdpsCreateTableParser(this.exprParser);
return parser.parseCreateTable();
}
    /** Supplies the ODPS-specific CREATE TABLE parser instance. */
    public SQLCreateTableParser getSQLCreateTableParser() {
        return new OdpsCreateTableParser(this.exprParser);
    }
    /**
     * Attempts to parse statements specific to the ODPS dialect (ADD/REMOVE,
     * READ, LIST, DESC, COUNT, MSCK, EXSTORE, PAI, variable declaration and
     * assignment, IF, etc.). On success the parsed statement is appended to
     * {@code statementList}.
     *
     * @param statementList receives the statement parsed from the lexer stream
     * @return true if an ODPS-specific statement was consumed; false to let the
     *         caller fall back to the generic parsing path
     */
    public boolean parseStatementListDialect(List<SQLStatement> statementList) {
        // FROM ... INSERT ... : multi-insert form handled by parseInsert().
        if (lexer.token() == Token.FROM) {
            SQLStatement stmt = this.parseInsert();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("ANALYZE")) {
            SQLStatement stmt = parseAnalyze();
            statementList.add(stmt);
            return true;
        }
        // ADD STATISTIC | USER | ACCOUNTPROVIDER | TABLE | FILE/JAR/PY/ARCHIVE.
        if (lexer.identifierEquals("ADD")) {
            lexer.nextToken();
            if (lexer.identifierEquals("STATISTIC")) {
                lexer.nextToken();
                OdpsAddStatisticStatement stmt = new OdpsAddStatisticStatement();
                stmt.setTable(this.exprParser.name());
                stmt.setStatisticClause(parseStaticClause());
                statementList.add(stmt);
                return true;
            }
            if (lexer.token() == Token.USER) {
                lexer.nextToken();
                OdpsAddUserStatement stmt = new OdpsAddUserStatement();
                stmt.setUser(this.exprParser.name());
                statementList.add(stmt);
                return true;
            }
            if (lexer.identifierEquals("ACCOUNTPROVIDER")) {
                lexer.nextToken();
                OdpsAddAccountProviderStatement stmt = new OdpsAddAccountProviderStatement();
                stmt.setProvider(this.exprParser.name());
                statementList.add(stmt);
                return true;
            }
            if (lexer.token() == Token.TABLE) {
                lexer.nextToken();
                OdpsAddTableStatement stmt = new OdpsAddTableStatement();
                stmt.setTable(this.exprParser.name());
                if (lexer.token() == Token.PARTITION) {
                    lexer.nextToken();
                    this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
                }
                if (lexer.token() == Token.AS) {
                    lexer.nextToken();
                    SQLName name = this.exprParser.name();
                    stmt.getTable().setAlias(name.toString());
                }
                if (lexer.token() == Token.COMMENT) {
                    lexer.nextToken();
                    stmt.setComment(this.exprParser.primary());
                }
                // "-f" force flag is lexed as SUB followed by identifier "f".
                if (lexer.token() == Token.SUB) {
                    lexer.nextToken();
                    acceptIdentifier("f");
                    stmt.setForce(true);
                }
                if (lexer.token() == Token.TO) {
                    lexer.nextToken();
                    acceptIdentifier("PACKAGE");
                    SQLName packageName = this.exprParser.name();
                    stmt.setToPackage(packageName);
                    if (lexer.token() == Token.WITH) {
                        lexer.nextToken();
                        acceptIdentifier("PRIVILEGES");
                        parsePrivilege(stmt.getPrivileges(), stmt);
                    }
                }
                statementList.add(stmt);
                return true;
            }
            if (lexer.identifierEquals(FnvHash.Constants.FILE)
                    || lexer.identifierEquals(FnvHash.Constants.JAR)
                    || lexer.identifierEquals(FnvHash.Constants.PY)
                    || lexer.identifierEquals(FnvHash.Constants.ARCHIVE)) {
                OdpsAddFileStatement stmt = new OdpsAddFileStatement();
                long hash = lexer.hashLCase();
                if (hash == FnvHash.Constants.JAR) {
                    stmt.setType(OdpsAddFileStatement.FileType.JAR);
                } else if (hash == FnvHash.Constants.PY) {
                    stmt.setType(OdpsAddFileStatement.FileType.PY);
                } else if (hash == FnvHash.Constants.ARCHIVE) {
                    stmt.setType(OdpsAddFileStatement.FileType.ARCHIVE);
                }
                // File paths may contain characters illegal in identifiers,
                // so the lexer switches to raw-path scanning here.
                lexer.nextPath();
                String path = lexer.stringVal();
                lexer.nextToken();
                stmt.setFile(path);
                if (lexer.token() == Token.AS) {
                    lexer.nextToken();
                    SQLName name = this.exprParser.name();
                    stmt.setAlias(name.toString());
                }
                if (lexer.token() == Token.COMMENT) {
                    lexer.nextToken();
                    stmt.setComment(this.exprParser.primary());
                }
                if (lexer.token() == Token.SUB) {
                    lexer.nextToken();
                    acceptIdentifier("f");
                    stmt.setForce(true);
                }
                statementList.add(stmt);
                return true;
            }
            throw new ParserException("TODO " + lexer.info());
        }
        // REMOVE STATISTIC | USER.
        if (lexer.identifierEquals("REMOVE")) {
            lexer.nextToken();
            if (lexer.identifierEquals("STATISTIC")) {
                lexer.nextToken();
                OdpsRemoveStatisticStatement stmt = new OdpsRemoveStatisticStatement();
                stmt.setTable(this.exprParser.name());
                stmt.setStatisticClause(parseStaticClause());
                statementList.add(stmt);
                return true;
            }
            if (lexer.token() == Token.USER) {
                lexer.nextToken();
                OdpsRemoveUserStatement stmt = new OdpsRemoveUserStatement();
                stmt.setUser((SQLIdentifierExpr) this.exprParser.name());
                statementList.add(stmt);
                return true;
            }
            throw new ParserException("TODO " + lexer.info());
        }
        // READ <table> [(cols)] [PARTITION (...)] [row-count].
        if (lexer.identifierEquals("READ")) {
            OdpsReadStatement stmt = new OdpsReadStatement();
            if (lexer.hasComment() && lexer.isKeepComments()) {
                stmt.addBeforeComment(lexer.readAndResetComments());
            }
            lexer.nextToken();
            stmt.setTable(this.exprParser.name());
            if (lexer.token() == Token.LPAREN) {
                lexer.nextToken();
                this.exprParser.names(stmt.getColumns(), stmt);
                accept(Token.RPAREN);
            }
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                accept(Token.LPAREN);
                parseAssignItems(stmt.getPartition(), stmt);
                accept(Token.RPAREN);
            }
            if (lexer.token() == Token.LITERAL_INT) {
                stmt.setRowCount(this.exprParser.primary());
            }
            statementList.add(stmt);
            return true;
        }
        // LIST <object>, with special-casing of TENANT ROLES / TEMPORARY OUTPUT.
        if (lexer.identifierEquals("LIST")) {
            OdpsListStmt stmt = new OdpsListStmt();
            lexer.nextToken();
            stmt.setObject(this.exprParser.expr());
            if (lexer.identifierEquals("ROLES")
                    && stmt.getObject() instanceof SQLIdentifierExpr && ((SQLIdentifierExpr) stmt.getObject()).nameEquals("TENANT")) {
                lexer.nextToken();
                stmt.setObject(new SQLIdentifierExpr("TENANT ROLES"));
            } else if (lexer.identifierEquals("OUTPUT")
                    && stmt.getObject() instanceof SQLIdentifierExpr && ((SQLIdentifierExpr) stmt.getObject()).nameEquals("TEMPORARY")) {
                lexer.nextToken();
                stmt.setObject(new SQLIdentifierExpr("TEMPORARY OUTPUT"));
            }
            statementList.add(stmt);
            return true;
        }
        if (lexer.token() == Token.DESC || lexer.identifierEquals("DESCRIBE")) {
            SQLStatement stmt = parseDescribe();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("WHOAMI")) {
            lexer.nextToken();
            SQLWhoamiStatement stmt = new SQLWhoamiStatement();
            stmt.setDbType(DbType.odps);
            statementList.add(stmt);
            return true;
        }
        // COUNT <table> [PARTITION (...)].
        if (lexer.identifierEquals("COUNT")) {
            lexer.nextToken();
            OdpsCountStatement stmt = new OdpsCountStatement();
            stmt.setTable(this.exprParser.name());
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            }
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("MSCK")) {
            SQLStatement stmt = parseMsck();
            statementList.add(stmt);
            return true;
        }
        // "alias" behaves like SET here.
        if (lexer.identifierEquals("alias")) {
            SQLStatement stmt = parseSet();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("EXSTORE")) {
            lexer.nextToken();
            OdpsExstoreStatement stmt = new OdpsExstoreStatement();
            SQLExpr table = this.exprParser.expr();
            stmt.setTable(new SQLExprTableSource(table));
            accept(Token.PARTITION);
            this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("INSTALL")) {
            lexer.nextToken();
            acceptIdentifier("PACKAGE");
            OdpsInstallPackageStatement stmt = new OdpsInstallPackageStatement();
            stmt.setPackageName(
                    this.exprParser.name()
            );
            statementList.add(stmt);
            return true;
        }
        // PAI <raw arguments up to the terminating ';'>: the argument text is
        // captured verbatim, skipping ';' that are immediately followed by a
        // quote character (treated as part of a quoted argument).
        if (lexer.identifierEquals("PAI")) {
            lexer.nextToken();
            int semiPos = lexer.text.indexOf(';', lexer.pos());
            while (semiPos != -1 && semiPos + 2 < lexer.text.length()) {
                char next = lexer.text.charAt(semiPos + 1);
                if (next == '"' || next == '\'') {
                    semiPos = lexer.text.indexOf(';', semiPos + 1);
                    continue;
                }
                break;
            }
            String arguments;
            if (semiPos != -1) {
                int count = semiPos - lexer.pos();
                arguments = lexer.subString(lexer.pos(), count);
                lexer.reset(semiPos);
            } else {
                arguments = lexer.subString(lexer.pos());
                lexer.reset(lexer.text.length());
            }
            lexer.nextToken();
            OdpsPAIStmt stmt = new OdpsPAIStmt();
            stmt.setArguments(arguments);
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals("COPY")) {
            SQLStatement stmt = parseCopy();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.KILL)) {
            SQLStatement stmt = parseKill();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.LOAD)) {
            HiveLoadDataStatement stmt = parseLoad();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.MERGE)) {
            SQLStatement stmt = parseMerge();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.CLONE)) {
            SQLStatement stmt = parseClone();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.UNLOAD)) {
            SQLStatement stmt = parseUnload();
            statementList.add(stmt);
            return true;
        }
        if (lexer.identifierEquals(FnvHash.Constants.BEGIN)) {
            SQLStatement stmt = parseBlock();
            statementList.add(stmt);
            return true;
        }
        // RESTORE TABLE <name> [(props)] [PARTITION (...)] [TO LSN <expr>] [AS alias].
        if (lexer.identifierEquals(FnvHash.Constants.RESTORE)) {
            lexer.nextToken();
            accept(Token.TABLE);
            OdpsRestoreStatement stmt = new OdpsRestoreStatement();
            stmt.setTable(this.exprParser.name());
            if (lexer.token() == Token.LPAREN) {
                this.exprParser.parseAssignItem(stmt.getProperties(), stmt);
            }
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            }
            if (lexer.token() == Token.TO) {
                lexer.nextToken();
                acceptIdentifier("LSN");
                stmt.setTo(
                        this.exprParser.expr()
                );
            }
            if (lexer.token() == Token.AS) {
                lexer.nextToken();
                stmt.setAlias(
                        this.alias()
                );
            }
            statementList.add(stmt);
            return true;
        }
        // UNDO TABLE <name> [PARTITION (...)] TO <expr>.
        if (lexer.identifierEquals(FnvHash.Constants.UNDO)) {
            lexer.nextToken();
            accept(Token.TABLE);
            OdpsUndoTableStatement stmt = new OdpsUndoTableStatement();
            stmt.setTable(
                    new SQLExprTableSource(
                            this.exprParser.name()
                    )
            );
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            }
            accept(Token.TO);
            stmt.setTo(
                    this.exprParser.expr()
            );
            statementList.add(stmt);
            return true;
        }
        // Bare FUNCTION keyword: a declared (inline) function definition.
        if (lexer.token() == Token.FUNCTION) {
            HiveCreateFunctionStatement stmt = (HiveCreateFunctionStatement) parseHiveCreateFunction();
            stmt.setDeclare(true);
            statementList.add(stmt);
            return true;
        }
        // @variable: either an assignment (:=), possibly of a query alias,
        // or a variable declaration with optional type and initializer.
        if (lexer.token() == Token.VARIANT && lexer.stringVal().startsWith("@")) {
            Lexer.SavePoint mark = lexer.mark();
            String variant = lexer.stringVal();
            lexer.nextToken();
            if (lexer.token() == Token.COLONEQ) {
                lexer.nextToken();
                boolean cache = false;
                if (lexer.identifierEquals(FnvHash.Constants.CACHE)) {
                    lexer.nextToken();
                    accept(Token.ON);
                    cache = true;
                }
                // A '(' is ambiguous: scalar parenthesized expression or a
                // parenthesized subquery. Mark it so we can backtrack.
                Lexer.SavePoint lpMark = null;
                if (lexer.token() == Token.LPAREN) {
                    lpMark = lexer.mark();
                    lexer.nextToken();
                }
                switch (lexer.token()) {
                    case LITERAL_INT:
                    case LITERAL_FLOAT:
                    case LITERAL_CHARS:
                    case LITERAL_ALIAS:
                    case IDENTIFIER:
                    case CASE:
                    case CAST:
                    case IF:
                    case VARIANT:
                    case REPLACE:
                    case NEW:
                    case SUB:
                    case TRUE:
                    case FALSE: {
                        // Scalar expression assignment: @v := <expr>.
                        if (lpMark != null) {
                            lexer.reset(lpMark);
                        }
                        SQLExpr expr = this.exprParser.expr();
                        SQLExprStatement stmt = new SQLExprStatement(
                                new SQLAssignItem(new SQLIdentifierExpr(variant), expr)
                        );
                        statementList.add(stmt);
                        return true;
                    }
                    default:
                        if (lpMark != null) {
                            lexer.reset(lpMark);
                        }
                        boolean paren = lexer.token() == Token.LPAREN;
                        Lexer.SavePoint parenMark = lexer.mark();
                        SQLSelect select;
                        try {
                            select = new OdpsSelectParser(this.exprParser)
                                    .select();
                        } catch (ParserException error) {
                            // Not a subquery after all: re-parse the
                            // parenthesized text as a plain expression.
                            if (paren) {
                                lexer.reset(parenMark);
                                SQLExpr expr = this.exprParser.expr();
                                SQLExprStatement stmt = new SQLExprStatement(
                                        new SQLAssignItem(new SQLIdentifierExpr(variant), expr)
                                );
                                statementList.add(stmt);
                                return true;
                            }
                            throw error;
                        }
                        switch (lexer.token()) {
                            case GT:
                            case GTEQ:
                            case EQ:
                            case LT:
                            case LTEQ:
                                // @v := (subquery) <op> ... : comparison expr.
                                statementList.add(
                                        new SQLExprStatement(
                                                new SQLAssignItem(new SQLIdentifierExpr(variant),
                                                        this.exprParser.exprRest(new SQLQueryExpr(select))
                                                )
                                        )
                                );
                                return true;
                            default:
                                break;
                        }
                        // @v := <query> : query alias statement.
                        SQLSelectStatement stmt = new SQLSelectStatement(select, dbType);
                        OdpsQueryAliasStatement aliasQueryStatement = new OdpsQueryAliasStatement(variant, stmt);
                        aliasQueryStatement.setCache(cache);
                        statementList.add(aliasQueryStatement);
                        return true;
                }
            }
            // Declaration: @v [type] [= or := initial value].
            OdpsDeclareVariableStatement stmt = new OdpsDeclareVariableStatement();
            if (lexer.token() != Token.EQ && lexer.token() != Token.SEMI && lexer.token() != Token.EOF) {
                stmt.setDataType(
                        this.exprParser.parseDataType()
                );
            }
            if (lexer.token() == Token.EQ || lexer.token() == Token.COLONEQ) {
                lexer.nextToken();
                stmt.setInitValue(
                        this.exprParser.expr()
                );
            }
            if (lexer.token() == Token.SEMI) {
                lexer.nextToken();
            }
            statementList.add(stmt);
            return true;
        }
        if (lexer.token() == Token.IF) {
            SQLStatement stmt = parseIf();
            statementList.add(stmt);
            return true;
        }
        // A lone CODE token terminating a statement is silently consumed.
        if (lexer.token() == Token.CODE) {
            Lexer.SavePoint mark = lexer.mark();
            lexer.nextToken();
            if (lexer.token() == Token.EOF || lexer.token() == Token.SEMI) {
                return true;
            }
            lexer.reset(mark);
        }
        if (identifierEquals("COST")) {
            SQLStatement stmt = parseCost();
            statementList.add(stmt);
            return true;
        }
        // Not an ODPS-specific statement; let the generic parser handle it.
        return false;
    }
    /**
     * Parses an IF statement: {@code IF <cond>} followed either by a single
     * statement or a {@code BEGIN ... END} block, with an optional ELSE part
     * of the same shape.
     *
     * @return the parsed IF statement
     */
    public SQLStatement parseIf() {
        accept(Token.IF);
        SQLIfStatement ifStmt = new SQLIfStatement();
        ifStmt.setCondition(
                this.exprParser.expr()
        );
        // THEN branch: BEGIN...END block or a single statement.
        if (lexer.identifierEquals("BEGIN")) {
            lexer.nextToken();
            parseStatementList(ifStmt.getStatements(), -1, ifStmt);
            accept(Token.END);
        } else {
            SQLStatement stmt = parseStatement();
            ifStmt.getStatements().add(stmt);
            stmt.setParent(ifStmt);
        }
        if (lexer.token() == Token.SEMI) {
            lexer.nextToken();
        }
        // Optional ELSE branch, same block-or-single-statement shape.
        if (lexer.token() == Token.ELSE) {
            lexer.nextToken();
            SQLIfStatement.Else elseItem = new SQLIfStatement.Else();
            if (lexer.identifierEquals("BEGIN")) {
                lexer.nextToken();
                parseStatementList(elseItem.getStatements(), -1, ifStmt);
                accept(Token.END);
            } else {
                SQLStatement stmt = parseStatement();
                elseItem.getStatements().add(stmt);
                stmt.setParent(elseItem);
            }
            ifStmt.setElseItem(elseItem);
        }
        return ifStmt;
    }
public SQLStatement parseKill() {
acceptIdentifier("KILL");
MySqlKillStatement stmt = new MySqlKillStatement();
SQLExpr instanceId = this.exprParser.primary();
stmt.setThreadId(instanceId);
return stmt;
}
    /**
     * Parses {@code UNLOAD FROM <table-or-subquery> [PARTITION (...)] INTO
     * [LOCATION <path>] [ROW FORMAT ...]} with trailing STORED AS/BY,
     * SERDEPROPERTIES and PROPERTIES clauses in any order.
     *
     * @return the parsed UNLOAD statement
     */
    public SQLStatement parseUnload() {
        acceptIdentifier("UNLOAD");
        OdpsUnloadStatement stmt = new OdpsUnloadStatement();
        accept(Token.FROM);
        // Source is either a subquery/parenthesized select or a table name.
        if (lexer.token() == Token.LPAREN || lexer.token() == Token.SELECT) {
            stmt.setFrom(
                    this.createSQLSelectParser().parseTableSource()
            );
        } else {
            stmt.setFrom(
                    this.exprParser.name()
            );
        }
        if (lexer.token() == Token.PARTITION) {
            lexer.nextToken();
            this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
        }
        accept(Token.INTO);
        if (lexer.identifierEquals("LOCATION")) {
            lexer.nextToken();
            stmt.setLocation(this.exprParser.primary());
        }
        if (lexer.identifierEquals("ROW")) {
            SQLExternalRecordFormat format = this.exprParser.parseRowFormat();
            stmt.setRowFormat(format);
        }
        // Trailing clauses may appear in any order; loop until none match.
        for (; ; ) {
            if (lexer.identifierEquals(FnvHash.Constants.STORED)) {
                lexer.nextToken();
                if (lexer.token() == Token.BY) {
                    lexer.nextToken();
                } else {
                    accept(Token.AS);
                }
                stmt.setStoredAs(
                        this.exprParser.name());
                continue;
            }
            if (lexer.token() == Token.WITH) {
                lexer.nextToken();
                acceptIdentifier("SERDEPROPERTIES");
                this.exprParser.parseAssignItem(stmt.getSerdeProperties(), stmt);
                continue;
            }
            if (identifierEquals("PROPERTIES")) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getProperties(), stmt);
                continue;
            }
            break;
        }
        return stmt;
    }
    /** Returns the underlying expression parser, typed as the ODPS variant. */
    @Override
    public OdpsExprParser getExprParser() {
        return (OdpsExprParser) exprParser;
    }
    /**
     * Parses {@code CLONE TABLE <src> [PARTITION (...)] TO <dst>
     * [IF EXISTS OVERWRITE|IGNORE]}.
     *
     * @return the parsed CLONE TABLE statement
     */
    public SQLStatement parseClone() {
        acceptIdentifier("CLONE");
        accept(Token.TABLE);
        SQLCloneTableStatement stmt = new SQLCloneTableStatement();
        stmt.setFrom(
                this.exprParser.name());
        if (lexer.token() == Token.PARTITION) {
            lexer.nextToken();
            this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
        }
        accept(Token.TO);
        stmt.setTo(
                this.exprParser.name());
        // Conflict policy when the destination already exists.
        if (lexer.token() == Token.IF) {
            lexer.nextToken();
            accept(Token.EXISTS);
            if (lexer.token() == OVERWRITE) {
                lexer.nextToken();
                stmt.setIfExistsOverwrite(true);
            } else {
                acceptIdentifier("IGNORE");
                stmt.setIfExistsIgnore(true);
            }
        }
        return stmt;
    }
public SQLStatement parseBlock() {
SQLBlockStatement block = new SQLBlockStatement();
if (lexer.identifierEquals(FnvHash.Constants.BEGIN)) {
lexer.nextToken();
} else {
accept(Token.BEGIN);
}
this.parseStatementList(block.getStatementList(), -1, block);
accept(Token.END);
return block;
}
protected OdpsStatisticClause parseStaticClause() {
if (lexer.identifierEquals("TABLE_COUNT")) {
lexer.nextToken();
return new OdpsStatisticClause.TableCount();
} else if (lexer.identifierEquals("NULL_VALUE")) {
lexer.nextToken();
OdpsStatisticClause.NullValue null_value = new OdpsStatisticClause.NullValue();
null_value.setColumn(this.exprParser.name());
return null_value;
} else if (lexer.identifierEquals("DISTINCT_VALUE")) {
lexer.nextToken();
OdpsStatisticClause.DistinctValue distinctValue = new OdpsStatisticClause.DistinctValue();
distinctValue.setColumn(this.exprParser.name());
return distinctValue;
} else if (lexer.identifierEquals("COLUMN_SUM")) {
lexer.nextToken();
OdpsStatisticClause.ColumnSum column_sum = new OdpsStatisticClause.ColumnSum();
column_sum.setColumn(this.exprParser.name());
return column_sum;
} else if (lexer.identifierEquals("COLUMN_MAX")) {
lexer.nextToken();
OdpsStatisticClause.ColumnMax column_max = new OdpsStatisticClause.ColumnMax();
column_max.setColumn(this.exprParser.name());
return column_max;
} else if (lexer.identifierEquals("COLUMN_MIN")) {
lexer.nextToken();
OdpsStatisticClause.ColumnMin column_min = new OdpsStatisticClause.ColumnMin();
column_min.setColumn(this.exprParser.name());
return column_min;
} else if (lexer.identifierEquals("EXPRESSION_CONDITION")) {
lexer.nextToken();
OdpsStatisticClause.ExpressionCondition expr_condition = new OdpsStatisticClause.ExpressionCondition();
expr_condition.setExpr(this.exprParser.expr());
return expr_condition;
} else {
throw new ParserException("TODO " + lexer.info());
}
}
    /**
     * Parses INSERT statements, including the ODPS/Hive multi-insert form
     * {@code FROM <source> INSERT ... INSERT ...}. A plain INSERT delegates to
     * {@link #parseHiveInsertStmt()}.
     *
     * @return the parsed insert statement
     */
    public SQLStatement parseInsert() {
        if (lexer.token() == Token.FROM) {
            lexer.nextToken();
            HiveMultiInsertStatement stmt = new HiveMultiInsertStatement();
            // Source form 1: FROM <table-or-function> [alias] [WHERE ...].
            if (lexer.token() == Token.IDENTIFIER || lexer.token() == Token.VARIANT) {
                Lexer.SavePoint mark = lexer.mark();
                SQLExpr tableName = this.exprParser.name();
                // A '(' after the name means it was a function call, not a
                // bare table name; backtrack and re-parse as a primary expr.
                if (lexer.token() == Token.LPAREN) {
                    lexer.reset(mark);
                    tableName = this.exprParser.primary();
                }
                SQLTableSource from = new SQLExprTableSource(tableName);
                if (lexer.token() == Token.IDENTIFIER) {
                    String alias = alias();
                    from.setAlias(alias);
                }
                SQLSelectParser selectParser = createSQLSelectParser();
                from = selectParser.parseTableSourceRest(from);
                if (lexer.token() == Token.WHERE) {
                    lexer.nextToken();
                    // Wrap the filtered source in a synthetic SELECT * subquery.
                    SQLExpr where = this.exprParser.expr();
                    SQLSelectQueryBlock queryBlock = new SQLSelectQueryBlock();
                    queryBlock.addSelectItem(new SQLAllColumnExpr());
                    queryBlock.setFrom(from);
                    queryBlock.setWhere(where);
                    if (lexer.token() == Token.GROUP) {
                        selectParser.parseGroupBy(queryBlock);
                    }
                    stmt.setFrom(
                            new SQLSubqueryTableSource(queryBlock)
                    );
                } else {
                    stmt.setFrom(from);
                }
            } else {
                // Source form 2: FROM (<subquery>) [alias].
                SQLCommentHint hint = null;
                if (lexer.token() == Token.HINT) {
                    hint = this.exprParser.parseHint();
                }
                accept(Token.LPAREN);
                boolean paren2 = lexer.token() == Token.LPAREN;
                SQLSelectParser selectParser = createSQLSelectParser();
                SQLSelect select = selectParser.select();
                SQLTableSource from = null;
                // Doubly parenthesized subquery with an inner alias and joins.
                if (paren2 && lexer.token() != Token.RPAREN) {
                    String subQueryAs = null;
                    if (lexer.token() == Token.AS) {
                        lexer.nextToken();
                        subQueryAs = tableAlias(true);
                    } else {
                        subQueryAs = tableAlias(false);
                    }
                    SQLSubqueryTableSource subQuery = new SQLSubqueryTableSource(select, subQueryAs);
                    from = selectParser.parseTableSourceRest(subQuery);
                }
                accept(Token.RPAREN);
                String alias;
                if (lexer.token() == Token.INSERT) {
                    alias = null;
                } else if (lexer.token() == Token.SELECT) {
                    // skip
                    alias = null;
                } else {
                    if (lexer.token() == Token.AS) {
                        lexer.nextToken();
                    }
                    alias = lexer.stringVal();
                    accept(Token.IDENTIFIER);
                }
                if (from == null) {
                    from = new SQLSubqueryTableSource(select, alias);
                } else {
                    if (alias != null) {
                        from.setAlias(alias);
                    }
                }
                SQLTableSource tableSource = selectParser.parseTableSourceRest(from);
                if (hint != null) {
                    if (tableSource instanceof SQLJoinTableSource) {
                        ((SQLJoinTableSource) tableSource).setHint(hint);
                    }
                }
                stmt.setFrom(tableSource);
            }
            // FROM ... SELECT: a single query item instead of INSERT items.
            if (lexer.token() == Token.SELECT) {
                SQLSelectParser selectParser = createSQLSelectParser();
                SQLSelect query = selectParser.select();
                HiveInsert insert = new HiveInsert();
                insert.setQuery(query);
                stmt.addItem(insert);
                return stmt;
            }
            // One or more INSERT items sharing the FROM source.
            for (; ; ) {
                HiveInsert insert = parseHiveInsert();
                stmt.addItem(insert);
                if (lexer.token() != Token.INSERT) {
                    break;
                }
            }
            return stmt;
        }
        return parseHiveInsertStmt();
    }
    /** Supplies the ODPS-specific SELECT parser, sharing the select-list cache. */
    public SQLSelectParser createSQLSelectParser() {
        return new OdpsSelectParser(this.exprParser, selectListCache);
    }
    /**
     * Parses the ODPS family of SHOW statements (PARTITIONS, STATISTIC,
     * TABLES, GRANTS, USERS, FUNCTIONS, ROLE(S), ACL, HISTORY, CHANGELOGS,
     * CREATE TABLE, ...), dispatching on the identifier after SHOW.
     *
     * @return the parsed SHOW statement
     * @throws ParserException if the SHOW target is not recognized
     */
    public SQLStatement parseShow() {
        accept(Token.SHOW);
        // SHOW PARTITIONS <table> [PARTITION (...)] [WHERE ...].
        if (lexer.identifierEquals(FnvHash.Constants.PARTITIONS)) {
            lexer.nextToken();
            SQLShowPartitionsStmt stmt = new SQLShowPartitionsStmt();
            SQLExpr expr = this.exprParser.expr();
            stmt.setTableSource(new SQLExprTableSource(expr));
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                accept(Token.LPAREN);
                parseAssignItems(stmt.getPartition(), stmt, false);
                accept(Token.RPAREN);
            }
            if (lexer.token() == Token.WHERE) {
                lexer.nextToken();
                stmt.setWhere(
                        this.exprParser.expr()
                );
            }
            return stmt;
        }
        // SHOW STATISTIC <table> [PARTITION (...)] [COLUMNS (...)].
        if (lexer.identifierEquals(FnvHash.Constants.STATISTIC)) {
            lexer.nextToken();
            SQLShowStatisticStmt stmt = new SQLShowStatisticStmt();
            SQLExpr expr = this.exprParser.expr();
            stmt.setTableSource(new SQLExprTableSource(expr));
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                accept(Token.LPAREN);
                parseAssignItems(stmt.getPartitions(), stmt, false);
                accept(Token.RPAREN);
            }
            if (identifierEquals("COLUMNS")) {
                lexer.nextToken();
                if (lexer.token() != Token.SEMI) {
                    accept(Token.LPAREN);
                    this.exprParser.names(stmt.getColumns(), stmt);
                    accept(Token.RPAREN);
                }
            }
            return stmt;
        }
        if (lexer.identifierEquals(FnvHash.Constants.STATISTIC_LIST)) {
            lexer.nextToken();
            SQLShowStatisticListStmt stmt = new SQLShowStatisticListStmt();
            SQLExpr expr = this.exprParser.expr();
            stmt.setTableSource(new SQLExprTableSource(expr));
            return stmt;
        }
        if (lexer.identifierEquals(FnvHash.Constants.PACKAGES)) {
            lexer.nextToken();
            SQLShowPackagesStatement stmt = new SQLShowPackagesStatement();
            return stmt;
        }
        // SHOW TABLES [FROM|IN <db>] [LIKE <pattern>].
        if (lexer.identifierEquals(FnvHash.Constants.TABLES)) {
            lexer.nextToken();
            SQLShowTablesStatement stmt = new SQLShowTablesStatement();
            if (lexer.token() == Token.FROM || lexer.token() == Token.IN) {
                lexer.nextToken();
                stmt.setDatabase(this.exprParser.name());
            } else if (lexer.token() == IDENTIFIER) {
                SQLName database = exprParser.name();
                stmt.setDatabase(database);
            }
            if (lexer.token() == Token.LIKE) {
                lexer.nextToken();
                stmt.setLike(this.exprParser.expr());
            } else if (lexer.token() == Token.LITERAL_CHARS || lexer.token() == Token.LITERAL_ALIAS) {
                stmt.setLike(this.exprParser.expr());
            }
            return stmt;
        }
        // SHOW LABEL GRANTS [ON TABLE <t>] [FOR USER <u>].
        if (lexer.identifierEquals(FnvHash.Constants.LABEL)) {
            lexer.nextToken();
            acceptIdentifier("GRANTS");
            OdpsShowGrantsStmt stmt = new OdpsShowGrantsStmt();
            stmt.setLabel(true);
            if (lexer.token() == Token.ON) {
                lexer.nextToken();
                accept(Token.TABLE);
                stmt.setObjectType(this.exprParser.expr());
            }
            if (lexer.token() == Token.FOR) {
                lexer.nextToken();
                accept(Token.USER);
                stmt.setUser(this.exprParser.expr());
            }
            return stmt;
        }
        // SHOW GRANTS [FOR [USER] <u>] [ON type <t>].
        if (lexer.identifierEquals(FnvHash.Constants.GRANTS)) {
            lexer.nextToken();
            OdpsShowGrantsStmt stmt = new OdpsShowGrantsStmt();
            if (lexer.token() == Token.FOR) {
                lexer.nextToken();
                if (lexer.token() == Token.USER) {
                    lexer.nextToken();
                }
                stmt.setUser(this.exprParser.expr());
            }
            if (lexer.token() == Token.ON) {
                lexer.nextToken();
                acceptIdentifier("type");
                stmt.setObjectType(this.exprParser.expr());
            }
            return stmt;
        }
        if (lexer.identifierEquals(FnvHash.Constants.USERS)) {
            lexer.nextToken();
            SQLShowUsersStatement stmt = new SQLShowUsersStatement();
            return stmt;
        }
        if (lexer.identifierEquals("RECYCLEBIN")) {
            lexer.nextToken();
            SQLShowRecylebinStatement stmt = new SQLShowRecylebinStatement();
            return stmt;
        }
        if (lexer.identifierEquals("VARIABLES")) {
            lexer.nextToken();
            return parseShowVariants();
        }
        if (lexer.token() == Token.CREATE) {
            return parseShowCreateTable();
        }
        // SHOW FUNCTIONS [LIKE <pattern>].
        if (lexer.identifierEquals(FnvHash.Constants.FUNCTIONS)) {
            lexer.nextToken();
            SQLShowFunctionsStatement stmt = new SQLShowFunctionsStatement();
            if (lexer.token() == Token.LIKE) {
                lexer.nextToken();
                stmt.setLike(
                        this.exprParser.expr()
                );
            } else if (lexer.token() == Token.LITERAL_CHARS || lexer.token() == IDENTIFIER) {
                stmt.setLike(
                        this.exprParser.expr()
                );
            }
            return stmt;
        }
        if (lexer.identifierEquals(FnvHash.Constants.ROLE)) {
            lexer.nextToken();
            SQLShowRoleStatement stmt = new SQLShowRoleStatement();
            if (lexer.token() == Token.GRANT) {
                lexer.nextToken();
                stmt.setGrant(
                        this.exprParser.name()
                );
            }
            return stmt;
        }
        if (lexer.identifierEquals("ACL")) {
            lexer.nextToken();
            SQLShowACLStatement stmt = new SQLShowACLStatement();
            if (lexer.token() == Token.FOR) {
                lexer.nextToken();
                stmt.setTable(
                        new SQLExprTableSource(
                                this.exprParser.name()
                        )
                );
            }
            return stmt;
        }
        if (lexer.identifierEquals(FnvHash.Constants.ROLES)) {
            lexer.nextToken();
            SQLShowRolesStatement stmt = new SQLShowRolesStatement();
            return stmt;
        }
        // SHOW HISTORY [FOR TABLES|TABLE <t>] [(props)] [PARTITION (...)].
        if (lexer.identifierEquals("HISTORY")) {
            lexer.nextToken();
            SQLShowHistoryStatement stmt = new SQLShowHistoryStatement();
            if (lexer.token() == Token.FOR) {
                lexer.nextToken();
                if (lexer.identifierEquals(FnvHash.Constants.TABLES)) {
                    lexer.nextToken();
                    stmt.setTables(true);
                } else if (lexer.token() == Token.TABLE) {
                    lexer.nextToken();
                    stmt.setTable(
                            new SQLExprTableSource(
                                    this.exprParser.name()
                            )
                    );
                }
            }
            if (lexer.token() == Token.LPAREN) {
                this.exprParser.parseAssignItem(stmt.getProperties(), stmt);
            }
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            }
            return stmt;
        }
        // SHOW CHANGELOGS [FOR TABLES|TABLE <t>|<t>] [(props)] [PARTITION (...)] [id].
        if (lexer.identifierEquals("CHANGELOGS")) {
            lexer.nextToken();
            OdpsShowChangelogsStatement stmt = new OdpsShowChangelogsStatement();
            if (lexer.token() == Token.FOR) {
                lexer.nextToken();
                if (lexer.identifierEquals(FnvHash.Constants.TABLES)) {
                    lexer.nextToken();
                    stmt.setTables(true);
                } else if (lexer.token() == Token.TABLE) {
                    lexer.nextToken();
                    stmt.setTable(
                            new SQLExprTableSource(
                                    this.exprParser.name()
                            )
                    );
                } else if (lexer.token() == IDENTIFIER) {
                    stmt.setTable(
                            new SQLExprTableSource(
                                    this.exprParser.name()
                            )
                    );
                }
            }
            if (lexer.token() == Token.LPAREN) {
                this.exprParser.parseAssignItem(stmt.getProperties(), stmt);
            }
            if (lexer.token() == Token.PARTITION) {
                lexer.nextToken();
                this.exprParser.parseAssignItem(stmt.getPartitions(), stmt);
            }
            if (lexer.token() == Token.LITERAL_INT) {
                stmt.setId(
                        this.exprParser.primary()
                );
            }
            return stmt;
        }
        throw new ParserException("TODO " + lexer.info());
    }
/**
 * Parses a {@code COST SQL <statement>} command: consumes the two leading
 * keywords, parses the wrapped statement, and returns it inside a
 * {@link SQLCostStatement}.
 */
public SQLStatement parseCost() {
    acceptIdentifier("COST");
    acceptIdentifier("SQL");
    SQLCostStatement costStmt = new SQLCostStatement();
    costStmt.setStatement(parseStatement());
    return costStmt;
}
/**
 * Parses a SET-style statement in the ODPS dialect. Three shapes are handled:
 * {@code SETPROJECT k=v} / {@code SET PROJECT k=v} (project-level option),
 * {@code SET LABEL <n> TO USER|TABLE ...} (security label), and the generic
 * {@code SET k=v[, ...]} assignment list.
 */
public SQLStatement parseSet() {
    // Capture any leading comments so they can be re-attached to the statement.
    List<String> comments = null;
    if (lexer.isKeepComments() && lexer.hasComment()) {
        comments = lexer.readAndResetComments();
    }
    boolean setProject = false;
    if (identifierEquals("SETPROJECT")) {
        // Single-keyword form "SETPROJECT k=v".
        lexer.nextToken();
        setProject = true;
    } else if (dbType == DbType.odps && identifierEquals("ALIAS")) {
        // ODPS accepts "ALIAS" in place of SET; the keyword is simply consumed.
        lexer.nextToken();
    } else {
        accept(Token.SET);
    }
    // ODPS tolerates a doubled keyword ("SET SET ...").
    if (lexer.token() == Token.SET && dbType == DbType.odps) {
        lexer.nextToken();
    }
    // Two-word form "SET PROJECT k=v".
    if (lexer.identifierEquals("PROJECT")) {
        lexer.nextToken();
        setProject = true;
    }
    if (setProject) {
        // Project option: exactly one "name = value" pair.
        SQLSetStatement stmt = new SQLSetStatement();
        stmt.setOption(SQLSetStatement.Option.PROJECT);
        SQLName target = this.exprParser.name();
        accept(Token.EQ);
        SQLExpr value = this.exprParser.expr();
        stmt.set(target, value);
        return stmt;
    } else if (lexer.identifierEquals("LABEL")) {
        // "SET LABEL <n> TO USER <name>" or "SET LABEL <n> TO TABLE <name>[(col, ...)]".
        OdpsSetLabelStatement stmt = new OdpsSetLabelStatement();
        if (comments != null) {
            stmt.addBeforeComment(comments);
        }
        lexer.nextToken();
        stmt.setLabel(lexer.stringVal());
        lexer.nextToken();
        accept(Token.TO);
        if (lexer.token() == Token.USER) {
            lexer.nextToken();
            SQLName name = this.exprParser.name();
            stmt.setUser(name);
            return stmt;
        }
        accept(Token.TABLE);
        SQLExpr expr = this.exprParser.name();
        stmt.setTable(new SQLExprTableSource(expr));
        // Optional column list restricts the label to specific columns.
        if (lexer.token() == Token.LPAREN) {
            lexer.nextToken();
            this.exprParser.names(stmt.getColumns(), stmt);
            accept(Token.RPAREN);
        }
        return stmt;
    } else {
        // Plain "SET k=v[, ...]" assignment list.
        SQLSetStatement stmt = new SQLSetStatement(dbType);
        stmt.putAttribute("parser.set", Boolean.TRUE);
        if (comments != null) {
            stmt.addBeforeComment(comments);
        }
        parseAssignItems(stmt.getItems(), stmt);
        return stmt;
    }
}
/**
 * Parses an ODPS {@code GRANT} statement:
 * {@code GRANT [LABEL <expr> | [SUPER] <privilege>[, ...]]
 *  [ON <object-type> <object>] [TO USER|ROLE <name>] [WITH EXP <expr>]}.
 */
public OdpsGrantStmt parseGrant() {
    accept(Token.GRANT);
    OdpsGrantStmt stmt = new OdpsGrantStmt();
    if (lexer.identifierEquals("LABEL")) {
        // Label-based grant: "GRANT LABEL <expr> ...".
        stmt.setLabel(true);
        lexer.nextToken();
        stmt.setLabel(this.exprParser.expr());
    } else {
        // Privilege-based grant, optionally prefixed with SUPER.
        if (lexer.identifierEquals("SUPER")) {
            stmt.setSuper(true);
            lexer.nextToken();
        }
        parsePrivilege(stmt.getPrivileges(), stmt);
    }
    // Optional "ON <resource-type> <resource>" clause; an unrecognized type
    // keyword leaves the resource type unset but still parses the resource expr.
    if (lexer.token() == Token.ON) {
        lexer.nextToken();
        if (lexer.identifierEquals("PROJECT")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.PROJECT);
        } else if (lexer.identifierEquals("PACKAGE")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.PACKAGE);
        } else if (lexer.token() == Token.FUNCTION) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.FUNCTION);
        } else if (lexer.token() == Token.TABLE) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.TABLE);
            // TABLE may carry a column list: "ON TABLE t(col, ...)".
            if (lexer.token() == Token.LPAREN) {
                lexer.nextToken();
                this.exprParser.names(stmt.getColumns(), stmt);
                accept(Token.RPAREN);
            }
        } else if (lexer.identifierEquals("RESOURCE")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.RESOURCE);
        } else if (lexer.identifierEquals("INSTANCE")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.INSTANCE);
        } else if (lexer.identifierEquals("JOB")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.JOB);
        } else if (lexer.identifierEquals("VOLUME")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.VOLUME);
        } else if (lexer.identifierEquals("OfflineModel")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.OfflineModel);
        } else if (lexer.identifierEquals("XFLOW")) {
            lexer.nextToken();
            stmt.setResourceType(SQLObjectType.XFLOW);
        }
        stmt.setResource(this.exprParser.expr());
    }
    // Optional grantee: "TO [USER|ROLE] <expr>".
    if (lexer.token() == Token.TO) {
        lexer.nextToken();
        if (lexer.token() == Token.USER) {
            lexer.nextToken();
            stmt.setSubjectType(SQLObjectType.USER);
        } else if (lexer.identifierEquals("ROLE")) {
            lexer.nextToken();
            stmt.setSubjectType(SQLObjectType.ROLE);
        }
        stmt.getUsers().add(this.exprParser.expr());
    }
    // Optional expiration: "WITH EXP <expr>".
    if (lexer.token() == Token.WITH) {
        lexer.nextToken();
        acceptIdentifier("EXP");
        stmt.setExpire(this.exprParser.expr());
    }
    return stmt;
}
/**
 * Parses a comma-separated privilege list into {@code privileges}.
 * Recognized keywords (ALL, SELECT, DROP, CreateTable, ...) become
 * {@link SQLIdentifierExpr} actions; anything else falls back to a generic
 * expression. Each privilege may carry an optional "(col, ...)" column list.
 *
 * @param privileges target list receiving one {@link SQLPrivilegeItem} per entry
 * @param parent     AST parent assigned to each parsed action expression
 */
protected void parsePrivilege(List<SQLPrivilegeItem> privileges, SQLObject parent) {
    for (; ; ) {
        String privilege = null;
        // Privileges that are real lexer tokens.
        if (lexer.token() == Token.ALL) {
            lexer.nextToken();
            privilege = "ALL";
        } else if (lexer.token() == Token.SELECT) {
            privilege = "SELECT";
            lexer.nextToken();
        } else if (lexer.token() == Token.UPDATE) {
            privilege = "UPDATE";
            lexer.nextToken();
        } else if (lexer.token() == Token.DELETE) {
            privilege = "DELETE";
            lexer.nextToken();
        } else if (lexer.token() == Token.INSERT) {
            privilege = "INSERT";
            lexer.nextToken();
        } else if (lexer.token() == Token.DROP) {
            lexer.nextToken();
            privilege = "DROP";
        } else if (lexer.token() == Token.ALTER) {
            lexer.nextToken();
            privilege = "ALTER";
        // Privileges that are plain identifiers (case-insensitive match).
        } else if (lexer.identifierEquals("DESCRIBE")) {
            privilege = "DESCRIBE";
            lexer.nextToken();
        } else if (lexer.identifierEquals("READ")) {
            privilege = "READ";
            lexer.nextToken();
        } else if (lexer.identifierEquals("WRITE")) {
            privilege = "WRITE";
            lexer.nextToken();
        } else if (lexer.identifierEquals("EXECUTE")) {
            lexer.nextToken();
            privilege = "EXECUTE";
        } else if (lexer.identifierEquals("LIST")) {
            lexer.nextToken();
            privilege = "LIST";
        } else if (lexer.identifierEquals("CreateTable")) {
            lexer.nextToken();
            privilege = "CreateTable";
        } else if (lexer.identifierEquals("CreateInstance")) {
            lexer.nextToken();
            privilege = "CreateInstance";
        } else if (lexer.identifierEquals("CreateFunction")) {
            lexer.nextToken();
            privilege = "CreateFunction";
        } else if (lexer.identifierEquals("CreateResource")) {
            lexer.nextToken();
            privilege = "CreateResource";
        } else if (lexer.identifierEquals("CreateJob")) {
            lexer.nextToken();
            privilege = "CreateJob";
        } else if (lexer.identifierEquals("CreateVolume")) {
            lexer.nextToken();
            privilege = "CreateVolume";
        } else if (lexer.identifierEquals("CreateOfflineModel")) {
            lexer.nextToken();
            privilege = "CreateOfflineModel";
        } else if (lexer.identifierEquals("CreateXflow")) {
            lexer.nextToken();
            privilege = "CreateXflow";
        }
        SQLExpr expr = null;
        if (privilege != null) {
            // Canonical keyword recognized above.
            expr = new SQLIdentifierExpr(privilege);
        } else {
            // Unknown privilege keyword: parse it as a generic expression.
            expr = this.exprParser.expr();
        }
        SQLPrivilegeItem privilegeItem = new SQLPrivilegeItem();
        privilegeItem.setAction(expr);
        // Optional column list: "<privilege>(col, ...)".
        if (lexer.token() == Token.LPAREN) {
            lexer.nextToken();
            for (; ; ) {
                privilegeItem.getColumns().add(this.exprParser.name());
                if (lexer.token() == Token.COMMA) {
                    lexer.nextToken();
                    continue;
                }
                break;
            }
            accept(Token.RPAREN);
        }
        expr.setParent(parent);
        privileges.add(privilegeItem);
        // Comma continues the privilege list; anything else ends it.
        if (lexer.token() == Token.COMMA) {
            lexer.nextToken();
            continue;
        }
        break;
    }
}
/**
 * CREATE FUNCTION in this dialect is parsed with the shared Hive-style
 * implementation; this override simply delegates.
 */
public SQLCreateFunctionStatement parseCreateFunction() {
    return parseHiveCreateFunction();
}
/**
 * Parses a {@code LOAD [OVERWRITE|INTO] TABLE <t> [PARTITION (...)]
 * [LOCAL] FROM LOCATION <path>} statement, followed by optional
 * STORED BY/AS, ROW FORMAT SERDE, WITH SERDEPROPERTIES and USING clauses.
 */
protected HiveLoadDataStatement parseLoad() {
    acceptIdentifier("LOAD");
    HiveLoadDataStatement stmt = new HiveLoadDataStatement();
    // OVERWRITE replaces existing data; INTO is consumed without a flag.
    if (lexer.token() == OVERWRITE) {
        stmt.setOverwrite(true);
        lexer.nextToken();
    } else if (lexer.token() == Token.INTO) {
        lexer.nextToken();
    }
    accept(Token.TABLE);
    // Target table expression.
    stmt.setInto(
            this.exprParser.expr());
    // Optional target partition spec.
    if (lexer.token() == Token.PARTITION) {
        lexer.nextToken();
        accept(Token.LPAREN);
        this.exprParser.exprList(stmt.getPartition(), stmt);
        accept(Token.RPAREN);
    }
    if (lexer.identifierEquals(FnvHash.Constants.LOCAL)) {
        lexer.nextToken();
        stmt.setLocal(true);
    }
    // Source: "FROM LOCATION <path-expr>".
    accept(Token.FROM);
    acceptIdentifier("LOCATION");
    SQLExpr inpath = this.exprParser.expr();
    stmt.setInpath(inpath);
    // "STORED BY <handler>" or "STORED AS <format>".
    if (lexer.identifierEquals("STORED")) {
        lexer.nextToken();
        if (lexer.token() == Token.BY) {
            lexer.nextToken();
            stmt.setStoredBy(this.exprParser.expr());
        } else {
            accept(Token.AS);
            stmt.setStoredAs(this.exprParser.expr());
        }
    }
    // "ROW FORMAT SERDE <serde>".
    if (lexer.identifierEquals("ROW")) {
        lexer.nextToken();
        acceptIdentifier("FORMAT");
        acceptIdentifier("SERDE");
        stmt.setRowFormat(this.exprParser.expr());
    }
    // "WITH SERDEPROPERTIES (name = value, ...)".
    if (lexer.token() == Token.WITH) {
        lexer.nextToken();
        acceptIdentifier("SERDEPROPERTIES");
        accept(Token.LPAREN);
        for (; ; ) {
            String name = lexer.stringVal();
            lexer.nextToken();
            accept(Token.EQ);
            SQLExpr value = this.exprParser.primary();
            stmt.getSerdeProperties().put(name, value);
            if (lexer.token() == Token.COMMA) {
                lexer.nextToken();
                continue;
            }
            break;
        }
        accept(Token.RPAREN);
    }
    // A second "STORED AS" is allowed after SERDEPROPERTIES.
    if (lexer.identifierEquals("STORED")) {
        lexer.nextToken();
        accept(Token.AS);
        stmt.setStoredAs(this.exprParser.expr());
    }
    if (lexer.identifierEquals(FnvHash.Constants.USING)) {
        lexer.nextToken();
        stmt.setUsing(
                this.exprParser.expr()
        );
    }
    return stmt;
}
/**
 * Parses a COPY statement. The argument text is not tokenized: everything
 * from the current lexer position up to the next ';' (or end of input) is
 * captured verbatim and stored on the resulting {@link OdpsCopyStmt}.
 */
public SQLStatement parseCopy() {
    lexer.nextToken();
    int start = lexer.pos();
    int terminator = lexer.text.indexOf(';', start);
    String rawArguments;
    if (terminator == -1) {
        // No terminating semicolon: consume the remainder of the input.
        rawArguments = lexer.subString(start);
        lexer.reset(lexer.text.length());
    } else {
        rawArguments = lexer.subString(start, terminator - start);
        lexer.reset(terminator);
    }
    lexer.nextToken();
    OdpsCopyStmt copyStmt = new OdpsCopyStmt();
    copyStmt.setArguments(rawArguments);
    return copyStmt;
}
/**
 * Handles ODPS-specific ALTER TABLE suffixes that follow the table name:
 * MERGE, [RANGE] CLUSTERED BY (...), SORTED, and NOT CLUSTERED. Returns
 * {@code true} when none of these matched (nothing consumed); otherwise
 * defers the remainder to the superclass.
 */
@Override
protected boolean alterTableAfterNameRest(SQLAlterTableStatement stmt) {
    if (lexer.identifierEquals("MERGE")) {
        alterTableMerge(stmt);
    } else if ((lexer.identifierEquals(FnvHash.Constants.RANGE)
            || lexer.identifierEquals(FnvHash.Constants.CLUSTERED))
    ) {
        // "RANGE CLUSTERED BY (...)" or plain "CLUSTERED BY (...)".
        if (lexer.identifierEquals(FnvHash.Constants.RANGE)) {
            lexer.nextToken();
            acceptIdentifier("CLUSTERED");
            stmt.setRange(true);
        } else {
            lexer.nextToken();
        }
        accept(Token.BY);
        accept(Token.LPAREN);
        // Comma-separated clustering items.
        for (; ; ) {
            SQLSelectOrderByItem item = this.exprParser.parseSelectOrderByItem();
            stmt.addClusteredByItem(item);
            if (lexer.token() == Token.COMMA) {
                lexer.nextToken();
                continue;
            }
            break;
        }
        accept(Token.RPAREN);
    } else if (lexer.identifierEquals(FnvHash.Constants.SORTED)) {
        alterTableSorted(stmt);
    } else if (dbType == DbType.odps && lexer.token() == Token.NOT) {
        // "NOT CLUSTERED" marks the statement accordingly (ODPS only).
        lexer.nextToken();
        acceptIdentifier("CLUSTERED");
        stmt.setNotClustered(true);
    } else {
        // No ODPS-specific suffix recognized here.
        return true;
    }
    return super.alterTableAfterNameRest(stmt);
}
/**
 * Handles ODPS-specific "ALTER TABLE ... SET" tails: SET CHANGELOGS <value>
 * and SET FILEFORMAT <value>. Anything else is delegated to the superclass,
 * whose return value is propagated; the ODPS branches return {@code false}.
 */
@Override
protected boolean alterTableSetRest(SQLAlterTableStatement stmt) {
    if (lexer.identifierEquals("CHANGELOGS")) {
        lexer.nextToken();
        OdpsAlterTableSetChangeLogs changeLogs = new OdpsAlterTableSetChangeLogs();
        changeLogs.setValue(this.exprParser.primary());
        stmt.addItem(changeLogs);
        return false;
    }
    if (lexer.identifierEquals("FILEFORMAT")) {
        lexer.nextToken();
        OdpsAlterTableSetFileFormat fileFormat = new OdpsAlterTableSetFileFormat();
        fileFormat.setValue(this.exprParser.primary());
        stmt.addItem(fileFormat);
        return false;
    }
    return super.alterTableSetRest(stmt);
}
/**
 * Parses "CHANGEOWNER TO <owner>" in an ALTER TABLE statement and records
 * it as an {@link OdpsAlterTableChangeOwner} item.
 */
@Override
protected void alterTableChangeOwner(SQLAlterTableStatement stmt) {
    lexer.nextToken();
    accept(Token.TO);
    OdpsAlterTableChangeOwner ownerChange = new OdpsAlterTableChangeOwner();
    ownerChange.setValue(this.exprParser.primary());
    stmt.addItem(ownerChange);
}
/**
 * Parses ODPS-specific trailing clauses of CREATE MATERIALIZED VIEW:
 * an optional "LIFECYCLE <n>" followed by an optional
 * "PARTITIONED ON (col, ...)". Clause order is fixed by the check order.
 */
@Override
protected void parseCreateMaterializedViewRest(SQLCreateMaterializedViewStatement stmt) {
    if (lexer.identifierEquals(FnvHash.Constants.LIFECYCLE)) {
        lexer.nextToken();
        stmt.setLifyCycle(
                this.exprParser.primary()
        );
    }
    if (lexer.token() == Token.PARTITIONED) {
        lexer.nextToken();
        accept(ON);
        accept(LPAREN);
        this.exprParser.names(stmt.getPartitionedOn(), stmt);
        accept(RPAREN);
    }
}
/**
 * Consumes an optional PARTITION clause of an UPDATE statement, filling
 * the statement's partition assignment list. No-op when the next token
 * is not PARTITION.
 */
@Override
public void parseUpdateStatementPartition(SQLUpdateStatement updateStatement) {
    if (lexer.token() != PARTITION) {
        return;
    }
    lexer.nextToken();
    updateStatement.setPartitions(new ArrayList<>());
    this.exprParser.parseAssignItem(updateStatement.getPartitions(), updateStatement);
}
/**
 * Consumes the optional comma between UPDATE ... SET assignment items;
 * does nothing when the current token is not a comma.
 */
@Override
protected void parseUpdateSetComma() {
    if (lexer.token() != COMMA) {
        return;
    }
    lexer.nextToken();
}
/**
 * In CREATE VIEW column lists, a name starting with '@' is followed by an
 * explicit data type, which is parsed onto the column definition here.
 */
@Override
public void parseCreateViewAtDataType(SQLColumnDefinition column, SQLName expr) {
    boolean isAtName = expr.getSimpleName().startsWith("@");
    if (isAtName) {
        column.setDataType(this.exprParser.parseDataType());
    }
}
/**
 * Tolerates a stray type keyword (STRING, INT or BIGINT) inside a WITH
 * query by consuming and ignoring it.
 */
@Override
protected void parseWithQuerySkip() {
    boolean isTypeKeyword = lexer.identifierEquals(FnvHash.Constants.STRING)
            || lexer.identifierEquals(FnvHash.Constants.INT)
            || lexer.identifierEquals(FnvHash.Constants.BIGINT);
    if (isTypeKeyword) {
        lexer.nextToken();
    }
}
}
| OdpsStatementParser |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueCalculationTestBase.java | {
"start": 1640,
"end": 5196
} | class ____ {
protected static final QueuePath ROOT = new QueuePath("root");
protected static final QueuePath A = new QueuePath("root.a");
protected static final QueuePath A1 = new QueuePath("root.a.a1");
protected static final QueuePath A11 = new QueuePath("root.a.a1.a11");
protected static final QueuePath A12 = new QueuePath("root.a.a1.a12");
protected static final QueuePath A2 = new QueuePath("root.a.a2");
protected static final QueuePath B = new QueuePath("root.b");
protected static final QueuePath B1 = new QueuePath("root.b.b1");
protected static final QueuePath C = new QueuePath("root.c");
private static final String CAPACITY_VECTOR_TEMPLATE = "[memory=%s, vcores=%s]";
protected ResourceCalculator resourceCalculator;
protected MockRM mockRM;
protected CapacityScheduler cs;
protected CapacitySchedulerConfiguration csConf;
protected NullRMNodeLabelsManager mgr;
@BeforeEach
public void setUp() throws Exception {
csConf = new CapacitySchedulerConfiguration();
csConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
csConf.setQueues(ROOT, new String[]{"a", "b"});
csConf.setCapacity(A, 50f);
csConf.setCapacity(B, 50f);
csConf.setQueues(A, new String[]{"a1", "a2"});
csConf.setCapacity(A1, 100f);
csConf.setQueues(A1, new String[]{"a11", "a12"});
csConf.setCapacity(A11, 50f);
csConf.setCapacity(A12, 50f);
mgr = new NullRMNodeLabelsManager();
mgr.init(csConf);
mockRM = new MockRM(csConf) {
protected RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
cs = (CapacityScheduler) mockRM.getResourceScheduler();
cs.updatePlacementRules();
// Policy for new auto created queue's auto deletion when expired
mockRM.start();
cs.start();
mockRM.registerNode("h1:1234", 10 * GB); // label = x
resourceCalculator = cs.getResourceCalculator();
}
protected QueueCapacityUpdateContext update(
QueueAssertionBuilder assertions, Resource clusterResource)
throws IOException {
return update(assertions, clusterResource, clusterResource);
}
protected QueueCapacityUpdateContext update(
QueueAssertionBuilder assertions, Resource clusterResource, Resource emptyLabelResource)
throws IOException {
cs.reinitialize(csConf, mockRM.getRMContext());
CapacitySchedulerQueueCapacityHandler queueController =
new CapacitySchedulerQueueCapacityHandler(mgr, csConf);
mgr.setResourceForLabel(CommonNodeLabelsManager.NO_LABEL, emptyLabelResource);
queueController.updateRoot(cs.getQueue("root"), clusterResource);
QueueCapacityUpdateContext updateContext =
queueController.updateChildren(clusterResource, cs.getQueue("root"));
assertions.finishAssertion();
return updateContext;
}
protected QueueAssertionBuilder createAssertionBuilder() {
return new QueueAssertionBuilder(cs);
}
protected static String createCapacityVector(Object memory, Object vcores) {
return String.format(CAPACITY_VECTOR_TEMPLATE, memory, vcores);
}
protected static String absolute(double value) {
return String.valueOf((long) value);
}
protected static String weight(float value) {
return value + "w";
}
protected static String percentage(float value) {
return value + "%";
}
protected static Resource createResource(double memory, double vcores) {
return Resource.newInstance((int) memory, (int) vcores);
}
}
| CapacitySchedulerQueueCalculationTestBase |
java | qos-ch__slf4j | jcl-over-slf4j/src/main/java/org/apache/commons/logging/LogFactory.java | {
"start": 11123,
"end": 11884
} | class ____ (if
* any), after calling the instance method <code>release()</code> on each of
* them.
*
* @param classLoader
* ClassLoader for which to release the LogFactory
*/
public static void release(ClassLoader classLoader) {
// since SLF4J based JCL does not make use of classloaders, there is nothing
// to do here
}
/**
* Release any internal references to previously created {@link LogFactory}
* instances, after calling the instance method <code>release()</code> on
* each of them. This is useful in environments like servlet containers, which
* implement application reloading by throwing away a ClassLoader. Dangling
* references to objects in that | loader |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/biginteger/BigIntegerAssert_isCloseTo_Test.java | {
"start": 921,
"end": 1399
} | class ____ extends BigIntegerAssertBaseTest {
private final BigInteger other = new BigInteger("6");
private final Offset<BigInteger> offset = offset(BigInteger.ONE);
@Override
protected BigIntegerAssert invoke_api_method() {
return assertions.isCloseTo(other, offset);
}
@Override
protected void verify_internal_effects() {
verify(bigIntegers).assertIsCloseTo(getInfo(assertions), getActual(assertions), other, offset);
}
}
| BigIntegerAssert_isCloseTo_Test |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java | {
"start": 17903,
"end": 18024
} | class ____ used to convert the test loaded WKB into encoded longs for the aggregators.
*/
private abstract static | is |
java | apache__flink | flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/FileSinkITBase.java | {
"start": 1504,
"end": 3261
} | class ____ {
protected static final int NUM_SOURCES = 4;
protected static final int NUM_SINKS = 3;
protected static final int NUM_RECORDS = 10000;
protected static final int NUM_BUCKETS = 4;
protected static final double FAILOVER_RATIO = 0.4;
private static Stream<Boolean> params() {
return Stream.of(false, true);
}
@ParameterizedTest(name = "triggerFailover = {0}")
@MethodSource("params")
void testFileSink(boolean triggerFailover, @TempDir java.nio.file.Path tmpDir)
throws Exception {
String path = tmpDir.toString();
JobGraph jobGraph = createJobGraph(triggerFailover, path);
final MiniClusterConfiguration cfg =
new MiniClusterConfiguration.Builder()
.withRandomPorts()
.setNumTaskManagers(1)
.setNumSlotsPerTaskManager(4)
.build();
try (MiniCluster miniCluster = new MiniCluster(cfg)) {
miniCluster.start();
miniCluster.executeJobBlocking(jobGraph);
}
IntegerFileSinkTestDataUtils.checkIntegerSequenceSinkOutput(
path, NUM_RECORDS, NUM_BUCKETS, NUM_SOURCES);
}
protected abstract JobGraph createJobGraph(boolean triggerFailover, String path);
protected FileSink<Integer> createFileSink(String path) {
return FileSink.forRowFormat(new Path(path), new IntegerFileSinkTestDataUtils.IntEncoder())
.withBucketAssigner(
new IntegerFileSinkTestDataUtils.ModuloBucketAssigner(NUM_BUCKETS))
.withRollingPolicy(new PartSizeAndCheckpointRollingPolicy<>(1024, true))
.build();
}
}
| FileSinkITBase |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java | {
"start": 84023,
"end": 84441
} | class ____ implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_COMMIT_PENDING));
}
}
private static | CommitPendingTransition |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/TestExecutionListener.java | {
"start": 1859,
"end": 6868
} | interface ____
* {@link org.springframework.core.annotation.Order @Order} annotation. See
* {@link TestContextBootstrapper#getTestExecutionListeners()} for details.
*
* <h3>Wrapping Behavior for Listeners</h3>
*
* <p>The {@link TestContextManager} guarantees <em>wrapping</em> behavior for
* multiple registered listeners that implement lifecycle callbacks such as
* {@link #beforeTestClass(TestContext) beforeTestClass},
* {@link #afterTestClass(TestContext) afterTestClass},
* {@link #beforeTestMethod(TestContext) beforeTestMethod},
* {@link #afterTestMethod(TestContext) afterTestMethod},
* {@link #beforeTestExecution(TestContext) beforeTestExecution}, and
* {@link #afterTestExecution(TestContext) afterTestExecution}. This means that,
* given two listeners {@code Listener1} and {@code Listener2} with {@code Listener1}
* registered before {@code Listener2}, any <em>before</em> callbacks implemented
* by {@code Listener1} are guaranteed to be invoked <strong>before</strong> any
* <em>before</em> callbacks implemented by {@code Listener2}. Similarly, given
* the same two listeners registered in the same order, any <em>after</em>
* callbacks implemented by {@code Listener1} are guaranteed to be invoked
* <strong>after</strong> any <em>after</em> callbacks implemented by
* {@code Listener2}. {@code Listener1} is therefore said to <em>wrap</em>
* {@code Listener2}.
*
* <p>For a concrete example, consider the relationship between the
* {@link org.springframework.test.context.transaction.TransactionalTestExecutionListener
* TransactionalTestExecutionListener} and the
* {@link org.springframework.test.context.jdbc.SqlScriptsTestExecutionListener
* SqlScriptsTestExecutionListener}. The {@code SqlScriptsTestExecutionListener}
* is registered after the {@code TransactionalTestExecutionListener}, so that
* SQL scripts are executed within a transaction managed by the
* {@code TransactionalTestExecutionListener}.
*
* <h3>Registering TestExecutionListener Implementations</h3>
*
* <p>A {@code TestExecutionListener} can be registered explicitly for a test class,
* its subclasses, and its nested classes by using the
* {@link TestExecutionListeners @TestExecutionListeners} annotation. Explicit
* registration is suitable for custom listeners that are used in limited testing
* scenarios. However, it can become cumbersome if a custom listener needs to be
* used across an entire test suite. This issue is addressed through support for
* automatic discovery of <em>default</em> {@code TestExecutionListener}
* implementations through the
* {@link org.springframework.core.io.support.SpringFactoriesLoader SpringFactoriesLoader}
* mechanism. Specifically, default {@code TestExecutionListener} implementations
* can be registered under the {@code org.springframework.test.context.TestExecutionListener}
* key in a {@code META-INF/spring.factories} properties file.
*
* <p>Spring provides the following implementations. Each of these implements
* {@code Ordered} and is registered automatically by default.
*
* <ul>
* <li>{@link org.springframework.test.context.web.ServletTestExecutionListener
* ServletTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.support.DirtiesContextBeforeModesTestExecutionListener
* DirtiesContextBeforeModesTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.event.ApplicationEventsTestExecutionListener
* ApplicationEventsTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.bean.override.BeanOverrideTestExecutionListener
* BeanOverrideTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.support.DependencyInjectionTestExecutionListener
* DependencyInjectionTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.observation.MicrometerObservationRegistryTestExecutionListener
* MicrometerObservationRegistryTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.support.DirtiesContextTestExecutionListener
* DirtiesContextTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.support.CommonCachesTestExecutionListener
* CommonCachesTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.transaction.TransactionalTestExecutionListener
* TransactionalTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.jdbc.SqlScriptsTestExecutionListener
* SqlScriptsTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.event.EventPublishingTestExecutionListener
* EventPublishingTestExecutionListener}</li>
* <li>{@link org.springframework.test.context.bean.override.mockito.MockitoResetTestExecutionListener
* MockitoResetTestExecutionListener}</li>
* </ul>
*
* @author Sam Brannen
* @author Juergen Hoeller
* @since 2.5
* @see TestExecutionListeners @TestExecutionListeners
* @see TestContextManager
* @see org.springframework.test.context.support.AbstractTestExecutionListener
*/
public | or |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java | {
"start": 2097,
"end": 8122
} | class ____ extends
AbstractDelegationTokenSecretManager<RMDelegationTokenIdentifier> implements
Recoverable {
private static final Logger LOG = LoggerFactory
.getLogger(RMDelegationTokenSecretManager.class);
private final ResourceManager rm;
/**
* Create a secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
* for expired tokens in milliseconds
* @param rmContext current context of the ResourceManager
*/
public RMDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval,
RMContext rmContext) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.rm = rmContext.getResourceManager();
}
@Override
public RMDelegationTokenIdentifier createIdentifier() {
return new RMDelegationTokenIdentifier();
}
private boolean shouldIgnoreException(Exception e) {
return !running && e.getCause() instanceof InterruptedException;
}
@Override
protected void storeNewMasterKey(DelegationKey newKey) {
try {
LOG.info("storing master key with keyID " + newKey.getKeyId());
rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error(
"Error in storing master key with KeyID: " + newKey.getKeyId());
ExitUtil.terminate(1, e);
}
}
}
@Override
protected void removeStoredMasterKey(DelegationKey key) {
try {
LOG.info("removing master key with keyID " + key.getKeyId());
rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
ExitUtil.terminate(1, e);
}
}
}
@Override
protected void storeNewToken(RMDelegationTokenIdentifier identifier,
long renewDate) {
try {
LOG.info("storing RMDelegation token with sequence number: "
+ identifier.getSequenceNumber());
rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
renewDate);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in storing RMDelegationToken with sequence number: "
+ identifier.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
}
@Override
protected void updateStoredToken(RMDelegationTokenIdentifier id,
long renewDate) {
try {
LOG.info("updating RMDelegation token with sequence number: "
+ id.getSequenceNumber());
rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in updating persisted RMDelegationToken"
+ " with sequence number: " + id.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
}
@Override
protected void removeStoredToken(RMDelegationTokenIdentifier ident)
throws IOException {
try {
LOG.info("removing RMDelegation token with sequence number: "
+ ident.getSequenceNumber());
rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error(
"Error in removing RMDelegationToken with sequence number: "
+ ident.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
}
@Private
@VisibleForTesting
public synchronized Set<DelegationKey> getAllMasterKeys() {
HashSet<DelegationKey> keySet = new HashSet<DelegationKey>();
keySet.addAll(allKeys.values());
return keySet;
}
@Private
@VisibleForTesting
public synchronized Map<RMDelegationTokenIdentifier, Long> getAllTokens() {
Map<RMDelegationTokenIdentifier, Long> allTokens =
new HashMap<RMDelegationTokenIdentifier, Long>();
for (Map.Entry<RMDelegationTokenIdentifier,
DelegationTokenInformation> entry : currentTokens.entrySet()) {
allTokens.put(entry.getKey(), entry.getValue().getRenewDate());
}
return allTokens;
}
@Private
@VisibleForTesting
public int getLatestDTSequenceNumber() {
return delegationTokenSequenceNumber;
}
@Override
public void recover(RMState rmState) throws Exception {
LOG.info("recovering RMDelegationTokenSecretManager.");
// recover RMDTMasterKeys
for (DelegationKey dtKey : rmState.getRMDTSecretManagerState()
.getMasterKeyState()) {
addKey(dtKey);
}
// recover RMDelegationTokens
Map<RMDelegationTokenIdentifier, Long> rmDelegationTokens =
rmState.getRMDTSecretManagerState().getTokenState();
this.delegationTokenSequenceNumber =
rmState.getRMDTSecretManagerState().getDTSequenceNumber();
for (Map.Entry<RMDelegationTokenIdentifier, Long> entry : rmDelegationTokens
.entrySet()) {
addPersistedDelegationToken(entry.getKey(), entry.getValue());
}
}
public long getRenewDate(RMDelegationTokenIdentifier ident)
throws InvalidToken {
DelegationTokenInformation info = currentTokens.get(ident);
if (info == null) {
throw new InvalidToken("token (" + ident.toString()
+ ") can't be found in cache");
}
return info.getRenewDate();
}
}
| RMDelegationTokenSecretManager |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/jmx/annotation/JmxEndpoint.java | {
"start": 1440,
"end": 1817
} | interface ____ {
/**
* The id of the endpoint.
* @return the id
*/
@AliasFor(annotation = Endpoint.class)
String id() default "";
/**
* Level of access to the endpoint that is permitted by default.
* @return the default level of access
* @since 3.4.0
*/
@AliasFor(annotation = Endpoint.class)
Access defaultAccess() default Access.UNRESTRICTED;
}
| JmxEndpoint |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/datageneration/fields/leaf/BooleanFieldDataGenerator.java | {
"start": 784,
"end": 2279
} | class ____ implements FieldDataGenerator {
private final DataSource dataSource;
private final Supplier<Object> booleans;
private final Supplier<Object> booleansWithStrings;
private final Supplier<Object> booleansWithStringsAndMalformed;
public BooleanFieldDataGenerator(DataSource dataSource) {
this.dataSource = dataSource;
var booleans = dataSource.get(new DataSourceRequest.BooleanGenerator()).generator();
this.booleans = booleans::get;
// produces "true" and "false" strings
var toStringTransform = dataSource.get(new DataSourceRequest.TransformWrapper(0.5, Object::toString)).wrapper();
this.booleansWithStrings = toStringTransform.apply(this.booleans::get);
var strings = dataSource.get(new DataSourceRequest.StringGenerator()).generator();
this.booleansWithStringsAndMalformed = Wrappers.defaultsWithMalformed(booleansWithStrings, strings::get, dataSource);
}
@Override
public Object generateValue(Map<String, Object> fieldMapping) {
if (fieldMapping == null) {
// dynamically mapped, use booleans only to avoid mapping the field as string
return Wrappers.defaults(booleans, dataSource).get();
}
if ((Boolean) fieldMapping.getOrDefault("ignore_malformed", false)) {
return booleansWithStringsAndMalformed.get();
}
return Wrappers.defaults(booleansWithStrings, dataSource).get();
}
}
| BooleanFieldDataGenerator |
java | google__dagger | hilt-compiler/main/java/dagger/hilt/processor/internal/definecomponent/KspDefineComponentProcessor.java | {
"start": 1632,
"end": 1875
} | class ____ implements SymbolProcessorProvider {
@Override
public SymbolProcessor create(SymbolProcessorEnvironment symbolProcessorEnvironment) {
return new KspDefineComponentProcessor(symbolProcessorEnvironment);
}
}
}
| Provider |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/DeclaredQueries.java | {
"start": 1530,
"end": 2229
} | class ____ implements DeclaredQuery {
private final String sql;
NativeQuery(String sql) {
this.sql = sql;
}
@Override
public boolean isNative() {
return true;
}
@Override
public String getQueryString() {
return sql;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof NativeQuery that)) {
return false;
}
return ObjectUtils.nullSafeEquals(sql, that.sql);
}
@Override
public int hashCode() {
return ObjectUtils.nullSafeHashCode(sql);
}
@Override
public String toString() {
return "Native[" + sql + "]";
}
}
/**
* A rewritten {@link DeclaredQuery} holding a reference to its original query.
*/
static | NativeQuery |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java | {
"start": 3165,
"end": 31612
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(TestPlugin.class, InternalSettingsPlugin.class);
}
public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception {
assertAcked(prepareCreate("test1"));
// Verify defaults
verify(search("test1", "test2"), true);
verify(msearch(null, "test1", "test2"), true);
verify(clearCache("test1", "test2"), true);
verify(_flush("test1", "test2"), true);
verify(segments("test1", "test2"), true);
verify(indicesStats("test1", "test2"), true);
verify(forceMerge("test1", "test2"), true);
verify(refreshBuilder("test1", "test2"), true);
verify(validateQuery("test1", "test2"), true);
verify(getAliases("test1", "test2"), true);
verify(getFieldMapping("test1", "test2"), true);
verify(getMapping("test1", "test2"), true);
verify(getSettings("test1", "test2"), true);
IndicesOptions options = IndicesOptions.strictExpandOpen();
verify(search("test1", "test2").setIndicesOptions(options), true);
verify(msearch(options, "test1", "test2"), true);
verify(clearCache("test1", "test2").setIndicesOptions(options), true);
verify(_flush("test1", "test2").setIndicesOptions(options), true);
verify(segments("test1", "test2").setIndicesOptions(options), true);
verify(indicesStats("test1", "test2").setIndicesOptions(options), true);
verify(forceMerge("test1", "test2").setIndicesOptions(options), true);
verify(refreshBuilder("test1", "test2").setIndicesOptions(options), true);
verify(validateQuery("test1", "test2").setIndicesOptions(options), true);
verify(getAliases("test1", "test2").setIndicesOptions(options), true);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
verify(getMapping("test1", "test2").setIndicesOptions(options), true);
verify(getSettings("test1", "test2").setIndicesOptions(options), true);
options = IndicesOptions.lenientExpandOpen();
verify(search("test1", "test2").setIndicesOptions(options), false);
verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
verify(clearCache("test1", "test2").setIndicesOptions(options), false);
verify(_flush("test1", "test2").setIndicesOptions(options), false);
verify(segments("test1", "test2").setIndicesOptions(options), false);
verify(indicesStats("test1", "test2").setIndicesOptions(options), false);
verify(forceMerge("test1", "test2").setIndicesOptions(options), false);
verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false);
verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
options = IndicesOptions.strictExpandOpen();
assertAcked(prepareCreate("test2"));
verify(search("test1", "test2").setIndicesOptions(options), false);
verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
verify(clearCache("test1", "test2").setIndicesOptions(options), false);
verify(_flush("test1", "test2").setIndicesOptions(options), false);
verify(segments("test1", "test2").setIndicesOptions(options), false);
verify(indicesStats("test1", "test2").setIndicesOptions(options), false);
verify(forceMerge("test1", "test2").setIndicesOptions(options), false);
verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false);
verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
}
public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Exception {
assertAcked(prepareCreate("test1"));
// we need to wait until all shards are allocated since recovery from
// gateway will fail unless the majority of the replicas was allocated
// pre-closing. with lots of replicas this will fail.
ensureGreen();
assertAcked(indicesAdmin().prepareClose("test1"));
IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();
verify(search("test1").setIndicesOptions(options), true);
verify(msearch(options, "test1"), true);
verify(clearCache("test1").setIndicesOptions(options), true);
verify(_flush("test1").setIndicesOptions(options), true);
verify(segments("test1").setIndicesOptions(options), true);
verify(indicesStats("test1").setIndicesOptions(options), true);
verify(forceMerge("test1").setIndicesOptions(options), true);
verify(refreshBuilder("test1").setIndicesOptions(options), true);
verify(validateQuery("test1").setIndicesOptions(options), true);
verify(getAliases("test1").setIndicesOptions(options), true);
verify(getFieldMapping("test1").setIndicesOptions(options), true);
verify(getMapping("test1").setIndicesOptions(options), true);
verify(getSettings("test1").setIndicesOptions(options), true);
options = IndicesOptions.fromOptions(
true,
options.allowNoIndices(),
options.expandWildcardsOpen(),
options.expandWildcardsClosed(),
options
);
verify(search("test1").setIndicesOptions(options), false);
verify(msearch(options, "test1"), false);
verify(clearCache("test1").setIndicesOptions(options), false);
verify(_flush("test1").setIndicesOptions(options), false);
verify(segments("test1").setIndicesOptions(options), false);
verify(indicesStats("test1").setIndicesOptions(options), false);
verify(forceMerge("test1").setIndicesOptions(options), false);
verify(refreshBuilder("test1").setIndicesOptions(options), false);
verify(validateQuery("test1").setIndicesOptions(options), false);
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
assertAcked(indicesAdmin().prepareOpen("test1"));
ensureYellow();
options = IndicesOptions.strictExpandOpenAndForbidClosed();
verify(search("test1").setIndicesOptions(options), false);
verify(msearch(options, "test1"), false);
verify(clearCache("test1").setIndicesOptions(options), false);
verify(_flush("test1").setIndicesOptions(options), false);
verify(segments("test1").setIndicesOptions(options), false);
verify(indicesStats("test1").setIndicesOptions(options), false);
verify(forceMerge("test1").setIndicesOptions(options), false);
verify(refreshBuilder("test1").setIndicesOptions(options), false);
verify(validateQuery("test1").setIndicesOptions(options), false);
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
}
public void testSpecifiedIndexUnavailableSingleIndex() throws Exception {
IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();
verify(search("test1").setIndicesOptions(options), true);
verify(msearch(options, "test1"), true);
verify(clearCache("test1").setIndicesOptions(options), true);
verify(_flush("test1").setIndicesOptions(options), true);
verify(segments("test1").setIndicesOptions(options), true);
verify(indicesStats("test1").setIndicesOptions(options), true);
verify(forceMerge("test1").setIndicesOptions(options), true);
verify(refreshBuilder("test1").setIndicesOptions(options), true);
verify(validateQuery("test1").setIndicesOptions(options), true);
verify(getAliases("test1").setIndicesOptions(options), true);
verify(getFieldMapping("test1").setIndicesOptions(options), true);
verify(getMapping("test1").setIndicesOptions(options), true);
verify(getSettings("test1").setIndicesOptions(options), true);
options = IndicesOptions.fromOptions(
true,
options.allowNoIndices(),
options.expandWildcardsOpen(),
options.expandWildcardsClosed(),
options
);
verify(search("test1").setIndicesOptions(options), false);
verify(msearch(options, "test1"), false);
verify(clearCache("test1").setIndicesOptions(options), false);
verify(_flush("test1").setIndicesOptions(options), false);
verify(segments("test1").setIndicesOptions(options), false);
verify(indicesStats("test1").setIndicesOptions(options), false);
verify(forceMerge("test1").setIndicesOptions(options), false);
verify(refreshBuilder("test1").setIndicesOptions(options), false);
verify(validateQuery("test1").setIndicesOptions(options), false);
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
assertAcked(prepareCreate("test1"));
options = IndicesOptions.strictExpandOpenAndForbidClosed();
verify(search("test1").setIndicesOptions(options), false);
verify(msearch(options, "test1"), false);
verify(clearCache("test1").setIndicesOptions(options), false);
verify(_flush("test1").setIndicesOptions(options), false);
verify(segments("test1").setIndicesOptions(options), false);
verify(indicesStats("test1").setIndicesOptions(options), false);
verify(forceMerge("test1").setIndicesOptions(options), false);
verify(refreshBuilder("test1").setIndicesOptions(options), false);
verify(validateQuery("test1").setIndicesOptions(options), false);
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
}
public void testSpecifiedIndexUnavailableSnapshotRestore() throws Exception {
createIndex("test1");
ensureGreen("test1");
waitForRelocation();
AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
"dummy-repo"
).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", "snap1").setWaitForCompletion(true).get();
verify(snapshot("snap2", "test1", "test2"), true);
verify(restore("snap1", "test1", "test2"), true);
IndicesOptions options = IndicesOptions.strictExpandOpen();
verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), true);
verify(restore("snap1", "test1", "test2").setIndicesOptions(options), true);
options = IndicesOptions.lenientExpandOpen();
verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), false);
verify(restore("snap2", "test1", "test2").setIndicesOptions(options), false);
options = IndicesOptions.strictExpandOpen();
createIndex("test2");
// TODO: temporary work-around for #5531
ensureGreen("test2");
waitForRelocation();
verify(snapshot("snap3", "test1", "test2").setIndicesOptions(options), false);
verify(restore("snap3", "test1", "test2").setIndicesOptions(options), false);
}
public void testWildcardBehaviour() throws Exception {
// Verify defaults for wildcards, when specifying no indices (*, _all, /)
String[] indices = Strings.EMPTY_ARRAY;
verify(search(indices), false);
verify(msearch(null, indices), false);
verify(clearCache(indices), false);
verify(_flush(indices), false);
verify(segments(indices), false);
verify(indicesStats(indices), false);
verify(forceMerge(indices), false);
verify(refreshBuilder(indices), false);
verify(validateQuery(indices), false);
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getSettings(indices), false);
// Now force allow_no_indices=true
IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false);
verify(search(indices).setIndicesOptions(options), false);
verify(msearch(options, indices).setIndicesOptions(options), false);
verify(clearCache(indices).setIndicesOptions(options), false);
verify(_flush(indices).setIndicesOptions(options), false);
verify(segments(indices).setIndicesOptions(options), false);
verify(indicesStats(indices).setIndicesOptions(options), false);
verify(forceMerge(indices).setIndicesOptions(options), false);
verify(refreshBuilder(indices).setIndicesOptions(options), false);
verify(validateQuery(indices).setIndicesOptions(options), false);
verify(getAliases(indices).setIndicesOptions(options), false);
verify(getFieldMapping(indices).setIndicesOptions(options), false);
verify(getMapping(indices).setIndicesOptions(options), false);
verify(getSettings(indices).setIndicesOptions(options), false);
assertAcked(prepareCreate("foobar"));
prepareIndex("foobar").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get();
// Verify defaults for wildcards, with one wildcard expression and one existing index
indices = new String[] { "foo*" };
verify(search(indices), false, 1);
verify(msearch(null, indices), false, 1);
verify(clearCache(indices), false);
verify(_flush(indices), false);
verify(segments(indices), false);
verify(indicesStats(indices), false);
verify(forceMerge(indices), false);
verify(refreshBuilder(indices), false);
verify(validateQuery(indices), false);
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getSettings(indices).setIndicesOptions(options), false);
// Verify defaults for wildcards, with two wildcard expression and one existing index
indices = new String[] { "foo*", "bar*" };
verify(search(indices), false, 1);
verify(msearch(null, indices), false, 1);
verify(clearCache(indices), false);
verify(_flush(indices), false);
verify(segments(indices), false);
verify(indicesStats(indices), false);
verify(forceMerge(indices), false);
verify(refreshBuilder(indices), false);
verify(validateQuery(indices), false);
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getSettings(indices).setIndicesOptions(options), false);
// Now force allow_no_indices=true
options = IndicesOptions.fromOptions(false, true, true, false);
verify(search(indices).setIndicesOptions(options), false, 1);
verify(msearch(options, indices).setIndicesOptions(options), false, 1);
verify(clearCache(indices).setIndicesOptions(options), false);
verify(_flush(indices).setIndicesOptions(options), false);
verify(segments(indices).setIndicesOptions(options), false);
verify(indicesStats(indices).setIndicesOptions(options), false);
verify(forceMerge(indices).setIndicesOptions(options), false);
verify(refreshBuilder(indices).setIndicesOptions(options), false);
verify(validateQuery(indices).setIndicesOptions(options), false);
verify(getAliases(indices).setIndicesOptions(options), false);
verify(getFieldMapping(indices).setIndicesOptions(options), false);
verify(getMapping(indices).setIndicesOptions(options), false);
verify(getSettings(indices).setIndicesOptions(options), false);
}
public void testWildcardBehaviourSnapshotRestore() throws Exception {
createIndex("foobar");
ensureGreen("foobar");
waitForRelocation();
AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
"dummy-repo"
).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", "snap1").setWaitForCompletion(true).get();
IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false);
verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true);
verify(restore("snap1", "foo*", "bar*").setIndicesOptions(options), true);
options = IndicesOptions.strictExpandOpen();
verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), false);
verify(restore("snap2", "foo*", "bar*").setIndicesOptions(options), false);
assertAcked(prepareCreate("barbaz"));
// TODO: temporary work-around for #5531
ensureGreen("barbaz");
waitForRelocation();
options = IndicesOptions.fromOptions(false, false, true, false);
verify(snapshot("snap3", "foo*", "bar*").setIndicesOptions(options), false);
verify(restore("snap3", "foo*", "bar*").setIndicesOptions(options), false);
options = IndicesOptions.fromOptions(false, false, true, false);
verify(snapshot("snap4", "foo*", "baz*").setIndicesOptions(options), true);
verify(restore("snap3", "foo*", "baz*").setIndicesOptions(options), true);
}
public void testAllMissingLenient() throws Exception {
createIndex("test1");
prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get();
assertHitCount(
0L,
prepareSearch("test2").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()),
prepareSearch("test2", "test3").setQuery(matchAllQuery()).setIndicesOptions(IndicesOptions.lenientExpandOpen())
);
// you should still be able to run empty searches without things blowing up
assertHitCount(prepareSearch().setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), 1L);
}
public void testAllMissingStrict() throws Exception {
createIndex("test1");
expectThrows(IndexNotFoundException.class, prepareSearch("test2").setQuery(matchAllQuery()));
expectThrows(IndexNotFoundException.class, prepareSearch("test2", "test3").setQuery(matchAllQuery()));
// you should still be able to run empty searches without things blowing up
prepareSearch().setQuery(matchAllQuery()).get().decRef();
}
// For now don't handle closed indices
public void testCloseApiSpecifiedIndices() throws Exception {
createIndex("test1", "test2");
ensureGreen();
verify(search("test1", "test2"), false);
assertAcked(indicesAdmin().prepareClose("test2").get());
verify(search("test1", "test2"), true);
IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed());
verify(search("test1", "test2").setIndicesOptions(options), false);
verify(search(), false);
verify(search("t*"), false);
}
public void testOpenCloseApiWildcards() throws Exception {
createIndex("foo", "foobar", "bar", "barbaz");
ensureGreen();
// if there are no indices to open/close and allow_no_indices=true (default), the open/close is a no-op
verify(indicesAdmin().prepareClose("bar*"), false);
verify(indicesAdmin().prepareClose("bar*"), false);
verify(indicesAdmin().prepareClose("foo*"), false);
verify(indicesAdmin().prepareClose("foo*"), false);
verify(indicesAdmin().prepareClose("_all"), false);
verify(indicesAdmin().prepareOpen("bar*"), false);
verify(indicesAdmin().prepareOpen("_all"), false);
verify(indicesAdmin().prepareOpen("_all"), false);
// if there are no indices to open/close throw an exception
IndicesOptions openIndicesOptions = IndicesOptions.fromOptions(false, false, false, true);
IndicesOptions closeIndicesOptions = IndicesOptions.fromOptions(false, false, true, false);
verify(indicesAdmin().prepareClose("bar*").setIndicesOptions(closeIndicesOptions), false);
verify(indicesAdmin().prepareClose("bar*").setIndicesOptions(closeIndicesOptions), true);
verify(indicesAdmin().prepareClose("foo*").setIndicesOptions(closeIndicesOptions), false);
verify(indicesAdmin().prepareClose("foo*").setIndicesOptions(closeIndicesOptions), true);
verify(indicesAdmin().prepareClose("_all").setIndicesOptions(closeIndicesOptions), true);
verify(indicesAdmin().prepareOpen("bar*").setIndicesOptions(openIndicesOptions), false);
verify(indicesAdmin().prepareOpen("_all").setIndicesOptions(openIndicesOptions), false);
verify(indicesAdmin().prepareOpen("_all").setIndicesOptions(openIndicesOptions), true);
}
public void testDeleteIndex() throws Exception {
createIndex("foobar");
verify(indicesAdmin().prepareDelete("foo"), true);
assertThat(indexExists("foobar"), equalTo(true));
verify(indicesAdmin().prepareDelete("foobar"), false);
assertThat(indexExists("foobar"), equalTo(false));
}
public void testDeleteIndexWildcard() throws Exception {
verify(indicesAdmin().prepareDelete("_all"), false);
createIndex("foo", "foobar", "bar", "barbaz");
verify(indicesAdmin().prepareDelete("foo*"), false);
assertThat(indexExists("foobar"), equalTo(false));
assertThat(indexExists("bar"), equalTo(true));
assertThat(indexExists("barbaz"), equalTo(true));
verify(indicesAdmin().prepareDelete("foo*"), false);
verify(indicesAdmin().prepareDelete("_all"), false);
assertThat(indexExists("foo"), equalTo(false));
assertThat(indexExists("foobar"), equalTo(false));
assertThat(indexExists("bar"), equalTo(false));
assertThat(indexExists("barbaz"), equalTo(false));
}
public void testPutAlias() throws Exception {
createIndex("foobar");
verify(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias("foobar", "foobar_alias"), false);
assertFalse(
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty()
);
}
public void testPutAliasWildcard() throws Exception {
createIndex("foo", "foobar", "bar", "barbaz");
verify(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias("foo*", "foobar_alias"), false);
assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foo").get().getAliases().isEmpty());
assertFalse(
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty()
);
assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("bar").get().getAliases().isEmpty());
assertTrue(
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()
);
verify(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias("*", "foobar_alias"), false);
assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foo").get().getAliases().isEmpty());
assertFalse(
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty()
);
assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("bar").get().getAliases().isEmpty());
assertFalse(
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()
);
}
public void testPutMapping() throws Exception {
verify(indicesAdmin().preparePutMapping("foo").setSource("field", "type=text"), true);
verify(indicesAdmin().preparePutMapping("_all").setSource("field", "type=text"), true);
for (String index : Arrays.asList("foo", "foobar", "bar", "barbaz")) {
assertAcked(prepareCreate(index));
}
verify(indicesAdmin().preparePutMapping("foo").setSource("field", "type=text"), false);
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
verify(indicesAdmin().preparePutMapping("b*").setSource("field", "type=text"), false);
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("_all").setSource("field", "type=text"), false);
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping().setSource("field", "type=text"), false);
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("c*").setSource("field", "type=text"), true);
assertAcked(indicesAdmin().prepareClose("barbaz").get());
verify(indicesAdmin().preparePutMapping("barbaz").setSource("field", "type=text"), false);
assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
}
public static final | IndicesOptionsIntegrationIT |
java | elastic__elasticsearch | x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckRequest.java | {
"start": 424,
"end": 1050
} | class ____ extends BaseNodesRequest {
public NodesDeprecationCheckRequest(String... nodesIds) {
super(nodesIds);
}
@Override
public int hashCode() {
return Objects.hash((Object[]) this.nodesIds());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
NodesDeprecationCheckRequest that = (NodesDeprecationCheckRequest) obj;
return Arrays.equals(this.nodesIds(), that.nodesIds());
}
}
| NodesDeprecationCheckRequest |
java | grpc__grpc-java | netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java | {
"start": 46195,
"end": 48950
} | class ____ extends ChannelDuplexHandler {
private final ChannelHandler next;
private final String negotiatorName;
private ProtocolNegotiationEvent pne;
private final ChannelLogger negotiationLogger;
protected ProtocolNegotiationHandler(ChannelHandler next, String negotiatorName,
ChannelLogger negotiationLogger) {
this.next = Preconditions.checkNotNull(next, "next");
this.negotiatorName = negotiatorName;
this.negotiationLogger = Preconditions.checkNotNull(negotiationLogger, "negotiationLogger");
}
protected ProtocolNegotiationHandler(ChannelHandler next, ChannelLogger negotiationLogger) {
this.next = Preconditions.checkNotNull(next, "next");
this.negotiatorName = getClass().getSimpleName().replace("Handler", "");
this.negotiationLogger = Preconditions.checkNotNull(negotiationLogger, "negotiationLogger");
}
@Override
public final void handlerAdded(ChannelHandlerContext ctx) throws Exception {
negotiationLogger.log(ChannelLogLevel.DEBUG, "{0} started", negotiatorName);
handlerAdded0(ctx);
}
@ForOverride
protected void handlerAdded0(ChannelHandlerContext ctx) throws Exception {
super.handlerAdded(ctx);
}
@Override
public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof ProtocolNegotiationEvent) {
checkState(pne == null, "pre-existing negotiation: %s < %s", pne, evt);
pne = (ProtocolNegotiationEvent) evt;
protocolNegotiationEventTriggered(ctx);
} else {
userEventTriggered0(ctx, evt);
}
}
protected void userEventTriggered0(ChannelHandlerContext ctx, Object evt) throws Exception {
super.userEventTriggered(ctx, evt);
}
@ForOverride
protected void protocolNegotiationEventTriggered(ChannelHandlerContext ctx) {
// no-op
}
protected final ProtocolNegotiationEvent getProtocolNegotiationEvent() {
checkState(pne != null, "previous protocol negotiation event hasn't triggered");
return pne;
}
protected final void replaceProtocolNegotiationEvent(ProtocolNegotiationEvent pne) {
checkState(this.pne != null, "previous protocol negotiation event hasn't triggered");
this.pne = Preconditions.checkNotNull(pne);
}
protected final void fireProtocolNegotiationEvent(ChannelHandlerContext ctx) {
checkState(pne != null, "previous protocol negotiation event hasn't triggered");
negotiationLogger.log(ChannelLogLevel.INFO, "{0} completed", negotiatorName);
ctx.pipeline().replace(ctx.name(), /* newName= */ null, next);
ctx.fireUserEventTriggered(pne);
}
}
static final | ProtocolNegotiationHandler |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/SavepointWriterITCase.java | {
"start": 10756,
"end": 11398
} | class ____ {
public String currency;
public Double rate;
CurrencyRate(String currency, double rate) {
this.currency = currency;
this.rate = rate;
}
@Override
public boolean equals(Object obj) {
return obj instanceof CurrencyRate
&& ((CurrencyRate) obj).currency.equals(currency)
&& ((CurrencyRate) obj).rate.equals(rate);
}
@Override
public int hashCode() {
return Objects.hash(currency, rate);
}
}
/** A savepoint writer function. */
public static | CurrencyRate |
java | google__dagger | javatests/dagger/internal/codegen/SetBindingRequestFulfillmentTest.java | {
"start": 3854,
"end": 4048
} | class ____ {}");
JavaFileObject inaccessible2 =
JavaFileObjects.forSourceLines(
"other.Inaccessible2",
"package other;",
"",
" | Inaccessible |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvnup/goals/StrategyOrchestrator.java | {
"start": 1300,
"end": 1693
} | class ____ the same functionality as StrategyOrchestrator but works
* with domtrip-based strategies for superior formatting preservation.
*
* <p>Determines which strategies to apply based on options and executes them in priority order.
* The DI container automatically sorts the injected strategies by their @Priority annotations.
*/
@Named("strategy-orchestrator")
@Singleton
public | provides |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/env/spi/IdentifierHelperBuilder.java | {
"start": 800,
"end": 7410
} | class ____ {
private static final Logger LOG = Logger.getLogger( IdentifierHelperBuilder.class );
private final JdbcEnvironment jdbcEnvironment;
private NameQualifierSupport nameQualifierSupport = NameQualifierSupport.BOTH;
//TODO interesting computer science puzzle: find a more compact representation?
// we only need "contains" on this set, and it has to be case sensitive and efficient.
private final TreeSet<String> reservedWords = new TreeSet<>( String.CASE_INSENSITIVE_ORDER );
private boolean globallyQuoteIdentifiers = false;
private boolean skipGlobalQuotingForColumnDefinitions = false;
private boolean autoQuoteKeywords = true;
private boolean autoQuoteInitialUnderscore = false;
private boolean autoQuoteDollar = false;
private IdentifierCaseStrategy unquotedCaseStrategy = IdentifierCaseStrategy.UPPER;
private IdentifierCaseStrategy quotedCaseStrategy = IdentifierCaseStrategy.MIXED;
public static IdentifierHelperBuilder from(JdbcEnvironment jdbcEnvironment) {
return new IdentifierHelperBuilder( jdbcEnvironment );
}
private IdentifierHelperBuilder(JdbcEnvironment jdbcEnvironment) {
this.jdbcEnvironment = jdbcEnvironment;
}
/**
* Applies any reserved words reported via {@link DatabaseMetaData#getSQLKeywords()}
*
* @param metaData The metadata to get reserved words from
*
* @throws SQLException Any access to DatabaseMetaData can case SQLException; just re-throw.
*/
public void applyReservedWords(DatabaseMetaData metaData) throws SQLException {
if ( metaData != null
// Important optimisation: skip loading all keywords
// from the DB when autoQuoteKeywords is disabled
&& autoQuoteKeywords ) {
addAll( reservedWords, splitAtCommas( metaData.getSQLKeywords() ) );
}
}
public void applyIdentifierCasing(DatabaseMetaData metaData) throws SQLException {
if ( metaData != null ) {
final int unquotedAffirmatives = ArrayHelper.countTrue(
metaData.storesLowerCaseIdentifiers(),
metaData.storesUpperCaseIdentifiers(),
metaData.storesMixedCaseIdentifiers()
);
if ( unquotedAffirmatives == 0 ) {
LOG.trace( "JDBC driver metadata reported database stores unquoted identifiers in neither upper, lower nor mixed case" );
}
else {
// NOTE: still "dodgy" if more than one is true
if ( unquotedAffirmatives > 1 ) {
LOG.trace( "JDBC driver metadata reported database stores unquoted identifiers in more than one case" );
}
if ( metaData.storesUpperCaseIdentifiers() ) {
unquotedCaseStrategy = IdentifierCaseStrategy.UPPER;
}
else if ( metaData.storesLowerCaseIdentifiers() ) {
unquotedCaseStrategy = IdentifierCaseStrategy.LOWER;
}
else {
unquotedCaseStrategy = IdentifierCaseStrategy.MIXED;
}
}
final int quotedAffirmatives = ArrayHelper.countTrue(
metaData.storesLowerCaseQuotedIdentifiers(),
metaData.storesUpperCaseQuotedIdentifiers(),
metaData.storesMixedCaseQuotedIdentifiers()
);
if ( quotedAffirmatives == 0 ) {
LOG.trace( "JDBC driver metadata reported database stores quoted identifiers in neither upper, lower nor mixed case" );
}
else {
// NOTE: still "dodgy" if more than one is true
if ( quotedAffirmatives > 1 ) {
LOG.trace( "JDBC driver metadata reported database stores quoted identifiers in more than one case" );
}
if ( metaData.storesMixedCaseQuotedIdentifiers() ) {
quotedCaseStrategy = IdentifierCaseStrategy.MIXED;
}
else if ( metaData.storesLowerCaseQuotedIdentifiers() ) {
quotedCaseStrategy = IdentifierCaseStrategy.LOWER;
}
else {
quotedCaseStrategy = IdentifierCaseStrategy.UPPER;
}
}
}
}
public boolean isGloballyQuoteIdentifiers() {
return globallyQuoteIdentifiers;
}
public void setGloballyQuoteIdentifiers(boolean globallyQuoteIdentifiers) {
this.globallyQuoteIdentifiers = globallyQuoteIdentifiers;
}
public boolean isSkipGlobalQuotingForColumnDefinitions() {
return skipGlobalQuotingForColumnDefinitions;
}
public void setSkipGlobalQuotingForColumnDefinitions(boolean skipGlobalQuotingForColumnDefinitions) {
this.skipGlobalQuotingForColumnDefinitions = skipGlobalQuotingForColumnDefinitions;
}
public void setAutoQuoteKeywords(boolean autoQuoteKeywords) {
this.autoQuoteKeywords = autoQuoteKeywords;
}
public void setAutoQuoteInitialUnderscore(boolean autoQuoteInitialUnderscore) {
this.autoQuoteInitialUnderscore = autoQuoteInitialUnderscore;
}
public void setAutoQuoteDollar(boolean autoQuoteDollar) {
this.autoQuoteDollar = autoQuoteDollar;
}
public NameQualifierSupport getNameQualifierSupport() {
return nameQualifierSupport;
}
public void setNameQualifierSupport(NameQualifierSupport nameQualifierSupport) {
this.nameQualifierSupport = nameQualifierSupport == null ? NameQualifierSupport.BOTH : nameQualifierSupport;
}
public IdentifierCaseStrategy getUnquotedCaseStrategy() {
return unquotedCaseStrategy;
}
public void setUnquotedCaseStrategy(IdentifierCaseStrategy unquotedCaseStrategy) {
this.unquotedCaseStrategy = unquotedCaseStrategy;
}
public IdentifierCaseStrategy getQuotedCaseStrategy() {
return quotedCaseStrategy;
}
public void setQuotedCaseStrategy(IdentifierCaseStrategy quotedCaseStrategy) {
this.quotedCaseStrategy = quotedCaseStrategy;
}
public void clearReservedWords() {
this.reservedWords.clear();
}
public void applyReservedWords(String... words) {
applyReservedWords( Arrays.asList( words ) );
}
public void applyReservedWords(Collection<String> words) {
//No use when autoQuoteKeywords is disabled
if ( autoQuoteKeywords ) {
reservedWords.addAll( words );
}
}
public void applyReservedWords(Set<String> words) {
applyReservedWords( (Collection<String>) words );
}
public void setReservedWords(Set<String> words) {
clearReservedWords();
applyReservedWords( words );
}
public IdentifierHelper build() {
if ( unquotedCaseStrategy == quotedCaseStrategy ) {
LOG.debugf(
"IdentifierCaseStrategy for both quoted and unquoted identifiers was set " +
"to the same strategy [%s]; that will likely lead to problems in schema update " +
"and validation if using quoted identifiers",
unquotedCaseStrategy.name()
);
}
return new NormalizingIdentifierHelperImpl(
jdbcEnvironment,
nameQualifierSupport,
globallyQuoteIdentifiers,
skipGlobalQuotingForColumnDefinitions,
autoQuoteKeywords,
autoQuoteInitialUnderscore,
autoQuoteDollar,
reservedWords,
unquotedCaseStrategy,
quotedCaseStrategy
);
}
}
| IdentifierHelperBuilder |
java | spring-projects__spring-framework | spring-context-support/src/main/java/org/springframework/scheduling/quartz/JobMethodInvocationFailedException.java | {
"start": 1092,
"end": 1621
} | class ____ extends NestedRuntimeException {
/**
* Constructor for JobMethodInvocationFailedException.
* @param methodInvoker the MethodInvoker used for reflective invocation
* @param cause the root cause (as thrown from the target method)
*/
public JobMethodInvocationFailedException(MethodInvoker methodInvoker, Throwable cause) {
super("Invocation of method '" + methodInvoker.getTargetMethod() +
"' on target class [" + methodInvoker.getTargetClass() + "] failed", cause);
}
}
| JobMethodInvocationFailedException |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/push/NamingSubscriberServiceAggregationImpl.java | {
"start": 1866,
"end": 5630
} | class ____ implements NamingSubscriberService {
private static final String SUBSCRIBER_ON_SYNC_URL = "/subscribers";
private final NamingSubscriberServiceLocalImpl subscriberServiceLocal;
private final ServerMemberManager memberManager;
public NamingSubscriberServiceAggregationImpl(NamingSubscriberServiceLocalImpl subscriberServiceLocal,
ServerMemberManager serverMemberManager) {
this.subscriberServiceLocal = subscriberServiceLocal;
this.memberManager = serverMemberManager;
}
@Override
public Collection<Subscriber> getSubscribers(String namespaceId, String serviceName) {
Collection<Subscriber> result = new LinkedList<>(
subscriberServiceLocal.getSubscribers(namespaceId, serviceName));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(namespaceId, serviceName, result);
}
return result;
}
@Override
public Collection<Subscriber> getSubscribers(Service service) {
Collection<Subscriber> result = new LinkedList<>(subscriberServiceLocal.getSubscribers(service));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(service.getNamespace(), service.getGroupedServiceName(), result);
}
return result;
}
@Override
public Collection<Subscriber> getFuzzySubscribers(String namespaceId, String serviceName) {
Collection<Subscriber> result = new LinkedList<>(
subscriberServiceLocal.getFuzzySubscribers(namespaceId, serviceName));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(namespaceId, serviceName, result);
}
return result;
}
@Override
public Collection<Subscriber> getFuzzySubscribers(Service service) {
Collection<Subscriber> result = new LinkedList<>(subscriberServiceLocal.getFuzzySubscribers(service));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(service.getNamespace(), service.getGroupedServiceName(), result);
}
return result;
}
private void getSubscribersFromRemotes(String namespaceId, String serviceName, Collection<Subscriber> result) {
for (Member server : memberManager.allMembersWithoutSelf()) {
Map<String, String> paramValues = new HashMap<>(128);
String groupName = NamingUtils.getGroupName(serviceName);
String serviceNameWithoutGroup = NamingUtils.getServiceName(serviceName);
paramValues.put(CommonParams.GROUP_NAME, groupName);
paramValues.put(CommonParams.SERVICE_NAME, serviceNameWithoutGroup);
paramValues.put(CommonParams.NAMESPACE_ID, namespaceId);
paramValues.put("aggregation", String.valueOf(Boolean.FALSE));
RestResult<String> response = HttpClient.httpGet(
HTTP_PREFIX + server.getAddress() + EnvUtil.getContextPath()
+ UtilsAndCommons.SERVICE_CONTROLLER_V3_ADMIN_PATH + SUBSCRIBER_ON_SYNC_URL,
new ArrayList<>(), paramValues);
if (response.ok()) {
Result<Page<SubscriberInfo>> subscribers = JacksonUtils.toObj(response.getData(),
new TypeReference<>() {
});
for (SubscriberInfo each : subscribers.getData().getPageItems()) {
result.add(new Subscriber(each.getAddress(), each.getAgent(), each.getAppName(), each.getIp(),
each.getNamespaceId(), serviceName, each.getPort()));
}
}
}
}
}
| NamingSubscriberServiceAggregationImpl |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/steps/RuntimeConfigSetupBuildStep.java | {
"start": 831,
"end": 2454
} | class ____ {
private static final String RUNTIME_CONFIG_STARTUP_TASK_CLASS_NAME = "io.quarkus.deployment.steps.RuntimeConfigSetup";
/**
* Generates a StartupTask that sets up the final runtime configuration and thus runs before any StartupTask that uses
* runtime configuration.
* If there are recorders that produce a ConfigSourceProvider, these objects are used to set up the final runtime
* configuration
*/
@BuildStep
@Produce(RuntimeConfigSetupCompleteBuildItem.class)
void setupRuntimeConfig(
BuildProducer<GeneratedClassBuildItem> generatedClass,
BuildProducer<MainBytecodeRecorderBuildItem> mainBytecodeRecorder) {
ClassOutput classOutput = new GeneratedClassGizmoAdaptor(generatedClass, true);
try (ClassCreator clazz = ClassCreator.builder().classOutput(classOutput)
.className(RUNTIME_CONFIG_STARTUP_TASK_CLASS_NAME)
.interfaces(StartupTask.class).build()) {
try (MethodCreator method = clazz.getMethodCreator("deploy", void.class, StartupContext.class)) {
method.invokeVirtualMethod(ofMethod(StartupContext.class, "setCurrentBuildStepName", void.class, String.class),
method.getMethodParam(0), method.load("RuntimeConfigSetupBuildStep.setupRuntimeConfig"));
method.invokeStaticMethod(C_CREATE_RUN_TIME_CONFIG);
method.returnValue(null);
}
}
mainBytecodeRecorder.produce(new MainBytecodeRecorderBuildItem(RUNTIME_CONFIG_STARTUP_TASK_CLASS_NAME));
}
}
| RuntimeConfigSetupBuildStep |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/InstantTemporalUnitTest.java | {
"start": 3159,
"end": 4694
} | class ____ {
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I0 = Instant.EPOCH.plus(1, ChronoUnit.CENTURIES);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I1 = Instant.EPOCH.plus(1, ChronoUnit.DECADES);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I2 = Instant.EPOCH.plus(1, ChronoUnit.ERAS);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I3 = Instant.EPOCH.plus(1, ChronoUnit.FOREVER);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I4 = Instant.EPOCH.plus(1, ChronoUnit.MILLENNIA);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I5 = Instant.EPOCH.plus(1, ChronoUnit.MONTHS);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I6 = Instant.EPOCH.plus(1, ChronoUnit.WEEKS);
// BUG: Diagnostic contains: InstantTemporalUnit
private static final Instant I7 = Instant.EPOCH.plus(1, ChronoUnit.YEARS);
}
""")
.doTest();
}
@Test
public void instantMinus_good() {
helper
.addSourceLines(
"TestClass.java",
"""
import java.time.Instant;
import java.time.temporal.ChronoUnit;
public | TestClass |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/SimpleDecoratorOverloadingTest.java | {
"start": 1109,
"end": 1435
} | class ____ implements Converter {
@Override
public String convert(String value) {
return value.toUpperCase();
}
@Override
public int convert(int value) {
return -1 * value;
}
}
@Dependent
@Priority(1)
@Decorator
static | SimpleConverter |
java | apache__camel | components/camel-geocoder/src/generated/java/org/apache/camel/component/geocoder/GeoCoderEndpointConfigurer.java | {
"start": 735,
"end": 6348
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
GeoCoderEndpoint target = (GeoCoderEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apikey":
case "apiKey": target.setApiKey(property(camelContext, java.lang.String.class, value)); return true;
case "clientid":
case "clientId": target.setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "clientkey":
case "clientKey": target.setClientKey(property(camelContext, java.lang.String.class, value)); return true;
case "headersonly":
case "headersOnly": target.setHeadersOnly(property(camelContext, boolean.class, value)); return true;
case "language": target.setLanguage(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "proxyauthdomain":
case "proxyAuthDomain": target.setProxyAuthDomain(property(camelContext, java.lang.String.class, value)); return true;
case "proxyauthhost":
case "proxyAuthHost": target.setProxyAuthHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyauthmethod":
case "proxyAuthMethod": target.setProxyAuthMethod(property(camelContext, java.lang.String.class, value)); return true;
case "proxyauthpassword":
case "proxyAuthPassword": target.setProxyAuthPassword(property(camelContext, java.lang.String.class, value)); return true;
case "proxyauthusername":
case "proxyAuthUsername": target.setProxyAuthUsername(property(camelContext, java.lang.String.class, value)); return true;
case "proxyhost":
case "proxyHost": target.setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": target.setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "serverurl":
case "serverUrl": target.setServerUrl(property(camelContext, java.lang.String.class, value)); return true;
case "type": target.setType(property(camelContext, org.apache.camel.component.geocoder.GeoCoderType.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apikey":
case "apiKey": return java.lang.String.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "clientkey":
case "clientKey": return java.lang.String.class;
case "headersonly":
case "headersOnly": return boolean.class;
case "language": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "proxyauthdomain":
case "proxyAuthDomain": return java.lang.String.class;
case "proxyauthhost":
case "proxyAuthHost": return java.lang.String.class;
case "proxyauthmethod":
case "proxyAuthMethod": return java.lang.String.class;
case "proxyauthpassword":
case "proxyAuthPassword": return java.lang.String.class;
case "proxyauthusername":
case "proxyAuthUsername": return java.lang.String.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "serverurl":
case "serverUrl": return java.lang.String.class;
case "type": return org.apache.camel.component.geocoder.GeoCoderType.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
GeoCoderEndpoint target = (GeoCoderEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apikey":
case "apiKey": return target.getApiKey();
case "clientid":
case "clientId": return target.getClientId();
case "clientkey":
case "clientKey": return target.getClientKey();
case "headersonly":
case "headersOnly": return target.isHeadersOnly();
case "language": return target.getLanguage();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "proxyauthdomain":
case "proxyAuthDomain": return target.getProxyAuthDomain();
case "proxyauthhost":
case "proxyAuthHost": return target.getProxyAuthHost();
case "proxyauthmethod":
case "proxyAuthMethod": return target.getProxyAuthMethod();
case "proxyauthpassword":
case "proxyAuthPassword": return target.getProxyAuthPassword();
case "proxyauthusername":
case "proxyAuthUsername": return target.getProxyAuthUsername();
case "proxyhost":
case "proxyHost": return target.getProxyHost();
case "proxyport":
case "proxyPort": return target.getProxyPort();
case "serverurl":
case "serverUrl": return target.getServerUrl();
case "type": return target.getType();
default: return null;
}
}
}
| GeoCoderEndpointConfigurer |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/nullness/UnnecessaryCheckNotNull.java | {
"start": 2624,
"end": 9915
} | class ____ extends BugChecker implements MethodInvocationTreeMatcher {
private static final Matcher<MethodInvocationTree> CHECK_NOT_NULL_MATCHER =
Matchers.<MethodInvocationTree>anyOf(
staticMethod().onClass("com.google.common.base.Preconditions").named("checkNotNull"),
staticMethod().onClass("com.google.common.base.Verify").named("verifyNotNull"),
staticMethod().onClass("java.util.Objects").named("requireNonNull"));
private static final Matcher<MethodInvocationTree> NEW_INSTANCE_MATCHER =
argument(
0, Matchers.<ExpressionTree>kindAnyOf(ImmutableSet.of(Kind.NEW_CLASS, Kind.NEW_ARRAY)));
private static final Matcher<MethodInvocationTree> STRING_LITERAL_ARG_MATCHER =
argument(0, Matchers.<ExpressionTree>kindIs(STRING_LITERAL));
private static final Matcher<MethodInvocationTree> PRIMITIVE_ARG_MATCHER =
argument(0, Matchers.<ExpressionTree>isPrimitiveType());
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!CHECK_NOT_NULL_MATCHER.matches(tree, state) || tree.getArguments().isEmpty()) {
return Description.NO_MATCH;
}
if (NEW_INSTANCE_MATCHER.matches(tree, state)) {
return matchNewInstance(tree, state);
}
if (STRING_LITERAL_ARG_MATCHER.matches(tree, state)) {
return matchStringLiteral(tree, state);
}
if (PRIMITIVE_ARG_MATCHER.matches(tree, state)) {
return describePrimitiveMatch(tree, state);
}
return Description.NO_MATCH;
}
private Description matchNewInstance(MethodInvocationTree tree, VisitorState state) {
Fix fix = SuggestedFix.replace(tree, state.getSourceForNode(tree.getArguments().get(0)));
return describeMatch(tree, fix);
}
private Description matchStringLiteral(
MethodInvocationTree methodInvocationTree, VisitorState state) {
List<? extends ExpressionTree> arguments = methodInvocationTree.getArguments();
ExpressionTree stringLiteralValue = arguments.get(0);
Fix fix;
if (arguments.size() == 2) {
fix = SuggestedFix.swap(arguments.get(0), arguments.get(1), state);
} else {
fix = SuggestedFix.delete(state.getPath().getParentPath().getLeaf());
}
return describeMatch(stringLiteralValue, fix);
}
/**
* If the call to Preconditions.checkNotNull is part of an expression (assignment, return, etc.),
* we substitute the argument for the method call. E.g.: {@code bar =
* Preconditions.checkNotNull(foo); ==> bar = foo;}
*
* <p>If the argument to Preconditions.checkNotNull is a comparison using == or != and one of the
* operands is null, we call checkNotNull on the non-null operand. E.g.: {@code checkNotNull(a ==
* null); ==> checkNotNull(a);}
*
* <p>If the argument is a method call or binary tree and its return type is boolean, change it to
* a checkArgument/checkState. E.g.: {@code Preconditions.checkNotNull(foo.hasFoo()) ==>
* Preconditions.checkArgument(foo.hasFoo())}
*
* <p>Otherwise, delete the checkNotNull call. E.g.: {@code Preconditions.checkNotNull(foo); ==>
* [delete the line]}
*/
private Description describePrimitiveMatch(
MethodInvocationTree methodInvocationTree, VisitorState state) {
ExpressionTree arg1 = methodInvocationTree.getArguments().get(0);
Tree parent = state.getPath().getParentPath().getLeaf();
// Assignment, return, etc.
if (!(parent instanceof ExpressionStatementTree)) {
return describeMatch(
arg1, SuggestedFix.replace(methodInvocationTree, state.getSourceForNode(arg1)));
}
// Comparison to null
if (arg1.getKind() == Kind.EQUAL_TO || arg1.getKind() == Kind.NOT_EQUAL_TO) {
BinaryTree binaryExpr = (BinaryTree) arg1;
if (binaryExpr.getLeftOperand().getKind() == Kind.NULL_LITERAL) {
return describeMatch(
arg1, SuggestedFix.replace(arg1, state.getSourceForNode(binaryExpr.getRightOperand())));
}
if (binaryExpr.getRightOperand().getKind() == Kind.NULL_LITERAL) {
return describeMatch(
arg1, SuggestedFix.replace(arg1, state.getSourceForNode(binaryExpr.getLeftOperand())));
}
}
if ((arg1 instanceof BinaryTree
|| arg1 instanceof MethodInvocationTree
|| arg1.getKind() == Kind.LOGICAL_COMPLEMENT)
&& state.getTypes().isSameType(ASTHelpers.getType(arg1), state.getSymtab().booleanType)) {
return describeMatch(arg1, createCheckArgumentOrStateCall(methodInvocationTree, state, arg1));
}
return describeMatch(arg1, SuggestedFix.delete(parent));
}
/**
* Creates a SuggestedFix that replaces the checkNotNull call with a checkArgument or checkState
* call.
*/
private static Fix createCheckArgumentOrStateCall(
MethodInvocationTree methodInvocationTree, VisitorState state, ExpressionTree arg1) {
String replacementMethod = "checkState";
if (hasMethodParameter(state.getPath(), arg1)) {
replacementMethod = "checkArgument";
}
SuggestedFix.Builder fix = SuggestedFix.builder();
String name =
SuggestedFixes.qualifyStaticImport(
"com.google.common.base.Preconditions." + replacementMethod, fix, state);
fix.replace(methodInvocationTree.getMethodSelect(), name);
return fix.build();
}
/**
* Determines whether the expression contains a reference to one of the enclosing method's
* parameters.
*
* <p>TODO(eaftan): Extract this to ASTHelpers.
*
* @param path the path to the current tree node
* @param tree the node to compare against the parameters
* @return whether the argument is a parameter to the enclosing method
*/
private static boolean hasMethodParameter(TreePath path, ExpressionTree tree) {
Set<Symbol> symbols = new HashSet<>();
for (IdentifierTree ident : getVariableUses(tree)) {
Symbol sym = ASTHelpers.getSymbol(ident);
if (sym.isDirectlyOrIndirectlyLocal()) {
symbols.add(sym);
}
}
// Find enclosing method declaration.
while (path != null && !(path.getLeaf() instanceof MethodTree)) {
path = path.getParentPath();
}
if (path == null) {
throw new IllegalStateException("Should have an enclosing method declaration");
}
MethodTree methodDecl = (MethodTree) path.getLeaf();
for (VariableTree param : methodDecl.getParameters()) {
if (symbols.contains(ASTHelpers.getSymbol(param))) {
return true;
}
}
return false;
}
/**
* Find the root variable identifiers from an arbitrary expression.
*
* <p>Examples: a.trim().intern() ==> {a} a.b.trim().intern() ==> {a} this.intValue.foo() ==>
* {this} this.foo() ==> {this} intern() ==> {} String.format() ==> {} java.lang.String.format()
* ==> {} x.y.z(s.t) ==> {x,s}
*/
static List<IdentifierTree> getVariableUses(ExpressionTree tree) {
List<IdentifierTree> freeVars = new ArrayList<>();
new TreeScanner<Void, Void>() {
@Override
public Void visitIdentifier(IdentifierTree node, Void v) {
if (((JCIdent) node).sym instanceof VarSymbol) {
freeVars.add(node);
}
return super.visitIdentifier(node, null);
}
}.scan(tree, null);
return freeVars;
}
}
| UnnecessaryCheckNotNull |
java | square__retrofit | retrofit-adapters/rxjava/src/test/java/retrofit2/adapter/rxjava/SingleThrowingTest.java | {
"start": 1606,
"end": 9413
} | interface ____ {
@GET("/")
Single<String> body();
@GET("/")
Single<Response<String>> response();
@GET("/")
Single<Result<String>> result();
}
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(new StringConverterFactory())
.addCallAdapterFactory(RxJavaCallAdapterFactory.create())
.build();
service = retrofit.create(Service.class);
}
@Test
public void bodyThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<String> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.body()
.subscribe(
new ForwardingObserver<String>(observer) {
@Override
public void onSuccess(String value) {
throw e;
}
});
assertThat(pluginRef.get()).isSameInstanceAs(e);
}
@Test
public void bodyThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse().setResponseCode(404));
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<String> observer = subscriberRule.create();
final AtomicReference<Throwable> errorRef = new AtomicReference<>();
final RuntimeException e = new RuntimeException();
service
.body()
.subscribe(
new ForwardingObserver<String>(observer) {
@Override
public void onError(Throwable throwable) {
if (!errorRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
throw e;
}
});
CompositeException composite = (CompositeException) pluginRef.get();
assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e);
}
@Test
public void responseThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<Response<String>> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.response()
.subscribe(
new ForwardingObserver<Response<String>>(observer) {
@Override
public void onSuccess(Response<String> value) {
throw e;
}
});
assertThat(pluginRef.get()).isSameInstanceAs(e);
}
@Test
public void responseThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse().setSocketPolicy(DISCONNECT_AFTER_REQUEST));
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<Response<String>> observer = subscriberRule.create();
final AtomicReference<Throwable> errorRef = new AtomicReference<>();
final RuntimeException e = new RuntimeException();
service
.response()
.subscribe(
new ForwardingObserver<Response<String>>(observer) {
@Override
public void onError(Throwable throwable) {
if (!errorRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
throw e;
}
});
CompositeException composite = (CompositeException) pluginRef.get();
assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e);
}
@Test
public void resultThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<Result<String>> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.result()
.subscribe(
new ForwardingObserver<Result<String>>(observer) {
@Override
public void onSuccess(Result<String> value) {
throw e;
}
});
assertThat(pluginRef.get()).isSameInstanceAs(e);
}
@Ignore("Single's contract is onNext|onError so we have no way of triggering this case")
@Test
public void resultThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> pluginRef = new AtomicReference<>();
RxJavaPlugins.getInstance()
.registerErrorHandler(
new RxJavaErrorHandler() {
@Override
public void handleError(Throwable throwable) {
if (!pluginRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
}
}
});
RecordingSubscriber<Result<String>> observer = subscriberRule.create();
final RuntimeException first = new RuntimeException();
final RuntimeException second = new RuntimeException();
service
.result()
.subscribe(
new ForwardingObserver<Result<String>>(observer) {
@Override
public void onSuccess(Result<String> value) {
// The only way to trigger onError for Result is if onSuccess throws.
throw first;
}
@Override
public void onError(Throwable throwable) {
throw second;
}
});
CompositeException composite = (CompositeException) pluginRef.get();
assertThat(composite.getExceptions()).containsExactly(first, second);
}
private abstract static | Service |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java | {
"start": 1112,
"end": 2289
} | class ____ extends BroadcastOperationRequestBuilder<
ForceMergeRequest,
BroadcastResponse,
ForceMergeRequestBuilder> {
public ForceMergeRequestBuilder(ElasticsearchClient client) {
super(client, ForceMergeAction.INSTANCE, new ForceMergeRequest());
}
/**
* Will force merge the index down to <= maxNumSegments. By default, will
* cause the merge process to merge down to half the configured number of
* segments.
*/
public ForceMergeRequestBuilder setMaxNumSegments(int maxNumSegments) {
request.maxNumSegments(maxNumSegments);
return this;
}
/**
* Should the merge only expunge deletes from the index, without full merging.
* Defaults to full merging ({@code false}).
*/
public ForceMergeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) {
request.onlyExpungeDeletes(onlyExpungeDeletes);
return this;
}
/**
* Should flush be performed after the merge. Defaults to {@code true}.
*/
public ForceMergeRequestBuilder setFlush(boolean flush) {
request.flush(flush);
return this;
}
}
| ForceMergeRequestBuilder |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestLineInputFormat.java | {
"start": 3562,
"end": 3957
} | class ____ Text.");
try {
count = 0;
while (reader.next(key, value)) {
count++;
}
} finally {
reader.close();
}
assertEquals(expectedN, count,
"number of lines in split is " + expectedN);
}
}
public static void main(String[] args) throws Exception {
new TestLineInputFormat().testFormat();
}
}
| is |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SimplifyJoinConditionRule.java | {
"start": 2948,
"end": 3528
} | interface ____ extends RelRule.Config {
SimplifyJoinConditionRule.SimplifyJoinConditionRuleConfig DEFAULT =
ImmutableSimplifyJoinConditionRule.SimplifyJoinConditionRuleConfig.builder()
.build()
.withOperandSupplier(b0 -> b0.operand(LogicalJoin.class).anyInputs())
.withDescription("SimplifyJoinConditionRule");
@Override
default SimplifyJoinConditionRule toRule() {
return new SimplifyJoinConditionRule(this);
}
}
}
| SimplifyJoinConditionRuleConfig |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/PerNodeAggTimelineCollectorMetrics.java | {
"start": 1328,
"end": 1520
} | class ____ TimelineCollectorWebService
* running on each NM.
*/
@Metrics(about = "Aggregated metrics of TimelineCollector's running on each NM",
context = "timelineservice")
final public | for |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/engine/support/hierarchical/ParallelExecutionIntegrationTests.java | {
"start": 19018,
"end": 19180
} | class ____ {
static AtomicInteger sharedResource = new AtomicInteger();
static CountDownLatch countDownLatch = new CountDownLatch(4);
static | IndependentClasses |
java | apache__camel | catalog/camel-report-maven-plugin/src/test/java/org/apache/camel/maven/htmlxlsx/process/TemplateRendererTest.java | {
"start": 1043,
"end": 3573
} | class ____ {
private static final String EXPECTED = "<!--\n" +
"\n" +
" Licensed to the Apache Software Foundation (ASF) under one or more\n" +
" contributor license agreements. See the NOTICE file distributed with\n" +
" this work for additional information regarding copyright ownership.\n" +
" The ASF licenses this file to You under the Apache License, Version 2.0\n" +
" (the \"License\"); you may not use this file except in compliance with\n" +
" the License. You may obtain a copy of the License at\n" +
"\n" +
" http://www.apache.org/licenses/LICENSE-2.0\n" +
"\n" +
" Unless required by applicable law or agreed to in writing, software\n" +
" distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
" See the License for the specific language governing permissions and\n" +
" limitations under the License.\n" +
"\n" +
"-->\n" +
"<!DOCTYPE html>\n" +
"<html lang=\"en\">\n" +
"\n" +
" <body>\n" +
" <h1>testRender</h1>\n" +
" </body>\n" +
"</html>";
@Test
public void testTemplateRenderer() {
// keep jacoco happy
TemplateRenderer result = new TemplateRenderer();
assertNotNull(result);
}
@Test
public void testRender() {
String result = TemplateRenderer.render("index", Map.of("testValue", "testRender"));
assertEquals(EXPECTED, result);
}
}
| TemplateRendererTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetoone/OptionalOneToOneMapsIdQueryTest.java | {
"start": 7877,
"end": 8138
} | class ____ {
@Id
private Long id;
@OneToOne
@MapsId
@JoinColumn(name = "id")
@NotFound(action = NotFoundAction.IGNORE)
private BarWithNoIdOrPropNamedId bar;
}
@Entity(name = "BarWithNoIdOrPropNamedId")
public static | FooHasBarWithNoIdOrPropNamedId |
java | elastic__elasticsearch | x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongLeafFieldData.java | {
"start": 1163,
"end": 4037
} | class ____ implements LeafNumericFieldData {
private final LeafNumericFieldData signedLongFD;
protected final ToScriptFieldFactory<SortedNumericLongValues> toScriptFieldFactory;
UnsignedLongLeafFieldData(LeafNumericFieldData signedLongFD, ToScriptFieldFactory<SortedNumericLongValues> toScriptFieldFactory) {
this.signedLongFD = signedLongFD;
this.toScriptFieldFactory = toScriptFieldFactory;
}
@Override
public SortedNumericLongValues getLongValues() {
return signedLongFD.getLongValues();
}
@Override
public SortedNumericDoubleValues getDoubleValues() {
final SortedNumericLongValues values = signedLongFD.getLongValues();
final LongValues singleValues = SortedNumericLongValues.unwrapSingleton(values);
if (singleValues != null) {
return FieldData.singleton(new DoubleValues() {
@Override
public boolean advanceExact(int doc) throws IOException {
return singleValues.advanceExact(doc);
}
@Override
public double doubleValue() throws IOException {
return convertUnsignedLongToDouble(singleValues.longValue());
}
});
} else {
return new SortedNumericDoubleValues() {
@Override
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public double nextValue() throws IOException {
return convertUnsignedLongToDouble(values.nextValue());
}
@Override
public int docValueCount() {
return values.docValueCount();
}
};
}
}
@Override
public DocValuesScriptFieldFactory getScriptFieldFactory(String name) {
return toScriptFieldFactory.getScriptFieldFactory(getLongValues(), name);
}
@Override
public SortedBinaryDocValues getBytesValues() {
return FieldData.toString(getDoubleValues());
}
@Override
public long ramBytesUsed() {
return signedLongFD.ramBytesUsed();
}
@Override
public void close() {
signedLongFD.close();
}
@Override
public FormattedDocValues getFormattedValues(DocValueFormat format) {
return new FormattedSortedNumericDocValues(getLongValues(), format);
}
static double convertUnsignedLongToDouble(long value) {
if (value < 0L) {
return sortableSignedLongToUnsigned(value); // add 2 ^ 63
} else {
// add 2 ^ 63 as a double to make sure there is no overflow and final result is positive
return 0x1.0p63 + value;
}
}
}
| UnsignedLongLeafFieldData |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MathAbsoluteNegativeTest.java | {
"start": 5362,
"end": 5888
} | class ____ {
void f(UUID uuid) {
// BUG: Diagnostic contains: MathAbsoluteNegative
long foo = Math.abs(uuid.getLeastSignificantBits());
// BUG: Diagnostic contains: MathAbsoluteNegative
long bar = Math.abs(uuid.getMostSignificantBits());
}
}
""")
.doTest();
}
@Test
public void guavaPrimitivesHashCode() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__camel | components/camel-leveldb/src/main/java/org/apache/camel/component/leveldb/serializer/AbstractLevelDBSerializer.java | {
"start": 3794,
"end": 3901
} | interface ____ {
byte[] serialize(DefaultExchangeHolder holder) throws IOException;
}
}
| Serializer |
java | elastic__elasticsearch | x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/EndsWithFunctionProcessor.java | {
"start": 658,
"end": 3477
} | class ____ implements Processor {
public static final String NAME = "senw";
private final Processor input;
private final Processor pattern;
private final boolean caseInsensitive;
public EndsWithFunctionProcessor(Processor input, Processor pattern, boolean caseInsensitive) {
this.input = input;
this.pattern = pattern;
this.caseInsensitive = caseInsensitive;
}
public EndsWithFunctionProcessor(StreamInput in) throws IOException {
input = in.readNamedWriteable(Processor.class);
pattern = in.readNamedWriteable(Processor.class);
caseInsensitive = in.readBoolean();
}
@Override
public final void writeTo(StreamOutput out) throws IOException {
out.writeNamedWriteable(input);
out.writeNamedWriteable(pattern);
out.writeBoolean(caseInsensitive);
}
@Override
public Object process(Object o) {
return doProcess(input.process(o), pattern.process(o), isCaseInsensitive());
}
public static Object doProcess(Object input, Object pattern, boolean isCaseInsensitive) {
if (input == null) {
return null;
}
if (input instanceof String == false && input instanceof Character == false) {
throw new EqlIllegalArgumentException("A string/char is required; received [{}]", input);
}
if (pattern == null) {
return null;
}
if (pattern instanceof String == false && pattern instanceof Character == false) {
throw new EqlIllegalArgumentException("A string/char is required; received [{}]", pattern);
}
if (isCaseInsensitive == false) {
return input.toString().endsWith(pattern.toString());
} else {
return input.toString().toLowerCase(Locale.ROOT).endsWith(pattern.toString().toLowerCase(Locale.ROOT));
}
}
protected Processor input() {
return input;
}
protected Processor pattern() {
return pattern;
}
protected boolean isCaseInsensitive() {
return caseInsensitive;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
EndsWithFunctionProcessor other = (EndsWithFunctionProcessor) obj;
return Objects.equals(input(), other.input())
&& Objects.equals(pattern(), other.pattern())
&& Objects.equals(isCaseInsensitive(), other.isCaseInsensitive());
}
@Override
public int hashCode() {
return Objects.hash(input(), pattern(), isCaseInsensitive());
}
@Override
public String getWriteableName() {
return NAME;
}
}
| EndsWithFunctionProcessor |
java | google__dagger | javatests/dagger/internal/codegen/SubcomponentCreatorValidationTest.java | {
"start": 27449,
"end": 27742
} | interface ____ {",
" ChildComponent build();",
" void set1(String s);",
" void set2(Integer s);",
" }")
.addLinesIf(
FACTORY,
" @Subcomponent.Factory",
" | Builder |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/bind/AnnotatedClientArgumentRequestBinder.java | {
"start": 735,
"end": 970
} | interface ____ classes that bind an {@link io.micronaut.core.type.Argument} to an
* {@link io.micronaut.http.MutableHttpRequest} driven by an annotation.
*
* @param <A> An annotation
* @author James Kleeh
* @since 2.1.0
*/
public | for |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/accesslog/element/RemoteHostElementBuilder.java | {
"start": 760,
"end": 1041
} | class ____ implements LogElementBuilder {
@Override
public LogElement build(String token, String param) {
if (RemoteHostElement.REMOTE_HOST.equals(token)) {
return RemoteHostElement.INSTANCE;
}
return null;
}
}
| RemoteHostElementBuilder |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLPrivilegeItem.java | {
"start": 290,
"end": 854
} | class ____ extends SQLObjectImpl {
private SQLExpr action;
private List<SQLName> columns = new ArrayList<SQLName>();
public SQLExpr getAction() {
return action;
}
public void setAction(SQLExpr action) {
this.action = action;
}
public List<SQLName> getColumns() {
return columns;
}
@Override
protected void accept0(SQLASTVisitor v) {
if (v.visit(this)) {
acceptChild(v, action);
acceptChild(v, this.columns);
}
v.endVisit(this);
}
}
| SQLPrivilegeItem |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java | {
"start": 1239,
"end": 8217
} | class ____ {
@Public
@Unstable
public static ReservationDefinition newInstance(long arrival, long deadline,
ReservationRequests reservationRequests, String name,
String recurrenceExpression, Priority priority) {
ReservationDefinition rDefinition =
Records.newRecord(ReservationDefinition.class);
rDefinition.setArrival(arrival);
rDefinition.setDeadline(deadline);
rDefinition.setReservationRequests(reservationRequests);
rDefinition.setReservationName(name);
rDefinition.setRecurrenceExpression(recurrenceExpression);
rDefinition.setPriority(priority);
return rDefinition;
}
@Public
@Unstable
public static ReservationDefinition newInstance(long arrival, long deadline,
ReservationRequests reservationRequests, String name) {
ReservationDefinition rDefinition = newInstance(arrival, deadline,
reservationRequests, name, "0", Priority.UNDEFINED);
return rDefinition;
}
/**
* Get the arrival time or the earliest time from which the resource(s) can be
* allocated. Time expressed as UTC.
*
* @return the earliest valid time for this reservation
*/
@Public
@Unstable
public abstract long getArrival();
/**
* Set the arrival time or the earliest time from which the resource(s) can be
* allocated. Time expressed as UTC.
*
* @param earliestStartTime the earliest valid time for this reservation
*/
@Public
@Unstable
public abstract void setArrival(long earliestStartTime);
/**
* Get the deadline or the latest time by when the resource(s) must be
* allocated. Time expressed as UTC.
*
* @return the deadline or the latest time by when the resource(s) must be
* allocated
*/
@Public
@Unstable
public abstract long getDeadline();
/**
* Set the deadline or the latest time by when the resource(s) must be
* allocated. Time expressed as UTC.
*
* @param latestEndTime the deadline or the latest time by when the
* resource(s) should be allocated
*/
@Public
@Unstable
public abstract void setDeadline(long latestEndTime);
/**
* Get the list of {@link ReservationRequests} representing the resources
* required by the application
*
* @return the list of {@link ReservationRequests}
*/
@Public
@Unstable
public abstract ReservationRequests getReservationRequests();
/**
* Set the list of {@link ReservationRequests} representing the resources
* required by the application
*
* @param reservationRequests the list of {@link ReservationRequests}
*/
@Public
@Unstable
public abstract void setReservationRequests(
ReservationRequests reservationRequests);
/**
* Get the name for this reservation. The name need-not be unique, and it is
* just a mnemonic for the user (akin to job names). Accepted reservations are
* uniquely identified by a system-generated ReservationId.
*
* @return string representing the name of the corresponding reserved resource
* allocation in the scheduler
*/
@Public
@Unstable
public abstract String getReservationName();
/**
* Set the name for this reservation. The name need-not be unique, and it is
* just a mnemonic for the user (akin to job names). Accepted reservations are
* uniquely identified by a system-generated ReservationId.
*
* @param name representing the name of the corresponding reserved resource
* allocation in the scheduler
*/
@Public
@Unstable
public abstract void setReservationName(String name);
/**
* Get the recurrence of this reservation representing the time period of
* the periodic job. Currently, only long values are supported. Later,
* support for regular expressions denoting arbitrary recurrence patterns
* (e.g., every Tuesday and Thursday) will be added.
* Recurrence is represented in milliseconds for periodic jobs.
* Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
* initial placement, allocations remain consistent thereafter). Note that
* as a long, the recurrence expression must be greater than the duration of
* the reservation (deadline - arrival). Also note that the configured max
* period must be divisible by the recurrence expression if expressed as a
* long.
*
* @return recurrence of this reservation
*/
@Public
@Unstable
public abstract String getRecurrenceExpression();
/**
* Set the recurrence of this reservation representing the time period of
* the periodic job. Currently, only long values are supported. Later,
* support for regular expressions denoting arbitrary recurrence patterns
* (e.g., every Tuesday and Thursday) will be added.
* Recurrence is represented in milliseconds for periodic jobs.
* Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
* initial placement, allocations remain consistent thereafter). Note that
* as a long, the recurrence expression must be greater than the duration of
* the reservation (deadline - arrival). Also note that the configured max
* period must be divisible by the recurrence expression if expressed as a
* long.
*
* @param recurrenceExpression recurrence interval of this reservation
*/
@Public
@Unstable
public abstract void setRecurrenceExpression(String recurrenceExpression);
/**
* Get the priority for this reservation. A lower number for priority
* indicates a higher priority reservation. Recurring reservations are
* always higher priority than non-recurring reservations. Priority for
* non-recurring reservations are only compared with non-recurring
* reservations. Likewise for recurring reservations.
*
* @return int representing the priority of the reserved resource
* allocation in the scheduler
*/
@Public
@Unstable
public abstract Priority getPriority();
/**
* Set the priority for this reservation. A lower number for priority
* indicates a higher priority reservation. Recurring reservations are
* always higher priority than non-recurring reservations. Priority for
* non-recurring reservations are only compared with non-recurring
* reservations. Likewise for recurring reservations.
*
* @param priority representing the priority of the reserved resource
* allocation in the scheduler
*/
@Public
@Unstable
public abstract void setPriority(Priority priority);
}
| ReservationDefinition |
java | apache__kafka | server-common/src/test/java/org/apache/kafka/server/record/BrokerCompressionTypeTest.java | {
"start": 1311,
"end": 2901
} | class ____ {
@Test
public void testTargetCompressionType() {
GzipCompression gzipWithLevel = Compression.gzip().level(CompressionType.GZIP.maxLevel()).build();
assertEquals(gzipWithLevel, BrokerCompressionType.targetCompression(Optional.of(gzipWithLevel), CompressionType.ZSTD));
SnappyCompression snappy = Compression.snappy().build();
assertEquals(snappy, BrokerCompressionType.targetCompression(Optional.of(snappy), CompressionType.LZ4));
Lz4Compression lz4WithLevel = Compression.lz4().level(CompressionType.LZ4.maxLevel()).build();
assertEquals(lz4WithLevel, BrokerCompressionType.targetCompression(Optional.of(lz4WithLevel), CompressionType.ZSTD));
ZstdCompression zstdWithLevel = Compression.zstd().level(CompressionType.ZSTD.maxLevel()).build();
assertEquals(zstdWithLevel, BrokerCompressionType.targetCompression(Optional.of(zstdWithLevel), CompressionType.GZIP));
GzipCompression gzip = Compression.gzip().build();
assertEquals(gzip, BrokerCompressionType.targetCompression(Optional.empty(), CompressionType.GZIP));
assertEquals(snappy, BrokerCompressionType.targetCompression(Optional.empty(), CompressionType.SNAPPY));
Lz4Compression lz4 = Compression.lz4().build();
assertEquals(lz4, BrokerCompressionType.targetCompression(Optional.empty(), CompressionType.LZ4));
ZstdCompression zstd = Compression.zstd().build();
assertEquals(zstd, BrokerCompressionType.targetCompression(Optional.empty(), CompressionType.ZSTD));
}
}
| BrokerCompressionTypeTest |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/utils/InstantDeserializer.java | {
"start": 1053,
"end": 1454
} | class ____ extends com.fasterxml.jackson.datatype.jsr310.deser.InstantDeserializer<Instant> {
static final JsonDeserializer<Instant> INSTANCE = new InstantDeserializer();
private static final long serialVersionUID = 1L;
private InstantDeserializer() {
super(com.fasterxml.jackson.datatype.jsr310.deser.InstantDeserializer.INSTANT, ISO_OFFSET_DATE_TIME);
}
}
| InstantDeserializer |
java | quarkusio__quarkus | extensions/security/deployment/src/main/java/io/quarkus/security/deployment/PermissionSecurityChecks.java | {
"start": 42571,
"end": 60200
} | class ____ {
*
* @PermissionChecker("permission-name")
* boolean isGranted(SecurityIdentity securityIdentity, SomeDto someDto) {
* return false;
* }
*
* }
* }
* </pre>
*/
void generatePermissionCheckers(BuildProducer<GeneratedClassBuildItem> generatedClassProducer) {
permissionNameToChecker.values().forEach(checkerMetadata -> {
var declaringCdiBean = checkerMetadata.checkerMethod().declaringClass();
var declaringCdiBeanType = classType(declaringCdiBean.name());
var generatedClassName = checkerMetadata.generatedClassName();
try (var classCreator = ClassCreator.builder()
.classOutput(new GeneratedClassGizmoAdaptor(generatedClassProducer, true))
.setFinal(true)
.className(generatedClassName)
.signature(SignatureBuilder
.forClass()
// extends QuarkusPermission<XYZ>
// XYZ == @PermissionChecker declaring class
.setSuperClass(parameterizedType(classType(QuarkusPermission.class), declaringCdiBeanType)))
.build()) {
record SecuredMethodParamDesc(FieldDescriptor fieldDescriptor, int ctorParamIdx) {
SecuredMethodParamDesc() {
this(null, -1);
}
boolean isNotSecurityIdentity() {
return fieldDescriptor != null;
}
}
SecuredMethodParamDesc[] securedMethodParams = new SecuredMethodParamDesc[checkerMetadata
.methodParamMappers().length];
for (int i = 0; i < checkerMetadata.methodParamMappers.length; i++) {
var paramMapper = checkerMetadata.methodParamMappers[i];
if (paramMapper.isSecurityIdentity()) {
securedMethodParams[i] = new SecuredMethodParamDesc();
} else {
// GENERATED CODE: private final SomeDto securedMethodParameter1;
var fieldName = SECURED_METHOD_PARAMETER + paramMapper.securedMethodIdx();
var ctorParamIdx = paramMapper.permConstructorIdx();
var fieldTypeName = checkerMetadata.quarkusPermissionConstructor().parameterType(ctorParamIdx)
.name();
var fieldCreator = classCreator.getFieldCreator(fieldName, fieldTypeName.toString());
fieldCreator.setModifiers(Modifier.PRIVATE | Modifier.FINAL);
securedMethodParams[i] = new SecuredMethodParamDesc(fieldCreator.getFieldDescriptor(),
ctorParamIdx);
}
}
// public GeneratedQuarkusPermission(String permissionName, SomeDto securedMethodParameter1) {
// super("io.quarkus.security.runtime.GeneratedQuarkusPermission");
// this.securedMethodParameter1 = securedMethodParameter1;
// }
// How many 'securedMethodParameterXYZ' are there depends on the secured method
var ctorParams = Stream.concat(Stream.of(String.class.getName()), Arrays
.stream(securedMethodParams)
.filter(SecuredMethodParamDesc::isNotSecurityIdentity)
.map(SecuredMethodParamDesc::fieldDescriptor)
.map(FieldDescriptor::getType)).toArray(String[]::new);
try (var ctor = classCreator.getConstructorCreator(ctorParams)) {
ctor.setModifiers(Modifier.PUBLIC);
// GENERATED CODE: super("io.quarkus.security.runtime.GeneratedQuarkusPermission");
// why not to propagate permission name to the java.security.Permission ?
// if someone declares @PermissionChecker("permission-name-1") we expect that required permission
// @PermissionAllowed("permission-name-1") is only granted by the checker method and accidentally some
// user-defined augmentor won't grant it based on permission name match in case they misunderstand docs
var superCtorDesc = MethodDescriptor.ofConstructor(classCreator.getSuperClass(), String.class);
ctor.invokeSpecialMethod(superCtorDesc, ctor.getThis(), ctor.load(generatedClassName));
// GENERATED CODE: this.securedMethodParameterXYZ = securedMethodParameterXYZ;
for (var securedMethodParamDesc : securedMethodParams) {
if (securedMethodParamDesc.isNotSecurityIdentity()) {
var field = securedMethodParamDesc.fieldDescriptor();
var constructorParameter = ctor.getMethodParam(securedMethodParamDesc.ctorParamIdx());
ctor.writeInstanceField(field, ctor.getThis(), constructorParameter);
}
}
ctor.returnVoid();
}
// @Override
// protected final boolean isGranted(SecurityIdentity securityIdentity) {
// return getBean().hasPermission(securityIdentity, securedMethodParameter1);
// }
// or when user-defined permission checker returns Uni<Boolean>:
// @Override
// protected final Uni<Boolean> isGrantedUni(SecurityIdentity securityIdentity) {
// return getBean().hasPermission(securityIdentity, securedMethodParameter1);
// }
var isGrantedName = checkerMetadata.reactive() ? IS_GRANTED_UNI : IS_GRANTED;
var isGrantedReturn = DescriptorUtils.typeToString(checkerMetadata.checkerMethod().returnType());
try (var methodCreator = classCreator.getMethodCreator(isGrantedName, isGrantedReturn,
SecurityIdentity.class)) {
methodCreator.setModifiers(Modifier.PROTECTED | Modifier.FINAL);
methodCreator.addAnnotation(Override.class.getName(), RetentionPolicy.CLASS);
// getBean()
var getBeanDescriptor = MethodDescriptor.ofMethod(generatedClassName, "getBean", Object.class);
var cdiBean = methodCreator.invokeVirtualMethod(getBeanDescriptor, methodCreator.getThis());
// <<cdiBean>>.hasPermission(securityIdentity, securedMethodParameter1)
var isGrantedDescriptor = MethodDescriptor.of(checkerMetadata.checkerMethod());
var securedMethodParamHandles = new ResultHandle[securedMethodParams.length];
for (int i = 0; i < securedMethodParams.length; i++) {
var securedMethodParam = securedMethodParams[i];
if (securedMethodParam.isNotSecurityIdentity()) {
// QuarkusPermission field assigned in the permission constructor
// for example: this.securedMethodParameter1
securedMethodParamHandles[i] = methodCreator
.readInstanceField(securedMethodParam.fieldDescriptor(), methodCreator.getThis());
} else {
// SecurityIdentity from QuarkusPermission#isGranted method parameter
securedMethodParamHandles[i] = methodCreator.getMethodParam(0);
}
}
final ResultHandle result;
if (checkerMetadata.checkerMethod.isDefault()) {
result = methodCreator.invokeInterfaceMethod(isGrantedDescriptor, cdiBean,
securedMethodParamHandles);
} else {
result = methodCreator.invokeVirtualMethod(isGrantedDescriptor, cdiBean, securedMethodParamHandles);
}
// return 'hasPermission' result
methodCreator.returnValue(result);
}
var alwaysFalseName = checkerMetadata.reactive() ? IS_GRANTED : IS_GRANTED_UNI;
var alwaysFalseType = checkerMetadata.reactive() ? boolean.class.getName() : UNI.toString();
try (var methodCreator = classCreator.getMethodCreator(alwaysFalseName, alwaysFalseType,
SecurityIdentity.class)) {
methodCreator.setModifiers(Modifier.PROTECTED | Modifier.FINAL);
methodCreator.addAnnotation(Override.class.getName(), RetentionPolicy.CLASS);
if (checkerMetadata.reactive()) {
methodCreator.returnValue(methodCreator.load(false));
} else {
var accessDenied = methodCreator.invokeStaticMethod(
MethodDescriptor.ofMethod(QuarkusPermission.class, "accessDenied", UNI.toString()));
methodCreator.returnValue(accessDenied);
}
}
// @Override
// protected final Class<T> getBeanClass() {
// return io.quarkus.security.runtime.GeneratedQuarkusPermission.class;
// }
try (var methodCreator = classCreator.getMethodCreator("getBeanClass", Class.class)) {
methodCreator.setModifiers(Modifier.PROTECTED | Modifier.FINAL);
methodCreator.addAnnotation(Override.class.getName(), RetentionPolicy.CLASS);
methodCreator.returnValue(methodCreator.loadClassFromTCCL(declaringCdiBean.name().toString()));
}
// @Override
// protected final boolean isBlocking() {
// return false; // or true
// }
try (var methodCreator = classCreator.getMethodCreator("isBlocking", boolean.class)) {
methodCreator.setModifiers(Modifier.PROTECTED | Modifier.FINAL);
methodCreator.addAnnotation(Override.class.getName(), RetentionPolicy.CLASS);
methodCreator.returnValue(methodCreator.load(checkerMetadata.blocking()));
}
// @Override
// protected final boolean isReactive() {
// return false; // true when checker method returns Uni<Boolean>
// }
try (var methodCreator = classCreator.getMethodCreator("isReactive", boolean.class)) {
methodCreator.setModifiers(Modifier.PROTECTED | Modifier.FINAL);
methodCreator.addAnnotation(Override.class.getName(), RetentionPolicy.CLASS);
methodCreator.returnValue(methodCreator.load(checkerMetadata.reactive()));
}
}
});
}
private static String toString(AnnotationTarget annotationTarget) {
if (annotationTarget.kind() == AnnotationTarget.Kind.METHOD) {
var method = annotationTarget.asMethod();
return method.declaringClass().toString() + "#" + method.name();
}
return annotationTarget.asClass().name().toString();
}
private SecurityCheck createSecurityCheck(LogicalAndPermissionPredicate andPredicate) {
final SecurityCheck securityCheck;
final boolean isSinglePermissionGroup = andPredicate.operands.size() == 1;
if (isSinglePermissionGroup) {
final LogicalOrPermissionPredicate orPredicate = andPredicate.operands.iterator().next();
final boolean isSinglePermission = orPredicate.operands.size() == 1;
if (isSinglePermission) {
// single permission
final PermissionWrapper permissionWrapper = orPredicate.operands.iterator().next();
securityCheck = recorder.permissionsAllowed(permissionWrapper.computedPermission,
permissionWrapper.permission);
} else {
// multiple OR operands (permission OR permission OR ...)
if (andPredicate.atLeastOnePermissionIsComputed) {
securityCheck = recorder.permissionsAllowed(orPredicate.asComputedPermissions(recorder), null);
} else {
securityCheck = recorder.permissionsAllowed(null, orPredicate.asPermissions());
}
}
} else {
// permission group AND permission group AND permission group AND ...
// permission group = (permission OR permission OR permission OR ...)
if (andPredicate.atLeastOnePermissionIsComputed) {
final List<List<Function<Object[], Permission>>> computedPermissionGroups = new ArrayList<>();
for (LogicalOrPermissionPredicate permissionGroup : andPredicate.operands) {
computedPermissionGroups.add(permissionGroup.asComputedPermissions(recorder));
}
securityCheck = recorder.permissionsAllowedGroups(computedPermissionGroups, null);
} else {
final List<List<RuntimeValue<Permission>>> permissionGroups = new ArrayList<>();
for (LogicalOrPermissionPredicate permissionGroup : andPredicate.operands) {
permissionGroups.add(permissionGroup.asPermissions());
}
securityCheck = recorder.permissionsAllowedGroups(null, permissionGroups);
}
}
return securityCheck;
}
private PermissionWrapper createPermission(PermissionKey permissionKey, AnnotationTarget securedTarget,
Map<PermissionCacheKey, PermissionWrapper> cache) {
var constructor = classSignatureToConstructor.get(permissionKey.classSignature());
return cache.computeIfAbsent(
new PermissionCacheKey(permissionKey, securedTarget, constructor, paramConverterGenerator),
new Function<PermissionCacheKey, PermissionWrapper>() {
@Override
public PermissionWrapper apply(PermissionCacheKey permissionCacheKey) {
if (permissionCacheKey.computed) {
return new PermissionWrapper(createComputedPermission(permissionCacheKey), null);
} else {
final RuntimeValue<Permission> permission;
if (permissionCacheKey.isStringPermission()) {
permission = createStringPermission(permissionCacheKey.permissionKey);
} else {
permission = createCustomPermission(permissionCacheKey);
}
return new PermissionWrapper(null, permission);
}
}
});
}
private Function<Object[], Permission> createComputedPermission(PermissionCacheKey permissionCacheKey) {
return recorder.createComputedPermission(permissionCacheKey.permissionKey.name,
permissionCacheKey.permissionKey.classSignature(), permissionCacheKey.permissionKey.actions(),
permissionCacheKey.passActionsToConstructor, permissionCacheKey.methodParamIndexes(),
permissionCacheKey.methodParamConverters, paramConverterGenerator.getConverterNameToMethodHandle());
}
private RuntimeValue<Permission> createCustomPermission(PermissionCacheKey permissionCacheKey) {
return recorder.createPermission(permissionCacheKey.permissionKey.name,
permissionCacheKey.permissionKey.classSignature(), permissionCacheKey.permissionKey.actions(),
permissionCacheKey.passActionsToConstructor);
}
private RuntimeValue<Permission> createStringPermission(PermissionKey permissionKey) {
if (permissionKey.notAutodetectParams()) {
// validate - no point to specify params as string permission only accept name and actions
throw new IllegalArgumentException(String.format("'%s' must have autodetected params", STRING_PERMISSION));
}
return recorder.createStringPermission(permissionKey.name, permissionKey.actions());
}
private static final | CheckerBean |
java | grpc__grpc-java | okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java | {
"start": 90667,
"end": 92416
} | enum ____ {
THROW_EXCEPTION,
RETURN_FALSE,
THROW_ERROR
}
final LinkedBlockingQueue<Result> nextResults = new LinkedBlockingQueue<>();
@Override
public void close() throws IOException {
closed.countDown();
}
void assertClosed() {
try {
if (!closed.await(TIME_OUT_MS, TimeUnit.MILLISECONDS)) {
fail("Failed waiting frame reader to be closed.");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
fail("Interrupted while waiting for frame reader to be closed.");
}
}
// The wait is safe; nextFrame is called in a loop and can have spurious wakeups
@SuppressWarnings("WaitNotInLoop")
@Override
public boolean nextFrame(FrameReader.Handler handler) throws IOException {
Result result;
try {
result = nextResults.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException(e);
}
switch (result) {
case THROW_EXCEPTION:
throw new IOException(NETWORK_ISSUE_MESSAGE);
case RETURN_FALSE:
return false;
case THROW_ERROR:
throw new Error(ERROR_MESSAGE);
default:
throw new UnsupportedOperationException("unimplemented: " + result);
}
}
void throwIoExceptionForNextFrame() {
nextResults.add(Result.THROW_EXCEPTION);
}
void throwErrorForNextFrame() {
nextResults.add(Result.THROW_ERROR);
}
void nextFrameAtEndOfStream() {
nextResults.add(Result.RETURN_FALSE);
}
@Override
public void readConnectionPreface() throws IOException {
// not used.
}
}
private static | Result |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/injection/guice/internal/MatcherAndConverter.java | {
"start": 911,
"end": 1732
} | class ____ {
private final Matcher<? super TypeLiteral<?>> typeMatcher;
private final TypeConverter typeConverter;
private final Object source;
public MatcherAndConverter(Matcher<? super TypeLiteral<?>> typeMatcher, TypeConverter typeConverter, Object source) {
this.typeMatcher = Objects.requireNonNull(typeMatcher, "type matcher");
this.typeConverter = Objects.requireNonNull(typeConverter, "converter");
this.source = source;
}
public TypeConverter getTypeConverter() {
return typeConverter;
}
public Matcher<? super TypeLiteral<?>> getTypeMatcher() {
return typeMatcher;
}
@Override
public String toString() {
return typeConverter + " which matches " + typeMatcher + " (bound at " + source + ")";
}
}
| MatcherAndConverter |
java | quarkusio__quarkus | extensions/panache/panache-common/deployment/src/main/java/io/quarkus/panache/common/deployment/visitors/PanacheEntityClassOperationGenerationVisitor.java | {
"start": 1633,
"end": 11273
} | class ____ extends ClassVisitor {
protected Type thisClass;
private final Set<String> userMethods = new HashSet<>();
protected TypeBundle typeBundle;
protected final ClassInfo panacheEntityBaseClassInfo;
protected ClassInfo entityInfo;
protected List<PanacheMethodCustomizer> methodCustomizers;
protected final Map<String, ByteCodeType> typeArguments = new HashMap<>();
protected final Function<String, org.jboss.jandex.Type> argMapper;
protected final ByteCodeType entityUpperBound;
private final Map<String, String> erasures = new HashMap<>();
public PanacheEntityClassOperationGenerationVisitor(ClassVisitor outputClassVisitor,
TypeBundle typeBundle,
ClassInfo entityInfo,
List<PanacheMethodCustomizer> methodCustomizers, IndexView indexView) {
super(Gizmo.ASM_API_VERSION, outputClassVisitor);
String className = entityInfo.name().toString();
thisClass = Type.getType("L" + className.replace('.', '/') + ";");
this.typeBundle = typeBundle;
this.panacheEntityBaseClassInfo = indexView.getClassByName(typeBundle.entityBase().dotName());
this.entityInfo = entityInfo;
this.methodCustomizers = methodCustomizers;
ByteCodeType baseType = typeBundle.entityBase();
List<TypeVariable> typeVariables = indexView.getClassByName(baseType.dotName()).typeParameters();
if (!typeVariables.isEmpty()) {
entityUpperBound = new ByteCodeType(typeVariables.get(0).bounds().get(0));
} else {
entityUpperBound = null;
}
discoverTypeParameters(entityInfo, indexView, typeBundle, baseType);
argMapper = type -> {
ByteCodeType byteCodeType = typeArguments.get(type);
return byteCodeType != null
? byteCodeType.get()
: OBJECT.get();
};
}
@Override
public MethodVisitor visitMethod(int access, String methodName, String descriptor, String signature,
String[] exceptions) {
userMethods.add(methodName + "/" + descriptor);
MethodVisitor superVisitor = super.visitMethod(access, methodName, descriptor, signature, exceptions);
if (Modifier.isStatic(access)
&& Modifier.isPublic(access)
&& (access & Opcodes.ACC_SYNTHETIC) == 0
&& !methodCustomizers.isEmpty()) {
org.jboss.jandex.Type[] argTypes = AsmUtil.getParameterTypes(descriptor);
MethodInfo method = this.entityInfo.method(methodName, argTypes);
if (method == null) {
throw new IllegalStateException(
"Could not find indexed method: " + thisClass + "." + methodName + " with descriptor " + descriptor
+ " and arg types " + Arrays.toString(argTypes));
}
superVisitor = new PanacheMethodCustomizerVisitor(superVisitor, method, thisClass, methodCustomizers);
}
return superVisitor;
}
@Override
public void visitEnd() {
// FIXME: generate default constructor
for (MethodInfo method : panacheEntityBaseClassInfo.methods()) {
// Do not generate a method that already exists
String descriptor = method.descriptor();
if (!userMethods.contains(method.name() + "/" + descriptor)) {
AnnotationInstance bridge = method.annotation(PanacheConstants.DOTNAME_GENERATE_BRIDGE);
if (bridge != null) {
generateMethod(method, bridge.value("targetReturnTypeErased"), bridge.value("callSuperMethod"));
}
}
}
super.visitEnd();
}
protected void discoverTypeParameters(ClassInfo classInfo, IndexView indexView, TypeBundle types, ByteCodeType baseType) {
List<ByteCodeType> foundTypeArguments = recursivelyFindEntityTypeArguments(indexView,
classInfo.name(), baseType.dotName());
ByteCodeType entityType = (foundTypeArguments.size() > 0) ? foundTypeArguments.get(0) : OBJECT;
ByteCodeType idType = (foundTypeArguments.size() > 1) ? foundTypeArguments.get(1) : OBJECT;
typeArguments.put("Entity", entityType);
typeArguments.put("Id", idType);
typeArguments.keySet().stream()
.filter(k -> !k.equals("Id"))
.forEach(k -> erasures.put(k, OBJECT.descriptor()));
try {
ByteCodeType entity = typeArguments.get("Entity");
if (entity != null) {
erasures.put(entity.dotName().toString(), entity.descriptor());
}
erasures.put(types.queryType().dotName().toString(), OBJECT.descriptor());
erasures.put(types.updateType().dotName().toString(), OBJECT.descriptor());
} catch (UnsupportedOperationException ignored) {
}
}
protected void generateMethod(MethodInfo method, AnnotationValue targetReturnTypeErased, AnnotationValue callSuperMethod) {
List<org.jboss.jandex.Type> parameters = method.parameterTypes();
MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC | Opcodes.ACC_SYNTHETIC,
method.name(),
method.descriptor(),
method.genericSignature(),
null);
AsmUtil.copyParameterNames(mv, method);
mv.visitCode();
for (PanacheMethodCustomizer customizer : methodCustomizers) {
customizer.customize(thisClass, method, mv);
}
if (callSuperMethod != null && callSuperMethod.asBoolean()) {
// delegate to super method
for (int i = 0; i < parameters.size(); i++) {
mv.visitIntInsn(Opcodes.ALOAD, i);
}
invokeOperations(mv, method, true);
} else {
loadOperations(mv);
loadArguments(mv, parameters);
invokeOperations(mv, method, false);
}
mv.visitMaxs(0, 0);
mv.visitEnd();
}
private void loadOperations(MethodVisitor mv) {
mv.visitFieldInsn(Opcodes.GETSTATIC, typeBundle.operations().internalName(), "INSTANCE",
typeBundle.operations().descriptor());
}
private void loadArguments(MethodVisitor mv, List<org.jboss.jandex.Type> parameters) {
// inject Class
injectModel(mv);
for (int i = 0; i < parameters.size(); i++) {
mv.visitIntInsn(Opcodes.ALOAD, i);
}
}
private void invokeOperations(MethodVisitor mv, MethodInfo method, boolean callSuperMethod) {
String operationDescriptor;
StringJoiner joiner = new StringJoiner("", "(", ")");
if (!callSuperMethod) {
joiner.add(CLASS.descriptor());
}
descriptors(method, joiner);
org.jboss.jandex.Type returnType = method.returnType();
String descriptor = returnType.descriptor(argMapper);
String key = returnType.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE
? returnType.asTypeVariable().identifier()
: returnType.name().toString();
operationDescriptor = joiner + erasures.getOrDefault(key, descriptor);
if (callSuperMethod) {
mv.visitMethodInsn(Opcodes.INVOKESTATIC, typeBundle.entityBase().internalName(), method.name(),
operationDescriptor, false);
} else {
mv.visitMethodInsn(INVOKEVIRTUAL, typeBundle.operations().internalName(), method.name(),
operationDescriptor, false);
}
if (returnType.kind() != org.jboss.jandex.Type.Kind.PRIMITIVE
&& returnType.kind() != org.jboss.jandex.Type.Kind.VOID) {
String cast;
if (returnType.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE) {
TypeVariable typeVariable = returnType.asTypeVariable();
ByteCodeType type = typeArguments.get(typeVariable.identifier());
if (type == null && typeVariable.bounds().size() != 1) {
type = OBJECT;
} else {
type = new ByteCodeType(typeVariable.bounds().get(0));
}
cast = type.internalName();
} else {
cast = returnType.name().toString().replace('.', '/');
}
mv.visitTypeInsn(CHECKCAST, cast);
}
mv.visitInsn(AsmUtil.getReturnInstruction(returnType));
}
private void descriptors(MethodInfo method, StringJoiner joiner) {
for (org.jboss.jandex.Type parameter : method.parameterTypes()) {
if (parameter.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE
|| method.name().endsWith("ById")
&& parameter.name().equals(typeArguments.get("Id").dotName())) {
joiner.add(OBJECT.descriptor());
} else {
joiner.add(mapType(parameter));
}
}
}
private String mapType(org.jboss.jandex.Type parameter) {
String descriptor;
switch (parameter.kind()) {
case PRIMITIVE:
case TYPE_VARIABLE:
descriptor = OBJECT.descriptor();
break;
default:
String value = parameter.descriptor(argMapper);
descriptor = erasures.getOrDefault(value, value);
}
return descriptor;
}
protected void injectModel(MethodVisitor mv) {
mv.visitLdcInsn(thisClass);
}
}
| PanacheEntityClassOperationGenerationVisitor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessarilyFullyQualifiedTest.java | {
"start": 1413,
"end": 1648
} | interface ____ {
java.util.List foo();
java.util.List bar();
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.List;
| Test |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/CharSet.java | {
"start": 1184,
"end": 9236
} | class ____ implements Serializable {
/**
* Required for serialization support. Lang version 2.0.
*
* @see java.io.Serializable
*/
private static final long serialVersionUID = 5947847346149275958L;
/**
* A CharSet defining no characters.
* @since 2.0
*/
public static final CharSet EMPTY = new CharSet((String) null);
/**
* A CharSet defining ASCII alphabetic characters "a-zA-Z".
* @since 2.0
*/
public static final CharSet ASCII_ALPHA = new CharSet("a-zA-Z");
/**
* A CharSet defining ASCII alphabetic characters "a-z".
* @since 2.0
*/
public static final CharSet ASCII_ALPHA_LOWER = new CharSet("a-z");
/**
* A CharSet defining ASCII alphabetic characters "A-Z".
* @since 2.0
*/
public static final CharSet ASCII_ALPHA_UPPER = new CharSet("A-Z");
/**
* A CharSet defining ASCII alphabetic characters "0-9".
* @since 2.0
*/
public static final CharSet ASCII_NUMERIC = new CharSet("0-9");
/**
* A Map of the common cases used in the factory.
* Subclasses can add more common patterns if desired
* @since 2.0
*/
protected static final Map<String, CharSet> COMMON = Collections.synchronizedMap(new HashMap<>());
static {
COMMON.put(null, EMPTY);
COMMON.put(StringUtils.EMPTY, EMPTY);
COMMON.put("a-zA-Z", ASCII_ALPHA);
COMMON.put("A-Za-z", ASCII_ALPHA);
COMMON.put("a-z", ASCII_ALPHA_LOWER);
COMMON.put("A-Z", ASCII_ALPHA_UPPER);
COMMON.put("0-9", ASCII_NUMERIC);
}
/**
* Factory method to create a new CharSet using a special syntax.
*
* <ul>
* <li>{@code null} or empty string ("")
* - set containing no characters</li>
* <li>Single character, such as "a"
* - set containing just that character</li>
* <li>Multi character, such as "a-e"
* - set containing characters from one character to the other</li>
* <li>Negated, such as "^a" or "^a-e"
* - set containing all characters except those defined</li>
* <li>Combinations, such as "abe-g"
* - set containing all the characters from the individual sets</li>
* </ul>
*
* <p>The matching order is:</p>
* <ol>
* <li>Negated multi character range, such as "^a-e"
* <li>Ordinary multi character range, such as "a-e"
* <li>Negated single character, such as "^a"
* <li>Ordinary single character, such as "a"
* </ol>
*
* <p>Matching works left to right. Once a match is found the
* search starts again from the next character.</p>
*
* <p>If the same range is defined twice using the same syntax, only
* one range will be kept.
* Thus, "a-ca-c" creates only one range of "a-c".</p>
*
* <p>If the start and end of a range are in the wrong order,
* they are reversed. Thus "a-e" is the same as "e-a".
* As a result, "a-ee-a" would create only one range,
* as the "a-e" and "e-a" are the same.</p>
*
* <p>The set of characters represented is the union of the specified ranges.</p>
*
* <p>There are two ways to add a literal negation character ({@code ^}):</p>
* <ul>
* <li>As the last character in a string, e.g. {@code CharSet.getInstance("a-z^")}</li>
* <li>As a separate element, e.g. {@code CharSet.getInstance("^", "a-z")}</li>
* </ul>
*
* <p>Examples using the negation character:</p>
* <pre>
* CharSet.getInstance("^a-c").contains('a') = false
* CharSet.getInstance("^a-c").contains('d') = true
* CharSet.getInstance("^^a-c").contains('a') = true // (only '^' is negated)
* CharSet.getInstance("^^a-c").contains('^') = false
* CharSet.getInstance("^a-cd-f").contains('d') = true
* CharSet.getInstance("a-c^").contains('^') = true
* CharSet.getInstance("^", "a-c").contains('^') = true
* </pre>
*
* <p>All CharSet objects returned by this method will be immutable.</p>
*
* @param setStrs Strings to merge into the set, may be null
* @return a CharSet instance
* @since 2.4
*/
public static CharSet getInstance(final String... setStrs) {
if (setStrs == null) {
return null;
}
if (setStrs.length == 1) {
final CharSet common = COMMON.get(setStrs[0]);
if (common != null) {
return common;
}
}
return new CharSet(setStrs);
}
/** The set of CharRange objects. */
private final Set<CharRange> set = Collections.synchronizedSet(new HashSet<>());
/**
* Constructs a new CharSet using the set syntax.
* Each string is merged in with the set.
*
* @param set Strings to merge into the initial set
* @throws NullPointerException if set is {@code null}
*/
protected CharSet(final String... set) {
Stream.of(set).forEach(this::add);
}
/**
* Add a set definition string to the {@link CharSet}.
*
* @param str set definition string
*/
protected void add(final String str) {
if (str == null) {
return;
}
final int len = str.length();
int pos = 0;
while (pos < len) {
final int remainder = len - pos;
if (remainder >= 4 && str.charAt(pos) == '^' && str.charAt(pos + 2) == '-') {
// negated range
set.add(CharRange.isNotIn(str.charAt(pos + 1), str.charAt(pos + 3)));
pos += 4;
} else if (remainder >= 3 && str.charAt(pos + 1) == '-') {
// range
set.add(CharRange.isIn(str.charAt(pos), str.charAt(pos + 2)));
pos += 3;
} else if (remainder >= 2 && str.charAt(pos) == '^') {
// negated char
set.add(CharRange.isNot(str.charAt(pos + 1)));
pos += 2;
} else {
// char
set.add(CharRange.is(str.charAt(pos)));
pos += 1;
}
}
}
/**
* Does the {@link CharSet} contain the specified
* character {@code ch}.
*
* @param ch the character to check for
* @return {@code true} if the set contains the characters
*/
public boolean contains(final char ch) {
synchronized (set) {
return set.stream().anyMatch(range -> range.contains(ch));
}
}
// Basics
/**
* Compares two {@link CharSet} objects, returning true if they represent
* exactly the same set of characters defined in the same way.
*
* <p>The two sets {@code abc} and {@code a-c} are <em>not</em>
* equal according to this method.</p>
*
* @param obj the object to compare to
* @return true if equal
* @since 2.0
*/
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof CharSet)) {
return false;
}
final CharSet other = (CharSet) obj;
return set.equals(other.set);
}
/**
* Gets the internal set as an array of CharRange objects.
*
* @return an array of immutable CharRange objects
* @since 2.0
*/
// NOTE: This is no longer public as CharRange is no longer a public class.
// It may be replaced when CharSet moves to Range.
/*public*/ CharRange[] getCharRanges() {
return set.toArray(CharRange.EMPTY_ARRAY);
}
/**
* Gets a hash code compatible with the equals method.
*
* @return a suitable hash code
* @since 2.0
*/
@Override
public int hashCode() {
return 89 + set.hashCode();
}
/**
* Gets a string representation of the set.
*
* @return string representation of the set
*/
@Override
public String toString() {
return set.toString();
}
}
| CharSet |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/domain/UpdateSpecificationUnitTests.java | {
"start": 5502,
"end": 5730
} | class ____ implements Serializable, UpdateSpecification<Object> {
@Override
public Predicate toPredicate(Root<Object> root, CriteriaUpdate<Object> update, CriteriaBuilder cb) {
return null;
}
}
}
| SerializableSpecification |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.