language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/JavaTimeDefaultTimeZoneTest.java | {
"start": 5603,
"end": 6061
} | class ____ {
// BUG: Diagnostic matches: REPLACEME
Year now = Year.now();
Year nowWithZone = Year.now(systemDefault());
}
""")
.doTest();
}
@Test
public void yearMonth() {
helper
.addSourceLines(
"TestClass.java",
"""
import static java.time.ZoneId.systemDefault;
import java.time.YearMonth;
public | TestClass |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/utils/JavaUserDefinedAggFunctions.java | {
"start": 11461,
"end": 12623
} | class ____ extends AggregateFunction<Long, DataViewTestAccum> {
@Override
public DataViewTestAccum createAccumulator() {
return new DataViewTestAccum();
}
// Overloaded accumulate method
public void accumulate(DataViewTestAccum accumulator, String a, Long b) {
try {
if (!accumulator.map.contains(a)) {
accumulator.map.put(a, 1);
accumulator.count++;
}
accumulator.list.add(b);
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public Long getValue(DataViewTestAccum accumulator) {
long sum = accumulator.count;
try {
for (Long value : accumulator.list.get()) {
sum += value;
}
} catch (Exception e) {
e.printStackTrace();
}
return sum;
}
@Override
public void close() {
isCloseCalled = true;
}
}
/** Count accumulator. */
public static | DataViewTestAgg |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TestSupportsStagingTableFactory.java | {
"start": 1998,
"end": 3749
} | class ____ implements DynamicTableSinkFactory {
public static final String IDENTIFIER = "test-staging";
public static final List<String> JOB_STATUS_CHANGE_PROCESS = new LinkedList<>();
public static final List<SupportsStaging.StagingPurpose> STAGING_PURPOSE_LIST =
new LinkedList<>();
private static final ConfigOption<String> DATA_DIR =
ConfigOptions.key("data-dir")
.stringType()
.noDefaultValue()
.withDescription("The data id used to write the rows.");
private static final ConfigOption<Boolean> SINK_FAIL =
ConfigOptions.key("sink-fail")
.booleanType()
.defaultValue(false)
.withDescription(
"If set to true, then sink will throw an exception causing the job to fail, used to verify the TestStagedTable#abort.");
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
helper.validate();
String dataDir = helper.getOptions().get(DATA_DIR);
boolean sinkFail = helper.getOptions().get(SINK_FAIL);
return new SupportsStagingTableSink(dataDir, sinkFail);
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
return Collections.singleton(DATA_DIR);
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
return Collections.singleton(SINK_FAIL);
}
/** A sink that supports staging. */
private static | TestSupportsStagingTableFactory |
java | quarkusio__quarkus | extensions/scheduler/common/src/main/java/io/quarkus/scheduler/common/runtime/BaseScheduler.java | {
"start": 503,
"end": 2908
} | class ____ {
protected final Vertx vertx;
protected final CronParser cronParser;
protected final Duration defaultOverdueGracePeriod;
protected final Events events;
protected final Instance<JobInstrumenter> jobInstrumenter;
protected final ScheduledExecutorService blockingExecutor;
public BaseScheduler(Vertx vertx, CronParser cronParser,
Duration defaultOverdueGracePeriod, Events events, Instance<JobInstrumenter> jobInstrumenter,
ScheduledExecutorService blockingExecutor) {
this.vertx = vertx;
this.cronParser = cronParser;
this.defaultOverdueGracePeriod = defaultOverdueGracePeriod;
this.events = events;
this.jobInstrumenter = jobInstrumenter;
this.blockingExecutor = blockingExecutor;
}
protected UnsupportedOperationException notStarted() {
return new UnsupportedOperationException("Scheduler was not started");
}
protected ScheduledInvoker initInvoker(ScheduledInvoker invoker, Events events,
ConcurrentExecution concurrentExecution, Scheduled.SkipPredicate skipPredicate, JobInstrumenter instrumenter,
Vertx vertx, boolean skipOffloadingInvoker,
OptionalLong delay, ScheduledExecutorService blockingExecutor) {
invoker = new StatusEmitterInvoker(invoker, events.successExecution, events.failedExecution);
if (concurrentExecution == ConcurrentExecution.SKIP) {
invoker = new SkipConcurrentExecutionInvoker(invoker, events.skippedExecution);
}
if (skipPredicate != null) {
invoker = new SkipPredicateInvoker(invoker, skipPredicate, events.skippedExecution);
}
if (instrumenter != null) {
invoker = new InstrumentedInvoker(invoker, instrumenter);
}
if (!skipOffloadingInvoker) {
invoker = new OffloadingInvoker(invoker, vertx);
}
if (delay.isPresent()) {
invoker = new DelayedExecutionInvoker(invoker, delay.getAsLong(), blockingExecutor, events.delayedExecution);
}
return invoker;
}
protected Scheduled.SkipPredicate initSkipPredicate(Class<? extends SkipPredicate> predicateClass) {
if (predicateClass.equals(Scheduled.Never.class)) {
return null;
}
return SchedulerUtils.instantiateBeanOrClass(predicateClass);
}
}
| BaseScheduler |
java | google__dagger | javatests/dagger/internal/codegen/ProductionComponentProcessorTest.java | {
"start": 17033,
"end": 17627
} | interface ____ {",
" ListenableFuture<String> str();",
"}");
Source module =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.producers.ProducerModule;",
"import dagger.producers.Produces;",
"import dagger.producers.ProductionScope;",
"import javax.inject.Provider;",
"import java.util.concurrent.Executor;",
"import dagger.producers.Production;",
"",
"@ProducerModule",
" | TestComponent |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorApiKeyIdActionRequestBWCSerializingTests.java | {
"start": 564,
"end": 2535
} | class ____ extends AbstractBWCSerializationTestCase<
UpdateConnectorApiKeyIdAction.Request> {
private String connectorId;
@Override
protected Writeable.Reader<UpdateConnectorApiKeyIdAction.Request> instanceReader() {
return UpdateConnectorApiKeyIdAction.Request::new;
}
@Override
protected UpdateConnectorApiKeyIdAction.Request createTestInstance() {
this.connectorId = randomUUID();
return new UpdateConnectorApiKeyIdAction.Request(connectorId, randomAlphaOfLengthBetween(5, 15), randomAlphaOfLengthBetween(5, 15));
}
@Override
protected UpdateConnectorApiKeyIdAction.Request mutateInstance(UpdateConnectorApiKeyIdAction.Request instance) throws IOException {
String originalConnectorId = instance.getConnectorId();
String apiKeyId = instance.getApiKeyId();
String apiKeySecretId = instance.getApiKeySecretId();
switch (between(0, 2)) {
case 0 -> originalConnectorId = randomValueOtherThan(originalConnectorId, () -> randomUUID());
case 1 -> apiKeyId = randomValueOtherThan(apiKeyId, () -> randomAlphaOfLengthBetween(5, 15));
case 2 -> apiKeySecretId = randomValueOtherThan(apiKeySecretId, () -> randomAlphaOfLengthBetween(5, 15));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new UpdateConnectorApiKeyIdAction.Request(originalConnectorId, apiKeyId, apiKeySecretId);
}
@Override
protected UpdateConnectorApiKeyIdAction.Request doParseInstance(XContentParser parser) throws IOException {
return UpdateConnectorApiKeyIdAction.Request.fromXContent(parser, this.connectorId);
}
@Override
protected UpdateConnectorApiKeyIdAction.Request mutateInstanceForVersion(
UpdateConnectorApiKeyIdAction.Request instance,
TransportVersion version
) {
return instance;
}
}
| UpdateConnectorApiKeyIdActionRequestBWCSerializingTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/callbacks/CallbacksDisabledTest.java | {
"start": 788,
"end": 1573
} | class ____ {
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testCallbacksAreDisabled(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
Cat c = new Cat();
c.setName( "Kitty" );
c.setDateOfBirth( new Date( 90, 11, 15 ) );
entityManager.persist( c );
entityManager.getTransaction().commit();
entityManager.clear();
entityManager.getTransaction().begin();
Cat _c = entityManager.find( Cat.class, c.getId() );
assertTrue( _c.getAge() == 0 ); // With listeners enabled this would be false. Proven by org.hibernate.orm.test.jpa.callbacks.CallbacksTest.testCallbackMethod
}
);
}
}
| CallbacksDisabledTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java | {
"start": 916,
"end": 2611
} | class ____ {
private final DatanodeStorage storage;
private final boolean failed;
private final long capacity;
private final long dfsUsed;
private final long nonDfsUsed;
private final long remaining;
private final long blockPoolUsed;
private final float blockPoolUsagePercent;
private final String mount;
public static final StorageReport[] EMPTY_ARRAY = {};
public StorageReport(DatanodeStorage storage, boolean failed, long capacity,
long dfsUsed, long remaining, long bpUsed, long nonDfsUsed) {
this(storage, failed, capacity, dfsUsed,
remaining, bpUsed, nonDfsUsed, "");
}
public StorageReport(DatanodeStorage storage, boolean failed, long capacity,
long dfsUsed, long remaining, long bpUsed,
long nonDfsUsed, String mount) {
this.storage = storage;
this.failed = failed;
this.capacity = capacity;
this.dfsUsed = dfsUsed;
this.nonDfsUsed = nonDfsUsed;
this.remaining = remaining;
this.blockPoolUsed = bpUsed;
this.blockPoolUsagePercent = capacity <= 0 ? 0.0f :
(bpUsed * 100.0f) / capacity;
this.mount = mount;
}
public DatanodeStorage getStorage() {
return storage;
}
public boolean isFailed() {
return failed;
}
public long getCapacity() {
return capacity;
}
public long getDfsUsed() {
return dfsUsed;
}
public long getNonDfsUsed() {
return nonDfsUsed;
}
public long getRemaining() {
return Math.max(remaining, 0L);
}
public long getBlockPoolUsed() {
return blockPoolUsed;
}
public String getMount() {
return mount;
}
public float getBlockPoolUsagePercent() {
return blockPoolUsagePercent;
}
}
| StorageReport |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/resource/OAuth2ResourceServerConfigurerTests.java | {
"start": 102243,
"end": 102782
} | class ____ implements BeanPostProcessor, EnvironmentAware {
private final MockWebServer server = new MockWebServer();
@PreDestroy
void shutdown() throws IOException {
this.server.shutdown();
}
@Override
public void setEnvironment(Environment environment) {
if (environment instanceof ConfigurableEnvironment) {
((ConfigurableEnvironment) environment).getPropertySources()
.addFirst(new MockWebServerPropertySource());
}
}
@Bean
MockWebServer web() {
return this.server;
}
private | WebServerConfig |
java | google__guice | extensions/assistedinject/src/com/google/inject/assistedinject/FactoryModuleBuilder.java | {
"start": 7001,
"end": 7090
} | interface ____ your factory, use binding
* annotations on your return types:
*
* <pre> | from |
java | playframework__playframework | documentation/manual/working/javaGuide/main/upload/code/javaguide/fileupload/controllers/HomeController.java | {
"start": 369,
"end": 1052
} | class ____ extends Controller {
public Result upload(Http.Request request) {
Http.MultipartFormData<TemporaryFile> body = request.body().asMultipartFormData();
Http.MultipartFormData.FilePart<TemporaryFile> picture = body.getFile("picture");
if (picture != null) {
String fileName = picture.getFilename();
long fileSize = picture.getFileSize();
String contentType = picture.getContentType();
TemporaryFile file = picture.getRef();
file.copyTo(Paths.get("/tmp/picture/destination.jpg"), true);
return ok("File uploaded");
} else {
return badRequest().flashing("error", "Missing file");
}
}
}
// #syncUpload
| HomeController |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/DefaultLocaleMissingMessageTemplateTest.java | {
"start": 1591,
"end": 1695
} | interface ____ {
@Message
String goodbye();
}
@Localized("en")
public | Messages |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/virtualservice/match/DoubleMatch.java | {
"start": 886,
"end": 1772
} | class ____ {
private Double exact;
private DoubleRangeMatch range;
private Double mod;
public Double getExact() {
return exact;
}
public void setExact(Double exact) {
this.exact = exact;
}
public DoubleRangeMatch getRange() {
return range;
}
public void setRange(DoubleRangeMatch range) {
this.range = range;
}
public Double getMod() {
return mod;
}
public void setMod(Double mod) {
this.mod = mod;
}
public boolean isMatch(Double input) {
if (exact != null && mod == null) {
return input.equals(exact);
} else if (range != null) {
return range.isMatch(input);
} else if (exact != null) {
Double result = input % mod;
return result.equals(exact);
}
return false;
}
}
| DoubleMatch |
java | apache__camel | components/camel-spring-parent/camel-spring-ai/camel-spring-ai-tools/src/test/java/org/apache/camel/component/springai/tools/JsonSchemaTest.java | {
"start": 3636,
"end": 8490
} | enum
____ unitProp = schemaNode.get("properties").get("unit");
assertNotNull(unitProp);
assertEquals("string", unitProp.get("type").asText());
assertEquals("The temperature unit", unitProp.get("description").asText());
assertTrue(unitProp.has("enum"));
assertEquals("C", unitProp.get("enum").get(0).asText());
assertEquals("F", unitProp.get("enum").get(1).asText());
// Verify threshold property
JsonNode thresholdProp = schemaNode.get("properties").get("threshold");
assertNotNull(thresholdProp);
assertEquals("number", thresholdProp.get("type").asText());
assertEquals("Temperature threshold", thresholdProp.get("description").asText());
// Verify required array
JsonNode required = schemaNode.get("required");
assertNotNull(required);
assertEquals(2, required.size());
assertTrue(required.toString().contains("location"));
assertTrue(required.toString().contains("unit"));
}
@Test
public void testJsonSchemaGenerationWithMinimalMetadata() throws Exception {
// Create parameters with minimal metadata
Map<String, String> parameters = new HashMap<>();
parameters.put("name", "string");
parameters.put("age", "integer");
// Create endpoint and test schema generation
SpringAiToolsComponent component = new SpringAiToolsComponent(context);
SpringAiToolsEndpoint endpoint = new SpringAiToolsEndpoint(
"spring-ai-tools:test",
component,
"test-tool",
"test-tag",
new SpringAiToolsConfiguration());
endpoint.setParameters(parameters);
endpoint.setDescription("Test tool");
// Access the private method via reflection for testing
java.lang.reflect.Method method
= SpringAiToolsEndpoint.class.getDeclaredMethod("buildJsonSchemaFromParameters", Map.class);
method.setAccessible(true);
String schema = (String) method.invoke(endpoint, parameters);
System.out.println("Generated Minimal Schema:");
System.out.println(schema);
// Parse and validate the schema
JsonNode schemaNode = MAPPER.readTree(schema);
// Verify basic structure
assertEquals("object", schemaNode.get("type").asText());
assertNotNull(schemaNode.get("properties"));
// Verify properties
JsonNode nameProp = schemaNode.get("properties").get("name");
assertNotNull(nameProp);
assertEquals("string", nameProp.get("type").asText());
JsonNode ageProp = schemaNode.get("properties").get("age");
assertNotNull(ageProp);
assertEquals("integer", ageProp.get("type").asText());
// Verify no required array (since no required=true was specified)
// In JSON, if required is empty, it might not be present or be an empty array
assertTrue(schemaNode.get("required") == null || schemaNode.get("required").size() == 0);
}
@Test
public void testToolMetadataReturnDirectConfiguration() throws Exception {
// Create endpoint with returnDirect=true
SpringAiToolsComponent component = new SpringAiToolsComponent(context);
SpringAiToolsEndpoint endpoint = new SpringAiToolsEndpoint(
"spring-ai-tools:directTool?returnDirect=true",
component,
"directTool",
"test-tag",
new SpringAiToolsConfiguration());
endpoint.setDescription("Tool that returns directly");
endpoint.setReturnDirect(true);
// Create a simple consumer to test
Consumer consumer = endpoint.createConsumer(exchange -> {
exchange.getIn().setBody("Direct result");
});
assertNotNull(consumer);
// Verify the returnDirect property is set
assertTrue(endpoint.isReturnDirect());
}
@Test
public void testToolMetadataDefaultReturnDirect() throws Exception {
// Create endpoint without returnDirect (should default to false)
SpringAiToolsComponent component = new SpringAiToolsComponent(context);
SpringAiToolsEndpoint endpoint = new SpringAiToolsEndpoint(
"spring-ai-tools:normalTool",
component,
"normalTool",
"test-tag",
new SpringAiToolsConfiguration());
endpoint.setDescription("Normal tool");
// Create a simple consumer to test
Consumer consumer = endpoint.createConsumer(exchange -> {
exchange.getIn().setBody("Normal result");
});
assertNotNull(consumer);
// Verify the returnDirect property defaults to false
assertTrue(!endpoint.isReturnDirect());
}
}
| JsonNode |
java | elastic__elasticsearch | x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/AnalyzeTests.java | {
"start": 926,
"end": 4739
} | class ____ extends SecurityIntegTestCase {
@Override
protected String configUsers() {
final String usersPasswdHashed = new String(
getFastStoredHashAlgoForTests().hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)
);
return super.configUsers() + "analyze_indices:" + usersPasswdHashed + "\n" + "analyze_cluster:" + usersPasswdHashed + "\n";
}
@Override
protected String configUsersRoles() {
return super.configUsersRoles() + "analyze_indices:analyze_indices\n" + "analyze_cluster:analyze_cluster\n";
}
@Override
protected String configRoles() {
// role that has analyze indices privileges only
return Strings.format("""
%s
analyze_indices:
indices:
- names: 'test_*'
privileges: [ 'indices:admin/analyze' ]
analyze_cluster:
cluster:
- cluster:admin/analyze
""", super.configRoles());
}
public void testAnalyzeWithIndices() {
// this test tries to execute different analyze api variants from a user that has analyze privileges only on a specific index
// namespace
createIndex("test_1");
ensureGreen();
// ok: user has permissions for analyze on test_*
SecureString passwd = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING;
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd)))
.admin()
.indices()
.prepareAnalyze("this is my text")
.setIndex("test_1")
.setAnalyzer("standard")
.get();
// fails: user doesn't have permissions for analyze on index non_authorized
assertThrowsAuthorizationException(
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd)))
.admin()
.indices()
.prepareAnalyze("this is my text")
.setIndex("non_authorized")
.setAnalyzer("standard")::get,
AnalyzeAction.NAME,
"analyze_indices"
);
// fails: user doesn't have permissions for cluster level analyze
assertThrowsAuthorizationException(
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd)))
.admin()
.indices()
.prepareAnalyze("this is my text")
.setAnalyzer("standard")::get,
"cluster:admin/analyze",
"analyze_indices"
);
}
public void testAnalyzeWithoutIndices() {
// this test tries to execute different analyze api variants from a user that has analyze privileges only at cluster level
SecureString passwd = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING;
// fails: user doesn't have permissions for analyze on index test_1
assertThrowsAuthorizationException(
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_cluster", passwd)))
.admin()
.indices()
.prepareAnalyze("this is my text")
.setIndex("test_1")
.setAnalyzer("standard")::get,
AnalyzeAction.NAME,
"analyze_cluster"
);
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_cluster", passwd)))
.admin()
.indices()
.prepareAnalyze("this is my text")
.setAnalyzer("standard")
.get();
}
}
| AnalyzeTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/TwoOptionalArguments.java | {
"start": 327,
"end": 521
} | interface ____ that a function accepts two optional arguments (the last two).
* This is used by the {@link EsqlFunctionRegistry} to perform validation of function declaration.
*/
public | indicating |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/TableEnvironment.java | {
"start": 42259,
"end": 43232
} | class ____ for the format of the path.
* @param descriptor The descriptor of the model to register.
*/
void createModel(String path, ModelDescriptor descriptor);
/**
* Registers the given {@link ModelDescriptor} as a catalog model similar to SQL models.
*
* <p>The {@link ModelDescriptor descriptor} is converted into a {@link CatalogModel} and stored
* in the catalog.
*
* <p>If the model should not be permanently stored in a catalog, use {@link
* #createTemporaryModel(String, ModelDescriptor)} instead.
*
* <p>Temporary objects can shadow permanent ones. If a temporary object in a given path exists,
* the permanent one will be inaccessible in the current session. To make the permanent object
* available again one can drop the corresponding temporary object.
*
* @param path The path under which the model will be registered. See also the {@link
* TableEnvironment} | description |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/ServiceProviderBuildItem.java | {
"start": 1994,
"end": 2156
} | interface ____ cannot be null or blank");
}
if (serviceInterfaceDescriptorFile == null) {
throw new IllegalArgumentException("service | name |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/RowMapper.java | {
"start": 915,
"end": 2006
} | interface ____ the actual work of mapping each row to a result object
* but don't need to worry about exception handling.
* {@link java.sql.SQLException SQLExceptions} will be caught and handled
* by the calling {@code JdbcTemplate}.
*
* <p>Typically used either for {@code JdbcTemplate}'s query methods or for
* {@code out} parameters of stored procedures. {@code RowMapper} objects are
* typically stateless and thus reusable; they are an ideal choice for
* implementing row-mapping logic in a single place.
*
* <p>Alternatively, consider subclassing
* {@link org.springframework.jdbc.object.MappingSqlQuery} from the
* {@code jdbc.object} package: instead of working with separate
* {@code JdbcTemplate} and {@code RowMapper} objects, you can build executable
* query objects (containing row-mapping logic) in that style.
*
* @author Thomas Risberg
* @author Juergen Hoeller
* @param <T> the result type
* @see JdbcTemplate
* @see RowCallbackHandler
* @see ResultSetExtractor
* @see org.springframework.jdbc.object.MappingSqlQuery
*/
@FunctionalInterface
public | perform |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/io/InputSplitAssigner.java | {
"start": 1066,
"end": 1847
} | interface ____ {
/**
* Returns the next input split that shall be consumed. The consumer's host is passed as a
* parameter to allow localized assignments.
*
* @param host The host address of split requesting task.
* @param taskId The id of the split requesting task.
* @return the next input split to be consumed, or <code>null</code> if no more splits remain.
*/
InputSplit getNextInputSplit(String host, int taskId);
/**
* Return the splits to assigner if the task failed to process it.
*
* @param splits The list of input splits to be returned.
* @param taskId The id of the task that failed to process the input splits.
*/
void returnInputSplit(List<InputSplit> splits, int taskId);
}
| InputSplitAssigner |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java | {
"start": 2042,
"end": 11188
} | class ____ extends ApplicationStateData {
ApplicationStateDataProto proto =
ApplicationStateDataProto.getDefaultInstance();
ApplicationStateDataProto.Builder builder = null;
boolean viaProto = false;
private ApplicationSubmissionContext applicationSubmissionContext = null;
private Map<ApplicationTimeoutType, Long> applicationTimeouts = null;
public ApplicationStateDataPBImpl() {
builder = ApplicationStateDataProto.newBuilder();
}
public ApplicationStateDataPBImpl(
ApplicationStateDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationStateDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.applicationSubmissionContext != null) {
builder.setApplicationSubmissionContext(
((ApplicationSubmissionContextPBImpl)applicationSubmissionContext)
.getProto());
}
if (this.applicationTimeouts != null) {
addApplicationTimeouts();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationStateDataProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public long getSubmitTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasSubmitTime()) {
return -1;
}
return (p.getSubmitTime());
}
@Override
public void setSubmitTime(long submitTime) {
maybeInitBuilder();
builder.setSubmitTime(submitTime);
}
@Override
public long getStartTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
@Override
public long getLaunchTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getLaunchTime();
}
@Override
public void setLaunchTime(long launchTime) {
maybeInitBuilder();
builder.setLaunchTime(launchTime);
}
@Override
public String getUser() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasUser()) {
return null;
}
return (p.getUser());
}
@Override
public void setUser(String user) {
maybeInitBuilder();
builder.setUser(user);
}
@Override
public String getRealUser() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasRealUser()) {
return null;
}
return (p.getRealUser());
}
@Override
public void setRealUser(String realUser) {
maybeInitBuilder();
builder.setRealUser(realUser);
}
@Override
public ApplicationSubmissionContext getApplicationSubmissionContext() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if(applicationSubmissionContext != null) {
return applicationSubmissionContext;
}
if (!p.hasApplicationSubmissionContext()) {
return null;
}
applicationSubmissionContext =
new ApplicationSubmissionContextPBImpl(
p.getApplicationSubmissionContext());
return applicationSubmissionContext;
}
@Override
public void setApplicationSubmissionContext(
ApplicationSubmissionContext context) {
maybeInitBuilder();
if (context == null) {
builder.clearApplicationSubmissionContext();
}
this.applicationSubmissionContext = context;
}
@Override
public RMAppState getState() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationState()) {
return null;
}
return convertFromProtoFormat(p.getApplicationState());
}
@Override
public void setState(RMAppState finalState) {
maybeInitBuilder();
if (finalState == null) {
builder.clearApplicationState();
return;
}
builder.setApplicationState(convertToProtoFormat(finalState));
}
@Override
public String getDiagnostics() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return p.getDiagnostics();
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
if (diagnostics == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnostics);
}
@Override
public long getFinishTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public CallerContext getCallerContext() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
RpcHeaderProtos.RPCCallerContextProto pbContext = p.getCallerContext();
if (pbContext != null) {
CallerContext context = new CallerContext.Builder(pbContext.getContext())
.setSignature(pbContext.getSignature().toByteArray()).build();
return context;
}
return null;
}
@Override
public void setCallerContext(CallerContext callerContext) {
if (callerContext != null) {
maybeInitBuilder();
RpcHeaderProtos.RPCCallerContextProto.Builder b = RpcHeaderProtos.RPCCallerContextProto
.newBuilder();
if (callerContext.isContextValid()) {
b.setContext(callerContext.getContext());
}
if (callerContext.getSignature() != null) {
b.setSignature(ByteString.copyFrom(callerContext.getSignature()));
}
if(callerContext.isContextValid()
|| callerContext.getSignature() != null) {
builder.setCallerContext(b);
}
}
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private static String RM_APP_PREFIX = "RMAPP_";
public static RMAppStateProto convertToProtoFormat(RMAppState e) {
return RMAppStateProto.valueOf(RM_APP_PREFIX + e.name());
}
public static RMAppState convertFromProtoFormat(RMAppStateProto e) {
return RMAppState.valueOf(e.name().replace(RM_APP_PREFIX, ""));
}
@Override
public Map<ApplicationTimeoutType, Long> getApplicationTimeouts() {
initApplicationTimeout();
return this.applicationTimeouts;
}
private void initApplicationTimeout() {
if (this.applicationTimeouts != null) {
return;
}
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
List<ApplicationTimeoutMapProto> lists = p.getApplicationTimeoutsList();
this.applicationTimeouts =
new HashMap<ApplicationTimeoutType, Long>(lists.size());
for (ApplicationTimeoutMapProto timeoutProto : lists) {
this.applicationTimeouts.put(
ProtoUtils
.convertFromProtoFormat(timeoutProto.getApplicationTimeoutType()),
timeoutProto.getTimeout());
}
}
@Override
public void setApplicationTimeouts(
Map<ApplicationTimeoutType, Long> appTimeouts) {
if (appTimeouts == null) {
return;
}
initApplicationTimeout();
this.applicationTimeouts.clear();
this.applicationTimeouts.putAll(appTimeouts);
}
private void addApplicationTimeouts() {
maybeInitBuilder();
builder.clearApplicationTimeouts();
if (applicationTimeouts == null) {
return;
}
Iterable<? extends ApplicationTimeoutMapProto> values =
new Iterable<ApplicationTimeoutMapProto>() {
@Override
public Iterator<ApplicationTimeoutMapProto> iterator() {
return new Iterator<ApplicationTimeoutMapProto>() {
private Iterator<ApplicationTimeoutType> iterator =
applicationTimeouts.keySet().iterator();
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public ApplicationTimeoutMapProto next() {
ApplicationTimeoutType key = iterator.next();
return ApplicationTimeoutMapProto.newBuilder()
.setTimeout(applicationTimeouts.get(key))
.setApplicationTimeoutType(
ProtoUtils.convertToProtoFormat(key))
.build();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
this.builder.addAllApplicationTimeouts(values);
}
}
| ApplicationStateDataPBImpl |
java | apache__camel | components/camel-pqc/src/test/java/org/apache/camel/component/pqc/PQCSignatureSPHINCSPLUSTest.java | {
"start": 1514,
"end": 3635
} | class ____ extends CamelTestSupport {
    // Collects exchanges after the sign step of the route.
    @EndpointInject("mock:sign")
    protected MockEndpoint resultSign;
    // Collects exchanges after the verify step of the route.
    @EndpointInject("mock:verify")
    protected MockEndpoint resultVerify;
    // Entry point used by the test to feed payloads into the route.
    @Produce("direct:sign")
    protected ProducerTemplate templateSign;
    // NOTE(review): empty constructor declares NoSuchAlgorithmException but
    // contains no code that can throw it -- presumably boilerplate; confirm.
    public PQCSignatureSPHINCSPLUSTest() throws NoSuchAlgorithmException {
    }
    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                // Pipeline: sign the body, record it, verify the signature,
                // record the verification result.
                from("direct:sign").to("pqc:sign?operation=sign").to("mock:sign").to("pqc:verify?operation=verify")
                        .to("mock:verify");
            }
        };
    }
    @BeforeAll
    public static void startup() throws Exception {
        // Register the BouncyCastle providers so the post-quantum algorithms
        // are available to the JCA lookups in the registry beans below.
        Security.addProvider(new BouncyCastleProvider());
        Security.addProvider(new BouncyCastlePQCProvider());
    }
    @Test
    void testSignAndVerify() throws Exception {
        resultSign.expectedMessageCount(1);
        resultVerify.expectedMessageCount(1);
        templateSign.sendBody("Hello");
        resultSign.assertIsSatisfied();
        resultVerify.assertIsSatisfied();
        // The verify step stores its boolean outcome in the VERIFY header.
        assertTrue(resultVerify.getExchanges().get(0).getMessage().getHeader(PQCConstants.VERIFY, Boolean.class));
    }
@BindToRegistry("Keypair")
public KeyPair setKeyPair() throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException {
KeyPairGenerator kpGen = KeyPairGenerator.getInstance(PQCSignatureAlgorithms.SPHINCSPLUS.getAlgorithm(),
PQCSignatureAlgorithms.SPHINCSPLUS.getBcProvider());
kpGen.initialize(SPHINCSPlusParameterSpec.haraka_256s, new SecureRandom());
KeyPair kp = kpGen.generateKeyPair();
return kp;
}
@BindToRegistry("Signer")
public Signature getSigner() throws NoSuchAlgorithmException, NoSuchProviderException {
Signature mlDsa = Signature.getInstance(PQCSignatureAlgorithms.SPHINCSPLUS.getAlgorithm(),
PQCSignatureAlgorithms.SPHINCSPLUS.getBcProvider());
return mlDsa;
}
}
| PQCSignatureSPHINCSPLUSTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/resource/basic/HttpHeadersTest.java | {
"start": 1065,
"end": 3194
} | class ____ {
    static Client client;
    // Deploys a minimal archive containing only the resource under test and
    // the URL helper.
    @RegisterExtension
    static ResteasyReactiveUnitTest testExtension = new ResteasyReactiveUnitTest()
            .setArchiveProducer(new Supplier<>() {
                @Override
                public JavaArchive get() {
                    JavaArchive war = ShrinkWrap.create(JavaArchive.class);
                    war.addClasses(PortProviderUtil.class, HttpHeadersResource.class);
                    return war;
                }
            });
    @BeforeAll
    public static void init() {
        client = ClientBuilder.newClient();
    }
    @AfterAll
    public static void close() {
        // Release the JAX-RS client's underlying resources once all tests ran.
        client.close();
    }
    private static String generateURL(String path) {
        // Test URLs are rooted at the simple name of this test class.
        return PortProviderUtil.generateURL(path, HttpHeadersTest.class.getSimpleName());
    }
/**
* @tpTestDetails Client invokes GET request on a sub resource at /HeadersTest/sub2
* with Accept MediaType and Content-Type Headers set;
* Verify that HttpHeaders got the property set by the request
* @tpSince RESTEasy 3.0.16
*/
@Test
@DisplayName("Request Headers Test")
public void RequestHeadersTest() throws Exception {
String errorMessage = "Wrong content of response";
Response response = client.target(generateURL("/HeadersTest/headers")).request()
.header("Accept", "text/plain, text/html, text/html;level=1, */*")
.header("Content-Type", "application/xml;charset=utf8").get();
Assertions.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
String content = response.readEntity(String.class);
Assertions.assertTrue(-1 < content.indexOf("Accept:"));
Assertions.assertTrue(-1 < content.indexOf("Content-Type:"));
Assertions.assertTrue(-1 < content.indexOf("application/xml"));
Assertions.assertTrue(-1 < content.indexOf("charset=utf8"));
Assertions.assertTrue(-1 < content.indexOf("text/html"));
Assertions.assertTrue(-1 < content.indexOf("*/*"));
response.close();
}
}
| HttpHeadersTest |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueJava8Test.java | {
"start": 20013,
"end": 20306
} | class ____ {
    // Property without @Nullable: absence is rejected when build() runs --
    // per AutoValue's documented builder contract.
    public abstract String notNullable();
    // @Nullable property: may be left unset without a builder error.
    public abstract @Nullable String nullable();
    public static Builder builder() {
      // The generated implementation class name encodes the enclosing test class.
      return new AutoValue_AutoValueJava8Test_NullablePropertyWithBuilder.Builder();
    }
@AutoValue.Builder
public | NullablePropertyWithBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cfg/TransactionSettings.java | {
"start": 770,
"end": 1642
} | class ____ implements {@code TransactionCoordinatorBuilder},
* <li>{@code jta} or {@code jdbc}</li>
* </ul>
* <p/>
*
* @settingDefault With Jakarta Persistence bootstrapping, based on the persistence unit's {@link PersistenceUnitInfo#getTransactionType()};
* otherwise {@code jdbc}.
*
* @implSpec With non-Jakarta Persistence bootstrapping, Hibernate will use {@code jdbc} as the
* default which will cause problems if the application actually uses JTA-based transactions.
*
* @see #JTA_PLATFORM
*
* @since 5.0
*/
String TRANSACTION_COORDINATOR_STRATEGY = "hibernate.transaction.coordinator_class";
/**
* Specifies the {@link org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform}
* implementation to use for integrating with JTA, either:
* <ul>
* <li>an instance of {@code JtaPlatform}, or
* <li>the name of a | that |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/StringConfigurationBasicValidator.java | {
"start": 1183,
"end": 1711
} | class ____ extends ConfigurationBasicValidator<String> implements ConfigurationValidator{
  /**
   * @param configKey configuration key this validator is bound to.
   * @param defaultVal value to fall back on when the key is unset.
   * @param throwIfInvalid whether validation failures should raise
   *        InvalidConfigurationValueException (enforced by the base class).
   */
  public StringConfigurationBasicValidator(final String configKey, final String defaultVal, final boolean throwIfInvalid){
    super(configKey, defaultVal, throwIfInvalid);
  }
public String validate(final String configValue) throws InvalidConfigurationValueException {
String result = super.validate((configValue));
if (result != null) {
return result;
}
return configValue;
}
}
| StringConfigurationBasicValidator |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/from/MappedByTableGroup.java | {
"start": 450,
"end": 4757
} | class ____ extends DelegatingTableGroup implements VirtualTableGroup {
	private final NavigablePath navigablePath;
	// Model part (the "mapped by" side) that this group represents.
	private final TableGroupProducer producer;
	// Concrete table group that all table-reference lookups delegate to.
	private final TableGroup underlyingTableGroup;
	private final boolean fetched;
	// Parent group consulted first when the producer's state can be read from it.
	private final TableGroup parentTableGroup;
	private final LazyTableGroup.ParentTableGroupUseChecker parentTableGroupUseChecker;
	public MappedByTableGroup(
			NavigablePath navigablePath,
			TableGroupProducer producer,
			TableGroup underlyingTableGroup,
			boolean fetched,
			TableGroup parentTableGroup,
			LazyTableGroup.ParentTableGroupUseChecker parentTableGroupUseChecker) {
		this.navigablePath = navigablePath;
		this.producer = producer;
		this.underlyingTableGroup = underlyingTableGroup;
		this.fetched = fetched;
		this.parentTableGroup = parentTableGroup;
		this.parentTableGroupUseChecker = parentTableGroupUseChecker;
	}
	@Override
	protected TableGroup getTableGroup() {
		// Delegation target for the DelegatingTableGroup base class.
		return underlyingTableGroup;
	}
	@Override
	public TableGroup getUnderlyingTableGroup() {
		return underlyingTableGroup;
	}
	@Override
	public NavigablePath getNavigablePath() {
		return navigablePath;
	}
	@Override
	public ModelPartContainer getExpressionType() {
		return getModelPart();
	}
	@Override
	public String getGroupAlias() {
		// none, although we could also delegate to the underlyingTableGroup's group-alias
		return null;
	}
	@Override
	public boolean isFetched() {
		return fetched;
	}
	@Override
	public ModelPartContainer getModelPart() {
		// The producer, not the underlying group's part, is the visible model part.
		return producer;
	}
	// Don't provide access to table group joins as this table group is just a "named reference".
	// The underlying table group contains the joins and will render them.
	@Override
	public boolean isRealTableGroup() {
		return false;
	}
	@Override
	public boolean isLateral() {
		return false;
	}
	@Override
	public List<TableGroupJoin> getTableGroupJoins() {
		// Always empty: joins live on (and are rendered by) the underlying group.
		return Collections.emptyList();
	}
	@Override
	public List<TableGroupJoin> getNestedTableGroupJoins() {
		return Collections.emptyList();
	}
	@Override
	public void visitTableGroupJoins(Consumer<TableGroupJoin> consumer) {
		// No-op
	}
	@Override
	public void visitNestedTableGroupJoins(Consumer<TableGroupJoin> consumer) {
		// No-op
	}
	@Override
	public List<TableReferenceJoin> getTableReferenceJoins() {
		return Collections.emptyList();
	}
@Override
public TableReference resolveTableReference(
NavigablePath navigablePath,
String tableExpression) {
final TableReference tableReference = getTableReference(
navigablePath,
tableExpression,
true
);
if ( tableReference == null ) {
throw new UnknownTableReferenceException(
tableExpression,
String.format(
Locale.ROOT,
"Unable to determine TableReference (`%s`) for `%s`",
tableExpression,
navigablePath
)
);
}
return tableReference;
}
@Override
public TableReference resolveTableReference(
NavigablePath navigablePath,
ValuedModelPart modelPart,
String tableExpression) {
assert modelPart != null;
final TableReference tableReference = getTableReference(
navigablePath,
modelPart,
tableExpression,
true
);
if ( tableReference == null ) {
throw new UnknownTableReferenceException(
tableExpression,
String.format(
Locale.ROOT,
"Unable to determine TableReference (`%s`) for `%s`",
tableExpression,
navigablePath
)
);
}
return tableReference;
}
	@Override
	public TableReference getTableReference(
			NavigablePath navigablePath,
			String tableExpression,
			boolean resolve) {
		// Plain lookups always go to the underlying group.
		return getTableGroup().getTableReference(
				navigablePath,
				tableExpression,
				resolve
		);
	}
	@Override
	public TableReference getTableReference(
			NavigablePath navigablePath,
			ValuedModelPart modelPart,
			String tableExpression,
			boolean resolve) {
		// Try the parent table group first when the checker confirms the
		// producer's state can be read from it.
		if ( parentTableGroupUseChecker.canUseParentTableGroup( producer, navigablePath, modelPart ) ) {
			final TableReference reference = parentTableGroup.getTableReference(
					navigablePath,
					(ValuedModelPart) producer,
					tableExpression,
					resolve
			);
			if ( reference != null ) {
				return reference;
			}
		}
		// Fall back to the underlying table group.
		return getTableGroup().getTableReference( navigablePath, modelPart, tableExpression, resolve );
	}
}
| MappedByTableGroup |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/GeneratedTest.java | {
"start": 2003,
"end": 4028
} | class ____ {
@Id
private Long id;
private String firstName;
private String lastName;
private String middleName1;
private String middleName2;
private String middleName3;
private String middleName4;
private String middleName5;
@Generated(event = {INSERT,UPDATE})
@Column(columnDefinition =
"AS CONCAT(" +
" COALESCE(firstName, ''), " +
" COALESCE(' ' + middleName1, ''), " +
" COALESCE(' ' + middleName2, ''), " +
" COALESCE(' ' + middleName3, ''), " +
" COALESCE(' ' + middleName4, ''), " +
" COALESCE(' ' + middleName5, ''), " +
" COALESCE(' ' + lastName, '') " +
")")
private String fullName;
//end::mapping-generated-provided-generated[]
		// JPA requires a no-arg constructor for entity instantiation.
		public Person() {}
		// Plain accessors below; fullName has no setter because its value is
		// produced by the database (see the @Generated mapping above).
		public Long getId() {
			return id;
		}
		public void setId(Long id) {
			this.id = id;
		}
		public String getFirstName() {
			return firstName;
		}
		public void setFirstName(String firstName) {
			this.firstName = firstName;
		}
		public String getLastName() {
			return lastName;
		}
		public void setLastName(String lastName) {
			this.lastName = lastName;
		}
		public String getMiddleName1() {
			return middleName1;
		}
		public void setMiddleName1(String middleName1) {
			this.middleName1 = middleName1;
		}
		public String getMiddleName2() {
			return middleName2;
		}
		public void setMiddleName2(String middleName2) {
			this.middleName2 = middleName2;
		}
		public String getMiddleName3() {
			return middleName3;
		}
		public void setMiddleName3(String middleName3) {
			this.middleName3 = middleName3;
		}
		public String getMiddleName4() {
			return middleName4;
		}
		public void setMiddleName4(String middleName4) {
			this.middleName4 = middleName4;
		}
		public String getMiddleName5() {
			return middleName5;
		}
		public void setMiddleName5(String middleName5) {
			this.middleName5 = middleName5;
		}
		public String getFullName() {
			return fullName;
		}
//tag::mapping-generated-provided-generated[]
}
//end::mapping-generated-provided-generated[]
}
| Person |
java | apache__camel | components/camel-xj/src/main/java/org/apache/camel/component/xj/JsonFileResultHandlerFactory.java | {
"start": 1168,
"end": 1880
} | class ____ implements ResultHandlerFactory {
    // Shared Jackson factory used to create the per-exchange JSON writer.
    private final JsonFactory jsonFactory;
    /**
     * Creates a new json to file result handler factory
     *
     * @param jsonFactory the {@link JsonFactory} to use to write the json.
     */
    public JsonFileResultHandlerFactory(JsonFactory jsonFactory) {
        this.jsonFactory = jsonFactory;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ResultHandler createResult(Exchange exchange) throws Exception {
        // The target file name must arrive via the XSLT_FILE_NAME header;
        // getMandatoryHeader raises when it is absent.
        final String fileName = ExchangeHelper.getMandatoryHeader(exchange, XJConstants.XSLT_FILE_NAME, String.class);
        return new JsonFileResultHandler(jsonFactory, new File(fileName));
    }
}
| JsonFileResultHandlerFactory |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/RequestMappingHandlerMapping.java | {
"start": 3606,
"end": 22629
} | class ____ extends RequestMappingInfoHandlerMapping
implements MatchableHandlerMapping, EmbeddedValueResolverAware {
	private static final String[] EMPTY_STRING_ARRAY = new String[0];
	private static final RequestMethod[] EMPTY_REQUEST_METHOD_ARRAY = new RequestMethod[0];
	// Ordered map: the first predicate matching a handler type wins (see setPathPrefixes).
	private Map<String, Predicate<Class<?>>> pathPrefixes = Collections.emptyMap();
	private ContentNegotiationManager contentNegotiationManager = new ContentNegotiationManager();
	// Injected via EmbeddedValueResolverAware; may be null before initialization.
	private @Nullable StringValueResolver embeddedValueResolver;
	// Snapshot of mapping options; rebuilt in afterPropertiesSet().
	private RequestMappingInfo.BuilderConfiguration config = new RequestMappingInfo.BuilderConfiguration();
/**
* Configure path prefixes to apply to controller methods.
* <p>Prefixes are used to enrich the mappings of every {@code @RequestMapping}
* method and {@code @HttpExchange} method whose controller type is matched
* by a corresponding {@code Predicate} in the map. The prefix for the first
* matching predicate is used, assuming the input map has predictable order.
* <p>Consider using {@link org.springframework.web.method.HandlerTypePredicate
* HandlerTypePredicate} to group controllers.
* @param prefixes a map with path prefixes as key
* @since 5.1
*/
public void setPathPrefixes(Map<String, Predicate<Class<?>>> prefixes) {
this.pathPrefixes = (!prefixes.isEmpty() ?
Collections.unmodifiableMap(new LinkedHashMap<>(prefixes)) :
Collections.emptyMap());
}
/**
* The configured path prefixes as a read-only, possibly empty map.
* @since 5.1
*/
public Map<String, Predicate<Class<?>>> getPathPrefixes() {
return this.pathPrefixes;
}
/**
* Set the {@link ContentNegotiationManager} to use to determine requested media types.
* If not set, the default constructor is used.
*/
public void setContentNegotiationManager(ContentNegotiationManager contentNegotiationManager) {
Assert.notNull(contentNegotiationManager, "ContentNegotiationManager must not be null");
this.contentNegotiationManager = contentNegotiationManager;
}
/**
* Return the configured {@link ContentNegotiationManager}.
*/
public ContentNegotiationManager getContentNegotiationManager() {
return this.contentNegotiationManager;
}
	@Override
	public void setEmbeddedValueResolver(StringValueResolver resolver) {
		this.embeddedValueResolver = resolver;
	}
	@Override
	@SuppressWarnings("removal")
	public void afterPropertiesSet() {
		// Rebuild the mapping-options snapshot before the superclass scans
		// for handler methods.
		this.config = new RequestMappingInfo.BuilderConfiguration();
		this.config.setContentNegotiationManager(getContentNegotiationManager());
		this.config.setApiVersionStrategy(getApiVersionStrategy());
		if (getPatternParser() != null) {
			this.config.setPatternParser(getPatternParser());
		}
		else {
			// Deprecated PathMatcher-based matching -- hence the "removal"
			// warning suppression on this method.
			this.config.setPathMatcher(getPathMatcher());
		}
		super.afterPropertiesSet();
	}
/**
* Obtain a {@link RequestMappingInfo.BuilderConfiguration} that reflects
* the internal configuration of this {@code HandlerMapping} and can be used
* to set {@link RequestMappingInfo.Builder#options(RequestMappingInfo.BuilderConfiguration)}.
* <p>This is useful for programmatic registration of request mappings via
* {@link #registerHandlerMethod(Object, Method, RequestMappingInfo)}.
* @return the builder configuration that reflects the internal state
* @since 5.3.14
*/
public RequestMappingInfo.BuilderConfiguration getBuilderConfiguration() {
return this.config;
}
/**
* {@inheritDoc}
* <p>Expects a handler to have a type-level @{@link Controller} annotation.
*/
@Override
protected boolean isHandler(Class<?> beanType) {
return AnnotatedElementUtils.hasAnnotation(beanType, Controller.class);
}
/**
* Uses type-level and method-level {@link RequestMapping @RequestMapping}
* and {@link HttpExchange @HttpExchange} annotations to create the
* {@link RequestMappingInfo}.
* <p>For CGLIB proxy classes, additional validation is performed based on
* method visibility:
* <ul>
* <li>Private methods cannot be overridden and therefore cannot be used as
* handler methods.</li>
* <li>Package-private methods from different packages are inaccessible and
* must be changed to public or protected.</li>
* </ul>
* @return the created {@code RequestMappingInfo}, or {@code null} if the method
* does not have a {@code @RequestMapping} or {@code @HttpExchange} annotation
* @see #getCustomMethodCondition(Method)
* @see #getCustomTypeCondition(Class)
*/
@Override
protected @Nullable RequestMappingInfo getMappingForMethod(Method method, Class<?> handlerType) {
validateCglibProxyMethodVisibility(method, handlerType);
RequestMappingInfo info = createRequestMappingInfo(method);
if (info != null) {
RequestMappingInfo typeInfo = createRequestMappingInfo(handlerType);
if (typeInfo != null) {
info = typeInfo.combine(info);
}
if (info.isEmptyMapping()) {
info = info.mutate().paths("", "/").options(this.config).build();
}
String prefix = getPathPrefix(handlerType);
if (prefix != null) {
info = RequestMappingInfo.paths(prefix).options(this.config).build().combine(info);
}
}
return info;
}
/**
* Validate the method visibility requirements specified in {@link #getMappingForMethod(Method, Class)}.
* @since 7.0
*/
private static void validateCglibProxyMethodVisibility(Method method, Class<?> handlerType) {
if (handlerType.getName().contains(ClassUtils.CGLIB_CLASS_SEPARATOR)) {
int modifiers = method.getModifiers();
if (Modifier.isPrivate(modifiers)) {
throw new IllegalStateException("""
Private method [%s] on CGLIB proxy class [%s] cannot be used as a request \
handler method, because private methods cannot be overridden. \
Change the method to non-private visibility, or use interface-based JDK \
proxying instead.""".formatted(method.getName(), handlerType.getName()));
}
if (!Modifier.isPublic(modifiers) && !Modifier.isProtected(modifiers)) {
Class<?> declaringClass = method.getDeclaringClass();
String declaringPackage = declaringClass.getPackage().getName();
String handlerPackage = handlerType.getPackage().getName();
if (!Objects.equals(declaringPackage, handlerPackage)) {
throw new IllegalStateException("""
Package-private method [%s] declared in class [%s] cannot be advised by \
CGLIB-proxied handler class [%s], because it is effectively private. Either \
make the method public or protected, or use interface-based JDK proxying instead."""
.formatted(method.getName(), declaringClass.getName(), handlerType.getName()));
}
}
}
}
@Nullable String getPathPrefix(Class<?> handlerType) {
for (Map.Entry<String, Predicate<Class<?>>> entry : this.pathPrefixes.entrySet()) {
if (entry.getValue().test(handlerType)) {
String prefix = entry.getKey();
if (this.embeddedValueResolver != null) {
prefix = this.embeddedValueResolver.resolveStringValue(prefix);
}
return prefix;
}
}
return null;
}
private @Nullable RequestMappingInfo createRequestMappingInfo(AnnotatedElement element) {
List<AnnotationDescriptor> descriptors =
MergedAnnotations.from(element, SearchStrategy.TYPE_HIERARCHY, RepeatableContainers.none())
.stream()
.filter(MergedAnnotationPredicates.typeIn(RequestMapping.class, HttpExchange.class))
.filter(MergedAnnotationPredicates.firstRunOf(MergedAnnotation::getAggregateIndex))
.map(AnnotationDescriptor::new)
.distinct()
.toList();
RequestMappingInfo info = null;
RequestCondition<?> customCondition = (element instanceof Class<?> clazz ?
getCustomTypeCondition(clazz) : getCustomMethodCondition((Method) element));
List<AnnotationDescriptor> mappingDescriptors =
descriptors.stream().filter(desc -> desc.annotation instanceof RequestMapping).toList();
if (!mappingDescriptors.isEmpty()) {
checkMultipleAnnotations(element, mappingDescriptors);
info = createRequestMappingInfo((RequestMapping) mappingDescriptors.get(0).annotation, customCondition);
}
List<AnnotationDescriptor> exchangeDescriptors =
descriptors.stream().filter(desc -> desc.annotation instanceof HttpExchange).toList();
if (!exchangeDescriptors.isEmpty()) {
checkMultipleAnnotations(element, info, mappingDescriptors, exchangeDescriptors);
info = createRequestMappingInfo((HttpExchange) exchangeDescriptors.get(0).annotation, customCondition);
}
if (info != null && getApiVersionStrategy() instanceof DefaultApiVersionStrategy davs) {
String version = info.getVersionCondition().getVersion();
if (version != null) {
davs.addMappedVersion(version);
}
}
return info;
}
/**
* Provide a custom type-level request condition.
* The custom {@link RequestCondition} can be of any type so long as the
* same condition type is returned from all calls to this method in order
* to ensure custom request conditions can be combined and compared.
* <p>Consider extending {@link AbstractRequestCondition} for custom
* condition types and using {@link CompositeRequestCondition} to provide
* multiple custom conditions.
* @param handlerType the handler type for which to create the condition
* @return the condition, or {@code null}
*/
protected @Nullable RequestCondition<?> getCustomTypeCondition(Class<?> handlerType) {
return null;
}
/**
* Provide a custom method-level request condition.
* The custom {@link RequestCondition} can be of any type so long as the
* same condition type is returned from all calls to this method in order
* to ensure custom request conditions can be combined and compared.
* <p>Consider extending {@link AbstractRequestCondition} for custom
* condition types and using {@link CompositeRequestCondition} to provide
* multiple custom conditions.
* @param method the handler method for which to create the condition
* @return the condition, or {@code null}
*/
protected @Nullable RequestCondition<?> getCustomMethodCondition(Method method) {
return null;
}
private void checkMultipleAnnotations(
AnnotatedElement element, List<AnnotationDescriptor> mappingDescriptors) {
if (logger.isWarnEnabled() && mappingDescriptors.size() > 1) {
logger.warn("Multiple @RequestMapping annotations found on %s, but only the first will be used: %s"
.formatted(element, mappingDescriptors));
}
}
private static void checkMultipleAnnotations(
AnnotatedElement element, @Nullable RequestMappingInfo info,
List<AnnotationDescriptor> mappingDescriptors, List<AnnotationDescriptor> exchangeDescriptors) {
Assert.state(info == null,
() -> "%s is annotated with @RequestMapping and @HttpExchange annotations, but only one is allowed: %s"
.formatted(element, Stream.of(mappingDescriptors, exchangeDescriptors).flatMap(List::stream).toList()));
Assert.state(exchangeDescriptors.size() == 1,
() -> "Multiple @HttpExchange annotations found on %s, but only one is allowed: %s"
.formatted(element, exchangeDescriptors));
}
/**
* Create a {@link RequestMappingInfo} from the supplied
* {@link RequestMapping @RequestMapping} annotation, meta-annotation,
* or synthesized result of merging annotation attributes within an
* annotation hierarchy.
*/
protected RequestMappingInfo createRequestMappingInfo(
RequestMapping requestMapping, @Nullable RequestCondition<?> customCondition) {
RequestMappingInfo.Builder builder = RequestMappingInfo
.paths(resolveEmbeddedValuesInPatterns(requestMapping.path()))
.methods(requestMapping.method())
.params(requestMapping.params())
.headers(requestMapping.headers())
.consumes(requestMapping.consumes())
.produces(requestMapping.produces())
.version(requestMapping.version())
.mappingName(requestMapping.name());
if (customCondition != null) {
builder.customCondition(customCondition);
}
return builder.options(this.config).build();
}
/**
* Create a {@link RequestMappingInfo} from the supplied
* {@link HttpExchange @HttpExchange} annotation, meta-annotation,
* or synthesized result of merging annotation attributes within an
* annotation hierarchy.
* @since 6.1
*/
protected RequestMappingInfo createRequestMappingInfo(
HttpExchange httpExchange, @Nullable RequestCondition<?> customCondition) {
RequestMappingInfo.Builder builder = RequestMappingInfo
.paths(resolveEmbeddedValuesInPatterns(toStringArray(httpExchange.value())))
.methods(toMethodArray(httpExchange.method()))
.consumes(toStringArray(httpExchange.contentType()))
.produces(httpExchange.accept())
.headers(httpExchange.headers());
if (customCondition != null) {
builder.customCondition(customCondition);
}
return builder.options(this.config).build();
}
/**
* Resolve placeholder values in the given array of patterns.
* @return a new array with updated patterns
*/
protected String[] resolveEmbeddedValuesInPatterns(String[] patterns) {
if (this.embeddedValueResolver == null) {
return patterns;
}
else {
String[] resolvedPatterns = new String[patterns.length];
for (int i = 0; i < patterns.length; i++) {
resolvedPatterns[i] = Objects.requireNonNull(this.embeddedValueResolver.resolveStringValue(patterns[i]));
}
return resolvedPatterns;
}
}
private static String[] toStringArray(String value) {
return (StringUtils.hasText(value) ? new String[] {value} : EMPTY_STRING_ARRAY);
}
private static RequestMethod[] toMethodArray(String method) {
return (StringUtils.hasText(method) ?
new RequestMethod[] {RequestMethod.valueOf(method)} : EMPTY_REQUEST_METHOD_ARRAY);
}
@Override
public void registerMapping(RequestMappingInfo mapping, Object handler, Method method) {
super.registerMapping(mapping, handler, method);
updateConsumesCondition(mapping, method);
}
/**
* {@inheritDoc}
* <p><strong>Note:</strong> To create the {@link RequestMappingInfo},
* please use {@link #getBuilderConfiguration()} and set the options on
* {@link RequestMappingInfo.Builder#options(RequestMappingInfo.BuilderConfiguration)}
* to match how this {@code HandlerMapping} is configured. This
* is important for example to ensure use of
* {@link org.springframework.web.util.pattern.PathPattern} or
* {@link org.springframework.util.PathMatcher} based matching.
* @param handler the bean name of the handler or the handler instance
* @param method the method to register
* @param mapping the mapping conditions associated with the handler method
*/
@Override
protected void registerHandlerMethod(Object handler, Method method, RequestMappingInfo mapping) {
super.registerHandlerMethod(handler, method, mapping);
updateConsumesCondition(mapping, method);
}
private void updateConsumesCondition(RequestMappingInfo info, Method method) {
ConsumesRequestCondition condition = info.getConsumesCondition();
if (!condition.isEmpty()) {
AnnotatedMethod annotatedMethod = new AnnotatedMethod(method);
for (MethodParameter parameter : annotatedMethod.getMethodParameters()) {
RequestBody requestBody = parameter.getParameterAnnotation(RequestBody.class);
if (requestBody != null) {
condition.setBodyRequired(requestBody.required());
break;
}
}
}
}
@SuppressWarnings("removal")
@Deprecated(since = "7.0", forRemoval = true)
@Override
public @Nullable RequestMatchResult match(HttpServletRequest request, String pattern) {
Assert.state(getPatternParser() == null, "This HandlerMapping uses PathPatterns.");
RequestMappingInfo info = RequestMappingInfo.paths(pattern).options(this.config).build();
RequestMappingInfo match = info.getMatchingCondition(request);
return (match != null && match.getPatternsCondition() != null ?
new RequestMatchResult(
match.getPatternsCondition().getPatterns().iterator().next(),
UrlPathHelper.getResolvedLookupPath(request),
getPathMatcher()) : null);
}
@Override
protected @Nullable CorsConfiguration initCorsConfiguration(Object handler, Method method, RequestMappingInfo mappingInfo) {
HandlerMethod handlerMethod = createHandlerMethod(handler, method);
Class<?> beanType = handlerMethod.getBeanType();
CrossOrigin typeAnnotation = AnnotatedElementUtils.findMergedAnnotation(beanType, CrossOrigin.class);
CrossOrigin methodAnnotation = AnnotatedElementUtils.findMergedAnnotation(method, CrossOrigin.class);
if (typeAnnotation == null && methodAnnotation == null) {
return null;
}
CorsConfiguration config = new CorsConfiguration();
updateCorsConfig(config, typeAnnotation);
updateCorsConfig(config, methodAnnotation);
if (CollectionUtils.isEmpty(config.getAllowedMethods())) {
for (RequestMethod allowedMethod : mappingInfo.getMethodsCondition().getMethods()) {
config.addAllowedMethod(allowedMethod.name());
}
}
return config.applyPermitDefaultValues();
}
private void updateCorsConfig(CorsConfiguration config, @Nullable CrossOrigin annotation) {
if (annotation == null) {
return;
}
for (String origin : annotation.origins()) {
config.addAllowedOrigin(resolveCorsAnnotationValue(origin));
}
for (String patterns : annotation.originPatterns()) {
config.addAllowedOriginPattern(resolveCorsAnnotationValue(patterns));
}
for (RequestMethod method : annotation.methods()) {
config.addAllowedMethod(method.name());
}
for (String header : annotation.allowedHeaders()) {
config.addAllowedHeader(resolveCorsAnnotationValue(header));
}
for (String header : annotation.exposedHeaders()) {
config.addExposedHeader(resolveCorsAnnotationValue(header));
}
String allowCredentials = resolveCorsAnnotationValue(annotation.allowCredentials());
if ("true".equalsIgnoreCase(allowCredentials)) {
config.setAllowCredentials(true);
}
else if ("false".equalsIgnoreCase(allowCredentials)) {
config.setAllowCredentials(false);
}
else if (!allowCredentials.isEmpty()) {
throw new IllegalStateException("@CrossOrigin's allowCredentials value must be \"true\", \"false\", " +
"or an empty string (\"\"): current value is [" + allowCredentials + "]");
}
String allowPrivateNetwork = resolveCorsAnnotationValue(annotation.allowPrivateNetwork());
if ("true".equalsIgnoreCase(allowPrivateNetwork)) {
config.setAllowPrivateNetwork(true);
}
else if ("false".equalsIgnoreCase(allowPrivateNetwork)) {
config.setAllowPrivateNetwork(false);
}
else if (!allowPrivateNetwork.isEmpty()) {
throw new IllegalStateException("@CrossOrigin's allowPrivateNetwork value must be \"true\", \"false\", " +
"or an empty string (\"\"): current value is [" + allowPrivateNetwork + "]");
}
if (annotation.maxAge() >= 0 ) {
config.setMaxAge(annotation.maxAge());
}
}
private String resolveCorsAnnotationValue(String value) {
if (this.embeddedValueResolver != null) {
String resolved = this.embeddedValueResolver.resolveStringValue(value);
return (resolved != null ? resolved : "");
}
else {
return value;
}
}
private static | RequestMappingHandlerMapping |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/EndpointDisabledTest.java | {
"start": 2826,
"end": 3127
} | class ____ {
        @GET
        public String get() {
            // Fixed payload so the test can assert which resource variant
            // answered. NOTE(review): the payload says "missing=true" while
            // the enclosing path says "missing-false" -- verify intentional.
            return "missing=true";
        }
}
@Path("other-dummy-disabled-missing-false")
@EndpointDisabled(name = "other.dummy.disabled", stringValue = "true", disableIfMissing = false)
public static | OtherDummyDisabledMissingTrue |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java | {
"start": 6063,
"end": 11849
} | class ____ extends SubjectInheritingThread {
static public final int N_ITERATIONS = 1024;
private static final double PROPORTION_NON_POSITIONAL_READ = 0.10;
private final TestFileInfo testInfo;
private final long fileSize;
private long bytesRead;
private boolean error;
private final ReadWorkerHelper helper;
ReadWorker(TestFileInfo testInfo, int id, ReadWorkerHelper helper) {
super("ReadWorker-" + id + "-" + testInfo.filepath.toString());
this.testInfo = testInfo;
this.helper = helper;
fileSize = testInfo.dis.getFileLength();
assertEquals(fileSize, testInfo.authenticData.length);
bytesRead = 0;
error = false;
}
/**
* Randomly do one of (1) Small read; and (2) Large Pread.
*/
@Override
public void work() {
for (int i = 0; i < N_ITERATIONS; ++i) {
int startOff = rand.nextInt((int) fileSize);
int len = 0;
try {
double p = rand.nextDouble();
if (p < PROPORTION_NON_POSITIONAL_READ) {
// Do a small regular read. Very likely this will leave unread
// data on the socket and make the socket uncacheable.
len = Math.min(rand.nextInt(64), (int) fileSize - startOff);
read(startOff, len);
bytesRead += len;
} else {
// Do a positional read most of the time.
len = rand.nextInt((int) (fileSize - startOff));
pRead(startOff, len);
bytesRead += len;
}
} catch (Throwable t) {
LOG.error(getName() + ": Error while testing read at " + startOff +
" length " + len, t);
error = true;
fail(t.getMessage());
}
}
}
public long getBytesRead() {
return bytesRead;
}
/**
* Raising error in a thread doesn't seem to fail the test.
* So check afterwards.
*/
public boolean hasError() {
return error;
}
static int readCount = 0;
/**
* Seek to somewhere random and read.
*/
private void read(int start, int len) throws Exception {
assertTrue(
start + len <= fileSize,
"Bad args: " + start + " + " + len + " should be <= " + fileSize);
readCount++;
DFSInputStream dis = testInfo.dis;
byte buf[] = new byte[len];
helper.read(dis, buf, start, len);
verifyData("Read data corrupted", buf, start, start + len);
}
/**
* Positional read.
*/
private void pRead(int start, int len) throws Exception {
assertTrue(
start + len <= fileSize,
"Bad args: " + start + " + " + len + " should be <= " + fileSize);
DFSInputStream dis = testInfo.dis;
byte buf[] = new byte[len];
helper.pRead(dis, buf, start, len);
verifyData("Pread data corrupted", buf, start, start + len);
}
/**
* Verify read data vs authentic data
*/
private void verifyData(String msg, byte actual[], int start, int end)
throws Exception {
byte auth[] = testInfo.authenticData;
if (end > auth.length) {
throw new Exception(msg + ": Actual array (" + end +
") is past the end of authentic data (" +
auth.length + ")");
}
int j = start;
for (int i = 0; i < actual.length; ++i, ++j) {
if (auth[j] != actual[i]) {
throw new Exception(msg + ": Arrays byte " + i + " (at offset " +
j + ") differs: expect " +
auth[j] + " got " + actual[i]);
}
}
}
}
/**
* Start the parallel read with the given parameters.
*/
boolean runParallelRead(int nFiles, int nWorkerEach, ReadWorkerHelper helper) throws IOException {
ReadWorker workers[] = new ReadWorker[nFiles * nWorkerEach];
TestFileInfo testInfoArr[] = new TestFileInfo[nFiles];
// Prepare the files and workers
int nWorkers = 0;
for (int i = 0; i < nFiles; ++i) {
TestFileInfo testInfo = new TestFileInfo();
testInfoArr[i] = testInfo;
testInfo.filepath = new Path("/TestParallelRead.dat." + i);
testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
dfsClient.getConf().getIoBufferSize(), verifyChecksums);
for (int j = 0; j < nWorkerEach; ++j) {
workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);
}
}
// Start the workers and wait
long starttime = Time.monotonicNow();
for (ReadWorker worker : workers) {
worker.start();
}
for (ReadWorker worker : workers) {
try {
worker.join();
} catch (InterruptedException ignored) { }
}
long endtime = Time.monotonicNow();
// Cleanup
for (TestFileInfo testInfo : testInfoArr) {
testInfo.dis.close();
}
// Report
boolean res = true;
long totalRead = 0;
for (ReadWorker worker : workers) {
long nread = worker.getBytesRead();
LOG.info("--- Report: " + worker.getName() + " read " + nread + " B; " +
"average " + nread / ReadWorker.N_ITERATIONS + " B per read");
totalRead += nread;
if (worker.hasError()) {
res = false;
}
}
double timeTakenSec = (endtime - starttime) / 1000.0;
long totalReadKB = totalRead / 1024;
LOG.info("=== Report: " + nWorkers + " threads read " +
totalReadKB + " KB (across " +
nFiles + " file(s)) in " +
timeTakenSec + "s; average " +
totalReadKB / timeTakenSec + " KB/s");
return res;
}
/**
* Runs a standard workload using a helper | ReadWorker |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/MulticastParallelStreamingTimeoutTest.java | {
"start": 1463,
"end": 3054
} | class ____ extends ContextTestSupport {
@Test
public void testMulticastParallelStreamingTimeout() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
// A will timeout so we only get B and C (C is faster than B)
mock.expectedBodiesReceived("CB");
template.sendBody("direct:start", "Hello");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").multicast(new AggregationStrategy() {
public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
if (oldExchange == null) {
return newExchange;
}
String body = oldExchange.getIn().getBody(String.class);
oldExchange.getIn().setBody(body + newExchange.getIn().getBody(String.class));
return oldExchange;
}
}).parallelProcessing().streaming().timeout(2000).to("direct:a", "direct:b", "direct:c")
// use end to indicate end of multicast route
.end().to("mock:result");
from("direct:a").delay(3000).setBody(constant("A"));
from("direct:b").delay(500).setBody(constant("B"));
from("direct:c").setBody(constant("C"));
}
};
}
}
| MulticastParallelStreamingTimeoutTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/search/aggregate/AggregationOptions.java | {
"start": 736,
"end": 1398
} | class ____ extends AggregationBaseOptions<AggregationOptions> {
private AggregationOptions() {
}
public static AggregationOptions defaults() {
return new AggregationOptions();
}
public AggregationOptions withCursor() {
withCursor = true;
return this;
}
public AggregationOptions withCursor(int count) {
withCursor = true;
cursorCount = count;
return this;
}
public AggregationOptions withCursor(int count, int maxIdle) {
withCursor = true;
cursorCount = count;
cursorMaxIdle = Duration.ofMillis(maxIdle);
return this;
}
}
| AggregationOptions |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java | {
"start": 122113,
"end": 122460
} | class ____ extends Root {
@Order(25)
public void annotatedOnLeaf() {
}
@Meta1
public void metaAnnotatedOnLeaf() {
}
@MetaMeta
public void metaMetaAnnotatedOnLeaf() {
}
@Override
@Order(1)
public void overrideToAnnotate() {
}
@Override
public void overrideWithoutNewAnnotation() {
}
}
public abstract static | Leaf |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/benchmarks/ContextBenchmark.java | {
"start": 1149,
"end": 1778
} | class ____ {
Vertx vertx;
ContextInternal context;
Handler<Void> task;
@Setup
public void setup() {
vertx = Vertx.vertx(new VertxOptions().setDisableTCCL(true));
context = BenchmarkContext.create(vertx);
task = v -> consume("the-string");
}
}
@Benchmark
public void runOnContext(BaselineState state) {
state.context.runOnContext(state.task);
}
@Benchmark
@Fork(jvmArgsAppend = { "-Dvertx.threadChecks=false", "-Dvertx.disableContextTimings=true" })
public void runOnContextNoChecks(BaselineState state) {
state.context.runOnContext(state.task);
}
}
| BaselineState |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NlpInferenceResults.java | {
"start": 689,
"end": 2744
} | class ____ implements InferenceResults {
protected final boolean isTruncated;
NlpInferenceResults(boolean isTruncated) {
this.isTruncated = isTruncated;
}
NlpInferenceResults(StreamInput in) throws IOException {
this.isTruncated = in.readBoolean();
}
abstract void doXContentBody(XContentBuilder builder, Params params) throws IOException;
abstract void doWriteTo(StreamOutput out) throws IOException;
abstract void addMapFields(Map<String, Object> map);
public boolean isTruncated() {
return isTruncated;
}
@Override
public final void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(isTruncated);
doWriteTo(out);
}
@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
doXContentBody(builder, params);
if (isTruncated) {
builder.field("is_truncated", isTruncated);
}
return builder;
}
@Override
public final Map<String, Object> asMap() {
Map<String, Object> map = new LinkedHashMap<>();
addMapFields(map);
addSupportingFieldsToMap(map);
return map;
}
@Override
public Map<String, Object> asMap(String outputField) {
Map<String, Object> map = new LinkedHashMap<>();
addSupportingFieldsToMap(map);
return map;
}
private void addSupportingFieldsToMap(Map<String, Object> map) {
if (isTruncated) {
map.put("is_truncated", isTruncated);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NlpInferenceResults that = (NlpInferenceResults) o;
return isTruncated == that.isTruncated;
}
@Override
public int hashCode() {
return Objects.hash(isTruncated);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
| NlpInferenceResults |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoUtils.java | {
"start": 1260,
"end": 5177
} | class ____ {
/**
* Tries to copy the given record from using the provided Kryo instance. If this fails, then the
* record from is copied by serializing it into a byte buffer and deserializing it from there.
*
* @param from Element to copy
* @param kryo Kryo instance to use
* @param serializer TypeSerializer which is used in case of a Kryo failure
* @param <T> Type of the element to be copied
* @return Copied element
*/
public static <T> T copy(T from, Kryo kryo, TypeSerializer<T> serializer) {
try {
return kryo.copy(from);
} catch (KryoException ke) {
// Kryo could not copy the object --> try to serialize/deserialize the object
try {
byte[] byteArray = InstantiationUtil.serializeToByteArray(serializer, from);
return InstantiationUtil.deserializeFromByteArray(serializer, byteArray);
} catch (IOException ioe) {
throw new RuntimeException(
"Could not copy object by serializing/deserializing" + " it.", ioe);
}
}
}
/**
* Tries to copy the given record from using the provided Kryo instance. If this fails, then the
* record from is copied by serializing it into a byte buffer and deserializing it from there.
*
* @param from Element to copy
* @param reuse Reuse element for the deserialization
* @param kryo Kryo instance to use
* @param serializer TypeSerializer which is used in case of a Kryo failure
* @param <T> Type of the element to be copied
* @return Copied element
*/
public static <T> T copy(T from, T reuse, Kryo kryo, TypeSerializer<T> serializer) {
try {
return kryo.copy(from);
} catch (KryoException ke) {
// Kryo could not copy the object --> try to serialize/deserialize the object
try {
byte[] byteArray = InstantiationUtil.serializeToByteArray(serializer, from);
return InstantiationUtil.deserializeFromByteArray(serializer, reuse, byteArray);
} catch (IOException ioe) {
throw new RuntimeException(
"Could not copy object by serializing/deserializing" + " it.", ioe);
}
}
}
/**
* Apply a list of {@link KryoRegistration} to a Kryo instance. The list of registrations is
* assumed to already be a final resolution of all possible registration overwrites.
*
* <p>The registrations are applied in the given order and always specify the registration id,
* using the given {@code firstRegistrationId} and incrementing it for each registration.
*
* @param kryo the Kryo instance to apply the registrations
* @param resolvedRegistrations the registrations, which should already be resolved of all
* possible registration overwrites
* @param firstRegistrationId the first registration id to use
*/
public static void applyRegistrations(
Kryo kryo,
Collection<KryoRegistration> resolvedRegistrations,
int firstRegistrationId) {
int currentRegistrationId = firstRegistrationId;
Serializer<?> serializer;
for (KryoRegistration registration : resolvedRegistrations) {
serializer = registration.getSerializer(kryo);
if (serializer != null) {
kryo.register(registration.getRegisteredClass(), serializer, currentRegistrationId);
} else {
kryo.register(registration.getRegisteredClass(), currentRegistrationId);
}
// if Kryo already had a serializer for that type then it ignores the registration
if (kryo.getRegistration(currentRegistrationId) != null) {
currentRegistrationId++;
}
}
}
}
| KryoUtils |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/ConfigMapVolumeConverter.java | {
"start": 334,
"end": 1410
} | class ____ {
public static ConfigMapVolume convert(Map.Entry<String, ConfigMapVolumeConfig> e) {
return convert(e.getValue()).withVolumeName(e.getKey()).build();
}
public static ConfigMapVolumeBuilder convert(ConfigMapVolumeConfig cm) {
ConfigMapVolumeBuilder b = new ConfigMapVolumeBuilder();
b.withConfigMapName(cm.configMapName());
b.withDefaultMode(FilePermissionUtil.parseInt(cm.defaultMode()));
b.withOptional(cm.optional());
if (cm.items() != null && !cm.items().isEmpty()) {
List<Item> items = new ArrayList<>(cm.items().size());
for (Map.Entry<String, VolumeItemConfig> item : cm.items().entrySet()) {
items.add(new ItemBuilder()
.withKey(item.getKey())
.withPath(item.getValue().path())
.withMode(item.getValue().mode())
.build());
}
b.withItems(items.toArray(new Item[items.size()]));
}
return b;
}
}
| ConfigMapVolumeConverter |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/PropertySourcesPlaceholdersResolverTests.java | {
"start": 1348,
"end": 3279
} | class ____ {
private @Nullable PropertySourcesPlaceholdersResolver resolver;
@Test
@SuppressWarnings("NullAway") // Test null check
void placeholderResolverIfEnvironmentNullShouldThrowException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new PropertySourcesPlaceholdersResolver((Environment) null))
.withMessageContaining("'environment' must not be null");
}
@Test
void resolveIfPlaceholderPresentResolvesProperty() {
MutablePropertySources sources = getPropertySources();
this.resolver = new PropertySourcesPlaceholdersResolver(sources);
Object resolved = this.resolver.resolvePlaceholders("${FOO}");
assertThat(resolved).isEqualTo("hello world");
}
@Test
void resolveIfPlaceholderAbsentUsesDefault() {
this.resolver = new PropertySourcesPlaceholdersResolver((PropertySources) null);
Object resolved = this.resolver.resolvePlaceholders("${FOO:bar}");
assertThat(resolved).isEqualTo("bar");
}
@Test
void resolveIfPlaceholderAbsentAndNoDefaultUsesPlaceholder() {
this.resolver = new PropertySourcesPlaceholdersResolver((PropertySources) null);
Object resolved = this.resolver.resolvePlaceholders("${FOO}");
assertThat(resolved).isEqualTo("${FOO}");
}
@Test
void resolveIfHelperPresentShouldUseIt() {
MutablePropertySources sources = getPropertySources();
TestPropertyPlaceholderHelper helper = new TestPropertyPlaceholderHelper("$<", ">");
this.resolver = new PropertySourcesPlaceholdersResolver(sources, helper);
Object resolved = this.resolver.resolvePlaceholders("$<FOO>");
assertThat(resolved).isEqualTo("hello world");
}
private MutablePropertySources getPropertySources() {
MutablePropertySources sources = new MutablePropertySources();
Map<String, Object> source = new HashMap<>();
source.put("FOO", "hello world");
sources.addFirst(new MapPropertySource("test", source));
return sources;
}
static | PropertySourcesPlaceholdersResolverTests |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/h12/http2/GenericHttp2ServerTransportListener.java | {
"start": 8814,
"end": 9223
} | class ____ implements HttpMessageListener {
private final StreamingDecoder streamingDecoder;
StreamingHttpMessageListener(StreamingDecoder streamingDecoder) {
this.streamingDecoder = streamingDecoder;
}
@Override
public void onMessage(InputStream inputStream) {
streamingDecoder.decode(inputStream);
}
}
}
| StreamingHttpMessageListener |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateNamenodeRegistrationRequestPBImpl.java | {
"start": 1600,
"end": 3412
} | class ____
extends UpdateNamenodeRegistrationRequest implements PBRecord {
private FederationProtocolPBTranslator<
UpdateNamenodeRegistrationRequestProto,
UpdateNamenodeRegistrationRequestProto.Builder,
UpdateNamenodeRegistrationRequestProtoOrBuilder> translator =
new FederationProtocolPBTranslator<
UpdateNamenodeRegistrationRequestProto,
UpdateNamenodeRegistrationRequestProto.Builder,
UpdateNamenodeRegistrationRequestProtoOrBuilder>(
UpdateNamenodeRegistrationRequestProto.class);
public UpdateNamenodeRegistrationRequestPBImpl() {
}
@Override
public UpdateNamenodeRegistrationRequestProto getProto() {
return this.translator.build();
}
@Override
public void setProto(Message protocol) {
this.translator.setProto(protocol);
}
@Override
public void readInstance(String base64String) throws IOException {
this.translator.readInstance(base64String);
}
@Override
public String getNameserviceId() {
return this.translator.getProtoOrBuilder().getNameserviceId();
}
@Override
public String getNamenodeId() {
return this.translator.getProtoOrBuilder().getNamenodeId();
}
@Override
public FederationNamenodeServiceState getState() {
return FederationNamenodeServiceState
.valueOf(this.translator.getProtoOrBuilder().getState());
}
@Override
public void setNameserviceId(String nsId) {
this.translator.getBuilder().setNameserviceId(nsId);
}
@Override
public void setNamenodeId(String nnId) {
this.translator.getBuilder().setNamenodeId(nnId);
}
@Override
public void setState(FederationNamenodeServiceState state) {
this.translator.getBuilder().setState(state.toString());
}
} | UpdateNamenodeRegistrationRequestPBImpl |
java | apache__hadoop | hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/SimulatedDataNodes.java | {
"start": 2771,
"end": 2863
} | class ____ loosely based off of
* {@link org.apache.hadoop.hdfs.DataNodeCluster}.
*/
public | is |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java | {
"start": 2549,
"end": 21580
} | class ____
implements AccessExecutionVertex, Archiveable<ArchivedExecutionVertex> {
public static final long NUM_BYTES_UNKNOWN = -1;
// --------------------------------------------------------------------------------------------
final ExecutionJobVertex jobVertex;
private final Map<IntermediateResultPartitionID, IntermediateResultPartition> resultPartitions;
private final int subTaskIndex;
private final ExecutionVertexID executionVertexId;
final ExecutionHistory executionHistory;
private final Duration timeout;
/** The name in the format "myTask (2/7)", cached to avoid frequent string concatenations. */
private final String taskNameWithSubtask;
/** The current or latest execution attempt of this vertex's task. */
Execution currentExecution; // this field must never be null
final ArrayList<InputSplit> inputSplits;
private int nextAttemptNumber;
private long inputBytes;
/** This field holds the allocation id of the last successful assignment. */
@Nullable private TaskManagerLocation lastAssignedLocation;
@Nullable private AllocationID lastAssignedAllocationID;
// --------------------------------------------------------------------------------------------
/**
* Creates an ExecutionVertex.
*
* @param timeout The RPC timeout to use for deploy / cancel calls
* @param createTimestamp The timestamp for the vertex creation, used to initialize the first
* Execution with.
* @param executionHistorySizeLimit The maximum number of historical Executions (= execution
* attempts) to keep.
* @param initialAttemptCount The attempt number of the first execution of this vertex.
*/
@VisibleForTesting
public ExecutionVertex(
ExecutionJobVertex jobVertex,
int subTaskIndex,
IntermediateResult[] producedDataSets,
Duration timeout,
long createTimestamp,
int executionHistorySizeLimit,
int initialAttemptCount) {
this.jobVertex = jobVertex;
this.subTaskIndex = subTaskIndex;
this.executionVertexId = new ExecutionVertexID(jobVertex.getJobVertexId(), subTaskIndex);
this.taskNameWithSubtask =
String.format(
"%s (%d/%d)",
jobVertex.getJobVertex().getName(),
subTaskIndex + 1,
jobVertex.getParallelism());
this.resultPartitions = new LinkedHashMap<>(producedDataSets.length, 1);
for (IntermediateResult result : producedDataSets) {
IntermediateResultPartition irp =
new IntermediateResultPartition(
result,
this,
subTaskIndex,
getExecutionGraphAccessor().getEdgeManager());
result.setPartition(subTaskIndex, irp);
resultPartitions.put(irp.getPartitionId(), irp);
}
this.executionHistory = new ExecutionHistory(executionHistorySizeLimit);
this.nextAttemptNumber = initialAttemptCount;
this.inputBytes = NUM_BYTES_UNKNOWN;
this.timeout = timeout;
this.inputSplits = new ArrayList<>();
this.currentExecution = createNewExecution(createTimestamp);
getExecutionGraphAccessor().registerExecution(currentExecution);
}
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
Execution createNewExecution(final long timestamp) {
return new Execution(
getExecutionGraphAccessor().getFutureExecutor(),
this,
nextAttemptNumber++,
timestamp,
timeout);
}
public ExecutionVertexInputInfo getExecutionVertexInputInfo(IntermediateDataSetID resultId) {
return getExecutionGraphAccessor()
.getJobVertexInputInfo(getJobvertexId(), resultId)
.getExecutionVertexInputInfos()
.get(subTaskIndex);
}
public void setInputBytes(long inputBytes) {
this.inputBytes = inputBytes;
}
public long getInputBytes() {
return inputBytes;
}
public Execution getPartitionProducer() {
return currentExecution;
}
public JobID getJobId() {
return this.jobVertex.getJobId();
}
public ExecutionJobVertex getJobVertex() {
return jobVertex;
}
public JobVertexID getJobvertexId() {
return this.jobVertex.getJobVertexId();
}
public String getTaskName() {
return this.jobVertex.getJobVertex().getName();
}
/**
* Creates a simple name representation in the style 'taskname (x/y)', where 'taskname' is the
* name as returned by {@link #getTaskName()}, 'x' is the parallel subtask index as returned by
* {@link #getParallelSubtaskIndex()}{@code + 1}, and 'y' is the total number of tasks, as
* returned by {@link #getTotalNumberOfParallelSubtasks()}.
*
* @return A simple name representation in the form 'myTask (2/7)'
*/
@Override
public String getTaskNameWithSubtaskIndex() {
return this.taskNameWithSubtask;
}
public int getTotalNumberOfParallelSubtasks() {
return this.jobVertex.getParallelism();
}
public int getMaxParallelism() {
return this.jobVertex.getMaxParallelism();
}
public ResourceProfile getResourceProfile() {
return this.jobVertex.getResourceProfile();
}
@Override
public int getParallelSubtaskIndex() {
return this.subTaskIndex;
}
public ExecutionVertexID getID() {
return executionVertexId;
}
public int getNumberOfInputs() {
return getAllConsumedPartitionGroups().size();
}
public List<ConsumedPartitionGroup> getAllConsumedPartitionGroups() {
return getExecutionGraphAccessor()
.getEdgeManager()
.getConsumedPartitionGroupsForVertex(executionVertexId);
}
public ConsumedPartitionGroup getConsumedPartitionGroup(int input) {
final List<ConsumedPartitionGroup> allConsumedPartitions = getAllConsumedPartitionGroups();
if (input < 0 || input >= allConsumedPartitions.size()) {
throw new IllegalArgumentException(
String.format(
"Input %d is out of range [0..%d)",
input, allConsumedPartitions.size()));
}
return allConsumedPartitions.get(input);
}
public Optional<InputSplit> getNextInputSplit(String host, int attemptNumber) {
final int subtaskIndex = getParallelSubtaskIndex();
final InputSplit nextInputSplit =
jobVertex.getSplitAssigner().getNextInputSplit(host, subtaskIndex);
if (nextInputSplit != null) {
inputSplits.add(nextInputSplit);
}
return Optional.ofNullable(nextInputSplit);
}
@Override
public Execution getCurrentExecutionAttempt() {
return currentExecution;
}
public Collection<Execution> getCurrentExecutions() {
return Collections.singleton(currentExecution);
}
public Execution getCurrentExecution(int attemptNumber) {
checkArgument(attemptNumber == currentExecution.getAttemptNumber());
return currentExecution;
}
@Override
public ExecutionState getExecutionState() {
return getCurrentExecutionAttempt().getState();
}
@Override
public long getStateTimestamp(ExecutionState state) {
return getCurrentExecutionAttempt().getStateTimestamp(state);
}
@Override
public Optional<ErrorInfo> getFailureInfo() {
return getCurrentExecutionAttempt().getFailureInfo();
}
public CompletableFuture<TaskManagerLocation> getCurrentTaskManagerLocationFuture() {
return getCurrentExecutionAttempt().getTaskManagerLocationFuture();
}
public LogicalSlot getCurrentAssignedResource() {
return getCurrentExecutionAttempt().getAssignedResource();
}
@Override
public TaskManagerLocation getCurrentAssignedResourceLocation() {
return getCurrentExecutionAttempt().getAssignedResourceLocation();
}
@Override
public ExecutionHistory getExecutionHistory() {
return executionHistory;
}
void setLatestPriorSlotAllocation(
TaskManagerLocation taskManagerLocation, AllocationID lastAssignedAllocationID) {
this.lastAssignedLocation = Preconditions.checkNotNull(taskManagerLocation);
this.lastAssignedAllocationID = Preconditions.checkNotNull(lastAssignedAllocationID);
}
/**
* Gets the location that an execution of this vertex was assigned to.
*
* @return The last execution location, or null, if there is none, yet.
*/
public Optional<TaskManagerLocation> findLastLocation() {
return Optional.ofNullable(lastAssignedLocation);
}
public Optional<AllocationID> findLastAllocation() {
return Optional.ofNullable(lastAssignedAllocationID);
}
public final InternalExecutionGraphAccessor getExecutionGraphAccessor() {
return this.jobVertex.getGraph();
}
public Map<IntermediateResultPartitionID, IntermediateResultPartition> getProducedPartitions() {
return resultPartitions;
}
CompletableFuture<?> getTerminationFuture() {
return currentExecution.getTerminalStateFuture();
}
// --------------------------------------------------------------------------------------------
// Graph building
// --------------------------------------------------------------------------------------------
public void addConsumedPartitionGroup(ConsumedPartitionGroup consumedPartitions) {
getExecutionGraphAccessor()
.getEdgeManager()
.connectVertexWithConsumedPartitionGroup(executionVertexId, consumedPartitions);
}
/**
* Gets the preferred location to execute the current task execution attempt, based on the state
* that the execution attempt will resume.
*/
public Optional<TaskManagerLocation> getPreferredLocationBasedOnState() {
// only restore to same execution if it has state
if (currentExecution.getTaskRestore() != null
&& currentExecution.getTaskRestore().getTaskStateSnapshot().hasState()) {
return findLastLocation();
}
return Optional.empty();
}
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
/** Archives the current Execution and creates a new Execution for this vertex. */
public void resetForNewExecution() {
resetForNewExecutionInternal(System.currentTimeMillis());
}
private void resetForNewExecutionInternal(final long timestamp) {
final boolean isFinished = (getExecutionState() == FINISHED);
resetExecutionsInternal();
InputSplitAssigner assigner = jobVertex.getSplitAssigner();
if (assigner != null) {
assigner.returnInputSplit(inputSplits, getParallelSubtaskIndex());
inputSplits.clear();
}
// if the execution was 'FINISHED' before, tell the ExecutionGraph that
// we take one step back on the road to reaching global FINISHED
if (isFinished) {
getJobVertex().executionVertexUnFinished();
}
// reset the intermediate results
for (IntermediateResultPartition resultPartition : resultPartitions.values()) {
resultPartition.resetForNewExecution();
}
final Execution newExecution = createNewExecution(timestamp);
currentExecution = newExecution;
// register this execution to the execution graph, to receive call backs
getExecutionGraphAccessor().registerExecution(newExecution);
}
void resetExecutionsInternal() {
resetExecution(currentExecution);
}
void resetExecution(final Execution execution) {
final ExecutionState oldState = execution.getState();
checkState(
oldState.isTerminal(),
"Cannot reset an execution that is in non-terminal state " + oldState);
if (oldState == FINISHED) {
// pipelined partitions are released in Execution#cancel(), covering both job
// failures and vertex resets
// do not release pipelined partitions here to save RPC calls
execution.handlePartitionCleanup(false, true);
getExecutionGraphAccessor()
.getPartitionGroupReleaseStrategy()
.vertexUnfinished(executionVertexId);
}
executionHistory.add(execution.archive());
}
public void tryAssignResource(LogicalSlot slot) {
if (!currentExecution.tryAssignResource(slot)) {
throw new IllegalStateException(
"Could not assign resource "
+ slot
+ " to current execution "
+ currentExecution
+ '.');
}
}
public void deploy() throws JobException {
currentExecution.deploy();
}
@VisibleForTesting
public void deployToSlot(LogicalSlot slot) throws JobException {
if (currentExecution.tryAssignResource(slot)) {
currentExecution.deploy();
} else {
throw new IllegalStateException(
"Could not assign resource "
+ slot
+ " to current execution "
+ currentExecution
+ '.');
}
}
/**
* Cancels this ExecutionVertex.
*
* @return A future that completes once the execution has reached its final state.
*/
public CompletableFuture<?> cancel() {
// to avoid any case of mixup in the presence of concurrent calls,
// we copy a reference to the stack to make sure both calls go to the same Execution
final Execution exec = currentExecution;
exec.cancel();
return exec.getReleaseFuture();
}
public CompletableFuture<?> suspend() {
return currentExecution.suspend();
}
public void fail(Throwable t) {
currentExecution.fail(t);
}
/**
* This method marks the task as failed, but will make no attempt to remove task execution from
* the task manager. It is intended for cases where the task is known not to be deployed yet.
*
* @param t The exception that caused the task to fail.
*/
public void markFailed(Throwable t) {
currentExecution.markFailed(t);
}
void cachePartitionInfo(PartitionInfo partitionInfo) {
getCurrentExecutionAttempt().cachePartitionInfo(partitionInfo);
}
/**
* Mark partition finished if needed.
*
* @return list of finished partitions.
*/
@VisibleForTesting
public List<IntermediateResultPartition> finishPartitionsIfNeeded() {
List<IntermediateResultPartition> finishedPartitions = null;
MarkPartitionFinishedStrategy markPartitionFinishedStrategy =
getExecutionGraphAccessor().getMarkPartitionFinishedStrategy();
for (IntermediateResultPartition partition : resultPartitions.values()) {
if (markPartitionFinishedStrategy.needMarkPartitionFinished(
partition.getResultType())) {
partition.markFinished();
if (finishedPartitions == null) {
finishedPartitions = new LinkedList<>();
}
finishedPartitions.add(partition);
}
}
if (finishedPartitions == null) {
return Collections.emptyList();
} else {
return finishedPartitions;
}
}
// --------------------------------------------------------------------------------------------
// Notifications from the Execution Attempt
// --------------------------------------------------------------------------------------------
void executionFinished(Execution execution) {
getJobVertex().executionVertexFinished();
}
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
void notifyPendingDeployment(Execution execution) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor()
.getExecutionDeploymentListener()
.onStartedDeployment(
execution.getAttemptId(),
execution.getAssignedResourceLocation().getResourceID());
}
}
void notifyCompletedDeployment(Execution execution) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor()
.getExecutionDeploymentListener()
.onCompletedDeployment(execution.getAttemptId());
}
}
/** Simply forward this notification. */
void notifyStateTransition(
Execution execution, ExecutionState previousState, ExecutionState newState) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor().notifyExecutionChange(execution, previousState, newState);
}
}
private boolean isCurrentExecution(Execution execution) {
return currentExecution == execution;
}
@VisibleForTesting
public CompletableFuture<Void> getTddCreationDuringDeployFuture() {
return currentExecution.getTddCreationDuringDeployFuture();
}
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
@Override
public String toString() {
return getTaskNameWithSubtaskIndex();
}
@Override
public ArchivedExecutionVertex archive() {
return new ArchivedExecutionVertex(this);
}
}
| ExecutionVertex |
java | apache__kafka | coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorTimer.java | {
"start": 1530,
"end": 3947
} | interface ____<T, U> {
CoordinatorResult<T, U> generateRecords() throws KafkaException;
}
/**
* Add an operation to the timer. If an operation with the same key
* already exists, replace it with the new operation.
*
* @param key The key to identify this operation.
* @param delay The delay to wait before expiring.
* @param unit The delay unit.
* @param retry A boolean indicating whether the operation should
* be retried on failure.
* @param operation The operation to perform upon expiration.
*/
void schedule(String key, long delay, TimeUnit unit, boolean retry, TimeoutOperation<T, U> operation);
/**
* Add an operation to the timer. If an operation with the same key
* already exists, replace it with the new operation.
*
* @param key The key to identify this operation.
* @param delay The delay to wait before expiring.
* @param unit The delay unit.
* @param retry A boolean indicating whether the operation should
* be retried on failure.
* @param retryBackoff The delay when rescheduled on retry. The same unit is used.
* @param operation The operation to perform upon expiration.
*/
void schedule(String key, long delay, TimeUnit unit, boolean retry, long retryBackoff, TimeoutOperation<T, U> operation);
/**
* Add an operation to the timer if there's no operation with the same key.
*
* @param key The key to identify this operation.
* @param delay The delay to wait before expiring.
* @param unit The delay unit.
* @param retry A boolean indicating whether the operation should
* be retried on failure.
* @param operation The operation to perform upon expiration.
*/
void scheduleIfAbsent(String key, long delay, TimeUnit unit, boolean retry, TimeoutOperation<T, U> operation);
/**
* Remove an operation corresponding to a given key.
*
* @param key The key.
*/
void cancel(String key);
/**
* Check if an operation with the given key is scheduled.
*
* @param key The key.
* @return True if an operation with the key is scheduled, false otherwise.
*/
boolean isScheduled(String key);
}
| TimeoutOperation |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/federation/base/uni/GraphQLFederationBaseUniTest.java | {
"start": 568,
"end": 2962
} | class ____ extends AbstractGraphQLTest {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(FooApiUni.class, Foo.class)
.addAsResource(new StringAsset("quarkus.smallrye-graphql.schema-include-directives=true"),
"application.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"));
@Test
public void checkServiceDeclarationInSchema() {
RestAssured.given()
.get("/graphql/schema.graphql")
.then()
.body(containsString("type _Service {"));
}
@Test
public void checkFederationDirectivesInSchema() {
RestAssured.given()
.get("/graphql/schema.graphql")
.then()
.body(containsString("id: Int! @external"))
.body(containsString("type Foo @extends @key(fields : \"id\")"));
;
}
@Test
public void resolvePerFederation() {
String query = "query federation($representations: [_Any!]!) {\n" +
" _entities(representations: $representations) {\n" +
" ... on Foo {\n" +
" id\n" +
" name\n" +
" }\n" +
" }\n" +
"}";
String variables = "{\n" +
" \"representations\": [\n" +
" {\n" +
" \"__typename\": \"Foo\",\n" +
" \"id\": 1\n" +
" },\n" +
" {\n" +
" \"__typename\": \"Foo\",\n" +
" \"id\": 2\n" +
" }\n" +
" ]\n" +
"}";
String request = getPayload(query, variables);
RestAssured.given().when()
.accept(MEDIATYPE_JSON)
.contentType(MEDIATYPE_JSON)
.body(request)
.post("/graphql")
.then()
.assertThat()
.statusCode(200)
.and()
.body(CoreMatchers.is(
"{\"data\":{\"_entities\":[{\"id\":1,\"name\":\"Name of 1\"},{\"id\":2,\"name\":\"Name of 2\"}]}}"));
}
}
| GraphQLFederationBaseUniTest |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/attribute/RequestMethodAttribute.java | {
"start": 200,
"end": 1340
} | class ____ implements ExchangeAttribute {
public static final String REQUEST_METHOD_SHORT = "%m";
public static final String REQUEST_METHOD = "%{METHOD}";
public static final String ORIGINAL_REQUEST_METHOD_SHORT = "%<m";
public static final String ORIGINAL_REQUEST_METHOD = "%{<METHOD}";
public static final ExchangeAttribute INSTANCE = new RequestMethodAttribute(false);
public static final ExchangeAttribute INSTANCE_ORIGINAL_REQUEST = new RequestMethodAttribute(true);
private final boolean useOriginalRequest;
private RequestMethodAttribute(boolean useOriginalRequest) {
this.useOriginalRequest = useOriginalRequest;
}
@Override
public String readAttribute(final RoutingContext exchange) {
return useOriginalRequest ? OriginalRequestContext.getMethod(exchange).name() : exchange.request().method().name();
}
@Override
public void writeAttribute(final RoutingContext exchange, final String newValue) throws ReadOnlyAttributeException {
throw new ReadOnlyAttributeException("Request method", newValue);
}
public static final | RequestMethodAttribute |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3711/SourceTargetMapper.java | {
"start": 274,
"end": 472
} | interface ____ extends BaseMapper<ParentDto, ParentEntity> {
SourceTargetMapper INSTANCE = Mappers.getMapper( SourceTargetMapper.class );
ParentEntity toDTO(ParentDto dto);
}
| SourceTargetMapper |
java | apache__hadoop | hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java | {
"start": 1285,
"end": 1978
} | class ____
extends AbstractContractGetFileStatusTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new AliyunOSSContract(conf);
}
@AfterEach
@Override
public void teardown() throws Exception {
getLogger().info("FS details {}", getFileSystem());
super.teardown();
}
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
AliyunOSSTestUtils.disableFilesystemCaching(conf);
conf.setInt(Constants.MAX_PAGING_KEYS_KEY, 2);
// Use v1 List Objects API
conf.setInt(Constants.LIST_VERSION, 1);
return conf;
}
}
| TestAliyunOSSContractGetFileStatusV1List |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/InterceptionProxyInfo.java | {
"start": 432,
"end": 1225
} | class ____ known.
*/
InterceptionProxyInfo(DotName bindingsSourceClass) {
this(DotName.OBJECT_NAME, bindingsSourceClass);
}
InterceptionProxyInfo(DotName targetClass, DotName bindingsSourceClass) {
this.targetClass = Objects.requireNonNull(targetClass);
this.bindingsSourceClass = bindingsSourceClass;
}
void init(BeanInfo pseudoBean) {
this.pseudoBean = pseudoBean;
}
DotName getTargetClass() {
return targetClass;
}
DotName getBindingsSourceClass() {
return bindingsSourceClass;
}
/**
* Note that this method only returns non-{@code null} value
* <em>after</em> {@link BeanDeployment#init(Consumer, List)}.
*/
BeanInfo getPseudoBean() {
return pseudoBean;
}
}
| is |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/spi/AliasCollector.java | {
"start": 631,
"end": 1717
} | class ____ extends AbstractSqlAstWalker {
private final Map<String, TableReference> tableReferenceMap = new HashMap<>();
public static Map<String, TableReference> getTableReferences(SqlAstNode node) {
final AliasCollector aliasCollector = new AliasCollector();
node.accept( aliasCollector );
return aliasCollector.tableReferenceMap;
}
@Override
public void visitNamedTableReference(NamedTableReference tableReference) {
tableReferenceMap.put( tableReference.getIdentificationVariable(), tableReference );
}
@Override
public void visitValuesTableReference(ValuesTableReference tableReference) {
tableReferenceMap.put( tableReference.getIdentificationVariable(), tableReference );
}
@Override
public void visitQueryPartTableReference(QueryPartTableReference tableReference) {
tableReferenceMap.put( tableReference.getIdentificationVariable(), tableReference );
}
@Override
public void visitFunctionTableReference(FunctionTableReference tableReference) {
tableReferenceMap.put( tableReference.getIdentificationVariable(), tableReference );
}
}
| AliasCollector |
java | apache__camel | components/camel-test/camel-test-junit5/src/main/java/org/apache/camel/test/junit5/params/ParameterizedExtension.java | {
"start": 1900,
"end": 4538
} | class ____ implements TestTemplateInvocationContextProvider {
@Override
public boolean supportsTestTemplate(ExtensionContext context) {
return context.getTestMethod()
.map(m -> isAnnotated(m, Test.class))
.orElse(false);
}
@Override
public java.util.stream.Stream<TestTemplateInvocationContext> provideTestTemplateInvocationContexts(
ExtensionContext extensionContext) {
Class<?> testClass = extensionContext.getRequiredTestClass();
try {
List<Method> parameters = getParametersMethods(testClass);
if (parameters.size() != 1) {
throw new IllegalStateException(
"Class " + testClass.getName() + " should provide a single method annotated with @"
+ Parameters.class.getSimpleName());
}
Object params = parameters.iterator().next().invoke(null);
return CollectionUtils.toStream(params)
.map(ParameterizedExtension::toArguments)
.map(Arguments::get)
.map(ParameterizedTemplate::new);
} catch (Exception e) {
throw new IllegalStateException("Unable to generate test templates for class " + testClass.getName(), e);
}
}
private List<Method> getParametersMethods(Class<?> testClass) {
List<Method> parameters = java.util.stream.Stream.of(testClass.getDeclaredMethods())
.filter(m -> Modifier.isStatic(m.getModifiers()))
.filter(m -> m.getAnnotation(Parameters.class) != null)
.collect(Collectors.toList());
if (parameters.isEmpty()) {
return getParametersMethods(testClass.getSuperclass());
} else {
return parameters;
}
}
private static Arguments toArguments(Object item) {
// Nothing to do except cast.
if (item instanceof Arguments arguments) {
return arguments;
}
// Pass all multidimensional arrays "as is", in contrast to Object[].
// See https://github.com/junit-team/junit5/issues/1665
if (ReflectionUtils.isMultidimensionalArray(item)) {
return arguments(item);
}
// Special treatment for one-dimensional reference arrays.
// See https://github.com/junit-team/junit5/issues/1665
if (item instanceof Object[]) {
return arguments((Object[]) item);
}
// Pass everything else "as is".
return arguments(item);
}
public static | ParameterizedExtension |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContainsGeoSourceAndSourceEvaluator.java | {
"start": 3841,
"end": 4682
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory left;
private final EvalOperator.ExpressionEvaluator.Factory right;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory left,
EvalOperator.ExpressionEvaluator.Factory right) {
this.source = source;
this.left = left;
this.right = right;
}
@Override
public SpatialContainsGeoSourceAndSourceEvaluator get(DriverContext context) {
return new SpatialContainsGeoSourceAndSourceEvaluator(source, left.get(context), right.get(context), context);
}
@Override
public String toString() {
return "SpatialContainsGeoSourceAndSourceEvaluator[" + "left=" + left + ", right=" + right + "]";
}
}
}
| Factory |
java | quarkusio__quarkus | integration-tests/spring-boot-properties/src/main/java/io/quarkus/it/spring/boot/ClassPropertiesResource.java | {
"start": 143,
"end": 618
} | class ____ {
@Inject
ClassProperties properties;
@Path("/value")
@GET
public String getValue() {
return properties.getValue();
}
@Path("/anotherClass/value")
@GET
public boolean isAnotherClassValue() {
return properties.getAnotherClass().isValue();
}
@Path("/interface")
@GET
public boolean getUrlFromClassWithInterface() {
return properties.getAnInterface() == null;
}
}
| ClassPropertiesResource |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/util/DocumentingDispatcherRestEndpoint.java | {
"start": 2174,
"end": 2289
} | class ____ extract the {@link RuntimeMessageHeaders} that the {@link
* DispatcherRestEndpoint} supports.
*/
public | to |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/GlobalMethodSecurityConfigurationTests.java | {
"start": 14705,
"end": 14847
} | class ____ {
@Bean
MethodSecurityService service() {
return new MethodSecurityServiceImpl();
}
}
@Configuration
static | ParentConfig |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/api/datastream/StatefulDataStreamV2ITCase.java | {
"start": 5400,
"end": 5987
} | class ____ implements AggregateFunction<Long, Long, Long> {
@Override
public Long createAccumulator() {
return 0L;
}
@Override
public Long add(Long value, Long accumulator) {
return value + accumulator;
}
@Override
public Long getResult(Long accumulator) {
return accumulator;
}
@Override
public Long merge(Long a, Long b) {
return a + b;
}
}
/** {@link ReduceFunction} that sums values. */
private static | MockAggregateSumFunction |
java | google__dagger | javatests/artifacts/dagger-ksp/transitive-annotation-app/library1/src/main/java/library1/Foo.java | {
"start": 766,
"end": 1199
} | class ____ to test that Dagger won't fail when non-dagger related annotations cannot be
* resolved.
*
* <p>During the compilation of {@code :app}, {@link MyTransitiveAnnotation} will no longer be on
* the classpath. In most cases, Dagger shouldn't care that the annotation isn't on the classpath
*/
@Singleton
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public final | used |
java | quarkusio__quarkus | extensions/funqy/funqy-amazon-lambda/maven-archetype/src/main/resources/archetype-resources/src/main/java/TestFunq.java | {
"start": 89,
"end": 262
} | class ____ {
@Inject
ProcessingService service;
@Funq
public OutputObject greeting(InputObject input) {
return service.process(input);
}
}
| TestFunq |
java | google__auto | value/src/test/java/com/google/auto/value/extension/toprettystring/ToPrettyStringValidatorTest.java | {
"start": 1120,
"end": 1447
} | class ____ {
@Test
public void cannotBeStatic() {
JavaFileObject file =
JavaFileObjects.forSourceLines(
"test.Test",
"package test;",
"",
"import com.google.auto.value.extension.toprettystring.ToPrettyString;",
"",
" | ToPrettyStringValidatorTest |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/TableJoined.java | {
"start": 1438,
"end": 8238
} | class ____<K, KO> implements NamedOperation<TableJoined<K, KO>> {
protected final StreamPartitioner<K, Void> partitioner;
protected final StreamPartitioner<KO, Void> otherPartitioner;
protected final String name;
private TableJoined(final StreamPartitioner<K, Void> partitioner,
final StreamPartitioner<KO, Void> otherPartitioner,
final String name) {
this.partitioner = partitioner;
this.otherPartitioner = otherPartitioner;
this.name = name;
}
protected TableJoined(final TableJoined<K, KO> tableJoined) {
this(tableJoined.partitioner, tableJoined.otherPartitioner, tableJoined.name);
}
/**
* Create an instance of {@code TableJoined} with partitioner and otherPartitioner {@link StreamPartitioner} instances.
* {@code null} values are accepted and will result in the default partitioner being used.
*
* @param partitioner a {@link StreamPartitioner} that captures the partitioning strategy for the left (primary)
* table of the foreign key join. Specifying this option does not repartition or otherwise
* affect the source table; rather, this option informs the foreign key join on how internal
* topics should be partitioned in order to be co-partitioned with the left join table.
* The partitioning strategy must depend only on the message key and not the message value,
* else the source table is not supported with foreign key joins. This option may be left
* {@code null} if the source table uses the default partitioner.
* @param otherPartitioner a {@link StreamPartitioner} that captures the partitioning strategy for the right (foreign
* key) table of the foreign key join. Specifying this option does not repartition or otherwise
* affect the source table; rather, this option informs the foreign key join on how internal
* topics should be partitioned in order to be co-partitioned with the right join table.
* The partitioning strategy must depend only on the message key and not the message value,
* else the source table is not supported with foreign key joins. This option may be left
* {@code null} if the source table uses the default partitioner.
* @param <K> this key type ; key type for the left (primary) table
* @param <KO> other key type ; key type for the right (foreign key) table
* @return new {@code TableJoined} instance with the provided partitioners
*/
public static <K, KO> TableJoined<K, KO> with(final StreamPartitioner<K, Void> partitioner,
final StreamPartitioner<KO, Void> otherPartitioner) {
return new TableJoined<>(partitioner, otherPartitioner, null);
}
/**
* Create an instance of {@code TableJoined} with base name for all components of the join, including internal topics
* created to complete the join.
*
* @param name the name used as the base for naming components of the join including internal topics
* @param <K> this key type ; key type for the left (primary) table
* @param <KO> other key type ; key type for the right (foreign key) table
* @return new {@code TableJoined} instance configured with the {@code name}
*
*/
public static <K, KO> TableJoined<K, KO> as(final String name) {
return new TableJoined<>(null, null, name);
}
/**
* Set the custom {@link StreamPartitioner} to be used as part of computing the join.
* {@code null} values are accepted and will result in the default partitioner being used.
*
* @param partitioner a {@link StreamPartitioner} that captures the partitioning strategy for the left (primary)
* table of the foreign key join. Specifying this option does not repartition or otherwise
* affect the source table; rather, this option informs the foreign key join on how internal
* topics should be partitioned in order to be co-partitioned with the left join table.
* The partitioning strategy must depend only on the message key and not the message value,
* else the source table is not supported with foreign key joins. This option may be left
* {@code null} if the source table uses the default partitioner.
* @return new {@code TableJoined} instance configured with the {@code partitioner}
*/
public TableJoined<K, KO> withPartitioner(final StreamPartitioner<K, Void> partitioner) {
return new TableJoined<>(partitioner, otherPartitioner, name);
}
/**
* Set the custom other {@link StreamPartitioner} to be used as part of computing the join.
* {@code null} values are accepted and will result in the default partitioner being used.
*
* @param otherPartitioner a {@link StreamPartitioner} that captures the partitioning strategy for the right (foreign
* key) table of the foreign key join. Specifying this option does not repartition or otherwise
* affect the source table; rather, this option informs the foreign key join on how internal
* topics should be partitioned in order to be co-partitioned with the right join table.
* The partitioning strategy must depend only on the message key and not the message value,
* else the source table is not supported with foreign key joins. This option may be left
* {@code null} if the source table uses the default partitioner.
* @return new {@code TableJoined} instance configured with the {@code otherPartitioner}
*/
public TableJoined<K, KO> withOtherPartitioner(final StreamPartitioner<KO, Void> otherPartitioner) {
return new TableJoined<>(partitioner, otherPartitioner, name);
}
/**
* Set the base name used for all components of the join, including internal topics
* created to complete the join.
*
* @param name the name used as the base for naming components of the join including internal topics
* @return new {@code TableJoined} instance configured with the {@code name}
*/
@Override
public TableJoined<K, KO> withName(final String name) {
return new TableJoined<>(partitioner, otherPartitioner, name);
}
}
| TableJoined |
java | apache__kafka | coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java | {
"start": 4854,
"end": 5120
} | class ____<S extends CoordinatorShard<U>, U> implements AutoCloseable {
/**
* Builder to create a CoordinatorRuntime.
*
* @param <S> The type of the state machine.
* @param <U> The type of the record.
*/
public static | CoordinatorRuntime |
java | spring-projects__spring-boot | integration-test/spring-boot-integration-tests/src/test/java/org/springframework/boot/web/servlet/support/ErrorPageFilterIntegrationTests.java | {
"start": 4153,
"end": 4581
} | class ____ {
@Bean
ServletWebServerFactory webServerFactory() {
return new TomcatServletWebServerFactory(0);
}
@Bean
ErrorPageFilter errorPageFilter() {
return new ErrorPageFilter();
}
@Bean
DispatcherServlet dispatcherServlet() {
return new DispatcherServlet();
}
@Bean
HelloWorldController helloWorldController() {
return new HelloWorldController();
}
}
@Controller
static | TomcatConfig |
java | quarkusio__quarkus | integration-tests/spring-data-jpa/src/main/java/io/quarkus/it/spring/data/jpa/Cart.java | {
"start": 290,
"end": 1115
} | class ____ extends AbstractEntity {
@ManyToOne(fetch = FetchType.LAZY)
private Customer customer;
@NotNull
@Enumerated(EnumType.STRING)
private CartStatus status;
public Cart() {
}
public Cart(Long id, Customer customer, @NotNull CartStatus status) {
this.id = id;
this.customer = customer;
this.status = status;
}
public Cart(Customer customer, @NotNull CartStatus status) {
this.customer = customer;
this.status = status;
}
public Customer getCustomer() {
return customer;
}
public void setCustomer(Customer customer) {
this.customer = customer;
}
public CartStatus getStatus() {
return status;
}
public void setStatus(CartStatus status) {
this.status = status;
}
}
| Cart |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/DerivedComparable.java | {
"start": 939,
"end": 1133
} | class ____ extends BaseComparable {
public DerivedComparable(String s) {
super(s);
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
| DerivedComparable |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java | {
"start": 1296,
"end": 1372
} | class ____ extends AbstractResponse {
public static | DescribeConfigsResponse |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggRowSet.java | {
"start": 710,
"end": 3026
} | class ____ extends ResultRowSet<BucketExtractor> {
final List<? extends CompositeAggregation.Bucket> buckets;
Map<String, Object> afterKey;
int remainingData;
int size;
int row = 0;
CompositeAggRowSet(
List<BucketExtractor> exts,
BitSet mask,
SearchResponse response,
int sizeRequested,
int remainingLimit,
boolean mightProducePartialPages
) {
super(exts, mask);
CompositeAggregation composite = CompositeAggCursor.getComposite(response);
buckets = composite.getBuckets();
afterKey = composite.afterKey();
// page size
size = remainingLimit == -1 ? buckets.size() : Math.min(buckets.size(), remainingLimit);
boolean hasNextPage = mightProducePartialPages || buckets.size() == sizeRequested;
remainingData = remainingData(hasNextPage, size, remainingLimit);
}
static int remainingData(boolean hasNextPage, int size, int limit) {
if (hasNextPage == false) {
return 0;
} else {
int remainingLimit = (limit == -1) ? limit : ((limit - size) >= 0 ? (limit - size) : 0);
// if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached
// note that a composite agg might be valid but return zero groups (since these can be filtered with HAVING/bucket selector)
// however the Querier takes care of that and keeps making requests until either the query is invalid or at least one response
// is returned.
return size == 0 ? size : remainingLimit;
}
}
@Override
protected Object extractValue(BucketExtractor e) {
return e.extract(buckets.get(row));
}
@Override
protected boolean doHasCurrent() {
return row < size;
}
@Override
protected boolean doNext() {
if (row < size - 1) {
row++;
return true;
}
return false;
}
@Override
protected void doReset() {
row = 0;
}
@Override
public int size() {
return size;
}
int remainingData() {
return remainingData;
}
Map<String, Object> afterKey() {
return afterKey;
}
}
| CompositeAggRowSet |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/QueryableReservedRolesProviderTests.java | {
"start": 582,
"end": 1254
} | class ____ extends ESTestCase {
public void testReservedRoleProvider() {
QueryableReservedRolesProvider provider = new QueryableReservedRolesProvider(new ReservedRolesStore());
assertNotNull(provider.getRoles());
assertThat(provider.getRoles(), equalTo(provider.getRoles()));
assertThat(provider.getRoles().rolesDigest().size(), equalTo(ReservedRolesStore.roleDescriptors().size()));
assertThat(
provider.getRoles().rolesDigest().keySet(),
equalTo(ReservedRolesStore.roleDescriptors().stream().map(RoleDescriptor::getName).collect(Collectors.toSet()))
);
}
}
| QueryableReservedRolesProviderTests |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/exc/StreamConstraintsException.java | {
"start": 310,
"end": 623
} | class ____
extends JacksonException
{
private final static long serialVersionUID = 2L;
public StreamConstraintsException(String msg) {
super(msg);
}
public StreamConstraintsException(String msg, TokenStreamLocation loc) {
super(msg, loc, null);
}
}
| StreamConstraintsException |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java | {
"start": 1594,
"end": 11751
} | class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(TestMRAsyncDiskService.class);
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString();
@BeforeEach
public void setUp() {
FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
}
/**
* Given 'pathname', compute an equivalent path relative to the cwd.
* @param pathname the path to a directory.
* @return the path to that same directory, relative to ${user.dir}.
*/
private String relativeToWorking(String pathname) {
String cwd = System.getProperty("user.dir", "/");
// normalize pathname and cwd into full directory paths.
pathname = (new Path(pathname)).toUri().getPath();
cwd = (new Path(cwd)).toUri().getPath();
String [] cwdParts = cwd.split(Path.SEPARATOR);
String [] pathParts = pathname.split(Path.SEPARATOR);
// There are three possible cases:
// 1) pathname and cwd are equal. Return '.'
// 2) pathname is under cwd. Return the components that are under it.
// e.g., cwd = /a/b, path = /a/b/c, return 'c'
// 3) pathname is outside of cwd. Find the common components, if any,
// and subtract them from the returned path, then return enough '..'
// components to "undo" the non-common components of cwd, then all
// the remaining parts of pathname.
// e.g., cwd = /a/b, path = /a/c, return '../c'
if (cwd.equals(pathname)) {
LOG.info("relative to working: " + pathname + " -> .");
return "."; // They match exactly.
}
// Determine how many path components are in common between cwd and path.
int common = 0;
for (int i = 0; i < Math.min(cwdParts.length, pathParts.length); i++) {
if (cwdParts[i].equals(pathParts[i])) {
common++;
} else {
break;
}
}
// output path stringbuilder.
StringBuilder sb = new StringBuilder();
// For everything in cwd that isn't in pathname, add a '..' to undo it.
int parentDirsRequired = cwdParts.length - common;
for (int i = 0; i < parentDirsRequired; i++) {
sb.append("..");
sb.append(Path.SEPARATOR);
}
// Then append all non-common parts of 'pathname' itself.
for (int i = common; i < pathParts.length; i++) {
sb.append(pathParts[i]);
sb.append(Path.SEPARATOR);
}
// Don't end with a '/'.
String s = sb.toString();
if (s.endsWith(Path.SEPARATOR)) {
s = s.substring(0, s.length() - 1);
}
LOG.info("relative to working: " + pathname + " -> " + s);
return s;
}
@Test
/** Test that the relativeToWorking() method above does what we expect. */
public void testRelativeToWorking() {
assertEquals(".", relativeToWorking(System.getProperty("user.dir", ".")));
String cwd = System.getProperty("user.dir", ".");
Path cwdPath = new Path(cwd);
Path subdir = new Path(cwdPath, "foo");
assertEquals("foo", relativeToWorking(subdir.toUri().getPath()));
Path subsubdir = new Path(subdir, "bar");
assertEquals("foo/bar", relativeToWorking(subsubdir.toUri().getPath()));
Path parent = new Path(cwdPath, "..");
assertEquals("..", relativeToWorking(parent.toUri().getPath()));
Path sideways = new Path(parent, "baz");
assertEquals("../baz", relativeToWorking(sideways.toUri().getPath()));
}
@Test
/** Test that volumes specified as relative paths are handled properly
* by MRAsyncDiskService (MAPREDUCE-1887).
*/
public void testVolumeNormalization() throws Throwable {
LOG.info("TEST_ROOT_DIR is " + TEST_ROOT_DIR);
String relativeTestRoot = relativeToWorking(TEST_ROOT_DIR);
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String [] vols = new String[] { relativeTestRoot + "/0",
relativeTestRoot + "/1" };
// Put a file in one of the volumes to be cleared on startup.
Path delDir = new Path(vols[0], MRAsyncDiskService.TOBEDELETED);
localFileSystem.mkdirs(delDir);
localFileSystem.create(new Path(delDir, "foo")).close();
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories and then removes them through
* MRAsyncDiskService.
*/
@Test
public void testMRAsyncDiskService() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
File fa = new File(vols[0], a);
File fb = new File(vols[1], b);
File fc = new File(vols[1], c);
File fd = new File(vols[1], d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Move and delete them
service.moveAndDeleteRelativePath(vols[0], a);
assertFalse(fa.exists());
service.moveAndDeleteRelativePath(vols[1], b);
assertFalse(fb.exists());
assertFalse(fc.exists());
assertFalse(service.moveAndDeleteRelativePath(vols[1], "not_exists"));
// asyncDiskService is NOT able to delete files outside all volumes.
IOException ee = null;
try {
service.moveAndDeleteAbsolutePath(TEST_ROOT_DIR + "/2");
} catch (IOException e) {
ee = e;
}
assertNotNull(ee, "asyncDiskService should not be able to delete files "
+ "outside all volumes");
// asyncDiskService is able to automatically find the file in one
// of the volumes.
assertTrue(service.moveAndDeleteAbsolutePath(vols[1] + Path.SEPARATOR_CHAR + d));
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories inside the volume roots, and then
* call asyncDiskService.MoveAndDeleteAllVolumes.
* We should be able to delete all files/dirs inside the volumes except
* the toBeDeleted directory.
*/
@Test
public void testMRAsyncDiskServiceMoveAndDeleteAllVolumes() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
File fa = new File(vols[0], a);
File fb = new File(vols[1], b);
File fc = new File(vols[1], c);
File fd = new File(vols[1], d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Delete all of them
service.cleanupAllVolumes();
assertFalse(fa.exists());
assertFalse(fb.exists());
assertFalse(fc.exists());
assertFalse(fd.exists());
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories inside the toBeDeleted directory and
* then start the asyncDiskService.
* AsyncDiskService will create tasks to delete the content inside the
* toBeDeleted directories.
*/
@Test
public void testMRAsyncDiskServiceStartupCleaning() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
// Create directories inside SUBDIR
String suffix = Path.SEPARATOR_CHAR + MRAsyncDiskService.TOBEDELETED;
File fa = new File(vols[0] + suffix, a);
File fb = new File(vols[1] + suffix, b);
File fc = new File(vols[1] + suffix, c);
File fd = new File(vols[1] + suffix, d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Create the asyncDiskService which will delete all contents inside SUBDIR
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
private void makeSureCleanedUp(String[] vols, MRAsyncDiskService service)
throws Throwable {
// Sleep at most 5 seconds to make sure the deleted items are all gone.
service.shutdown();
if (!service.awaitTermination(5000)) {
fail("MRAsyncDiskService is still not shutdown in 5 seconds!");
}
// All contents should be gone by now.
for (int i = 0; i < vols.length; i++) {
File subDir = new File(vols[0]);
String[] subDirContent = subDir.list();
assertEquals(1, subDirContent.length, "Volume should contain a single child: "
+ MRAsyncDiskService.TOBEDELETED);
File toBeDeletedDir = new File(vols[0], MRAsyncDiskService.TOBEDELETED);
String[] content = toBeDeletedDir.list();
assertNotNull(content, "Cannot find " + toBeDeletedDir);
assertThat(content).withFailMessage(
toBeDeletedDir + " should be empty now.").isEmpty();
}
}
@Test
public void testToleratesSomeUnwritableVolumes() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
assertTrue(new File(vols[0]).mkdirs());
assertEquals(0, FileUtil.chmod(vols[0], "400")); // read only
try {
new MRAsyncDiskService(localFileSystem, vols);
} finally {
FileUtil.chmod(vols[0], "755"); // make writable again
}
}
}
| TestMRAsyncDiskService |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/entrypoint/KubernetesWorkerResourceSpecFactory.java | {
"start": 1408,
"end": 2156
} | class ____ extends WorkerResourceSpecFactory {
public static final KubernetesWorkerResourceSpecFactory INSTANCE =
new KubernetesWorkerResourceSpecFactory();
private KubernetesWorkerResourceSpecFactory() {}
@Override
public WorkerResourceSpec createDefaultWorkerResourceSpec(Configuration configuration) {
return workerResourceSpecFromConfigAndCpu(configuration, getDefaultCpus(configuration));
}
@VisibleForTesting
static CPUResource getDefaultCpus(Configuration configuration) {
double fallback = configuration.get(KubernetesConfigOptions.TASK_MANAGER_CPU);
return TaskExecutorProcessUtils.getCpuCoresWithFallback(configuration, fallback);
}
}
| KubernetesWorkerResourceSpecFactory |
java | quarkusio__quarkus | extensions/websockets-next/runtime/src/main/java/io/quarkus/websockets/next/runtime/telemetry/ForwardingWebSocketEndpoint.java | {
"start": 479,
"end": 3004
} | class ____ implements WebSocketEndpoint {
protected final WebSocketEndpoint delegate;
protected ForwardingWebSocketEndpoint(WebSocketEndpoint delegate) {
this.delegate = delegate;
}
@Override
public InboundProcessingMode inboundProcessingMode() {
return delegate.inboundProcessingMode();
}
@Override
public Future<Void> onOpen() {
return delegate.onOpen();
}
@Override
public ExecutionModel onOpenExecutionModel() {
return delegate.onOpenExecutionModel();
}
@Override
public Future<Void> onTextMessage(Object message) {
return delegate.onTextMessage(message);
}
@Override
public ExecutionModel onTextMessageExecutionModel() {
return delegate.onTextMessageExecutionModel();
}
@Override
public Type consumedTextMultiType() {
return delegate.consumedTextMultiType();
}
@Override
public Object decodeTextMultiItem(Object message) {
return delegate.decodeTextMultiItem(message);
}
@Override
public Future<Void> onBinaryMessage(Object message) {
return delegate.onBinaryMessage(message);
}
@Override
public ExecutionModel onBinaryMessageExecutionModel() {
return delegate.onBinaryMessageExecutionModel();
}
@Override
public Type consumedBinaryMultiType() {
return delegate.consumedBinaryMultiType();
}
@Override
public Object decodeBinaryMultiItem(Object message) {
return delegate.decodeBinaryMultiItem(message);
}
@Override
public Future<Void> onPingMessage(Buffer message) {
return delegate.onPingMessage(message);
}
@Override
public ExecutionModel onPingMessageExecutionModel() {
return delegate.onPingMessageExecutionModel();
}
@Override
public Future<Void> onPongMessage(Buffer message) {
return delegate.onPongMessage(message);
}
@Override
public ExecutionModel onPongMessageExecutionModel() {
return delegate.onPongMessageExecutionModel();
}
@Override
public Future<Void> onClose() {
return delegate.onClose();
}
@Override
public ExecutionModel onCloseExecutionModel() {
return delegate.onCloseExecutionModel();
}
@Override
public Uni<Void> doOnError(Throwable t) {
return delegate.doOnError(t);
}
@Override
public String beanIdentifier() {
return delegate.beanIdentifier();
}
}
| ForwardingWebSocketEndpoint |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/bean/BeanPropertyMap.java | {
"start": 528,
"end": 648
} | class ____ for storing mapping from property name to
* {@link SettableBeanProperty} instances.
*<p>
* Note that this | used |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/http/client/response/HttpClientResponse.java | {
"start": 1055,
"end": 1183
} | class ____ content no longer be {@link InputStream} anymore, we don't need to close it anymore.
*
* @author mai.jh
*/
public | body |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RQueueAsync.java | {
"start": 1409,
"end": 3014
} | class ____ the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
*/
RFuture<Boolean> offerAsync(V e);
/**
* Retrieves and removes last available tail element of this queue queue and adds it at the head of <code>queueName</code>.
*
* @param queueName - names of destination queue
* @return the tail of this queue, or {@code null} if the
* specified waiting time elapses before an element is available
*/
RFuture<V> pollLastAndOfferFirstToAsync(String queueName);
/**
* Returns all queue elements at once
*
* @return elements
*/
RFuture<List<V>> readAllAsync();
/**
* Retrieves and removes the head elements of this queue.
* Elements amount limited by <code>limit</code> param.
*
* @return list of head elements
*/
RFuture<List<V>> pollAsync(int limit);
/**
* Adds object event listener
*
* @see org.redisson.api.listener.TrackingListener
* @see org.redisson.api.ExpiredObjectListener
* @see org.redisson.api.DeletedObjectListener
* @see org.redisson.api.listener.ListAddListener
* @see org.redisson.api.listener.ListInsertListener
* @see org.redisson.api.listener.ListSetListener
* @see org.redisson.api.listener.ListRemoveListener
* @see org.redisson.api.listener.ListTrimListener
*
* @param listener - object event listener
* @return listener id
*/
RFuture<Integer> addListenerAsync(ObjectListener listener);
}
| of |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/flush/TestCollectionInitializingDuringFlush.java | {
"start": 2689,
"end": 3182
} | class ____ implements Integrator {
@Override
public void integrate(
Metadata metadata,
BootstrapContext bootstrapContext,
SessionFactoryImplementor sessionFactory) {
integrate( sessionFactory );
}
private void integrate(SessionFactoryImplementor sessionFactory) {
sessionFactory.getEventListenerRegistry()
.getEventListenerGroup( EventType.PRE_UPDATE )
.appendListener( InitializingPreUpdateEventListener.INSTANCE );
}
}
public static | CustomLoadIntegrator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/fielddata/SortingNumericLongValues.java | {
"start": 804,
"end": 3523
} | class ____ extends SortedNumericLongValues {
private int count;
protected long[] values;
protected int valuesCursor;
private final Sorter sorter;
private final LongConsumer circuitBreakerConsumer;
protected SortingNumericLongValues() {
this(l -> {});
}
protected SortingNumericLongValues(LongConsumer circuitBreakerConsumer) {
values = new long[1];
valuesCursor = 0;
sorter = new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final long tmp = values[i];
values[i] = values[j];
values[j] = tmp;
}
@Override
protected int compare(int i, int j) {
return Long.compare(values[i], values[j]);
}
};
this.circuitBreakerConsumer = circuitBreakerConsumer;
// account for initial values size of 1
this.circuitBreakerConsumer.accept(Long.BYTES);
}
/**
* Set the {@link #docValueCount()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected final void resize(int newSize) {
count = newSize;
valuesCursor = 0;
if (newSize <= getArrayLength()) {
return;
}
// Array is expected to grow so increment the circuit breaker
// to include both the additional bytes used by the grown array
// as well as the overhead of keeping both arrays in memory while
// copying.
long oldValuesSizeInBytes = (long) getArrayLength() * Long.BYTES;
int newValuesLength = ArrayUtil.oversize(newSize, Long.BYTES);
circuitBreakerConsumer.accept((long) newValuesLength * Long.BYTES);
// resize
growExact(newValuesLength);
// account for freeing the old values array
circuitBreakerConsumer.accept(-oldValuesSizeInBytes);
}
/** Grow the array in a method so we can override it during testing */
protected void growExact(int newValuesLength) {
values = ArrayUtil.growExact(values, newValuesLength);
}
/** Get the size of the internal array using a method so we can override it during testing */
protected int getArrayLength() {
return values.length;
}
/**
* Sort values that are stored between offsets <code>0</code> and
* {@link #count} of {@link #values}.
*/
protected final void sort() {
sorter.sort(0, count);
}
@Override
public final int docValueCount() {
return count;
}
@Override
public final long nextValue() {
return values[valuesCursor++];
}
}
| SortingNumericLongValues |
java | apache__kafka | jmh-benchmarks/src/main/java/org/apache/kafka/jmh/acl/StandardAuthorizerUpdateBenchmark.java | {
"start": 2466,
"end": 5018
} | class ____ {
private static final Random RANDOM = new Random(System.currentTimeMillis());
private static final KafkaPrincipal PRINCIPAL = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user");
private final String resourceNamePrefix = "foo-bar35_resource-";
private final Set<Uuid> ids = new HashSet<>();
private final List<StandardAclWithId> aclsToAdd = prepareAcls();
private StandardAuthorizer authorizer;
@Param({"25000", "50000", "75000", "100000"})
private int aclCount;
int index = 0;
@Setup(Level.Iteration)
public void setup() throws Exception {
authorizer = new StandardAuthorizer();
addAcls(aclCount);
}
@TearDown(Level.Iteration)
public void tearDown() throws IOException {
authorizer.close();
}
@Benchmark
public void testAddAcl() {
StandardAclWithId aclWithId = aclsToAdd.get(index++);
authorizer.addAcl(aclWithId.id(), aclWithId.acl());
}
private List<StandardAclWithId> prepareAcls() {
return IntStream.range(0, 10000)
.mapToObj(i -> {
ResourceType resourceType = RANDOM.nextInt(10) > 7 ? ResourceType.GROUP : ResourceType.TOPIC;
String resourceName = resourceNamePrefix + i;
ResourcePattern resourcePattern = new ResourcePattern(resourceType, resourceName, PatternType.LITERAL);
return aclsForResource(resourcePattern);
})
.flatMap(Collection::stream)
.toList();
}
private List<StandardAclWithId> aclsForResource(ResourcePattern pattern) {
return IntStream.range(1, 256)
.mapToObj(i -> {
String p = PRINCIPAL.toString() + RANDOM.nextInt(100);
String h = "127.0.0." + i;
return new StandardAcl(pattern.resourceType(), pattern.name(), pattern.patternType(), p, h, READ, ALLOW);
})
.map(this::withId)
.toList();
}
private StandardAclWithId withId(StandardAcl acl) {
Uuid id = new Uuid(acl.hashCode(), acl.hashCode());
while (ids.contains(id)) {
id = Uuid.randomUuid();
}
ids.add(id);
return new StandardAclWithId(id, acl);
}
private void addAcls(int num) {
IntStream.range(0, num)
.mapToObj(aclsToAdd::get)
.forEach(aclWithId -> {
authorizer.addAcl(aclWithId.id(), aclWithId.acl());
index++;
});
}
}
| StandardAuthorizerUpdateBenchmark |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java | {
"start": 5152,
"end": 9925
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(
TestCapacitySchedulerAutoCreatedQueueBase.class);
public static final int GB = 1024;
public static final ContainerUpdates NULL_UPDATE_REQUESTS =
new ContainerUpdates();
public static final String DEFAULT_PATH = CapacitySchedulerConfiguration.ROOT + ".default";
public static final String A_PATH = CapacitySchedulerConfiguration.ROOT + ".a";
public static final String B_PATH = CapacitySchedulerConfiguration.ROOT + ".b";
public static final String C_PATH = CapacitySchedulerConfiguration.ROOT + ".c";
public static final String D_PATH = CapacitySchedulerConfiguration.ROOT + ".d";
public static final String E_PATH = CapacitySchedulerConfiguration.ROOT + ".e";
public static final QueuePath ROOT = new QueuePath(CapacitySchedulerConfiguration.ROOT);
public static final QueuePath DEFAULT = new QueuePath(DEFAULT_PATH);
public static final QueuePath A = new QueuePath(A_PATH);
public static final QueuePath B = new QueuePath(B_PATH);
public static final QueuePath C = new QueuePath(C_PATH);
public static final QueuePath D = new QueuePath(D_PATH);
public static final QueuePath E = new QueuePath(E_PATH);
public static final String ESUBGROUP1_PATH =
CapacitySchedulerConfiguration.ROOT + ".esubgroup1";
public static final String FGROUP_PATH =
CapacitySchedulerConfiguration.ROOT + ".fgroup";
public static final String A1_PATH = A_PATH + ".a1";
public static final String A2_PATH = A_PATH + ".a2";
public static final String B1_PATH = B_PATH + ".b1";
public static final String B2_PATH = B_PATH + ".b2";
public static final String B3_PATH = B_PATH + ".b3";
public static final String B4_PATH = B_PATH + ".b4subgroup1";
public static final String ESUBGROUP1_A_PATH = ESUBGROUP1_PATH + ".e";
public static final String FGROUP_F_PATH = FGROUP_PATH + ".f";
public static final QueuePath A1 = new QueuePath(A1_PATH);
public static final QueuePath A2 = new QueuePath(A2_PATH);
public static final QueuePath B1 = new QueuePath(B1_PATH);
public static final QueuePath B2 = new QueuePath(B2_PATH);
public static final QueuePath B3 = new QueuePath(B3_PATH);
public static final QueuePath B4 = new QueuePath(B4_PATH);
public static final QueuePath E_GROUP = new QueuePath(ESUBGROUP1_PATH);
public static final QueuePath F_GROUP = new QueuePath(FGROUP_PATH);
public static final QueuePath E_SG = new QueuePath(ESUBGROUP1_A_PATH);
public static final QueuePath F_SG = new QueuePath(FGROUP_F_PATH);
public static final float A_CAPACITY = 20f;
public static final float B_CAPACITY = 20f;
public static final float C_CAPACITY = 20f;
public static final float D_CAPACITY = 20f;
public static final float ESUBGROUP1_CAPACITY = 10f;
public static final float FGROUP_CAPACITY = 10f;
public static final float A1_CAPACITY = 30;
public static final float A2_CAPACITY = 70;
public static final float B1_CAPACITY = 60f;
public static final float B2_CAPACITY = 20f;
public static final float B3_CAPACITY = 10f;
public static final float B4_CAPACITY = 10f;
public static final int NODE_MEMORY = 16;
public static final int NODE1_VCORES = 16;
public static final int NODE2_VCORES = 32;
public static final int NODE3_VCORES = 48;
public static final String TEST_GROUP = "testusergroup";
public static final String TEST_GROUPUSER = "testuser";
public static final String TEST_GROUP1 = "testusergroup1";
public static final String TEST_GROUPUSER1 = "testuser1";
public static final String TEST_GROUP2 = "testusergroup2";
public static final String TEST_GROUPUSER2 = "testuser2";
public static final String USER = "user_";
public static final String USER0 = USER + 0;
public static final String USER1 = USER + 1;
public static final String USER2 = USER + 2;
public static final String USER3 = USER + 3;
public static final String PARENT_QUEUE = "c";
public static final QueuePath PARENT_QUEUE_PATH = new QueuePath(PARENT_QUEUE);
public static final Set<String> accessibleNodeLabelsOnC = new HashSet<>();
public static final String NODEL_LABEL_GPU = "GPU";
public static final String NODEL_LABEL_SSD = "SSD";
public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
public static final ImmutableSet<String> RESOURCE_TYPES = ImmutableSet.of("memory", "vcores");
protected MockRM mockRM = null;
protected MockNM nm1 = null;
protected MockNM nm2 = null;
protected MockNM nm3 = null;
protected CapacityScheduler cs;
protected SpyDispatcher dispatcher;
private static EventHandler<Event> rmAppEventEventHandler;
public static | TestCapacitySchedulerAutoCreatedQueueBase |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationTemp.java | {
"start": 1627,
"end": 2279
} | class ____ {
private static final FileAttribute<?>[] NO_FILE_ATTRIBUTES = {};
private static final EnumSet<PosixFilePermission> DIRECTORY_PERMISSIONS = EnumSet.of(PosixFilePermission.OWNER_READ,
PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE);
private final @Nullable Class<?> sourceClass;
private final Lock pathLock = new ReentrantLock();
private volatile @Nullable Path path;
/**
* Create a new {@link ApplicationTemp} instance.
*/
public ApplicationTemp() {
this(null);
}
/**
* Create a new {@link ApplicationTemp} instance for the specified source class.
* @param sourceClass the source | ApplicationTemp |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/generics/GenericEmbeddedIdentifierMappedSuperclassTest.java | {
"start": 2157,
"end": 8846
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final User user = new User( "user" );
session.persist( user );
final UserAccessReportEntity userAccessReportEntity = new UserAccessReportEntity();
userAccessReportEntity.setId( new EmbeddableKey<>( user, new UserReport( "user_report" ), 1 ) );
session.persist( userAccessReportEntity );
final Group group = new Group( "group" );
session.persist( group );
final GroupAccessReportEntity groupAccessReportEntity = new GroupAccessReportEntity();
groupAccessReportEntity.setId( new EmbeddableKey<>( group, new GroupReport( "group_report" ), 2 ) );
session.persist( groupAccessReportEntity );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from GroupAccessReport" ).executeUpdate();
session.createMutationQuery( "delete from UserAccessReport" ).executeUpdate();
session.createMutationQuery( "delete from GroupEntity" ).executeUpdate();
session.createMutationQuery( "delete from UserEntity" ).executeUpdate();
} );
}
@Test
public void testUserReport(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final UserAccessReportEntity result = session.createQuery(
"select ur from UserAccessReport ur " +
"where ur.id.entity.login = 'user' " +
"and ur.id.embedded.userCode = 'user_report'",
UserAccessReportEntity.class
).getSingleResult();
assertThat( result.getId().getEntity().getLogin() ).isEqualTo( "user" );
assertThat( result.getId().getEmbedded().getUserCode() ).isEqualTo( "user_report" );
} );
}
@Test
public void testUserReportCriteria(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<UserAccessReportEntity> query = cb.createQuery( UserAccessReportEntity.class );
final Root<UserAccessReportEntity> root = query.from( UserAccessReportEntity.class );
final Path<Object> id = root.get( "id" );
assertThat( id.getJavaType() ).isEqualTo( EmbeddableKey.class );
// assert that the generic attributes inside the component's model are reported as Object type
final SqmPathSource<?> modelPathSource = (SqmPathSource<?>) id.getModel();
assertThat( modelPathSource.findSubPathSource( "entity" ).getBindableJavaType() ).isEqualTo( Object.class );
assertThat( modelPathSource.findSubPathSource( "embedded" )
.getBindableJavaType() ).isEqualTo( Object.class );
// the serial property is not generic, so it should have the correct type even in the generic component's model
assertThat( modelPathSource.findSubPathSource( "serial" )
.getBindableJavaType() ).isEqualTo( Integer.class );
// assert that types of the concrete attributes inside the component's resolved model are reported correctly
final SqmPathSource<?> resolvedPathSource = ( (SqmPath<Object>) id ).getResolvedModel();
assertThat( resolvedPathSource.findSubPathSource( "entity" )
.getBindableJavaType() ).isEqualTo( User.class );
assertThat( resolvedPathSource.findSubPathSource( "embedded" )
.getBindableJavaType() ).isEqualTo( UserReport.class );
assertThat( modelPathSource.findSubPathSource( "serial" )
.getBindableJavaType() ).isEqualTo( Integer.class );
// test same query as HQL
query.select( root ).where(
cb.and(
cb.equal( id.get( "entity" ).get( "login" ), "user" ),
cb.equal( id.get( "embedded" ).get( "userCode" ), "user_report" )
)
);
final UserAccessReportEntity result = session.createQuery( query ).getSingleResult();
assertThat( result.getId().getEntity().getLogin() ).isEqualTo( "user" );
assertThat( result.getId().getEmbedded().getUserCode() ).isEqualTo( "user_report" );
} );
}
@Test
public void testGroupReport(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final GroupAccessReportEntity result = session.createQuery(
"select gr from GroupAccessReport gr " +
"where gr.id.entity.name = 'group' " +
"and gr.id.embedded.groupCode = 'group_report'",
GroupAccessReportEntity.class
).getSingleResult();
assertThat( result.getId().getEntity().getName() ).isEqualTo( "group" );
assertThat( result.getId().getEmbedded().getGroupCode() ).isEqualTo( "group_report" );
} );
}
@Test
public void testGroupReportCriteria(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<GroupAccessReportEntity> query = cb.createQuery( GroupAccessReportEntity.class );
final Root<GroupAccessReportEntity> root = query.from( GroupAccessReportEntity.class );
final Path<Object> id = root.get( "id" );
assertThat( id.getJavaType() ).isEqualTo( EmbeddableKey.class );
// assert that the generic attributes inside the component's model are reported as Object type
final SqmPathSource<?> modelPathSource = (SqmPathSource<?>) id.getModel();
assertThat( modelPathSource.findSubPathSource( "entity" ).getBindableJavaType() ).isEqualTo( Object.class );
assertThat( modelPathSource.findSubPathSource( "embedded" )
.getBindableJavaType() ).isEqualTo( Object.class );
// the serial property is not generic, so it should have the correct type even in the generic component's model
assertThat( modelPathSource.findSubPathSource( "serial" )
.getBindableJavaType() ).isEqualTo( Integer.class );
// assert that types of the concrete attributes inside the component's resolved model are reported correctly
final SqmPathSource<?> resolvedPathSource = ( (SqmPath<Object>) id ).getResolvedModel();
assertThat( resolvedPathSource.findSubPathSource( "entity" )
.getBindableJavaType() ).isEqualTo( Group.class );
assertThat( resolvedPathSource.findSubPathSource( "embedded" )
.getBindableJavaType() ).isEqualTo( GroupReport.class );
assertThat( modelPathSource.findSubPathSource( "serial" )
.getBindableJavaType() ).isEqualTo( Integer.class );
// test same query as HQL
query.select( root ).where(
cb.and(
cb.equal( id.get( "entity" ).get( "name" ), "group" ),
cb.equal( id.get( "embedded" ).get( "groupCode" ), "group_report" )
)
);
final GroupAccessReportEntity result = session.createQuery( query ).getSingleResult();
assertThat( result.getId().getEntity().getName() ).isEqualTo( "group" );
assertThat( result.getId().getEmbedded().getGroupCode() ).isEqualTo( "group_report" );
} );
}
public static abstract | GenericEmbeddedIdentifierMappedSuperclassTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/embeddings/HuggingFaceEmbeddingsModel.java | {
"start": 1103,
"end": 3223
} | class ____ extends HuggingFaceModel {
public HuggingFaceEmbeddingsModel(
String inferenceEntityId,
TaskType taskType,
String service,
Map<String, Object> serviceSettings,
ChunkingSettings chunkingSettings,
@Nullable Map<String, Object> secrets,
ConfigurationParseContext context
) {
this(
inferenceEntityId,
taskType,
service,
HuggingFaceServiceSettings.fromMap(serviceSettings, context),
chunkingSettings,
DefaultSecretSettings.fromMap(secrets)
);
}
// Should only be used directly for testing
HuggingFaceEmbeddingsModel(
String inferenceEntityId,
TaskType taskType,
String service,
HuggingFaceServiceSettings serviceSettings,
ChunkingSettings chunkingSettings,
@Nullable DefaultSecretSettings secrets
) {
super(
new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, chunkingSettings),
new ModelSecrets(secrets),
serviceSettings,
secrets
);
}
public HuggingFaceEmbeddingsModel(HuggingFaceEmbeddingsModel model, HuggingFaceServiceSettings serviceSettings) {
this(
model.getInferenceEntityId(),
model.getTaskType(),
model.getConfigurations().getService(),
serviceSettings,
model.getConfigurations().getChunkingSettings(),
model.getSecretSettings()
);
}
@Override
public HuggingFaceServiceSettings getServiceSettings() {
return (HuggingFaceServiceSettings) super.getServiceSettings();
}
@Override
public DefaultSecretSettings getSecretSettings() {
return (DefaultSecretSettings) super.getSecretSettings();
}
@Override
public Integer getTokenLimit() {
return getServiceSettings().maxInputTokens();
}
@Override
public ExecutableAction accept(HuggingFaceActionVisitor creator) {
return creator.create(this);
}
}
| HuggingFaceEmbeddingsModel |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/jackson/Log4jJsonObjectMapper.java | {
"start": 1090,
"end": 1913
} | class ____ extends ObjectMapper {
private static final long serialVersionUID = 1L;
/**
* Create a new instance using the {@link Log4jJsonModule}.
*/
public Log4jJsonObjectMapper() {
this(false, true, false, false);
}
/**
* Create a new instance using the {@link Log4jJsonModule}.
*/
public Log4jJsonObjectMapper(
final boolean encodeThreadContextAsList,
final boolean includeStacktrace,
final boolean stacktraceAsString,
final boolean objectMessageAsJsonObject) {
this.registerModule(new Log4jJsonModule(
encodeThreadContextAsList, includeStacktrace, stacktraceAsString, objectMessageAsJsonObject));
this.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
}
}
| Log4jJsonObjectMapper |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/impl/FileSourceSplitReader.java | {
"start": 1813,
"end": 4519
} | class ____<T, SplitT extends FileSourceSplit>
implements SplitReader<RecordAndPosition<T>, SplitT> {
private static final Logger LOG = LoggerFactory.getLogger(FileSourceSplitReader.class);
private final Configuration config;
private final BulkFormat<T, SplitT> readerFactory;
private final Queue<SplitT> splits;
@Nullable private BulkFormat.Reader<T> currentReader;
@Nullable private String currentSplitId;
public FileSourceSplitReader(Configuration config, BulkFormat<T, SplitT> readerFactory) {
this.config = config;
this.readerFactory = readerFactory;
this.splits = new ArrayDeque<>();
}
@Override
public RecordsWithSplitIds<RecordAndPosition<T>> fetch() throws IOException {
checkSplitOrStartNext();
final BulkFormat.RecordIterator<T> nextBatch = currentReader.readBatch();
return nextBatch == null
? finishSplit()
: FileRecords.forRecords(currentSplitId, nextBatch);
}
@Override
public void handleSplitsChanges(final SplitsChange<SplitT> splitChange) {
if (!(splitChange instanceof SplitsAddition)) {
throw new UnsupportedOperationException(
String.format(
"The SplitChange type of %s is not supported.",
splitChange.getClass()));
}
LOG.debug("Handling split change {}", splitChange);
splits.addAll(splitChange.splits());
}
@Override
public void wakeUp() {}
@Override
public void close() throws Exception {
if (currentReader != null) {
currentReader.close();
}
}
private void checkSplitOrStartNext() throws IOException {
if (currentReader != null) {
return;
}
final SplitT nextSplit = splits.poll();
if (nextSplit == null) {
throw new IOException("Cannot fetch from another split - no split remaining");
}
currentSplitId = nextSplit.splitId();
final Optional<CheckpointedPosition> position = nextSplit.getReaderPosition();
currentReader =
position.isPresent()
? readerFactory.restoreReader(config, nextSplit)
: readerFactory.createReader(config, nextSplit);
}
private FileRecords<T> finishSplit() throws IOException {
if (currentReader != null) {
currentReader.close();
currentReader = null;
}
final FileRecords<T> finishRecords = FileRecords.finishedSplit(currentSplitId);
currentSplitId = null;
return finishRecords;
}
}
| FileSourceSplitReader |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/commands/transactional/HashTxCommandIntegrationTests.java | {
"start": 333,
"end": 574
} | class ____ extends HashCommandIntegrationTests {
@Inject
HashTxCommandIntegrationTests(StatefulRedisConnection<String, String> connection) {
super(TxSyncInvocationHandler.sync(connection));
}
}
| HashTxCommandIntegrationTests |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/util/FieldUtilsTests.java | {
"start": 1638,
"end": 1700
} | class ____ {
private String protectedField = "z";
}
}
| Nested |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/LazySqlResultMappingTest.java | {
"start": 4407,
"end": 4627
} | class ____ {
@Id
private Long id;
@Basic(fetch = FetchType.LAZY)
private Byte principal;
public User() {
}
public User(Long id, Byte principal) {
this.id = id;
this.principal = principal;
}
}
}
| User |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/NamedStrategyContributor.java | {
"start": 416,
"end": 876
} | interface ____ {
/**
* Allows registration of named strategy implementations.
* Called on bootstrap.
*/
void contributeStrategyImplementations(NamedStrategyContributions contributions);
/**
* Allows cleanup of (presumably previously {@linkplain #contributeStrategyImplementations registered}) strategy implementations.
* Called on shutdown.
*/
void clearStrategyImplementations(NamedStrategyContributions contributions);
}
| NamedStrategyContributor |
java | junit-team__junit5 | junit-jupiter-params/src/main/java/org/junit/jupiter/params/EvaluatedArgumentSet.java | {
"start": 1037,
"end": 3594
} | class ____ {
static EvaluatedArgumentSet allOf(Arguments arguments) {
@Nullable
Object[] all = arguments.get();
return create(all, all, arguments);
}
static EvaluatedArgumentSet of(Arguments arguments, IntUnaryOperator consumedLengthComputer) {
@Nullable
Object[] all = arguments.get();
@Nullable
Object[] consumed = dropSurplus(all, consumedLengthComputer.applyAsInt(all.length));
return create(all, consumed, arguments);
}
private static EvaluatedArgumentSet create(@Nullable Object[] all, @Nullable Object[] consumed,
Arguments arguments) {
return new EvaluatedArgumentSet(all, consumed, determineName(arguments));
}
private final @Nullable Object[] all;
private final @Nullable Object[] consumed;
private final Optional<String> name;
private EvaluatedArgumentSet(@Nullable Object[] all, @Nullable Object[] consumed, Optional<String> name) {
this.all = all;
this.consumed = consumed;
this.name = name;
}
int getTotalLength() {
return this.all.length;
}
@Nullable
Object[] getAllPayloads() {
return extractFromNamed(this.all, Named::getPayload);
}
int getConsumedLength() {
return this.consumed.length;
}
@Nullable
Object[] getConsumedArguments() {
return this.consumed;
}
@Nullable
Object[] getConsumedPayloads() {
return extractFromNamed(this.consumed, Named::getPayload);
}
@Nullable
Object getConsumedPayload(int index) {
return extractFromNamed(this.consumed[index], Named::getPayload);
}
Optional<String> getName() {
return this.name;
}
private static @Nullable Object[] dropSurplus(@Nullable Object[] arguments, int newLength) {
Preconditions.condition(newLength <= arguments.length,
() -> "New length %d must be less than or equal to the total length %d".formatted(newLength,
arguments.length));
return arguments.length > newLength ? Arrays.copyOf(arguments, newLength) : arguments;
}
private static Optional<String> determineName(Arguments arguments) {
if (arguments instanceof ArgumentSet set) {
return Optional.of(set.getName());
}
return Optional.empty();
}
private static @Nullable Object[] extractFromNamed(@Nullable Object[] arguments,
Function<Named<?>, @Nullable Object> mapper) {
return Arrays.stream(arguments) //
.map(argument -> extractFromNamed(argument, mapper)) //
.toArray();
}
private static @Nullable Object extractFromNamed(@Nullable Object argument,
Function<Named<?>, @Nullable Object> mapper) {
return argument instanceof Named<?> named ? mapper.apply(named) : argument;
}
}
| EvaluatedArgumentSet |
java | apache__logging-log4j2 | log4j-iostreams/src/main/java/org/apache/logging/log4j/io/LoggerBufferedInputStream.java | {
"start": 1203,
"end": 2837
} | class ____ extends BufferedInputStream {
private static final String FQCN = LoggerBufferedInputStream.class.getName();
private InternalBufferedInputStream stream;
protected LoggerBufferedInputStream(
final InputStream in,
final Charset charset,
final ExtendedLogger logger,
final String fqcn,
final Level level,
final Marker marker) {
super(in);
this.stream = new InternalBufferedInputStream(in, charset, logger, fqcn == null ? FQCN : fqcn, level, marker);
}
protected LoggerBufferedInputStream(
final InputStream in,
final Charset charset,
final int size,
final ExtendedLogger logger,
final String fqcn,
final Level level,
final Marker marker) {
super(in);
this.stream =
new InternalBufferedInputStream(in, charset, size, logger, fqcn == null ? FQCN : fqcn, level, marker);
}
@Override
public void close() throws IOException {
stream.close();
}
@Override
public synchronized int read() throws IOException {
return stream.read();
}
@Override
public int read(final byte[] b) throws IOException {
return stream.read(b);
}
@Override
public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
return stream.read(b, off, len);
}
@Override
public String toString() {
return LoggerBufferedInputStream.class.getSimpleName() + stream.toString();
}
}
| LoggerBufferedInputStream |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/testutil/compilation/annotation/Diagnostic.java | {
"start": 333,
"end": 1815
} | interface ____ {
/**
* The type for which the diagnostic was created.
*
* @return The type for which the diagnostic was created.
*/
Class<?> type() default void.class;
/**
* The expected kind of diagnostic.
*
* @return The expected kind of diagnostic.
*/
Kind kind();
/**
* The expected line number of the diagnostic.
*
* @return The expected line number of the diagnostic.
*/
long line() default -1;
/**
* In case compilers report diagnostics on different lines this can be used as the alternative expected line number
* of the diagnostic.
* <p>
* This should be used as a last resort when the compilers report the diagnostic on a wrong line.
*
* @return The alternative line number of the diagnostic.
*/
long alternativeLine() default -1;
/**
* A message matching the exact expected message of the diagnostic.
*
* @return A message matching the exact expected message of the
* diagnostic.
*/
String message() default "";
/**
* A regular expression matching the expected message of the diagnostic.
* Wild-cards matching any character (".*") will be added to the beginning
* and end of the given expression when applying it.
*
* @return A regular expression matching the expected message of the
* diagnostic.
*/
String messageRegExp() default ".*";
}
| Diagnostic |
java | elastic__elasticsearch | x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamAction.java | {
"start": 813,
"end": 1169
} | class ____ extends ActionType<AcknowledgedResponse> {
public static final CancelReindexDataStreamAction INSTANCE = new CancelReindexDataStreamAction();
public static final String NAME = "indices:admin/data_stream/reindex_cancel";
public CancelReindexDataStreamAction() {
super(NAME);
}
public static | CancelReindexDataStreamAction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/MultiSingleTableLoadTest.java | {
"start": 4398,
"end": 4730
} | class ____ extends A {
@ManyToOne(optional = true, cascade = CascadeType.ALL, fetch = FetchType.LAZY)
@JoinColumn(name = "x_id")
private Y x;
public B() {
}
public B(long id, Y x) {
super( id );
this.x = x;
}
public Y getX() {
return x;
}
}
@Entity(name = "C")
@DiscriminatorValue("C")
public static | B |
java | elastic__elasticsearch | x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsAction.java | {
"start": 2310,
"end": 2670
} | class ____ extends ActionType<GetGlobalCheckpointsAction.Response> {
public static final GetGlobalCheckpointsAction INSTANCE = new GetGlobalCheckpointsAction();
public static final String NAME = "indices:monitor/fleet/global_checkpoints";
private GetGlobalCheckpointsAction() {
super(NAME);
}
public static | GetGlobalCheckpointsAction |
java | apache__flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/PrintSink.java | {
"start": 1850,
"end": 3776
} | class ____<IN> implements Sink<IN>, SupportsConcurrentExecutionAttempts {
private static final long serialVersionUID = 1L;
private final String sinkIdentifier;
private final boolean isStdErr;
/** Instantiates a print sink function that prints to STDOUT. */
public PrintSink() {
this("");
}
/**
* Instantiates a print sink that prints to STDOUT or STDERR.
*
* @param isStdErr True, if the format should print to standard error instead of standard out.
*/
public PrintSink(final boolean isStdErr) {
this("", isStdErr);
}
/**
* Instantiates a print sink that prints to STDOUT and gives a sink identifier.
*
* @param sinkIdentifier Message that identifies the sink and is prefixed to the output of the
* value
*/
public PrintSink(final String sinkIdentifier) {
this(sinkIdentifier, false);
}
/**
* Instantiates a print sink that prints to STDOUT or STDERR and gives a sink identifier.
*
* @param sinkIdentifier Message that identifies the sink and is prefixed to the output of the
* value
* @param isStdErr True if the sink should print to STDERR instead of STDOUT.
*/
public PrintSink(final String sinkIdentifier, final boolean isStdErr) {
this.sinkIdentifier = sinkIdentifier;
this.isStdErr = isStdErr;
}
@Override
public SinkWriter<IN> createWriter(WriterInitContext context) throws IOException {
final PrintSinkOutputWriter<IN> writer =
new PrintSinkOutputWriter<>(sinkIdentifier, isStdErr);
writer.open(
context.getTaskInfo().getIndexOfThisSubtask(),
context.getTaskInfo().getNumberOfParallelSubtasks());
return writer;
}
@Override
public String toString() {
return "Print to " + (isStdErr ? "System.err" : "System.out");
}
}
| PrintSink |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/expressions/aggregate/Sum.java | {
"start": 1176,
"end": 1619
} | class ____ extends ExpressionWithToString implements AggregateFunc {
private final Expression input;
private final boolean isDistinct;
public Sum(Expression column, boolean isDistinct) {
this.input = column;
this.isDistinct = isDistinct;
}
public Expression column() { return input; }
public boolean isDistinct() { return isDistinct; }
@Override
public Expression[] children() { return new Expression[]{ input }; }
}
| Sum |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/impl/HelloServiceImpl.java | {
"start": 912,
"end": 1045
} | class ____ implements HelloService {
public String sayHello(String name) {
return "Hello, " + name;
}
}
| HelloServiceImpl |
java | apache__camel | components/camel-jgroups/src/test/java/org/apache/camel/component/jgroups/JGroupsClusterTest.java | {
"start": 1631,
"end": 4410
} | class ____ extends RouteBuilder {
@Override
public void configure() {
from(jgroupsEndpoint).filter(dropNonCoordinatorViews()).process(new Processor() {
@Override
public void process(Exchange exchange) {
String camelContextName = exchange.getContext().getName();
if (!camelContextName.equals(master)) {
master = camelContextName;
nominationCount++;
}
}
});
}
}
@BeforeEach
public void setUp() throws Exception {
firstCamelContext = new DefaultCamelContext();
firstCamelContext.getCamelContextExtension().setName("firstNode");
firstCamelContext.addRoutes(new Builder());
secondCamelContext = new DefaultCamelContext();
secondCamelContext.getCamelContextExtension().setName("secondNode");
secondCamelContext.addRoutes(new Builder());
}
// Tests
@Test
public void shouldElectSecondNode() {
// When
firstCamelContext.start();
String firstMaster = master;
secondCamelContext.start();
firstCamelContext.stop();
String secondMaster = master;
secondCamelContext.stop();
// Then
assertEquals(firstCamelContext.getName(), firstMaster);
assertEquals(secondCamelContext.getName(), secondMaster);
assertEquals(2, nominationCount);
}
@Test
public void shouldKeepMaster() {
// When
firstCamelContext.start();
String firstMaster = master;
secondCamelContext.start();
secondCamelContext.stop();
String secondMaster = master;
firstCamelContext.stop();
// Then
assertEquals(firstCamelContext.getName(), firstMaster);
assertEquals(firstCamelContext.getName(), secondMaster);
assertEquals(1, nominationCount);
}
@Test
public void shouldElectSecondNodeAndReturnToFirst() {
// When
firstCamelContext.start();
String firstMaster = master;
secondCamelContext.start();
firstCamelContext.stop();
String secondMaster = master;
firstCamelContext.start();
String masterAfterRestartOfFirstNode = master;
secondCamelContext.stop();
String finalMaster = master;
firstCamelContext.stop();
// Then
assertEquals(firstCamelContext.getName(), firstMaster);
assertEquals(secondCamelContext.getName(), secondMaster);
assertEquals(secondCamelContext.getName(), masterAfterRestartOfFirstNode);
assertEquals(firstCamelContext.getName(), finalMaster);
assertEquals(3, nominationCount);
}
}
| Builder |
java | square__javapoet | src/main/java/com/squareup/javapoet/CodeBlock.java | {
"start": 6382,
"end": 15600
} | class ____ {
final List<String> formatParts = new ArrayList<>();
final List<Object> args = new ArrayList<>();
private Builder() {
}
public boolean isEmpty() {
return formatParts.isEmpty();
}
/**
* Adds code using named arguments.
*
* <p>Named arguments specify their name after the '$' followed by : and the corresponding type
* character. Argument names consist of characters in {@code a-z, A-Z, 0-9, and _} and must
* start with a lowercase character.
*
* <p>For example, to refer to the type {@link java.lang.Integer} with the argument name {@code
* clazz} use a format string containing {@code $clazz:T} and include the key {@code clazz} with
* value {@code java.lang.Integer.class} in the argument map.
*/
public Builder addNamed(String format, Map<String, ?> arguments) {
int p = 0;
for (String argument : arguments.keySet()) {
checkArgument(LOWERCASE.matcher(argument).matches(),
"argument '%s' must start with a lowercase character", argument);
}
while (p < format.length()) {
int nextP = format.indexOf("$", p);
if (nextP == -1) {
formatParts.add(format.substring(p));
break;
}
if (p != nextP) {
formatParts.add(format.substring(p, nextP));
p = nextP;
}
Matcher matcher = null;
int colon = format.indexOf(':', p);
if (colon != -1) {
int endIndex = Math.min(colon + 2, format.length());
matcher = NAMED_ARGUMENT.matcher(format.substring(p, endIndex));
}
if (matcher != null && matcher.lookingAt()) {
String argumentName = matcher.group("argumentName");
checkArgument(arguments.containsKey(argumentName), "Missing named argument for $%s",
argumentName);
char formatChar = matcher.group("typeChar").charAt(0);
addArgument(format, formatChar, arguments.get(argumentName));
formatParts.add("$" + formatChar);
p += matcher.regionEnd();
} else {
checkArgument(p < format.length() - 1, "dangling $ at end");
checkArgument(isNoArgPlaceholder(format.charAt(p + 1)),
"unknown format $%s at %s in '%s'", format.charAt(p + 1), p + 1, format);
formatParts.add(format.substring(p, p + 2));
p += 2;
}
}
return this;
}
/**
* Add code with positional or relative arguments.
*
* <p>Relative arguments map 1:1 with the placeholders in the format string.
*
* <p>Positional arguments use an index after the placeholder to identify which argument index
* to use. For example, for a literal to reference the 3rd argument: "$3L" (1 based index)
*
* <p>Mixing relative and positional arguments in a call to add is invalid and will result in an
* error.
*/
public Builder add(String format, Object... args) {
boolean hasRelative = false;
boolean hasIndexed = false;
int relativeParameterCount = 0;
int[] indexedParameterCount = new int[args.length];
for (int p = 0; p < format.length(); ) {
if (format.charAt(p) != '$') {
int nextP = format.indexOf('$', p + 1);
if (nextP == -1) nextP = format.length();
formatParts.add(format.substring(p, nextP));
p = nextP;
continue;
}
p++; // '$'.
// Consume zero or more digits, leaving 'c' as the first non-digit char after the '$'.
int indexStart = p;
char c;
do {
checkArgument(p < format.length(), "dangling format characters in '%s'", format);
c = format.charAt(p++);
} while (c >= '0' && c <= '9');
int indexEnd = p - 1;
// If 'c' doesn't take an argument, we're done.
if (isNoArgPlaceholder(c)) {
checkArgument(
indexStart == indexEnd, "$$, $>, $<, $[, $], $W, and $Z may not have an index");
formatParts.add("$" + c);
continue;
}
// Find either the indexed argument, or the relative argument. (0-based).
int index;
if (indexStart < indexEnd) {
index = Integer.parseInt(format.substring(indexStart, indexEnd)) - 1;
hasIndexed = true;
if (args.length > 0) {
indexedParameterCount[index % args.length]++; // modulo is needed, checked below anyway
}
} else {
index = relativeParameterCount;
hasRelative = true;
relativeParameterCount++;
}
checkArgument(index >= 0 && index < args.length,
"index %d for '%s' not in range (received %s arguments)",
index + 1, format.substring(indexStart - 1, indexEnd + 1), args.length);
checkArgument(!hasIndexed || !hasRelative, "cannot mix indexed and positional parameters");
addArgument(format, c, args[index]);
formatParts.add("$" + c);
}
if (hasRelative) {
checkArgument(relativeParameterCount >= args.length,
"unused arguments: expected %s, received %s", relativeParameterCount, args.length);
}
if (hasIndexed) {
List<String> unused = new ArrayList<>();
for (int i = 0; i < args.length; i++) {
if (indexedParameterCount[i] == 0) {
unused.add("$" + (i + 1));
}
}
String s = unused.size() == 1 ? "" : "s";
checkArgument(unused.isEmpty(), "unused argument%s: %s", s, String.join(", ", unused));
}
return this;
}
private boolean isNoArgPlaceholder(char c) {
return c == '$' || c == '>' || c == '<' || c == '[' || c == ']' || c == 'W' || c == 'Z';
}
private void addArgument(String format, char c, Object arg) {
switch (c) {
case 'N':
this.args.add(argToName(arg));
break;
case 'L':
this.args.add(argToLiteral(arg));
break;
case 'S':
this.args.add(argToString(arg));
break;
case 'T':
this.args.add(argToType(arg));
break;
default:
throw new IllegalArgumentException(
String.format("invalid format string: '%s'", format));
}
}
private String argToName(Object o) {
if (o instanceof CharSequence) return o.toString();
if (o instanceof ParameterSpec) return ((ParameterSpec) o).name;
if (o instanceof FieldSpec) return ((FieldSpec) o).name;
if (o instanceof MethodSpec) return ((MethodSpec) o).name;
if (o instanceof TypeSpec) return ((TypeSpec) o).name;
throw new IllegalArgumentException("expected name but was " + o);
}
private Object argToLiteral(Object o) {
return o;
}
private String argToString(Object o) {
return o != null ? String.valueOf(o) : null;
}
private TypeName argToType(Object o) {
if (o instanceof TypeName) return (TypeName) o;
if (o instanceof TypeMirror) return TypeName.get((TypeMirror) o);
if (o instanceof Element) return TypeName.get(((Element) o).asType());
if (o instanceof Type) return TypeName.get((Type) o);
throw new IllegalArgumentException("expected type but was " + o);
}
/**
* @param controlFlow the control flow construct and its code, such as "if (foo == 5)".
* Shouldn't contain braces or newline characters.
*/
public Builder beginControlFlow(String controlFlow, Object... args) {
add(controlFlow + " {\n", args);
indent();
return this;
}
/**
* @param controlFlow the control flow construct and its code, such as "else if (foo == 10)".
* Shouldn't contain braces or newline characters.
*/
public Builder nextControlFlow(String controlFlow, Object... args) {
unindent();
add("} " + controlFlow + " {\n", args);
indent();
return this;
}
public Builder endControlFlow() {
unindent();
add("}\n");
return this;
}
/**
* @param controlFlow the optional control flow construct and its code, such as
* "while(foo == 20)". Only used for "do/while" control flows.
*/
public Builder endControlFlow(String controlFlow, Object... args) {
unindent();
add("} " + controlFlow + ";\n", args);
return this;
}
public Builder addStatement(String format, Object... args) {
add("$[");
add(format, args);
add(";\n$]");
return this;
}
public Builder addStatement(CodeBlock codeBlock) {
return addStatement("$L", codeBlock);
}
public Builder add(CodeBlock codeBlock) {
formatParts.addAll(codeBlock.formatParts);
args.addAll(codeBlock.args);
return this;
}
public Builder indent() {
this.formatParts.add("$>");
return this;
}
public Builder unindent() {
this.formatParts.add("$<");
return this;
}
public Builder clear() {
formatParts.clear();
args.clear();
return this;
}
public CodeBlock build() {
return new CodeBlock(this);
}
}
private static final | Builder |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1310_noasm.java | {
"start": 212,
"end": 579
} | class ____ extends TestCase {
public void test_trim() throws Exception {
Model model = new Model();
model.value = " a ";
assertEquals("{\"value\":\"a\"}", JSON.toJSONString(model));
Model model2 = JSON.parseObject("{\"value\":\" a \"}", Model.class);
assertEquals("a", model2.value);
}
private static | Issue1310_noasm |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.