language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java | {
"start": 917,
"end": 14864
} | class ____ extends ESTestCase {
public void testToXContent() throws IOException {
String connectorId = "test-connector";
String content = XContentHelper.stripWhitespace("""
{
"api_key_id":"test-aki",
"api_key_secret_id":"test-aksi",
"custom_scheduling":{
"schedule-key":{
"configuration_overrides":{
"domain_allowlist":[
"https://example.com"
],
"max_crawl_depth":1,
"seed_urls":[
"https://example.com/blog",
"https://example.com/info"
],
"sitemap_discovery_disabled":true,
"sitemap_urls":[
"https://example.com/sitemap.xml"
]
},
"enabled":true,
"interval":"0 0 12 * * ?",
"last_synced":null,
"name":"My Schedule"
}
},
"configuration":{
"some_field":{
"default_value":null,
"depends_on":[
{
"field":"some_field",
"value":true
}
],
"display":"textbox",
"label":"Very important field",
"options":[],
"order":4,
"required":true,
"sensitive":false,
"tooltip":"Wow, this tooltip is useful.",
"type":"str",
"ui_restrictions":[],
"validations":[
{
"constraint":0,
"type":"greater_than"
}
],
"value":""
},
"field_with_null_tooltip":{
"default_value":null,
"depends_on":[
{
"field":"some_field",
"value":true
}
],
"display":"textbox",
"label":"Very important field",
"options":[],
"order":4,
"required":true,
"sensitive":false,
"tooltip":null,
"type":"str",
"ui_restrictions":[],
"validations":[
{
"constraint":0,
"type":"greater_than"
}
],
"value":""
}
},
"description":"test-connector",
"features":{
"document_level_security":{
"enabled":true
},
"sync_rules":{
"advanced":{
"enabled":false
},
"basic":{
"enabled":true
}
},
"native_connector_api_keys": {
"enabled": true
}
},
"filtering":[
{
"active":{
"advanced_snippet":{
"created_at":"2023-11-09T15:13:08.231Z",
"updated_at":"2023-11-09T15:13:08.231Z",
"value":[
{
"tables": [
"some_table"
],
"query": "SELECT id, st_geohash(coordinates) FROM my_db.some_table;"
}
]
},
"rules":[
{
"created_at":"2023-11-09T15:13:08.231Z",
"field":"_",
"id":"DEFAULT",
"order":0,
"policy":"include",
"rule":"regex",
"updated_at":"2023-11-09T15:13:08.231Z",
"value":".*"
}
],
"validation":{
"errors":[],
"state":"valid"
}
},
"domain":"DEFAULT",
"draft":{
"advanced_snippet":{
"created_at":"2023-11-09T15:13:08.231Z",
"updated_at":"2023-11-09T15:13:08.231Z",
"value":[
{
"tables": [
"some_table"
],
"query": "SELECT id, st_geohash(coordinates) FROM my_db.some_table;"
}
]
},
"rules":[
{
"created_at":"2023-11-09T15:13:08.231Z",
"field":"_",
"id":"DEFAULT",
"order":0,
"policy":"include",
"rule":"regex",
"updated_at":"2023-11-09T15:13:08.231Z",
"value":".*"
}
],
"validation":{
"errors":[],
"state":"valid"
}
}
}
],
"index_name":"search-test",
"is_native":true,
"language":"polish",
"last_access_control_sync_error":"some error",
"last_access_control_sync_scheduled_at":"2023-11-09T15:13:08.231Z",
"last_access_control_sync_status":"pending",
"last_deleted_document_count":42,
"last_incremental_sync_scheduled_at":"2023-11-09T15:13:08.231Z",
"last_indexed_document_count":42,
"last_seen":"2023-11-09T15:13:08.231Z",
"last_sync_error":"some error",
"last_sync_scheduled_at":"2024-11-09T15:13:08.231Z",
"last_sync_status":"completed",
"last_synced":"2024-11-09T15:13:08.231Z",
"name":"test-name",
"pipeline":{
"extract_binary_content":true,
"name":"search-default-ingestion",
"reduce_whitespace":true,
"run_ml_inference":false
},
"scheduling":{
"access_control":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"full":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"incremental":{
"enabled":false,
"interval":"0 0 0 * * ?"
}
},
"service_type":"google_drive",
"status":"needs_configuration",
"sync_now":false
}""");
Connector connector = Connector.fromXContentBytes(new BytesArray(content), connectorId, XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(connector, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
Connector parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = Connector.fromXContent(parser, connectorId);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToContent_WithNullValues() throws IOException {
String connectorId = "test-connector";
String content = XContentHelper.stripWhitespace("""
{
"api_key_id": null,
"api_key_secret_id": null,
"custom_scheduling":{},
"configuration":{},
"description": null,
"features": null,
"filtering":[],
"index_name": "search-test",
"is_native": false,
"language": null,
"last_access_control_sync_error": null,
"last_access_control_sync_scheduled_at": null,
"last_access_control_sync_status": null,
"last_deleted_document_count":null,
"last_incremental_sync_scheduled_at": null,
"last_indexed_document_count":null,
"last_seen": null,
"last_sync_error": null,
"last_sync_scheduled_at": null,
"last_sync_status": null,
"last_synced": null,
"name": null,
"pipeline":{
"extract_binary_content":true,
"name":"search-default-ingestion",
"reduce_whitespace":true,
"run_ml_inference":false
},
"scheduling":{
"access_control":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"full":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"incremental":{
"enabled":false,
"interval":"0 0 0 * * ?"
}
},
"service_type": null,
"status": "needs_configuration",
"sync_now":false
}""");
Connector connector = Connector.fromXContentBytes(new BytesArray(content), connectorId, XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(connector, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
Connector parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = Connector.fromXContent(parser, connectorId);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContent_withOptionalFieldsMissing() throws IOException {
// This test is to ensure the doc can serialize without fields that have been added since 8.12.
// This is to avoid breaking serverless, which has a regular BC built
// that can be broken if we haven't made migrations yet.
String connectorId = "test-connector";
// Missing from doc:
// api_key_secret_id
String content = XContentHelper.stripWhitespace("""
{
"api_key_id": null,
"custom_scheduling":{},
"configuration":{},
"description": null,
"features": null,
"filtering":[],
"index_name": "search-test",
"is_native": false,
"language": null,
"last_access_control_sync_error": null,
"last_access_control_sync_scheduled_at": null,
"last_access_control_sync_status": null,
"last_incremental_sync_scheduled_at": null,
"last_seen": null,
"last_sync_error": null,
"last_sync_scheduled_at": null,
"last_sync_status": null,
"last_synced": null,
"name": null,
"pipeline":{
"extract_binary_content":true,
"name":"search-default-ingestion",
"reduce_whitespace":true,
"run_ml_inference":false
},
"scheduling":{
"access_control":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"full":{
"enabled":false,
"interval":"0 0 0 * * ?"
},
"incremental":{
"enabled":false,
"interval":"0 0 0 * * ?"
}
},
"service_type": null,
"status": "needs_configuration",
"sync_now":false
}""");
Connector connector = Connector.fromXContentBytes(new BytesArray(content), connectorId, XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(connector, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
Connector parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = Connector.fromXContent(parser, connectorId);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
assertThat(parsed.getApiKeySecretId(), equalTo(null));
}
}
| ConnectorTests |
java | junit-team__junit5 | junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/JUnit4TestCaseWithRunnerWithCustomUniqueIdsAndDisplayNames.java | {
"start": 556,
"end": 700
} | class ____ {
@Test
@Label("(TestMethod)")
public void test() {
Assert.fail();
}
}
| JUnit4TestCaseWithRunnerWithCustomUniqueIdsAndDisplayNames |
java | micronaut-projects__micronaut-core | messaging/src/main/java/io/micronaut/messaging/MessageHeaders.java | {
"start": 788,
"end": 840
} | interface ____ extends MutableHeaders {
}
| MessageHeaders |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java | {
"start": 1837,
"end": 1909
} | class ____ creates the schema and generates test data.
*/
public final | that |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/condition/RequestCondition.java | {
"start": 1299,
"end": 2760
} | interface ____<T> {
/**
* Combine this condition with another such as conditions from a
* type-level and method-level {@code @RequestMapping} annotation.
* @param other the condition to combine with.
* @return a request condition instance that is the result of combining
* the two condition instances.
*/
T combine(T other);
/**
* Check if the condition matches the request returning a potentially new
* instance created for the current request. For example a condition with
* multiple URL patterns may return a new instance only with those patterns
* that match the request.
* <p>For CORS pre-flight requests, conditions should match to the would-be,
* actual request (for example, URL pattern, query parameters, and the HTTP method
* from the "Access-Control-Request-Method" header). If a condition cannot
* be matched to a pre-flight request it should return an instance with
* empty content thus not causing a failure to match.
* @return a condition instance in case of a match or {@code null} otherwise.
*/
@Nullable T getMatchingCondition(HttpServletRequest request);
/**
* Compare this condition to another condition in the context of
* a specific request. This method assumes both instances have
* been obtained via {@link #getMatchingCondition(HttpServletRequest)}
* to ensure they have content relevant to current request only.
*/
int compareTo(T other, HttpServletRequest request);
}
| RequestCondition |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/task/support/ContextPropagatingTaskDecoratorTests.java | {
"start": 1827,
"end": 2117
} | class ____ {
private static final ThreadLocal<String> holder = new ThreadLocal<>();
static void setValue(String value) {
holder.set(value);
}
static String getValue() {
return holder.get();
}
static void reset() {
holder.remove();
}
}
static | TestThreadLocalHolder |
java | apache__camel | core/camel-core-reifier/src/main/java/org/apache/camel/reifier/AbstractReifier.java | {
"start": 1565,
"end": 6636
} | class ____ implements BeanRepository {
protected final org.apache.camel.Route route;
protected final CamelContext camelContext;
public AbstractReifier(Route route) {
this.route = ObjectHelper.notNull(route, "Route");
this.camelContext = route.getCamelContext();
}
public AbstractReifier(CamelContext camelContext) {
this.route = null;
this.camelContext = ObjectHelper.notNull(camelContext, "CamelContext");
}
protected CamelContext getCamelContext() {
return camelContext;
}
protected String parseString(String text) {
return CamelContextHelper.parseText(camelContext, text);
}
protected Boolean parseBoolean(String text) {
return CamelContextHelper.parseBoolean(camelContext, text);
}
protected boolean parseBoolean(String text, boolean def) {
Boolean b = parseBoolean(text);
return b != null ? b : def;
}
protected Long parseLong(String text) {
return CamelContextHelper.parseLong(camelContext, text);
}
protected long parseLong(String text, long def) {
Long l = parseLong(text);
return l != null ? l : def;
}
protected Long parseDuration(String text) {
Duration d = CamelContextHelper.parseDuration(camelContext, text);
return d != null ? d.toMillis() : null;
}
protected long parseDuration(String text, long def) {
Duration d = CamelContextHelper.parseDuration(camelContext, text);
return d != null ? d.toMillis() : def;
}
protected Integer parseInt(String text) {
return CamelContextHelper.parseInteger(camelContext, text);
}
protected int parseInt(String text, int def) {
Integer i = parseInt(text);
return i != null ? i : def;
}
protected Float parseFloat(String text) {
return CamelContextHelper.parseFloat(camelContext, text);
}
protected float parseFloat(String text, float def) {
Float f = parseFloat(text);
return f != null ? f : def;
}
protected <T> T parse(Class<T> clazz, String text) {
return CamelContextHelper.parse(camelContext, clazz, text);
}
protected <T> T parse(Class<T> clazz, Object text) {
if (text instanceof String string) {
text = parseString(string);
}
return CamelContextHelper.convertTo(camelContext, clazz, text);
}
protected Expression createExpression(ExpressionDefinition expression) {
return ExpressionReifier.reifier(camelContext, expression).createExpression();
}
protected Expression createExpression(ExpressionSubElementDefinition expression) {
return ExpressionReifier.reifier(camelContext, expression).createExpression();
}
protected Predicate createPredicate(ExpressionDefinition expression) {
return ExpressionReifier.reifier(camelContext, expression).createPredicate();
}
protected Predicate createPredicate(ExpressionSubElementDefinition expression) {
return ExpressionReifier.reifier(camelContext, expression).createPredicate();
}
protected Object or(Object a, Object b) {
return a != null ? a : b;
}
protected Object asRef(String s) {
return s != null ? s.startsWith("#") ? s : "#" + s : null;
}
protected BeanRepository getRegistry() {
return camelContext.getRegistry();
}
public <T> T mandatoryLookup(String name, Class<T> type) {
name = parseString(name);
Object obj = lookupByNameAndType(name, type);
if (obj == null) {
throw new NoSuchBeanException(name, type.getName());
}
return type.cast(obj);
}
@Override
public Object lookupByName(String name) {
if (name == null) {
return null;
}
name = parseString(name);
if (EndpointHelper.isReferenceParameter(name)) {
return EndpointHelper.resolveReferenceParameter(camelContext, name, Object.class, false);
} else {
return getRegistry().lookupByName(name);
}
}
public <T> T lookupByNameAndType(String name, Class<T> type) {
if (name == null) {
return null;
}
name = parseString(name);
if (EndpointHelper.isReferenceParameter(name)) {
return EndpointHelper.resolveReferenceParameter(camelContext, name, type, false);
} else {
return getRegistry().lookupByNameAndType(name, type);
}
}
@Override
public <T> Map<String, T> findByTypeWithName(Class<T> type) {
return getRegistry().findByTypeWithName(type);
}
@Override
public <T> Set<T> findByType(Class<T> type) {
return getRegistry().findByType(type);
}
@Override
public Object unwrap(Object value) {
return getRegistry().unwrap(value);
}
public Endpoint resolveEndpoint(String uri) throws NoSuchEndpointException {
return CamelContextHelper.getMandatoryEndpoint(camelContext, uri);
}
}
| AbstractReifier |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jaxb/deployment/src/test/java/io/quarkus/resteasy/reactive/jaxb/deployment/test/MultipartTest.java | {
"start": 5660,
"end": 5837
} | class ____ {
@RestForm
String name;
@RestForm
@PartType(MediaType.TEXT_XML)
Person person;
}
public static | MultipartOutputResponse |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxTimeout.java | {
"start": 9559,
"end": 9630
} | interface ____ {
long index();
void cancel();
}
| IndexedCancellable |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/xml/StaxResult.java | {
"start": 1962,
"end": 4190
} | class ____ extends SAXResult {
private @Nullable XMLEventWriter eventWriter;
private @Nullable XMLStreamWriter streamWriter;
/**
* Construct a new instance of the {@code StaxResult} with the specified {@code XMLEventWriter}.
* @param eventWriter the {@code XMLEventWriter} to write to
*/
public StaxResult(XMLEventWriter eventWriter) {
StaxEventHandler handler = new StaxEventHandler(eventWriter);
super.setHandler(handler);
super.setLexicalHandler(handler);
this.eventWriter = eventWriter;
}
/**
* Construct a new instance of the {@code StaxResult} with the specified {@code XMLStreamWriter}.
* @param streamWriter the {@code XMLStreamWriter} to write to
*/
public StaxResult(XMLStreamWriter streamWriter) {
StaxStreamHandler handler = new StaxStreamHandler(streamWriter);
super.setHandler(handler);
super.setLexicalHandler(handler);
this.streamWriter = streamWriter;
}
/**
* Return the {@code XMLEventWriter} used by this {@code StaxResult}.
* <p>If this {@code StaxResult} was created with an {@code XMLStreamWriter},
* the result will be {@code null}.
* @return the StAX event writer used by this result
* @see #StaxResult(javax.xml.stream.XMLEventWriter)
*/
public @Nullable XMLEventWriter getXMLEventWriter() {
return this.eventWriter;
}
/**
* Return the {@code XMLStreamWriter} used by this {@code StaxResult}.
* <p>If this {@code StaxResult} was created with an {@code XMLEventConsumer},
* the result will be {@code null}.
* @return the StAX stream writer used by this result
* @see #StaxResult(javax.xml.stream.XMLStreamWriter)
*/
public @Nullable XMLStreamWriter getXMLStreamWriter() {
return this.streamWriter;
}
/**
* Throws an {@code UnsupportedOperationException}.
* @throws UnsupportedOperationException always
*/
@Override
public void setHandler(ContentHandler handler) {
throw new UnsupportedOperationException("setHandler is not supported");
}
/**
* Throws an {@code UnsupportedOperationException}.
* @throws UnsupportedOperationException always
*/
@Override
public void setLexicalHandler(LexicalHandler handler) {
throw new UnsupportedOperationException("setLexicalHandler is not supported");
}
}
| StaxResult |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/xpack/compute/operator/lookup/EnrichResultBuilderForBoolean.java | {
"start": 822,
"end": 898
} | class ____ generated. Edit `X-EnrichResultBuilder.java.st` instead.
*/
final | is |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/VertexFinishedStateCheckerTest.java | {
"start": 2118,
"end": 14808
} | class ____ {
@RegisterExtension
private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_EXTENSION =
TestingUtils.defaultExecutorExtension();
@Test
void testRestoringPartiallyFinishedChainsFailsWithoutUidHash() throws Exception {
// If useUidHash is set to false, the operator states would still be keyed with the
// generated ID, which simulates the case of restoring a checkpoint taken after jobs
// started. The checker should still be able to access the stored state correctly, otherwise
// it would mark op1 as running and pass the check wrongly.
testRestoringPartiallyFinishedChainsFails(false);
}
@Test
void testRestoringPartiallyFinishedChainsFailsWithUidHash() throws Exception {
testRestoringPartiallyFinishedChainsFails(true);
}
private void testRestoringPartiallyFinishedChainsFails(boolean useUidHash) throws Exception {
final JobVertexID jobVertexID1 = new JobVertexID();
final JobVertexID jobVertexID2 = new JobVertexID();
// The op1 has uidHash set.
OperatorIDPair op1 =
OperatorIDPair.of(
new OperatorID(), new OperatorID(), "operatorName", "operatorUid");
OperatorIDPair op2 = OperatorIDPair.generatedIDOnly(new OperatorID());
OperatorIDPair op3 = OperatorIDPair.generatedIDOnly(new OperatorID());
final ExecutionGraph graph =
new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
.addJobVertex(jobVertexID2, 1, 1, singletonList(op3), true)
.addJobVertex(jobVertexID1, 1, 1, Arrays.asList(op1, op2), true)
.build(EXECUTOR_EXTENSION.getExecutor());
Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
operatorStates.put(
useUidHash ? op1.getUserDefinedOperatorID().get() : op1.getGeneratedOperatorID(),
new FullyFinishedOperatorState(null, null, op1.getGeneratedOperatorID(), 1, 1));
operatorStates.put(
op2.getGeneratedOperatorID(),
new OperatorState(null, null, op2.getGeneratedOperatorID(), 1, 1));
Set<ExecutionJobVertex> vertices = new HashSet<>();
vertices.add(graph.getJobVertex(jobVertexID1));
VertexFinishedStateChecker finishedStateChecker =
new VertexFinishedStateChecker(vertices, operatorStates);
assertThatThrownBy(finishedStateChecker::validateOperatorsFinishedState)
.hasMessage(
"Can not restore vertex "
+ "anon("
+ jobVertexID1
+ ")"
+ " which contain mixed operator finished state: [ALL_RUNNING, FULLY_FINISHED]")
.isInstanceOf(FlinkRuntimeException.class);
}
@Test
void testAddingRunningOperatorBeforeFinishedOneFails() throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.ALL_RUNNING,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.FULLY_FINISHED,
new DistributionPattern[] {DistributionPattern.ALL_TO_ALL},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with fully finished vertices"
+ " predeceased with the ones not fully finished. Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a predecessor not fully finished");
}
@Test
void testAddingPartiallyFinishedOperatorBeforeFinishedOneFails() throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.FULLY_FINISHED,
new DistributionPattern[] {DistributionPattern.ALL_TO_ALL},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with fully finished vertices"
+ " predeceased with the ones not fully finished. Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a predecessor not fully finished");
}
@Test
void testAddingAllRunningOperatorBeforePartiallyFinishedOneWithAllToAllFails()
throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.ALL_RUNNING,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
new DistributionPattern[] {DistributionPattern.ALL_TO_ALL},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with partially finished vertices"
+ " predeceased with running or partially finished ones and connected via the ALL_TO_ALL edges. "
+ "Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a all running predecessor");
}
@Test
void testAddingPartiallyFinishedOperatorBeforePartiallyFinishedOneWithAllToAllFails()
throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
new DistributionPattern[] {DistributionPattern.ALL_TO_ALL},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with partially finished vertices"
+ " predeceased with running or partially finished ones and connected via the ALL_TO_ALL edges. "
+ "Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a partially finished predecessor");
}
@Test
void
testAddingPartiallyFinishedOperatorBeforePartiallyFinishedOneWithPointwiseAndAllToAllFails()
throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
new DistributionPattern[] {
DistributionPattern.POINTWISE, DistributionPattern.ALL_TO_ALL
},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with partially finished vertices"
+ " predeceased with running or partially finished ones and connected via the ALL_TO_ALL edges. "
+ "Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a partially finished predecessor");
}
@Test
void testAddingAllRunningOperatorBeforePartiallyFinishedOneFails() throws Exception {
JobVertexID jobVertexID2 = new JobVertexID();
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
new JobVertexID(),
"vert1",
VertexFinishedStateChecker.VertexFinishedState.ALL_RUNNING,
jobVertexID2,
"vert2",
VertexFinishedStateChecker.VertexFinishedState.PARTIALLY_FINISHED,
new DistributionPattern[] {DistributionPattern.POINTWISE},
FlinkRuntimeException.class,
"Illegal JobGraph modification. Cannot run a program with partially finished vertices"
+ " predeceased with all running ones. "
+ "Task vertex vert2"
+ "("
+ jobVertexID2
+ ")"
+ " has a all running predecessor");
}
private void testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
JobVertexID firstVertexId,
String firstVertexName,
VertexFinishedStateChecker.VertexFinishedState firstOperatorFinishedState,
JobVertexID secondVertexId,
String secondVertexName,
VertexFinishedStateChecker.VertexFinishedState secondOperatorFinishedState,
DistributionPattern[] distributionPatterns,
Class<? extends Throwable> expectedExceptionalClass,
String expectedMessage)
throws Exception {
OperatorIDPair op1 = OperatorIDPair.generatedIDOnly(new OperatorID());
OperatorIDPair op2 = OperatorIDPair.generatedIDOnly(new OperatorID());
JobVertex vertex1 = new JobVertex(firstVertexName, firstVertexId, singletonList(op1));
JobVertex vertex2 = new JobVertex(secondVertexName, secondVertexId, singletonList(op2));
vertex1.setInvokableClass(NoOpInvokable.class);
vertex2.setInvokableClass(NoOpInvokable.class);
final ExecutionGraph graph =
new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
.addJobVertex(vertex1, true)
.addJobVertex(vertex2, false)
.setDistributionPattern(distributionPatterns[0])
.build(EXECUTOR_EXTENSION.getExecutor());
// Adds the additional edges
for (int i = 1; i < distributionPatterns.length; ++i) {
connectNewDataSetAsInput(
vertex2, vertex1, distributionPatterns[i], ResultPartitionType.PIPELINED);
}
Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
operatorStates.put(
op1.getGeneratedOperatorID(),
createOperatorState(op1.getGeneratedOperatorID(), firstOperatorFinishedState));
operatorStates.put(
op2.getGeneratedOperatorID(),
createOperatorState(op2.getGeneratedOperatorID(), secondOperatorFinishedState));
Set<ExecutionJobVertex> vertices = new HashSet<>();
vertices.add(graph.getJobVertex(vertex1.getID()));
vertices.add(graph.getJobVertex(vertex2.getID()));
VertexFinishedStateChecker finishedStateChecker =
new VertexFinishedStateChecker(vertices, operatorStates);
assertThatThrownBy(finishedStateChecker::validateOperatorsFinishedState)
.hasMessage(expectedMessage)
.isInstanceOf(expectedExceptionalClass);
}
private OperatorState createOperatorState(
OperatorID operatorId, VertexFinishedStateChecker.VertexFinishedState finishedState) {
switch (finishedState) {
case ALL_RUNNING:
return new OperatorState(null, null, operatorId, 2, 2);
case PARTIALLY_FINISHED:
OperatorState operatorState = new OperatorState(null, null, operatorId, 2, 2);
operatorState.putState(0, FinishedOperatorSubtaskState.INSTANCE);
return operatorState;
case FULLY_FINISHED:
return new FullyFinishedOperatorState(null, null, operatorId, 2, 2);
default:
throw new UnsupportedOperationException(
"Not supported finished state: " + finishedState);
}
}
}
| VertexFinishedStateCheckerTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxRefCount.java | {
"start": 8335,
"end": 8470
} | interface ____ only used when qs is not null";
return qs.poll();
}
@Override
public int size() {
assert qs != null : "Queue | is |
java | apache__flink | flink-annotations/src/main/java/org/apache/flink/annotation/docs/Documentation.java | {
"start": 1988,
"end": 2320
} | interface ____ {
/** The sections in the config docs where this option should be included. */
String[] value() default {};
/** The relative position of the option in its section. */
int position() default Integer.MAX_VALUE;
}
/** Constants for section names. */
public static final | Section |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/tools/picocli/CommandLine.java | {
"start": 168716,
"end": 170304
} | class ____ inner classes and interfaces that can be used to create custom help messages.</p>
* <h3>IOptionRenderer and IParameterRenderer</h3>
* <p>Renders a field annotated with {@link Option} or {@link Parameters} to an array of {@link Text} values.
* By default, these values are</p><ul>
* <li>mandatory marker character (if the option/parameter is {@link Option#required() required})</li>
* <li>short option name (empty for parameters)</li>
* <li>comma or empty (empty for parameters)</li>
* <li>long option names (the parameter {@link IParamLabelRenderer label} for parameters)</li>
* <li>description</li>
* </ul>
* <p>Other components rely on this ordering.</p>
* <h3>Layout</h3>
* <p>Delegates to the renderers to create {@link Text} values for the annotated fields, and uses a
* {@link TextTable} to display these values in tabular format. Layout is responsible for deciding which values
* to display where in the table. By default, Layout shows one option or parameter per table row.</p>
* <h3>TextTable</h3>
* <p>Responsible for spacing out {@link Text} values according to the {@link Column} definitions the table was
* created with. Columns have a width, indentation, and an overflow policy that decides what to do if a value is
* longer than the column's width.</p>
* <h3>Text</h3>
* <p>Encapsulates rich text with styles and colors in a way that other components like {@link TextTable} are
* unaware of the embedded ANSI escape codes.</p>
*/
public static | contains |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/config/RemovedConfigMappingTest.java | {
"start": 1079,
"end": 1143
} | interface ____ {
String prop();
}
}
| RemovedConfigMapping |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java | {
"start": 51644,
"end": 52338
} | class ____ {
private Nesting2 field1_1 = Nesting2.newBuilder().build();
private Nesting2 field1_2 = Nesting2.newBuilder().build();
private Nesting2 field1_3 = Nesting2.newBuilder().build();
private Builder() {
}
public Builder field1_1(Nesting2 field1_1) {
this.field1_1 = field1_1;
return this;
}
public Builder field1_2(Nesting2 field1_2) {
this.field1_2 = field1_2;
return this;
}
public Builder field1_3(Nesting2 field1_3) {
this.field1_3 = field1_3;
return this;
}
public Nesting1 build() {
return new Nesting1(this);
}
}
}
public static | Builder |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/RouteMustHaveOutputOnExceptionTest.java | {
"start": 1030,
"end": 2348
} | class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testValid() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").onException(Exception.class).redeliveryDelay(10).maximumRedeliveries(2)
.backOffMultiplier(1.5).handled(true).delay(1000)
.log("Halting for some time").to("mock:halt").end().end().to("mock:result");
}
});
context.start();
}
@Test
public void testInValid() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").onException(Exception.class).redeliveryDelay(10).maximumRedeliveries(2)
.backOffMultiplier(1.5).handled(true).delay(1000)
.log("Halting for some time").to("mock:halt")
// end missing
.end().to("mock:result");
}
});
assertThrows(Exception.class, () -> context.start(),
"Should have thrown an exception");
}
}
| RouteMustHaveOutputOnExceptionTest |
java | quarkusio__quarkus | extensions/smallrye-jwt/runtime/src/main/java/io/quarkus/smallrye/jwt/runtime/auth/MpJwtValidator.java | {
"start": 999,
"end": 3535
} | class ____ implements IdentityProvider<TokenAuthenticationRequest> {
private static final Logger log = Logger.getLogger(MpJwtValidator.class);
final JWTParser parser;
final boolean blockingAuthentication;
public MpJwtValidator() {
this.parser = null;
this.blockingAuthentication = false;
}
@Inject
public MpJwtValidator(JWTParser parser, SmallRyeJwtConfig config) {
this.parser = parser;
this.blockingAuthentication = config == null ? false : config.blockingAuthentication();
}
@Override
public Class<TokenAuthenticationRequest> getRequestType() {
return TokenAuthenticationRequest.class;
}
@Override
public Uni<SecurityIdentity> authenticate(TokenAuthenticationRequest request,
AuthenticationRequestContext context) {
if (!(request.getToken() instanceof JsonWebTokenCredential)) {
return Uni.createFrom().nullItem();
}
if (!blockingAuthentication) {
return Uni.createFrom().emitter(new Consumer<UniEmitter<? super SecurityIdentity>>() {
@Override
public void accept(UniEmitter<? super SecurityIdentity> uniEmitter) {
try {
uniEmitter.complete(createSecurityIdentity(request));
} catch (AuthenticationFailedException e) {
uniEmitter.fail(e);
}
}
});
} else {
return context.runBlocking(() -> createSecurityIdentity(request));
}
}
private SecurityIdentity createSecurityIdentity(TokenAuthenticationRequest request) {
try {
JsonWebToken jwtPrincipal = parser.parse(request.getToken().getToken());
QuarkusSecurityIdentity.Builder builder = QuarkusSecurityIdentity.builder().setPrincipal(jwtPrincipal)
.addCredential(request.getToken())
.addRoles(jwtPrincipal.getGroups())
.addAttribute(SecurityIdentity.USER_ATTRIBUTE, jwtPrincipal);
RoutingContext routingContext = HttpSecurityUtils.getRoutingContextAttribute(request);
if (routingContext != null) {
builder.addAttribute(RoutingContext.class.getName(), routingContext);
}
return builder.build();
} catch (ParseException e) {
log.debug("Authentication failed", e);
throw new AuthenticationFailedException(e);
}
}
}
| MpJwtValidator |
java | spring-projects__spring-framework | spring-context-support/src/test/java/org/springframework/cache/jcache/config/JCacheJavaConfigTests.java | {
"start": 5321,
"end": 5910
} | class ____ {
@Bean
public CacheManager cacheManager() {
SimpleCacheManager cm = new SimpleCacheManager();
cm.setCaches(Arrays.asList(
defaultCache(),
new ConcurrentMapCache("primary"),
new ConcurrentMapCache("secondary"),
new ConcurrentMapCache("exception")));
return cm;
}
@Bean
public JCacheableService<?> cacheableService() {
return new AnnotatedJCacheableService(defaultCache());
}
@Bean
public Cache defaultCache() {
return new ConcurrentMapCache("default");
}
}
@Configuration
@EnableCaching
public static | EnableCachingConfig |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/main/java/io/quarkus/amazon/lambda/deployment/RequestHandlerJandexUtil.java | {
"start": 2183,
"end": 10438
} | class ____
while (currentClass != null && !OBJECT.equals(currentClass.name())) {
for (MethodInfo method : currentClass.methods()) {
if (isHandleRequestMethod(method) && !method.isSynthetic() && !method.isAbstract()) {
return method;
}
}
Type superType = currentClass.superClassType();
if (superType != null) {
currentClass = index.getClassByName(superType.name());
} else {
currentClass = null;
}
}
// If not found, look for default methods in interfaces
currentClass = handlerClass;
while (currentClass != null && !OBJECT.equals(currentClass.name())) {
for (Type ifaceType : currentClass.interfaceTypes()) {
MethodInfo defaultMethod = findDefaultInterfaceMethod(ifaceType.name(), index);
if (defaultMethod != null) {
return defaultMethod;
}
}
// Move to superclass
Type superType = currentClass.superClassType();
if (superType != null) {
currentClass = index.getClassByName(superType.name());
} else {
currentClass = null;
}
}
return null;
}
private static MethodInfo findDefaultInterfaceMethod(DotName ifaceName, IndexView index) {
ClassInfo iface = index.getClassByName(ifaceName);
if (iface == null) {
return null;
}
for (MethodInfo method : iface.methods()) {
if (isHandleRequestMethod(method) && method.isDefault()) {
return method;
}
}
for (Type parentIfaceType : iface.interfaceTypes()) {
MethodInfo defaultMethod = findDefaultInterfaceMethod(parentIfaceType.name(), index);
if (defaultMethod != null) {
return defaultMethod;
}
}
return null;
}
private static boolean isHandleRequestMethod(MethodInfo method) {
return method.name().equals("handleRequest") && method.parametersCount() == 2;
}
private static boolean isCollectionType(DotName typeName, IndexView index) {
if (COLLECTION.equals(typeName)) {
return true;
}
ClassInfo classInfo = index.getClassByName(typeName);
if (classInfo == null) {
return false;
}
for (Type interfaceType : classInfo.interfaceTypes()) {
if (isCollectionType(interfaceType.name(), index)) {
return true;
}
}
Type superType = classInfo.superClassType();
if (superType != null) {
if (isCollectionType(superType.name(), index)) {
return true;
}
}
return false;
}
public record InputOutputTypes(Type inputType, boolean isCollection, Type elementType, Type outputType) {
private static final InputOutputTypes UNRESOLVED = new InputOutputTypes(
Type.create(OBJECT, Type.Kind.CLASS), false, null,
Type.create(OBJECT, Type.Kind.CLASS));
}
public record RequestHandlerJandexDefinition(ClassInfo handlerClass, MethodInfo method, InputOutputTypes inputOutputTypes) {
}
private static boolean isUnresolved(InputOutputTypes types) {
return types == null || types == InputOutputTypes.UNRESOLVED;
}
private static InputOutputTypes resolveInputOutputTypes(DotName className, IndexView index,
Map<String, Type> typeMap) {
ClassInfo classInfo = index.getClassByName(className);
if (classInfo == null) {
return null;
}
for (Type interfaceType : classInfo.interfaceTypes()) {
InputOutputTypes result = resolveInputOutputTypesFromType(interfaceType, index, typeMap);
if (result != null) {
return result;
}
}
Type superType = classInfo.superClassType();
if (superType != null) {
InputOutputTypes result = resolveInputOutputTypesFromType(superType, index, typeMap);
if (result != null) {
return result;
}
}
return null;
}
private static InputOutputTypes resolveInputOutputTypesFromType(Type currentType, IndexView index,
Map<String, Type> typeMap) {
if (currentType.kind() == Type.Kind.PARAMETERIZED_TYPE) {
ParameterizedType pt = currentType.asParameterizedType();
DotName rawName = pt.name();
// If we hit RequestHandler, resolve the type arguments
if (REQUEST_HANDLER.equals(rawName)) {
List<Type> args = pt.arguments();
Type inputType = resolveTypeArgument(args.get(0), typeMap);
Type outputType = resolveTypeArgument(args.get(1), typeMap);
// Check if input type is a collection and extract element type
boolean isCollection = false;
Type elementType = null;
Type rawInputType = args.get(0);
if (rawInputType.kind() == Type.Kind.PARAMETERIZED_TYPE) {
ParameterizedType inputPt = rawInputType.asParameterizedType();
if (isCollectionType(inputPt.name(), index)) {
isCollection = true;
if (!inputPt.arguments().isEmpty()) {
elementType = resolveTypeArgument(inputPt.arguments().get(0), typeMap);
}
}
} else if (rawInputType.kind() == Type.Kind.CLASS) {
if (isCollectionType(rawInputType.name(), index)) {
isCollection = true;
elementType = Type.create(OBJECT, Type.Kind.CLASS);
}
}
return new InputOutputTypes(inputType, isCollection, elementType, outputType);
}
// Record bindings for current type variables
ClassInfo rawClass = index.getClassByName(rawName);
if (rawClass != null) {
List<TypeVariable> vars = rawClass.typeParameters();
List<Type> args = pt.arguments();
Map<String, Type> newTypeMap = new HashMap<>(typeMap);
for (int i = 0; i < vars.size() && i < args.size(); i++) {
newTypeMap.put(vars.get(i).identifier(), args.get(i));
}
// Recursively check this type's hierarchy
InputOutputTypes result = resolveInputOutputTypes(rawName, index, newTypeMap);
if (result != null) {
return result;
}
}
} else if (currentType.kind() == Type.Kind.CLASS) {
DotName className = currentType.name();
if (REQUEST_HANDLER.equals(className)) {
return InputOutputTypes.UNRESOLVED;
}
return resolveInputOutputTypes(className, index, typeMap);
}
return null;
}
private static Type resolveTypeArgument(Type type, Map<String, Type> typeMap) {
if (type.kind() == Type.Kind.TYPE_VARIABLE) {
TypeVariable tv = type.asTypeVariable();
Type resolved = typeMap.get(tv.identifier());
if (resolved != null) {
// Recursively resolve in case the resolved type is also a type variable
return resolveTypeArgument(resolved, typeMap);
}
// If we can't resolve the type variable, get its first bound
return getTypeFromBounds(tv);
}
return type;
}
private static Type getTypeFromBounds(TypeVariable tv) {
List<Type> bounds = tv.bounds();
if (!bounds.isEmpty()) {
Type firstBound = bounds.get(0);
if (firstBound.kind() == Type.Kind.PARAMETERIZED_TYPE) {
return Type.create(firstBound.name(), Type.Kind.CLASS);
}
return firstBound;
}
return Type.create(OBJECT, Type.Kind.CLASS);
}
}
| hierarchy |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-23/src/test/java/org/redisson/spring/data/connection/PropertiesDecoderTest.java | {
"start": 1176,
"end": 2263
} | class ____ implements Decoder<Properties> {
@Override
public Properties decode(ByteBuf buf, State state) {
String value = buf.toString(CharsetUtil.UTF_8);
Properties result = new Properties();
for (String entry : value.split("\r\n|\n")) {
String[] parts = entry.split(":");
if (parts.length == 2) {
result.put(parts[0], parts[1]);
}
}
return result;
}
}
// @Test
public void benchmark() {
PropertiesDecoderOld old = new PropertiesDecoderOld();
long start = System.currentTimeMillis();
int c = 400000;
while (c-- > 0) {
old.decode(Unpooled.copiedBuffer(info, StandardCharsets.UTF_8), null);
}
System.out.println(System.currentTimeMillis() - start);
start = System.currentTimeMillis();
c = 400000;
while (c-- > 0) {
testDecode();
}
System.out.println(System.currentTimeMillis() - start);
}
} | PropertiesDecoderOld |
java | apache__camel | tooling/camel-util-json/src/main/java/org/apache/camel/util/json/Jsoner.java | {
"start": 60404,
"end": 62730
} | class ____ does implement the Jsonable interface.\n"
+ " 3) Otherwise convert it to a boolean, null, number, JsonArray, JsonObject, or String value before serializing it.\n"
+ " 4) If you feel it should have serialized you could use a more tolerant serialization for debugging purposes.");
}
}
}
/**
* Serializes like the first version of this library. It has been adapted to use Jsonable for serializing custom
* objects, but otherwise works like the old JSON string serializer. It will allow non-JSON values in its output
* like the old one. It can be helpful for last resort log statements and debugging errors in self generated JSON.
* Anything serialized using this method isn't guaranteed to be deserializable.
*
* @param jsonSerializable represents the object that should be serialized in JSON format.
* @param writableDestination represents where the resulting JSON text is written to.
* @throws IOException if the writableDestination encounters an I/O problem, like being closed while in use.
*/
public static void serializeCarelessly(final Object jsonSerializable, final Writer writableDestination) throws IOException {
Jsoner.serialize(jsonSerializable, writableDestination,
EnumSet.of(SerializationOptions.ALLOW_JSONABLES, SerializationOptions.ALLOW_INVALIDS));
}
/**
* Serializes JSON values and only JSON values according to the RFC 4627 JSON specification.
*
* @param jsonSerializable represents the object that should be serialized in JSON format.
* @param writableDestination represents where the resulting JSON text is written to.
* @throws IOException if the writableDestination encounters an I/O problem, like being closed while in
* use.
* @throws IllegalArgumentException if the jsonSerializable isn't serializable in JSON.
*/
public static void serializeStrictly(final Object jsonSerializable, final Writer writableDestination) throws IOException {
Jsoner.serialize(jsonSerializable, writableDestination, EnumSet.noneOf(SerializationOptions.class));
}
}
| that |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/search/arguments/CreateArgs.java | {
"start": 797,
"end": 2429
} | enum ____ {
HASH, JSON
}
private Optional<TargetType> on = Optional.of(TargetType.HASH);
private final List<K> prefixes = new ArrayList<>();
private Optional<V> filter = Optional.empty();
private Optional<DocumentLanguage> defaultLanguage = Optional.empty();
private Optional<K> languageField = Optional.empty();
private OptionalDouble defaultScore = OptionalDouble.empty();
private Optional<K> scoreField = Optional.empty();
private Optional<K> payloadField = Optional.empty();
private boolean maxTextFields = false;
private OptionalLong temporary = OptionalLong.empty();
private boolean noOffsets = false;
private boolean noHighlight = false;
private boolean noFields = false;
private boolean noFrequency = false;
private boolean skipInitialScan = false;
private Optional<List<V>> stopWords = Optional.empty();
/**
* Used to build a new instance of the {@link CreateArgs}.
*
* @return a {@link Builder} that provides the option to build up a new instance of the {@link CreateArgs}
* @param <K> the key type
* @param <V> the value type
*/
public static <K, V> Builder<K, V> builder() {
return new Builder<>();
}
/**
* Builder for {@link CreateArgs}.
* <p>
* As a final step the {@link Builder#build()} method needs to be executed to create the final {@link CreateArgs} instance.
*
* @param <K> the key type
* @param <V> the value type
* @see <a href="https://redis.io/docs/latest/commands/ft.create/">FT.CREATE</a>
*/
public static | TargetType |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/vertx/CloseFutureTest.java | {
"start": 2426,
"end": 2569
} | interface ____ {
Future<Void> close();
}
private static final ThreadLocal<Object> closing = new ThreadLocal<>();
private static | UseCase |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/observation/SecurityObservationSettings.java | {
"start": 1040,
"end": 2389
} | class ____ {
private final boolean observeRequests;
private final boolean observeAuthentications;
private final boolean observeAuthorizations;
private SecurityObservationSettings(boolean observeRequests, boolean observeAuthentications,
boolean observeAuthorizations) {
this.observeRequests = observeRequests;
this.observeAuthentications = observeAuthentications;
this.observeAuthorizations = observeAuthorizations;
}
/**
* Make no Spring Security observations
* @return a {@link SecurityObservationSettings} with all exclusions turned on
*/
public static SecurityObservationSettings noObservations() {
return new SecurityObservationSettings(false, false, false);
}
/**
* Begin the configuration of a {@link SecurityObservationSettings}
* @return a {@link Builder} where filter chain observations are off and authn/authz
* observations are on
*/
public static Builder withDefaults() {
return new Builder(false, true, true);
}
public boolean shouldObserveRequests() {
return this.observeRequests;
}
public boolean shouldObserveAuthentications() {
return this.observeAuthentications;
}
public boolean shouldObserveAuthorizations() {
return this.observeAuthorizations;
}
/**
* A builder for configuring a {@link SecurityObservationSettings}
*/
public static final | SecurityObservationSettings |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/support/MessageSourceAccessor.java | {
"start": 1347,
"end": 7387
} | class ____ {
private final MessageSource messageSource;
private final @Nullable Locale defaultLocale;
/**
* Create a new MessageSourceAccessor, using LocaleContextHolder's locale
* as default locale.
* @param messageSource the MessageSource to wrap
* @see org.springframework.context.i18n.LocaleContextHolder#getLocale()
*/
public MessageSourceAccessor(MessageSource messageSource) {
this.messageSource = messageSource;
this.defaultLocale = null;
}
/**
* Create a new MessageSourceAccessor, using the given default locale.
* @param messageSource the MessageSource to wrap
* @param defaultLocale the default locale to use for message access
*/
public MessageSourceAccessor(MessageSource messageSource, Locale defaultLocale) {
this.messageSource = messageSource;
this.defaultLocale = defaultLocale;
}
/**
* Return the default locale to use if no explicit locale has been given.
* <p>The default implementation returns the default locale passed into the
* corresponding constructor, or LocaleContextHolder's locale as fallback.
* Can be overridden in subclasses.
* @see #MessageSourceAccessor(org.springframework.context.MessageSource, java.util.Locale)
* @see org.springframework.context.i18n.LocaleContextHolder#getLocale()
*/
protected Locale getDefaultLocale() {
return (this.defaultLocale != null ? this.defaultLocale : LocaleContextHolder.getLocale());
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param defaultMessage the String to return if the lookup fails
* @return the message
*/
public String getMessage(String code, String defaultMessage) {
String msg = this.messageSource.getMessage(code, null, defaultMessage, getDefaultLocale());
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param defaultMessage the String to return if the lookup fails
* @param locale the Locale in which to do lookup
* @return the message
*/
public String getMessage(String code, String defaultMessage, Locale locale) {
String msg = this.messageSource.getMessage(code, null, defaultMessage, locale);
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param defaultMessage the String to return if the lookup fails
* @return the message
*/
public String getMessage(String code, Object @Nullable [] args, String defaultMessage) {
String msg = this.messageSource.getMessage(code, args, defaultMessage, getDefaultLocale());
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param defaultMessage the String to return if the lookup fails
* @param locale the Locale in which to do lookup
* @return the message
*/
public String getMessage(String code, Object @Nullable [] args, String defaultMessage, Locale locale) {
String msg = this.messageSource.getMessage(code, args, defaultMessage, locale);
return (msg != null ? msg : "");
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code) throws NoSuchMessageException {
return this.messageSource.getMessage(code, null, getDefaultLocale());
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Locale locale) throws NoSuchMessageException {
return this.messageSource.getMessage(code, null, locale);
}
/**
* Retrieve the message for the given code and the default Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Object @Nullable [] args) throws NoSuchMessageException {
return this.messageSource.getMessage(code, args, getDefaultLocale());
}
/**
* Retrieve the message for the given code and the given Locale.
* @param code the code of the message
* @param args arguments for the message, or {@code null} if none
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(String code, Object @Nullable [] args, Locale locale) throws NoSuchMessageException {
return this.messageSource.getMessage(code, args, locale);
}
/**
* Retrieve the given MessageSourceResolvable (for example, an ObjectError instance)
* in the default Locale.
* @param resolvable the MessageSourceResolvable
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(MessageSourceResolvable resolvable) throws NoSuchMessageException {
return this.messageSource.getMessage(resolvable, getDefaultLocale());
}
/**
* Retrieve the given MessageSourceResolvable (for example, an ObjectError instance)
* in the given Locale.
* @param resolvable the MessageSourceResolvable
* @param locale the Locale in which to do lookup
* @return the message
* @throws org.springframework.context.NoSuchMessageException if not found
*/
public String getMessage(MessageSourceResolvable resolvable, Locale locale) throws NoSuchMessageException {
return this.messageSource.getMessage(resolvable, locale);
}
}
| MessageSourceAccessor |
java | redisson__redisson | redisson/src/main/java/org/redisson/cache/NoneCacheMap.java | {
"start": 726,
"end": 980
} | class ____<K, V> extends AbstractCacheMap<K, V> {
public NoneCacheMap(long timeToLiveInMillis, long maxIdleInMillis) {
super(0, timeToLiveInMillis, maxIdleInMillis);
}
@Override
protected void onMapFull() {
}
}
| NoneCacheMap |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/array/OracleArrayConcatElementFunction.java | {
"start": 496,
"end": 1755
} | class ____ extends ArrayConcatElementFunction {
public OracleArrayConcatElementFunction(boolean prepend) {
super( "(", ",", ")", prepend );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
final Expression firstArgument = (Expression) sqlAstArguments.get( 0 );
final Expression secondArgument = (Expression) sqlAstArguments.get( 1 );
final String arrayTypeName = DdlTypeHelper.getTypeName(
prepend ? secondArgument.getExpressionType()
: firstArgument.getExpressionType(),
walker.getSessionFactory().getTypeConfiguration()
);
sqlAppender.append( arrayTypeName );
sqlAppender.append( "_concat(" );
if ( prepend ) {
sqlAppender.append( arrayTypeName );
sqlAppender.append( '(' );
firstArgument.accept( walker );
sqlAppender.append( ')' );
}
else {
firstArgument.accept( walker );
}
sqlAppender.append( ',' );
if ( prepend ) {
secondArgument.accept( walker );
}
else {
sqlAppender.append( arrayTypeName );
sqlAppender.append( '(' );
secondArgument.accept( walker );
sqlAppender.append( ')' );
}
sqlAppender.append( ')' );
}
}
| OracleArrayConcatElementFunction |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest52.java | {
"start": 992,
"end": 3422
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE `ins_ebay_auth` ("//
+ "`auth_id` int(10) NOT NULL AUTO_INCREMENT COMMENT '主键id',"//
+ "`usr_id` int(10) NOT NULL COMMENT '外键,用户表',"//
+ "`status` char(1) COLLATE utf8_bin NOT NULL COMMENT '状态 0.有效?1.无效',"//
+ "`ebay_token` varchar(255) COLLATE utf8_bin NOT NULL COMMENT 'eBay授权码',"//
+ "`ebay_name` varchar(50) NOT NULL COMMENT 'eBay唯一名',"//
+ "`create_time` datetime NOT NULL COMMENT '授权时间',"//
+ "`invalid_time` datetime NOT NULL COMMENT '授权失效时间'," + "PRIMARY KEY (`auth_id`)"//
+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='INS_EBAY_AUTH';";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseCreateTable();
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(7, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("ins_ebay_auth")));
String output = SQLUtils.toMySqlString(stmt);
assertEquals("CREATE TABLE `ins_ebay_auth` ("//
+ "\n\t`auth_id` int(10) NOT NULL AUTO_INCREMENT COMMENT '主键id',"
+ "\n\t`usr_id` int(10) NOT NULL COMMENT '外键,用户表',"//
+ "\n\t`status` char(1) COLLATE utf8_bin NOT NULL COMMENT '状态 0.有效?1.无效',"//
+ "\n\t`ebay_token` varchar(255) COLLATE utf8_bin NOT NULL COMMENT 'eBay授权码',"//
+ "\n\t`ebay_name` varchar(50) NOT NULL COMMENT 'eBay唯一名',"//
+ "\n\t`create_time` datetime NOT NULL COMMENT '授权时间',"//
+ "\n\t`invalid_time` datetime NOT NULL COMMENT '授权失效时间',"//
+ "\n\tPRIMARY KEY (`auth_id`)"
+ "\n) ENGINE = InnoDB CHARSET = utf8 COLLATE = utf8_bin COMMENT 'INS_EBAY_AUTH'", output);
}
}
| MySqlCreateTableTest52 |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/AllLastIntByTimestampAggregator.java | {
"start": 2038,
"end": 5872
} | class ____ {
public static String describe() {
return "all_last_int_by_timestamp";
}
public static AllLongIntState initSingle(DriverContext driverContext) {
return new AllLongIntState(0, 0);
}
private static void first(AllLongIntState current, long timestamp, int value, boolean v2Seen) {
current.seen(true);
current.v1(timestamp);
current.v2(v2Seen ? value : 0);
current.v2Seen(v2Seen);
}
public static void combine(AllLongIntState current, @Position int position, IntBlock value, LongBlock timestamp) {
if (current.seen() == false) {
// We never observed a value before so we'll take this right in, no questions asked.
first(current, timestamp.getLong(position), value.getInt(position), value.isNull(position) == false);
return;
}
long ts = timestamp.getLong(position);
if (ts > current.v1()) {
// timestamp and seen flag are updated in all cases
current.v1(ts);
current.seen(true);
if (value.isNull(position) == false) {
// non-null value
current.v2(value.getInt(position));
current.v2Seen(true);
} else {
// null value
current.v2Seen(false);
}
}
}
public static void combineIntermediate(AllLongIntState current, long timestamp, int value, boolean seen, boolean v2Seen) {
if (seen) {
if (current.seen()) {
if (timestamp > current.v1()) {
// A newer timestamp has been observed in the reporting shard so we must update internal state
current.v1(timestamp);
current.v2(value);
current.v2Seen(v2Seen);
}
} else {
current.v1(timestamp);
current.v2(value);
current.seen(true);
current.v2Seen(v2Seen);
}
}
}
public static Block evaluateFinal(AllLongIntState current, DriverContext ctx) {
if (current.v2Seen()) {
return ctx.blockFactory().newConstantIntBlockWith(current.v2(), 1);
} else {
return ctx.blockFactory().newConstantNullBlock(1);
}
}
public static GroupingState initGrouping(DriverContext driverContext) {
return new GroupingState(driverContext.bigArrays());
}
public static void combine(GroupingState current, int groupId, @Position int position, IntBlock value, LongBlock timestamp) {
boolean hasValue = value.isNull(position) == false;
current.collectValue(groupId, timestamp.getLong(position), value.getInt(position), hasValue);
}
public static void combineIntermediate(
GroupingState current,
int groupId,
LongBlock timestamps,
IntBlock values,
BooleanBlock hasValues,
int otherPosition
) {
// TODO seen should probably be part of the intermediate representation
int valueCount = values.getValueCount(otherPosition);
if (valueCount > 0) {
long timestamp = timestamps.getLong(timestamps.getFirstValueIndex(otherPosition));
int firstIndex = values.getFirstValueIndex(otherPosition);
boolean hasValueFlag = hasValues.getBoolean(otherPosition);
for (int i = 0; i < valueCount; i++) {
current.collectValue(groupId, timestamp, values.getInt(firstIndex + i), hasValueFlag);
}
}
}
public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) {
return state.evaluateFinal(selected, ctx);
}
public static final | AllLastIntByTimestampAggregator |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/virtualservice/match/StringMatch.java | {
"start": 962,
"end": 3099
} | class ____ {
private String exact;
private String prefix;
private String regex;
private String noempty;
private String empty;
private String wildcard;
public String getExact() {
return exact;
}
public void setExact(String exact) {
this.exact = exact;
}
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
public String getRegex() {
return regex;
}
public void setRegex(String regex) {
this.regex = regex;
}
public String getNoempty() {
return noempty;
}
public void setNoempty(String noempty) {
this.noempty = noempty;
}
public String getEmpty() {
return empty;
}
public void setEmpty(String empty) {
this.empty = empty;
}
public String getWildcard() {
return wildcard;
}
public void setWildcard(String wildcard) {
this.wildcard = wildcard;
}
public boolean isMatch(String input) {
if (getExact() != null && input != null) {
return input.equals(getExact());
} else if (getPrefix() != null && input != null) {
return input.startsWith(getPrefix());
} else if (getRegex() != null && input != null) {
return input.matches(getRegex());
} else if (getWildcard() != null && input != null) {
// only supports "*"
return input.equals(getWildcard()) || ANY_VALUE.equals(getWildcard());
} else if (getEmpty() != null) {
return input == null || "".equals(input);
} else if (getNoempty() != null) {
return input != null && input.length() > 0;
} else {
return false;
}
}
@Override
public String toString() {
return "StringMatch{" + "exact='"
+ exact + '\'' + ", prefix='"
+ prefix + '\'' + ", regex='"
+ regex + '\'' + ", noempty='"
+ noempty + '\'' + ", empty='"
+ empty + '\'' + '}';
}
}
| StringMatch |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ImmutableMemberCollection.java | {
"start": 3171,
"end": 11087
} | class ____ extends BugChecker implements ClassTreeMatcher {
private static final ImmutableSet<String> MUTATING_METHODS =
ImmutableSet.of(
"add",
"addAll",
"clear",
"compute",
"computeIfAbsent",
"computeIfPresent",
"forcePut",
"merge",
"pollFirst",
"pollFirstEntry",
"pollLast",
"pollLastEntry",
"put",
"putAll",
"putIfAbsent",
"remove",
"removeAll",
"removeIf",
"replace",
"replaceAll",
"replaceValues",
"retainAll",
"set",
"sort");
private static final ImmutableSet<ReplaceableType<?>> REPLACEABLE_TYPES =
ImmutableSet.of(
ReplaceableType.create(NavigableSet.class, ImmutableSortedSet.class),
ReplaceableType.create(Set.class, ImmutableSet.class),
ReplaceableType.create(List.class, ImmutableList.class),
ReplaceableType.create(ListMultimap.class, ImmutableListMultimap.class),
ReplaceableType.create(SetMultimap.class, ImmutableSetMultimap.class),
ReplaceableType.create(SortedMap.class, ImmutableSortedMap.class),
ReplaceableType.create(Map.class, ImmutableMap.class));
private static final Matcher<Tree> PRIVATE_FINAL_VAR_MATCHER =
allOf(kindIs(Kind.VARIABLE), hasModifier(Modifier.PRIVATE), hasModifier(Modifier.FINAL));
// TODO(ashishkedia) : Share this with ImmutableSetForContains.
private static final Matcher<Tree> EXCLUSIONS =
anyOf(hasAnnotationWithSimpleName("Bind"), hasAnnotationWithSimpleName("Inject"));
private final WellKnownKeep wellKnownKeep;
@Inject
ImmutableMemberCollection(WellKnownKeep wellKnownKeep) {
this.wellKnownKeep = wellKnownKeep;
}
@Override
public Description matchClass(ClassTree classTree, VisitorState state) {
ImmutableMap<Symbol, ReplaceableVar> replaceableVars =
classTree.getMembers().stream()
.filter(member -> PRIVATE_FINAL_VAR_MATCHER.matches(member, state))
.filter(member -> !wellKnownKeep.shouldKeep(member))
.filter(member -> !EXCLUSIONS.matches(member, state))
.filter(member -> !isSuppressed(member, state))
.map(VariableTree.class::cast)
.flatMap(varTree -> isReplaceable(varTree, state).stream())
.collect(toImmutableMap(ReplaceableVar::symbol, var -> var));
if (replaceableVars.isEmpty()) {
return Description.NO_MATCH;
}
HashSet<Symbol> isPotentiallyMutated = new HashSet<>();
ImmutableSetMultimap.Builder<Symbol, Tree> initTreesBuilder = ImmutableSetMultimap.builder();
new TreePathScanner<Void, VisitorState>() {
@Override
public Void visitAssignment(AssignmentTree assignmentTree, VisitorState visitorState) {
Symbol varSymbol = getSymbol(assignmentTree.getVariable());
if (replaceableVars.containsKey(varSymbol) && assignmentTree.getExpression() != null) {
initTreesBuilder.put(varSymbol, assignmentTree.getExpression());
}
return scan(assignmentTree.getExpression(), visitorState);
}
@Override
public Void visitVariable(VariableTree variableTree, VisitorState visitorState) {
VarSymbol varSym = getSymbol(variableTree);
if (replaceableVars.containsKey(varSym) && variableTree.getInitializer() != null) {
initTreesBuilder.put(varSym, variableTree.getInitializer());
}
return super.visitVariable(variableTree, visitorState);
}
@Override
public Void visitIdentifier(IdentifierTree identifierTree, VisitorState visitorState) {
recordVarMutation(getSymbol(identifierTree));
return super.visitIdentifier(identifierTree, visitorState);
}
@Override
public Void visitMemberSelect(MemberSelectTree memberSelectTree, VisitorState visitorState) {
recordVarMutation(getSymbol(memberSelectTree));
return super.visitMemberSelect(memberSelectTree, visitorState);
}
@Override
public Void visitMethodInvocation(
MethodInvocationTree methodInvocationTree, VisitorState visitorState) {
ExpressionTree receiver = getReceiver(methodInvocationTree);
if (replaceableVars.containsKey(getSymbol(receiver))) {
MemberSelectTree selectTree = (MemberSelectTree) methodInvocationTree.getMethodSelect();
if (!MUTATING_METHODS.contains(selectTree.getIdentifier().toString())) {
// This is a safe read only method invoked on a replaceable collection member.
methodInvocationTree.getTypeArguments().forEach(type -> scan(type, visitorState));
methodInvocationTree.getArguments().forEach(arg -> scan(arg, visitorState));
return null;
}
}
return super.visitMethodInvocation(methodInvocationTree, visitorState);
}
private void recordVarMutation(Symbol sym) {
if (replaceableVars.containsKey(sym)) {
isPotentiallyMutated.add(sym);
}
}
}.scan(state.findPathToEnclosing(CompilationUnitTree.class), state);
ImmutableSetMultimap<Symbol, Tree> initTrees = initTreesBuilder.build();
SuggestedFix.Builder suggestedFix = SuggestedFix.builder();
replaceableVars.values().stream()
.filter(
var ->
var.areAllInitImmutable(initTrees.get(var.symbol()), state)
|| !isPotentiallyMutated.contains(var.symbol()))
.forEach(
replaceableVar ->
suggestedFix.merge(
replaceableVar.getFix(initTrees.get(replaceableVar.symbol()), state)));
if (suggestedFix.isEmpty()) {
return Description.NO_MATCH;
}
return describeMatch(classTree, suggestedFix.build());
}
private static Optional<ReplaceableVar> isReplaceable(VariableTree tree, VisitorState state) {
return REPLACEABLE_TYPES.stream()
.filter(type -> isSameType(type.interfaceType()).matches(tree, state))
.findFirst()
.map(type -> ReplaceableVar.create(tree, type));
}
record ReplaceableType<M>(Class<M> interfaceType, Class<? extends M> immutableType) {
static <M> ReplaceableType<M> create(Class<M> interfaceType, Class<? extends M> immutableType) {
return new ReplaceableType<>(interfaceType, immutableType);
}
}
record ReplaceableVar(Symbol symbol, ReplaceableType<?> type, Tree declaredType) {
static ReplaceableVar create(VariableTree variableTree, ReplaceableType<?> type) {
return new ReplaceableVar(getSymbol(variableTree), type, variableTree.getType());
}
private SuggestedFix getFix(ImmutableSet<Tree> initTrees, VisitorState state) {
SuggestedFix.Builder fixBuilder =
SuggestedFix.builder()
.replace(stripTypeParameters(declaredType()), type().immutableType().getSimpleName())
.addImport(type().immutableType().getName());
initTrees.stream()
.filter(initTree -> !isSameType(type().immutableType()).matches(initTree, state))
.forEach(init -> fixBuilder.replace(init, wrapWithImmutableCopy(init, state)));
return fixBuilder.build();
}
private String wrapWithImmutableCopy(Tree tree, VisitorState state) {
String type = type().immutableType().getSimpleName();
return type + ".copyOf(" + state.getSourceForNode(tree) + ")";
}
private boolean areAllInitImmutable(ImmutableSet<Tree> initTrees, VisitorState state) {
return initTrees.stream()
.allMatch(initTree -> isSameType(type().immutableType()).matches(initTree, state));
}
private static Tree stripTypeParameters(Tree tree) {
return tree instanceof ParameterizedTypeTree parameterizedTypeTree
? parameterizedTypeTree.getType()
: tree;
}
}
}
| ImmutableMemberCollection |
java | spring-projects__spring-security | webauthn/src/main/java/org/springframework/security/web/webauthn/api/PublicKeyCredentialRequestOptions.java | {
"start": 4990,
"end": 8400
} | class ____ {
private @Nullable Bytes challenge;
private Duration timeout = Duration.ofMinutes(5);
private @Nullable String rpId;
private List<PublicKeyCredentialDescriptor> allowCredentials = Collections.emptyList();
private @Nullable UserVerificationRequirement userVerification;
private AuthenticationExtensionsClientInputs extensions = new ImmutableAuthenticationExtensionsClientInputs(
new ArrayList<>());
private PublicKeyCredentialRequestOptionsBuilder() {
}
/**
* Sets the {@link #getChallenge()} property.
* @param challenge the challenge
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder challenge(Bytes challenge) {
this.challenge = challenge;
return this;
}
/**
* Sets the {@link #getTimeout()} property.
* @param timeout the timeout
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder timeout(Duration timeout) {
Assert.notNull(timeout, "timeout cannot be null");
this.timeout = timeout;
return this;
}
/**
* Sets the {@link #getRpId()} property.
* @param rpId the rpId property
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder rpId(String rpId) {
this.rpId = rpId;
return this;
}
/**
* Sets the {@link #getAllowCredentials()} property
* @param allowCredentials the allowed credentials
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder allowCredentials(
List<PublicKeyCredentialDescriptor> allowCredentials) {
Assert.notNull(allowCredentials, "allowCredentials cannot be null");
this.allowCredentials = allowCredentials;
return this;
}
/**
* Sets the {@link #getUserVerification()} property.
* @param userVerification the user verification
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder userVerification(UserVerificationRequirement userVerification) {
this.userVerification = userVerification;
return this;
}
/**
* Sets the {@link #getExtensions()} property
* @param extensions the extensions
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder extensions(AuthenticationExtensionsClientInputs extensions) {
this.extensions = extensions;
return this;
}
/**
* Allows customizing the {@link PublicKeyCredentialRequestOptionsBuilder}
* @param customizer the {@link Consumer} used to customize the builder
* @return the {@link PublicKeyCredentialRequestOptionsBuilder}
*/
public PublicKeyCredentialRequestOptionsBuilder customize(
Consumer<PublicKeyCredentialRequestOptionsBuilder> customizer) {
customizer.accept(this);
return this;
}
/**
* Builds a new {@link PublicKeyCredentialRequestOptions}
* @return a new {@link PublicKeyCredentialRequestOptions}
*/
public PublicKeyCredentialRequestOptions build() {
if (this.challenge == null) {
this.challenge = Bytes.random();
}
return new PublicKeyCredentialRequestOptions(this.challenge, this.timeout, this.rpId, this.allowCredentials,
this.userVerification, this.extensions);
}
}
}
| PublicKeyCredentialRequestOptionsBuilder |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/collectionparameters/CollectionParametersTest.java | {
"start": 1183,
"end": 2924
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create an SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/collectionparameters/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/collectionparameters/CreateDB.sql");
}
@Test
void shouldGetTwoUsersPassingAList() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
ArrayList<Integer> list = new ArrayList<>();
list.add(1);
list.add(2);
List<User> users = mapper.getUsersFromList(list);
Assertions.assertEquals(2, users.size());
}
}
@Test
void shouldGetTwoUsersPassingAnArray() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
Integer[] list = new Integer[2];
list[0] = 1;
list[1] = 2;
List<User> users = mapper.getUsersFromArray(list);
Assertions.assertEquals(2, users.size());
}
}
@Test
void shouldGetTwoUsersPassingACollection() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
Set<Integer> list = new HashSet<>();
list.add(1);
list.add(2);
List<User> users = mapper.getUsersFromCollection(list);
Assertions.assertEquals(2, users.size());
}
}
}
| CollectionParametersTest |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java | {
"start": 2264,
"end": 2391
} | class ____ a no-args constructor</li>
* <li>The no-args constructor is public</li>
* <li>Static initialization of the | has |
java | google__guava | android/guava/src/com/google/common/collect/Maps.java | {
"start": 26923,
"end": 29323
} | class ____<K extends @Nullable Object, V extends @Nullable Object>
implements MapDifference<K, V> {
final Map<K, V> onlyOnLeft;
final Map<K, V> onlyOnRight;
final Map<K, V> onBoth;
final Map<K, ValueDifference<V>> differences;
MapDifferenceImpl(
Map<K, V> onlyOnLeft,
Map<K, V> onlyOnRight,
Map<K, V> onBoth,
Map<K, ValueDifference<V>> differences) {
this.onlyOnLeft = unmodifiableMap(onlyOnLeft);
this.onlyOnRight = unmodifiableMap(onlyOnRight);
this.onBoth = unmodifiableMap(onBoth);
this.differences = unmodifiableMap(differences);
}
@Override
public boolean areEqual() {
return onlyOnLeft.isEmpty() && onlyOnRight.isEmpty() && differences.isEmpty();
}
@Override
public Map<K, V> entriesOnlyOnLeft() {
return onlyOnLeft;
}
@Override
public Map<K, V> entriesOnlyOnRight() {
return onlyOnRight;
}
@Override
public Map<K, V> entriesInCommon() {
return onBoth;
}
@Override
public Map<K, ValueDifference<V>> entriesDiffering() {
return differences;
}
@Override
public boolean equals(@Nullable Object object) {
if (object == this) {
return true;
}
if (object instanceof MapDifference) {
MapDifference<?, ?> other = (MapDifference<?, ?>) object;
return entriesOnlyOnLeft().equals(other.entriesOnlyOnLeft())
&& entriesOnlyOnRight().equals(other.entriesOnlyOnRight())
&& entriesInCommon().equals(other.entriesInCommon())
&& entriesDiffering().equals(other.entriesDiffering());
}
return false;
}
@Override
public int hashCode() {
return Objects.hash(
entriesOnlyOnLeft(), entriesOnlyOnRight(), entriesInCommon(), entriesDiffering());
}
@Override
public String toString() {
if (areEqual()) {
return "equal";
}
StringBuilder result = new StringBuilder("not equal");
if (!onlyOnLeft.isEmpty()) {
result.append(": only on left=").append(onlyOnLeft);
}
if (!onlyOnRight.isEmpty()) {
result.append(": only on right=").append(onlyOnRight);
}
if (!differences.isEmpty()) {
result.append(": value differences=").append(differences);
}
return result.toString();
}
}
static final | MapDifferenceImpl |
java | apache__flink | flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/resource/ClientResourceManager.java | {
"start": 1747,
"end": 2434
} | class ____ extends ResourceManager {
public ClientResourceManager(Configuration config, MutableURLClassLoader userClassLoader) {
super(config, userClassLoader);
}
@Nullable
public URL unregisterJarResource(String jarPath) {
Path path = new Path(jarPath);
try {
checkPath(path, ResourceType.JAR);
return resourceInfos.remove(
new ResourceUri(ResourceType.JAR, getURLFromPath(path).getPath()));
} catch (IOException e) {
throw new SqlExecutionException(
String.format("Failed to unregister the jar resource [%s]", jarPath), e);
}
}
}
| ClientResourceManager |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RReliableQueue.java | {
"start": 1827,
"end": 11861
} | interface ____<V> extends RExpirable, RReliableQueueAsync<V>, RDestroyable {
/**
* Sets the configuration for this reliable queue.
*
* @param config the queue configuration to apply
*/
void setConfig(QueueConfig config);
/**
* Attempts to set the configuration for this reliable queue.
* <p>
* This method only applies the configuration if no configuration has been set previously.
*
* @param config the queue configuration to apply
* @return {@code true} if the configuration was successfully applied,
* {@code false} if a configuration already exists
*/
boolean setConfigIfAbsent(QueueConfig config);
/**
* Returns the total number of messages in the queue ready for polling,
* excluding delayed and unacknowledged messages.
*
* @return the total number of messages
*/
int size();
/**
* Returns the number of delayed messages in the queue.
* <p>
* Delayed messages are those scheduled for future delivery and not yet available for consumption.
*
* @return the number of delayed messages
*/
int countDelayedMessages();
/**
* Returns the number of unacknowledged messages in the queue.
* <p>
* Unacknowledged messages are those that have been delivered to consumers
* but not yet acknowledged as successfully processed.
*
* @return the number of unacknowledged messages
*/
int countUnacknowledgedMessages();
/**
* Checks if the queue is empty.
* <p>
* A queue is considered empty when it contains no messages in any state
* (ready, delayed, or unacknowledged).
*
* @return {@code true} if the queue is empty, {@code false} otherwise
*/
boolean isEmpty();
/**
* Removes all messages from the queue.
* <p>
* This operation clears messages in all states (ready, delayed, and unacknowledged).
*
* @return {@code true} if the queue existed and has been cleared, otherwise false
*/
boolean clear();
/**
* Retrieves and removes the head of this queue, or returns {@code null} if this queue is empty.
* <p>
* The retrieved message remains unacknowledged until explicitly acknowledged
* using the {@link #acknowledge(QueueAckArgs)} or {@link #negativeAcknowledge(QueueNegativeAckArgs)} method.
*
* @return the message in the head of this queue, or {@code null} if this queue is empty
* @throws OperationDisabledException if this operation is disabled
*/
Message<V> poll();
/**
* Retrieves and removes the head of this queue with the specified polling arguments.
* <p>
* The retrieved message remains unacknowledged until explicitly acknowledged
* using the {@link #acknowledge(QueueAckArgs)} or {@link #negativeAcknowledge(QueueNegativeAckArgs)} method.
*
* @param args polling arguments
* @return the message in the head of this queue, or {@code null} if this queue is empty
* @throws OperationDisabledException if this operation is disabled
*/
Message<V> poll(QueuePollArgs args);
/**
* Retrieves and removes multiple messages from the queue with the specified polling arguments.
* <p>
* This batch operation is more efficient than polling messages individually.
* <p>
* The retrieved messages remain unacknowledged until explicitly acknowledged
* using the {@link #acknowledge(QueueAckArgs)} or {@link #negativeAcknowledge(QueueNegativeAckArgs)} method.
*
* @param pargs polling arguments
* @return a list of retrieved messages
* @throws OperationDisabledException if this operation is disabled
*/
List<Message<V>> pollMany(QueuePollArgs pargs);
/**
* Acknowledges the successful processing of a message.
* <p>
* Once acknowledged, a message is permanently removed from the queue and will not be redelivered.
*
* @param args acknowledgment arguments
*/
void acknowledge(QueueAckArgs args);
/**
* Checks if the queue contains a message with the specified ID.
*
* @param id the message ID to check
* @return {@code true} if a message with the specified ID exists in the queue, {@code false} otherwise
*/
boolean contains(String id);
/**
* Checks if the queue contains messages with the specified IDs.
*
* @param ids the message IDs to check
* @return the number of matching messages found in the queue
*/
int containsMany(String... ids);
/**
* Removes a specific message from the queue.
* <p>
* This operation can remove messages in any state (ready, delayed, or unacknowledged).
*
* @param args removal arguments
* @return {@code true} if the message was successfully removed, {@code false} if the message was not found
*/
boolean remove(QueueRemoveArgs args);
/**
* Removes multiple messages from the queue in a single operation.
*
* @param args removal arguments
* @return the number of messages successfully removed
*/
int removeMany(QueueRemoveArgs args);
/**
* Moves messages between queues.
*
* @param args move arguments
* @return the number of messages successfully moved
*/
int move(QueueMoveArgs args);
/**
* Adds a message to the queue with the specified parameters.
* <p>
* Returns {@code null} if the message hasn't been added for one of the following reasons:
* <ul>
* <li>Due to message deduplication by id or hash</li>
* <li>Due to configured queue size limit and queue is full</li>
* </ul>
*
* @param params parameters for the message to be added
* @return the added message with its assigned ID and metadata
* or {@code null} if timeout defined and no space becomes available in full queue.
* @throws OperationDisabledException if this operation is disabled
*/
Message<V> add(QueueAddArgs<V> params);
/**
* Adds multiple messages to the queue in a single operation.
* <p>
* This batch operation is more efficient than adding messages individually.
* <p>
* Messages may not be added for one of the following reasons:
* <ul>
* <li>Due to message deduplication by id or hash</li>
* <li>Due to configured queue size limit and queue is full</li>
* </ul>
*
* @param params parameters for the messages to be added
* @return a list of added messages with their assigned IDs and metadata
* or empty list if timeout defined and no space becomes available in full queue.
* @throws OperationDisabledException if this operation is disabled
*/
List<Message<V>> addMany(QueueAddArgs<V> params);
/**
* Returns the names of source queues which uses this reliable queue as dead letter queue.
* <p>
* This only applies if this queue is configured as a dead letter queue in the source queue configurations.
*
* @return a set of source queue names
*/
Set<String> getDeadLetterQueueSources();
/**
* Returns all messages in the queue, ready to be retrieved by the poll() command, without removing them.
* <p>
* This operation is useful for inspection and debugging purposes.
*
* @return a list of all messages in the queue
*/
List<Message<V>> listAll();
/**
* Returns all messages in the queue, ready to be retrieved by the poll() command,
* using the specified codec for message header values.
*
* @param headersCodec the codec to use for deserializing message header values
* @return a list of all messages in the queue
*/
List<Message<V>> listAll(Codec headersCodec);
/**
* Returns message by id
*
* @param id message id
* @return message
*/
Message<V> get(String id);
/**
* Returns message by id applying specified codec to headers
*
* @param id message id
* @param headersCodec codec for headers
* @return message
*/
Message<V> get(Codec headersCodec, String id);
/**
* Returns messages by ids
*
* @param ids message ids
* @return message
*/
List<Message<V>> getAll(String... ids);
/**
* Returns messages by ids applying specified codec to headers
*
* @param ids message ids
* @param headersCodec codec for headers
* @return message
*/
List<Message<V>> getAll(Codec headersCodec, String... ids);
/**
* Explicitly marks a message as failed or rejected.
*
* @param args arguments specifying the message to negatively acknowledge
*/
void negativeAcknowledge(QueueNegativeAckArgs args);
/**
* Adds queue listener
*
* @see org.redisson.api.queue.event.AddedEventListener
* @see org.redisson.api.queue.event.PolledEventListener
* @see org.redisson.api.queue.event.RemovedEventListener
* @see org.redisson.api.queue.event.AcknowledgedEventListener
* @see org.redisson.api.queue.event.NegativelyAcknowledgedEventListener
* @see org.redisson.api.queue.event.ConfigEventListener
* @see org.redisson.api.queue.event.DisabledOperationEventListener
* @see org.redisson.api.queue.event.EnabledOperationEventListener
* @see org.redisson.api.queue.event.FullEventListener
*
* @param listener entry listener
* @return listener id
*/
String addListener(QueueEventListener listener);
/**
* Removes map entry listener
*
* @param id listener id
*/
void removeListener(String id);
/**
* Disables a queue operation
*
* @param operation queue operation
*/
void disableOperation(QueueOperation operation);
/**
* Enables a queue operation
*
* @param operation queue operation
*/
void enableOperation(QueueOperation operation);
}
| RReliableQueue |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/promql/PromqlParserUtils.java | {
"start": 733,
"end": 11371
} | class ____ {
// time units recognized by Prometheus
private static final Map<String, Long> TIME_UNITS;
static {
// NB: using JDK TimeUnit or ChronoUnits turns out to be verbose
// hence the basic approach used below
// NB2: using LHM to preserve insertion order for consistent strings around keys
Map<String, Long> map = new LinkedHashMap<>();
map.put("y", 1000L * 60 * 60 * 24 * 365);
map.put("w", 1000L * 60 * 60 * 24 * 7);
map.put("d", 1000L * 60 * 60 * 24);
map.put("h", 1000L * 60 * 60);
map.put("m", 1000L * 60);
map.put("s", 1000L);
map.put("ms", 1L);
TIME_UNITS = unmodifiableMap(map);
}
private PromqlParserUtils() {}
public static Duration parseDuration(Source source, String string) {
char[] chars = string.toCharArray();
long millis = 0;
String errorPrefix = "Invalid time duration [{}], ";
int current;
Tuple<String, Long> lastUnit = null;
for (int i = 0; i < chars.length;) {
current = i;
// number - look for digits
while (current < chars.length && Character.isDigit(chars[current])) {
current++;
}
// at least one digit needs to be specified
if (current == i) {
throw new ParsingException(source, errorPrefix + "no number specified at index [{}]", string, current);
}
String token = new String(chars, i, current - i);
int number;
try {
number = Integer.parseInt(token);
} catch (NumberFormatException ex) {
throw new ParsingException(source, errorPrefix + "invalid number [{}]", string, token);
}
i = current;
// unit - look for letters
while (current < chars.length && Character.isLetter(chars[current])) {
current++;
}
// at least one letter needs to be specified
if (current == i) {
throw new ParsingException(source, errorPrefix + "no unit specified at index [{}]", string, current);
}
token = new String(chars, i, current - i);
i = current;
Long msMultiplier = TIME_UNITS.get(token);
if (msMultiplier == null) {
throw new ParsingException(
source,
errorPrefix + "unrecognized time unit [{}], must be one of {}",
string,
token,
TIME_UNITS.keySet()
);
}
if (lastUnit != null) {
if (lastUnit.v2() < msMultiplier) {
throw new ParsingException(
source,
errorPrefix + "units must be ordered from the longest to the shortest, found [{}] before [{}]",
string,
lastUnit.v1(),
token
);
} else if (lastUnit.v2().equals(msMultiplier)) {
throw new ParsingException(
source,
errorPrefix + "a given unit must only appear once, found [{}] multiple times",
string,
token
);
}
}
lastUnit = new Tuple<>(token, msMultiplier);
millis += number * msMultiplier;
}
return Duration.ofMillis(millis);
}
static String unquote(Source source) {
// remove leading and trailing ' for strings and also eliminate escaped single quotes
if (source == null) {
return null;
}
String text = source.text();
boolean unescaped = text.startsWith("`");
// remove leading/trailing chars
text = text.substring(1, text.length() - 1);
if (unescaped) {
return text;
}
StringBuilder sb = new StringBuilder();
// https://prometheus.io/docs/prometheus/latest/querying/basics/#string-literals
// Go: https://golang.org/ref/spec#Rune_literals
char[] chars = text.toCharArray();
for (int i = 0; i < chars.length;) {
if (chars[i] == '\\') {
// ANTLR4 Grammar guarantees there is always a character after the `\`
switch (chars[++i]) {
case 'a':
sb.append('\u0007');
break;
case 'b':
sb.append('\b');
break;
case 'f':
sb.append('\f');
break;
case 'n':
sb.append('\n');
break;
case 'r':
sb.append('\r');
break;
case 't':
sb.append('\t');
break;
case 'v':
sb.append('\u000B');
break;
case '\\':
sb.append('\\');
break;
case '\'':
sb.append('\'');
break;
case '"':
sb.append('"');
break;
case 'x':
case 'u':
case 'U':
// all 3 cases rely on hex characters - only the number of chars between them differ
// get the current chat and move to the next offset
int ch = chars[i++];
int count = ch == 'U' ? 8 : (ch == 'u' ? 4 : 2);
sb.append(fromRadix(source, chars, i, count, 16));
i += count - 1;
break;
default:
// octal declaration - eats 3 chars
// there's no escape character, no need to move the offset
count = 3;
sb.append(fromRadix(source, chars, i, count, 8));
i += count - 1;
}
i++;
} else {
sb.append(chars[i++]);
}
}
return sb.toString();
}
// parse the given number of strings to
private static String fromRadix(Source source, char[] chars, int offset, int count, int radix) {
if (offset + count > chars.length) {
throw new ParsingException(
source,
"Incomplete escape sequence at [{}], expected [{}] found [{}]",
offset,
count,
chars.length - offset - 1 // offset starts at 0
);
}
String toParse = new String(chars, offset, count);
int code;
try {
code = Integer.parseInt(toParse, radix);
} catch (NumberFormatException ex) {
throw new ParsingException(source, "Invalid unicode character code [{}]", toParse);
}
// For \x escapes (2-digit hex), validate UTF-8 compliance
// Single-byte UTF-8 characters must be in range 0x00-0x7F
if (radix == 16 && count == 2) {
if (code >= 0x80 && code <= 0xFF) {
throw new ParsingException(
source,
"Invalid unicode character code [\\x{}], single-byte UTF-8 characters must be in range 0x00-0x7F",
toParse
);
}
}
return String.valueOf(Character.toChars(code));
}
/**
* Adjusts the location of the source by the line and column offsets.
* @see #adjustLocation(Location, int, int)
*/
public static Source adjustSource(Source source, int startLine, int startColumn) {
return new Source(adjustLocation(source.source(), startLine, startColumn), source.text());
}
/**
* Adjusts the location by the line and column offsets.
* The PromQL query inside the PROMQL command is parsed separately,
* so its line and column numbers need to be adjusted to match their
* position inside the full ES|QL query.
*/
public static Location adjustLocation(Location location, int startLine, int startColumn) {
int lineNumber = location.getLineNumber();
int columnNumber = location.getColumnNumber();
return new Location(adjustLine(lineNumber, startLine), adjustColumn(lineNumber, columnNumber, startColumn));
}
/**
* Adjusts the line and column numbers of the given {@code ParsingException}
* by the provided offsets.
* The PromQL query inside the PROMQL command is parsed separately,
* so its line and column numbers need to be adjusted to match their
* position inside the full ES|QL query.
*/
public static ParsingException adjustParsingException(ParsingException pe, int promqlStartLine, int promqlStartColumn) {
ParsingException adjusted = new ParsingException(
pe.getErrorMessage(),
pe.getCause() instanceof Exception ? (Exception) pe.getCause() : null,
adjustLine(pe.getLineNumber(), promqlStartLine),
adjustColumn(pe.getLineNumber(), pe.getColumnNumber(), promqlStartColumn)
);
adjusted.setStackTrace(pe.getStackTrace());
return adjusted;
}
private static int adjustLine(int lineNumber, int startLine) {
return lineNumber + startLine - 1;
}
private static int adjustColumn(int lineNumber, int columnNumber, int startColumn) {
// the column offset only applies to the first line of the PROMQL command
return lineNumber == 1 ? columnNumber + startColumn - 1 : columnNumber;
}
/*
* Parses a Prometheus date which can be either a float representing epoch seconds or an RFC3339 date string.
*/
public static Instant parseDate(Source source, String value) {
try {
return Instant.ofEpochMilli((long) (Double.parseDouble(value) * 1000));
} catch (NumberFormatException ignore) {
// Not a float, try parsing as date string
}
try {
return Instant.parse(value);
} catch (DateTimeParseException e) {
throw new ParsingException(source, "Invalid date format [{}]", value);
}
}
}
| PromqlParserUtils |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/optional/OptionalAssert_containsSame_Test.java | {
"start": 1146,
"end": 3155
} | class ____ {
@Test
void should_fail_when_actual_is_null() {
// GIVEN
@SuppressWarnings("OptionalAssignedToNull")
Optional<Object> actual = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).containsSame("something"));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_expected_value_is_null() {
assertThatIllegalArgumentException().isThrownBy(() -> assertThat(Optional.of("something")).containsSame(null))
.withMessage("The expected value should not be <null>.");
}
@Test
void should_pass_if_optional_contains_the_expected_object_reference() {
assertThat(Optional.of("something")).containsSame("something");
}
@Test
void should_fail_if_optional_does_not_contain_the_expected_object_reference() {
// GIVEN
Optional<String> actual = Optional.of("not-expected");
String expectedValue = "something";
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).containsSame(expectedValue));
// THEN
then(assertionError).hasMessage(shouldContainSame(actual, expectedValue).create());
}
@Test
void should_fail_if_optional_contains_equal_but_not_same_value() {
// GIVEN
Optional<String> actual = Optional.of(new String("something"));
String expectedValue = "something";
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).containsSame(expectedValue));
// THEN
then(assertionError).hasMessage(shouldContainSame(actual, expectedValue).create());
}
@Test
void should_fail_if_optional_is_empty() {
// GIVEN
String expectedValue = "something";
// WHEN
Optional<Object> actual = Optional.empty();
var assertionError = expectAssertionError(() -> assertThat(actual).containsSame(expectedValue));
// THEN
then(assertionError).hasMessage(shouldContainSame(actual, expectedValue).create());
}
}
| OptionalAssert_containsSame_Test |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/client/Stats.java | {
"start": 3048,
"end": 4090
} | class ____ {
public abstract Locality locality();
public abstract long totalIssuedRequests();
public abstract long totalSuccessfulRequests();
public abstract long totalErrorRequests();
public abstract long totalRequestsInProgress();
public abstract ImmutableMap<String, BackendLoadMetricStats> loadMetricStatsMap();
public static UpstreamLocalityStats create(Locality locality, long totalIssuedRequests,
long totalSuccessfulRequests, long totalErrorRequests,
long totalRequestsInProgress,
Map<String, BackendLoadMetricStats> loadMetricStatsMap) {
return new AutoValue_Stats_UpstreamLocalityStats(locality, totalIssuedRequests,
totalSuccessfulRequests, totalErrorRequests, totalRequestsInProgress,
ImmutableMap.copyOf(loadMetricStatsMap));
}
}
/**
* Load metric stats for multi-dimensional load balancing.
*/
public static final | UpstreamLocalityStats |
java | apache__hadoop | hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/FedBalanceConfigs.java | {
"start": 1008,
"end": 1236
} | class ____ {
public static final String LAST_SNAPSHOT_NAME = "DISTCP-BALANCE-CURRENT";
public static final String CURRENT_SNAPSHOT_NAME = "DISTCP-BALANCE-NEXT";
/* Specify the behaviour of trash. */
public | FedBalanceConfigs |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedule.java | {
"start": 919,
"end": 1050
} | interface ____<S extends Schedule> {
String type();
S parse(XContentParser parser) throws IOException;
}
}
| Parser |
java | quarkusio__quarkus | integration-tests/main/src/main/java/io/quarkus/it/rest/SerializableDoubleFunction.java | {
"start": 103,
"end": 405
} | class ____ implements Serializable, ToDoubleFunction<Integer> {
private final double value;
public SerializableDoubleFunction(double inputValue) {
value = inputValue;
}
@Override
public double applyAsDouble(Integer o) {
return value;
}
}
| SerializableDoubleFunction |
java | google__dagger | javatests/dagger/functional/guava/OptionalBindingComponents.java | {
"start": 1768,
"end": 1832
} | enum ____ {
VALUE,
QUALIFIED_VALUE
}
static final | Value |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/shard/IndexingFailuresDebugListener.java | {
"start": 701,
"end": 1988
} | class ____ implements IndexingOperationListener {
private static final Logger LOGGER = LogManager.getLogger(IndexingFailuresDebugListener.class);
private final IndexShard indexShard;
public IndexingFailuresDebugListener(IndexShard indexShard) {
this.indexShard = indexShard;
}
@Override
public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
if (LOGGER.isDebugEnabled()) {
if (result.getResultType() == Engine.Result.Type.FAILURE) {
postIndex(shardId, index, result.getFailure());
}
}
}
@Override
public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
() -> format(
"index-fail [%s] seq# [%s] allocation-id [%s] primaryTerm [%s] operationPrimaryTerm [%s] origin [%s]",
index.id(),
index.seqNo(),
indexShard.routingEntry().allocationId(),
index.primaryTerm(),
indexShard.getOperationPrimaryTerm(),
index.origin()
),
ex
);
}
}
}
| IndexingFailuresDebugListener |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/resolver/PluginsMetadataGenerator.java | {
"start": 1972,
"end": 8011
} | class ____ implements MetadataGenerator {
private static final String PLUGIN_DESCRIPTOR_LOCATION = "META-INF/maven/plugin.xml";
private final Logger logger = LoggerFactory.getLogger(getClass());
private final Map<Object, PluginsMetadata> processedPlugins;
private final Instant timestamp;
PluginsMetadataGenerator(RepositorySystemSession session, InstallRequest request) {
this(session, request.getMetadata());
}
PluginsMetadataGenerator(RepositorySystemSession session, DeployRequest request) {
this(session, request.getMetadata());
}
private PluginsMetadataGenerator(RepositorySystemSession session, Collection<? extends Metadata> metadatas) {
this.processedPlugins = new LinkedHashMap<>();
this.timestamp = (Instant) ConfigUtils.getObject(session, MonotonicClock.now(), Constants.MAVEN_START_INSTANT);
/*
* NOTE: This should be considered a quirk to support interop with Maven's legacy ArtifactDeployer which
* processes one artifact at a time and hence cannot associate the artifacts from the same project to use the
* same version index. Allowing the caller to pass in metadata from a previous deployment allows to re-establish
* the association between the artifacts of the same project.
*/
for (Iterator<? extends Metadata> it = metadatas.iterator(); it.hasNext(); ) {
Metadata metadata = it.next();
if (metadata instanceof PluginsMetadata pluginMetadata) {
it.remove();
processedPlugins.put(pluginMetadata.getGroupId(), pluginMetadata);
}
}
}
@Override
public Collection<? extends Metadata> prepare(Collection<? extends Artifact> artifacts) {
return Collections.emptyList();
}
@Override
public Artifact transformArtifact(Artifact artifact) {
return artifact;
}
@Override
public Collection<? extends Metadata> finish(Collection<? extends Artifact> artifacts) {
LinkedHashMap<String, PluginsMetadata> plugins = new LinkedHashMap<>();
for (Artifact artifact : artifacts) {
PluginInfo pluginInfo = extractPluginInfo(artifact);
if (pluginInfo != null) {
String key = pluginInfo.groupId;
if (processedPlugins.get(key) == null) {
PluginsMetadata pluginMetadata = plugins.get(key);
if (pluginMetadata == null) {
pluginMetadata = new PluginsMetadata(pluginInfo, timestamp);
plugins.put(key, pluginMetadata);
}
}
}
}
return plugins.values();
}
private PluginInfo extractPluginInfo(Artifact artifact) {
// sanity: jar, no classifier and file exists
if (artifact != null
&& "jar".equals(artifact.getExtension())
&& "".equals(artifact.getClassifier())
&& artifact.getPath() != null) {
Path artifactPath = artifact.getPath();
if (Files.isRegularFile(artifactPath)) {
try (JarFile artifactJar = new JarFile(artifactPath.toFile(), false)) {
ZipEntry pluginDescriptorEntry = artifactJar.getEntry(PLUGIN_DESCRIPTOR_LOCATION);
if (pluginDescriptorEntry != null) {
try (InputStream is = artifactJar.getInputStream(pluginDescriptorEntry)) {
// Note: using DOM instead of use of
// org.apache.maven.plugin.descriptor.PluginDescriptor
// as it would pull in dependency on:
// - maven-plugin-api (for model)
// - Plexus Container (for model supporting classes and exceptions)
XmlNode root = XmlService.read(is, null);
String groupId = mayGetChild(root, "groupId");
String artifactId = mayGetChild(root, "artifactId");
String goalPrefix = mayGetChild(root, "goalPrefix");
String name = mayGetChild(root, "name");
// sanity check: plugin descriptor extracted from artifact must have same GA
if (Objects.equals(artifact.getGroupId(), groupId)
&& Objects.equals(artifact.getArtifactId(), artifactId)) {
// here groupId and artifactId cannot be null
return new PluginInfo(groupId, artifactId, goalPrefix, name);
} else {
logger.warn(
"Artifact {}:{}"
+ " JAR (about to be installed/deployed) contains Maven Plugin metadata for"
+ " conflicting coordinates: {}:{}."
+ " Your JAR contains rogue Maven Plugin metadata."
+ " Possible causes may be: shaded into this JAR some Maven Plugin or some rogue resource.",
artifact.getGroupId(),
artifact.getArtifactId(),
groupId,
artifactId);
}
}
}
} catch (Exception e) {
// here we can have: IO. ZIP or Plexus Conf Ex: but we should not interfere with user intent
}
}
}
return null;
}
private static String mayGetChild(XmlNode node, String child) {
XmlNode c = node.child(child);
if (c != null) {
return c.value();
}
return null;
}
}
| PluginsMetadataGenerator |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStSharedResourcesFactory.java | {
"start": 1467,
"end": 7146
} | enum ____ {
/** Memory allocated per Slot (shared across slot tasks), managed by Flink. */
SLOT_SHARED_MANAGED(false, MemoryShareScope.SLOT) {
@Override
protected OpaqueMemoryResource<ForStSharedResources> createInternal(
ForStMemoryConfiguration jobMemoryConfig,
String resourceId,
Environment env,
double memoryFraction,
LongFunctionWithException<ForStSharedResources, Exception> allocator)
throws Exception {
return env.getMemoryManager()
.getSharedMemoryResourceForManagedMemory(resourceId, allocator, memoryFraction);
}
},
/** Memory allocated per Slot (shared across slot tasks), unmanaged. */
SLOT_SHARED_UNMANAGED(false, MemoryShareScope.SLOT) {
@Override
protected OpaqueMemoryResource<ForStSharedResources> createInternal(
ForStMemoryConfiguration jobMemoryConfig,
String resourceId,
Environment env,
double memoryFraction,
LongFunctionWithException<ForStSharedResources, Exception> allocator)
throws Exception {
return env.getMemoryManager()
.getExternalSharedMemoryResource(
resourceId,
allocator,
jobMemoryConfig.getFixedMemoryPerSlot().getBytes());
}
},
/** Memory allocated per TM (shared across all tasks), unmanaged. */
TM_SHARED_UNMANAGED(false, MemoryShareScope.TM) {
@Override
protected OpaqueMemoryResource<ForStSharedResources> createInternal(
ForStMemoryConfiguration jobMemoryConfig,
String resourceId,
Environment env,
double memoryFraction,
LongFunctionWithException<ForStSharedResources, Exception> allocator)
throws Exception {
SharedResources sharedResources = env.getSharedResources();
Object leaseHolder = new Object();
SharedResources.ResourceAndSize<ForStSharedResources> resource =
sharedResources.getOrAllocateSharedResource(
resourceId, leaseHolder, allocator, getTmSharedMemorySize(env));
ThrowingRunnable<Exception> disposer =
() -> sharedResources.release(resourceId, leaseHolder, unused -> {});
return new OpaqueMemoryResource<>(resource.resourceHandle(), resource.size(), disposer);
}
};
private final boolean managed;
private final MemoryShareScope shareScope;
ForStSharedResourcesFactory(boolean managed, MemoryShareScope shareScope) {
this.managed = managed;
this.shareScope = shareScope;
}
@Nullable
public static ForStSharedResourcesFactory from(
ForStMemoryConfiguration jobMemoryConfig, Environment env) {
if (jobMemoryConfig.isUsingFixedMemoryPerSlot()) {
return ForStSharedResourcesFactory.SLOT_SHARED_UNMANAGED;
} else if (jobMemoryConfig.isUsingManagedMemory()) {
return ForStSharedResourcesFactory.SLOT_SHARED_MANAGED;
} else if (getTmSharedMemorySize(env) > 0) {
return ForStSharedResourcesFactory.TM_SHARED_UNMANAGED;
} else {
// not shared and not managed - allocate per column family
return null;
}
}
public final OpaqueMemoryResource<ForStSharedResources> create(
ForStMemoryConfiguration jobMemoryConfig,
Environment env,
double memoryFraction,
Logger logger,
ForStMemoryControllerUtils.ForStMemoryFactory forStMemoryFactory)
throws Exception {
logger.info(
"Getting shared memory for ForSt: shareScope={}, managed={}", shareScope, managed);
return createInternal(
jobMemoryConfig,
managed ? MANAGED_MEMORY_RESOURCE_ID : UNMANAGED_MEMORY_RESOURCE_ID,
env,
memoryFraction,
createAllocator(
shareScope.getConfiguration(jobMemoryConfig, env), forStMemoryFactory));
}
protected abstract OpaqueMemoryResource<ForStSharedResources> createInternal(
ForStMemoryConfiguration jobMemoryConfig,
String resourceId,
Environment env,
double memoryFraction,
LongFunctionWithException<ForStSharedResources, Exception> allocator)
throws Exception;
private static long getTmSharedMemorySize(Environment env) {
return env.getTaskManagerInfo()
.getConfiguration()
.getOptional(FIX_PER_TM_MEMORY_SIZE)
.orElse(MemorySize.ZERO)
.getBytes();
}
private static final String MANAGED_MEMORY_RESOURCE_ID = "state-forst-managed-memory";
private static final String UNMANAGED_MEMORY_RESOURCE_ID = "state-forst-fixed-slot-memory";
private static LongFunctionWithException<ForStSharedResources, Exception> createAllocator(
ForStMemoryConfiguration config,
ForStMemoryControllerUtils.ForStMemoryFactory forStMemoryFactory) {
return size ->
ForStMemoryControllerUtils.allocateForStSharedResources(
size,
config.getWriteBufferRatio(),
config.getHighPriorityPoolRatio(),
config.isUsingPartitionedIndexFilters(),
forStMemoryFactory);
}
}
| ForStSharedResourcesFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/quote/User.java | {
"start": 776,
"end": 2347
} | class ____ implements Serializable {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private long id;
@ManyToMany
private Set<Role> roles = new HashSet<>();
// These exist solely for HHH-8464 to ensure that the various forms of quoting are normalized internally
// (using backticks), including the join column. Without normalization, the mapping will throw a
// DuplicateMappingException.
@ManyToOne
@JoinColumn(name = "\"house\"")
private House house;
@Column(name = "\"house\"", insertable = false, updatable = false )
private Long house1;
@Column(name = "`house`", insertable = false, updatable = false )
private Long house2;
// test UK on FK w/ global quoting -- see HHH-8638
// This MUST be initialized. Several DBs do not allow multiple null values in a unique column.
@ManyToOne(cascade = CascadeType.ALL)
@JoinColumn(name = "house3")
private House house3 = new House();
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Set<Role> getRoles() {
return roles;
}
public void setRoles(Set<Role> roles) {
this.roles = roles;
}
public House getHouse() {
return house;
}
public void setHouse(House house) {
this.house = house;
}
public Long getHouse1() {
return house1;
}
public void setHouse1(Long house1) {
this.house1 = house1;
}
public Long getHouse2() {
return house2;
}
public void setHouse2(Long house2) {
this.house2 = house2;
}
public House getHouse3() {
return house;
}
public void setHouse3(House house3) {
this.house3 = house3;
}
}
| User |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/CsrfConfigurerTests.java | {
"start": 35907,
"end": 36284
} | class ____ {
static AccessDeniedHandler DENIED_HANDLER;
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.exceptionHandling((handling) -> handling
.accessDeniedHandler(DENIED_HANDLER));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | AccessDeniedHandlerConfig |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest14.java | {
"start": 837,
"end": 1244
} | class ____ extends TestCase {
public void test_true() throws Exception {
assertTrue(WallUtils.isValidateMySql(//
"SELECT index_privacy AS index,info_privacy AS ?,msgboard_privacy AS messageboard,photos_privacy AS photos," +
"diary_privacy AS diary,owrite_privacy AS weibo " +
"FROM pw_ouserdata WHERE uid= ?"));
}
}
| MySqlWallTest14 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/namingstrategy/complete/Order.java | {
"start": 567,
"end": 1487
} | class ____ {
private Integer id;
private String referenceCode;
private Date placed;
private Date fulfilled;
private Customer customer;
@Id
@GeneratedValue
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
@Basic
@NaturalId
public String getReferenceCode() {
return referenceCode;
}
public void setReferenceCode(String referenceCode) {
this.referenceCode = referenceCode;
}
@Temporal(TemporalType.TIMESTAMP )
public Date getPlaced() {
return placed;
}
public void setPlaced(Date placed) {
this.placed = placed;
}
@Temporal(TemporalType.TIMESTAMP )
public Date getFulfilled() {
return fulfilled;
}
public void setFulfilled(Date fulfilled) {
this.fulfilled = fulfilled;
}
@ManyToOne
@JoinColumn
public Customer getCustomer() {
return customer;
}
public void setCustomer(Customer customer) {
this.customer = customer;
}
}
| Order |
java | google__guava | guava-testlib/test/com/google/common/testing/ClassSanityTesterTest.java | {
"start": 36518,
"end": 36590
} | class ____ {
private NotInstantiable() {}
}
private | NotInstantiable |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/short_/ShortAssert_isBetween_Shorts_Test.java | {
"start": 900,
"end": 1254
} | class ____ extends ShortAssertBaseTest {
@Override
protected ShortAssert invoke_api_method() {
return assertions.isBetween((short) 6, (short) 8);
}
@Override
protected void verify_internal_effects() {
verify(shorts).assertIsBetween(getInfo(assertions), getActual(assertions), (short) 6, (short) 8);
}
}
| ShortAssert_isBetween_Shorts_Test |
java | netty__netty | transport/src/main/java/io/netty/channel/ChannelOutboundBuffer.java | {
"start": 2107,
"end": 4955
} | class ____ {
// Assuming a 64-bit JVM:
// - 16 bytes object header
// - 6 reference fields
// - 2 long fields
// - 2 int fields
// - 1 boolean field
// - padding
static final int CHANNEL_OUTBOUND_BUFFER_ENTRY_OVERHEAD =
SystemPropertyUtil.getInt("io.netty.transport.outboundBufferEntrySizeOverhead", 96);
private static final InternalLogger logger = InternalLoggerFactory.getInstance(ChannelOutboundBuffer.class);
private static final FastThreadLocal<ByteBuffer[]> NIO_BUFFERS = new FastThreadLocal<ByteBuffer[]>() {
@Override
protected ByteBuffer[] initialValue() throws Exception {
return new ByteBuffer[1024];
}
};
private final Channel channel;
// Entry(flushedEntry) --> ... Entry(unflushedEntry) --> ... Entry(tailEntry)
//
// The Entry that is the first in the linked-list structure that was flushed
private Entry flushedEntry;
// The Entry which is the first unflushed in the linked-list structure
private Entry unflushedEntry;
// The Entry which represents the tail of the buffer
private Entry tailEntry;
// The number of flushed entries that are not written yet
private int flushed;
private int nioBufferCount;
private long nioBufferSize;
private boolean inFail;
private static final AtomicLongFieldUpdater<ChannelOutboundBuffer> TOTAL_PENDING_SIZE_UPDATER =
AtomicLongFieldUpdater.newUpdater(ChannelOutboundBuffer.class, "totalPendingSize");
@SuppressWarnings("UnusedDeclaration")
private volatile long totalPendingSize;
private static final AtomicIntegerFieldUpdater<ChannelOutboundBuffer> UNWRITABLE_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(ChannelOutboundBuffer.class, "unwritable");
@SuppressWarnings("UnusedDeclaration")
private volatile int unwritable;
private volatile Runnable fireChannelWritabilityChangedTask;
ChannelOutboundBuffer(AbstractChannel channel) {
this.channel = channel;
}
/**
* Add given message to this {@link ChannelOutboundBuffer}. The given {@link ChannelPromise} will be notified once
* the message was written.
*/
public void addMessage(Object msg, int size, ChannelPromise promise) {
Entry entry = Entry.newInstance(msg, size, total(msg), promise);
if (tailEntry == null) {
flushedEntry = null;
} else {
Entry tail = tailEntry;
tail.next = entry;
}
tailEntry = entry;
if (unflushedEntry == null) {
unflushedEntry = entry;
}
// Touch the message to make it easier to debug buffer leaks.
// this save both checking against the ReferenceCounted interface
// and makes better use of virtual calls vs | ChannelOutboundBuffer |
java | apache__camel | catalog/camel-route-parser/src/test/java/org/apache/camel/parser/java/MyNewLineRouteBuilder.java | {
"start": 896,
"end": 1190
} | class ____ extends RouteBuilder {
@Override
public void configure() {
from("timer:foo")
.toD("file:output?fileExist=Append"
+ "&chmod=777"
+ "&allowNullBody=true")
.to("log:b");
}
}
| MyNewLineRouteBuilder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java | {
"start": 2892,
"end": 20871
} | class ____ {
static {
GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.DEBUG);
}
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
params.add(new Object[]{ Boolean.FALSE });
params.add(new Object[]{ Boolean.TRUE });
return params;
}
private static boolean useAsyncEditLog;
public TestEditLogTailer(Boolean async) {
useAsyncEditLog = async;
}
private static final String DIR_PREFIX = "/dir";
private static final int DIRS_TO_MAKE = 20;
static final long SLEEP_TIME = 1000;
static final long NN_LAG_TIMEOUT = 10 * 1000;
static {
GenericTestUtils.setLogLevel(FSImage.LOG, Level.DEBUG);
GenericTestUtils.setLogLevel(FSEditLog.LOG, org.slf4j.event.Level.DEBUG);
GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.DEBUG);
}
private static Configuration getConf() {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
useAsyncEditLog);
return conf;
}
@Test
public void testTailer() throws IOException, InterruptedException,
ServiceFailedException {
Configuration conf = getConf();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
conf.setLong(EditLogTailer.DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_KEY, 3);
HAUtil.setAllowStandbyReads(conf, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
try {
for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
NameNodeAdapter.mkdirs(nn1, getDirPath(i),
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
}
HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
assertEquals(nn1.getNamesystem().getEditLog().getLastWrittenTxId(),
nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1,
"Inconsistent number of applied txns on Standby");
for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,
getDirPath(i), false, false, false).isDirectory());
}
for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
NameNodeAdapter.mkdirs(nn1, getDirPath(i),
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
}
HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
assertEquals(nn1.getNamesystem().getEditLog().getLastWrittenTxId(),
nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1,
"Inconsistent number of applied txns on Standby");
for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,
getDirPath(i), false, false, false).isDirectory());
}
} finally {
cluster.shutdown();
}
}
@Test
public void testTailerBackoff() throws Exception {
Configuration conf = new Configuration();
NameNode.initMetrics(conf, HdfsServerConstants.NamenodeRole.NAMENODE);
conf.setTimeDuration(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,
1, TimeUnit.MILLISECONDS);
conf.setTimeDuration(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_BACKOFF_MAX_KEY,
10, TimeUnit.MILLISECONDS);
FSNamesystem mockNamesystem = mock(FSNamesystem.class);
FSImage mockImage = mock(FSImage.class);
NNStorage mockStorage = mock(NNStorage.class);
when(mockNamesystem.getFSImage()).thenReturn(mockImage);
when(mockImage.getStorage()).thenReturn(mockStorage);
final Queue<Long> sleepDurations = new ConcurrentLinkedQueue<>();
final int zeroEditCount = 5;
final AtomicInteger tailEditsCallCount = new AtomicInteger(0);
EditLogTailer tailer = new EditLogTailer(mockNamesystem, conf) {
@Override
void sleep(long sleepTimeMs) {
if (sleepDurations.size() <= zeroEditCount) {
sleepDurations.add(sleepTimeMs);
}
}
@Override
public long doTailEdits() {
return tailEditsCallCount.getAndIncrement() < zeroEditCount ? 0 : 1;
}
};
tailer.start();
try {
GenericTestUtils.waitFor(
() -> sleepDurations.size() > zeroEditCount, 50, 10000);
} finally {
tailer.stop();
}
List<Long> expectedDurations = Arrays.asList(2L, 4L, 8L, 10L, 10L, 1L);
assertEquals(expectedDurations, new ArrayList<>(sleepDurations));
}
@Test
public void testNN0TriggersLogRolls() throws Exception {
testStandbyTriggersLogRolls(0);
}
@Test
public void testNN1TriggersLogRolls() throws Exception {
testStandbyTriggersLogRolls(1);
}
@Test
public void testNN2TriggersLogRolls() throws Exception {
testStandbyTriggersLogRolls(2);
}
private static void testStandbyTriggersLogRolls(int activeIndex)
throws Exception {
Configuration conf = getConf();
// Roll every 1s
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
MiniDFSCluster cluster = null;
for (int i = 0; i < 5; i++) {
try {
cluster = createMiniDFSCluster(conf, 3);
break;
} catch (BindException e) {
// retry if race on ports given by ServerSocketUtil#getPorts
continue;
}
}
if (cluster == null) {
fail("failed to start mini cluster.");
}
try {
cluster.transitionToActive(activeIndex);
waitForLogRollInSharedDir(cluster, 3);
} finally {
cluster.shutdown();
}
}
/*
1. when all NN become standby nn, standby NN execute to roll log,
it will be failed.
2. when one NN become active, standby NN roll log success.
*/
@Test
public void testTriggersLogRollsForAllStandbyNN() throws Exception {
Configuration conf = getConf();
// Roll every 1s
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
MiniDFSCluster cluster = null;
try {
cluster = createMiniDFSCluster(conf, 3);
cluster.transitionToStandby(0);
cluster.transitionToStandby(1);
cluster.transitionToStandby(2);
try {
waitForLogRollInSharedDir(cluster, 3);
fail("After all NN become Standby state, Standby NN should roll log, " +
"but it will be failed");
} catch (TimeoutException ignore) {
}
cluster.transitionToActive(0);
waitForLogRollInSharedDir(cluster, 3);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private static String getDirPath(int suffix) {
return DIR_PREFIX + suffix;
}
private static void waitForLogRollInSharedDir(MiniDFSCluster cluster,
long startTxId) throws Exception {
URI sharedUri = cluster.getSharedEditsDir(0, 2);
File sharedDir = new File(sharedUri.getPath(), "current");
final File expectedInProgressLog =
new File(sharedDir, NNStorage.getInProgressEditsFileName(startTxId));
final File expectedFinalizedLog = new File(sharedDir,
NNStorage.getFinalizedEditsFileName(startTxId, startTxId + 1));
// There is a chance that multiple rolling happens by multiple NameNodes
// And expected inprogress file would have also finalized. So look for the
// finalized edits file as well
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return expectedInProgressLog.exists() || expectedFinalizedLog.exists();
}
}, 100, 10000);
}
@Test
@Timeout(value = 20)
public void testRollEditTimeoutForActiveNN() throws IOException {
Configuration conf = getConf();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ROLLEDITS_TIMEOUT_KEY, 5); // 5s
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
HAUtil.setAllowStandbyReads(conf, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
try {
EditLogTailer tailer = Mockito.spy(
cluster.getNamesystem(1).getEditLogTailer());
AtomicInteger flag = new AtomicInteger(0);
// Return a slow roll edit process.
when(tailer.getNameNodeProxy()).thenReturn(
new Callable<Void>() {
@Override
public Void call() throws Exception {
Thread.sleep(30000); // sleep for 30 seconds.
assertTrue(Thread.currentThread().isInterrupted());
flag.addAndGet(1);
return null;
}
}
);
tailer.triggerActiveLogRoll();
assertEquals(0, flag.get());
} finally {
cluster.shutdown();
}
}
@Test
public void testRollEditLogIOExceptionForRemoteNN() throws IOException {
Configuration conf = getConf();
// Roll every 1s
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
MiniDFSCluster cluster = null;
try {
cluster = createMiniDFSCluster(conf, 3);
cluster.transitionToActive(0);
EditLogTailer tailer = Mockito.spy(
cluster.getNamesystem(1).getEditLogTailer());
final AtomicInteger invokedTimes = new AtomicInteger(0);
// It should go on to next name node when IOException happens.
when(tailer.getNameNodeProxy()).thenReturn(
tailer.new MultipleNameNodeProxy<Void>() {
@Override
protected Void doWork() throws IOException {
invokedTimes.getAndIncrement();
throw new IOException("It is an IO Exception.");
}
}
);
tailer.triggerActiveLogRoll();
// MultipleNameNodeProxy uses Round-robin to look for active NN
// to do RollEditLog. If doWork() fails, then IOException throws,
// it continues to try next NN. triggerActiveLogRoll finishes
// either due to success, or using up retries.
// In this test case, there are 2 remote name nodes, default retry is 3.
// For test purpose, doWork() always returns IOException,
// so the total invoked times will be default retry 3 * remote NNs 2 = 6
assertEquals(6, invokedTimes.get());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testStandbyTriggersLogRollsWhenTailInProgressEdits()
throws Exception {
// Time in seconds to wait for standby to catch up to edits from active
final int standbyCatchupWaitTime = 2;
// Time in seconds to wait before checking if edit logs are rolled while
// expecting no edit log roll
final int noLogRollWaitTime = 2;
// Time in seconds to wait before checking if edit logs are rolled while
// expecting edit log roll.
final int logRollWaitTime = 3;
final int logRollPeriod = standbyCatchupWaitTime + noLogRollWaitTime + 1;
final long logRollPeriodMs = TimeUnit.SECONDS.toMillis(logRollPeriod);
Configuration conf = getConf();
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, logRollPeriod);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true);
MiniDFSCluster cluster = createMiniDFSCluster(conf, 2);
if (cluster == null) {
fail("failed to start mini cluster.");
}
try {
int activeIndex = new Random().nextBoolean() ? 1 : 0;
int standbyIndex = (activeIndex == 0) ? 1 : 0;
cluster.transitionToActive(activeIndex);
NameNode active = cluster.getNameNode(activeIndex);
NameNode standby = cluster.getNameNode(standbyIndex);
long origTxId = active.getNamesystem().getFSImage().getEditLog()
.getCurSegmentTxId();
for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
NameNodeAdapter.mkdirs(active, getDirPath(i),
new PermissionStatus("test", "test",
new FsPermission((short)00755)), true);
}
long activeTxId = active.getNamesystem().getFSImage().getEditLog()
.getLastWrittenTxId();
waitForStandbyToCatchUpWithInProgressEdits(standby, activeTxId,
standbyCatchupWaitTime);
long curTime = standby.getNamesystem().getEditLogTailer().getTimer()
.monotonicNow();
long insufficientTimeForLogRoll = logRollPeriodMs / 3;
final FakeTimer testTimer =
new FakeTimer(curTime + insufficientTimeForLogRoll);
standby.getNamesystem().getEditLogTailer().setTimerForTest(testTimer);
Thread.sleep(2000);
for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
NameNodeAdapter.mkdirs(active, getDirPath(i),
new PermissionStatus("test", "test",
new FsPermission((short)00755)), true);
}
try {
checkForLogRoll(active, origTxId, noLogRollWaitTime);
fail("Expected to timeout");
} catch (TimeoutException e) {
// expected
}
long sufficientTimeForLogRoll = logRollPeriodMs * 3;
testTimer.advance(sufficientTimeForLogRoll);
checkForLogRoll(active, origTxId, logRollWaitTime);
} finally {
cluster.shutdown();
}
}
/**
 * Verifies that {@code triggerActiveLogRoll} recovers after the roll-edits
 * worker thread is interrupted by the 1s RPC timeout while talking to one
 * NameNode: the next invocation must round-robin to another NameNode and
 * complete the roll successfully (observed via lastRollTimeMs advancing).
 */
@Test
public void testRollEditLogHandleThreadInterruption()
throws IOException, InterruptedException, TimeoutException {
Configuration conf = getConf();
// RollEdits timeout 1s.
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ROLLEDITS_TIMEOUT_KEY, 1);
MiniDFSCluster cluster = null;
try {
cluster = createMiniDFSCluster(conf, 3);
cluster.transitionToActive(2);
// Spy so that getNameNodeProxy() can be stubbed below while all other
// tailer behavior stays real.
EditLogTailer tailer = Mockito.spy(
cluster.getNamesystem(0).getEditLogTailer());
// Stop the edit log tail thread for testing.
tailer.setShouldRunForTest(false);
final AtomicInteger invokedTimes = new AtomicInteger(0);
// For nn0 run triggerActiveLogRoll, nns is [nn1,nn2].
// Mock the NameNodeProxy for testing.
// An InterruptedIOException will be thrown when requesting to nn1.
when(tailer.getNameNodeProxy()).thenReturn(
tailer.new MultipleNameNodeProxy<Void>() {
@Override
protected Void doWork() throws IOException {
invokedTimes.getAndIncrement();
if (tailer.getCurrentNN().getNameNodeID().equals("nn1")) {
// Busy-wait until the RPC timeout interrupts this worker
// thread, then surface the interrupt as an IOException.
while (true) {
Thread.yield();
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedIOException("It is an Interrupted IOException.");
}
}
} else {
tailer.getCachedActiveProxy().rollEditLog();
return null;
}
}
}
);
// Record the initial LastRollTimeMs value.
// This time will be updated only when triggerActiveLogRoll is executed successfully.
long initLastRollTimeMs = tailer.getLastRollTimeMs();
// Execute triggerActiveLogRoll for the first time.
// The MultipleNameNodeProxy uses round-robin to look for an active NN to roll the edit log.
// Here, a request will be made to nn1, and the main thread will trigger a Timeout and
// the doWork() method will throw an InterruptedIOException.
// The getActiveNodeProxy() method will determine that the thread is interrupted
// and will return null.
tailer.triggerActiveLogRoll();
// Execute triggerActiveLogRoll for the second time.
// A request will be made to nn2 and the rollEditLog will be successfully finished and
// lastRollTimeMs will be updated.
tailer.triggerActiveLogRoll();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return tailer.getLastRollTimeMs() > initLastRollTimeMs;
}
}, 100, 10000);
// The total number of invoked times should be 2.
assertEquals(2, invokedTimes.get());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
 * Blocks until the standby NameNode has applied edits at least up to
 * {@code activeTxId}, polling every 100 ms.
 *
 * @param standby    the standby NameNode to observe
 * @param activeTxId the transaction id the standby must reach
 * @param maxWaitSec maximum time to wait, in seconds
 * @throws Exception if the standby fails to catch up within the timeout
 */
private static void waitForStandbyToCatchUpWithInProgressEdits(
    final NameNode standby, final long activeTxId,
    int maxWaitSec) throws Exception {
  final long timeoutMs = TimeUnit.SECONDS.toMillis(maxWaitSec);
  GenericTestUtils.waitFor(
      () -> standby.getNamesystem().getFSImage().getLastAppliedTxId()
          >= activeTxId,
      100, timeoutMs);
}
/**
 * Blocks until the active NameNode has rolled its edit log, i.e. the
 * current segment's starting transaction id differs from {@code origTxId}.
 *
 * @param active     the active NameNode to observe
 * @param origTxId   the segment txid recorded before the expected roll
 * @param maxWaitSec maximum time to wait, in seconds
 * @throws Exception if no log roll is observed within the timeout
 */
private static void checkForLogRoll(final NameNode active,
    final long origTxId, int maxWaitSec) throws Exception {
  final long timeoutMs = TimeUnit.SECONDS.toMillis(maxWaitSec);
  GenericTestUtils.waitFor(
      () -> active.getNamesystem().getFSImage().getEditLog()
          .getCurSegmentTxId() != origTxId,
      100, timeoutMs);
}
/**
 * Builds an HA mini cluster with {@code nnCount} NameNodes and no DataNodes.
 *
 * @param conf    configuration applied to the cluster
 * @param nnCount number of NameNodes in the HA topology
 * @return the started MiniDFSCluster
 * @throws IOException if the cluster fails to start
 */
private static MiniDFSCluster createMiniDFSCluster(Configuration conf,
    int nnCount) throws IOException {
  // Randomize the base port to reduce collisions between test runs.
  // Supplying a base port gives each NameNode an IPC port, which is
  // needed for enabling roll log.
  final int basePort = 10060 + new Random().nextInt(1000) * 2;
  final MiniDFSNNTopology topology =
      MiniDFSNNTopology.simpleHATopology(nnCount, basePort);
  return new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
}
}
| TestEditLogTailer |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/ast/beans/BeanFieldElement.java | {
"start": 975,
"end": 1779
} | interface ____ extends FieldElement, InjectableElement {
/**
* Makes the field injected.
*
* @return This field
*/
default BeanFieldElement inject() {
annotate(AnnotationUtil.INJECT);
return this;
}
@Override
default BeanFieldElement injectValue(String expression) {
return (BeanFieldElement) InjectableElement.super.injectValue(expression);
}
@NonNull
@Override
default BeanFieldElement qualifier(@Nullable String qualifier) {
return (BeanFieldElement) InjectableElement.super.qualifier(qualifier);
}
@NonNull
@Override
default BeanFieldElement qualifier(@NonNull AnnotationValue<?> qualifier) {
return (BeanFieldElement) InjectableElement.super.qualifier(qualifier);
}
}
| BeanFieldElement |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/BinaryStream.java | {
"start": 307,
"end": 877
} | interface ____ {
/**
* Retrieve the input stream.
*
* @return The input stream
*/
InputStream getInputStream();
/**
* Access to the bytes.
*
* @return The bytes.
*/
byte[] getBytes();
/**
* Retrieve the length of the input stream
*
* @return The input stream length
*/
long getLength();
/**
* Release any underlying resources.
*/
void release();
/**
* Use the given {@link LobCreator} to create a {@link Blob}
* with the same data as this binary stream.
*
* @since 7.0
*/
Blob asBlob(LobCreator lobCreator);
}
| BinaryStream |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/type/OracleJsonArrayJdbcType.java | {
"start": 583,
"end": 1135
} | class ____ extends OracleJsonArrayBlobJdbcType {
public OracleJsonArrayJdbcType(JdbcType elementJdbcType) {
super( elementJdbcType );
}
@Override
public int getDdlTypeCode() {
return SqlTypes.JSON;
}
@Override
public String toString() {
return "OracleJsonJdbcType";
}
@Override
public String getCheckCondition(String columnName, JavaType<?> javaType, BasicValueConverter<?, ?> converter, Dialect dialect) {
// No check constraint necessary, because the JSON DDL type is already OSON encoded
return null;
}
}
| OracleJsonArrayJdbcType |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/jta/TransactionFactory.java | {
"start": 1447,
"end": 2703
} | interface ____ {
/**
* Create an active Transaction object based on the given name and timeout.
* @param name the transaction name (may be {@code null})
* @param timeout the transaction timeout (may be -1 for the default timeout)
* @return the active Transaction object (never {@code null})
* @throws NotSupportedException if the transaction manager does not support
* a transaction of the specified type
* @throws SystemException if the transaction manager failed to create the
* transaction
*/
Transaction createTransaction(@Nullable String name, int timeout) throws NotSupportedException, SystemException;
/**
* Determine whether the underlying transaction manager supports XA transactions
* managed by a resource adapter (i.e. without explicit XA resource enlistment).
* <p>Typically {@code false}. Checked by
* {@link org.springframework.jca.endpoint.AbstractMessageEndpointFactory}
* in order to differentiate between invalid configuration and valid
* ResourceAdapter-managed transactions.
* @see jakarta.resource.spi.ResourceAdapter#endpointActivation
* @see jakarta.resource.spi.endpoint.MessageEndpointFactory#isDeliveryTransacted
*/
boolean supportsResourceAdapterManagedTransactions();
}
| TransactionFactory |
java | apache__camel | components/camel-wordpress/src/main/java/org/apache/camel/component/wordpress/api/model/PostOrderBy.java | {
"start": 915,
"end": 1043
} | enum ____ {
author,
date,
id,
include,
modified,
parent,
relevance,
slug,
title;
}
| PostOrderBy |
java | netty__netty | common/src/main/java/io/netty/util/concurrent/DefaultPromise.java | {
"start": 30418,
"end": 31063
} | class ____ extends CancellationException {
private static final long serialVersionUID = -2974906711413716191L;
private StacklessCancellationException() { }
// Override fillInStackTrace() so we not populate the backtrace via a native call and so leak the
// Classloader.
@Override
public Throwable fillInStackTrace() {
return this;
}
static StacklessCancellationException newInstance(Class<?> clazz, String method) {
return ThrowableUtil.unknownStackTrace(new StacklessCancellationException(), clazz, method);
}
}
}
| StacklessCancellationException |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/field/samename/v1/SameFieldProtectedTest.java | {
"start": 203,
"end": 630
} | class ____ {
@Test
public void test() {
try (ApplicationContext ctx = ApplicationContext.run(Map.of("spec.name", SameFieldProtectedTest.class.getSimpleName()))) {
Abc abc = ctx.getBean(Abc.class);
Foo foo = ctx.getBean(Foo.class);
Assertions.assertEquals(foo.getFooAbc(), abc);
Assertions.assertEquals(foo.getBarAbc(), abc);
}
}
}
| SameFieldProtectedTest |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/console/ConsoleDetailsTests.java | {
"start": 4749,
"end": 5030
} | class ____ {
@Test
void failWithSingleLineMessage() {
fail("single line fail message");
}
@Test
void failWithMultiLineMessage() {
fail("multi\nline\nfail\nmessage");
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@DisplayName("Report")
static | FailTestCase |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/indices/SystemIndicesTests.java | {
"start": 1253,
"end": 15633
} | class ____ extends ESTestCase {
private static final String OPTIONAL_UPGRADE_SUFFIX_REGEX = "(" + SystemIndices.UPGRADED_INDEX_SUFFIX + ")?";
public void testBasicOverlappingPatterns() {
SystemIndexDescriptor broadPattern = SystemIndexDescriptorUtils.createUnmanaged(".a*c*", "test");
SystemIndexDescriptor notOverlapping = SystemIndexDescriptorUtils.createUnmanaged(".bbbddd*", "test");
SystemIndexDescriptor overlapping1 = SystemIndexDescriptorUtils.createUnmanaged(".ac*", "test");
SystemIndexDescriptor overlapping2 = SystemIndexDescriptorUtils.createUnmanaged(".aaaabbbccc*", "test");
SystemIndexDescriptor overlapping3 = SystemIndexDescriptorUtils.createUnmanaged(".aaabb*cccddd*", "test");
// These sources have fixed prefixes to make sure they sort in the same order, so that the error message is consistent
// across tests
String broadPatternSource = "AAA" + randomAlphaOfLength(5);
String otherSource = "ZZZ" + randomAlphaOfLength(6);
Map<String, SystemIndices.Feature> descriptors = new HashMap<>();
descriptors.put(broadPatternSource, new SystemIndices.Feature(broadPatternSource, "test feature", List.of(broadPattern)));
descriptors.put(
otherSource,
new SystemIndices.Feature(otherSource, "test 2", List.of(notOverlapping, overlapping1, overlapping2, overlapping3))
);
IllegalStateException exception = expectThrows(
IllegalStateException.class,
() -> SystemIndices.checkForOverlappingPatterns(descriptors)
);
assertThat(
exception.getMessage(),
containsString(
"a system index descriptor ["
+ broadPattern
+ "] from ["
+ broadPatternSource
+ "] overlaps with other system index descriptors:"
)
);
String fromPluginString = " from [" + otherSource + "]";
assertThat(exception.getMessage(), containsString(overlapping1.toString() + fromPluginString));
assertThat(exception.getMessage(), containsString(overlapping2.toString() + fromPluginString));
assertThat(exception.getMessage(), containsString(overlapping3.toString() + fromPluginString));
assertThat(exception.getMessage(), not(containsString(notOverlapping.toString())));
IllegalStateException constructorException = expectThrows(
IllegalStateException.class,
() -> new SystemIndices(List.copyOf(descriptors.values()))
);
assertThat(constructorException.getMessage(), equalTo(exception.getMessage()));
}
public void testComplexOverlappingPatterns() {
// These patterns are slightly more complex to detect because pattern1 does not match pattern2 and vice versa
SystemIndexDescriptor pattern1 = SystemIndexDescriptorUtils.createUnmanaged(".a*c", "test");
SystemIndexDescriptor pattern2 = SystemIndexDescriptorUtils.createUnmanaged(".ab*", "test");
// These sources have fixed prefixes to make sure they sort in the same order, so that the error message is consistent
// across tests
String source1 = "AAA" + randomAlphaOfLength(5);
String source2 = "ZZZ" + randomAlphaOfLength(6);
Map<String, SystemIndices.Feature> descriptors = new HashMap<>();
descriptors.put(source1, new SystemIndices.Feature(source1, "test", List.of(pattern1)));
descriptors.put(source2, new SystemIndices.Feature(source2, "test", List.of(pattern2)));
IllegalStateException exception = expectThrows(
IllegalStateException.class,
() -> SystemIndices.checkForOverlappingPatterns(descriptors)
);
assertThat(
exception.getMessage(),
containsString(
"a system index descriptor [" + pattern1 + "] from [" + source1 + "] overlaps with other system index descriptors:"
)
);
assertThat(exception.getMessage(), containsString(pattern2.toString() + " from [" + source2 + "]"));
IllegalStateException constructorException = expectThrows(
IllegalStateException.class,
() -> new SystemIndices(List.copyOf(descriptors.values()))
);
assertThat(constructorException.getMessage(), equalTo(exception.getMessage()));
}
public void testBuiltInSystemIndices() {
SystemIndices systemIndices = new SystemIndices(List.of());
assertTrue(systemIndices.isSystemIndex(".tasks"));
assertTrue(systemIndices.isSystemIndex(".tasks1"));
assertTrue(systemIndices.isSystemIndex(".tasks-old"));
assertTrue(systemIndices.isSystemIndex(".synonyms"));
}
public void testPluginCannotOverrideBuiltInSystemIndex() {
List<SystemIndices.Feature> pluginMap = List.of(
new SystemIndices.Feature(
TASKS_FEATURE_NAME,
"test",
List.of(SystemIndexDescriptorUtils.createUnmanaged(TASK_INDEX + "*", "Task" + " Result Index"))
)
);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SystemIndices(pluginMap));
assertThat(e.getMessage(), containsString("plugin or module attempted to define the same source"));
}
public void testPatternWithSimpleRange() {
final SystemIndices systemIndices = new SystemIndices(
List.of(
new SystemIndices.Feature(
"test",
"test feature",
List.of(SystemIndexDescriptorUtils.createUnmanaged(".test-[abc]" + OPTIONAL_UPGRADE_SUFFIX_REGEX, ""))
)
)
);
assertThat(systemIndices.isSystemIndex(".test-a"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-b"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-c"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-aa"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-d"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-="), equalTo(false));
}
public void testPatternWithSimpleRangeAndRepeatOperator() {
final SystemIndices systemIndices = new SystemIndices(
List.of(
new SystemIndices.Feature(
"test",
"test feature",
List.of(SystemIndexDescriptorUtils.createUnmanaged(".test-[a]+" + OPTIONAL_UPGRADE_SUFFIX_REGEX, ""))
)
)
);
assertThat(systemIndices.isSystemIndex(".test-a"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-aa"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-aaa"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-b"), equalTo(false));
}
public void testPatternWithComplexRange() {
final SystemIndices systemIndices = new SystemIndices(
List.of(
new SystemIndices.Feature(
"test",
"test feature",
List.of(SystemIndexDescriptorUtils.createUnmanaged(".test-[a-c]" + OPTIONAL_UPGRADE_SUFFIX_REGEX, ""))
)
)
);
assertThat(systemIndices.isSystemIndex(".test-a"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-b"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-c"), equalTo(true));
assertThat(systemIndices.isSystemIndex(".test-aa"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-d"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-"), equalTo(false));
assertThat(systemIndices.isSystemIndex(".test-="), equalTo(false));
}
public void testOverlappingDescriptorsWithRanges() {
String source1 = "source1";
String source2 = "source2";
SystemIndexDescriptor pattern1 = SystemIndexDescriptorUtils.createUnmanaged(".test-[ab]*", "");
SystemIndexDescriptor pattern2 = SystemIndexDescriptorUtils.createUnmanaged(".test-a*", "");
Map<String, SystemIndices.Feature> descriptors = new HashMap<>();
descriptors.put(source1, new SystemIndices.Feature(source1, "source 1", List.of(pattern1)));
descriptors.put(source2, new SystemIndices.Feature(source2, "source 2", List.of(pattern2)));
IllegalStateException exception = expectThrows(
IllegalStateException.class,
() -> SystemIndices.checkForOverlappingPatterns(descriptors)
);
assertThat(
exception.getMessage(),
containsString(
"a system index descriptor [" + pattern1 + "] from [" + source1 + "] overlaps with other system index descriptors:"
)
);
assertThat(exception.getMessage(), containsString(pattern2.toString() + " from [" + source2 + "]"));
}
public void testPatternsWithNoRoomForUpgradeSuffix() {
final SystemIndexDescriptor endsWithNumbersOnly = SystemIndexDescriptorUtils.createUnmanaged(
".desc[0-9]+",
"can only end with numbers"
);
final SystemIndexDescriptor concreteIndex = SystemIndexDescriptorUtils.createUnmanaged(".concrete", "concrete index");
final SystemIndexDescriptor okayDescriptor = SystemIndexDescriptorUtils.createUnmanaged(".okay*", "concrete index");
final SystemIndexDescriptor endsWithNumbersThenWildcard = SystemIndexDescriptorUtils.createUnmanaged(
".desc[0-9]+*",
"concrete index"
);
final Map<String, SystemIndices.Feature> features = new HashMap<>();
final String firstFeature = "first";
features.put(
firstFeature,
new SystemIndices.Feature(
firstFeature,
this.getTestName() + " - " + firstFeature,
Collections.singletonList(endsWithNumbersOnly)
)
);
final String secondFeature = "second";
features.put(
secondFeature,
new SystemIndices.Feature(secondFeature, this.getTestName() + " - " + secondFeature, List.of(concreteIndex, okayDescriptor))
);
final String thirdFeature = "third";
features.put(
thirdFeature,
new SystemIndices.Feature(thirdFeature, this.getTestName() + " - " + thirdFeature, List.of(endsWithNumbersThenWildcard))
);
IllegalStateException ex = expectThrows(IllegalStateException.class, () -> SystemIndices.ensurePatternsAllowSuffix(features));
assertThat(
ex.getMessage(),
allOf(
containsString(endsWithNumbersOnly.getIndexPattern()),
containsString(secondFeature),
containsString(concreteIndex.getIndexPattern()),
containsString(firstFeature)
)
);
}
public void testMappingsVersions() {
SystemIndexDescriptor unmanaged = SystemIndexDescriptorUtils.createUnmanaged(".unmanaged-*", "unmanaged");
SystemIndexDescriptor managed = SystemIndexDescriptor.builder()
.setIndexPattern(".managed-*")
.setPrimaryIndex(".managed-primary")
.setOrigin("system")
.setSettings(Settings.EMPTY)
.setMappings("""
{
"_meta": {
"version": "8.0.0",
"managed_index_mappings_version": 3
},
"properties": {
"name": { "type": "text" }
}
}
""")
.build();
SystemIndices systemIndices = new SystemIndices(
List.of(
new SystemIndices.Feature("unmanaged", "unmanaged", List.of(unmanaged)),
new SystemIndices.Feature("managed", "managed", List.of(managed))
)
);
Map<String, SystemIndexDescriptor.MappingsVersion> mappingsVersions = systemIndices.getMappingsVersions();
assertThat(mappingsVersions.get(".managed-primary"), notNullValue());
assertThat(mappingsVersions.get(".managed-primary").version(), equalTo(3));
assertThat(mappingsVersions.keySet(), not(contains("unmanaged")));
}
public void testSystemDataStreamPattern() {
String dataStreamName = ".my-data-stream";
SystemDataStreamDescriptor dataStreamDescriptor = new SystemDataStreamDescriptor(
dataStreamName,
"",
SystemDataStreamDescriptor.Type.EXTERNAL,
ComposableIndexTemplate.builder().build(),
Map.of(),
Collections.singletonList("origin"),
"origin",
ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS
);
final SystemIndices systemIndices = new SystemIndices(
List.of(
new SystemIndices.Feature("test", "test feature", Collections.emptyList(), Collections.singletonList(dataStreamDescriptor))
)
);
assertThat(
systemIndices.isSystemIndexBackingDataStream(DataStream.BACKING_INDEX_PREFIX + dataStreamName + "-2025.03.07-000001"),
equalTo(true)
);
assertThat(
systemIndices.isSystemIndexBackingDataStream(DataStream.FAILURE_STORE_PREFIX + dataStreamName + "-2025.03.07-000001"),
equalTo(true)
);
assertThat(systemIndices.isSystemIndexBackingDataStream(".migrated-ds-" + dataStreamName + "-2025.03.07-000001"), equalTo(true));
assertThat(
systemIndices.isSystemIndexBackingDataStream(".migrated-migrated-ds-" + dataStreamName + "-2025.03.07-000001"),
equalTo(true)
);
assertThat(systemIndices.isSystemIndexBackingDataStream(".migrated-" + dataStreamName + "-2025.03.07-000001"), equalTo(false));
assertThat(systemIndices.isSystemIndexBackingDataStream(dataStreamName), equalTo(false));
assertThat(systemIndices.isSystemIndexBackingDataStream(dataStreamName + "-2025.03.07-000001"), equalTo(false));
}
}
| SystemIndicesTests |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/DiffableTestUtils.java | {
"start": 1467,
"end": 6015
} | class ____ {
protected static final int NUMBER_OF_DIFF_TEST_RUNS = NUMBER_OF_TEST_RUNS;
private DiffableTestUtils() {
}
/**
* Asserts that changes are applied correctly, i.e. that applying diffs to localInstance produces that object
* equal but not the same as the remoteChanges instance.
*/
public static <T extends Diffable<T>> T assertDiffApplication(
T remoteChanges,
T localInstance,
Diff<T> diffs,
BiPredicate<? super T, ? super T> equalsPredicate
) {
T localChanges = diffs.apply(localInstance);
if (equalsPredicate == null) {
assertEquals(remoteChanges, localChanges);
assertEquals(remoteChanges.hashCode(), localChanges.hashCode());
} else if (equalsPredicate.test(remoteChanges, localChanges) == false) {
Assert.failNotEquals(null, remoteChanges, localChanges);
}
assertNotSame(remoteChanges, localChanges);
return localChanges;
}
/**
* Simulates sending diffs over the wire
*/
public static <T extends Writeable> T copyInstance(T diffs, NamedWriteableRegistry namedWriteableRegistry, Reader<T> reader)
throws IOException {
return copyInstance(diffs, namedWriteableRegistry, reader, null);
}
/**
* Simulates sending diffs over the wire
*/
public static <T extends Writeable> T copyInstance(
T diffs,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader,
@Nullable TransportVersion transportVersion
) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
if (transportVersion != null) {
output.setTransportVersion(transportVersion);
}
diffs.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
if (transportVersion != null) {
in.setTransportVersion(transportVersion);
}
return reader.read(in);
}
}
}
/**
* Tests making random changes to an object, calculating diffs for these changes, sending this
* diffs over the wire and appling these diffs on the other side.
*/
public static <T extends Diffable<T>> void testDiffableSerialization(
Supplier<T> testInstance,
Function<T, T> modifier,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader,
Reader<Diff<T>> diffReader
) throws IOException {
testDiffableSerialization(testInstance, modifier, namedWriteableRegistry, reader, diffReader, null, null);
}
/**
* Tests making random changes to an object, calculating diffs for these changes, sending this
* diffs over the wire and appling these diffs on the other side.
*/
public static <T extends Diffable<T>> void testDiffableSerialization(
Supplier<T> testInstance,
Function<T, T> modifier,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader,
Reader<Diff<T>> diffReader,
@Nullable TransportVersion transportVersion,
@Nullable BiPredicate<? super T, ? super T> equals
) throws IOException {
T remoteInstance = testInstance.get();
T localInstance = assertSerialization(remoteInstance, namedWriteableRegistry, reader);
for (int runs = 0; runs < NUMBER_OF_DIFF_TEST_RUNS; runs++) {
T remoteChanges = modifier.apply(remoteInstance);
Diff<T> remoteDiffs = remoteChanges.diff(remoteInstance);
Diff<T> localDiffs = copyInstance(remoteDiffs, namedWriteableRegistry, diffReader, transportVersion);
localInstance = assertDiffApplication(remoteChanges, localInstance, localDiffs, equals);
remoteInstance = remoteChanges;
}
}
/**
* Asserts that testInstance can be correctly.
*/
public static <T extends Writeable> T assertSerialization(
T testInstance,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader
) throws IOException {
T deserializedInstance = copyInstance(testInstance, namedWriteableRegistry, reader);
assertEquals(testInstance, deserializedInstance);
assertEquals(testInstance.hashCode(), deserializedInstance.hashCode());
assertNotSame(testInstance, deserializedInstance);
return deserializedInstance;
}
}
| DiffableTestUtils |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/ULocalVarIdent.java | {
"start": 1017,
"end": 1163
} | class ____ extends UIdent {
/** A key in a {@code Bindings} associated with a local variable of the specified name. */
static final | ULocalVarIdent |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/PortInUseException.java | {
"start": 1045,
"end": 3380
} | class ____ extends WebServerException {
private final int port;
/**
* Creates a new port in use exception for the given {@code port}.
* @param port the port that was in use
*/
public PortInUseException(int port) {
this(port, null);
}
/**
* Creates a new port in use exception for the given {@code port}.
* @param port the port that was in use
* @param cause the cause of the exception
*/
public PortInUseException(int port, @Nullable Throwable cause) {
super("Port " + port + " is already in use", cause);
this.port = port;
}
/**
* Returns the port that was in use.
* @return the port
*/
public int getPort() {
return this.port;
}
/**
* Throw a {@link PortInUseException} if the given exception was caused by a "port in
* use" {@link BindException}.
* @param ex the source exception
* @param port a supplier used to provide the port
* @since 2.2.7
*/
public static void throwIfPortBindingException(Exception ex, IntSupplier port) {
ifPortBindingException(ex, (bindException) -> {
throw new PortInUseException(port.getAsInt(), ex);
});
}
/**
* Perform an action if the given exception was caused by a "port in use"
* {@link BindException}.
* @param ex the source exception
* @param action the action to perform
* @since 2.2.7
*/
public static void ifPortBindingException(Exception ex, Consumer<BindException> action) {
ifCausedBy(ex, BindException.class, (bindException) -> {
// bind exception can be also thrown because an address can't be assigned
String message = bindException.getMessage();
if (message != null && message.toLowerCase(Locale.ROOT).contains("in use")) {
action.accept(bindException);
}
});
}
/**
* Perform an action if the given exception was caused by a specific exception type.
* @param <E> the cause exception type
* @param ex the source exception
* @param causedBy the required cause type
* @param action the action to perform
* @since 2.2.7
*/
@SuppressWarnings("unchecked")
public static <E extends Exception> void ifCausedBy(Exception ex, Class<E> causedBy, Consumer<E> action) {
Throwable candidate = ex;
while (candidate != null) {
if (causedBy.isInstance(candidate)) {
action.accept((E) candidate);
return;
}
candidate = candidate.getCause();
}
}
}
| PortInUseException |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/index/ApplicationArchiveBuildStep.java | {
"start": 2273,
"end": 2545
} | class ____ {
private static final Logger LOGGER = Logger.getLogger(ApplicationArchiveBuildStep.class);
/**
* Indexing
*/
@ConfigMapping(prefix = "quarkus.index-dependency")
@ConfigRoot(phase = ConfigPhase.BUILD_TIME)
| ApplicationArchiveBuildStep |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/exception/DefaultExceptionHandlerTest.java | {
"start": 1437,
"end": 6423
} | class ____ {
/**
* Running Maven under JDK7 may cause connection issues because IPv6 is used by default.
* <p>
* e.g running mvn site:run will cause Jetty to fail.
* </p>
* <p>
* The resolution is to add -Djava.net.preferIPv4Stack=true to the command line as documented in
* http://cwiki.apache.org/confluence/display/MAVEN/ConnectException
* </p>
*/
@Test
void testJdk7ipv6() {
ConnectException connEx = new ConnectException("Connection refused: connect");
IOException ioEx = new IOException("Unable to establish loopback connection", connEx);
MojoExecutionException mojoEx =
new MojoExecutionException("Error executing Jetty: Unable to establish loopback connection", ioEx);
ExceptionHandler exceptionHandler = new DefaultExceptionHandler();
ExceptionSummary exceptionSummary = exceptionHandler.handleException(mojoEx);
String expectedReference = "http://cwiki.apache.org/confluence/display/MAVEN/ConnectException";
assertEquals(expectedReference, exceptionSummary.getReference());
}
@Test
void testHandleExceptionAetherClassNotFound() {
Throwable cause2 = new NoClassDefFoundError("org/sonatype/aether/RepositorySystem");
Plugin plugin = new Plugin();
Exception cause = new PluginContainerException(plugin, null, null, cause2);
PluginDescriptor pluginDescriptor = new PluginDescriptor();
MojoDescriptor mojoDescriptor = new MojoDescriptor();
mojoDescriptor.setPluginDescriptor(pluginDescriptor);
MojoExecution mojoExecution = new MojoExecution(mojoDescriptor);
Throwable exception = new PluginExecutionException(mojoExecution, null, cause);
DefaultExceptionHandler handler = new DefaultExceptionHandler();
ExceptionSummary summary = handler.handleException(exception);
String expectedReference = "http://cwiki.apache.org/confluence/display/MAVEN/AetherClassNotFound";
assertEquals(expectedReference, summary.getReference());
}
@Test
void testHandleExceptionNoClassDefFoundErrorNull() {
Throwable cause2 = new NoClassDefFoundError();
Plugin plugin = new Plugin();
Exception cause = new PluginContainerException(plugin, null, null, cause2);
PluginDescriptor pluginDescriptor = new PluginDescriptor();
MojoDescriptor mojoDescriptor = new MojoDescriptor();
mojoDescriptor.setPluginDescriptor(pluginDescriptor);
MojoExecution mojoExecution = new MojoExecution(mojoDescriptor);
Throwable exception = new PluginExecutionException(mojoExecution, null, cause);
DefaultExceptionHandler handler = new DefaultExceptionHandler();
ExceptionSummary summary = handler.handleException(exception);
String expectedReference = "http://cwiki.apache.org/confluence/display/MAVEN/PluginContainerException";
assertEquals(expectedReference, summary.getReference());
}
@Test
void testHandleExceptionLoopInCause() {
// Some broken exception that does return "this" as getCause
AtomicReference<Throwable> causeRef = new AtomicReference<>(null);
Exception cause2 = new RuntimeException("loop") {
@Override
public synchronized Throwable getCause() {
return causeRef.get();
}
};
causeRef.set(cause2);
Plugin plugin = new Plugin();
Exception cause = new PluginContainerException(plugin, null, null, cause2);
cause2.initCause(cause);
PluginDescriptor pluginDescriptor = new PluginDescriptor();
MojoDescriptor mojoDescriptor = new MojoDescriptor();
mojoDescriptor.setPluginDescriptor(pluginDescriptor);
MojoExecution mojoExecution = new MojoExecution(mojoDescriptor);
Throwable exception = new PluginExecutionException(mojoExecution, null, cause);
DefaultExceptionHandler handler = new DefaultExceptionHandler();
ExceptionSummary summary = handler.handleException(exception);
String expectedReference = "http://cwiki.apache.org/confluence/display/MAVEN/PluginContainerException";
assertEquals(expectedReference, summary.getReference());
}
@Test
void testHandleExceptionSelfReferencing() {
RuntimeException boom3 = new RuntimeException("BOOM3");
RuntimeException boom2 = new RuntimeException("BOOM2", boom3);
RuntimeException boom1 = new RuntimeException("BOOM1", boom2);
boom3.initCause(boom1);
DefaultExceptionHandler handler = new DefaultExceptionHandler();
ExceptionSummary summary = handler.handleException(boom1);
assertEquals("BOOM1: BOOM2: BOOM3: [CIRCULAR REFERENCE]", summary.getMessage());
assertEquals("", summary.getReference());
assertEquals(0, summary.getChildren().size());
assertEquals(boom1, summary.getException());
}
}
| DefaultExceptionHandlerTest |
java | quarkusio__quarkus | extensions/web-dependency-locator/deployment/src/test/java/io/quarkus/webdependency/locator/test/WebDependencyLocatorTest.java | {
"start": 387,
"end": 3076
} | class ____ extends WebDependencyLocatorTestSupport {
private static final String META_INF_RESOURCES = "META-INF/resources/";
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("<html>Hello!<html>"), META_INF_RESOURCES + "/index.html")
.addAsResource(new StringAsset("Test"), META_INF_RESOURCES + "/some/path/test.txt"))
.setForcedDependencies(List.of(
Dependency.of("org.webjars", "jquery-ui", JQUERY_UI_VERSION),
Dependency.of("org.webjars", "momentjs", MOMENTJS_VERSION),
Dependency.of("org.mvnpm", "bootstrap", BOOTSTRAP_VERSION)));
@Test
public void test() {
// Test normal files
RestAssured.get("/").then()
.statusCode(200)
.body(is("<html>Hello!<html>"));
RestAssured.get("/index.html").then()
.statusCode(200)
.body(is("<html>Hello!<html>"));
RestAssured.get("/some/path/test.txt").then()
.statusCode(200)
.body(is("Test"));
// Test Existing Web Jars
RestAssured.get("/webjars/jquery-ui/jquery-ui.min.js").then()
.statusCode(200);
RestAssured.get("/webjars/momentjs/min/moment.min.js").then()
.statusCode(200);
RestAssured.get("/_static/bootstrap/dist/js/bootstrap.min.js").then()
.statusCode(200);
// Test using version in url of existing Web Jar
RestAssured.get("/webjars/jquery-ui/" + JQUERY_UI_VERSION + "/jquery-ui.min.js").then()
.statusCode(200);
RestAssured.get("/webjars/momentjs/" + MOMENTJS_VERSION + "/min/moment.min.js").then()
.statusCode(200);
RestAssured.get("/_static/bootstrap/" + BOOTSTRAP_VERSION + "/dist/js/bootstrap.min.js").then()
.statusCode(200);
// Test non-existing Web Jar
RestAssured.get("/webjars/bootstrap/js/bootstrap.min.js").then()
.statusCode(404);
RestAssured.get("/webjars/bootstrap/4.3.1/js/bootstrap.min.js").then()
.statusCode(404);
RestAssured.get("/webjars/momentjs/2.25.0/min/moment.min.js").then()
.statusCode(404);
RestAssured.get("/_static/foundation-sites/6.8.1/dist/js/foundation.esm.js").then()
.statusCode(404);
// Test webjar that does not have a version in the jar path
RestAssured.get("/webjars/dcjs/dc.min.js").then()
.statusCode(200);
}
}
| WebDependencyLocatorTest |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java | {
"start": 43478,
"end": 44671
} | class ____ implements Serializable {
private SmallBean nonNull_f;
private SmallBean nullable_f;
private Map<String, SmallBean> childMap;
@Nonnull
public SmallBean getNonNull_f() {
return nonNull_f;
}
public void setNonNull_f(SmallBean f) {
this.nonNull_f = f;
}
public SmallBean getNullable_f() {
return nullable_f;
}
public void setNullable_f(SmallBean f) {
this.nullable_f = f;
}
@Nonnull
public Map<String, SmallBean> getChildMap() { return childMap; }
public void setChildMap(Map<String, SmallBean> childMap) {
this.childMap = childMap;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NestedSmallBeanWithNonNullField that = (NestedSmallBeanWithNonNullField) o;
return Objects.equals(nullable_f, that.nullable_f) &&
Objects.equals(nonNull_f, that.nonNull_f) && Objects.equals(childMap, that.childMap);
}
@Override
public int hashCode() {
return Objects.hash(nullable_f, nonNull_f, childMap);
}
}
public static | NestedSmallBeanWithNonNullField |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/TransientOverrideAsPersistentMappedSuperclassTests.java | {
"start": 9811,
"end": 10242
} | class ____ extends Employee {
public Editor(String name, String title) {
super( name );
setTitle( title );
}
@Column(name = "e_title")
public String getTitle() {
return super.getTitle();
}
public void setTitle(String title) {
super.setTitle( title );
}
protected Editor() {
// this form used by Hibernate
super();
}
}
@SuppressWarnings("unused")
@Entity(name = "Writer")
public static | Editor |
java | apache__logging-log4j2 | log4j-jpa/src/main/java/org/apache/logging/log4j/core/appender/db/jpa/AbstractLogEventWrapperEntity.java | {
"start": 3929,
"end": 11815
} | class ____ implements LogEvent {
private static final long serialVersionUID = 1L;
private final LogEvent wrappedEvent;
/**
* Instantiates this base class. All concrete implementations must have a constructor matching this constructor's
* signature. The no-argument constructor is required for a standards-compliant JPA provider to accept this as an
* entity.
*/
@SuppressWarnings("unused")
protected AbstractLogEventWrapperEntity() {
this(new NullLogEvent());
}
/**
* Instantiates this base class. All concrete implementations must have a constructor matching this constructor's
* signature. This constructor is used for wrapping this entity around a logged event.
*
* @param wrappedEvent The underlying event from which information is obtained.
*/
protected AbstractLogEventWrapperEntity(final LogEvent wrappedEvent) {
if (wrappedEvent == null) {
throw new IllegalArgumentException("The wrapped event cannot be null.");
}
this.wrappedEvent = wrappedEvent;
}
@Override
public LogEvent toImmutable() {
return Log4jLogEvent.createMemento(this);
}
/**
* All eventual accessor methods must call this method and delegate the method call to the underlying wrapped event.
* Annotated {@link Transient} so as not to be included in the persisted entity.
*
* @return The underlying event from which information is obtained.
*/
@Transient
protected final LogEvent getWrappedEvent() {
return this.wrappedEvent;
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param level Ignored.
*/
@SuppressWarnings("unused")
public void setLevel(final Level level) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param loggerName Ignored.
*/
@SuppressWarnings("unused")
public void setLoggerName(final String loggerName) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param source Ignored.
*/
@SuppressWarnings("unused")
public void setSource(final StackTraceElement source) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param message Ignored.
*/
@SuppressWarnings("unused")
public void setMessage(final Message message) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param marker Ignored.
*/
@SuppressWarnings("unused")
public void setMarker(final Marker marker) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param threadId Ignored.
*/
@SuppressWarnings("unused")
public void setThreadId(final long threadId) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param threadName Ignored.
*/
@SuppressWarnings("unused")
public void setThreadName(final String threadName) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param threadPriority Ignored.
*/
@SuppressWarnings("unused")
public void setThreadPriority(final int threadPriority) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param nanoTime Ignored.
*/
@SuppressWarnings("unused")
public void setNanoTime(final long nanoTime) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param millis Ignored.
*/
@SuppressWarnings("unused")
public void setTimeMillis(final long millis) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param instant Ignored.
*/
@SuppressWarnings("unused")
public void setInstant(final Instant instant) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param nanoOfMillisecond Ignored.
*/
@SuppressWarnings("unused")
public void setNanoOfMillisecond(final int nanoOfMillisecond) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param throwable Ignored.
*/
@SuppressWarnings("unused")
public void setThrown(final Throwable throwable) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param contextData Ignored.
*/
@SuppressWarnings("unused")
public void setContextData(final ReadOnlyStringMap contextData) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param map Ignored.
*/
@SuppressWarnings("unused")
public void setContextMap(final Map<String, String> map) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param contextStack Ignored.
*/
@SuppressWarnings("unused")
public void setContextStack(final ThreadContext.ContextStack contextStack) {
// this entity is write-only
}
/**
* A no-op mutator to satisfy JPA requirements, as this entity is write-only.
*
* @param fqcn Ignored.
*/
@SuppressWarnings("unused")
public void setLoggerFqcn(final String fqcn) {
// this entity is write-only
}
/**
* Indicates whether the source of the logging request is required downstream. Annotated
* {@link Transient @Transient} so as to not be included in the persisted entity.
*
* @return whether the source of the logging request is required downstream.
*/
@Override
@Transient
public final boolean isIncludeLocation() {
return this.getWrappedEvent().isIncludeLocation();
}
@Override
public final void setIncludeLocation(final boolean locationRequired) {
this.getWrappedEvent().setIncludeLocation(locationRequired);
}
/**
* Indicates whether this event is the last one in a batch. Annotated {@link Transient @Transient} so as to not be
* included in the persisted entity.
*
* @return whether this event is the last one in a batch.
*/
@Override
@Transient
public final boolean isEndOfBatch() {
return this.getWrappedEvent().isEndOfBatch();
}
@Override
public final void setEndOfBatch(final boolean endOfBatch) {
this.getWrappedEvent().setEndOfBatch(endOfBatch);
}
/**
* Gets the context map. Transient, since the String version of the data is obtained via ReadOnlyStringMap.
*
* @return the context data.
* @see ContextDataAttributeConverter
* @see org.apache.logging.log4j.core.appender.db.jpa.converter.ContextDataAttributeConverter
*/
@Override
@Transient
// @Convert(converter = ContextDataAttributeConverter.class)
public ReadOnlyStringMap getContextData() {
return this.getWrappedEvent().getContextData();
}
/**
* A no-op log event | AbstractLogEventWrapperEntity |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointCartesianSourceAndSourceEvaluator.java | {
"start": 3871,
"end": 4730
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory left;
private final EvalOperator.ExpressionEvaluator.Factory right;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory left,
EvalOperator.ExpressionEvaluator.Factory right) {
this.source = source;
this.left = left;
this.right = right;
}
@Override
public SpatialDisjointCartesianSourceAndSourceEvaluator get(DriverContext context) {
return new SpatialDisjointCartesianSourceAndSourceEvaluator(source, left.get(context), right.get(context), context);
}
@Override
public String toString() {
return "SpatialDisjointCartesianSourceAndSourceEvaluator[" + "left=" + left + ", right=" + right + "]";
}
}
}
| Factory |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_101_for_rongganlin_case2.java | {
"start": 1248,
"end": 1464
} | class ____ implements Object {
public List<Element> elements;
public Group set(List<Element> items) {
this.elements = items;
return this;
}
}
public static | Group |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-netty4/src/main/java/org/apache/dubbo/remoting/transport/netty4/NettyServerHandler.java | {
"start": 1746,
"end": 6765
} | class ____ extends ChannelDuplexHandler {
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(NettyServerHandler.class);
/**
* the cache for alive worker channel.
* <ip:port, dubbo channel>
*/
private final Map<String, Channel> channels = new ConcurrentHashMap<>();
private static final AttributeKey<SSLSession> SSL_SESSION_KEY = AttributeKey.valueOf(Constants.SSL_SESSION_KEY);
private final URL url;
private final ChannelHandler handler;
public NettyServerHandler(URL url, ChannelHandler handler) {
if (url == null) {
throw new IllegalArgumentException("url == null");
}
if (handler == null) {
throw new IllegalArgumentException("handler == null");
}
this.url = url;
this.handler = handler;
}
public Map<String, Channel> getChannels() {
return channels;
}
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
io.netty.channel.Channel ch = ctx.channel();
NettyChannel channel = NettyChannel.getOrAddChannel(ch, url, handler);
if (channel != null) {
channels.put(NetUtils.toAddressString(channel.getRemoteAddress()), channel);
}
handler.connected(channel);
if (logger.isInfoEnabled() && channel != null) {
logger.info(
"The connection {} of {} -> {} is established.",
ch,
channel.getRemoteAddressKey(),
channel.getLocalAddressKey());
}
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
io.netty.channel.Channel ch = ctx.channel();
NettyChannel channel = NettyChannel.getOrAddChannel(ch, url, handler);
try {
channels.remove(NetUtils.toAddressString(channel.getRemoteAddress()));
handler.disconnected(channel);
} finally {
NettyChannel.removeChannel(ch);
}
if (logger.isInfoEnabled()) {
logger.info(
"The connection {} of {} -> {} is disconnected.",
ch,
channel.getRemoteAddressKey(),
channel.getLocalAddressKey());
}
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
NettyChannel channel = NettyChannel.getOrAddChannel(ctx.channel(), url, handler);
handler.received(channel, msg);
// trigger qos handler
ctx.fireChannelRead(msg);
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
super.write(ctx, msg, promise);
NettyChannel channel = NettyChannel.getOrAddChannel(ctx.channel(), url, handler);
handler.sent(channel, msg);
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
// server will close channel when server don't receive any heartbeat from client util timeout.
if (evt instanceof IdleStateEvent) {
NettyChannel channel = NettyChannel.getOrAddChannel(ctx.channel(), url, handler);
try {
logger.info("IdleStateEvent triggered, close channel " + channel);
channel.close();
} finally {
NettyChannel.removeChannelIfDisconnected(ctx.channel());
}
}
super.userEventTriggered(ctx, evt);
if (evt instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent handshakeEvent = (SslHandshakeCompletionEvent) evt;
if (handshakeEvent.isSuccess()) {
NettyChannel channel = NettyChannel.getOrAddChannel(ctx.channel(), url, handler);
channel.setAttribute(
Constants.SSL_SESSION_KEY,
ctx.channel().attr(SSL_SESSION_KEY).get());
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
io.netty.channel.Channel ch = ctx.channel();
NettyChannel channel = NettyChannel.getOrAddChannel(ch, url, handler);
try {
handler.caught(channel, cause);
} finally {
NettyChannel.removeChannelIfDisconnected(ch);
}
if (logger.isWarnEnabled()) {
logger.warn(
TRANSPORT_UNEXPECTED_EXCEPTION,
"",
"",
channel == null
? String.format("The connection %s has exception.", ch)
: String.format(
"The connection %s of %s -> %s has exception.",
ch, channel.getRemoteAddressKey(), channel.getLocalAddressKey()),
cause);
}
}
}
| NettyServerHandler |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/TestNameConflicts.java | {
"start": 930,
"end": 1604
} | class ____
{
@JsonProperty("val1")
private int x;
@JsonIgnore
private int value2;
public Bean193(@JsonProperty("val1")int value1,
@JsonProperty("val2")int value2)
{
this.x = value1;
this.value2 = value2;
}
@JsonProperty("val2")
int x()
{
return value2;
}
}
/* We should only report an exception for cases where there is
* real ambiguity as to how to rename things; but not when everything
* has been explicitly defined
*/
// [Issue#327]
@JsonPropertyOrder({ "prop1", "prop2" })
static | Bean193 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java | {
"start": 5052,
"end": 17347
} | class ____ implements Client {
protected final Logger logger;
protected final Settings settings;
private final ThreadPool threadPool;
private final ProjectResolver projectResolver;
private final AdminClient admin;
private final ProjectClient defaultProjectClient;
@SuppressWarnings("this-escape")
public AbstractClient(Settings settings, ThreadPool threadPool, ProjectResolver projectResolver) {
this.settings = settings;
this.threadPool = threadPool;
this.projectResolver = projectResolver;
this.admin = new AdminClient(this);
this.logger = LogManager.getLogger(this.getClass());
// We create a dedicated project client for the default project to avoid having to reconstruct it on every invocation.
// This aims to reduce the overhead of creating a project client when the client is used in a single-project context.
// TODO: only create the default project client if the project resolver does not support multiple projects.
if (this instanceof ProjectClient == false) {
this.defaultProjectClient = new ProjectClientImpl(this, ProjectId.DEFAULT);
} else {
this.defaultProjectClient = null;
}
}
@Override
public final Settings settings() {
return this.settings;
}
@Override
public final ThreadPool threadPool() {
return this.threadPool;
}
@Override
public ProjectResolver projectResolver() {
return projectResolver;
}
@Override
public final AdminClient admin() {
return admin;
}
@Override
public final <Request extends ActionRequest, Response extends ActionResponse> ActionFuture<Response> execute(
ActionType<Response> action,
Request request
) {
PlainActionFuture<Response> actionFuture = new RefCountedFuture<>();
execute(action, request, actionFuture);
return actionFuture;
}
/**
* This is the single execution point of *all* clients.
*/
@Override
public final <Request extends ActionRequest, Response extends ActionResponse> void execute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {
try {
doExecute(action, request, listener);
} catch (Exception e) {
assert false : new AssertionError(e);
listener.onFailure(e);
}
}
protected abstract <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
);
@Override
public ActionFuture<DocWriteResponse> index(final IndexRequest request) {
return execute(TransportIndexAction.TYPE, request);
}
@Override
public void index(final IndexRequest request, final ActionListener<DocWriteResponse> listener) {
execute(TransportIndexAction.TYPE, request, listener);
}
@Override
public IndexRequestBuilder prepareIndex() {
return new IndexRequestBuilder(this, null);
}
@Override
public IndexRequestBuilder prepareIndex(String index) {
return new IndexRequestBuilder(this, index);
}
@Override
public ActionFuture<UpdateResponse> update(final UpdateRequest request) {
return execute(TransportUpdateAction.TYPE, request);
}
@Override
public void update(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
execute(TransportUpdateAction.TYPE, request, listener);
}
@Override
public UpdateRequestBuilder prepareUpdate() {
return new UpdateRequestBuilder(this, null, null);
}
@Override
public UpdateRequestBuilder prepareUpdate(String index, String id) {
return new UpdateRequestBuilder(this, index, id);
}
@Override
public ActionFuture<DeleteResponse> delete(final DeleteRequest request) {
return execute(TransportDeleteAction.TYPE, request);
}
@Override
public void delete(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
execute(TransportDeleteAction.TYPE, request, listener);
}
@Override
public DeleteRequestBuilder prepareDelete() {
return new DeleteRequestBuilder(this, null);
}
@Override
public DeleteRequestBuilder prepareDelete(String index, String id) {
return prepareDelete().setIndex(index).setId(id);
}
@Override
public ActionFuture<BulkResponse> bulk(final BulkRequest request) {
return execute(TransportBulkAction.TYPE, request);
}
@Override
public void bulk(final BulkRequest request, final ActionListener<BulkResponse> listener) {
execute(TransportBulkAction.TYPE, request, listener);
}
@Override
public BulkRequestBuilder prepareBulk() {
return new BulkRequestBuilder(this);
}
@Override
public BulkRequestBuilder prepareBulk(@Nullable String globalIndex) {
return new BulkRequestBuilder(this, globalIndex);
}
@Override
public ActionFuture<GetResponse> get(final GetRequest request) {
return execute(TransportGetAction.TYPE, request);
}
@Override
public void get(final GetRequest request, final ActionListener<GetResponse> listener) {
execute(TransportGetAction.TYPE, request, listener);
}
@Override
public GetRequestBuilder prepareGet() {
return new GetRequestBuilder(this, null);
}
@Override
public GetRequestBuilder prepareGet(String index, String id) {
return prepareGet().setIndex(index).setId(id);
}
@Override
public ActionFuture<MultiGetResponse> multiGet(final MultiGetRequest request) {
return execute(TransportMultiGetAction.TYPE, request);
}
@Override
public void multiGet(final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) {
execute(TransportMultiGetAction.TYPE, request, listener);
}
@Override
public MultiGetRequestBuilder prepareMultiGet() {
return new MultiGetRequestBuilder(this);
}
@Override
public ActionFuture<SearchResponse> search(final SearchRequest request) {
return execute(TransportSearchAction.TYPE, request);
}
@Override
public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) {
execute(TransportSearchAction.TYPE, request, listener);
}
@Override
public SearchRequestBuilder prepareSearch(String... indices) {
return new SearchRequestBuilder(this).setIndices(indices);
}
@Override
public ActionFuture<SearchResponse> searchScroll(final SearchScrollRequest request) {
return execute(TransportSearchScrollAction.TYPE, request);
}
@Override
public void searchScroll(final SearchScrollRequest request, final ActionListener<SearchResponse> listener) {
execute(TransportSearchScrollAction.TYPE, request, listener);
}
@Override
public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
return new SearchScrollRequestBuilder(this, scrollId);
}
@Override
public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
return execute(TransportMultiSearchAction.TYPE, request);
}
@Override
public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
execute(TransportMultiSearchAction.TYPE, request, listener);
}
@Override
public MultiSearchRequestBuilder prepareMultiSearch() {
return new MultiSearchRequestBuilder(this);
}
@Override
public ActionFuture<TermVectorsResponse> termVectors(final TermVectorsRequest request) {
return execute(TermVectorsAction.INSTANCE, request);
}
@Override
public void termVectors(final TermVectorsRequest request, final ActionListener<TermVectorsResponse> listener) {
execute(TermVectorsAction.INSTANCE, request, listener);
}
@Override
public TermVectorsRequestBuilder prepareTermVectors() {
return new TermVectorsRequestBuilder(this);
}
@Override
public TermVectorsRequestBuilder prepareTermVectors(String index, String id) {
return new TermVectorsRequestBuilder(this, index, id);
}
@Override
public ActionFuture<MultiTermVectorsResponse> multiTermVectors(final MultiTermVectorsRequest request) {
return execute(MultiTermVectorsAction.INSTANCE, request);
}
@Override
public void multiTermVectors(final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
execute(MultiTermVectorsAction.INSTANCE, request, listener);
}
@Override
public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
return new MultiTermVectorsRequestBuilder(this);
}
@Override
public ExplainRequestBuilder prepareExplain(String index, String id) {
return new ExplainRequestBuilder(this, index, id);
}
@Override
public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
return execute(TransportExplainAction.TYPE, request);
}
@Override
public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
execute(TransportExplainAction.TYPE, request, listener);
}
@Override
public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
execute(TransportClearScrollAction.TYPE, request, listener);
}
@Override
public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
return execute(TransportClearScrollAction.TYPE, request);
}
@Override
public ClearScrollRequestBuilder prepareClearScroll() {
return new ClearScrollRequestBuilder(this);
}
@Override
public void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener) {
execute(TransportFieldCapabilitiesAction.TYPE, request, listener);
}
@Override
public ActionFuture<FieldCapabilitiesResponse> fieldCaps(FieldCapabilitiesRequest request) {
return execute(TransportFieldCapabilitiesAction.TYPE, request);
}
@Override
public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) {
return new FieldCapabilitiesRequestBuilder(this, indices);
}
@Override
public Client filterWithHeader(Map<String, String> headers) {
return new FilterClient(this) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {
ThreadContext threadContext = threadPool().getThreadContext();
try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) {
super.doExecute(action, request, listener);
}
}
};
}
@Override
public ProjectClient projectClient(ProjectId projectId) {
// We only take the shortcut when the given project ID matches the "current" project ID. If it doesn't, we'll let #executeOnProject
// take care of error handling.
if (projectResolver.supportsMultipleProjects() == false && projectId.equals(projectResolver.getProjectId())) {
return defaultProjectClient;
}
return new ProjectClientImpl(this, projectId);
}
/**
* Same as {@link PlainActionFuture} but for use with {@link RefCounted} result types. Unlike {@code PlainActionFuture} this future
* acquires a reference to its result. This means that the result reference must be released by a call to {@link RefCounted#decRef()}
* on the result before it goes out of scope.
* @param <R> reference counted result type
*/
// todo: the use of UnsafePlainActionFuture here is quite broad, we should find a better way to be more specific
// (unless making all usages safe is easy).
private static | AbstractClient |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/plugin/PluginCommand.java | {
"start": 1087,
"end": 1424
} | class ____ extends PluginBaseCommand {
public PluginCommand(CamelJBangMain main) {
super(main);
}
@Override
public Integer doCall() throws Exception {
// defaults to list integrations deployed on Kubernetes
new CommandLine(new PluginGet(getMain())).execute();
return 0;
}
}
| PluginCommand |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/DefaultProcessorFactory.java | {
"start": 2080,
"end": 5569
} | class ____ implements ProcessorFactory, BootstrapCloseable {
public static final String RESOURCE_PATH = "META-INF/services/org/apache/camel/model/";
private FactoryFinder finder;
@Override
public void close() throws IOException {
if (finder instanceof BootstrapCloseable bootstrapCloseable) {
bootstrapCloseable.close();
finder = null;
}
}
@Override
public Processor createChildProcessor(Route route, NamedNode definition, boolean mandatory) throws Exception {
String name = definition.getClass().getSimpleName();
if (finder == null) {
finder = PluginHelper.getFactoryFinderResolver(route.getCamelContext())
.resolveBootstrapFactoryFinder(route.getCamelContext().getClassResolver(), RESOURCE_PATH);
}
try {
Object object = finder.newInstance(name).orElse(null);
if (object instanceof ProcessorFactory pc) {
Processor processor = pc.createChildProcessor(route, definition, mandatory);
LineNumberAware.trySetLineNumberAware(processor, definition);
return processor;
}
} catch (NoFactoryAvailableException e) {
// ignore there is no custom factory
}
return null;
}
@Override
public Processor createProcessor(Route route, NamedNode definition) throws Exception {
String name = definition.getClass().getSimpleName();
if (finder == null) {
finder = PluginHelper.getFactoryFinderResolver(route.getCamelContext())
.resolveBootstrapFactoryFinder(route.getCamelContext().getClassResolver(), RESOURCE_PATH);
}
ProcessorFactory pc = finder.newInstance(name, ProcessorFactory.class).orElse(null);
if (pc != null) {
Processor processor = pc.createProcessor(route, definition);
LineNumberAware.trySetLineNumberAware(processor, definition);
return processor;
}
return null;
}
@Override
@SuppressWarnings("unchecked")
public Processor createProcessor(CamelContext camelContext, String definitionName, Object[] args)
throws Exception {
if ("SendDynamicProcessor".equals(definitionName)) {
String uri = (String) args[0];
Expression expression = (Expression) args[1];
ExchangePattern exchangePattern = (ExchangePattern) args[2];
SendDynamicProcessor processor = new SendDynamicProcessor(uri, expression);
processor.setCamelContext(camelContext);
if (exchangePattern != null) {
processor.setPattern(exchangePattern);
}
return processor;
} else if ("MulticastProcessor".equals(definitionName)) {
Collection<Processor> processors = (Collection<Processor>) args[0];
ExecutorService executor = (ExecutorService) args[1];
boolean shutdownExecutorService = (boolean) args[2];
return new MulticastProcessor(
camelContext, null, processors, null, true, executor, shutdownExecutorService, false, false, 0,
null, false, false, 0);
} else if ("Pipeline".equals(definitionName)) {
List<Processor> processors = (List<Processor>) args[0];
return Pipeline.newInstance(camelContext, processors);
}
return null;
}
}
| DefaultProcessorFactory |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/adaptive/AdaptiveJoinOperatorGenerator.java | {
"start": 1771,
"end": 6435
} | class ____ implements AdaptiveJoin {
private static final Logger LOG = LoggerFactory.getLogger(AdaptiveJoinOperatorGenerator.class);
private final int[] leftKeys;
private final int[] rightKeys;
private final FlinkJoinType joinType;
private final boolean[] filterNulls;
private final RowType leftType;
private final RowType rightType;
private final GeneratedJoinCondition condFunc;
private final int leftRowSize;
private final long leftRowCount;
private final int rightRowSize;
private final long rightRowCount;
private final boolean tryDistinctBuildRow;
private final long managedMemory;
private final OperatorType originalJoin;
private boolean leftIsBuild;
private boolean originalLeftIsBuild;
private boolean isBroadcastJoin;
public AdaptiveJoinOperatorGenerator(
int[] leftKeys,
int[] rightKeys,
FlinkJoinType joinType,
boolean[] filterNulls,
RowType leftType,
RowType rightType,
GeneratedJoinCondition condFunc,
int leftRowSize,
int rightRowSize,
long leftRowCount,
long rightRowCount,
boolean tryDistinctBuildRow,
long managedMemory,
boolean leftIsBuild,
OperatorType originalJoin) {
this.leftKeys = leftKeys;
this.rightKeys = rightKeys;
this.joinType = joinType;
this.filterNulls = filterNulls;
this.leftType = leftType;
this.rightType = rightType;
this.condFunc = condFunc;
this.leftRowSize = leftRowSize;
this.rightRowSize = rightRowSize;
this.leftRowCount = leftRowCount;
this.rightRowCount = rightRowCount;
this.tryDistinctBuildRow = tryDistinctBuildRow;
this.managedMemory = managedMemory;
checkState(
originalJoin == OperatorType.ShuffleHashJoin
|| originalJoin == OperatorType.SortMergeJoin,
String.format(
"Adaptive join "
+ "currently only supports adaptive optimization for ShuffleHashJoin and "
+ "SortMergeJoin, not including %s.",
originalJoin.toString()));
this.leftIsBuild = leftIsBuild;
this.originalLeftIsBuild = leftIsBuild;
this.originalJoin = originalJoin;
}
@Override
public StreamOperatorFactory<?> genOperatorFactory(
ClassLoader classLoader, ReadableConfig config) {
if (isBroadcastJoin || originalJoin == OperatorType.ShuffleHashJoin) {
return HashJoinOperatorUtil.generateOperatorFactory(
leftKeys,
rightKeys,
joinType,
filterNulls,
leftType,
rightType,
condFunc,
leftIsBuild,
leftRowSize,
rightRowSize,
leftRowCount,
rightRowCount,
tryDistinctBuildRow,
managedMemory,
config,
classLoader);
} else {
return SorMergeJoinOperatorUtil.generateOperatorFactory(
condFunc,
leftType,
rightType,
leftKeys,
rightKeys,
joinType,
config,
leftIsBuild,
filterNulls,
managedMemory,
classLoader);
}
}
@Override
public FlinkJoinType getJoinType() {
return joinType;
}
@Override
public void markAsBroadcastJoin(boolean canBroadcast, boolean leftIsBuild) {
this.isBroadcastJoin = canBroadcast;
this.leftIsBuild = leftIsBuild;
}
@Override
public boolean shouldReorderInputs() {
// Sort merge join requires the left side to be read first if the broadcast threshold is not
// met.
if (!isBroadcastJoin && originalJoin == OperatorType.SortMergeJoin) {
return false;
}
if (leftIsBuild != originalLeftIsBuild) {
LOG.info(
"The build side of the adaptive join has been updated. Compile phase build side: {}, Runtime build side: {}.",
originalLeftIsBuild ? "left" : "right",
leftIsBuild ? "left" : "right");
}
return !leftIsBuild;
}
}
| AdaptiveJoinOperatorGenerator |
java | google__guava | android/guava/src/com/google/common/base/Optional.java | {
"start": 3070,
"end": 3552
} | class ____ serializable; {@code java.util.Optional} is not.
* <li>{@code java.util.Optional} has the additional methods {@code ifPresent}, {@code filter},
* {@code flatMap}, and {@code orElseThrow}.
* <li>{@code java.util} offers the primitive-specialized versions {@code OptionalInt}, {@code
* OptionalLong} and {@code OptionalDouble}, the use of which is recommended; Guava does not
* have these.
* </ul>
*
* <p><b>There are no plans to deprecate this | is |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/file/FileAssert_hasDigest_AlgorithmBytes_Test.java | {
"start": 892,
"end": 1338
} | class ____ extends FileAssertBaseTest {
private final String algorithm = "MD5";
private final byte[] expected = new byte[0];
@Override
protected FileAssert invoke_api_method() {
return assertions.hasDigest(algorithm, expected);
}
@Override
protected void verify_internal_effects() {
verify(files).assertHasDigest(getInfo(assertions), getActual(assertions), algorithm, expected);
}
}
| FileAssert_hasDigest_AlgorithmBytes_Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/AnotherValidNonBlockingFiltersTest.java | {
"start": 5850,
"end": 6556
} | class ____ implements ContainerRequestFilter {
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
MultivaluedMap<String, String> headers = requestContext.getHeaders();
String previousFilterHeaderValue = headers.getFirst("filter-request");
headers.putSingle("filter-request", previousFilterHeaderValue + "/3-standard-non-blocking");
String previousThreadHeaderValue = headers.getFirst("thread");
headers.putSingle("thread", previousThreadHeaderValue + "/" + BlockingOperationControl.isBlockingAllowed());
}
}
@ApplicationScoped
public static | StandardNonBlockingRequestFilter |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GitHubEndpointBuilderFactory.java | {
"start": 1554,
"end": 23527
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedGitHubEndpointConsumerBuilder advanced() {
return (AdvancedGitHubEndpointConsumerBuilder) this;
}
/**
* GitHub repository name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: common
*
* @param repoName the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder repoName(String repoName) {
doSetProperty("repoName", repoName);
return this;
}
/**
* GitHub repository owner (organization).
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: common
*
* @param repoOwner the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder repoOwner(String repoOwner) {
doSetProperty("repoOwner", repoOwner);
return this;
}
/**
* Whether the commit consumer should store the commit message or the
* raw org.eclipse.egit.github.core.RepositoryCommit object as the
* message body.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param commitMessageAsBody the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder commitMessageAsBody(boolean commitMessageAsBody) {
doSetProperty("commitMessageAsBody", commitMessageAsBody);
return this;
}
/**
* Whether the commit consumer should store the commit message or the
* raw org.eclipse.egit.github.core.RepositoryCommit object as the
* message body.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param commitMessageAsBody the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder commitMessageAsBody(String commitMessageAsBody) {
doSetProperty("commitMessageAsBody", commitMessageAsBody);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder sendEmptyMessageWhenIdle(boolean sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder sendEmptyMessageWhenIdle(String sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* The starting sha to use for polling commits with the commit consumer.
* The value can either be a sha for the sha to start from, or use
* beginning to start from the beginning, or last to start from the last
* commit.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: last
* Group: consumer
*
* @param startingSha the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder startingSha(String startingSha) {
doSetProperty("startingSha", startingSha);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffErrorThreshold(int backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffErrorThreshold(String backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffIdleThreshold(int backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffIdleThreshold(String backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffMultiplier(int backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder backoffMultiplier(String backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option is a: <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder greedy(boolean greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder greedy(String greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder repeatCount(long repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder repeatCount(String repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option is a: <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder runLoggingLevel(org.apache.camel.LoggingLevel runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder runLoggingLevel(String runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option is a:
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option will be converted to a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder scheduledExecutorService(String scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option is a: <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder scheduler(Object scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option will be converted to a <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder scheduler(String scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder schedulerProperties(String key, Object value) {
doSetMultiValueProperty("schedulerProperties", "scheduler." + key, value);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param values the values
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder schedulerProperties(Map values) {
doSetMultiValueProperties("schedulerProperties", "scheduler.", values);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder startScheduler(boolean startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder startScheduler(String startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option is a: <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder timeUnit(TimeUnit timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option will be converted to a
* <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder timeUnit(String timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder useFixedDelay(boolean useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder useFixedDelay(String useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* GitHub OAuth token. Must be configured on either component or
* endpoint.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param oauthToken the value to set
* @return the dsl builder
*/
default GitHubEndpointConsumerBuilder oauthToken(String oauthToken) {
doSetProperty("oauthToken", oauthToken);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the GitHub component.
*/
public | GitHubEndpointConsumerBuilder |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/AbstractCreateMaterializedTableConverter.java | {
"start": 2687,
"end": 10941
} | interface ____ {
Schema getMergedSchema();
Map<String, String> getMergedTableOptions();
List<String> getMergedPartitionKeys();
Optional<TableDistribution> getMergedTableDistribution();
String getMergedOriginalQuery();
String getMergedExpandedQuery();
ResolvedSchema getMergedQuerySchema();
}
protected abstract MergeContext getMergeContext(
T sqlCreateMaterializedTable, ConvertContext context);
protected final Optional<TableDistribution> getDerivedTableDistribution(
T sqlCreateMaterializedTable) {
return Optional.ofNullable(sqlCreateMaterializedTable.getDistribution())
.map(OperationConverterUtils::getDistributionFromSqlDistribution);
}
protected final List<String> getDerivedPartitionKeys(T sqlCreateMaterializedTable) {
return sqlCreateMaterializedTable.getPartitionKeyList();
}
protected final IntervalFreshness getDerivedFreshness(T sqlCreateMaterializedTable) {
return Optional.ofNullable(sqlCreateMaterializedTable.getFreshness())
.map(MaterializedTableUtils::getMaterializedTableFreshness)
.orElse(null);
}
protected final ResolvedSchema getQueryResolvedSchema(
T sqlCreateMaterializedTable, ConvertContext context) {
SqlNode selectQuery = sqlCreateMaterializedTable.getAsQuery();
SqlNode validateQuery = context.getSqlValidator().validate(selectQuery);
PlannerQueryOperation queryOperation =
new PlannerQueryOperation(
context.toRelRoot(validateQuery).project(),
() -> context.toQuotedSqlString(validateQuery));
return queryOperation.getResolvedSchema();
}
protected final LogicalRefreshMode getDerivedLogicalRefreshMode(T sqlCreateMaterializedTable) {
return MaterializedTableUtils.deriveLogicalRefreshMode(
sqlCreateMaterializedTable.getRefreshMode());
}
protected final RefreshMode getDerivedRefreshMode(LogicalRefreshMode logicalRefreshMode) {
return MaterializedTableUtils.fromLogicalRefreshModeToRefreshMode(logicalRefreshMode);
}
protected final String getDerivedOriginalQuery(
T sqlCreateMaterializedTable, ConvertContext context) {
SqlNode selectQuery = sqlCreateMaterializedTable.getAsQuery();
return context.toQuotedSqlString(selectQuery);
}
protected final String getDerivedExpandedQuery(
T sqlCreateMaterializedTable, ConvertContext context) {
SqlNode selectQuery = sqlCreateMaterializedTable.getAsQuery();
SqlNode validatedQuery = context.getSqlValidator().validate(selectQuery);
return context.expandSqlIdentifiers(context.toQuotedSqlString(validatedQuery));
}
protected final String getComment(T sqlCreateMaterializedTable) {
return sqlCreateMaterializedTable.getComment();
}
protected final ResolvedCatalogMaterializedTable getResolvedCatalogMaterializedTable(
T sqlCreateMaterializedTable, ConvertContext context) {
final MergeContext mergeContext = getMergeContext(sqlCreateMaterializedTable, context);
final List<String> partitionKeys = mergeContext.getMergedPartitionKeys();
final Schema schema = mergeContext.getMergedSchema();
final ResolvedSchema querySchema = mergeContext.getMergedQuerySchema();
final Map<String, String> tableOptions = mergeContext.getMergedTableOptions();
verifyPartitioningColumnsExist(querySchema, partitionKeys, tableOptions);
final TableDistribution distribution =
mergeContext.getMergedTableDistribution().orElse(null);
final String comment = sqlCreateMaterializedTable.getComment();
final String originalQuery = mergeContext.getMergedOriginalQuery();
final String expandedQuery = mergeContext.getMergedExpandedQuery();
final IntervalFreshness intervalFreshness = getDerivedFreshness(sqlCreateMaterializedTable);
final LogicalRefreshMode logicalRefreshMode =
getDerivedLogicalRefreshMode(sqlCreateMaterializedTable);
final RefreshMode refreshMode = getDerivedRefreshMode(logicalRefreshMode);
return context.getCatalogManager()
.resolveCatalogMaterializedTable(
CatalogMaterializedTable.newBuilder()
.schema(schema)
.comment(comment)
.distribution(distribution)
.partitionKeys(partitionKeys)
.options(tableOptions)
.originalQuery(originalQuery)
.expandedQuery(expandedQuery)
.freshness(intervalFreshness)
.logicalRefreshMode(logicalRefreshMode)
.refreshMode(refreshMode)
.refreshStatus(CatalogMaterializedTable.RefreshStatus.INITIALIZING)
.build());
}
protected final ObjectIdentifier getIdentifier(
SqlCreateMaterializedTable node, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(node.getFullName());
return context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
}
private void verifyPartitioningColumnsExist(
ResolvedSchema schema, List<String> partitionKeys, Map<String, String> tableOptions) {
final Set<String> partitionFieldOptions =
tableOptions.keySet().stream()
.filter(k -> k.startsWith(PARTITION_FIELDS))
.collect(Collectors.toSet());
for (String partitionKey : partitionKeys) {
if (schema.getColumn(partitionKey).isEmpty()) {
throw new ValidationException(
String.format(
"Partition column '%s' not defined in the query schema. Available columns: [%s].",
partitionKey,
schema.getColumnNames().stream()
.collect(Collectors.joining("', '", "'", "'"))));
}
}
// verify partition key used by materialized table partition option
// partition.fields.#.date-formatter whether exist
for (String partitionOption : partitionFieldOptions) {
String partitionKey =
partitionOption.substring(
PARTITION_FIELDS.length() + 1,
partitionOption.length() - (DATE_FORMATTER.length() + 1));
// partition key used in option partition.fields.#.date-formatter must be existed
if (!partitionKeys.contains(partitionKey)) {
throw new ValidationException(
String.format(
"Column '%s' referenced by materialized table option '%s' isn't a partition column. Available partition columns: [%s].",
partitionKey,
partitionOption,
partitionKeys.stream()
.collect(Collectors.joining("', '", "'", "'"))));
}
// partition key used in option partition.fields.#.date-formatter must be string type
LogicalType partitionKeyType =
schema.getColumn(partitionKey).get().getDataType().getLogicalType();
if (!partitionKeyType
.getTypeRoot()
.getFamilies()
.contains(LogicalTypeFamily.CHARACTER_STRING)) {
throw new ValidationException(
String.format(
"Materialized table option '%s' only supports referring to char, varchar and string type partition column. Column %s type is %s.",
partitionOption, partitionKey, partitionKeyType.asSummaryString()));
}
}
}
}
| MergeContext |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/fun/SqlCastFunction.java | {
"start": 3356,
"end": 3679
} | class ____ copied over because of CALCITE-5017, in order to workaround the method {@link
* SqlTypeUtil#canCastFrom(RelDataType, RelDataType, boolean)}. Line 141 in {@link
* #checkOperandTypes(SqlCallBinding, boolean)} and new method {@link #canCastFrom(RelDataType,
* RelDataType)}.
*
* @see SqlCastOperator
*/
public | was |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/service/QuarkusStaticInitDialectFactory.java | {
"start": 596,
"end": 1099
} | class ____ extends DialectFactoryImpl
implements DialectFactory, ServiceRegistryAwareService {
private Dialect dialect;
@Override
public Dialect buildDialect(Map<String, Object> configValues, DialectResolutionInfoSource resolutionInfoSource)
throws HibernateException {
dialect = super.buildDialect(configValues, resolutionInfoSource);
return dialect;
}
public Dialect getDialect() {
return dialect;
}
}
| QuarkusStaticInitDialectFactory |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1400/Issue1422.java | {
"start": 1035,
"end": 1081
} | class ____ {
public boolean v;
}
}
| Foo |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 13307,
"end": 13656
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return dialect.getLockingSupport().getMetadata().getOuterJoinLockingType() == OuterJoinLockingType.FULL
|| dialect.getLockingSupport().getMetadata().getOuterJoinLockingType() == OuterJoinLockingType.IDENTIFIED;
}
}
public static final | SupportsLockingJoins |
java | square__retrofit | samples/src/main/java/com/example/retrofit/JsonAndXmlConverters.java | {
"start": 3509,
"end": 4506
} | interface ____ {
@GET("/")
@Json
Call<User> exampleJson();
@GET("/")
@Xml
Call<User> exampleXml();
}
public static void main(String... args) throws IOException {
MockWebServer server = new MockWebServer();
server.start();
server.enqueue(new MockResponse().setBody("{\"name\": \"Jason\"}"));
server.enqueue(new MockResponse().setBody("<user name=\"Eximel\"/>"));
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(
new QualifiedTypeConverterFactory(
GsonConverterFactory.create(), SimpleXmlConverterFactory.create()))
.build();
Service service = retrofit.create(Service.class);
User user1 = service.exampleJson().execute().body();
System.out.println("User 1: " + user1.name);
User user2 = service.exampleXml().execute().body();
System.out.println("User 2: " + user2.name);
server.shutdown();
}
}
| Service |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java | {
"start": 1580,
"end": 41042
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedKafkaEndpointConsumerBuilder advanced() {
return (AdvancedKafkaEndpointConsumerBuilder) this;
}
/**
* Sets additional properties for either kafka consumer or kafka
* producer in case they can't be set directly on the camel
* configurations (e.g.: new Kafka properties that are not reflected yet
* in Camel configurations), the properties have to be prefixed with
* additionalProperties.., e.g.:
* additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro. If the properties are set in the application.properties file, they must be prefixed with camel.component.kafka.additional-properties and the property enclosed in square brackets, like this example: camel.component.kafka.additional-propertiesdelivery.timeout.ms=15000. This is a multi-value option with prefix: additionalProperties.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* additionalProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: common
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder additionalProperties(String key, Object value) {
doSetMultiValueProperty("additionalProperties", "additionalProperties." + key, value);
return this;
}
/**
* Sets additional properties for either kafka consumer or kafka
* producer in case they can't be set directly on the camel
* configurations (e.g.: new Kafka properties that are not reflected yet
* in Camel configurations), the properties have to be prefixed with
* additionalProperties.., e.g.:
* additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro. If the properties are set in the application.properties file, they must be prefixed with camel.component.kafka.additional-properties and the property enclosed in square brackets, like this example: camel.component.kafka.additional-propertiesdelivery.timeout.ms=15000. This is a multi-value option with prefix: additionalProperties.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* additionalProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: common
*
* @param values the values
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder additionalProperties(Map values) {
doSetMultiValueProperties("additionalProperties", "additionalProperties.", values);
return this;
}
/**
* URL of the Kafka brokers to use. The format is
* host1:port1,host2:port2, and the list can be a subset of brokers or a
* VIP pointing to a subset of brokers. This option is known as
* bootstrap.servers in the Kafka documentation.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param brokers the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder brokers(String brokers) {
doSetProperty("brokers", brokers);
return this;
}
/**
* The client id is a user-specified string sent in each request to help
* trace calls. It should logically identify the application making the
* request.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param clientId the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder clientId(String clientId) {
doSetProperty("clientId", clientId);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: common
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option will be converted to a
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: common
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder headerFilterStrategy(String headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* The maximum amount of time in milliseconds to wait when reconnecting
* to a broker that has repeatedly failed to connect. If provided, the
* backoff per host will increase exponentially for each consecutive
* connection failure, up to this maximum. After calculating the backoff
* increase, 20% random jitter is added to avoid connection storms.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 1000
* Group: common
*
* @param reconnectBackoffMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder reconnectBackoffMaxMs(Integer reconnectBackoffMaxMs) {
doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
return this;
}
/**
* The maximum amount of time in milliseconds to wait when reconnecting
* to a broker that has repeatedly failed to connect. If provided, the
* backoff per host will increase exponentially for each consecutive
* connection failure, up to this maximum. After calculating the backoff
* increase, 20% random jitter is added to avoid connection storms.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 1000
* Group: common
*
* @param reconnectBackoffMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder reconnectBackoffMaxMs(String reconnectBackoffMaxMs) {
doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
return this;
}
/**
* The maximum amount of time in milliseconds to wait when retrying a
* request to the broker that has repeatedly failed. If provided, the
* backoff per client will increase exponentially for each failed
* request, up to this maximum. To prevent all clients from being
* synchronized upon retry, a randomized jitter with a factor of 0.2
* will be applied to the backoff, resulting in the backoff falling
* within a range between 20% below and 20% above the computed value. If
* retry.backoff.ms is set to be higher than retry.backoff.max.ms, then
* retry.backoff.max.ms will be used as a constant backoff from the
* beginning without any exponential increase.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 1000
* Group: common
*
* @param retryBackoffMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder retryBackoffMaxMs(Integer retryBackoffMaxMs) {
doSetProperty("retryBackoffMaxMs", retryBackoffMaxMs);
return this;
}
/**
* The maximum amount of time in milliseconds to wait when retrying a
* request to the broker that has repeatedly failed. If provided, the
* backoff per client will increase exponentially for each failed
* request, up to this maximum. To prevent all clients from being
* synchronized upon retry, a randomized jitter with a factor of 0.2
* will be applied to the backoff, resulting in the backoff falling
* within a range between 20% below and 20% above the computed value. If
* retry.backoff.ms is set to be higher than retry.backoff.max.ms, then
* retry.backoff.max.ms will be used as a constant backoff from the
* beginning without any exponential increase.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 1000
* Group: common
*
* @param retryBackoffMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder retryBackoffMaxMs(String retryBackoffMaxMs) {
doSetProperty("retryBackoffMaxMs", retryBackoffMaxMs);
return this;
}
/**
* The amount of time to wait before attempting to retry a failed
* request to a given topic partition. This avoids repeatedly sending
* requests in a tight loop under some failure scenarios. This value is
* the initial backoff value and will increase exponentially for each
* failed request, up to the retry.backoff.max.ms value.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 100
* Group: common
*
* @param retryBackoffMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder retryBackoffMs(Integer retryBackoffMs) {
doSetProperty("retryBackoffMs", retryBackoffMs);
return this;
}
/**
* The amount of time to wait before attempting to retry a failed
* request to a given topic partition. This avoids repeatedly sending
* requests in a tight loop under some failure scenarios. This value is
* the initial backoff value and will increase exponentially for each
* failed request, up to the retry.backoff.max.ms value.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 100
* Group: common
*
* @param retryBackoffMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder retryBackoffMs(String retryBackoffMs) {
doSetProperty("retryBackoffMs", retryBackoffMs);
return this;
}
/**
* Timeout in milliseconds to wait gracefully for the consumer or
* producer to shut down and terminate its worker threads.
*
* The option is a: <code>int</code> type.
*
* Default: 30000
* Group: common
*
* @param shutdownTimeout the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder shutdownTimeout(int shutdownTimeout) {
doSetProperty("shutdownTimeout", shutdownTimeout);
return this;
}
/**
* Timeout in milliseconds to wait gracefully for the consumer or
* producer to shut down and terminate its worker threads.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 30000
* Group: common
*
* @param shutdownTimeout the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder shutdownTimeout(String shutdownTimeout) {
doSetProperty("shutdownTimeout", shutdownTimeout);
return this;
}
/**
* Whether to allow doing manual commits via KafkaManualCommit. If this
* option is enabled then an instance of KafkaManualCommit is stored on
* the Exchange message header, which allows end users to access this
* API and perform manual offset commits via the Kafka consumer.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param allowManualCommit the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder allowManualCommit(boolean allowManualCommit) {
doSetProperty("allowManualCommit", allowManualCommit);
return this;
}
/**
* Whether to allow doing manual commits via KafkaManualCommit. If this
* option is enabled then an instance of KafkaManualCommit is stored on
* the Exchange message header, which allows end users to access this
* API and perform manual offset commits via the Kafka consumer.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param allowManualCommit the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder allowManualCommit(String allowManualCommit) {
doSetProperty("allowManualCommit", allowManualCommit);
return this;
}
/**
* If true, periodically commit to ZooKeeper the offset of messages
* already fetched by the consumer. This committed offset will be used
* when the process fails as the position from which the new consumer
* will begin.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param autoCommitEnable the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder autoCommitEnable(boolean autoCommitEnable) {
doSetProperty("autoCommitEnable", autoCommitEnable);
return this;
}
/**
* If true, periodically commit to ZooKeeper the offset of messages
* already fetched by the consumer. This committed offset will be used
* when the process fails as the position from which the new consumer
* will begin.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param autoCommitEnable the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder autoCommitEnable(String autoCommitEnable) {
doSetProperty("autoCommitEnable", autoCommitEnable);
return this;
}
/**
* The frequency in ms that the consumer offsets are committed to
* zookeeper.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 5000
* Group: consumer
*
* @param autoCommitIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder autoCommitIntervalMs(Integer autoCommitIntervalMs) {
doSetProperty("autoCommitIntervalMs", autoCommitIntervalMs);
return this;
}
/**
* The frequency in ms that the consumer offsets are committed to
* zookeeper.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 5000
* Group: consumer
*
* @param autoCommitIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder autoCommitIntervalMs(String autoCommitIntervalMs) {
doSetProperty("autoCommitIntervalMs", autoCommitIntervalMs);
return this;
}
/**
* What to do when there is no initial offset in ZooKeeper or if an
* offset is out of range: earliest : automatically reset the offset to
* the earliest offset latest: automatically reset the offset to the
* latest offset fail: throw exception to the consumer.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: latest
* Group: consumer
*
* @param autoOffsetReset the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder autoOffsetReset(String autoOffsetReset) {
doSetProperty("autoOffsetReset", autoOffsetReset);
return this;
}
/**
* Whether to use batching for processing or streaming. The default is
* false, which uses streaming. In streaming mode, then a single kafka
* record is processed per Camel exchange in the message body. In
* batching mode, then Camel groups many kafka records together as a
* List objects in the message body. The option maxPollRecords is used
* to define the number of records to group together in batching mode.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param batching the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder batching(boolean batching) {
doSetProperty("batching", batching);
return this;
}
/**
* Whether to use batching for processing or streaming. The default is
* false, which uses streaming. In streaming mode, then a single kafka
* record is processed per Camel exchange in the message body. In
* batching mode, then Camel groups many kafka records together as a
* List objects in the message body. The option maxPollRecords is used
* to define the number of records to group together in batching mode.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param batching the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder batching(String batching) {
doSetProperty("batching", batching);
return this;
}
/**
* In consumer batching mode, then this option is specifying a time in
* millis, to trigger batch completion eager when the current batch size
* has not reached the maximum size defined by maxPollRecords. Notice
* the trigger is not exact at the given interval, as this can only
* happen between kafka polls (see pollTimeoutMs option). So for example
* setting this to 10000, then the trigger happens in the interval 10000
* pollTimeoutMs. The default value for pollTimeoutMs is 5000, so this
* would mean a trigger interval at about every 15 seconds.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: consumer
*
* @param batchingIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder batchingIntervalMs(Integer batchingIntervalMs) {
doSetProperty("batchingIntervalMs", batchingIntervalMs);
return this;
}
/**
* In consumer batching mode, then this option is specifying a time in
* millis, to trigger batch completion eager when the current batch size
* has not reached the maximum size defined by maxPollRecords. Notice
* the trigger is not exact at the given interval, as this can only
* happen between kafka polls (see pollTimeoutMs option). So for example
* setting this to 10000, then the trigger happens in the interval 10000
* pollTimeoutMs. The default value for pollTimeoutMs is 5000, so this
* would mean a trigger interval at about every 15 seconds.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: consumer
*
* @param batchingIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder batchingIntervalMs(String batchingIntervalMs) {
doSetProperty("batchingIntervalMs", batchingIntervalMs);
return this;
}
/**
* This options controls what happens when a consumer is processing an
* exchange and it fails. If the option is false then the consumer
* continues to the next message and processes it. If the option is true
* then the consumer breaks out. Using the default NoopCommitManager
* will cause the consumer to not commit the offset so that the message
* is re-attempted. The consumer should use the KafkaManualCommit to
* determine the best way to handle the message. Using either the
* SyncCommitManager or the AsyncCommitManager, the consumer will seek
* back to the offset of the message that caused a failure, and then
* re-attempt to process this message. However, this can lead to endless
* processing of the same message if it's bound to fail every time,
* e.g., a poison message. Therefore, it's recommended to deal with
* that, for example, by using Camel's error handler.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param breakOnFirstError the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder breakOnFirstError(boolean breakOnFirstError) {
doSetProperty("breakOnFirstError", breakOnFirstError);
return this;
}
/**
* This options controls what happens when a consumer is processing an
* exchange and it fails. If the option is false then the consumer
* continues to the next message and processes it. If the option is true
* then the consumer breaks out. Using the default NoopCommitManager
* will cause the consumer to not commit the offset so that the message
* is re-attempted. The consumer should use the KafkaManualCommit to
* determine the best way to handle the message. Using either the
* SyncCommitManager or the AsyncCommitManager, the consumer will seek
* back to the offset of the message that caused a failure, and then
* re-attempt to process this message. However, this can lead to endless
* processing of the same message if it's bound to fail every time,
* e.g., a poison message. Therefore, it's recommended to deal with
* that, for example, by using Camel's error handler.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param breakOnFirstError the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder breakOnFirstError(String breakOnFirstError) {
doSetProperty("breakOnFirstError", breakOnFirstError);
return this;
}
/**
* Automatically check the CRC32 of the records consumed. This ensures
* no on-the-wire or on-disk corruption to the messages occurred. This
* check adds some overhead, so it may be disabled in cases seeking
* extreme performance.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param checkCrcs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder checkCrcs(Boolean checkCrcs) {
doSetProperty("checkCrcs", checkCrcs);
return this;
}
/**
* Automatically check the CRC32 of the records consumed. This ensures
* no on-the-wire or on-disk corruption to the messages occurred. This
* check adds some overhead, so it may be disabled in cases seeking
* extreme performance.
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Default: true
* Group: consumer
*
* @param checkCrcs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder checkCrcs(String checkCrcs) {
doSetProperty("checkCrcs", checkCrcs);
return this;
}
/**
* The maximum time, in milliseconds, that the code will wait for a
* synchronous commit to complete.
*
* The option is a: <code>java.lang.Long</code> type.
*
* Default: 5000
* Group: consumer
*
* @param commitTimeoutMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder commitTimeoutMs(Long commitTimeoutMs) {
doSetProperty("commitTimeoutMs", commitTimeoutMs);
return this;
}
/**
* The maximum time, in milliseconds, that the code will wait for a
* synchronous commit to complete.
*
* The option will be converted to a <code>java.lang.Long</code> type.
*
* Default: 5000
* Group: consumer
*
* @param commitTimeoutMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder commitTimeoutMs(String commitTimeoutMs) {
doSetProperty("commitTimeoutMs", commitTimeoutMs);
return this;
}
/**
* The configuration controls the maximum amount of time the client will
* wait for the response of a request. If the response is not received
* before the timeout elapsed, the client will resend the request if
* necessary or fail the request if retries are exhausted.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 30000
* Group: consumer
*
* @param consumerRequestTimeoutMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder consumerRequestTimeoutMs(Integer consumerRequestTimeoutMs) {
doSetProperty("consumerRequestTimeoutMs", consumerRequestTimeoutMs);
return this;
}
/**
* The configuration controls the maximum amount of time the client will
* wait for the response of a request. If the response is not received
* before the timeout elapsed, the client will resend the request if
* necessary or fail the request if retries are exhausted.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 30000
* Group: consumer
*
* @param consumerRequestTimeoutMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder consumerRequestTimeoutMs(String consumerRequestTimeoutMs) {
doSetProperty("consumerRequestTimeoutMs", consumerRequestTimeoutMs);
return this;
}
/**
* The number of consumers that connect to kafka server. Each consumer
* is run on a separate thread that retrieves and process the incoming
* data.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param consumersCount the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder consumersCount(int consumersCount) {
doSetProperty("consumersCount", consumersCount);
return this;
}
/**
* The number of consumers that connect to kafka server. Each consumer
* is run on a separate thread that retrieves and process the incoming
* data.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param consumersCount the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder consumersCount(String consumersCount) {
doSetProperty("consumersCount", consumersCount);
return this;
}
/**
* The maximum amount of data the server should return for a fetch
* request. This is not an absolute maximum, if the first message in the
* first non-empty partition of the fetch is larger than this value, the
* message will still be returned to ensure that the consumer can make
* progress. The maximum message size accepted by the broker is defined
* via message.max.bytes (broker config) or max.message.bytes (topic
* config). Note that the consumer performs multiple fetches in
* parallel.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 52428800
* Group: consumer
*
* @param fetchMaxBytes the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchMaxBytes(Integer fetchMaxBytes) {
doSetProperty("fetchMaxBytes", fetchMaxBytes);
return this;
}
/**
* The maximum amount of data the server should return for a fetch
* request. This is not an absolute maximum, if the first message in the
* first non-empty partition of the fetch is larger than this value, the
* message will still be returned to ensure that the consumer can make
* progress. The maximum message size accepted by the broker is defined
* via message.max.bytes (broker config) or max.message.bytes (topic
* config). Note that the consumer performs multiple fetches in
* parallel.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 52428800
* Group: consumer
*
* @param fetchMaxBytes the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchMaxBytes(String fetchMaxBytes) {
doSetProperty("fetchMaxBytes", fetchMaxBytes);
return this;
}
/**
* The minimum amount of data the server should return for a fetch
* request. If insufficient data is available, the request will wait for
* that much data to accumulate before answering the request.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 1
* Group: consumer
*
* @param fetchMinBytes the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchMinBytes(Integer fetchMinBytes) {
doSetProperty("fetchMinBytes", fetchMinBytes);
return this;
}
/**
* The minimum amount of data the server should return for a fetch
* request. If insufficient data is available, the request will wait for
* that much data to accumulate before answering the request.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 1
* Group: consumer
*
* @param fetchMinBytes the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchMinBytes(String fetchMinBytes) {
doSetProperty("fetchMinBytes", fetchMinBytes);
return this;
}
/**
* The maximum amount of time the server will block before answering the
* fetch request if there isn't enough data to immediately satisfy
* fetch.min.bytes.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 500
* Group: consumer
*
* @param fetchWaitMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchWaitMaxMs(Integer fetchWaitMaxMs) {
doSetProperty("fetchWaitMaxMs", fetchWaitMaxMs);
return this;
}
/**
* The maximum amount of time the server will block before answering the
* fetch request if there isn't enough data to immediately satisfy
* fetch.min.bytes.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 500
* Group: consumer
*
* @param fetchWaitMaxMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder fetchWaitMaxMs(String fetchWaitMaxMs) {
doSetProperty("fetchWaitMaxMs", fetchWaitMaxMs);
return this;
}
/**
* A string that uniquely identifies the group of consumer processes to
* which this consumer belongs. By setting the same group id, multiple
* processes can indicate that they are all part of the same consumer
* group. This option is required for consumers.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param groupId the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder groupId(String groupId) {
doSetProperty("groupId", groupId);
return this;
}
/**
* A unique identifier of the consumer instance provided by the end
* user. Only non-empty strings are permitted. If set, the consumer is
* treated as a static member, which means that only one instance with
* this ID is allowed in the consumer group at any time. This can be
* used in combination with a larger session timeout to avoid group
* rebalances caused by transient unavailability (e.g., process
* restarts). If not set, the consumer will join the group as a dynamic
* member, which is the traditional behavior.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param groupInstanceId the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder groupInstanceId(String groupInstanceId) {
doSetProperty("groupInstanceId", groupInstanceId);
return this;
}
/**
* To use a custom KafkaHeaderDeserializer to deserialize kafka headers
* values.
*
* The option is a:
* <code>org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer</code> type.
*
* Group: consumer
*
* @param headerDeserializer the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder headerDeserializer(org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer headerDeserializer) {
doSetProperty("headerDeserializer", headerDeserializer);
return this;
}
/**
* To use a custom KafkaHeaderDeserializer to deserialize kafka headers
* values.
*
* The option will be converted to a
* <code>org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer</code> type.
*
* Group: consumer
*
* @param headerDeserializer the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder headerDeserializer(String headerDeserializer) {
doSetProperty("headerDeserializer", headerDeserializer);
return this;
}
/**
* The expected time between heartbeats to the consumer coordinator when
* using Kafka's group management facilities. Heartbeats are used to
* ensure that the consumer's session stays active and to facilitate
* rebalancing when new consumers join or leave the group. The value
* must be set lower than session.timeout.ms, but typically should be
* set no higher than 1/3 of that value. It can be adjusted even lower
* to control the expected time for normal rebalances.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 3000
* Group: consumer
*
* @param heartbeatIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder heartbeatIntervalMs(Integer heartbeatIntervalMs) {
doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
return this;
}
/**
* The expected time between heartbeats to the consumer coordinator when
* using Kafka's group management facilities. Heartbeats are used to
* ensure that the consumer's session stays active and to facilitate
* rebalancing when new consumers join or leave the group. The value
* must be set lower than session.timeout.ms, but typically should be
* set no higher than 1/3 of that value. It can be adjusted even lower
* to control the expected time for normal rebalances.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Default: 3000
* Group: consumer
*
* @param heartbeatIntervalMs the value to set
* @return the dsl builder
*/
default KafkaEndpointConsumerBuilder heartbeatIntervalMs(String heartbeatIntervalMs) {
doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
return this;
}
/**
* Deserializer | KafkaEndpointConsumerBuilder |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java | {
"start": 84618,
"end": 87465
} | class ____ implements CoordinatorStrategy {
private final ElectionStrategy electionStrategy;
public DefaultCoordinatorStrategy() {
this(ElectionStrategy.DEFAULT_INSTANCE);
}
public DefaultCoordinatorStrategy(ElectionStrategy electionStrategy) {
this.electionStrategy = electionStrategy;
}
@Override
public CoordinationServices getCoordinationServices(
ThreadPool threadPool,
Settings settings,
ClusterSettings clusterSettings,
CoordinationState.PersistedState persistedState,
DisruptibleRegisterConnection disruptibleRegisterConnection
) {
return new CoordinationServices() {
@Override
public ElectionStrategy getElectionStrategy() {
return electionStrategy;
}
@Override
public Reconfigurator getReconfigurator() {
return new Reconfigurator(settings, clusterSettings);
}
@Override
public LeaderHeartbeatService getLeaderHeartbeatService() {
return LeaderHeartbeatService.NO_OP;
}
@Override
public PreVoteCollector.Factory getPreVoteCollectorFactory() {
return StatefulPreVoteCollector::new;
}
};
}
@Override
public CoordinationState.PersistedState createFreshPersistedState(
DiscoveryNode localNode,
BooleanSupplier disruptStorage,
ThreadPool threadPool
) {
return new MockPersistedState(localNode, disruptStorage);
}
@Override
public CoordinationState.PersistedState createPersistedStateFromExistingState(
DiscoveryNode newLocalNode,
CoordinationState.PersistedState oldState,
Function<Metadata, Metadata> adaptGlobalMetadata,
Function<Long, Long> adaptCurrentTerm,
LongSupplier currentTimeInMillisSupplier,
NamedWriteableRegistry namedWriteableRegistry,
BooleanSupplier disruptStorage,
ThreadPool threadPool
) {
assert oldState instanceof MockPersistedState : oldState.getClass();
return new MockPersistedState(
newLocalNode,
(MockPersistedState) oldState,
adaptGlobalMetadata,
adaptCurrentTerm,
currentTimeInMillisSupplier,
namedWriteableRegistry,
disruptStorage
);
}
}
protected CoordinatorStrategy createCoordinatorStrategy() {
return new DefaultCoordinatorStrategy();
}
static | DefaultCoordinatorStrategy |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ops/OneToOneMergeTest.java | {
"start": 927,
"end": 1627
} | class ____ {
@Test
public void testMerge(EntityManagerFactoryScope scope) throws Exception {
Long primaId = scope.fromTransaction(
entityManager -> {
Prima prima = new Prima();
prima.setOptionalData( null );
entityManager.persist( prima );
return prima.getId();
} );
assertNotNull( primaId );
scope.inTransaction(
entityManager -> {
Prima prima = entityManager.find( Prima.class, primaId );
Secunda sec = new Secunda();
sec.setParent( prima );
prima.setOptionalData( sec );
Prima mergedPrima = entityManager.merge( prima );
assertNotNull( mergedPrima );
} );
}
@Entity(name = "Prima")
public static | OneToOneMergeTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterUnusedInFormalsTest.java | {
"start": 2413,
"end": 2844
} | class ____ {
// BUG: Diagnostic contains:
static <V extends Object, T, U extends Object> T doCast(U o, V v) {
T t = (T) o;
return t;
}
}
""")
.doTest();
}
@Test
public void superBound() {
compilationHelper
.addSourceLines(
"Test.java",
"""
package foo.bar;
| Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SaveFederationQueuePolicyRequest.java | {
"start": 1151,
"end": 1212
} | class ____ used to save the queue policy interface.
*
* This | is |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/cfg/DeserializationContexts.java | {
"start": 904,
"end": 4239
} | class ____
implements java.io.Serializable
{
private static final long serialVersionUID = 3L;
/*
/**********************************************************************
/* Configuration
/**********************************************************************
*/
// NOTE! We do not need (or want) to serialize any of these because they
// get passed via `forMapper(...)` call; all we want to serialize is identity
// of this class (and possibly whatever sub-classes may want to retain).
// Hence `transient` modifiers
/**
* Low-level {@link TokenStreamFactory} that may be used for constructing
* embedded generators.
*/
final transient protected TokenStreamFactory _streamFactory;
/**
* Factory responsible for constructing standard serializers.
*/
final transient protected DeserializerFactory _deserializerFactory;
/**
* Cache for doing type-to-value-serializer lookups.
*/
final transient protected DeserializerCache _cache;
/*
/**********************************************************************
/* Life-cycle
/**********************************************************************
*/
protected DeserializationContexts() { this(null, null, null); }
protected DeserializationContexts(TokenStreamFactory tsf,
DeserializerFactory deserializerFactory, DeserializerCache cache) {
_streamFactory = tsf;
_deserializerFactory = deserializerFactory;
_cache = cache;
}
/**
* Mutant factory method called when instance is actually created for use by mapper
* (as opposed to coming into existence during building, module registration).
* Necessary usually to initialize non-configuration state, such as caching.
*/
public DeserializationContexts forMapper(Object mapper,
DeserializationConfig config,
TokenStreamFactory tsf,
DeserializerFactory deserializerFactory) {
return forMapper(mapper, tsf, deserializerFactory,
new DeserializerCache(config.getCacheProvider().forDeserializerCache(config)));
}
protected abstract DeserializationContexts forMapper(Object mapper,
TokenStreamFactory tsf, DeserializerFactory deserializerFactory,
DeserializerCache cache);
/**
* Factory method for constructing context object for individual {@code writeValue} call.
*/
public abstract DeserializationContextExt createContext(DeserializationConfig config,
FormatSchema schema, InjectableValues injectables);
/*
/**********************************************************************
/* Extended API
/**********************************************************************
*/
/**
* Method that will drop all dynamically constructed deserializers (ones that
* are counted as result value for {@link DeserializerCache#cachedDeserializersCount}).
*/
public void flushCachedDeserializers() {
_cache.flushCachedDeserializers();
}
/*
/**********************************************************************
/* Vanilla implementation
/**********************************************************************
*/
public static | DeserializationContexts |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.