language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/EnableAsyncTests.java | {
"start": 19841,
"end": 20372
} | class ____ implements AsyncConfigurer {
@Bean
public AsyncInterface asyncBean() {
return new AsyncService();
}
@Override
public Executor getAsyncExecutor() {
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
executor.setThreadNamePrefix("Custom-");
executor.initialize();
return executor;
}
@Override
public AsyncUncaughtExceptionHandler getAsyncUncaughtExceptionHandler() {
return null;
}
}
@Configuration
@EnableAsync
@Import(UserConfiguration.class)
static | Spr14949ConfigB |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlResponseListener.java | {
"start": 1399,
"end": 4455
} | class ____ extends RestResponseListener<SqlQueryResponse> {
private final long startNanos = System.nanoTime();
private final MediaType mediaType;
private final RestRequest request;
private final BasicFormatter requestFormatter;
SqlResponseListener(RestChannel channel, RestRequest request, SqlQueryRequest sqlRequest) {
super(channel);
this.request = request;
this.requestFormatter = Cursors.decodeFormatter(sqlRequest.cursor());
this.mediaType = SqlMediaTypeParser.getResponseMediaType(request, sqlRequest);
/*
* Special handling for the "delimiter" parameter which should only be
* checked for being present or not in the case of CSV format. We cannot
* override {@link BaseRestHandler#responseParams()} because this
* parameter should only be checked for CSV, not always.
*/
if (mediaType != TextFormat.CSV && request.hasParam(URL_PARAM_DELIMITER)) {
String message = String.format(
Locale.ROOT,
"request [%s] contains unrecognized parameter: [" + URL_PARAM_DELIMITER + "]",
request.path()
);
throw new IllegalArgumentException(message);
}
}
SqlResponseListener(RestChannel channel, RestRequest request) {
super(channel);
this.request = request;
this.requestFormatter = null;
this.mediaType = SqlMediaTypeParser.getResponseMediaType(request);
}
@Override
public RestResponse buildResponse(SqlQueryResponse response) throws Exception {
RestResponse restResponse;
// XContent branch
if (mediaType instanceof XContentType type) {
XContentBuilder builder = channel.newBuilder(request.getXContentType(), type, true);
response.toXContent(builder, request);
restResponse = new RestResponse(RestStatus.OK, builder);
} else { // TextFormat
TextFormat type = (TextFormat) mediaType;
final Tuple<String, BasicFormatter> dataWithNextFormatter = type.format(request, requestFormatter, response);
if (response.hasCursor()) {
response.cursor(Cursors.attachFormatter(response.cursor(), dataWithNextFormatter.v2()));
}
restResponse = new RestResponse(RestStatus.OK, type.contentType(request), dataWithNextFormatter.v1());
if (response.hasCursor()) {
restResponse.addHeader(HEADER_NAME_CURSOR, response.cursor());
}
if (response.hasId()) {
restResponse.addHeader(HEADER_NAME_ASYNC_ID, response.id());
restResponse.addHeader(HEADER_NAME_ASYNC_PARTIAL, String.valueOf(response.isPartial()));
restResponse.addHeader(HEADER_NAME_ASYNC_RUNNING, String.valueOf(response.isRunning()));
}
}
restResponse.addHeader(HEADER_NAME_TOOK_NANOS, Long.toString(System.nanoTime() - startNanos));
return restResponse;
}
}
| SqlResponseListener |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SalesforceComponentBuilderFactory.java | {
"start": 1864,
"end": 27132
} | interface ____ extends ComponentBuilder<SalesforceComponent> {
/**
* APEX method name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param apexMethod the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder apexMethod(java.lang.String apexMethod) {
doSetProperty("apexMethod", apexMethod);
return this;
}
/**
* Query params for APEX method.
*
* The option is a: <code>java.util.Map&lt;java.lang.String,
* java.lang.Object&gt;</code> type.
*
* Group: common
*
* @param apexQueryParams the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder apexQueryParams(java.util.Map<java.lang.String, java.lang.Object> apexQueryParams) {
doSetProperty("apexQueryParams", apexQueryParams);
return this;
}
/**
* Salesforce API version.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: 56.0
* Group: common
*
* @param apiVersion the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder apiVersion(java.lang.String apiVersion) {
doSetProperty("apiVersion", apiVersion);
return this;
}
/**
* Backoff interval increment for Streaming connection restart attempts
* for failures beyond CometD auto-reconnect.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: common
*
* @param backoffIncrement the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder backoffIncrement(long backoffIncrement) {
doSetProperty("backoffIncrement", backoffIncrement);
return this;
}
/**
* Bulk API Batch ID.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param batchId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder batchId(java.lang.String batchId) {
doSetProperty("batchId", batchId);
return this;
}
/**
* Bulk API content type, one of XML, CSV, ZIP_XML, ZIP_CSV.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.api.dto.bulk.ContentType</code> type.
*
* Group: common
*
* @param contentType the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder contentType(org.apache.camel.component.salesforce.api.dto.bulk.ContentType contentType) {
doSetProperty("contentType", contentType);
return this;
}
/**
* Default replayId setting if no value is found in initialReplayIdMap.
*
* The option is a: <code>java.lang.Long</code> type.
*
* Default: -1
* Group: common
*
* @param defaultReplayId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder defaultReplayId(java.lang.Long defaultReplayId) {
doSetProperty("defaultReplayId", defaultReplayId);
return this;
}
/**
* ReplayId to fall back to after an Invalid Replay Id response.
*
* The option is a: <code>java.lang.Long</code> type.
*
* Default: -1
* Group: common
*
* @param fallBackReplayId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder fallBackReplayId(java.lang.Long fallBackReplayId) {
doSetProperty("fallBackReplayId", fallBackReplayId);
return this;
}
/**
* Payload format to use for Salesforce API calls, either JSON or XML,
* defaults to JSON. As of Camel 3.12, this option only applies to the
* Raw operation.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.internal.PayloadFormat</code> type.
*
* Group: common
*
* @param format the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder format(org.apache.camel.component.salesforce.internal.PayloadFormat format) {
doSetProperty("format", format);
return this;
}
/**
* Custom Jetty Http Client to use to connect to Salesforce.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.SalesforceHttpClient</code> type.
*
* Group: common
*
* @param httpClient the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpClient(org.apache.camel.component.salesforce.SalesforceHttpClient httpClient) {
doSetProperty("httpClient", httpClient);
return this;
}
/**
* Connection timeout used by the HttpClient when connecting to the
* Salesforce server.
*
* The option is a: <code>long</code> type.
*
* Default: 60000
* Group: common
*
* @param httpClientConnectionTimeout the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpClientConnectionTimeout(long httpClientConnectionTimeout) {
doSetProperty("httpClientConnectionTimeout", httpClientConnectionTimeout);
return this;
}
/**
* Timeout used by the HttpClient when waiting for response from the
* Salesforce server.
*
* The option is a: <code>long</code> type.
*
* Default: 10000
* Group: common
*
* @param httpClientIdleTimeout the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpClientIdleTimeout(long httpClientIdleTimeout) {
doSetProperty("httpClientIdleTimeout", httpClientIdleTimeout);
return this;
}
/**
* Max content length of an HTTP response.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: common
*
* @param httpMaxContentLength the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpMaxContentLength(java.lang.Integer httpMaxContentLength) {
doSetProperty("httpMaxContentLength", httpMaxContentLength);
return this;
}
/**
* HTTP request buffer size. May need to be increased for large SOQL
* queries.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 8192
* Group: common
*
* @param httpRequestBufferSize the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpRequestBufferSize(java.lang.Integer httpRequestBufferSize) {
doSetProperty("httpRequestBufferSize", httpRequestBufferSize);
return this;
}
/**
* Timeout value for HTTP requests.
*
* The option is a: <code>long</code> type.
*
* Default: 60000
* Group: common
*
* @param httpRequestTimeout the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder httpRequestTimeout(long httpRequestTimeout) {
doSetProperty("httpRequestTimeout", httpRequestTimeout);
return this;
}
/**
* Include details in Salesforce1 Analytics report, defaults to false.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param includeDetails the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder includeDetails(java.lang.Boolean includeDetails) {
doSetProperty("includeDetails", includeDetails);
return this;
}
/**
* Replay IDs to start from per channel name.
*
* The option is a: <code>java.util.Map&lt;java.lang.String,
* java.lang.Long&gt;</code> type.
*
* Group: common
*
* @param initialReplayIdMap the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder initialReplayIdMap(java.util.Map<java.lang.String, java.lang.Long> initialReplayIdMap) {
doSetProperty("initialReplayIdMap", initialReplayIdMap);
return this;
}
/**
* Salesforce1 Analytics report execution instance ID.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param instanceId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder instanceId(java.lang.String instanceId) {
doSetProperty("instanceId", instanceId);
return this;
}
/**
* Bulk API Job ID.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param jobId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder jobId(java.lang.String jobId) {
doSetProperty("jobId", jobId);
return this;
}
/**
* Limit on number of returned records. Applicable to some of the API,
* check the Salesforce documentation.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: common
*
* @param limit the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder limit(java.lang.Integer limit) {
doSetProperty("limit", limit);
return this;
}
/**
* Locator provided by salesforce Bulk 2.0 API for use in getting
* results for a Query job.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param locator the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder locator(java.lang.String locator) {
doSetProperty("locator", locator);
return this;
}
/**
* Maximum backoff interval for Streaming connection restart attempts
* for failures beyond CometD auto-reconnect.
*
* The option is a: <code>long</code> type.
*
* Default: 30000
* Group: common
*
* @param maxBackoff the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder maxBackoff(long maxBackoff) {
doSetProperty("maxBackoff", maxBackoff);
return this;
}
/**
* The maximum number of records to retrieve per set of results for a
* Bulk 2.0 Query. The request is still subject to the size limits. If
* you are working with a very large number of query results, you may
* experience a timeout before receiving all the data from Salesforce.
* To prevent a timeout, specify the maximum number of records your
* client is expecting to receive in the maxRecords parameter. This
* splits the results into smaller sets with this value as the maximum
* size.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: common
*
* @param maxRecords the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder maxRecords(java.lang.Integer maxRecords) {
doSetProperty("maxRecords", maxRecords);
return this;
}
/**
* Sets the behaviour of 404 not found status received from Salesforce
* API. Should the body be set to NULL NotFoundBehaviour#NULL or should
* a exception be signaled on the exchange NotFoundBehaviour#EXCEPTION -
* the default.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.NotFoundBehaviour</code> type.
*
* Default: EXCEPTION
* Group: common
*
* @param notFoundBehaviour the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notFoundBehaviour(org.apache.camel.component.salesforce.NotFoundBehaviour notFoundBehaviour) {
doSetProperty("notFoundBehaviour", notFoundBehaviour);
return this;
}
/**
* Notify for fields, options are ALL, REFERENCED, SELECT, WHERE.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.internal.dto.NotifyForFieldsEnum</code> type.
*
* Group: common
*
* @param notifyForFields the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForFields(org.apache.camel.component.salesforce.internal.dto.NotifyForFieldsEnum notifyForFields) {
doSetProperty("notifyForFields", notifyForFields);
return this;
}
/**
* Notify for create operation, defaults to false (API version &gt;=
* 29.0).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param notifyForOperationCreate the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForOperationCreate(java.lang.Boolean notifyForOperationCreate) {
doSetProperty("notifyForOperationCreate", notifyForOperationCreate);
return this;
}
/**
* Notify for delete operation, defaults to false (API version &gt;=
* 29.0).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param notifyForOperationDelete the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForOperationDelete(java.lang.Boolean notifyForOperationDelete) {
doSetProperty("notifyForOperationDelete", notifyForOperationDelete);
return this;
}
/**
* Notify for operations, options are ALL, CREATE, EXTENDED, UPDATE (API
* version &lt; 29.0).
*
* The option is a:
* <code>org.apache.camel.component.salesforce.internal.dto.NotifyForOperationsEnum</code> type.
*
* Group: common
*
* @param notifyForOperations the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForOperations(org.apache.camel.component.salesforce.internal.dto.NotifyForOperationsEnum notifyForOperations) {
doSetProperty("notifyForOperations", notifyForOperations);
return this;
}
/**
* Notify for un-delete operation, defaults to false (API version
* &gt;= 29.0).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param notifyForOperationUndelete the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForOperationUndelete(java.lang.Boolean notifyForOperationUndelete) {
doSetProperty("notifyForOperationUndelete", notifyForOperationUndelete);
return this;
}
/**
* Notify for update operation, defaults to false (API version &gt;=
* 29.0).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param notifyForOperationUpdate the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder notifyForOperationUpdate(java.lang.Boolean notifyForOperationUpdate) {
doSetProperty("notifyForOperationUpdate", notifyForOperationUpdate);
return this;
}
/**
* Custom Jackson ObjectMapper to use when serializing/deserializing
* Salesforce objects.
*
* The option is a:
* <code>com.fasterxml.jackson.databind.ObjectMapper</code>
* type.
*
* Group: common
*
* @param objectMapper the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder objectMapper(com.fasterxml.jackson.databind.ObjectMapper objectMapper) {
doSetProperty("objectMapper", objectMapper);
return this;
}
/**
* In what packages are the generated DTO classes. Typically the classes
* would be generated using camel-salesforce-maven-plugin. Set it if
* using the generated DTOs to gain the benefit of using short SObject
* names in parameters/header values. Multiple packages can be separated
* by comma.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param packages the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder packages(java.lang.String packages) {
doSetProperty("packages", packages);
return this;
}
/**
* Use PK Chunking. Only for use in original Bulk API. Bulk 2.0 API
* performs PK chunking automatically, if necessary.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: common
*
* @param pkChunking the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder pkChunking(java.lang.Boolean pkChunking) {
doSetProperty("pkChunking", pkChunking);
return this;
}
/**
* Chunk size for use with PK Chunking. If unspecified, salesforce
* default is 100,000. Maximum size is 250,000.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: common
*
* @param pkChunkingChunkSize the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder pkChunkingChunkSize(java.lang.Integer pkChunkingChunkSize) {
doSetProperty("pkChunkingChunkSize", pkChunkingChunkSize);
return this;
}
/**
* Specifies the parent object when you're enabling PK chunking for
* queries on sharing objects. The chunks are based on the parent
* object's records rather than the sharing object's records. For
* example, when querying on AccountShare, specify Account as the parent
* object. PK chunking is supported for sharing objects as long as the
* parent object is supported.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param pkChunkingParent the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder pkChunkingParent(java.lang.String pkChunkingParent) {
doSetProperty("pkChunkingParent", pkChunkingParent);
return this;
}
/**
* Specifies the 15-character or 18-character record ID to be used as
* the lower boundary for the first chunk. Use this parameter to specify
* a starting ID when restarting a job that failed between batches.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param pkChunkingStartRow the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder pkChunkingStartRow(java.lang.String pkChunkingStartRow) {
doSetProperty("pkChunkingStartRow", pkChunkingStartRow);
return this;
}
/**
* Query Locator provided by salesforce for use when a query results in
* more records than can be retrieved in a single call. Use this value
* in a subsequent call to retrieve additional records.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param queryLocator the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder queryLocator(java.lang.String queryLocator) {
doSetProperty("queryLocator", queryLocator);
return this;
}
/**
* Use raw payload String for request and response (either JSON or XML
* depending on format), instead of DTOs, false by default.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param rawPayload the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder rawPayload(boolean rawPayload) {
doSetProperty("rawPayload", rawPayload);
return this;
}
/**
* Salesforce1 Analytics report Id.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param reportId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder reportId(java.lang.String reportId) {
doSetProperty("reportId", reportId);
return this;
}
/**
* Salesforce1 Analytics report metadata for filtering.
*
* The option is a:
* <code>org.apache.camel.component.salesforce.api.dto.analytics.reports.ReportMetadata</code> type.
*
* Group: common
*
* @param reportMetadata the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder reportMetadata(org.apache.camel.component.salesforce.api.dto.analytics.reports.ReportMetadata reportMetadata) {
doSetProperty("reportMetadata", reportMetadata);
return this;
}
/**
* Bulk API Result ID.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param resultId the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder resultId(java.lang.String resultId) {
doSetProperty("resultId", resultId);
return this;
}
/**
* SObject blob field name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param sObjectBlobFieldName the value to set
* @return the dsl builder
*/
default SalesforceComponentBuilder sObjectBlobFieldName(java.lang.String sObjectBlobFieldName) {
doSetProperty("sObjectBlobFieldName", sObjectBlobFieldName);
return this;
}
/**
* Fully qualified SObject | SalesforceComponentBuilder |
java | google__guice | core/test/com/google/inject/spi/BindingSourceRestrictionTest.java | {
"start": 639,
"end": 820
} | class ____ tested through the public {@code RestrictedBindingSource} API it's
* implementing.
*
* @author vzm@google.com (Vladimir Makaric)
*/
@RunWith(JUnit4.class)
public final | is |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/error/BlockingExceptionMapperTest.java | {
"start": 6100,
"end": 6345
} | interface ____ {
@GET
@Path("/non-blocking")
InputStream nonBlocking();
}
@Path("/error")
@RegisterRestClient
@RegisterProvider(BlockingExceptionMapper.class)
public | ClientUsingNotBlockingExceptionMapper |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/request/AbstractRequestAttributesScope.java | {
"start": 1061,
"end": 1394
} | class ____ {@link RequestAttributes} scope to read attributes from.
*
* <p>Subclasses may wish to override the {@link #get} and {@link #remove}
* methods to add synchronization around the call back into this superclass.
*
* @author Rod Johnson
* @author Juergen Hoeller
* @author Rob Harrop
* @since 2.0
*/
public abstract | which |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Issue869_1.java | {
"start": 250,
"end": 1814
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
List<DoublePoint> doublePointList = new ArrayList<DoublePoint>();
{
DoublePoint doublePoint = new DoublePoint();
doublePoint.startPoint = new Point(22, 35);
doublePoint.endPoint = doublePoint.startPoint;
doublePointList.add(doublePoint);
}
{
DoublePoint doublePoint = new DoublePoint();
doublePoint.startPoint = new Point(16, 18);
doublePoint.endPoint = doublePoint.startPoint;
doublePointList.add(doublePoint);
}
String json = JSON.toJSONString(doublePointList);
assertEquals("[{\"endPoint\":{\"x\":22,\"y\":35},\"startPoint\":{\"$ref\":\"$[0].endPoint\"}},{\"endPoint\":{\"x\":16,\"y\":18},\"startPoint\":{\"$ref\":\"$[1].endPoint\"}}]", json);
}
public void test_for_issue_parse() throws Exception {
String text = "[{\"endPoint\":{\"x\":22,\"y\":35},\"startPoint\":{\"$ref\":\"$[0].endPoint\"}},{\"endPoint\":{\"$ref\":\"$[1].startPoint\"},\"startPoint\":{\"x\":16,\"y\":18}}]";
List<Issue869.DoublePoint> doublePointList = JSON.parseObject(text, new TypeReference<List<Issue869.DoublePoint>>(){});
assertNotNull(doublePointList.get(0));
assertNotNull(doublePointList.get(1));
assertSame(doublePointList.get(0).startPoint, doublePointList.get(0).endPoint);
assertSame(doublePointList.get(1).startPoint, doublePointList.get(1).endPoint);
}
public static | Issue869_1 |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java | {
"start": 2112,
"end": 6005
} | class ____ {
private long lastProgress;
private final AtomicBoolean reported;
public ReportTime(long time) {
setLastProgress(time);
reported = new AtomicBoolean(false);
}
public synchronized void setLastProgress(long time) {
lastProgress = time;
}
public synchronized long getLastProgress() {
return lastProgress;
}
public boolean isReported(){
return reported.get();
}
}
private static final Logger LOG =
LoggerFactory.getLogger(TaskHeartbeatHandler.class);
//thread which runs periodically to see the last time since a heartbeat is
//received from a task.
private Thread lostTaskCheckerThread;
private volatile boolean stopped;
private long taskTimeOut;
private long unregisterTimeOut;
private long taskStuckTimeOut;
private int taskTimeOutCheckInterval = 30 * 1000; // 30 seconds.
private final EventHandler eventHandler;
private final Clock clock;
private ConcurrentMap<TaskAttemptId, ReportTime> runningAttempts;
private ConcurrentMap<TaskAttemptId, ReportTime> recentlyUnregisteredAttempts;
public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock,
int numThreads) {
super("TaskHeartbeatHandler");
this.eventHandler = eventHandler;
this.clock = clock;
runningAttempts =
new ConcurrentHashMap<TaskAttemptId, ReportTime>(16, 0.75f, numThreads);
recentlyUnregisteredAttempts =
new ConcurrentHashMap<TaskAttemptId, ReportTime>(16, 0.75f, numThreads);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
taskTimeOut = conf.getLong(
MRJobConfig.TASK_TIMEOUT, MRJobConfig.DEFAULT_TASK_TIMEOUT_MILLIS);
unregisterTimeOut = conf.getLong(MRJobConfig.TASK_EXIT_TIMEOUT,
MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT);
taskStuckTimeOut = conf.getLong(MRJobConfig.TASK_STUCK_TIMEOUT_MS,
MRJobConfig.DEFAULT_TASK_STUCK_TIMEOUT_MS);
// enforce task timeout is at least twice as long as task report interval
long taskProgressReportIntervalMillis = MRJobConfUtil.
getTaskProgressReportInterval(conf);
long minimumTaskTimeoutAllowed = taskProgressReportIntervalMillis * 2;
if(taskTimeOut < minimumTaskTimeoutAllowed) {
taskTimeOut = minimumTaskTimeoutAllowed;
LOG.info("Task timeout must be as least twice as long as the task " +
"status report interval. Setting task timeout to " + taskTimeOut);
}
taskTimeOutCheckInterval =
conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000);
}
@Override
protected void serviceStart() throws Exception {
lostTaskCheckerThread = new SubjectInheritingThread(new PingChecker());
lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker");
lostTaskCheckerThread.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
stopped = true;
if (lostTaskCheckerThread != null) {
lostTaskCheckerThread.interrupt();
}
super.serviceStop();
}
public void progressing(TaskAttemptId attemptID) {
//only put for the registered attempts
//TODO throw an exception if the task isn't registered.
ReportTime time = runningAttempts.get(attemptID);
if(time != null) {
time.reported.compareAndSet(false, true);
time.setLastProgress(clock.getTime());
}
}
public void register(TaskAttemptId attemptID) {
runningAttempts.put(attemptID, new ReportTime(clock.getTime()));
}
public void unregister(TaskAttemptId attemptID) {
runningAttempts.remove(attemptID);
recentlyUnregisteredAttempts.put(attemptID,
new ReportTime(clock.getTime()));
}
public boolean hasRecentlyUnregistered(TaskAttemptId attemptID) {
return recentlyUnregisteredAttempts.containsKey(attemptID);
}
private | ReportTime |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java | {
"start": 956,
"end": 1694
} | class ____ extends IOException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
*
* @param messages detailed message.
*/
RpcException(final String message) {
super(message);
}
/**
* Constructs exception with the specified detail message and cause.
*
* @param message message.
* @param cause that cause this exception
* @param cause the cause (can be retried by the {@link #getCause()} method).
* (A <code>null</code> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
RpcException(final String message, final Throwable cause) {
super(message, cause);
}
}
| RpcException |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java | {
"start": 11791,
"end": 12362
} | class ____ extends SubjectInheritingThread {
private String pid = null;
private boolean isProcessGroup = false;
private long sleepTimeBeforeSigKill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
private SigKillThread(String pid, boolean isProcessGroup, long interval) {
this.pid = pid;
this.isProcessGroup = isProcessGroup;
this.setName(this.getClass().getName() + "-" + pid);
sleepTimeBeforeSigKill = interval;
}
public void work() {
sigKillInCurrentThread(pid, isProcessGroup, sleepTimeBeforeSigKill);
}
}
}
| SigKillThread |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/web/server/FormLoginTests.java | {
"start": 17674,
"end": 19253
} | class ____ {
@ResponseBody
@GetMapping("/login")
public Mono<String> login(ServerWebExchange exchange) {
Mono<CsrfToken> token = exchange.getAttributeOrDefault(CsrfToken.class.getName(), Mono.empty());
// @formatter:off
return token.map((t) -> "<!DOCTYPE html>\n"
+ "<html lang=\"en\">\n"
+ " <head>\n"
+ " <meta charset=\"utf-8\">\n"
+ " <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n"
+ " <meta name=\"description\" content=\"\">\n"
+ " <meta name=\"author\" content=\"\">\n"
+ " <title>Custom Log In Page</title>\n"
+ " </head>\n"
+ " <body>\n"
+ " <div>\n"
+ " <form method=\"post\" action=\"/login\">\n"
+ " <h2>Please sign in</h2>\n"
+ " <p>\n"
+ " <label for=\"username\">Username</label>\n"
+ " <input type=\"text\" id=\"username\" name=\"username\" placeholder=\"Username\" required autofocus>\n"
+ " </p>\n"
+ " <p>\n"
+ " <label for=\"password\" class=\"sr-only\">Password</label>\n"
+ " <input type=\"password\" id=\"password\" name=\"password\" placeholder=\"Password\" required>\n"
+ " </p>\n"
+ " <input type=\"hidden\" name=\"" + t.getParameterName() + "\" value=\"" + t.getToken() + "\">\n"
+ " <button type=\"submit\">Sign in</button>\n"
+ " </form>\n"
+ " </div>\n"
+ " </body>\n"
+ "</html>");
// @formatter:on
}
}
}
| CustomLoginPageController |
java | apache__camel | components/camel-jetty/src/test/java/org/apache/camel/component/jetty/HttpsRouteSslContextParametersInUriTest.java | {
"start": 1185,
"end": 2892
} | class ____ extends HttpsRouteTest {
@BindToRegistry("sslContextParameters")
public SSLContextParameters loadSSLParams() {
KeyStoreParameters ksp = new KeyStoreParameters();
ksp.setResource(this.getClass().getClassLoader().getResource("jsse/localhost.p12").toString());
ksp.setPassword(pwd);
KeyManagersParameters kmp = new KeyManagersParameters();
kmp.setKeyPassword(pwd);
kmp.setKeyStore(ksp);
SSLContextParameters sslContextParameters = new SSLContextParameters();
sslContextParameters.setKeyManagers(kmp);
return sslContextParameters;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
JettyHttpComponent jetty = getContext().getComponent("jetty", JettyHttpComponent.class);
// NOTE: These are here to check that they are properly ignored.
setSSLProps(jetty, "", "asdfasdfasdfdasfs", "sadfasdfasdfas");
from("jetty:https://localhost:" + port1 + "/test?sslContextParameters=#sslContextParameters").to("mock:a");
Processor proc = new Processor() {
public void process(Exchange exchange) {
exchange.getMessage().setBody("<b>Hello World</b>");
}
};
from("jetty:https://localhost:" + port1 + "/hello?sslContextParameters=#sslContextParameters").process(proc);
from("jetty:https://localhost:" + port2 + "/test?sslContextParameters=#sslContextParameters").to("mock:b");
}
};
}
}
| HttpsRouteSslContextParametersInUriTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/table/EmbeddedTableTests.java | {
"start": 8443,
"end": 8764
} | class ____ {
@Id
private Integer id;
private String name;
@Embedded
@EmbeddedTable("posts_secondary")
private Tag tag;
}
@Entity(name="PostCompliant")
@Table(name="posts_compliant")
@SecondaryTable(name="posts_compliant_secondary", pkJoinColumns = @PrimaryKeyJoinColumn(name = "post_fk"))
public static | Post |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NewFileSystemTest.java | {
"start": 1960,
"end": 2422
} | class ____ {
void f() throws IOException {
FileSystems.newFileSystem(Paths.get("."), (ClassLoader) null);
}
}
""")
.doTest();
}
@Test
public void negative() {
testHelper
.addSourceLines(
"Test.java",
"""
import java.nio.file.FileSystems;
import java.nio.file.Paths;
import java.io.IOException;
| Test |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/MongodbGridfsComponentBuilderFactory.java | {
"start": 1865,
"end": 5377
} | interface ____ extends ComponentBuilder<GridFsComponent> {
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default MongodbGridfsComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default MongodbGridfsComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default MongodbGridfsComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| MongodbGridfsComponentBuilder |
java | apache__dubbo | dubbo-plugin/dubbo-filter-validation/src/test/java/org/apache/dubbo/validation/support/jvalidation/mock/JValidatorTestTarget.java | {
"start": 2016,
"end": 2274
} | class ____ {
@NotNull(message = "name must not be null")
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| Param |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/ResolvableTypeTests.java | {
"start": 73781,
"end": 75018
} | class ____<T> {
public List classType;
public T typeVariableType;
public List<T> parameterizedType;
public List[] arrayClassType;
public List<String>[] genericArrayType;
public List<String>[][][] genericMultiArrayType;
public List<?> anyListElement;
public List<? extends Number> wildcardType;
public List<? super Number> wildcardSuperType = new ArrayList<Object>();
public List<CharSequence> charSequenceList;
public List<String> stringList;
public List<List<String>> stringListList;
public List<String[]> stringArrayList;
public MultiValueMap<String, Integer> stringIntegerMultiValueMap;
public VariableNameSwitch<Integer, String> stringIntegerMultiValueMapSwitched;
public List<List> listOfListOfUnknown;
@SuppressWarnings("unused")
private List<String> privateField;
@SuppressWarnings("unused")
private List<String> otherPrivateField;
public Map<Map<String, Integer>, Map<Byte, Long>> nested;
public T[] variableTypeGenericArray;
public Integer[] integerArray;
public int[] intArray;
public SomeRepository<? extends Serializable> repository;
public SomeRepository<String> stringRepository;
public SomeRepository<String[]> arrayRepository;
}
static | Fields |
java | quarkusio__quarkus | extensions/scheduler/deployment/src/test/java/io/quarkus/scheduler/test/programmatic/ProgrammaticJobsTest.java | {
"start": 1197,
"end": 6235
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Jobs.class));
@Inject
Scheduler scheduler;
@Inject
MyService myService;
static final CountDownLatch SYNC_LATCH = new CountDownLatch(1);
static final CountDownLatch SYNC_CLASS_LATCH = new CountDownLatch(1);
static final CountDownLatch ASYNC_LATCH = new CountDownLatch(1);
static final AtomicInteger SKIPPED_EXECUTIONS = new AtomicInteger();
static final CountDownLatch ASYNC_CLASS_LATCH = new CountDownLatch(1);
@Test
public void testJobs() throws InterruptedException {
scheduler.newJob("alwaysSkip1")
.setInterval("1s")
.setSkipPredicate(ex -> true)
.setTask(ex -> SKIPPED_EXECUTIONS.incrementAndGet())
.schedule();
scheduler.newJob("alwaysSkip2")
.setInterval("1s")
.setTask(ex -> SKIPPED_EXECUTIONS.incrementAndGet())
.setSkipPredicate(AlwaysSkipPredicate.class)
.schedule();
Scheduler.JobDefinition<?> job1 = scheduler.newJob("foo")
.setInterval("1s")
.setTask(ec -> {
assertTrue(Arc.container().requestContext().isActive());
myService.countDown(SYNC_LATCH);
});
assertEquals("Sync task was already set",
assertThrows(IllegalStateException.class, () -> job1.setAsyncTask(ec -> null)).getMessage());
Scheduler.JobDefinition<?> job2 = scheduler.newJob("foo").setCron("0/5 * * * * ?");
assertEquals("Either sync or async task must be set",
assertThrows(IllegalStateException.class, () -> job2.schedule()).getMessage());
job2.setTask(ec -> {
});
Trigger trigger1 = job1.schedule();
assertNotNull(trigger1);
assertTrue(ProgrammaticJobsTest.SYNC_LATCH.await(5, TimeUnit.SECONDS));
assertEquals("Cannot modify a job that was already scheduled",
assertThrows(IllegalStateException.class, () -> job1.setCron("fff")).getMessage());
// Since job1 was already scheduled - job2 defines a non-unique identity
assertEquals("A job with this identity is already scheduled: foo",
assertThrows(IllegalStateException.class, () -> job2.schedule()).getMessage());
// Identity must be unique
assertEquals("A job with this identity is already scheduled: foo",
assertThrows(IllegalStateException.class, () -> scheduler.newJob("foo")).getMessage());
assertEquals("A job with this identity is already scheduled: bar",
assertThrows(IllegalStateException.class, () -> scheduler.newJob("bar")).getMessage());
// No-op
assertNull(scheduler.unscheduleJob("bar"));
assertNull(scheduler.unscheduleJob("nonexisting"));
assertNotNull(scheduler.unscheduleJob("foo"));
assertNotNull(scheduler.unscheduleJob("alwaysSkip1"));
assertNotNull(scheduler.unscheduleJob("alwaysSkip2"));
assertEquals(0, SKIPPED_EXECUTIONS.get());
// Jobs#dummy()
assertEquals(1, scheduler.getScheduledJobs().size());
}
@Test
public void testAsyncJob() throws InterruptedException {
JobDefinition<?> asyncJob = scheduler.newJob("fooAsync")
.setInterval("1s")
.setAsyncTask(ec -> {
assertTrue(Context.isOnEventLoopThread() && VertxContext.isOnDuplicatedContext());
assertTrue(Arc.container().requestContext().isActive());
myService.countDown(ASYNC_LATCH);
return Uni.createFrom().voidItem();
});
assertEquals("Async task was already set",
assertThrows(IllegalStateException.class, () -> asyncJob.setTask(ec -> {
})).getMessage());
Trigger trigger = asyncJob.schedule();
assertNotNull(trigger);
assertTrue(ProgrammaticJobsTest.ASYNC_LATCH.await(5, TimeUnit.SECONDS));
assertNotNull(scheduler.unscheduleJob("fooAsync"));
}
@Test
public void testClassJobs() throws InterruptedException {
scheduler.newJob("fooClass")
.setInterval("1s")
.setTask(JobClassTask.class)
.schedule();
assertTrue(ProgrammaticJobsTest.SYNC_CLASS_LATCH.await(5, TimeUnit.SECONDS));
assertNotNull(scheduler.unscheduleJob("fooClass"));
}
@Test
public void testClassAsyncJobs() throws InterruptedException {
scheduler.newJob("fooAsyncClass")
.setInterval("1s")
.setAsyncTask(JobClassAsyncTask.class)
.schedule();
assertTrue(ProgrammaticJobsTest.ASYNC_CLASS_LATCH.await(5, TimeUnit.SECONDS));
assertNotNull(scheduler.unscheduleJob("fooAsyncClass"));
}
static | ProgrammaticJobsTest |
java | apache__flink | flink-metrics/flink-metrics-core/src/main/java/org/apache/flink/metrics/Metric.java | {
"start": 948,
"end": 1107
} | interface ____ {
default MetricType getMetricType() {
throw new UnsupportedOperationException("Custom metric types are not supported.");
}
}
| Metric |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/SubclassFromInterfaceSerializerTest.java | {
"start": 2907,
"end": 3704
} | class ____ implements TestUserInterface {
public int dumm1;
public String dumm2;
public TestUserClassBase() {}
public TestUserClassBase(int dumm1, String dumm2) {
this.dumm1 = dumm1;
this.dumm2 = dumm2;
}
@Override
public int hashCode() {
return Objects.hash(dumm1, dumm2);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof TestUserClassBase)) {
return false;
}
TestUserClassBase otherTUC = (TestUserClassBase) other;
if (dumm1 != otherTUC.dumm1) {
return false;
}
return dumm2.equals(otherTUC.dumm2);
}
}
public static | TestUserClassBase |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/topology/TopologyComparators.java | {
"start": 6324,
"end": 7458
} | enum ____ implements Comparator<RedisClusterNode> {
INSTANCE;
@Override
public int compare(RedisClusterNode o1, RedisClusterNode o2) {
if (o1 instanceof RedisClusterNodeSnapshot && o2 instanceof RedisClusterNodeSnapshot) {
RedisClusterNodeSnapshot w1 = (RedisClusterNodeSnapshot) o1;
RedisClusterNodeSnapshot w2 = (RedisClusterNodeSnapshot) o2;
if (w1.getLatencyNs() != null && w2.getLatencyNs() != null) {
return w1.getLatencyNs().compareTo(w2.getLatencyNs());
}
if (w1.getLatencyNs() != null && w2.getLatencyNs() == null) {
return -1;
}
if (w1.getLatencyNs() == null && w2.getLatencyNs() != null) {
return 1;
}
}
return 0;
}
}
/**
* Compare {@link RedisClusterNodeSnapshot} based on their client count. Lowest comes first. Objects of type
* {@link RedisClusterNode} cannot be compared and yield to a result of {@literal 0}.
*/
| LatencyComparator |
java | elastic__elasticsearch | modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java | {
"start": 34652,
"end": 35522
} | class ____ extends TokenFilter {
private final int extraPositionIncrements;
private final PositionIncrementAttribute positionIncrementAttribute;
TrailingShingleTokenFilter(TokenStream input, int extraPositionIncrements) {
super(input);
this.extraPositionIncrements = extraPositionIncrements;
this.positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
return input.incrementToken();
}
@Override
public void end() throws IOException {
super.end();
positionIncrementAttribute.setPositionIncrement(extraPositionIncrements);
}
}
}
}
| TrailingShingleTokenFilter |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java | {
"start": 896,
"end": 2700
} | class ____ implements Writeable {
private final Map<Term, TermStatistics> termStatistics;
private final Map<String, CollectionStatistics> fieldStatistics;
private final long maxDoc;
public AggregatedDfs(StreamInput in) throws IOException {
int size = in.readVInt();
termStatistics = new HashMap<>(size);
for (int i = 0; i < size; i++) {
// term constructor copies the bytes so we can work with a slice
Term term = new Term(in.readString(), in.readSlicedBytesReference().toBytesRef());
TermStatistics stats = new TermStatistics(in.readBytesRef(), in.readVLong(), DfsSearchResult.subOne(in.readVLong()));
termStatistics.put(term, stats);
}
fieldStatistics = DfsSearchResult.readFieldStats(in);
maxDoc = in.readVLong();
}
public AggregatedDfs(Map<Term, TermStatistics> termStatistics, Map<String, CollectionStatistics> fieldStatistics, long maxDoc) {
this.termStatistics = termStatistics;
this.fieldStatistics = fieldStatistics;
this.maxDoc = maxDoc;
}
public Map<Term, TermStatistics> termStatistics() {
return termStatistics;
}
public Map<String, CollectionStatistics> fieldStatistics() {
return fieldStatistics;
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeMap(termStatistics, (o, k) -> {
o.writeString(k.field());
o.writeBytesRef(k.bytes());
}, (o, v) -> {
o.writeBytesRef(v.term());
o.writeVLong(v.docFreq());
o.writeVLong(DfsSearchResult.addOne(v.totalTermFreq()));
});
DfsSearchResult.writeFieldStats(out, fieldStatistics);
out.writeVLong(maxDoc);
}
}
| AggregatedDfs |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-artifact/src/main/java/org/apache/maven/plugin/coreit/CustomRepositoryLayout.java | {
"start": 1196,
"end": 2244
} | class ____ implements ArtifactRepositoryLayout {
@Override
public String getId() {
return "id";
}
public String pathOf(Artifact artifact) {
ArtifactHandler artifactHandler = artifact.getArtifactHandler();
StringBuilder path = new StringBuilder();
path.append(artifact.getArtifactId()).append('-').append(artifact.getVersion());
if (artifact.hasClassifier()) {
path.append('-').append(artifact.getClassifier());
}
if (artifactHandler.getExtension() != null
&& artifactHandler.getExtension().length() > 0) {
path.append('.').append(artifactHandler.getExtension());
}
return path.toString();
}
public String pathOfLocalRepositoryMetadata(ArtifactMetadata metadata, ArtifactRepository repository) {
return metadata.getLocalFilename(repository);
}
public String pathOfRemoteRepositoryMetadata(ArtifactMetadata metadata) {
return metadata.getRemoteFilename();
}
}
| CustomRepositoryLayout |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/casting/IdentityCastRule.java | {
"start": 1189,
"end": 3163
} | class ____ extends AbstractCodeGeneratorCastRule<Object, Object>
implements ExpressionCodeGeneratorCastRule<Object, Object> {
static final IdentityCastRule INSTANCE = new IdentityCastRule();
private IdentityCastRule() {
super(CastRulePredicate.builder().predicate(IdentityCastRule::isIdentityCast).build());
}
private static boolean isIdentityCast(
LogicalType inputLogicalType, LogicalType targetLogicalType) {
// INTERVAL_YEAR_MONTH and INTEGER uses the same primitive int type
if ((inputLogicalType.is(LogicalTypeRoot.INTERVAL_YEAR_MONTH)
&& targetLogicalType.is(LogicalTypeRoot.INTEGER))
|| (inputLogicalType.is(LogicalTypeRoot.INTEGER)
&& targetLogicalType.is(LogicalTypeRoot.INTERVAL_YEAR_MONTH))) {
return true;
}
// INTERVAL_DAY_TIME and BIGINT uses the same primitive long type
if ((inputLogicalType.is(LogicalTypeRoot.INTERVAL_DAY_TIME)
&& targetLogicalType.is(LogicalTypeRoot.BIGINT))
|| (inputLogicalType.is(LogicalTypeRoot.BIGINT)
&& targetLogicalType.is(LogicalTypeRoot.INTERVAL_DAY_TIME))) {
return true;
}
return LogicalTypeCasts.supportsAvoidingCast(inputLogicalType, targetLogicalType);
}
@Override
public String generateExpression(
CodeGeneratorCastRule.Context context,
String inputTerm,
LogicalType inputLogicalType,
LogicalType targetLogicalType) {
return inputTerm;
}
@Override
public CastCodeBlock generateCodeBlock(
CodeGeneratorCastRule.Context context,
String inputTerm,
String inputIsNullTerm,
LogicalType inputLogicalType,
LogicalType targetLogicalType) {
return CastCodeBlock.withoutCode(inputTerm, inputIsNullTerm);
}
}
| IdentityCastRule |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java | {
"start": 3786,
"end": 4533
} | class ____ implements ApplicationMasterProtocol {
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
}
}
| AMRMProtocolTestImpl |
java | apache__camel | components/camel-sjms2/src/generated/java/org/apache/camel/component/sjms2/Sjms2ComponentConfigurer.java | {
"start": 727,
"end": 860
} | class ____ extends SjmsComponentConfigurer implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
}
| Sjms2ComponentConfigurer |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/RequestHeaderRegistry.java | {
"start": 2452,
"end": 2587
} | class ____ {
private static final RequestHeaderRegistry INSTANCE = new RequestHeaderRegistry();
}
}
| RequestHeaderRegistryHolder |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/LookupCacheManager.java | {
"start": 3904,
"end": 4149
} | class ____ exposed as public for testing purpose and not thread safe. Concurrent
* accesses should be guarded by synchronized methods provided by {@link LookupCacheManager}.
*/
@NotThreadSafe
@VisibleForTesting
public static | is |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableSampleWithObservable.java | {
"start": 4396,
"end": 4846
} | class ____<T> extends SampleMainObserver<T> {
private static final long serialVersionUID = -3029755663834015785L;
SampleMainNoLast(Observer<? super T> actual, ObservableSource<?> other) {
super(actual, other);
}
@Override
void completion() {
downstream.onComplete();
}
@Override
void run() {
emit();
}
}
static final | SampleMainNoLast |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/BDDSoftAssertions_ThrowableTypeAssert_Test.java | {
"start": 5172,
"end": 5852
} | class ____<T extends Throwable>
implements Function<BDDSoftAssertions, ThrowableTypeAssert<T>> {
private Function<BDDSoftAssertions, ThrowableTypeAssert<T>> function;
private String assertionMethod;
SoftAssertionsFunction(String assertionMethod, Function<BDDSoftAssertions, ThrowableTypeAssert<T>> softAssertionsFunction) {
this.function = softAssertionsFunction;
this.assertionMethod = assertionMethod;
}
@Override
public ThrowableTypeAssert<T> apply(BDDSoftAssertions softly) {
return function.apply(softly);
}
@Override
public String toString() {
return this.assertionMethod;
}
}
}
| SoftAssertionsFunction |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/component/LifecycleTests.java | {
"start": 3553,
"end": 5058
} | class ____ implements Releasable {
final int threads;
final CyclicBarrier barrier;
final ExecutorService executor;
ThreadSafetyTestHarness(int threads) {
this.threads = threads;
this.barrier = new CyclicBarrier(threads);
this.executor = EsExecutors.newScaling(
"test",
threads,
threads,
10,
TimeUnit.SECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory("test"),
new ThreadContext(Settings.EMPTY)
);
}
void testTransition(BooleanSupplier doTransition) {
final var transitioned = new AtomicBoolean();
safeAwait((ActionListener<Void> listener) -> {
try (var listeners = new RefCountingListener(listener)) {
for (int i = 0; i < threads; i++) {
executor.execute(ActionRunnable.run(listeners.acquire(), () -> {
safeAwait(barrier);
if (doTransition.getAsBoolean()) {
assertTrue(transitioned.compareAndSet(false, true));
}
}));
}
}
});
assertTrue(transitioned.get());
}
@Override
public void close() {
terminate(executor);
}
}
}
| ThreadSafetyTestHarness |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/HazelcastMapComponentBuilderFactory.java | {
"start": 1894,
"end": 6792
} | interface ____ extends ComponentBuilder<HazelcastMapComponent> {
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default HazelcastMapComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default HazelcastMapComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default HazelcastMapComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* The hazelcast instance reference which can be used for hazelcast
* endpoint. If you don't specify the instance reference, camel use the
* default hazelcast instance from the camel-hazelcast instance.
*
* The option is a:
* <code>com.hazelcast.core.HazelcastInstance</code> type.
*
* Group: advanced
*
* @param hazelcastInstance the value to set
* @return the dsl builder
*/
default HazelcastMapComponentBuilder hazelcastInstance(com.hazelcast.core.HazelcastInstance hazelcastInstance) {
doSetProperty("hazelcastInstance", hazelcastInstance);
return this;
}
/**
* The hazelcast mode reference which kind of instance should be used.
* If you don't specify the mode, then the node mode will be the
* default.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: node
* Group: advanced
*
* @param hazelcastMode the value to set
* @return the dsl builder
*/
default HazelcastMapComponentBuilder hazelcastMode(java.lang.String hazelcastMode) {
doSetProperty("hazelcastMode", hazelcastMode);
return this;
}
}
| HazelcastMapComponentBuilder |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java | {
"start": 1300,
"end": 52150
} | interface ____<K, V> {
/**
* Create a new search index with the given name and field definitions using default settings.
*
* <p>
* This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis
* data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other
* configuration options.
* </p>
*
* <p>
* <strong>Time complexity:</strong> O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is
* triggered, where N is the number of keys in the keyspace
* </p>
*
* @param index the index name
* @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types
* @return {@code "OK"} if the index was created successfully
* @see <a href="https://redis.io/docs/latest/commands/ft.create/">FT.CREATE</a>
* @see CreateArgs
* @see FieldArgs
* @see #ftCreate(String, CreateArgs, List)
* @see #ftDropindex(String)
*/
@Experimental
String ftCreate(String index, List<FieldArgs<K>> fieldArgs);
/**
* Create a new search index with the given name, custom configuration, and field definitions.
*
* <p>
* This command creates a new search index with advanced configuration options that control how the index behaves, what data
* it indexes, and how it processes documents. This variant provides full control over index creation parameters.
* </p>
*
* <p>
* The {@link CreateArgs} parameter allows you to specify:
* </p>
* <ul>
* <li><strong>Data type:</strong> HASH (default) or JSON documents</li>
* <li><strong>Key prefixes:</strong> Which keys to index based on prefix patterns</li>
* <li><strong>Filters:</strong> Conditional indexing based on field values</li>
* <li><strong>Language settings:</strong> Default language and language field for stemming</li>
* <li><strong>Performance options:</strong> NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization</li>
* <li><strong>Temporary indexes:</strong> Auto-expiring indexes for short-term use</li>
* </ul>
*
* <p>
* <strong>Time complexity:</strong> O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is
* triggered, where N is the number of keys in the keyspace
* </p>
*
* @param index the index name
* @param arguments the index {@link CreateArgs} containing configuration options
* @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types
* @return {@code "OK"} if the index was created successfully
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.create/">FT.CREATE</a>
* @see CreateArgs
* @see FieldArgs
* @see #ftCreate(String, List)
* @see #ftDropindex(String)
*/
@Experimental
String ftCreate(String index, CreateArgs<K, V> arguments, List<FieldArgs<K>> fieldArgs);
/**
* Add an alias to a search index.
*
* <p>
* This command creates an alias that points to an existing search index, allowing applications to reference the index by an
* alternative name. Aliases provide a level of indirection that enables transparent index management and migration
* strategies.
* </p>
*
* <p>
* Key features and use cases:
* </p>
* <ul>
* <li><strong>Index abstraction:</strong> Applications can use stable alias names while underlying indexes change</li>
* <li><strong>Blue-green deployments:</strong> Switch traffic between old and new indexes seamlessly</li>
* <li><strong>A/B testing:</strong> Route different application instances to different indexes</li>
* <li><strong>Maintenance windows:</strong> Redirect queries during index rebuilds or migrations</li>
* </ul>
*
* <p>
* <strong>Important notes:</strong>
* </p>
* <ul>
* <li>An index can have multiple aliases, but an alias can only point to one index</li>
* <li>Aliases cannot reference other aliases (no alias chaining)</li>
* <li>If the alias already exists, this command will fail with an error</li>
* <li>Use {@link #ftAliasupdate(String, String)} to reassign an existing alias</li>
* </ul>
*
* <p>
* <strong>Time complexity:</strong> O(1)
* </p>
*
* @param alias the alias name to create
* @param index the target index name that the alias will point to
* @return {@code "OK"} if the alias was successfully created
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.aliasadd/">FT.ALIASADD</a>
* @see #ftAliasupdate(String, String)
* @see #ftAliasdel(String)
*/
@Experimental
String ftAliasadd(String alias, String index);
/**
 * Update an existing alias to point to a different search index.
 *
 * <p>
 * This command updates an existing alias to point to a different index, or creates the alias if it does not exist. Unlike
 * {@link #ftAliasadd(String, String)}, this command will succeed even if the alias already exists, making it useful for
 * atomic alias updates during index migrations.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Atomic updates:</strong> Change alias target without downtime</li>
 * <li><strong>Index migration:</strong> Seamlessly switch from old to new index versions</li>
 * <li><strong>Rollback capability:</strong> Quickly revert to previous index if issues arise</li>
 * <li><strong>Blue-green deployments:</strong> Switch production traffic between index versions</li>
 * </ul>
 *
 * <p>
 * <strong>Important notes:</strong>
 * </p>
 * <ul>
 * <li>If the alias does not exist, it will be created (same as {@code ftAliasadd})</li>
 * <li>If the alias exists, it will be updated to point to the new index</li>
 * <li>The previous index association is removed automatically</li>
 * <li>This operation is atomic - no intermediate state where alias is undefined</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param alias the alias name to update or create
 * @param index the target index name that the alias will point to
 * @return {@code "OK"} if the alias was successfully updated
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.aliasupdate/">FT.ALIASUPDATE</a>
 * @see #ftAliasadd(String, String)
 * @see #ftAliasdel(String)
 */
@Experimental
String ftAliasupdate(String alias, String index);
/**
 * Remove an alias from a search index.
 *
 * <p>
 * This command removes an existing alias, breaking the association between the alias name and its target index. The
 * underlying index remains unchanged and accessible by its original name.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Cleanup:</strong> Remove unused or obsolete aliases</li>
 * <li><strong>Security:</strong> Revoke access to indexes through specific alias names</li>
 * <li><strong>Maintenance:</strong> Temporarily disable access during maintenance windows</li>
 * <li><strong>Resource management:</strong> Clean up aliases before index deletion</li>
 * </ul>
 *
 * <p>
 * <strong>Important notes:</strong>
 * </p>
 * <ul>
 * <li>Only the alias is removed - the target index is not affected</li>
 * <li>If the alias does not exist, this command will fail with an error</li>
 * <li>Applications using the alias will receive errors after deletion</li>
 * <li>Consider using {@link #ftAliasupdate(String, String)} to redirect before deletion</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param alias the alias name to remove
 * @return {@code "OK"} if the alias was successfully removed
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.aliasdel/">FT.ALIASDEL</a>
 * @see #ftAliasadd(String, String)
 * @see #ftAliasupdate(String, String)
 */
@Experimental
String ftAliasdel(String alias);
/**
 * Add new attributes to an existing search index.
 *
 * <p>
 * This command allows you to extend an existing search index by adding new searchable fields without recreating the entire
 * index. The new attributes will be applied to future document updates and can optionally be applied to existing documents
 * through reindexing.
 * </p>
 *
 * <p>
 * Key features and considerations:
 * </p>
 * <ul>
 * <li><strong>Non-destructive:</strong> Existing index structure and data remain intact</li>
 * <li><strong>Incremental indexing:</strong> New fields are indexed as documents are updated</li>
 * <li><strong>Reindexing control:</strong> Option to skip initial scan for performance</li>
 * <li><strong>Field limitations:</strong> Text field limits may apply based on index creation options</li>
 * </ul>
 *
 * <p>
 * <strong>Important notes:</strong>
 * </p>
 * <ul>
 * <li>If the index was created without {@code MAXTEXTFIELDS}, you may be limited to 32 total text attributes</li>
 * <li>New attributes are only indexed for documents that are updated after the ALTER command</li>
 * <li>Set {@code skipInitialScan} to {@code true} (the {@code SKIPINITIALSCAN} option) to avoid scanning existing
 * documents if immediate indexing is not required</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N) where N is the number of keys in the keyspace if initial scan is performed, O(1)
 * if {@code SKIPINITIALSCAN} is used
 * </p>
 *
 * @param index the index name, as a key
 * @param skipInitialScan if {@code true}, skip scanning and indexing existing documents; if {@code false}, scan and index
 *        existing documents with the new attributes
 * @param fieldArgs the {@link FieldArgs} list defining the new searchable fields and their types to add
 * @return {@code "OK"} if the index was successfully altered
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.alter/">FT.ALTER</a>
 * @see FieldArgs
 * @see #ftCreate(String, List)
 * @see #ftCreate(String, CreateArgs, List)
 */
@Experimental
String ftAlter(String index, boolean skipInitialScan, List<FieldArgs<K>> fieldArgs);
/**
 * Add new attributes to an existing search index.
 *
 * <p>
 * This command allows you to extend an existing search index by adding new searchable fields without recreating the entire
 * index. The new attributes will be applied to future document updates and can optionally be applied to existing documents
 * through reindexing.
 * </p>
 *
 * <p>
 * Key features and considerations:
 * </p>
 * <ul>
 * <li><strong>Non-destructive:</strong> Existing index structure and data remain intact</li>
 * <li><strong>Incremental indexing:</strong> New fields are indexed as documents are updated</li>
 * <li><strong>Reindexing control:</strong> Option to skip initial scan for performance</li>
 * <li><strong>Field limitations:</strong> Text field limits may apply based on index creation options</li>
 * </ul>
 *
 * <p>
 * To skip the initial document scan, use {@link #ftAlter(String, boolean, List)} with {@code skipInitialScan} set to
 * {@code true}.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N) where N is the number of keys in the keyspace if initial scan is performed
 * </p>
 *
 * @param index the index name, as a key
 * @param fieldArgs the {@link FieldArgs} list defining the new searchable fields and their types to add
 * @return {@code "OK"} if the index was successfully altered
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.alter/">FT.ALTER</a>
 * @see FieldArgs
 * @see #ftAlter(String, boolean, List)
 * @see #ftCreate(String, List)
 * @see #ftCreate(String, CreateArgs, List)
 */
@Experimental
String ftAlter(String index, List<FieldArgs<K>> fieldArgs);
/**
 * Return a distinct set of values indexed in a Tag field.
 *
 * <p>
 * This command retrieves all unique values that have been indexed in a specific Tag field within a search index. It's
 * particularly useful for discovering the range of values available in categorical fields such as cities, categories,
 * status values, or any other enumerated data.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Data exploration:</strong> Discover all possible values in a tag field</li>
 * <li><strong>Filter building:</strong> Populate dropdown lists or filter options in applications</li>
 * <li><strong>Data validation:</strong> Verify expected values are present in the index</li>
 * <li><strong>Analytics:</strong> Understand the distribution of categorical data</li>
 * </ul>
 *
 * <p>
 * <strong>Important limitations:</strong>
 * </p>
 * <ul>
 * <li>Only works with Tag fields defined in the index schema</li>
 * <li>No paging or sorting is provided; all values are returned at once</li>
 * <li>Tags are not alphabetically sorted in the response</li>
 * <li>Returned strings are lowercase with whitespaces removed</li>
 * <li>Performance scales with the number of unique values (O(N) complexity)</li>
 * </ul>
 *
 * <p>
 * <strong>Example usage scenarios:</strong>
 * </p>
 * <ul>
 * <li>Retrieving all available product categories for an e-commerce filter</li>
 * <li>Getting all city names indexed for location-based searches</li>
 * <li>Listing all status values (active, inactive, pending) for administrative interfaces</li>
 * <li>Discovering all tags or labels applied to content</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N) where N is the number of distinct values in the tag field
 * </p>
 *
 * @param index the index name containing the tag field
 * @param fieldName the name of the Tag field defined in the index schema
 * @return a list of all distinct values indexed in the specified tag field. The list contains the raw tag values as they
 *         were indexed (lowercase, whitespace removed).
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.tagvals/">FT.TAGVALS</a>
 * @see #ftCreate(String, List)
 * @see #ftCreate(String, CreateArgs, List)
 */
@Experimental
List<V> ftTagvals(String index, String fieldName);
/**
 * Perform spelling correction on a query, returning suggestions for misspelled terms.
 *
 * <p>
 * This command analyzes the query for misspelled terms and provides spelling suggestions based on the indexed terms and
 * optionally custom dictionaries. A misspelled term is a full text term (word) that is:
 * </p>
 * <ul>
 * <li>Not a stop word</li>
 * <li>Not in the index</li>
 * <li>At least 3 characters long</li>
 * </ul>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Query correction:</strong> Improve search experience by suggesting corrections</li>
 * <li><strong>Typo handling:</strong> Handle common typing mistakes and misspellings</li>
 * <li><strong>Search enhancement:</strong> Increase search success rates</li>
 * <li><strong>User experience:</strong> Provide "did you mean" functionality</li>
 * </ul>
 *
 * <p>
 * Use {@link #ftSpellcheck(String, Object, SpellCheckArgs)} to customize the maximum edit distance, custom dictionaries,
 * and dialect.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index with the indexed terms
 * @param query the search query to check for spelling errors
 * @return spell check result containing misspelled terms and their suggestions
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.spellcheck/">FT.SPELLCHECK</a>
 * @see <a href="https://redis.io/docs/latest/develop/ai/search-and-query/advanced-concepts/spellcheck/">Spellchecking</a>
 * @see #ftSpellcheck(String, Object, SpellCheckArgs)
 * @see #ftDictadd(String, Object[])
 * @see #ftDictdel(String, Object[])
 * @see #ftDictdump(String)
 */
@Experimental
SpellCheckResult<V> ftSpellcheck(String index, V query);
/**
 * Perform spelling correction on a query with additional options.
 *
 * <p>
 * This command analyzes the query for misspelled terms and provides spelling suggestions with configurable options for
 * distance, custom dictionaries, and dialect.
 * </p>
 *
 * <p>
 * Available options:
 * </p>
 * <ul>
 * <li><strong>DISTANCE:</strong> Maximum Levenshtein distance for suggestions (default: 1, max: 4)</li>
 * <li><strong>TERMS INCLUDE:</strong> Include terms from custom dictionaries as suggestions</li>
 * <li><strong>TERMS EXCLUDE:</strong> Exclude terms from custom dictionaries from suggestions</li>
 * <li><strong>DIALECT:</strong> Specify dialect version for query execution</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index with the indexed terms
 * @param query the search query to check for spelling errors
 * @param args the spell check arguments (distance, include/exclude terms, dialect)
 * @return spell check result containing misspelled terms and their suggestions
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.spellcheck/">FT.SPELLCHECK</a>
 * @see <a href="https://redis.io/docs/latest/develop/ai/search-and-query/advanced-concepts/spellcheck/">Spellchecking</a>
 * @see #ftSpellcheck(String, Object)
 * @see #ftDictadd(String, Object[])
 * @see #ftDictdel(String, Object[])
 * @see #ftDictdump(String)
 */
@Experimental
SpellCheckResult<V> ftSpellcheck(String index, V query, SpellCheckArgs<K, V> args);
/**
 * Add terms to a dictionary.
 *
 * <p>
 * This command adds one or more terms to a dictionary. Dictionaries are used for storing custom stopwords, synonyms, and
 * other term lists that can be referenced in search operations. The dictionary is created if it does not exist.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Stopwords:</strong> Create custom stopword lists for filtering</li>
 * <li><strong>Synonyms:</strong> Build synonym dictionaries for query expansion</li>
 * <li><strong>Custom terms:</strong> Store domain-specific terminology</li>
 * <li><strong>Blacklists:</strong> Maintain lists of prohibited terms</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param dict the dictionary name
 * @param terms the terms to add to the dictionary
 * @return the number of new terms that were added; terms already present in the dictionary are not counted
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.dictadd/">FT.DICTADD</a>
 * @see <a href="https://redis.io/docs/latest/develop/ai/search-and-query/advanced-concepts/spellcheck/">Spellchecking</a>
 * @see #ftDictdel(String, Object[])
 * @see #ftDictdump(String)
 */
@Experimental
Long ftDictadd(String dict, V... terms);
/**
 * Delete terms from a dictionary.
 *
 * <p>
 * This command removes one or more terms from a dictionary. Only exact matches are removed from the dictionary;
 * non-existent terms are ignored.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param dict the dictionary name
 * @param terms the terms to delete from the dictionary
 * @return the number of terms that were deleted
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.dictdel/">FT.DICTDEL</a>
 * @see #ftDictadd(String, Object[])
 * @see #ftDictdump(String)
 */
@Experimental
Long ftDictdel(String dict, V... terms);
/**
 * Dump all terms in a dictionary.
 *
 * <p>
 * This command returns all terms stored in the specified dictionary. The terms are returned in no particular order.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N), where N is the size of the dictionary
 * </p>
 *
 * @param dict the dictionary name
 * @return a list of all terms in the dictionary (order unspecified)
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.dictdump/">FT.DICTDUMP</a>
 * @see #ftDictadd(String, Object[])
 * @see #ftDictdel(String, Object[])
 */
@Experimental
List<V> ftDictdump(String dict);
/**
 * Return the execution plan for a complex query.
 *
 * <p>
 * This command returns a string representing the execution plan that Redis Search will use to execute the given query. This
 * is useful for understanding how the query will be processed and for optimizing query performance.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Query optimization:</strong> Understand how queries are executed</li>
 * <li><strong>Performance analysis:</strong> Identify potential bottlenecks</li>
 * <li><strong>Debugging:</strong> Troubleshoot complex query behavior</li>
 * <li><strong>Learning:</strong> Understand Redis Search query processing</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @param query the search query to explain
 * @return the execution plan as a string
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.explain/">FT.EXPLAIN</a>
 * @see #ftExplain(String, Object, ExplainArgs)
 * @see #ftSearch(String, Object)
 */
@Experimental
String ftExplain(String index, V query);
/**
 * Return the execution plan for a complex query with additional options.
 *
 * <p>
 * This command returns a string representing the execution plan that Redis Search will use to execute the given query under
 * the specified dialect version.
 * </p>
 *
 * <p>
 * Available options:
 * </p>
 * <ul>
 * <li><strong>DIALECT:</strong> Specify dialect version for query execution</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @param query the search query to explain
 * @param args the explain arguments (dialect)
 * @return the execution plan as a string
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.explain/">FT.EXPLAIN</a>
 * @see #ftExplain(String, Object)
 * @see #ftSearch(String, Object)
 */
@Experimental
String ftExplain(String index, V query, ExplainArgs<K, V> args);
/**
 * Return a list of all existing indexes.
 *
 * <p>
 * This command returns an array with the names of all existing indexes in the database. This is useful for discovering
 * available indexes and managing index lifecycle.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Index discovery:</strong> Find all available search indexes</li>
 * <li><strong>Management:</strong> List indexes for administrative operations</li>
 * <li><strong>Monitoring:</strong> Track index creation and deletion</li>
 * <li><strong>Debugging:</strong> Verify index existence</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * <p>
 * <strong>Note:</strong> This is a temporary command (indicated by the underscore prefix). In the future, a SCAN-type
 * command will be added for use when a database contains a large number of indexes.
 * </p>
 *
 * @return a list of index names
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft._list/">FT._LIST</a>
 * @see #ftCreate(String, CreateArgs, List)
 * @see #ftDropindex(String)
 */
@Experimental
List<V> ftList();
/**
 * Dump synonym group contents.
 *
 * <p>
 * This command returns the contents of a synonym group. Synonym groups are used to define terms that should be treated as
 * equivalent during search operations.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Synonym management:</strong> View current synonym definitions</li>
 * <li><strong>Query expansion:</strong> Understand how terms are expanded</li>
 * <li><strong>Debugging:</strong> Verify synonym group contents</li>
 * <li><strong>Administration:</strong> Audit synonym configurations</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @return a map where keys are synonym terms and values are lists of group IDs containing that synonym
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.syndump/">FT.SYNDUMP</a>
 * @see #ftSynupdate(String, Object, Object[])
 * @see #ftSynupdate(String, Object, SynUpdateArgs, Object[])
 */
@Experimental
Map<V, List<V>> ftSyndump(String index);
/**
 * Update a synonym group with additional terms.
 *
 * <p>
 * This command creates or updates a synonym group with the specified terms. All terms in a synonym group are treated as
 * equivalent during search operations. The command triggers a scan of all documents by default.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Synonym creation:</strong> Define equivalent terms for search</li>
 * <li><strong>Query expansion:</strong> Improve search recall with synonyms</li>
 * <li><strong>Language support:</strong> Handle different languages and dialects</li>
 * <li><strong>Domain terminology:</strong> Map technical terms to common language</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @param synonymGroupId the synonym group identifier
 * @param terms the terms to add to the synonym group
 * @return {@code "OK"} if executed correctly
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.synupdate/">FT.SYNUPDATE</a>
 * @see #ftSynupdate(String, Object, SynUpdateArgs, Object[])
 * @see #ftSyndump(String)
 */
@Experimental
String ftSynupdate(String index, V synonymGroupId, V... terms);
/**
 * Update a synonym group with additional terms and options.
 *
 * <p>
 * This command creates or updates a synonym group with the specified terms and options. The SKIPINITIALSCAN option can be
 * used to avoid scanning existing documents, affecting only documents indexed after the update.
 * </p>
 *
 * <p>
 * Available options:
 * </p>
 * <ul>
 * <li><strong>SKIPINITIALSCAN:</strong> Skip scanning existing documents</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @param synonymGroupId the synonym group identifier
 * @param args the synupdate arguments (skipInitialScan)
 * @param terms the terms to add to the synonym group
 * @return {@code "OK"} if executed correctly
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.synupdate/">FT.SYNUPDATE</a>
 * @see #ftSynupdate(String, Object, Object[])
 * @see #ftSyndump(String)
 */
@Experimental
String ftSynupdate(String index, V synonymGroupId, SynUpdateArgs<K, V> args, V... terms);
/**
 * Add a suggestion string to an auto-complete suggestion dictionary.
 *
 * <p>
 * This command adds a suggestion string to an auto-complete suggestion dictionary with a specified score. The auto-complete
 * suggestion dictionary is disconnected from the index definitions and leaves creating and updating suggestions
 * dictionaries to the user.
 * </p>
 *
 * <p>
 * Key features and use cases:
 * </p>
 * <ul>
 * <li><strong>Auto-completion:</strong> Build type-ahead search functionality</li>
 * <li><strong>Search suggestions:</strong> Provide query suggestions to users</li>
 * <li><strong>Fuzzy matching:</strong> Support approximate string matching</li>
 * <li><strong>Weighted results:</strong> Control suggestion ranking with scores</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @param suggestion the suggestion string to index
 * @param score the suggestion string's weight, as a floating point number
 * @return the current size of the suggestion dictionary after adding the suggestion
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.sugadd/">FT.SUGADD</a>
 * @see #ftSugadd(Object, Object, double, SugAddArgs)
 * @see #ftSugget(Object, Object)
 * @see #ftSugdel(Object, Object)
 * @see #ftSuglen(Object)
 */
@Experimental
Long ftSugadd(K key, V suggestion, double score);
/**
 * Add a suggestion string to an auto-complete suggestion dictionary with additional options.
 *
 * <p>
 * This command adds a suggestion string to an auto-complete suggestion dictionary with a specified score and optional
 * arguments for incremental updates and payload storage.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @param suggestion the suggestion string to index
 * @param score the suggestion string's weight, as a floating point number
 * @param args the suggestion add arguments (INCR, PAYLOAD)
 * @return the current size of the suggestion dictionary after adding the suggestion
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.sugadd/">FT.SUGADD</a>
 * @see #ftSugadd(Object, Object, double)
 * @see #ftSugget(Object, Object, SugGetArgs)
 * @see #ftSugdel(Object, Object)
 * @see #ftSuglen(Object)
 */
@Experimental
Long ftSugadd(K key, V suggestion, double score, SugAddArgs<K, V> args);
/**
 * Delete a string from a suggestion dictionary.
 *
 * <p>
 * This command removes a suggestion string from an auto-complete suggestion dictionary. Only an exact string match is
 * removed from the dictionary.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @param suggestion the suggestion string to delete
 * @return {@code true} if the string was found and deleted, {@code false} otherwise
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.sugdel/">FT.SUGDEL</a>
 * @see #ftSugadd(Object, Object, double)
 * @see #ftSugget(Object, Object)
 * @see #ftSuglen(Object)
 */
@Experimental
Boolean ftSugdel(K key, V suggestion);
/**
 * Get completion suggestions for a prefix.
 *
 * <p>
 * This command retrieves completion suggestions for a prefix from an auto-complete suggestion dictionary. By default, it
 * returns up to 5 suggestions that match the given prefix. Use {@link #ftSugget(Object, Object, SugGetArgs)} for fuzzy
 * matching, scores, payloads, or a different result limit.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @param prefix the prefix to complete on
 * @return a list of suggestions matching the prefix
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.sugget/">FT.SUGGET</a>
 * @see #ftSugget(Object, Object, SugGetArgs)
 * @see #ftSugadd(Object, Object, double)
 * @see #ftSugdel(Object, Object)
 * @see #ftSuglen(Object)
 */
@Experimental
List<Suggestion<V>> ftSugget(K key, V prefix);
/**
 * Get completion suggestions for a prefix with additional options.
 *
 * <p>
 * This command retrieves completion suggestions for a prefix from an auto-complete suggestion dictionary, with optional
 * arguments for fuzzy matching, score inclusion, payload inclusion, and result limiting.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @param prefix the prefix to complete on
 * @param args the suggestion get arguments (FUZZY, WITHSCORES, WITHPAYLOADS, MAX)
 * @return a list of suggestions matching the prefix, optionally with scores and payloads
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.sugget/">FT.SUGGET</a>
 * @see #ftSugget(Object, Object)
 * @see #ftSugadd(Object, Object, double, SugAddArgs)
 * @see #ftSugdel(Object, Object)
 * @see #ftSuglen(Object)
 */
@Experimental
List<Suggestion<V>> ftSugget(K key, V prefix, SugGetArgs<K, V> args);
/**
 * Get the size of an auto-complete suggestion dictionary.
 *
 * <p>
 * This command returns the current number of suggestions stored in the auto-complete suggestion dictionary.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param key the suggestion dictionary key
 * @return the current size (number of suggestions) of the suggestion dictionary
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.suglen/">FT.SUGLEN</a>
 * @see #ftSugadd(Object, Object, double)
 * @see #ftSugget(Object, Object)
 * @see #ftSugdel(Object, Object)
 */
@Experimental
Long ftSuglen(K key);
/**
 * Drop a search index without deleting the associated documents.
 *
 * <p>
 * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or
 * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without
 * losing data. To delete the indexed documents as well, use {@link #ftDropindex(String, boolean)}.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1)
 * </p>
 *
 * @param index the index name, as a key
 * @return {@code "OK"} if the index was successfully dropped
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.dropindex/">FT.DROPINDEX</a>
 * @see #ftDropindex(String, boolean)
 * @see #ftCreate(String, List)
 */
@Experimental
String ftDropindex(String index);
/**
 * Drop a search index with optional document deletion.
 *
 * <p>
 * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is
 * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents
 * from Redis.
 * </p>
 *
 * <p>
 * <strong>Asynchronous Behavior:</strong> If an index creation is still running ({@link #ftCreate(String, List)} is running
 * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for
 * indexing but not yet processed will remain in the database.
 * </p>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace
 * </p>
 *
 * @param index the index name, as a key
 * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents
 * @return {@code "OK"} if the index was successfully dropped
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.dropindex/">FT.DROPINDEX</a>
 * @see #ftDropindex(String)
 * @see #ftCreate(String, List)
 */
@Experimental
String ftDropindex(String index, boolean deleteDocuments);
/**
 * Search the index with a textual query using default search options.
 *
 * <p>
 * This command performs a full-text search on the specified index using the provided query string. It returns matching
 * documents with their content and metadata. This is the basic search variant that uses default search behavior without
 * additional filtering, sorting, or result customization; see {@link #ftSearch(String, Object, SearchArgs)} for those.
 * </p>
 *
 * <p>
 * The query follows RediSearch query syntax, supporting:
 * </p>
 * <ul>
 * <li><strong>Simple text search:</strong> {@code "hello world"} - searches for documents containing both terms</li>
 * <li><strong>Field-specific search:</strong> {@code "@title:redis"} - searches within specific fields</li>
 * <li><strong>Boolean operators:</strong> {@code "redis AND search"} or {@code "redis | search"}</li>
 * <li><strong>Phrase search:</strong> {@code "\"exact phrase\""} - searches for exact phrase matches</li>
 * <li><strong>Wildcard search:</strong> {@code "redi*"} - prefix matching</li>
 * <li><strong>Numeric ranges:</strong> {@code "@price:[100 200]"} - numeric field filtering</li>
 * <li><strong>Geographic search:</strong> {@code "@location:[lon lat radius unit]"} - geo-spatial queries</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N) where N is the number of results in the result set
 * </p>
 *
 * @param index the index name, as a key
 * @param query the query string following RediSearch query syntax
 * @return the result of the search command containing matching documents, see {@link SearchReply}
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.search/">FT.SEARCH</a>
 * @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/query/">Query syntax</a>
 * @see SearchReply
 * @see SearchArgs
 * @see #ftSearch(String, Object, SearchArgs)
 */
@Experimental
SearchReply<K, V> ftSearch(String index, V query);
/**
 * Search the index with a textual query using advanced search options and filters.
 *
 * <p>
 * This command performs a full-text search on the specified index with advanced configuration options provided through
 * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting,
 * and pagination.
 * </p>
 *
 * <p>
 * The {@link SearchArgs} parameter enables you to specify:
 * </p>
 * <ul>
 * <li><strong>Result options:</strong> NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS</li>
 * <li><strong>Query behavior:</strong> VERBATIM (no stemming), NOSTOPWORDS</li>
 * <li><strong>Filtering:</strong> Numeric filters, geo filters, field filters</li>
 * <li><strong>Result customization:</strong> RETURN specific fields, SUMMARIZE, HIGHLIGHT</li>
 * <li><strong>Sorting and pagination:</strong> SORTBY, LIMIT offset and count</li>
 * <li><strong>Performance options:</strong> TIMEOUT, SLOP, INORDER</li>
 * <li><strong>Language and scoring:</strong> LANGUAGE, SCORER, EXPLAINSCORE</li>
 * </ul>
 *
 * <p>
 * <strong>Performance Considerations:</strong>
 * </p>
 * <ul>
 * <li>Use NOCONTENT when you only need document IDs</li>
 * <li>Specify RETURN fields to limit data transfer</li>
 * <li>Use SORTABLE fields for efficient sorting</li>
 * <li>Apply filters to reduce result set size</li>
 * <li>Use LIMIT for pagination to avoid large result sets</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(N) where N is the number of results in the result set. Complexity varies based on
 * query type, filters, and sorting requirements.
 * </p>
 *
 * @param index the index name, as a key
 * @param query the query string following RediSearch query syntax
 * @param args the search arguments containing advanced options and filters
 * @return the result of the search command containing matching documents and metadata, see {@link SearchReply}
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.search/">FT.SEARCH</a>
 * @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/query/">Query syntax</a>
 * @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/">Advanced concepts</a>
 * @see SearchReply
 * @see SearchArgs
 * @see #ftSearch(String, Object)
 */
@Experimental
SearchReply<K, V> ftSearch(String index, V query, SearchArgs<K, V> args);
/**
 * Run a search query on an index and perform basic aggregate transformations using default options.
 *
 * <p>
 * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike
 * {@link #ftSearch(String, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a
 * pipeline of transformations to produce analytical insights, summaries, and computed values.
 * </p>
 *
 * <p>
 * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations
 * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(String, Object, AggregateArgs)}.
 * </p>
 *
 * <p>
 * Common use cases for aggregations include:
 * </p>
 * <ul>
 * <li><strong>Analytics:</strong> Count documents, calculate averages, find min/max values</li>
 * <li><strong>Reporting:</strong> Group data by categories, time periods, or geographic regions</li>
 * <li><strong>Data transformation:</strong> Apply mathematical functions, format dates, extract values</li>
 * <li><strong>Performance optimization:</strong> Process large datasets server-side instead of client-side</li>
 * </ul>
 *
 * <p>
 * <strong>Time complexity:</strong> O(1) base complexity, but depends on the query and number of results processed
 * </p>
 *
 * @param index the index name, as a key
 * @param query the base filtering query that retrieves documents for aggregation
 * @return the result of the aggregate command containing processed results, see {@link AggregationReply}
 * @since 6.8
 * @see <a href="https://redis.io/docs/latest/commands/ft.aggregate/">FT.AGGREGATE</a>
 * @see <a href=
 *      "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/">Aggregations</a>
 * @see AggregationReply
 * @see AggregateArgs
 * @see #ftAggregate(String, Object, AggregateArgs)
 */
@Experimental
AggregationReply<K, V> ftAggregate(String index, V query);
/**
* Run a search query on an index and perform advanced aggregate transformations with a processing pipeline.
*
* <p>
* This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and
* analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data
* server-side, enabling powerful analytics and data transformation capabilities directly within Redis.
* </p>
*
* <p>
* The aggregation pipeline supports the following operations:
* </p>
* <ul>
* <li><strong>LOAD:</strong> Load specific document attributes for processing</li>
* <li><strong>GROUPBY:</strong> Group results by one or more properties</li>
* <li><strong>REDUCE:</strong> Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)</li>
* <li><strong>SORTBY:</strong> Sort results by specified properties</li>
* <li><strong>APPLY:</strong> Apply mathematical expressions and transformations</li>
* <li><strong>FILTER:</strong> Filter results based on computed values</li>
* <li><strong>LIMIT:</strong> Paginate results efficiently</li>
* <li><strong>WITHCURSOR:</strong> Enable cursor-based pagination for large result sets</li>
* </ul>
*
* <h3>Performance Considerations:</h3>
* <ul>
* <li>Use SORTABLE fields for efficient grouping and sorting operations</li>
* <li>Apply filters early in the pipeline to reduce processing overhead</li>
* <li>Use WITHCURSOR for large result sets to avoid memory issues</li>
* <li>Load only necessary attributes to minimize data transfer</li>
* <li>Consider using LIMIT to restrict result set size</li>
* </ul>
*
* <p>
* <strong>Time complexity:</strong> Non-deterministic, depends on the query and aggregation operations performed. Generally
* linear to the number of results processed through the pipeline.
* </p>
*
* @param index the index name, as a key
* @param query the base filtering query that retrieves documents for aggregation
* @param args the aggregate arguments defining the processing pipeline and operations
* @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply}
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.aggregate/">FT.AGGREGATE</a>
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/">Aggregations</a>
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/#cursor-api">Cursor
* API</a>
* @see SearchReply
* @see AggregateArgs
* @see #ftAggregate(String, Object)
* @see #ftCursorread(String, Cursor)
*/
@Experimental
AggregationReply<K, V> ftAggregate(String index, V query, AggregateArgs<K, V> args);
/**
* Read next results from an existing cursor and optionally override the batch size.
*
* <p>
* This command is used to read the next batch of results from a cursor that was created by
* {@link #ftAggregate(String, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way
* to iterate through large result sets without loading all results into memory at once.
* </p>
*
* <p>
* The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command,
* allowing you to control the batch size for this specific read operation.
* </p>
*
* <p>
* <strong>Time complexity:</strong> O(1)
* </p>
*
* @param index the index name
* @param cursor the cursor obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command
* @param count the number of results to read; overrides the {@code COUNT} from {@code FT.AGGREGATE}
* @return the next batch of results; see {@link AggregationReply}
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.cursor-read/">FT.CURSOR READ</a>
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/#cursor-api">Cursor
* API</a>
* @see AggregationReply
* @see #ftAggregate(String, Object, AggregateArgs)
*/
@Experimental
AggregationReply<K, V> ftCursorread(String index, Cursor cursor, int count);
/**
* Read next results from an existing cursor using the default batch size.
*
* <p>
* This command is used to read the next batch of results from a cursor created by
* {@link #ftAggregate(String, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default
* batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause.
* </p>
*
* <p>
* Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once.
* When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0.
* </p>
*
* <p>
* <strong>Time complexity:</strong> O(1)
* </p>
*
* @param index the index name
* @param cursor the cursor obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command
* @return the next batch of results; see {@link AggregationReply}
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.cursor-read/">FT.CURSOR READ</a>
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/#cursor-api">Cursor
* API</a>
* @see AggregationReply
* @see #ftAggregate(String, Object, AggregateArgs)
*/
@Experimental
AggregationReply<K, V> ftCursorread(String index, Cursor cursor);
/**
* Delete a cursor and free its associated resources.
*
* <p>
* This command is used to explicitly delete a cursor created by {@link #ftAggregate(String, Object, AggregateArgs)} with
* the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to
* read more results from the cursor.
* </p>
*
* <p>
* <strong>Important:</strong> Cursors have a default timeout and may be automatically deleted by Redis if not accessed
* within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to
* free up resources immediately.
* </p>
*
* <p>
* Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(String, Cursor)} or
* {@link #ftCursorread(String, Cursor, int)} will result in an error.
* </p>
*
* <p>
* <strong>Time complexity:</strong> O(1)
* </p>
*
* @param index the index name, as a key
* @param cursor the cursor obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command
* @return {@code "OK"} if the cursor was successfully deleted
* @since 6.8
* @see <a href="https://redis.io/docs/latest/commands/ft.cursor-del/">FT.CURSOR DEL</a>
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/#cursor-api">Cursor
* API</a>
* @see #ftAggregate(String, Object, AggregateArgs)
* @see #ftCursorread(String, Cursor)
* @see #ftCursorread(String, Cursor, int)
*/
@Experimental
String ftCursordel(String index, Cursor cursor);
}
| RediSearchCommands |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/generator/internal/GeneratedAlwaysGeneration.java | {
"start": 478,
"end": 910
} | class ____ implements OnExecutionGenerator {
public GeneratedAlwaysGeneration() {}
@Override
public EnumSet<EventType> getEventTypes() {
return ALL;
}
@Override
public boolean writePropertyValue() {
return false;
}
@Override
public boolean referenceColumnsInSql(Dialect dialect) {
return false;
}
@Override
public String[] getReferencedColumnValues(Dialect dialect) {
return null;
}
}
| GeneratedAlwaysGeneration |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/StoreNewClusterNodeLabels.java | {
"start": 943,
"end": 1249
} | class ____ extends NodeLabelsStoreEvent {
private List<NodeLabel> labels;
public StoreNewClusterNodeLabels(List<NodeLabel> labels) {
super(NodeLabelsStoreEventType.ADD_LABELS);
this.labels = labels;
}
public List<NodeLabel> getLabels() {
return labels;
}
}
| StoreNewClusterNodeLabels |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/custom/CustomSecretSettingsTests.java | {
"start": 1088,
"end": 5216
} | class ____ extends AbstractBWCWireSerializationTestCase<CustomSecretSettings> {
public static CustomSecretSettings createRandom() {
return new CustomSecretSettings(createRandomSecretParameters());
}
private static Map<String, SecureString> createRandomSecretParameters() {
return randomMap(0, 5, () -> tuple(randomAlphaOfLength(5), new SecureString(randomAlphaOfLength(5).toCharArray())));
}
public void testFromMap() {
Map<String, Object> secretParameters = new HashMap<>(
Map.of(CustomSecretSettings.SECRET_PARAMETERS, new HashMap<>(Map.of("test_key", "test_value")))
);
assertThat(
CustomSecretSettings.fromMap(secretParameters),
is(new CustomSecretSettings(Map.of("test_key", new SecureString("test_value".toCharArray()))))
);
}
public void testFromMap_PassedNull_ReturnsNull() {
assertNull(CustomSecretSettings.fromMap(null));
}
public void testFromMap_RemovesNullValues() {
var mapWithNulls = new HashMap<String, Object>();
mapWithNulls.put("value", "abc");
mapWithNulls.put("null", null);
assertThat(
CustomSecretSettings.fromMap(modifiableMap(Map.of(CustomSecretSettings.SECRET_PARAMETERS, mapWithNulls))),
is(new CustomSecretSettings(Map.of("value", new SecureString("abc".toCharArray()))))
);
}
public void testFromMap_Throws_IfValueIsInvalid() {
var exception = expectThrows(
ValidationException.class,
() -> CustomSecretSettings.fromMap(
modifiableMap(Map.of(CustomSecretSettings.SECRET_PARAMETERS, modifiableMap(Map.of("key", Map.of("another_key", "value")))))
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: Map field [secret_parameters] has an entry that is not valid. "
+ "Value type is not one of [String].;"
)
);
}
public void testFromMap_DefaultsToEmptyMap_WhenSecretParametersField_DoesNotExist() {
var map = new HashMap<String, Object>(Map.of("key", new HashMap<>(Map.of("test_key", "test_value"))));
assertThat(CustomSecretSettings.fromMap(map), is(new CustomSecretSettings(Map.of())));
}
public void testXContent() throws IOException {
var entity = new CustomSecretSettings(Map.of("test_key", new SecureString("test_value".toCharArray())));
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
entity.toXContent(builder, null);
String xContentResult = Strings.toString(builder);
var expected = XContentHelper.stripWhitespace("""
{
"secret_parameters": {
"test_key": "test_value"
}
}
""");
assertThat(xContentResult, is(expected));
}
public void testXContent_EmptyParameters() throws IOException {
var entity = new CustomSecretSettings(Map.of());
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
entity.toXContent(builder, null);
String xContentResult = Strings.toString(builder);
var expected = XContentHelper.stripWhitespace("""
{
}
""");
assertThat(xContentResult, is(expected));
}
@Override
protected Writeable.Reader<CustomSecretSettings> instanceReader() {
return CustomSecretSettings::new;
}
@Override
protected CustomSecretSettings createTestInstance() {
return createRandom();
}
@Override
protected CustomSecretSettings mutateInstance(CustomSecretSettings instance) {
return new CustomSecretSettings(
randomValueOtherThan(instance.getSecretParameters(), CustomSecretSettingsTests::createRandomSecretParameters)
);
}
@Override
protected CustomSecretSettings mutateInstanceForVersion(CustomSecretSettings instance, TransportVersion version) {
return instance;
}
}
| CustomSecretSettingsTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/beans/factory/xml/XmlBeanFactoryTests.java | {
"start": 50625,
"end": 51085
} | class ____ in bean defs must now not be 'resolved' when the
* bean def is being parsed, 'cos everything on a bean def is now lazy, but
* must rather only be picked up when the bean is instantiated.
*/
@Test
void classNotFoundWithDefaultBeanClassLoader() {
DefaultListableBeanFactory factory = new DefaultListableBeanFactory();
new XmlBeanDefinitionReader(factory).loadBeanDefinitions(CLASS_NOT_FOUND_CONTEXT);
// cool, no errors, so the rubbish | names |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/main/java/org/apache/camel/component/cxf/jaxws/WSDLServiceFactoryBean.java | {
"start": 1526,
"end": 1761
} | class ____ create a service factory without requiring a service class (SEI). It will pick the
* first one service name and first one port/endpoint name in the WSDL, if there is service name or port/endpoint name
* setted.
*/
public | that |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/CountingBloomFilter.java | {
"start": 3469,
"end": 10755
} | class ____ extends Filter {
/** Storage for the counting buckets */
private long[] buckets;
/** We are using 4bit buckets, so each bucket can count to 15 */
private final static long BUCKET_MAX_VALUE = 15;
/** Default constructor - use with readFields */
public CountingBloomFilter() {}
/**
* Constructor
* @param vectorSize The vector size of <i>this</i> filter.
* @param nbHash The number of hash function to consider.
* @param hashType type of the hashing function (see
* {@link org.apache.hadoop.util.hash.Hash}).
*/
public CountingBloomFilter(int vectorSize, int nbHash, int hashType) {
super(vectorSize, nbHash, hashType);
buckets = new long[buckets2words(vectorSize)];
}
/** returns the number of 64 bit words it would take to hold vectorSize buckets */
private static int buckets2words(int vectorSize) {
return ((vectorSize - 1) >>> 4) + 1;
}
@Override
public void add(Key key) {
if(key == null) {
throw new NullPointerException("key can not be null");
}
int[] h = hash.hash(key);
hash.clear();
for(int i = 0; i < nbHash; i++) {
// find the bucket
int wordNum = h[i] >> 4; // div 16
int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4
long bucketMask = 15L << bucketShift;
long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift;
// only increment if the count in the bucket is less than BUCKET_MAX_VALUE
if(bucketValue < BUCKET_MAX_VALUE) {
// increment by 1
buckets[wordNum] = (buckets[wordNum] & ~bucketMask) | ((bucketValue + 1) << bucketShift);
}
}
}
/**
* Removes a specified key from <i>this</i> counting Bloom filter.
* <p>
* <b>Invariant</b>: nothing happens if the specified key does not belong to <i>this</i> counter Bloom filter.
* @param key The key to remove.
*/
public void delete(Key key) {
if(key == null) {
throw new NullPointerException("Key may not be null");
}
if(!membershipTest(key)) {
throw new IllegalArgumentException("Key is not a member");
}
int[] h = hash.hash(key);
hash.clear();
for(int i = 0; i < nbHash; i++) {
// find the bucket
int wordNum = h[i] >> 4; // div 16
int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4
long bucketMask = 15L << bucketShift;
long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift;
// only decrement if the count in the bucket is between 0 and BUCKET_MAX_VALUE
if(bucketValue >= 1 && bucketValue < BUCKET_MAX_VALUE) {
// decrement by 1
buckets[wordNum] = (buckets[wordNum] & ~bucketMask) | ((bucketValue - 1) << bucketShift);
}
}
}
@Override
public void and(Filter filter) {
if(filter == null
|| !(filter instanceof CountingBloomFilter)
|| filter.vectorSize != this.vectorSize
|| filter.nbHash != this.nbHash) {
throw new IllegalArgumentException("filters cannot be and-ed");
}
CountingBloomFilter cbf = (CountingBloomFilter)filter;
int sizeInWords = buckets2words(vectorSize);
for(int i = 0; i < sizeInWords; i++) {
this.buckets[i] &= cbf.buckets[i];
}
}
@Override
public boolean membershipTest(Key key) {
if(key == null) {
throw new NullPointerException("Key may not be null");
}
int[] h = hash.hash(key);
hash.clear();
for(int i = 0; i < nbHash; i++) {
// find the bucket
int wordNum = h[i] >> 4; // div 16
int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4
long bucketMask = 15L << bucketShift;
if((buckets[wordNum] & bucketMask) == 0) {
return false;
}
}
return true;
}
/**
* This method calculates an approximate count of the key, i.e. how many
* times the key was added to the filter. This allows the filter to be
* used as an approximate <code>key -> count</code> map.
* <p>NOTE: due to the bucket size of this filter, inserting the same
* key more than 15 times will cause an overflow at all filter positions
* associated with this key, and it will significantly increase the error
* rate for this and other keys. For this reason the filter can only be
* used to store small count values <code>0 <= N << 15</code>.
* @param key key to be tested
* @return 0 if the key is not present. Otherwise, a positive value v will
* be returned such that <code>v == count</code> with probability equal to the
* error rate of this filter, and <code>v > count</code> otherwise.
* Additionally, if the filter experienced an underflow as a result of
* {@link #delete(Key)} operation, the return value may be lower than the
* <code>count</code> with the probability of the false negative rate of such
* filter.
*/
public int approximateCount(Key key) {
int res = Integer.MAX_VALUE;
int[] h = hash.hash(key);
hash.clear();
for (int i = 0; i < nbHash; i++) {
// find the bucket
int wordNum = h[i] >> 4; // div 16
int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4
long bucketMask = 15L << bucketShift;
long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift;
if (bucketValue < res) res = (int)bucketValue;
}
if (res != Integer.MAX_VALUE) {
return res;
} else {
return 0;
}
}
@Override
public void not() {
throw new UnsupportedOperationException("not() is undefined for "
+ this.getClass().getName());
}
@Override
public void or(Filter filter) {
if(filter == null
|| !(filter instanceof CountingBloomFilter)
|| filter.vectorSize != this.vectorSize
|| filter.nbHash != this.nbHash) {
throw new IllegalArgumentException("filters cannot be or-ed");
}
CountingBloomFilter cbf = (CountingBloomFilter)filter;
int sizeInWords = buckets2words(vectorSize);
for(int i = 0; i < sizeInWords; i++) {
this.buckets[i] |= cbf.buckets[i];
}
}
@Override
public void xor(Filter filter) {
throw new UnsupportedOperationException("xor() is undefined for "
+ this.getClass().getName());
}
@Override
public String toString() {
StringBuilder res = new StringBuilder();
for(int i = 0; i < vectorSize; i++) {
if(i > 0) {
res.append(" ");
}
int wordNum = i >> 4; // div 16
int bucketShift = (i & 0x0f) << 2; // (mod 16) * 4
long bucketMask = 15L << bucketShift;
long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift;
res.append(bucketValue);
}
return res.toString();
}
// Writable
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
int sizeInWords = buckets2words(vectorSize);
for(int i = 0; i < sizeInWords; i++) {
out.writeLong(buckets[i]);
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int sizeInWords = buckets2words(vectorSize);
buckets = new long[sizeInWords];
for(int i = 0; i < sizeInWords; i++) {
buckets[i] = in.readLong();
}
}
} | CountingBloomFilter |
java | google__auto | value/src/main/java/com/google/auto/value/processor/AutoBuilderProcessor.java | {
"start": 6134,
"end": 6381
} | interface ____ {...}
//
// Using AutoAnnotation and AutoBuilder together you'd write
//
// @AutoAnnotation static MyAnnot newAnnotation(...) { ... }
//
// @AutoBuilder(callMethod = "newAnnotation", ofClass = Some.class)
// | MyAnnotBuilder |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/DependencyStrategy.java | {
"start": 1050,
"end": 1252
} | interface ____ {
/**
* A dependency was detected
*
* @param dependency the dependency such as mvn:com.foo/bar/1.2.3
*/
void onDependency(String dependency);
}
| DependencyStrategy |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/SplitterCollateTest.java | {
"start": 1087,
"end": 2318
} | class ____ extends ContextTestSupport {
@Test
public void testSplitterCollate() throws Exception {
getMockEndpoint("mock:line").expectedMessageCount(2);
List<Object> data = new ArrayList<>();
data.add("A");
data.add("B");
data.add("C");
data.add("D");
data.add("E");
template.sendBody("direct:start", data);
assertMockEndpointsSatisfied();
List chunk = getMockEndpoint("mock:line").getReceivedExchanges().get(0).getIn().getBody(List.class);
List chunk2 = getMockEndpoint("mock:line").getReceivedExchanges().get(1).getIn().getBody(List.class);
assertEquals(3, chunk.size());
assertEquals(2, chunk2.size());
assertEquals("A", chunk.get(0));
assertEquals("B", chunk.get(1));
assertEquals("C", chunk.get(2));
assertEquals("D", chunk2.get(0));
assertEquals("E", chunk2.get(1));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").split(simple("${collate(3)}")).to("mock:line");
}
};
}
}
| SplitterCollateTest |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/conversion/LocaleToStringConversion.java | {
"start": 597,
"end": 1186
} | class ____ extends SimpleConversion {
@Override
protected String getToExpression(ConversionContext conversionContext) {
return "<SOURCE>.toLanguageTag()";
}
@Override
protected String getFromExpression(ConversionContext conversionContext) {
return locale( conversionContext ) + ".forLanguageTag( <SOURCE> )";
}
@Override
protected Set<Type> getFromConversionImportTypes(final ConversionContext conversionContext) {
return Collections.asSet( conversionContext.getTypeFactory().getType( Locale.class ) );
}
}
| LocaleToStringConversion |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/serialization/DoubleSerializer.java | {
"start": 854,
"end": 1390
} | class ____ implements Serializer<Double> {
@Override
public byte[] serialize(String topic, Double data) {
if (data == null)
return null;
long bits = Double.doubleToLongBits(data);
return new byte[] {
(byte) (bits >>> 56),
(byte) (bits >>> 48),
(byte) (bits >>> 40),
(byte) (bits >>> 32),
(byte) (bits >>> 24),
(byte) (bits >>> 16),
(byte) (bits >>> 8),
(byte) bits
};
}
} | DoubleSerializer |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/operators/SelectorFunctionKeysTest.java | {
"start": 6508,
"end": 6783
} | class ____
implements KeySelector<Tuple3<Long, Pojo1, Integer>, Tuple2<Integer, String>> {
@Override
public Tuple2<Integer, String> getKey(Tuple3<Long, Pojo1, Integer> v) {
return new Tuple2<>(v.f2, v.f1.a);
}
}
}
| KeySelector4 |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/rel/rules/AggregateReduceFunctionsRule.java | {
"start": 3875,
"end": 42782
} | class ____ extends RelRule<AggregateReduceFunctionsRule.Config>
implements TransformationRule {
// ~ Static fields/initializers ---------------------------------------------
private static void validateFunction(SqlKind function) {
if (!isValid(function)) {
throw new IllegalArgumentException(
"AggregateReduceFunctionsRule doesn't " + "support function: " + function.sql);
}
}
private static boolean isValid(SqlKind function) {
return SqlKind.AVG_AGG_FUNCTIONS.contains(function)
|| SqlKind.COVAR_AVG_AGG_FUNCTIONS.contains(function)
|| function == SqlKind.SUM;
}
private final Set<SqlKind> functionsToReduce;
// ~ Constructors -----------------------------------------------------------
/** Creates an AggregateReduceFunctionsRule. */
protected AggregateReduceFunctionsRule(Config config) {
super(config);
this.functionsToReduce = ImmutableSet.copyOf(config.actualFunctionsToReduce());
}
@Deprecated // to be removed before 2.0
public AggregateReduceFunctionsRule(
RelOptRuleOperand operand, RelBuilderFactory relBuilderFactory) {
this(
Config.DEFAULT
.withRelBuilderFactory(relBuilderFactory)
.withOperandSupplier(b -> b.exactly(operand))
.as(Config.class)
// reduce all functions handled by this rule
.withFunctionsToReduce(null));
}
@Deprecated // to be removed before 2.0
public AggregateReduceFunctionsRule(
Class<? extends Aggregate> aggregateClass,
RelBuilderFactory relBuilderFactory,
EnumSet<SqlKind> functionsToReduce) {
this(
Config.DEFAULT
.withRelBuilderFactory(relBuilderFactory)
.as(Config.class)
.withOperandFor(aggregateClass)
// reduce specific functions provided by the client
.withFunctionsToReduce(
Objects.requireNonNull(functionsToReduce, "functionsToReduce")));
}
// ~ Methods ----------------------------------------------------------------
@Override
public boolean matches(RelOptRuleCall call) {
if (!super.matches(call)) {
return false;
}
Aggregate oldAggRel = (Aggregate) call.rels[0];
return containsAvgStddevVarCall(oldAggRel.getAggCallList());
}
@Override
public void onMatch(RelOptRuleCall ruleCall) {
Aggregate oldAggRel = (Aggregate) ruleCall.rels[0];
reduceAggs(ruleCall, oldAggRel);
}
/**
* Returns whether any of the aggregates are calls to AVG, STDDEV_*, VAR_*.
*
* @param aggCallList List of aggregate calls
*/
private boolean containsAvgStddevVarCall(List<AggregateCall> aggCallList) {
return aggCallList.stream().anyMatch(this::canReduce);
}
/** Returns whether this rule can reduce a given aggregate function call. */
public boolean canReduce(AggregateCall call) {
return functionsToReduce.contains(call.getAggregation().getKind())
&& config.extraCondition().test(call);
}
/**
* Returns whether this rule can reduce some agg-call, which its arg exists in the aggregate's
* group.
*/
public boolean canReduceAggCallByGrouping(Aggregate oldAggRel, AggregateCall call) {
if (!Aggregate.isSimple(oldAggRel)) {
return false;
}
if (call.hasFilter()
|| call.distinctKeys != null
|| call.collation != RelCollations.EMPTY) {
return false;
}
final List<Integer> argList = call.getArgList();
if (argList.size() != 1) {
return false;
}
if (!oldAggRel.getGroupSet().asSet().contains(argList.get(0))) {
// arg doesn't exist in aggregate's group.
return false;
}
final SqlKind kind = call.getAggregation().getKind();
switch (kind) {
case AVG:
case MAX:
case MIN:
case ANY_VALUE:
case FIRST_VALUE:
case LAST_VALUE:
return true;
default:
return false;
}
}
/**
* Reduces calls to functions AVG, SUM, STDDEV_POP, STDDEV_SAMP, VAR_POP, VAR_SAMP, COVAR_POP,
* COVAR_SAMP, REGR_SXX, REGR_SYY if the function is present in {@link
* AggregateReduceFunctionsRule#functionsToReduce}
*
* <p>It handles newly generated common subexpressions since this was done at the sql2rel stage.
*/
private void reduceAggs(RelOptRuleCall ruleCall, Aggregate oldAggRel) {
RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
List<AggregateCall> oldCalls = oldAggRel.getAggCallList();
final int groupCount = oldAggRel.getGroupCount();
final List<AggregateCall> newCalls = new ArrayList<>();
final Map<AggregateCall, RexNode> aggCallMapping = new HashMap<>();
final List<RexNode> projList = new ArrayList<>();
// pass through group key
for (int i = 0; i < groupCount; ++i) {
projList.add(rexBuilder.makeInputRef(oldAggRel, i));
}
// List of input expressions. If a particular aggregate needs more, it
// will add an expression to the end, and we will create an extra
// project.
final RelBuilder relBuilder = ruleCall.builder();
relBuilder.push(oldAggRel.getInput());
final List<RexNode> inputExprs = new ArrayList<>(relBuilder.fields());
// create new aggregate function calls and rest of project list together
for (AggregateCall oldCall : oldCalls) {
projList.add(reduceAgg(oldAggRel, oldCall, newCalls, aggCallMapping, inputExprs));
}
final int extraArgCount =
inputExprs.size() - relBuilder.peek().getRowType().getFieldCount();
if (extraArgCount > 0) {
relBuilder.project(
inputExprs,
CompositeList.of(
relBuilder.peek().getRowType().getFieldNames(),
Collections.nCopies(extraArgCount, null)));
}
newAggregateRel(relBuilder, oldAggRel, newCalls);
newCalcRel(relBuilder, oldAggRel.getRowType(), projList);
final RelNode build = relBuilder.build();
ruleCall.transformTo(build);
}
private RexNode reduceAgg(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
List<RexNode> inputExprs) {
if (canReduceAggCallByGrouping(oldAggRel, oldCall)) {
// replace original MAX/MIN/AVG/ANY_VALUE/FIRST_VALUE/LAST_VALUE(x) with
// target field of x, when x exists in group
final RexNode reducedNode = reduceAggCallByGrouping(oldAggRel, oldCall);
return reducedNode;
} else if (canReduce(oldCall)) {
final Integer y;
final Integer x;
final SqlKind kind = oldCall.getAggregation().getKind();
switch (kind) {
case SUM:
// replace original SUM(x) with
// case COUNT(x) when 0 then null else SUM0(x) end
return reduceSum(oldAggRel, oldCall, newCalls, aggCallMapping);
case AVG:
// replace original AVG(x) with SUM(x) / COUNT(x)
return reduceAvg(oldAggRel, oldCall, newCalls, aggCallMapping, inputExprs);
case COVAR_POP:
// replace original COVAR_POP(x, y) with
// (SUM(x * y) - SUM(y) * SUM(y) / COUNT(x))
// / COUNT(x))
return reduceCovariance(
oldAggRel, oldCall, true, newCalls, aggCallMapping, inputExprs);
case COVAR_SAMP:
// replace original COVAR_SAMP(x, y) with
// SQRT(
// (SUM(x * y) - SUM(x) * SUM(y) / COUNT(x))
// / CASE COUNT(x) WHEN 1 THEN NULL ELSE COUNT(x) - 1 END)
return reduceCovariance(
oldAggRel, oldCall, false, newCalls, aggCallMapping, inputExprs);
case REGR_SXX:
// replace original REGR_SXX(x, y) with
// REGR_COUNT(x, y) * VAR_POP(y)
assert oldCall.getArgList().size() == 2 : oldCall.getArgList();
x = oldCall.getArgList().get(0);
y = oldCall.getArgList().get(1);
//noinspection SuspiciousNameCombination
return reduceRegrSzz(
oldAggRel, oldCall, newCalls, aggCallMapping, inputExprs, y, y, x);
case REGR_SYY:
// replace original REGR_SYY(x, y) with
// REGR_COUNT(x, y) * VAR_POP(x)
assert oldCall.getArgList().size() == 2 : oldCall.getArgList();
x = oldCall.getArgList().get(0);
y = oldCall.getArgList().get(1);
//noinspection SuspiciousNameCombination
return reduceRegrSzz(
oldAggRel, oldCall, newCalls, aggCallMapping, inputExprs, x, x, y);
case STDDEV_POP:
// replace original STDDEV_POP(x) with
// SQRT(
// (SUM(x * x) - SUM(x) * SUM(x) / COUNT(x))
// / COUNT(x))
return reduceStddev(
oldAggRel, oldCall, true, true, newCalls, aggCallMapping, inputExprs);
case STDDEV_SAMP:
// replace original STDDEV_POP(x) with
// SQRT(
// (SUM(x * x) - SUM(x) * SUM(x) / COUNT(x))
// / CASE COUNT(x) WHEN 1 THEN NULL ELSE COUNT(x) - 1 END)
return reduceStddev(
oldAggRel, oldCall, false, true, newCalls, aggCallMapping, inputExprs);
case VAR_POP:
// replace original VAR_POP(x) with
// (SUM(x * x) - SUM(x) * SUM(x) / COUNT(x))
// / COUNT(x)
return reduceStddev(
oldAggRel, oldCall, true, false, newCalls, aggCallMapping, inputExprs);
case VAR_SAMP:
// replace original VAR_POP(x) with
// (SUM(x * x) - SUM(x) * SUM(x) / COUNT(x))
// / CASE COUNT(x) WHEN 1 THEN NULL ELSE COUNT(x) - 1 END
return reduceStddev(
oldAggRel, oldCall, false, false, newCalls, aggCallMapping, inputExprs);
default:
throw Util.unexpected(kind);
}
} else {
// anything else: preserve original call
RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
final int nGroups = oldAggRel.getGroupCount();
return rexBuilder.addAggCall(
oldCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
}
}
private static AggregateCall createAggregateCallWithBinding(
RelDataTypeFactory typeFactory,
SqlAggFunction aggFunction,
RelDataType operandType,
Aggregate oldAggRel,
AggregateCall oldCall,
int argOrdinal,
int filter) {
final Aggregate.AggCallBinding binding =
new Aggregate.AggCallBinding(
typeFactory,
aggFunction,
ImmutableList.of(),
ImmutableList.of(operandType),
oldAggRel.getGroupCount(),
filter >= 0);
return AggregateCall.create(
aggFunction,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
ImmutableIntList.of(argOrdinal),
filter,
oldCall.distinctKeys,
oldCall.collation,
aggFunction.inferReturnType(binding),
null);
}
private static RexNode reduceAvg(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
@SuppressWarnings("unused") List<RexNode> inputExprs) {
final int nGroups = oldAggRel.getGroupCount();
final RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
final AggregateCall sumCall =
AggregateCall.create(
SqlStdOperatorTable.SUM,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
oldCall.getArgList(),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel.getInput(),
null,
null);
final AggregateCall countCall =
AggregateCall.create(
SqlStdOperatorTable.COUNT,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
oldCall.getArgList(),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel.getInput(),
null,
null);
// NOTE: these references are with respect to the output
// of newAggRel
RexNode numeratorRef =
rexBuilder.addAggCall(
sumCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
final RexNode denominatorRef =
rexBuilder.addAggCall(
countCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
final RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
final RelDataType avgType =
typeFactory.createTypeWithNullability(
oldCall.getType(), numeratorRef.getType().isNullable());
numeratorRef = rexBuilder.ensureType(avgType, numeratorRef, true);
final RexNode divideRef =
rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, numeratorRef, denominatorRef);
return rexBuilder.makeCast(oldCall.getType(), divideRef);
}
private static RexNode reduceSum(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping) {
final int nGroups = oldAggRel.getGroupCount();
RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
final AggregateCall sumZeroCall =
AggregateCall.create(
SqlStdOperatorTable.SUM0,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
oldCall.getArgList(),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel.getInput(),
null,
oldCall.name);
final AggregateCall countCall =
AggregateCall.create(
SqlStdOperatorTable.COUNT,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
oldCall.getArgList(),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel,
null,
null);
// NOTE: these references are with respect to the output
// of newAggRel
RexNode sumZeroRef =
rexBuilder.addAggCall(
sumZeroCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
if (!oldCall.getType().isNullable()) {
// If SUM(x) is not nullable, the validator must have determined that
// nulls are impossible (because the group is never empty and x is never
// null). Therefore we translate to SUM0(x).
return sumZeroRef;
}
RexNode countRef =
rexBuilder.addAggCall(
countCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
return rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.EQUALS,
countRef,
rexBuilder.makeExactLiteral(BigDecimal.ZERO)),
rexBuilder.makeNullLiteral(sumZeroRef.getType()),
sumZeroRef);
}
private static RexNode reduceStddev(
Aggregate oldAggRel,
AggregateCall oldCall,
boolean biased,
boolean sqrt,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
List<RexNode> inputExprs) {
// stddev_pop(x) ==>
// power(
// (sum(x * x) - sum(x) * sum(x) / count(x))
// / count(x),
// .5)
//
// stddev_samp(x) ==>
// power(
// (sum(x * x) - sum(x) * sum(x) / count(x))
// / nullif(count(x) - 1, 0),
// .5)
final int nGroups = oldAggRel.getGroupCount();
final RelOptCluster cluster = oldAggRel.getCluster();
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
assert oldCall.getArgList().size() == 1 : oldCall.getArgList();
final int argOrdinal = oldCall.getArgList().get(0);
final IntPredicate fieldIsNullable = oldAggRel.getInput()::fieldIsNullable;
final RelDataType oldCallType =
typeFactory.createTypeWithNullability(
oldCall.getType(), fieldIsNullable.test(argOrdinal));
final RexNode argRef = rexBuilder.ensureType(oldCallType, inputExprs.get(argOrdinal), true);
final RexNode argSquared =
rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, argRef, argRef);
final int argSquaredOrdinal = lookupOrAdd(inputExprs, argSquared);
// FLINK MODIFICATION BEGIN
final AggregateCall sumArgSquaredAggCall =
createAggregateCallWithBinding(
typeFactory,
SqlStdOperatorTable.SUM,
argSquared.getType(),
oldAggRel,
oldCall,
argSquaredOrdinal,
oldCall.filterArg);
// FLINK MODIFICATION END
final RexNode sumArgSquared =
rexBuilder.addAggCall(
sumArgSquaredAggCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
final AggregateCall sumArgAggCall =
AggregateCall.create(
SqlStdOperatorTable.SUM,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
ImmutableIntList.of(argOrdinal),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel.getInput(),
null,
null);
final RexNode sumArg =
rexBuilder.addAggCall(
sumArgAggCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
final RexNode sumArgCast = rexBuilder.ensureType(oldCallType, sumArg, true);
final RexNode sumSquaredArg =
rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, sumArgCast, sumArgCast);
final AggregateCall countArgAggCall =
AggregateCall.create(
SqlStdOperatorTable.COUNT,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
oldCall.getArgList(),
oldCall.filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel,
null,
null);
final RexNode countArg =
rexBuilder.addAggCall(
countArgAggCall,
nGroups,
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
final RexNode div = divide(biased, rexBuilder, sumArgSquared, sumSquaredArg, countArg);
final RexNode result;
if (sqrt) {
final RexNode half = rexBuilder.makeExactLiteral(new BigDecimal("0.5"));
result = rexBuilder.makeCall(SqlStdOperatorTable.POWER, div, half);
} else {
result = div;
}
return rexBuilder.makeCast(oldCall.getType(), result);
}
private static RexNode reduceAggCallByGrouping(Aggregate oldAggRel, AggregateCall oldCall) {
final RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
final List<Integer> oldGroups = oldAggRel.getGroupSet().asList();
final Integer firstArg = oldCall.getArgList().get(0);
final int index = oldGroups.lastIndexOf(firstArg);
assert index >= 0;
final RexInputRef refByGroup = RexInputRef.of(index, oldAggRel.getRowType().getFieldList());
if (refByGroup.getType().equals(oldCall.getType())) {
return refByGroup;
} else {
return rexBuilder.makeCast(oldCall.getType(), refByGroup);
}
}
private static RexNode getSumAggregatedRexNode(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
RexBuilder rexBuilder,
int argOrdinal,
int filterArg) {
final AggregateCall aggregateCall =
AggregateCall.create(
SqlStdOperatorTable.SUM,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
ImmutableIntList.of(argOrdinal),
filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel.getInput(),
null,
null);
return rexBuilder.addAggCall(
aggregateCall,
oldAggRel.getGroupCount(),
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
}
private static RexNode getSumAggregatedRexNodeWithBinding(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
RelDataType operandType,
int argOrdinal,
int filter) {
RelOptCluster cluster = oldAggRel.getCluster();
final AggregateCall sumArgSquaredAggCall =
createAggregateCallWithBinding(
cluster.getTypeFactory(),
SqlStdOperatorTable.SUM,
operandType,
oldAggRel,
oldCall,
argOrdinal,
filter);
return cluster.getRexBuilder()
.addAggCall(
sumArgSquaredAggCall,
oldAggRel.getGroupCount(),
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
}
private static RexNode getRegrCountRexNode(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
ImmutableIntList argOrdinals,
int filterArg) {
final AggregateCall countArgAggCall =
AggregateCall.create(
SqlStdOperatorTable.REGR_COUNT,
oldCall.isDistinct(),
oldCall.isApproximate(),
oldCall.ignoreNulls(),
oldCall.rexList,
argOrdinals,
filterArg,
oldCall.distinctKeys,
oldCall.collation,
oldAggRel.getGroupCount(),
oldAggRel,
null,
null);
return oldAggRel
.getCluster()
.getRexBuilder()
.addAggCall(
countArgAggCall,
oldAggRel.getGroupCount(),
newCalls,
aggCallMapping,
oldAggRel.getInput()::fieldIsNullable);
}
private static RexNode reduceRegrSzz(
Aggregate oldAggRel,
AggregateCall oldCall,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
List<RexNode> inputExprs,
int xIndex,
int yIndex,
int nullFilterIndex) {
// regr_sxx(x, y) ==>
// sum(y * y, x) - sum(y, x) * sum(y, x) / regr_count(x, y)
//
final RelOptCluster cluster = oldAggRel.getCluster();
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
final IntPredicate fieldIsNullable = oldAggRel.getInput()::fieldIsNullable;
final RelDataType oldCallType =
typeFactory.createTypeWithNullability(
oldCall.getType(),
fieldIsNullable.test(xIndex)
|| fieldIsNullable.test(yIndex)
|| fieldIsNullable.test(nullFilterIndex));
final RexNode argX = rexBuilder.ensureType(oldCallType, inputExprs.get(xIndex), true);
final RexNode argY = rexBuilder.ensureType(oldCallType, inputExprs.get(yIndex), true);
final RexNode argNullFilter =
rexBuilder.ensureType(oldCallType, inputExprs.get(nullFilterIndex), true);
final RexNode argXArgY = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, argX, argY);
final int argSquaredOrdinal = lookupOrAdd(inputExprs, argXArgY);
final RexNode argXAndYNotNullFilter =
rexBuilder.makeCall(
SqlStdOperatorTable.AND,
rexBuilder.makeCall(
SqlStdOperatorTable.AND,
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, argX),
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, argY)),
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, argNullFilter));
final int argXAndYNotNullFilterOrdinal = lookupOrAdd(inputExprs, argXAndYNotNullFilter);
final RexNode sumXY =
getSumAggregatedRexNodeWithBinding(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
argXArgY.getType(),
argSquaredOrdinal,
argXAndYNotNullFilterOrdinal);
final RexNode sumXYCast = rexBuilder.ensureType(oldCallType, sumXY, true);
final RexNode sumX =
getSumAggregatedRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
rexBuilder,
xIndex,
argXAndYNotNullFilterOrdinal);
final RexNode sumY =
xIndex == yIndex
? sumX
: getSumAggregatedRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
rexBuilder,
yIndex,
argXAndYNotNullFilterOrdinal);
final RexNode sumXSumY = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, sumX, sumY);
final RexNode countArg =
getRegrCountRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
ImmutableIntList.of(xIndex),
argXAndYNotNullFilterOrdinal);
RexLiteral zero = rexBuilder.makeExactLiteral(BigDecimal.ZERO);
RexNode nul = rexBuilder.makeNullLiteral(zero.getType());
final RexNode avgSumXSumY =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, countArg, zero),
nul,
rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, sumXSumY, countArg));
final RexNode avgSumXSumYCast = rexBuilder.ensureType(oldCallType, avgSumXSumY, true);
final RexNode result =
rexBuilder.makeCall(SqlStdOperatorTable.MINUS, sumXYCast, avgSumXSumYCast);
return rexBuilder.makeCast(oldCall.getType(), result);
}
private static RexNode reduceCovariance(
Aggregate oldAggRel,
AggregateCall oldCall,
boolean biased,
List<AggregateCall> newCalls,
Map<AggregateCall, RexNode> aggCallMapping,
List<RexNode> inputExprs) {
// covar_pop(x, y) ==>
// (sum(x * y) - sum(x) * sum(y) / regr_count(x, y))
// / regr_count(x, y)
//
// covar_samp(x, y) ==>
// (sum(x * y) - sum(x) * sum(y) / regr_count(x, y))
// / regr_count(count(x, y) - 1, 0)
final RelOptCluster cluster = oldAggRel.getCluster();
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
assert oldCall.getArgList().size() == 2 : oldCall.getArgList();
final int argXOrdinal = oldCall.getArgList().get(0);
final int argYOrdinal = oldCall.getArgList().get(1);
final IntPredicate fieldIsNullable = oldAggRel.getInput()::fieldIsNullable;
final RelDataType oldCallType =
typeFactory.createTypeWithNullability(
oldCall.getType(),
fieldIsNullable.test(argXOrdinal) || fieldIsNullable.test(argYOrdinal));
final RexNode argX = rexBuilder.ensureType(oldCallType, inputExprs.get(argXOrdinal), true);
final RexNode argY = rexBuilder.ensureType(oldCallType, inputExprs.get(argYOrdinal), true);
final RexNode argXAndYNotNullFilter =
rexBuilder.makeCall(
SqlStdOperatorTable.AND,
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, argX),
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, argY));
final int argXAndYNotNullFilterOrdinal = lookupOrAdd(inputExprs, argXAndYNotNullFilter);
final RexNode argXY = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, argX, argY);
final int argXYOrdinal = lookupOrAdd(inputExprs, argXY);
final RexNode sumXY =
getSumAggregatedRexNodeWithBinding(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
argXY.getType(),
argXYOrdinal,
argXAndYNotNullFilterOrdinal);
final RexNode sumX =
getSumAggregatedRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
rexBuilder,
argXOrdinal,
argXAndYNotNullFilterOrdinal);
final RexNode sumY =
getSumAggregatedRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
rexBuilder,
argYOrdinal,
argXAndYNotNullFilterOrdinal);
final RexNode sumXSumY = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, sumX, sumY);
final RexNode countArg =
getRegrCountRexNode(
oldAggRel,
oldCall,
newCalls,
aggCallMapping,
ImmutableIntList.of(argXOrdinal, argYOrdinal),
argXAndYNotNullFilterOrdinal);
final RexNode result = divide(biased, rexBuilder, sumXY, sumXSumY, countArg);
return rexBuilder.makeCast(oldCall.getType(), result);
}
private static RexNode divide(
boolean biased,
RexBuilder rexBuilder,
RexNode sumXY,
RexNode sumXSumY,
RexNode countArg) {
final RexNode avgSumSquaredArg =
rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, sumXSumY, countArg);
final RexNode diff =
rexBuilder.makeCall(SqlStdOperatorTable.MINUS, sumXY, avgSumSquaredArg);
final RexNode denominator;
if (biased) {
denominator = countArg;
} else {
final RexLiteral one = rexBuilder.makeExactLiteral(BigDecimal.ONE);
final RexNode nul = rexBuilder.makeNullLiteral(countArg.getType());
final RexNode countMinusOne =
rexBuilder.makeCall(SqlStdOperatorTable.MINUS, countArg, one);
final RexNode countEqOne =
rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, countArg, one);
denominator =
rexBuilder.makeCall(SqlStdOperatorTable.CASE, countEqOne, nul, countMinusOne);
}
return rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, diff, denominator);
}
/**
* Finds the ordinal of an element in a list, or adds it.
*
* @param list List
* @param element Element to lookup or add
* @param <T> Element type
* @return Ordinal of element in list
*/
private static <T> int lookupOrAdd(List<T> list, T element) {
int ordinal = list.indexOf(element);
if (ordinal == -1) {
ordinal = list.size();
list.add(element);
}
return ordinal;
}
/**
* Does a shallow clone of oldAggRel and updates aggCalls. Could be refactored into Aggregate
* and subclasses - but it's only needed for some subclasses.
*
* @param relBuilder Builder of relational expressions; at the top of its stack is its input
* @param oldAggregate LogicalAggregate to clone.
* @param newCalls New list of AggregateCalls
*/
protected void newAggregateRel(
RelBuilder relBuilder, Aggregate oldAggregate, List<AggregateCall> newCalls) {
relBuilder.aggregate(
relBuilder.groupKey(oldAggregate.getGroupSet(), oldAggregate.getGroupSets()),
newCalls);
}
/**
* Adds a calculation with the expressions to compute the original aggregate calls from the
* decomposed ones.
*
* @param relBuilder Builder of relational expressions; at the top of its stack is its input
* @param rowType The output row type of the original aggregate.
* @param exprs The expressions to compute the original aggregate calls
*/
protected void newCalcRel(RelBuilder relBuilder, RelDataType rowType, List<RexNode> exprs) {
relBuilder.project(exprs, rowType.getFieldNames());
}
/** Rule configuration. */
@Value.Immutable
public | AggregateReduceFunctionsRule |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ser/BasicSerializerFactory.java | {
"start": 21333,
"end": 26525
} | class ____.
*/
ValueSerializer<Object> keySerializer = _findKeySerializer(ctxt, beanDescRef.getClassInfo());
if (mlt instanceof MapType mapType) {
return buildMapSerializer(ctxt, mapType,
beanDescRef, formatOverrides, staticTyping,
keySerializer, elementTypeSerializer, elementValueSerializer);
}
// With Map-like, just 2 options: (1) Custom, (2) Annotations
ValueSerializer<?> ser = null;
MapLikeType mlType = (MapLikeType) type;
for (Serializers serializers : customSerializers()) { // (1) Custom
ser = serializers.findMapLikeSerializer(config, mlType,
beanDescRef, formatOverrides,
keySerializer, elementTypeSerializer, elementValueSerializer);
if (ser != null) {
break;
}
}
if (ser == null) { // (2) Annotations-based ones:
ser = findSerializerByAnnotations(ctxt, type, beanDescRef);
}
if (ser != null) {
if (_factoryConfig.hasSerializerModifiers()) {
for (ValueSerializerModifier mod : _factoryConfig.serializerModifiers()) {
ser = mod.modifyMapLikeSerializer(config, mlType, beanDescRef, ser);
}
}
}
return ser;
}
if (type.isCollectionLikeType()) {
CollectionLikeType clt = (CollectionLikeType) type;
if (clt instanceof CollectionType collectionType) {
return buildCollectionSerializer(ctxt, collectionType,
beanDescRef, formatOverrides, staticTyping,
elementTypeSerializer, elementValueSerializer);
}
// With Collection-like, just 2 options: (1) Custom, (2) Annotations
ValueSerializer<?> ser = null;
CollectionLikeType clType = (CollectionLikeType) type;
for (Serializers serializers : customSerializers()) { // (1) Custom
ser = serializers.findCollectionLikeSerializer(config, clType,
beanDescRef, formatOverrides,
elementTypeSerializer, elementValueSerializer);
if (ser != null) {
break;
}
}
if (ser == null) { // (2) Annotations-based ones:
ser = findSerializerByAnnotations(ctxt, type, beanDescRef);
}
if (ser != null) {
if (_factoryConfig.hasSerializerModifiers()) {
for (ValueSerializerModifier mod : _factoryConfig.serializerModifiers()) {
ser = mod.modifyCollectionLikeSerializer(config, clType, beanDescRef, ser);
}
}
}
return ser;
}
if (type.isArrayType()) {
return buildArraySerializer(ctxt, (ArrayType) type,
beanDescRef, formatOverrides, staticTyping,
elementTypeSerializer, elementValueSerializer);
}
return null;
}
/**
* Helper method that handles configuration details when constructing serializers for
* {@link java.util.List} types that support efficient by-index access
*/
protected ValueSerializer<?> buildCollectionSerializer(SerializationContext ctxt,
CollectionType type, BeanDescription.Supplier beanDescRef, JsonFormat.Value formatOverrides,
boolean staticTyping,
TypeSerializer elementTypeSerializer, ValueSerializer<Object> elementValueSerializer)
{
SerializationConfig config = ctxt.getConfig();
ValueSerializer<?> ser = null;
// Order of lookups:
// 1. Custom serializers
// 2. Annotations (@JsonValue, @JsonDeserialize)
// 3. Defaults
for (Serializers serializers : customSerializers()) { // (1) Custom
ser = serializers.findCollectionSerializer(config, type, beanDescRef, formatOverrides,
elementTypeSerializer, elementValueSerializer);
if (ser != null) {
break;
}
}
if (ser == null) {
ser = findSerializerByAnnotations(ctxt, type, beanDescRef); // (2) Annotations
if (ser == null) {
JsonFormat.Value format = _calculateEffectiveFormat(ctxt,
beanDescRef, Collection.class, formatOverrides);
// We may also want to use serialize Collections "as beans", if (and only if)
// shape specified as "POJO"
if (format.getShape() == JsonFormat.Shape.POJO) {
return null;
}
if (type.isTypeOrSubTypeOf(EnumSet.class)) {
// this may or may not be available (Class doesn't; type of field/method does)
JavaType enumType = type.getContentType();
// and even if nominally there is something, only use if it really is | annotations |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java | {
"start": 2921,
"end": 6956
} | class ____ implements Writable, Configurable {
private String methodName;
private Class<?>[] parameterClasses;
private Object[] parameters;
private Configuration conf;
private long clientVersion;
private int clientMethodsHash;
private String declaringClassProtocolName;
//This could be different from static writableRpcVersion when received
//at server, if client is using a different version.
private long rpcVersion;
@SuppressWarnings("unused") // called when deserializing an invocation
public Invocation() {}
public Invocation(Method method, Object[] parameters) {
this.methodName = method.getName();
this.parameterClasses = method.getParameterTypes();
this.parameters = parameters;
rpcVersion = writableRpcVersion;
if (method.getDeclaringClass().equals(VersionedProtocol.class)) {
//VersionedProtocol is exempted from version check.
clientVersion = 0;
clientMethodsHash = 0;
} else {
this.clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
this.clientMethodsHash = ProtocolSignature.getFingerprint(method
.getDeclaringClass().getMethods());
}
this.declaringClassProtocolName =
RPC.getProtocolName(method.getDeclaringClass());
}
/** The name of the method invoked. */
public String getMethodName() { return methodName; }
/** The parameter classes. */
public Class<?>[] getParameterClasses() { return parameterClasses; }
/** The parameter instances. */
public Object[] getParameters() { return parameters; }
private long getProtocolVersion() {
return clientVersion;
}
@SuppressWarnings("unused")
private int getClientMethodsHash() {
return clientMethodsHash;
}
/**
* Returns the rpc version used by the client.
* @return rpcVersion
*/
public long getRpcVersion() {
return rpcVersion;
}
@Override
@SuppressWarnings("deprecation")
public void readFields(DataInput in) throws IOException {
rpcVersion = in.readLong();
declaringClassProtocolName = UTF8.readString(in);
methodName = UTF8.readString(in);
clientVersion = in.readLong();
clientMethodsHash = in.readInt();
parameters = new Object[in.readInt()];
parameterClasses = new Class[parameters.length];
ObjectWritable objectWritable = new ObjectWritable();
for (int i = 0; i < parameters.length; i++) {
parameters[i] =
ObjectWritable.readObject(in, objectWritable, this.conf);
parameterClasses[i] = objectWritable.getDeclaredClass();
}
}
@Override
@SuppressWarnings("deprecation")
public void write(DataOutput out) throws IOException {
out.writeLong(rpcVersion);
UTF8.writeString(out, declaringClassProtocolName);
UTF8.writeString(out, methodName);
out.writeLong(clientVersion);
out.writeInt(clientMethodsHash);
out.writeInt(parameterClasses.length);
for (int i = 0; i < parameterClasses.length; i++) {
ObjectWritable.writeObject(out, parameters[i], parameterClasses[i],
conf, true);
}
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append(methodName)
.append("(");
for (int i = 0; i < parameters.length; i++) {
if (i != 0)
buffer.append(", ");
buffer.append(parameters[i]);
}
buffer.append(")")
.append(", rpc version="+rpcVersion)
.append(", client version="+clientVersion)
.append(", methodsFingerPrint="+clientMethodsHash);
return buffer.toString();
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return this.conf;
}
}
private static ClientCache CLIENTS=new ClientCache();
private static | Invocation |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/pack/defaultpar_1_0/OtherIncrementListener1.java | {
"start": 195,
"end": 478
} | class ____ {
private static int increment;
public static int getIncrement() {
return OtherIncrementListener1.increment;
}
public static void reset() {
increment = 0;
}
public void increment(Object entity) {
OtherIncrementListener1.increment++;
}
}
| OtherIncrementListener1 |
java | elastic__elasticsearch | test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java | {
"start": 945,
"end": 2168
} | class ____ implements RestHandler {
// Turns out, it's actually quite hard to get the JVM to crash...
private static Method FREE_MEMORY;
private static Object UNSAFE;
static {
try {
AccessController.doPrivileged((PrivilegedExceptionAction<?>) () -> {
Class<?> unsafe = Class.forName("sun.misc.Unsafe");
FREE_MEMORY = unsafe.getMethod("freeMemory", long.class);
Field f = unsafe.getDeclaredField("theUnsafe");
f.setAccessible(true);
UNSAFE = f.get(null);
return null;
});
} catch (Exception e) {
throw new AssertionError(e);
}
}
RestJvmCrashAction() {}
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_crash"));
}
@Override
public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
// BIG BADDA BOOM
try {
AccessController.doPrivileged((PrivilegedExceptionAction<?>) () -> FREE_MEMORY.invoke(UNSAFE, 1L));
} catch (Exception e) {
throw new AssertionError(e);
}
}
}
| RestJvmCrashAction |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sort/SerializerComparatorTestData.java | {
"start": 1490,
"end": 4815
} | class ____ {
@SuppressWarnings("unchecked")
static Tuple2<byte[], StreamRecord<Integer>>[] getOrderedIntTestData() {
IntSerializer intSerializer = new IntSerializer();
DataOutputSerializer outputSerializer = new DataOutputSerializer(intSerializer.getLength());
return IntStream.range(-10, 10)
.mapToObj(
idx -> {
try {
intSerializer.serialize(idx, outputSerializer);
byte[] copyOfBuffer = outputSerializer.getCopyOfBuffer();
outputSerializer.clear();
return Tuple2.of(copyOfBuffer, new StreamRecord<>(idx, idx));
} catch (IOException e) {
throw new AssertionError(e);
}
})
.toArray(Tuple2[]::new);
}
@SuppressWarnings("unchecked")
static Tuple2<byte[], StreamRecord<String>>[] getOrderedStringTestData() {
StringSerializer stringSerializer = new StringSerializer();
DataOutputSerializer outputSerializer = new DataOutputSerializer(64);
return Stream.of(
new String(new byte[] {-1, 0}),
new String(new byte[] {0, 1}),
"A",
"AB",
"ABC",
"ABCD",
"ABCDE",
"ABCDEF",
"ABCDEFG",
"ABCDEFGH")
.map(
str -> {
try {
stringSerializer.serialize(str, outputSerializer);
byte[] copyOfBuffer = outputSerializer.getCopyOfBuffer();
outputSerializer.clear();
return Tuple2.of(copyOfBuffer, new StreamRecord<>(str, 0));
} catch (IOException e) {
throw new AssertionError(e);
}
})
.sorted(
(o1, o2) -> {
byte[] key0 = o1.f0;
byte[] key1 = o2.f0;
int firstLength = key0.length;
int secondLength = key1.length;
int minLength = Math.min(firstLength, secondLength);
for (int i = 0; i < minLength; i++) {
int cmp = Byte.compare(key0[i], key1[i]);
if (cmp != 0) {
return cmp;
}
}
int lengthCmp = Integer.compare(firstLength, secondLength);
if (lengthCmp != 0) {
return lengthCmp;
}
return Long.compare(o1.f1.getTimestamp(), o2.f1.getTimestamp());
})
.toArray(Tuple2[]::new);
}
private SerializerComparatorTestData() {}
}
| SerializerComparatorTestData |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java | {
"start": 795,
"end": 1358
} | class ____ {
public static Property<BuildParameterExtension> loadBuildParams(Project project) {
BuildServiceRegistration<BuildParameterService, BuildParameterService.Params> buildParamsRegistrations = (BuildServiceRegistration<
BuildParameterService,
BuildParameterService.Params>) project.getGradle().getSharedServices().getRegistrations().getByName("buildParams");
Property<BuildParameterExtension> buildParams = buildParamsRegistrations.getParameters().getBuildParams();
return buildParams;
}
}
| ParamsUtils |
java | apache__camel | components/camel-tracing/src/main/java/org/apache/camel/tracing/Tracer.java | {
"start": 9392,
"end": 12764
} | class ____ extends EventNotifierSupport {
public TracingEventNotifier() {
// ignore these
setIgnoreCamelContextEvents(true);
setIgnoreCamelContextInitEvents(true);
setIgnoreRouteEvents(true);
// we need also async processing started events
setIgnoreExchangeAsyncProcessingStartedEvents(false);
}
@Override
public void notify(CamelEvent event) throws Exception {
try {
if (event instanceof CamelEvent.ExchangeSendingEvent ese) {
SpanDecorator sd = getSpanDecorator(ese.getEndpoint());
if (shouldExclude(sd, ese.getExchange(), ese.getEndpoint())) {
return;
}
SpanAdapter parent = ActiveSpanManager.getSpan(ese.getExchange());
InjectAdapter injectAdapter = sd.getInjectAdapter(ese.getExchange().getIn().getHeaders(), encoding);
SpanAdapter span = startSendingEventSpan(sd.getOperationName(ese.getExchange(), ese.getEndpoint()),
sd.getInitiatorSpanKind(), parent, ese.getExchange(), injectAdapter);
sd.pre(span, ese.getExchange(), ese.getEndpoint());
inject(span, injectAdapter);
ActiveSpanManager.activate(ese.getExchange(), span);
if (LOG.isDebugEnabled()) {
LOG.debug("Tracing: start client span: {} with parent {}", span, parent);
}
} else if (event instanceof CamelEvent.ExchangeSentEvent ese) {
SpanDecorator sd = getSpanDecorator(ese.getEndpoint());
if (shouldExclude(sd, ese.getExchange(), ese.getEndpoint())) {
return;
}
SpanAdapter span = ActiveSpanManager.getSpan(ese.getExchange());
if (span != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Tracing: stop client span: {}", span);
}
sd.post(span, ese.getExchange(), ese.getEndpoint());
ActiveSpanManager.deactivate(ese.getExchange());
finishSpan(span);
} else {
LOG.warn("Tracing: could not find managed span for exchange: {}", ese.getExchange());
}
} else if (event instanceof CamelEvent.ExchangeAsyncProcessingStartedEvent eap) {
// no need to filter scopes here. It's ok to close a scope multiple times and
// implementations check if the scope being disposed is current
// and should not do anything if scopes don't match.
ActiveSpanManager.endScope(eap.getExchange());
}
} catch (Exception t) {
// This exception is ignored
LOG.warn("Tracing: Failed to capture tracing data. This exception is ignored.", t);
}
}
private boolean shouldExclude(SpanDecorator sd, Exchange exchange, Endpoint endpoint) {
return !sd.newSpan()
|| isExcluded(exchange, endpoint);
}
}
private final | TracingEventNotifier |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoSubscribeOnValue.java | {
"start": 1162,
"end": 2227
} | class ____<T> extends Mono<T> implements Scannable {
final @Nullable T value;
final Scheduler scheduler;
MonoSubscribeOnValue(@Nullable T value, Scheduler scheduler) {
this.value = value;
this.scheduler = Objects.requireNonNull(scheduler, "scheduler");
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
T v = value;
if (v == null) {
ScheduledEmpty parent = new ScheduledEmpty(actual);
actual.onSubscribe(parent);
try {
parent.setFuture(scheduler.schedule(parent));
}
catch (RejectedExecutionException ree) {
if (parent.future != OperatorDisposables.DISPOSED) {
actual.onError(Operators.onRejectedExecution(ree,
actual.currentContext()));
}
}
}
else {
actual.onSubscribe(new ScheduledScalar<>(actual, v, scheduler));
}
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_ON) return scheduler;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.ASYNC;
if (key == InternalProducerAttr.INSTANCE) return true;
return null;
}
}
| MonoSubscribeOnValue |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java | {
"start": 2400,
"end": 8638
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestJobHistoryEvents.class);
@Test
public void testHistoryEvents() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
//make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
// test start and stop states
((JobHistory)context).init(conf);
((JobHistory)context).start();
assertTrue(context.getStartTime() > 0);
assertEquals(((JobHistory) context).getServiceState(), Service.STATE.STARTED);
// get job before stopping JobHistory
Job parsedJob = context.getJob(jobId);
// stop JobHistory
((JobHistory)context).stop();
assertEquals(((JobHistory) context).getServiceState(), Service.STATE.STOPPED);
assertEquals(2, parsedJob.getCompletedMaps(),
"CompletedMaps not correct");
assertEquals(System.getProperty("user.name"), parsedJob.getUserName());
Map<TaskId, Task> tasks = parsedJob.getTasks();
assertEquals(3, tasks.size(), "No of tasks not correct");
for (Task task : tasks.values()) {
verifyTask(task);
}
Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
assertEquals(2, maps.size(), "No of maps not correct");
Map<TaskId, Task> reduces = parsedJob.getTasks(TaskType.REDUCE);
assertEquals(1, reduces.size(), "No of reduces not correct");
assertEquals(1, parsedJob.getCompletedReduces(), "CompletedReduce not correct");
assertEquals(JobState.SUCCEEDED, parsedJob.getState(), "Job state not correct");
}
/**
* Verify that all the events are flushed on stopping the HistoryHandler
* @throws Exception
*/
@Test
public void testEventsFlushOnStop() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithSpecialHistoryHandler(1, 0, true, this
.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
((JobHistory) context).init(conf);
Job parsedJob = context.getJob(jobId);
assertEquals(1, parsedJob.getCompletedMaps(), "CompletedMaps not correct");
Map<TaskId, Task> tasks = parsedJob.getTasks();
assertEquals(1, tasks.size(), "No of tasks not correct");
verifyTask(tasks.values().iterator().next());
Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
assertEquals(1, maps.size(), "No of maps not correct");
assertEquals(JobState.SUCCEEDED, parsedJob.getState(), "Job state not correct");
}
@Test
public void testJobHistoryEventHandlerIsFirstServiceToStop() {
MRApp app = new MRAppWithSpecialHistoryHandler(1, 0, true, this
.getClass().getName(), true);
Configuration conf = new Configuration();
app.init(conf);
Service[] services = app.getServices().toArray(new Service[0]);
// Verifying that it is the last to be added is same as verifying that it is
// the first to be stopped. CompositeService related tests already validate
// this.
assertEquals("JobHistoryEventHandler",
services[services.length - 1].getName());
}
@Test
public void testAssignedQueue() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
true, "assignedQueue");
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
//make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
// test start and stop states
((JobHistory)context).init(conf);
((JobHistory)context).start();
assertTrue(context.getStartTime() > 0);
assertThat(((JobHistory)context).getServiceState())
.isEqualTo(Service.STATE.STARTED);
// get job before stopping JobHistory
Job parsedJob = context.getJob(jobId);
// stop JobHistory
((JobHistory)context).stop();
assertThat(((JobHistory)context).getServiceState())
.isEqualTo(Service.STATE.STOPPED);
assertEquals("assignedQueue", parsedJob.getQueueName(),
"QueueName not correct");
}
private void verifyTask(Task task) {
assertEquals(TaskState.SUCCEEDED, task.getState(), "Task state not correct");
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
assertEquals(1, attempts.size(), "No of attempts not correct");
for (TaskAttempt attempt : attempts.values()) {
verifyAttempt(attempt);
}
}
private void verifyAttempt(TaskAttempt attempt) {
assertEquals(TaskAttemptState.SUCCEEDED, attempt.getState(),
"TaskAttempt state not correct");
assertNotNull(attempt.getAssignedContainerID());
// Verify the wrong ctor is not being used. Remove after mrv1 is removed.
ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
assertNotEquals(attempt.getAssignedContainerID(), fakeCid);
//Verify complete containerManagerAddress
assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT,
attempt.getAssignedContainerMgrAddress());
}
static | TestJobHistoryEvents |
java | apache__spark | common/kvstore/src/test/java/org/apache/spark/util/kvstore/RocksDBSuite.java | {
"start": 1462,
"end": 14943
} | class ____ {
private RocksDB db;
private File dbpath;
@AfterEach
public void cleanup() throws Exception {
if (db != null) {
db.close();
}
if (dbpath != null) {
JavaUtils.deleteQuietly(dbpath);
}
}
@BeforeEach
public void setup() throws Exception {
dbpath = File.createTempFile("test.", ".rdb");
dbpath.delete();
db = new RocksDB(dbpath);
}
@Test
public void testReopenAndVersionCheckDb() throws Exception {
db.close();
db = null;
assertTrue(dbpath.exists());
db = new RocksDB(dbpath);
assertEquals(RocksDB.STORE_VERSION,
db.serializer.deserializeLong(db.db().get(RocksDB.STORE_VERSION_KEY)));
db.db().put(RocksDB.STORE_VERSION_KEY, db.serializer.serialize(RocksDB.STORE_VERSION + 1));
db.close();
db = null;
assertThrows(UnsupportedStoreVersionException.class, () -> db = new RocksDB(dbpath));
}
@Test
public void testObjectWriteReadDelete() throws Exception {
CustomType1 t = createCustomType1(1);
assertThrows(NoSuchElementException.class, () -> db.read(CustomType1.class, t.key));
db.write(t);
assertEquals(t, db.read(t.getClass(), t.key));
assertEquals(1L, db.count(t.getClass()));
db.delete(t.getClass(), t.key);
assertThrows(NoSuchElementException.class, () -> db.read(t.getClass(), t.key));
// Look into the actual DB and make sure that all the keys related to the type have been
// removed.
assertEquals(0, countKeys(t.getClass()));
}
@Test
public void testMultipleObjectWriteReadDelete() throws Exception {
CustomType1 t1 = createCustomType1(1);
CustomType1 t2 = createCustomType1(2);
t2.id = t1.id;
db.write(t1);
db.write(t2);
assertEquals(t1, db.read(t1.getClass(), t1.key));
assertEquals(t2, db.read(t2.getClass(), t2.key));
assertEquals(2L, db.count(t1.getClass()));
// There should be one "id" index entry with two values.
assertEquals(2, db.count(t1.getClass(), "id", t1.id));
// Delete the first entry; now there should be 3 remaining keys, since one of the "name"
// index entries should have been removed.
db.delete(t1.getClass(), t1.key);
// Make sure there's a single entry in the "id" index now.
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
// Delete the remaining entry, make sure all data is gone.
db.delete(t2.getClass(), t2.key);
assertEquals(0, countKeys(t2.getClass()));
}
@Test
public void testMultipleTypesWriteReadDelete() throws Exception {
CustomType1 t1 = createCustomType1(1);
IntKeyType t2 = new IntKeyType();
t2.key = 2;
t2.id = "2";
t2.values = Arrays.asList("value1", "value2");
ArrayKeyIndexType t3 = new ArrayKeyIndexType();
t3.key = new int[] { 42, 84 };
t3.id = new String[] { "id1", "id2" };
db.write(t1);
db.write(t2);
db.write(t3);
assertEquals(t1, db.read(t1.getClass(), t1.key));
assertEquals(t2, db.read(t2.getClass(), t2.key));
assertEquals(t3, db.read(t3.getClass(), t3.key));
// There should be one "id" index with a single entry for each type.
assertEquals(1, db.count(t1.getClass(), "id", t1.id));
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
assertEquals(1, db.count(t3.getClass(), "id", t3.id));
// Delete the first entry; this should not affect the entries for the second type.
db.delete(t1.getClass(), t1.key);
assertEquals(0, countKeys(t1.getClass()));
assertEquals(1, db.count(t2.getClass(), "id", t2.id));
assertEquals(1, db.count(t3.getClass(), "id", t3.id));
// Delete the remaining entries, make sure all data is gone.
db.delete(t2.getClass(), t2.key);
assertEquals(0, countKeys(t2.getClass()));
db.delete(t3.getClass(), t3.key);
assertEquals(0, countKeys(t3.getClass()));
}
@Test
public void testMetadata() throws Exception {
assertNull(db.getMetadata(CustomType1.class));
CustomType1 t = createCustomType1(1);
db.setMetadata(t);
assertEquals(t, db.getMetadata(CustomType1.class));
db.setMetadata(null);
assertNull(db.getMetadata(CustomType1.class));
}
@Test
public void testUpdate() throws Exception {
CustomType1 t = createCustomType1(1);
db.write(t);
t.name = "anotherName";
db.write(t);
assertEquals(1, db.count(t.getClass()));
assertEquals(1, db.count(t.getClass(), "name", "anotherName"));
assertEquals(0, db.count(t.getClass(), "name", "name"));
}
@Test
public void testRemoveAll() throws Exception {
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
ArrayKeyIndexType o = new ArrayKeyIndexType();
o.key = new int[] { i, j, 0 };
o.id = new String[] { "things" };
db.write(o);
o = new ArrayKeyIndexType();
o.key = new int[] { i, j, 1 };
o.id = new String[] { "more things" };
db.write(o);
}
}
ArrayKeyIndexType o = new ArrayKeyIndexType();
o.key = new int[] { 2, 2, 2 };
o.id = new String[] { "things" };
db.write(o);
assertEquals(9, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
KVIndex.NATURAL_INDEX_NAME,
Set.of(new int[] {0, 0, 0}, new int[] { 2, 2, 2 }));
assertEquals(7, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
"id",
Set.<String[]>of(new String[] { "things" }));
assertEquals(4, db.count(ArrayKeyIndexType.class));
db.removeAllByIndexValues(
ArrayKeyIndexType.class,
"id",
Set.<String[]>of(new String[] { "more things" }));
assertEquals(0, db.count(ArrayKeyIndexType.class));
}
@Test
public void testSkip() throws Exception {
for (int i = 0; i < 10; i++) {
db.write(createCustomType1(i));
}
try (KVStoreIterator<CustomType1> it = db.view(CustomType1.class).closeableIterator()) {
assertTrue(it.hasNext());
assertTrue(it.skip(5));
assertEquals("key5", it.next().key);
assertTrue(it.skip(3));
assertEquals("key9", it.next().key);
assertFalse(it.hasNext());
}
}
@Test
public void testNegativeIndexValues() throws Exception {
List<Integer> expected = Arrays.asList(-100, -50, 0, 50, 100);
expected.forEach(i -> {
try {
db.write(createCustomType1(i));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
try (KVStoreIterator<CustomType1> iterator =
db.view(CustomType1.class).index("int").closeableIterator()) {
List<Integer> results = StreamSupport
.stream(Spliterators.spliteratorUnknownSize(iterator, 0), false)
.map(e -> e.num)
.collect(Collectors.toList());
assertEquals(expected, results);
}
}
@Test
public void testCloseRocksDBIterator() throws Exception {
// SPARK-31929: test when RocksDB.close() is called, related RocksDBIterators
// are closed. And files opened by iterators are also closed.
File dbPathForCloseTest = File
.createTempFile(
"test_db_close.",
".rdb");
dbPathForCloseTest.delete();
RocksDB dbForCloseTest = new RocksDB(dbPathForCloseTest);
for (int i = 0; i < 8192; i++) {
dbForCloseTest.write(createCustomType1(i));
}
String key = dbForCloseTest
.view(CustomType1.class).iterator().next().key;
assertEquals("key0", key);
Iterator<CustomType1> it0 = dbForCloseTest
.view(CustomType1.class).max(1).iterator();
while (it0.hasNext()) {
it0.next();
}
System.gc();
Iterator<CustomType1> it1 = dbForCloseTest
.view(CustomType1.class).iterator();
assertEquals("key0", it1.next().key);
try (KVStoreIterator<CustomType1> it2 = dbForCloseTest
.view(CustomType1.class).closeableIterator()) {
assertEquals("key0", it2.next().key);
}
dbForCloseTest.close();
assertTrue(dbPathForCloseTest.exists());
JavaUtils.deleteQuietly(dbPathForCloseTest);
assertTrue(!dbPathForCloseTest.exists());
}
@Test
public void testHasNextAfterIteratorClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// iter should be true
assertTrue(iter.hasNext());
// close iter
iter.close();
// iter.hasNext should be false after iter close
assertFalse(iter.hasNext());
}
@Test
public void testHasNextAfterDBClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// iter should be true
assertTrue(iter.hasNext());
// close db
db.close();
// iter.hasNext should be false after db close
assertFalse(iter.hasNext());
}
@Test
public void testNextAfterIteratorClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// iter should be true
assertTrue(iter.hasNext());
// close iter
iter.close();
// iter.next should throw NoSuchElementException after iter close
assertThrows(NoSuchElementException.class, iter::next);
}
@Test
public void testNextAfterDBClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// iter should be true
assertTrue(iter.hasNext());
// close db
iter.close();
// iter.next should throw NoSuchElementException after db close
assertThrows(NoSuchElementException.class, iter::next);
}
@Test
public void testSkipAfterIteratorClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// close iter
iter.close();
// skip should always return false after iter close
assertFalse(iter.skip(0));
assertFalse(iter.skip(1));
}
@Test
public void testSkipAfterDBClose() throws Exception {
db.write(createCustomType1(0));
KVStoreIterator<CustomType1> iter =
db.view(CustomType1.class).closeableIterator();
// iter should be true
assertTrue(iter.hasNext());
// close db
db.close();
// skip should always return false after db close
assertFalse(iter.skip(0));
assertFalse(iter.skip(1));
}
@Test
public void testResourceCleaner() throws Exception {
File dbPathForCleanerTest = File.createTempFile(
"test_db_cleaner.", ".rdb");
dbPathForCleanerTest.delete();
RocksDB dbForCleanerTest = new RocksDB(dbPathForCleanerTest);
try {
for (int i = 0; i < 8192; i++) {
dbForCleanerTest.write(createCustomType1(i));
}
RocksDBIterator<CustomType1> rocksDBIterator =
(RocksDBIterator<CustomType1>) dbForCleanerTest.view(CustomType1.class).iterator();
Reference<RocksDBIterator<?>> reference = new WeakReference<>(rocksDBIterator);
assertNotNull(reference);
RocksDBIterator.ResourceCleaner resourceCleaner = rocksDBIterator.getResourceCleaner();
assertFalse(resourceCleaner.isCompleted());
// Manually set rocksDBIterator to null, to be GC.
rocksDBIterator = null;
// 100 times gc, the rocksDBIterator should be GCed.
int count = 0;
while (count < 100 && !reference.refersTo(null)) {
System.gc();
count++;
Thread.sleep(100);
}
// check rocksDBIterator should be GCed
assertTrue(reference.refersTo(null));
// Verify that the Cleaner will be executed after a period of time,
// and status will become false.
assertTrue(resourceCleaner.isCompleted());
} finally {
dbForCleanerTest.close();
JavaUtils.deleteQuietly(dbPathForCleanerTest);
}
}
@Test
public void testMultipleTypesWriteAll() throws Exception {
List<CustomType1> type1List = Arrays.asList(
createCustomType1(1),
createCustomType1(2),
createCustomType1(3),
createCustomType1(4)
);
List<CustomType2> type2List = Arrays.asList(
createCustomType2(10),
createCustomType2(11),
createCustomType2(12),
createCustomType2(13)
);
List fullList = new ArrayList();
fullList.addAll(type1List);
fullList.addAll(type2List);
db.writeAll(fullList);
for (CustomType1 value : type1List) {
assertEquals(value, db.read(value.getClass(), value.key));
}
for (CustomType2 value : type2List) {
assertEquals(value, db.read(value.getClass(), value.key));
}
}
private CustomType1 createCustomType1(int i) {
CustomType1 t = new CustomType1();
t.key = "key" + i;
t.id = "id" + i;
t.name = "name" + i;
t.num = i;
t.child = "child" + i;
return t;
}
private CustomType2 createCustomType2(int i) {
CustomType2 t = new CustomType2();
t.key = "key" + i;
t.id = "id" + i;
t.parentId = "parent_id" + (i / 2);
return t;
}
private int countKeys(Class<?> type) throws Exception {
byte[] prefix = db.getTypeInfo(type).keyPrefix();
int count = 0;
try (RocksIterator it = db.db().newIterator()) {
it.seek(prefix);
while (it.isValid()) {
byte[] key = it.key();
if (RocksDBIterator.startsWith(key, prefix)) {
count++;
}
it.next();
}
}
return count;
}
}
| RocksDBSuite |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java | {
"start": 24744,
"end": 32095
} | class ____ extends LeveldbTimelineStore {
static final AtomicInteger ENTITIES_COUNT = new AtomicInteger(0);
TestTimelineStore() {
super();
}
@Override
public TimelinePutResponse put(TimelineEntities entities) {
ENTITIES_COUNT.getAndAdd(entities.getEntities().size());
return new TimelinePutResponse();
}
public static int getEntitiesCount() {
return ENTITIES_COUNT.get();
}
}
@Test
void testIfAnyDuplicateEntities() throws Exception {
// Create an application with some entities
ApplicationId appId =
ApplicationId.fromString("application_1501509265053_0002");
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path activeDirPath = getTestRootPath("active1");
Path doneDirPath = getTestRootPath("done1");
Path userBase = new Path(activeDirPath, user);
Path userAppRoot = new Path(userBase, appId.toString());
Path attemptDirPath = new Path(userAppRoot, getAttemptDirName(appId));
String logFileName = EntityGroupFSTimelineStore.ENTITY_LOG_PREFIX
+ EntityGroupPlugInForTest.getStandardTimelineGroupId(appId);
createTestFiles(appId, attemptDirPath, logFileName);
// stop the default store before creating new store to get the lock
store.stop();
EntityGroupFSTimelineStore newStore = new EntityGroupFSTimelineStore() {
@Override
protected AppState getAppState(ApplicationId appId) throws IOException {
return AppState.ACTIVE;
}
};
try {
// Start ATS with TestTimelineStore
Configuration newConfig = new YarnConfiguration(config);
newConfig.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_STORE,
TestTimelineStore.class.getName());
newConfig.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
doneDirPath.toString());
newConfig.set(
YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
activeDirPath.toString());
newStore.init(newConfig);
newStore.setFs(fs);
newStore.start();
// Validate if the initial entities count are correct
newStore.scanActiveLogs();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return TestTimelineStore.getEntitiesCount() == 2;
}
}, 100, 10000);
assertEquals(2, TestTimelineStore.getEntitiesCount(), "Wrong Initial Entities Count");
// Append the Summary log file with few more entities
TimelineEntities entities = PluginStoreTestUtils.generateTestEntities();
FSDataOutputStream outStream = fs.append(
new Path(attemptDirPath, TEST_SUMMARY_LOG_FILE_NAME));
JsonGenerator jsonGenerator
= new JsonFactory().createGenerator((OutputStream) outStream);
jsonGenerator.setPrettyPrinter(new MinimalPrettyPrinter("\n"));
ObjectMapper objMapper = new ObjectMapper();
objMapper.setAnnotationIntrospector(
new JaxbAnnotationIntrospector(TypeFactory.defaultInstance()));
objMapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
for (TimelineEntity entity : entities.getEntities()) {
objMapper.writeValue(jsonGenerator, entity);
}
outStream.close();
// Validate if there are any duplicates
newStore.scanActiveLogs();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return TestTimelineStore.getEntitiesCount() == 4;
}
}, 100, 10000);
assertEquals(4, TestTimelineStore.getEntitiesCount(), "Duplicate Entities present");
} finally {
if (newStore != null) {
newStore.stop();
}
fs.delete(userAppRoot, true);
}
}
@Test
void testStateStoreAndRecovery() throws Exception {
// Prepare the AppLogs Data
EntityGroupFSTimelineStore.AppLogs appLogs =
store.new AppLogs(mainTestAppId, mainTestAppDirPath, AppState.COMPLETED);
appLogs.scanForLogs();
List<LogInfo> summaryLogs = appLogs.getSummaryLogs();
List<EntityGroupFSTimelineStore.AppLogs> logsList = new ArrayList<>();
logsList.add(appLogs);
// Store the Log files
Path checkpointFile = new Path(fs.getHomeDirectory(), "atscheckpoint");
try (DataOutputStream dataOutputStream = fs.create(checkpointFile)) {
store.storeLogFiles(logsList, dataOutputStream);
} catch (IOException e) {
fail("Failed to store the log files");
}
// Recover the Log files and validate the contents
try (DataInputStream dataInputStream = fs.open(checkpointFile)) {
HashMap<String, Pair<Long, Long>> logFiles =
store.recoverLogFiles(dataInputStream);
assertEquals(summaryLogs.size(), logFiles.size());
for (LogInfo logInfo : summaryLogs) {
String logFileName = logInfo.getAttemptDirName() +
Path.SEPARATOR + logInfo.getFilename();
Pair<Long, Long> pair = logFiles.get(logFileName);
assertNotNull(pair, "Failed to recover " + logFileName);
assertTrue(logInfo.getLastProcessedTime() == pair.getLeft(),
"LastProcessedTime is not same");
assertTrue(logInfo.getOffset() == pair.getRight(),
"Offset is not same");
}
} catch (IOException e) {
fail("Failed to recover the log files");
}
}
private EntityGroupFSTimelineStore createAndStartTimelineStore(
AppState appstate) {
// stop before creating new store to get the lock
store.stop();
EntityGroupFSTimelineStore newStore = new EntityGroupFSTimelineStore() {
@Override
protected AppState getAppState(ApplicationId appId) throws IOException {
return appstate;
}
};
newStore.init(config);
newStore.setFs(fs);
newStore.start();
return newStore;
}
private void createTestFiles(ApplicationId appId, Path attemptDirPath)
throws IOException {
createTestFiles(appId, attemptDirPath, mainEntityLogFileName);
}
private void createTestFiles(ApplicationId appId, Path attemptDirPath,
String logPath) throws IOException {
TimelineEntities entities = PluginStoreTestUtils.generateTestEntities();
PluginStoreTestUtils.writeEntities(entities,
new Path(attemptDirPath, TEST_SUMMARY_LOG_FILE_NAME), fs);
Map<String, Set<Object>> primaryFilters = new HashMap<>();
Set<Object> appSet = new HashSet<Object>();
appSet.add(appId.toString());
primaryFilters.put(EntityGroupPlugInForTest.APP_ID_FILTER_NAME, appSet);
entityNew = PluginStoreTestUtils
.createEntity(appId.toString(), "type_3", 789L, null, null,
primaryFilters, null, "domain_id_1");
TimelineEntities entityList = new TimelineEntities();
entityList.addEntity(entityNew);
PluginStoreTestUtils.writeEntities(entityList,
new Path(attemptDirPath, logPath), fs);
FSDataOutputStream out = fs.create(
new Path(attemptDirPath, TEST_DOMAIN_LOG_FILE_NAME));
out.close();
}
private static Path getTestRootPath(String pathString) {
return fileContextTestHelper.getTestRootPath(fc, pathString);
}
private static String getAttemptDirName(ApplicationId appId) {
return ApplicationAttemptId.appAttemptIdStrPrefix + appId.toString() + "_1";
}
}
| TestTimelineStore |
java | google__gson | gson/src/test/java/com/google/gson/functional/ParameterizedTypesTest.java | {
"start": 17082,
"end": 17182
} | class ____ extends Quantity {
@SuppressWarnings("unused")
int q2 = 20;
}
private | MyQuantity |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/NamespaceHttpX509Tests.java | {
"start": 8591,
"end": 9528
} | class ____ {
@Bean
UserDetailsService userDetailsService() {
UserDetails user = User.withDefaultPasswordEncoder()
.username("rod@example.com")
.password("password")
.roles("USER", "ADMIN")
.build();
return new InMemoryUserDetailsManager(user);
}
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().hasRole("USER"))
.x509((x509) -> x509
.x509PrincipalExtractor(this::extractCommonName));
// @formatter:on
return http.build();
}
private String extractCommonName(X509Certificate certificate) {
X500Principal principal = certificate.getSubjectX500Principal();
return new X500Name(principal.getName()).getRDNs(BCStyle.CN)[0].getFirst().getValue().toString();
}
}
@EnableWebMvc
@Configuration
@EnableWebSecurity
public static | CustomPrincipalExtractorConfig |
java | apache__hadoop | hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java | {
"start": 1098,
"end": 2404
} | interface ____ {
public static final String ZKSERVICE_PREFIX =
RegistryConstants.REGISTRY_PREFIX + "zk.service.";
/**
* Key to define the JAAS context for the ZK service: {@value}.
*/
public static final String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT =
ZKSERVICE_PREFIX + "service.jaas.context";
/**
* ZK servertick time: {@value}
*/
public static final String KEY_ZKSERVICE_TICK_TIME =
ZKSERVICE_PREFIX + "ticktime";
/**
* host to register on: {@value}.
*/
public static final String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host";
/**
* Default host to serve on -this is <code>localhost</code> as it
* is the only one guaranteed to be available: {@value}.
*/
public static final String DEFAULT_ZKSERVICE_HOST = "localhost";
/**
* port; 0 or below means "any": {@value}
*/
public static final String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port";
/**
* Directory containing data: {@value}
*/
public static final String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir";
/**
* Should failed SASL clients be allowed: {@value}?
*
* Default is the ZK default: true
*/
public static final String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS =
ZKSERVICE_PREFIX + "allow.failed.sasl.clients";
}
| MicroZookeeperServiceKeys |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/xml/OracleXmlTableFunction.java | {
"start": 1692,
"end": 3155
} | class ____ extends XmlTableSetReturningFunctionTypeResolver {
@Override
protected void addSelectableMapping(List<SelectableMapping> selectableMappings, String name, JdbcMapping type, SqmToSqlAstConverter converter) {
if ( isEncodedBoolean( type ) ) {
//noinspection unchecked
final JdbcLiteralFormatter<Object> jdbcLiteralFormatter = type.getJdbcLiteralFormatter();
final Dialect dialect = converter.getCreationContext().getDialect();
final WrapperOptions wrapperOptions = converter.getCreationContext().getWrapperOptions();
final Object trueValue = type.convertToRelationalValue( true );
final Object falseValue = type.convertToRelationalValue( false );
final String trueFragment = jdbcLiteralFormatter.toJdbcLiteral( trueValue, dialect, wrapperOptions );
final String falseFragment = jdbcLiteralFormatter.toJdbcLiteral( falseValue, dialect, wrapperOptions );
selectableMappings.add( new SelectableMappingImpl(
"",
name,
new SelectablePath( name ),
"decode(" + Template.TEMPLATE + "." + name + ",'true'," + trueFragment + ",'false'," + falseFragment + ")",
null,
"varchar2(5)",
null,
null,
null,
null,
null,
false,
false,
false,
false,
false,
false,
type
));
}
else {
super.addSelectableMapping( selectableMappings, name, type, converter );
}
}
}
}
| OracleXmlTableSetReturningFunctionTypeResolver |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FunctionalInterfaceClashTest.java | {
"start": 9807,
"end": 10383
} | class ____ extends BaseClass {
@Override
// BUG: Diagnostic contains: disambiguate with:
void conduct(Consumer<String> c) {}
void conduct(Function<String, Integer> f) {}
}
""")
.doTest();
}
@Test
public void negative_overriddenMethod() {
testHelper
.addSourceLines(
"pkg2/BaseClass.java",
"""
package pkg2;
import java.util.function.Function;
import java.util.function.Consumer;
public | ConductClass |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/errorhandler/RefErrorHandlerDefinition.java | {
"start": 1326,
"end": 2570
} | class ____ extends BaseErrorHandlerDefinition {
public static final String DEFAULT_ERROR_HANDLER_BUILDER = "CamelDefaultErrorHandlerBuilder";
@XmlAttribute(required = true)
@Metadata(javaType = "org.apache.camel.ErrorHandlerFactory")
private String ref;
public RefErrorHandlerDefinition() {
}
public RefErrorHandlerDefinition(RefErrorHandlerDefinition source) {
this.ref = source.ref;
}
@Override
public RefErrorHandlerDefinition copyDefinition() {
return new RefErrorHandlerDefinition(this);
}
public RefErrorHandlerDefinition(String ref) {
this.ref = ref;
}
@Override
public boolean supportTransacted() {
return false;
}
@Override
public ErrorHandlerFactory cloneBuilder() {
// clone not needed
return this;
}
public String getRef() {
return ref;
}
/**
* References to an existing or custom error handler.
*/
public void setRef(String ref) {
this.ref = ref;
}
/**
* References to an existing or custom error handler.
*/
public RefErrorHandlerDefinition ref(String ref) {
setRef(ref);
return this;
}
}
| RefErrorHandlerDefinition |
java | apache__camel | components/camel-twitter/src/test/java/org/apache/camel/component/twitter/SearchDirectIT.java | {
"start": 1189,
"end": 1476
} | class ____ extends CamelTwitterConsumerITSupport {
@Override
protected String getUri() {
return "twitter-search://java?type=direct&";
}
@Override
protected Logger getLogger() {
return LoggerFactory.getLogger(SearchDirectIT.class);
}
}
| SearchDirectIT |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/service/operation/OperationManager.java | {
"start": 8543,
"end": 21262
} | class ____ implements AutoCloseable {
private static final long WAIT_CLEAN_UP_MILLISECONDS = 5_000;
private final OperationHandle operationHandle;
private final AtomicReference<OperationStatus> status;
private final Callable<ResultFetcher> resultSupplier;
private volatile FutureTask<?> invocation;
private volatile ResultFetcher resultFetcher;
private volatile SqlExecutionException operationError;
public Operation(OperationHandle operationHandle, Callable<ResultFetcher> resultSupplier) {
this.operationHandle = operationHandle;
this.status = new AtomicReference<>(OperationStatus.INITIALIZED);
this.resultSupplier = resultSupplier;
}
void runBefore() {
updateState(OperationStatus.RUNNING);
}
void runAfter() {
updateState(OperationStatus.FINISHED);
}
public void run() {
try {
operationLock.acquire();
LOG.debug(
String.format(
"Operation %s acquires the operation lock.", operationHandle));
updateState(OperationStatus.PENDING);
Runnable work =
() -> {
try {
runBefore();
resultFetcher = resultSupplier.call();
runAfter();
} catch (InterruptedException e) {
// User cancel the execution.
LOG.error(
String.format(
"Operation %s is interrupted.", operationHandle),
e);
} catch (Throwable t) {
processThrowable(t);
}
};
// The returned future by the ExecutorService will not wrap the
// done method.
FutureTask<Void> copiedTask =
new FutureTask<Void>(work, null) {
@Override
protected void done() {
LOG.debug(
String.format(
"Release the operation lock: %s when task completes.",
operationHandle));
operationLock.release();
}
};
service.submit(copiedTask);
invocation = copiedTask;
// If it is canceled or closed, terminate the invocation.
OperationStatus current = status.get();
if (current == OperationStatus.CLOSED || current == OperationStatus.CANCELED) {
LOG.debug(
String.format(
"The current status is %s after updating the operation %s status to %s. Close the resources.",
current, operationHandle, OperationStatus.PENDING));
closeResources();
}
} catch (Throwable t) {
processThrowable(t);
throw new SqlGatewayException(
"Failed to submit the operation to the thread pool.", t);
} finally {
if (invocation == null) {
// failed to submit to the thread pool and release the lock.
LOG.debug(
String.format(
"Operation %s releases the operation lock when failed to submit the operation to the pool.",
operationHandle));
operationLock.release();
}
}
}
public void cancel() {
updateState(OperationStatus.CANCELED);
closeResources();
}
public void close() {
updateState(OperationStatus.CLOSED);
closeResources();
}
public ResultSet fetchResults(long token, int maxRows) {
return fetchResultsInternal(() -> resultFetcher.fetchResults(token, maxRows));
}
public ResultSet fetchResults(FetchOrientation orientation, int maxRows) {
return fetchResultsInternal(() -> resultFetcher.fetchResults(orientation, maxRows));
}
public ResolvedSchema getResultSchema() throws Exception {
awaitTermination();
OperationStatus current = status.get();
if (current != OperationStatus.FINISHED) {
throw new IllegalStateException(
String.format(
"The result schema is available when the Operation is in FINISHED state but the current status is %s.",
status));
}
return resultFetcher.getResultSchema();
}
public OperationInfo getOperationInfo() {
return new OperationInfo(status.get(), operationError);
}
public void awaitTermination() throws Exception {
synchronized (status) {
while (!status.get().isTerminalStatus()) {
status.wait();
}
}
OperationStatus current = status.get();
if (current == OperationStatus.ERROR) {
throw operationError;
}
}
private ResultSet fetchResultsInternal(Supplier<ResultSet> results) {
OperationStatus currentStatus = status.get();
if (currentStatus == OperationStatus.ERROR) {
throw operationError;
} else if (currentStatus == OperationStatus.FINISHED) {
return results.get();
} else if (currentStatus == OperationStatus.RUNNING
|| currentStatus == OperationStatus.PENDING
|| currentStatus == OperationStatus.INITIALIZED) {
return NotReadyResult.INSTANCE;
} else {
throw new SqlGatewayException(
String.format(
"Can not fetch results from the %s in %s status.",
operationHandle, currentStatus));
}
}
private void updateState(OperationStatus toStatus) {
OperationStatus currentStatus;
do {
currentStatus = status.get();
boolean isValid = OperationStatus.isValidStatusTransition(currentStatus, toStatus);
if (!isValid) {
String message =
String.format(
"Failed to convert the Operation Status from %s to %s for %s.",
currentStatus, toStatus, operationHandle);
throw new SqlGatewayException(message);
}
} while (!status.compareAndSet(currentStatus, toStatus));
synchronized (status) {
status.notifyAll();
}
LOG.debug(
String.format(
"Convert operation %s from %s to %s.",
operationHandle, currentStatus, toStatus));
}
private void closeResources() {
if (invocation != null && !invocation.isDone()) {
invocation.cancel(true);
waitTaskCleanup(invocation);
LOG.debug(String.format("Cancel the operation %s.", operationHandle));
}
if (resultFetcher != null) {
resultFetcher.close();
}
}
private void processThrowable(Throwable t) {
String msg = String.format("Failed to execute the operation %s.", operationHandle);
LOG.error(msg, t);
operationError = new SqlExecutionException(msg, t);
// Update status should be placed at last. Because the client is able to fetch exception
// when status is error.
updateState(OperationStatus.ERROR);
}
private void waitTaskCleanup(FutureTask<?> invocation) {
// thread is cleaned async, waiting for a while
Deadline deadline = Deadline.fromNow(Duration.ofMillis(WAIT_CLEAN_UP_MILLISECONDS));
while (deadline.hasTimeLeft()) {
Optional<Thread> threadOptional = getThreadInFuture(invocation);
if (!threadOptional.isPresent()) {
// thread has been cleaned up
return;
}
// try to release the use of the processor to let the task finish its cleanup.
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
}
Optional<Thread> threadOptional = getThreadInFuture(invocation);
// Currently, SQL Gateway still doesn't have health reporter to notify the users the
// resource leak or HA to restart the running process. So we just dump the thread and
// throw an exception to notify the users.
threadOptional.ifPresent(this::throwExceptionWithThreadStackTrace);
}
private Optional<Thread> getThreadInFuture(FutureTask<?> invocation) {
try {
Class<?> k = FutureTask.class;
Field runnerField = k.getDeclaredField("runner");
runnerField.setAccessible(true);
Thread t = (Thread) runnerField.get(invocation);
return Optional.of(t);
} catch (Throwable e) {
// can't get thread
return Optional.empty();
}
}
private void throwExceptionWithThreadStackTrace(Thread thread) {
StackTraceElement[] stack = thread.getStackTrace();
StringBuilder stackTraceStr = new StringBuilder();
for (StackTraceElement e : stack) {
stackTraceStr.append("\tat ").append(e).append("\n");
}
String msg =
String.format(
"Operation '%s' did not react to \"Future.cancel(true)\" and "
+ "is stuck for %s seconds in method.\n"
+ "Thread name: %s, thread state: %s, thread stacktrace:\n%s",
operationHandle,
WAIT_CLEAN_UP_MILLISECONDS / 1000,
thread.getName(),
thread.getState(),
stackTraceStr);
throw new SqlCancelException(msg);
}
}
// -------------------------------------------------------------------------------------------
@VisibleForTesting
public int getOperationCount() {
return submittedOperations.size();
}
@VisibleForTesting
public Operation getOperation(OperationHandle operationHandle) {
return readLock(
() -> {
Operation operation = submittedOperations.get(operationHandle);
if (operation == null) {
throw new SqlGatewayException(
String.format(
"Can not find the submitted operation in the OperationManager with the %s.",
operationHandle));
}
return operation;
});
}
private void submitOperationInternal(OperationHandle handle, Operation operation) {
writeLock(() -> submittedOperations.put(handle, operation));
operation.run();
}
private void writeLock(Runnable runner) {
stateLock.writeLock().lock();
try {
if (!isRunning) {
throw new SqlGatewayException("The OperationManager is closed.");
}
runner.run();
} finally {
stateLock.writeLock().unlock();
}
}
private <T> T readLock(Supplier<T> supplier) {
stateLock.readLock().lock();
try {
if (!isRunning) {
throw new SqlGatewayException("The OperationManager is closed.");
}
return supplier.get();
} finally {
stateLock.readLock().unlock();
}
}
}
| Operation |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/proxy/AbstractProxyTest.java | {
"start": 1364,
"end": 3430
} | class ____ {
public static ProxyFactory factory;
@Test
void testGetProxy() {
URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1");
MyInvoker<DemoService> invoker = new MyInvoker<>(url);
DemoService proxy = factory.getProxy(invoker);
Assertions.assertNotNull(proxy);
Assertions.assertTrue(Arrays.asList(proxy.getClass().getInterfaces()).contains(DemoService.class));
Assertions.assertTrue(Arrays.asList(proxy.getClass().getInterfaces()).contains(Destroyable.class));
Assertions.assertTrue(Arrays.asList(proxy.getClass().getInterfaces()).contains(EchoService.class));
Assertions.assertEquals(
invoker.invoke(new RpcInvocation(
"echo",
DemoService.class.getName(),
DemoService.class.getName() + ":dubbo",
new Class[] {String.class},
new Object[] {"aa"}))
.getValue(),
proxy.echo("aa"));
Destroyable destroyable = (Destroyable) proxy;
destroyable.$destroy();
Assertions.assertTrue(invoker.isDestroyed());
}
@Test
void testGetInvoker() {
URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1");
DemoService origin = new DemoServiceImpl();
Invoker<DemoService> invoker = factory.getInvoker(new DemoServiceImpl(), DemoService.class, url);
Assertions.assertEquals(invoker.getInterface(), DemoService.class);
Assertions.assertEquals(
invoker.invoke(new RpcInvocation(
"echo",
DemoService.class.getName(),
DemoService.class.getName() + ":dubbo",
new Class[] {String.class},
new Object[] {"aa"}))
.getValue(),
origin.echo("aa"));
}
}
| AbstractProxyTest |
java | apache__kafka | streams/src/test/java/org/apache/kafka/test/ReadOnlySessionStoreStub.java | {
"start": 1443,
"end": 7652
} | class ____<K, V> implements ReadOnlySessionStore<K, V>, StateStore {
private final NavigableMap<K, List<KeyValue<Windowed<K>, V>>> sessions = new TreeMap<>();
private boolean open = true;
public void put(final Windowed<K> sessionKey, final V value) {
if (!sessions.containsKey(sessionKey.key())) {
sessions.put(sessionKey.key(), new ArrayList<>());
}
sessions.get(sessionKey.key()).add(KeyValue.pair(sessionKey, value));
}
@Override
public KeyValueIterator<Windowed<K>, V> findSessions(K key, long earliestSessionEndTime, long latestSessionStartTime) {
throw new UnsupportedOperationException("Moved from Session Store. Implement if needed");
}
@Override
public KeyValueIterator<Windowed<K>, V> backwardFindSessions(K key, long earliestSessionEndTime, long latestSessionStartTime) {
throw new UnsupportedOperationException("Moved from Session Store. Implement if needed");
}
@Override
public KeyValueIterator<Windowed<K>, V> findSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime) {
throw new UnsupportedOperationException("Moved from Session Store. Implement if needed");
}
@Override
public KeyValueIterator<Windowed<K>, V> backwardFindSessions(K keyFrom, K keyTo, long earliestSessionEndTime, long latestSessionStartTime) {
throw new UnsupportedOperationException("Moved from Session Store. Implement if needed");
}
@Override
public V fetchSession(K key, long earliestSessionEndTime, long latestSessionStartTime) {
throw new UnsupportedOperationException("Moved from Session Store. Implement if needed");
}
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
if (!open) {
throw new InvalidStateStoreException("not open");
}
if (!sessions.containsKey(key)) {
return new KeyValueIteratorStub<>(Collections.emptyIterator());
}
return new KeyValueIteratorStub<>(sessions.get(key).iterator());
}
@Override
public KeyValueIterator<Windowed<K>, V> backwardFetch(K key) {
if (!open) {
throw new InvalidStateStoreException("not open");
}
if (!sessions.containsKey(key)) {
return new KeyValueIteratorStub<>(Collections.emptyIterator());
}
return new KeyValueIteratorStub<>(sessions.descendingMap().get(key).iterator());
}
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K keyFrom, final K keyTo) {
if (!open) {
throw new InvalidStateStoreException("not open");
}
NavigableMap<K, List<KeyValue<Windowed<K>, V>>> subSessionsMap = getSubSessionsMap(keyFrom, keyTo);
if (subSessionsMap.isEmpty()) {
return new KeyValueIteratorStub<>(Collections.emptyIterator());
}
final Iterator<List<KeyValue<Windowed<K>, V>>> keysIterator = subSessionsMap.values().iterator();
return new KeyValueIteratorStub<>(
new Iterator<>() {
Iterator<KeyValue<Windowed<K>, V>> it;
@Override
public boolean hasNext() {
while (it == null || !it.hasNext()) {
if (!keysIterator.hasNext()) {
return false;
}
it = keysIterator.next().iterator();
}
return true;
}
@Override
public KeyValue<Windowed<K>, V> next() {
return it.next();
}
}
);
}
private NavigableMap<K, List<KeyValue<Windowed<K>, V>>> getSubSessionsMap(final K keyFrom, final K keyTo) {
final NavigableMap<K, List<KeyValue<Windowed<K>, V>>> subSessionsMap;
if (keyFrom == null && keyTo == null) { // fetch all
subSessionsMap = sessions;
} else if (keyFrom == null) {
subSessionsMap = sessions.headMap(keyTo, true);
} else if (keyTo == null) {
subSessionsMap = sessions.tailMap(keyFrom, true);
} else {
subSessionsMap = sessions.subMap(keyFrom, true, keyTo, true);
}
return subSessionsMap;
}
@Override
public KeyValueIterator<Windowed<K>, V> backwardFetch(K keyFrom, K keyTo) {
if (!open) {
throw new InvalidStateStoreException("not open");
}
NavigableMap<K, List<KeyValue<Windowed<K>, V>>> subSessionsMap = getSubSessionsMap(keyFrom, keyTo);
if (subSessionsMap.isEmpty()) {
return new KeyValueIteratorStub<>(Collections.emptyIterator());
}
final Iterator<List<KeyValue<Windowed<K>, V>>> keysIterator = subSessionsMap.descendingMap().values().iterator();
return new KeyValueIteratorStub<>(
new Iterator<>() {
Iterator<KeyValue<Windowed<K>, V>> it;
@Override
public boolean hasNext() {
while (it == null || !it.hasNext()) {
if (!keysIterator.hasNext()) {
return false;
}
it = keysIterator.next().iterator();
}
return true;
}
@Override
public KeyValue<Windowed<K>, V> next() {
return it.next();
}
}
);
}
@Override
public String name() {
return "";
}
@Override
public void init(StateStoreContext stateStoreContext, StateStore root) {}
@Override
public void flush() {
}
@Override
public void close() {
}
@Override
public boolean persistent() {
return false;
}
@Override
public boolean isOpen() {
return open;
}
@Override
public Position getPosition() {
throw new UnsupportedOperationException("Position handling not implemented");
}
public void setOpen(final boolean open) {
this.open = open;
}
}
| ReadOnlySessionStoreStub |
java | google__guava | android/guava-tests/test/com/google/common/collect/BenchmarkHelpers.java | {
"start": 4126,
"end": 4602
} | enum ____ {
TreeRangeSetImpl {
@Override
<K extends Comparable<K>> RangeSet<K> create(RangeSet<K> contents) {
return TreeRangeSet.create(contents);
}
},
ImmutableRangeSetImpl {
@Override
<K extends Comparable<K>> RangeSet<K> create(RangeSet<K> contents) {
return ImmutableRangeSet.copyOf(contents);
}
};
abstract <K extends Comparable<K>> RangeSet<K> create(RangeSet<K> contents);
}
public | RangeSetImpl |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 105734,
"end": 106236
} | class ____ {
@RequestMapping("")
public void myPath2(HttpServletResponse response) {
throw new IllegalStateException("test");
}
@RequestMapping("/bar")
public void myPath3(HttpServletResponse response) throws IOException {
response.getWriter().write("testX");
}
@ExceptionHandler
public void myPath2(Exception ex, HttpServletResponse response) throws IOException {
response.getWriter().write(ex.getMessage());
}
}
@Controller
private static | ControllerWithEmptyValueMapping |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/sort/VariableLengthByteKeyComparator.java | {
"start": 1542,
"end": 6445
} | class ____<IN>
extends TypeComparator<Tuple2<byte[], StreamRecord<IN>>> {
private byte[] keyReference;
private long timestampReference;
@Override
public int hash(Tuple2<byte[], StreamRecord<IN>> record) {
return record.hashCode();
}
@Override
public void setReference(Tuple2<byte[], StreamRecord<IN>> toCompare) {
this.keyReference = Arrays.copyOf(toCompare.f0, toCompare.f0.length);
this.timestampReference = toCompare.f1.asRecord().getTimestamp();
}
@Override
public boolean equalToReference(Tuple2<byte[], StreamRecord<IN>> candidate) {
return Arrays.equals(keyReference, candidate.f0)
&& timestampReference == candidate.f1.asRecord().getTimestamp();
}
@Override
public int compareToReference(
TypeComparator<Tuple2<byte[], StreamRecord<IN>>> referencedComparator) {
byte[] otherKey = ((VariableLengthByteKeyComparator<IN>) referencedComparator).keyReference;
long otherTimestamp =
((VariableLengthByteKeyComparator<IN>) referencedComparator).timestampReference;
int keyCmp = compare(otherKey, this.keyReference);
if (keyCmp != 0) {
return keyCmp;
}
return Long.compare(otherTimestamp, this.timestampReference);
}
@Override
public int compare(
Tuple2<byte[], StreamRecord<IN>> first, Tuple2<byte[], StreamRecord<IN>> second) {
int keyCmp = compare(first.f0, second.f0);
if (keyCmp != 0) {
return keyCmp;
}
return Long.compare(
first.f1.asRecord().getTimestamp(), second.f1.asRecord().getTimestamp());
}
private int compare(byte[] first, byte[] second) {
int firstLength = first.length;
int secondLength = second.length;
int minLength = Math.min(firstLength, secondLength);
for (int i = 0; i < minLength; i++) {
int cmp = Byte.compare(first[i], second[i]);
if (cmp != 0) {
return cmp;
}
}
return Integer.compare(firstLength, secondLength);
}
@Override
public int compareSerialized(DataInputView firstSource, DataInputView secondSource)
throws IOException {
int firstLength = firstSource.readInt();
int secondLength = secondSource.readInt();
int minLength = Math.min(firstLength, secondLength);
while (minLength-- > 0) {
byte firstValue = firstSource.readByte();
byte secondValue = secondSource.readByte();
int cmp = Byte.compare(firstValue, secondValue);
if (cmp != 0) {
return cmp;
}
}
int lengthCompare = Integer.compare(firstLength, secondLength);
if (lengthCompare != 0) {
return lengthCompare;
} else {
return Long.compare(firstSource.readLong(), secondSource.readLong());
}
}
@Override
public boolean supportsNormalizedKey() {
return true;
}
@Override
public int getNormalizeKeyLen() {
return Integer.MAX_VALUE;
}
@Override
public boolean isNormalizedKeyPrefixOnly(int keyBytes) {
return true;
}
@Override
public void putNormalizedKey(
Tuple2<byte[], StreamRecord<IN>> record,
MemorySegment target,
int offset,
int numBytes) {
BytesKeyNormalizationUtil.putNormalizedKey(
record, record.f0.length, target, offset, numBytes);
}
@Override
public boolean invertNormalizedKey() {
return false;
}
@Override
public TypeComparator<Tuple2<byte[], StreamRecord<IN>>> duplicate() {
return new VariableLengthByteKeyComparator<>();
}
@Override
public int extractKeys(Object record, Object[] target, int index) {
target[index] = record;
return 1;
}
@Override
public TypeComparator<?>[] getFlatComparators() {
return new TypeComparator[] {this};
}
// --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
}
@Override
public void writeWithKeyNormalization(
Tuple2<byte[], StreamRecord<IN>> record, DataOutputView target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Tuple2<byte[], StreamRecord<IN>> readWithKeyDenormalization(
Tuple2<byte[], StreamRecord<IN>> reuse, DataInputView source) throws IOException {
throw new UnsupportedOperationException();
}
}
| VariableLengthByteKeyComparator |
java | grpc__grpc-java | xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/discovery/v3/AggregatedDiscoveryServiceGrpc.java | {
"start": 20903,
"end": 22179
} | class ____
extends AggregatedDiscoveryServiceBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
private final java.lang.String methodName;
AggregatedDiscoveryServiceMethodDescriptorSupplier(java.lang.String methodName) {
this.methodName = methodName;
}
@java.lang.Override
public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
return getServiceDescriptor().findMethodByName(methodName);
}
}
private static volatile io.grpc.ServiceDescriptor serviceDescriptor;
public static io.grpc.ServiceDescriptor getServiceDescriptor() {
io.grpc.ServiceDescriptor result = serviceDescriptor;
if (result == null) {
synchronized (AggregatedDiscoveryServiceGrpc.class) {
result = serviceDescriptor;
if (result == null) {
serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
.setSchemaDescriptor(new AggregatedDiscoveryServiceFileDescriptorSupplier())
.addMethod(getStreamAggregatedResourcesMethod())
.addMethod(getDeltaAggregatedResourcesMethod())
.build();
}
}
}
return result;
}
}
| AggregatedDiscoveryServiceMethodDescriptorSupplier |
java | google__guava | guava/src/com/google/common/collect/Synchronized.java | {
"start": 4419,
"end": 7633
} | class ____<E extends @Nullable Object> extends SynchronizedObject
implements Collection<E> {
private SynchronizedCollection(Collection<E> delegate, @Nullable Object mutex) {
super(delegate, mutex);
}
@SuppressWarnings("unchecked")
@Override
Collection<E> delegate() {
return (Collection<E>) super.delegate();
}
@Override
public boolean add(E e) {
synchronized (mutex) {
return delegate().add(e);
}
}
@Override
public boolean addAll(Collection<? extends E> c) {
synchronized (mutex) {
return delegate().addAll(c);
}
}
@Override
public void clear() {
synchronized (mutex) {
delegate().clear();
}
}
@Override
public boolean contains(@Nullable Object o) {
synchronized (mutex) {
return delegate().contains(o);
}
}
@Override
public boolean containsAll(Collection<?> c) {
synchronized (mutex) {
return delegate().containsAll(c);
}
}
@Override
public boolean isEmpty() {
synchronized (mutex) {
return delegate().isEmpty();
}
}
@Override
public Iterator<E> iterator() {
return delegate().iterator(); // manually synchronized
}
@Override
public Spliterator<E> spliterator() {
synchronized (mutex) {
return delegate().spliterator();
}
}
@Override
public Stream<E> stream() {
synchronized (mutex) {
return delegate().stream();
}
}
@Override
public Stream<E> parallelStream() {
synchronized (mutex) {
return delegate().parallelStream();
}
}
@Override
public void forEach(Consumer<? super E> action) {
synchronized (mutex) {
delegate().forEach(action);
}
}
@Override
public boolean remove(@Nullable Object o) {
synchronized (mutex) {
return delegate().remove(o);
}
}
@Override
public boolean removeAll(Collection<?> c) {
synchronized (mutex) {
return delegate().removeAll(c);
}
}
@Override
public boolean retainAll(Collection<?> c) {
synchronized (mutex) {
return delegate().retainAll(c);
}
}
@Override
public boolean removeIf(Predicate<? super E> filter) {
synchronized (mutex) {
return delegate().removeIf(filter);
}
}
@Override
public int size() {
synchronized (mutex) {
return delegate().size();
}
}
@Override
public @Nullable Object[] toArray() {
synchronized (mutex) {
return delegate().toArray();
}
}
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
synchronized (mutex) {
return delegate().toArray(a);
}
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
@VisibleForTesting
static <E extends @Nullable Object> Set<E> set(Set<E> set, @Nullable Object mutex) {
return new SynchronizedSet<>(set, mutex);
}
static | SynchronizedCollection |
java | spring-projects__spring-framework | spring-web/src/jmh/java/org/springframework/web/bind/ServletRequestUtilsBenchmark.java | {
"start": 1224,
"end": 2409
} | class ____ {
public MockHttpServletRequest request = new MockHttpServletRequest();
public String parameterName = "nonExistingParam";
}
@Benchmark
public int intParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getIntParameter(data.request, data.parameterName, 0);
}
@Benchmark
public long longParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getLongParameter(data.request, data.parameterName, 0);
}
@Benchmark
public float floatParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getFloatParameter(data.request, data.parameterName, 0f);
}
@Benchmark
public double doubleParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getDoubleParameter(data.request, data.parameterName, 0d);
}
@Benchmark
public boolean booleanParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getBooleanParameter(data.request, data.parameterName, false);
}
@Benchmark
public String stringParameterWithDefaultValue(BenchmarkData data) {
return ServletRequestUtils.getStringParameter(data.request, data.parameterName, "defaultValue");
}
}
| BenchmarkData |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/framework/ProxyFactoryTests.java | {
"start": 16131,
"end": 16274
} | class ____ {
private final long time = System.currentTimeMillis();
public long getTime() {
return time;
}
}
@Order(2)
static | MyDate |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/MutableHttpRequestWrapper.java | {
"start": 1154,
"end": 3711
} | class ____<B> extends HttpRequestWrapper<B> implements MutableHttpRequest<B> {
private ConversionService conversionService;
@Nullable
private B body;
@Nullable
private URI uri;
protected MutableHttpRequestWrapper(ConversionService conversionService, HttpRequest<B> delegate) {
super(delegate);
this.conversionService = conversionService;
}
public static MutableHttpRequest<?> wrapIfNecessary(ConversionService conversionService, HttpRequest<?> request) {
if (request instanceof MutableHttpRequest<?> httpRequest) {
return httpRequest;
} else {
return new MutableHttpRequestWrapper<>(conversionService, request);
}
}
@NonNull
@Override
public Optional<B> getBody() {
if (body == null) {
return getDelegate().getBody();
} else {
return Optional.of(body);
}
}
@NonNull
@Override
public <T> Optional<T> getBody(@NonNull Class<T> type) {
if (body == null) {
return getDelegate().getBody(type);
} else {
return conversionService.convert(body, ConversionContext.of(type));
}
}
@Override
public <T> Optional<T> getBody(ArgumentConversionContext<T> conversionContext) {
if (body == null) {
return getDelegate().getBody(conversionContext);
} else {
return conversionService.convert(body, conversionContext);
}
}
@Override
public MutableHttpRequest<B> cookie(Cookie cookie) {
throw new UnsupportedOperationException();
}
@Override
public MutableHttpRequest<B> uri(URI uri) {
this.uri = uri;
return this;
}
@Override
@NonNull
public URI getUri() {
if (uri == null) {
return getDelegate().getUri();
} else {
return uri;
}
}
@NonNull
@Override
public MutableHttpParameters getParameters() {
return (MutableHttpParameters) super.getParameters();
}
@NonNull
@Override
public MutableHttpHeaders getHeaders() {
return (MutableHttpHeaders) super.getHeaders();
}
@SuppressWarnings("unchecked")
@Override
public <T> MutableHttpRequest<T> body(T body) {
this.body = (B) body;
return (MutableHttpRequest<T>) this;
}
@Override
public void setConversionService(ConversionService conversionService) {
this.conversionService = conversionService;
}
}
| MutableHttpRequestWrapper |
java | google__guice | extensions/assistedinject/test/com/google/inject/assistedinject/FactoryProvider2Test.java | {
"start": 13186,
"end": 13512
} | class ____ implements Car {
private final Provider<Set<String>> manufacturersProvider;
private final Color color;
@Inject
public Z(Provider<Set<String>> manufacturersProvider, @Assisted Color color) {
this.manufacturersProvider = manufacturersProvider;
this.color = color;
}
}
public static | Z |
java | apache__camel | components/camel-aws/camel-aws2-ec2/src/test/java/org/apache/camel/component/aws2/ec2/integration/Aws2EC2Base.java | {
"start": 1373,
"end": 1897
} | class ____ extends CamelTestSupport {
@SuppressWarnings("unused")
@RegisterExtension
public static AWSService service = AWSServiceFactory.createEC2Service();
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
AWS2EC2Component ec2 = context.getComponent("aws2-ec2", AWS2EC2Component.class);
ec2.getConfiguration().setAmazonEc2Client(AWSSDKClientUtils.newEC2Client());
return context;
}
}
| Aws2EC2Base |
java | quarkusio__quarkus | extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/pathparams/HttpPathParamLimitWithReactiveRoutesTest.java | {
"start": 2937,
"end": 3274
} | class ____ {
@Route(path = "/rr", methods = Route.HttpMethod.GET)
public String rr() {
return "hello";
}
@Route(path = "/rr/:message", methods = Route.HttpMethod.GET)
public String rrWithPathParam(@Param String message) {
return "hello " + message;
}
}
}
| Resource |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/asm/Context.java | {
"start": 1796,
"end": 5637
} | class ____ {
/** The prototypes of the attributes that must be parsed in this class. */
Attribute[] attributePrototypes;
/**
* The options used to parse this class. One or more of {@link ClassReader#SKIP_CODE}, {@link
* ClassReader#SKIP_DEBUG}, {@link ClassReader#SKIP_FRAMES}, {@link ClassReader#EXPAND_FRAMES} or
* {@link ClassReader#EXPAND_ASM_INSNS}.
*/
int parsingOptions;
/** The buffer used to read strings in the constant pool. */
char[] charBuffer;
// Information about the current method, i.e. the one read in the current (or latest) call
// to {@link ClassReader#readMethod()}.
/** The access flags of the current method. */
int currentMethodAccessFlags;
/** The name of the current method. */
String currentMethodName;
/** The descriptor of the current method. */
String currentMethodDescriptor;
/**
* The labels of the current method, indexed by bytecode offset (only bytecode offsets for which a
* label is needed have a non null associated Label).
*/
Label[] currentMethodLabels;
// Information about the current type annotation target, i.e. the one read in the current
// (or latest) call to {@link ClassReader#readAnnotationTarget()}.
/**
* The target_type and target_info of the current type annotation target, encoded as described in
* {@link TypeReference}.
*/
int currentTypeAnnotationTarget;
/** The target_path of the current type annotation target. */
TypePath currentTypeAnnotationTargetPath;
/** The start of each local variable range in the current local variable annotation. */
Label[] currentLocalVariableAnnotationRangeStarts;
/** The end of each local variable range in the current local variable annotation. */
Label[] currentLocalVariableAnnotationRangeEnds;
/**
* The local variable index of each local variable range in the current local variable annotation.
*/
int[] currentLocalVariableAnnotationRangeIndices;
// Information about the current stack map frame, i.e. the one read in the current (or latest)
// call to {@link ClassReader#readFrame()}.
/** The bytecode offset of the current stack map frame. */
int currentFrameOffset;
/**
* The type of the current stack map frame. One of {@link Opcodes#F_FULL}, {@link
* Opcodes#F_APPEND}, {@link Opcodes#F_CHOP}, {@link Opcodes#F_SAME} or {@link Opcodes#F_SAME1}.
*/
int currentFrameType;
/**
* The number of local variable types in the current stack map frame. Each type is represented
* with a single array element (even long and double).
*/
int currentFrameLocalCount;
/**
* The delta number of local variable types in the current stack map frame (each type is
* represented with a single array element - even long and double). This is the number of local
* variable types in this frame, minus the number of local variable types in the previous frame.
*/
int currentFrameLocalCountDelta;
/**
* The types of the local variables in the current stack map frame. Each type is represented with
* a single array element (even long and double), using the format described in {@link
* MethodVisitor#visitFrame}. Depending on {@link #currentFrameType}, this contains the types of
* all the local variables, or only those of the additional ones (compared to the previous frame).
*/
Object[] currentFrameLocalTypes;
/**
* The number stack element types in the current stack map frame. Each type is represented with a
* single array element (even long and double).
*/
int currentFrameStackCount;
/**
* The types of the stack elements in the current stack map frame. Each type is represented with a
* single array element (even long and double), using the format described in {@link
* MethodVisitor#visitFrame}.
*/
Object[] currentFrameStackTypes;
}
| Context |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/derivedidentities/bidirectional/CompositeIdDerivedIdWithIdClassTest.java | {
"start": 1781,
"end": 6256
} | class ____ {
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
@JiraKey(value = "HHH-11328")
public void testMergeTransientIdManyToOne(SessionFactoryScope scope) {
ShoppingCart transientCart = new ShoppingCart( "cart1" );
transientCart.addLineItem( new LineItem( 0, "description2", transientCart ) );
// assertion for HHH-11274 - checking for exception
final Object identifier = new PersistenceUnitUtilImpl( scope.getSessionFactory() ).getIdentifier( transientCart.getLineItems()
.get( 0 ) );
// merge ID with transient many-to-one
scope.inTransaction(
session ->
session.merge( transientCart )
);
scope.inTransaction(
session -> {
ShoppingCart updatedCart = session.get( ShoppingCart.class, "cart1" );
// assertion for HHH-11274 - checking for exception
new PersistenceUnitUtilImpl( scope.getSessionFactory() )
.getIdentifier( transientCart.getLineItems().get( 0 ) );
assertEquals( 1, updatedCart.getLineItems().size() );
assertEquals( "description2", updatedCart.getLineItems().get( 0 ).getDescription() );
}
);
}
@Test
@JiraKey(value = "HHH-10623")
public void testMergeDetachedIdManyToOne(SessionFactoryScope scope) {
ShoppingCart cart = new ShoppingCart( "cart1" );
scope.inTransaction(
session ->
session.persist( cart )
);
// cart is detached now
LineItem lineItem = new LineItem( 0, "description2", cart );
cart.addLineItem( lineItem );
// merge lineItem with an ID with detached many-to-one
scope.inTransaction(
session ->
session.merge( lineItem )
);
scope.inTransaction(
session -> {
ShoppingCart updatedCart = session.get( ShoppingCart.class, "cart1" );
assertEquals( 1, updatedCart.getLineItems().size() );
assertEquals( "description2", updatedCart.getLineItems().get( 0 ).getDescription() );
}
);
}
@Test
@JiraKey(value = "HHH-12007")
public void testBindTransientEntityWithTransientKeyManyToOne(SessionFactoryScope scope) {
ShoppingCart cart = new ShoppingCart( "cart" );
LineItem item = new LineItem( 0, "desc", cart );
scope.inTransaction(
session -> {
String cartId = session.createQuery(
"select c.id from Cart c left join c.lineItems i where i = :item",
String.class
).setParameter( "item", item ).uniqueResult();
assertNull( cartId );
assertFalse( session.contains( item ) );
assertFalse( session.contains( cart ) );
}
);
}
@Test
@JiraKey(value = "HHH-12007")
public void testBindTransientEntityWithPersistentKeyManyToOne(SessionFactoryScope scope) {
ShoppingCart cart = new ShoppingCart( "cart" );
LineItem item = new LineItem( 0, "desc", cart );
scope.inTransaction(
session -> {
session.persist( cart );
String cartId = session.createQuery(
"select c.id from Cart c left join c.lineItems i where i = :item",
String.class
).setParameter( "item", item ).uniqueResult();
assertNull( cartId );
assertFalse( session.contains( item ) );
assertTrue( session.contains( cart ) );
}
);
}
@Test
@JiraKey(value = "HHH-12007")
public void testBindTransientEntityWithDetachedKeyManyToOne(SessionFactoryScope scope) {
ShoppingCart cart = new ShoppingCart( "cart" );
LineItem item = new LineItem( 0, "desc", cart );
scope.inTransaction(
session -> {
String cartId = session.createQuery(
"select c.id from Cart c left join c.lineItems i where i = :item",
String.class
).setParameter( "item", item ).uniqueResult();
assertNull( cartId );
assertFalse( session.contains( item ) );
assertFalse( session.contains( cart ) );
}
);
}
@Test
@JiraKey(value = "HHH-12007")
public void testBindTransientEntityWithCopiedKeyManyToOne(SessionFactoryScope scope) {
ShoppingCart cart = new ShoppingCart( "cart" );
LineItem item = new LineItem( 0, "desc", new ShoppingCart( "cart" ) );
scope.inTransaction(
session -> {
String cartId = session.createQuery(
"select c.id from Cart c left join c.lineItems i where i = :item",
String.class
).setParameter( "item", item ).uniqueResult();
assertNull( cartId );
assertFalse( session.contains( item ) );
assertFalse( session.contains( cart ) );
}
);
}
@Entity(name = "Cart")
public static | CompositeIdDerivedIdWithIdClassTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/Aggregation.java | {
"start": 1614,
"end": 2989
} | class ____ extends ParseField.CommonFields {
public static final ParseField META = new ParseField("meta");
public static final ParseField BUCKETS = new ParseField("buckets");
public static final ParseField VALUE = new ParseField("value");
public static final ParseField VALUES = new ParseField("values");
public static final ParseField VALUE_AS_STRING = new ParseField("value_as_string");
public static final ParseField DOC_COUNT = new ParseField("doc_count");
public static final ParseField KEY = new ParseField("key");
public static final ParseField KEY_AS_STRING = new ParseField("key_as_string");
public static final ParseField FROM = new ParseField("from");
public static final ParseField FROM_AS_STRING = new ParseField("from_as_string");
public static final ParseField TO = new ParseField("to");
public static final ParseField TO_AS_STRING = new ParseField("to_as_string");
public static final ParseField MIN = new ParseField("min");
public static final ParseField MIN_AS_STRING = new ParseField("min_as_string");
public static final ParseField MAX = new ParseField("max");
public static final ParseField MAX_AS_STRING = new ParseField("max_as_string");
public static final ParseField SIZE = new ParseField("size");
}
}
| CommonFields |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/test/java/org/springframework/boot/actuate/autoconfigure/info/InfoContributorAutoConfigurationTests.java | {
"start": 10715,
"end": 10854
} | class ____ {
@Bean
SslInfo customSslInfo(SslBundles sslBundles) {
return new SslInfo(sslBundles);
}
}
}
| CustomSslInfoConfiguration |
java | apache__camel | components/camel-jackson-protobuf/src/test/java/org/apache/camel/component/jackson/protobuf/transform/ProtobufPojoDataTypeTransformerTest.java | {
"start": 1505,
"end": 6550
} | class ____ {
private final DefaultCamelContext camelContext = new DefaultCamelContext();
private final ProtobufPojoDataTypeTransformer transformer = new ProtobufPojoDataTypeTransformer();
@BeforeEach
void setup() {
transformer.setCamelContext(camelContext);
}
@Test
void shouldHandleJsonString() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage().setBody("""
{ "name": "Christoph", "age": 32 }
""");
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertEquals(Person.class, exchange.getMessage().getBody().getClass());
Assertions.assertEquals("Christoph", exchange.getMessage().getBody(Person.class).name());
Assertions.assertEquals(32, exchange.getMessage().getBody(Person.class).age());
}
@Test
void shouldHandlePojo() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.getMessage().setBody(new Person("Mickey", 20));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertEquals(Person.class, exchange.getMessage().getBody().getClass());
Assertions.assertEquals("Mickey", exchange.getMessage().getBody(Person.class).name());
Assertions.assertEquals(20, exchange.getMessage().getBody(Person.class).age());
}
@Test
void shouldHandleProtobufBinary() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage().setBody(Protobuf.mapper().writer(protobufSchema)
.writeValueAsBytes(new Person("Goofy", 25)));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertEquals(Person.class, exchange.getMessage().getBody().getClass());
Assertions.assertEquals("Goofy", exchange.getMessage().getBody(Person.class).name());
Assertions.assertEquals(25, exchange.getMessage().getBody(Person.class).age());
}
@Test
void shouldHandleAvroJsonNode() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage()
.setBody(Protobuf.mapper().writerFor(JsonNode.class).with(protobufSchema)
.writeValueAsBytes(Json.mapper().readTree("""
{ "name": "Goofy", "age": 25 }
""")));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertEquals(Person.class, exchange.getMessage().getBody().getClass());
Assertions.assertEquals("Goofy", exchange.getMessage().getBody(Person.class).name());
Assertions.assertEquals(25, exchange.getMessage().getBody(Person.class).age());
}
@Test
void shouldHandleExplicitContentClass() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage().setBody(new Person("Donald", 19));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertEquals(Person.class, exchange.getMessage().getBody().getClass());
Assertions.assertEquals("Donald", exchange.getMessage().getBody(Person.class).name());
Assertions.assertEquals(19, exchange.getMessage().getBody(Person.class).age());
}
@Test
public void shouldLookupDataTypeTransformer() throws Exception {
Transformer transformer = camelContext.getTransformerRegistry()
.resolveTransformer(new TransformerKey("protobuf-x-java-object"));
Assertions.assertNotNull(transformer);
Assertions.assertEquals(ProtobufPojoDataTypeTransformer.class, transformer.getClass());
}
private ProtobufSchema getSchema() throws IOException {
return Protobuf.mapper().schemaLoader()
.load(ProtobufPojoDataTypeTransformerTest.class.getResourceAsStream("Person.proto"));
}
}
| ProtobufPojoDataTypeTransformerTest |
java | grpc__grpc-java | api/src/main/java/io/grpc/InternalLogId.java | {
"start": 1251,
"end": 3185
} | class ____ anonymous.
* @param details a short, human readable string that describes the object the id is attached to.
* Typically this will be an address or target.
*/
public static InternalLogId allocate(Class<?> type, @Nullable String details) {
return allocate(getClassName(type), details);
}
/**
* Creates a log id.
*
* @param typeName the "Type" to be used when logging this id.
* @param details a short, human readable string that describes the object the id is attached to.
* Typically this will be an address or target.
*/
public static InternalLogId allocate(String typeName, @Nullable String details) {
return new InternalLogId(typeName, details, getNextId());
}
static long getNextId() {
return idAlloc.incrementAndGet();
}
private final String typeName;
@Nullable
private final String details;
private final long id;
InternalLogId(String typeName, String details, long id) {
checkNotNull(typeName, "typeName");
checkArgument(!typeName.isEmpty(), "empty type");
this.typeName = typeName;
this.details = details;
this.id = id;
}
public String getTypeName() {
return typeName;
}
@Nullable
public String getDetails() {
return details;
}
public long getId() {
return id;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(shortName());
if (details != null) {
sb.append(": (");
sb.append(details);
sb.append(')');
}
return sb.toString();
}
private static String getClassName(Class<?> type) {
String className = checkNotNull(type, "type").getSimpleName();
if (!className.isEmpty()) {
return className;
}
// + 1 removes the separating '.'
return type.getName().substring(type.getPackage().getName().length() + 1);
}
public String shortName() {
return typeName + "<" + id + ">";
}
}
| is |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/net/Facility.java | {
"start": 4598,
"end": 4843
} | enum ____ or null if name is null
*/
public static Facility toFacility(final String name) {
return toFacility(name, null);
}
/**
* Returns the Facility for the given string.
*
* @param name The Facility | value |
java | apache__logging-log4j2 | log4j-slf4j2-impl/src/main/java/org/apache/logging/slf4j/Log4jMDCAdapter.java | {
"start": 1217,
"end": 3744
} | class ____ implements MDCAdapter {
private static final Logger LOGGER = StatusLogger.getLogger();
private final ThreadLocalMapOfStacks mapOfStacks = new ThreadLocalMapOfStacks();
@Override
public void put(final String key, final String val) {
ThreadContext.put(key, val);
}
@Override
public String get(final String key) {
return ThreadContext.get(key);
}
@Override
public void remove(final String key) {
ThreadContext.remove(key);
}
@Override
public void clear() {
ThreadContext.clearMap();
}
@Override
public Map<String, String> getCopyOfContextMap() {
return ThreadContext.getContext();
}
@Override
public void setContextMap(final Map<String, String> map) {
ThreadContext.clearMap();
ThreadContext.putAll(map);
}
@Override
public void pushByKey(final String key, final String value) {
if (key == null) {
ThreadContext.push(value);
} else {
final String oldValue = mapOfStacks.peekByKey(key);
if (!Objects.equals(ThreadContext.get(key), oldValue)) {
LOGGER.warn("The key {} was used in both the string and stack-valued MDC.", key);
}
mapOfStacks.pushByKey(key, value);
ThreadContext.put(key, value);
}
}
@Override
public String popByKey(final String key) {
if (key == null) {
return ThreadContext.getDepth() > 0 ? ThreadContext.pop() : null;
}
final String value = mapOfStacks.popByKey(key);
if (!Objects.equals(ThreadContext.get(key), value)) {
LOGGER.warn("The key {} was used in both the string and stack-valued MDC.", key);
}
ThreadContext.put(key, mapOfStacks.peekByKey(key));
return value;
}
@Override
public Deque<String> getCopyOfDequeByKey(final String key) {
if (key == null) {
final ContextStack stack = ThreadContext.getImmutableStack();
final Deque<String> copy = new ArrayDeque<>(stack.size());
stack.forEach(copy::push);
return copy;
}
return mapOfStacks.getCopyOfDequeByKey(key);
}
@Override
public void clearDequeByKey(final String key) {
if (key == null) {
ThreadContext.clearStack();
} else {
mapOfStacks.clearByKey(key);
ThreadContext.put(key, null);
}
}
private static | Log4jMDCAdapter |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/DbType.java | {
"start": 75,
"end": 3071
} | enum ____ {
other(1 << 0),
jtds(1 << 1),
hsql(1 << 2),
db2(1 << 3),
postgresql(1 << 4),
sqlserver(1 << 5),
oracle(1 << 6),
mysql(1 << 7),
mariadb(1 << 8),
derby(1 << 9),
hive(1 << 10),
h2(1 << 11),
dm(1 << 12), // dm.jdbc.driver.DmDriver
kingbase(1 << 13),
gbase(1 << 14),
oceanbase(1 << 15),
informix(1 << 16),
odps(1 << 17),
teradata(1 << 18),
phoenix(1 << 19),
edb(1 << 20),
kylin(1 << 21), // org.apache.kylin.jdbc.Driver
sqlite(1 << 22),
ads(1 << 23),
presto(1 << 24),
elastic_search(1 << 25), // com.alibaba.xdriver.elastic.jdbc.ElasticDriver
hbase(1 << 26),
drds(1 << 27),
clickhouse(1 << 28),
blink(1 << 29),
@Deprecated
antspark(1 << 30),
spark(1 << 30),
oceanbase_oracle(1 << 31),
/**
* Alibaba Cloud PolarDB-Oracle 1.0
*/
polardb(1L << 32),
ali_oracle(1L << 33),
mock(1L << 34),
sybase(1L << 35),
highgo(1L << 36),
/**
* 非常成熟的开源mpp数据库
*/
greenplum(1L << 37),
/**
* 华为的mpp数据库
*/
gaussdb(1L << 38),
trino(1L << 39),
oscar(1L << 40),
tidb(1L << 41),
tydb(1L << 42),
starrocks(1L << 43),
goldendb(1L << 44),
snowflake(1L << 45),
redshift(1L << 46),
hologres(1L << 47),
bigquery(1L << 48),
impala(1L << 49),
doris(1L << 50),
lealone(1L << 51),
athena(1L << 52),
polardbx(1L << 53),
supersql(1L << 54),
databricks(1L << 55),
adb_mysql(1L << 56),
/**
* Alibaba Cloud PolarDB-Oracle 2.0
*/
polardb2(1L << 57),
synapse(1L << 58),
ingres(0),
cloudscape(0),
timesten(0),
as400(0),
sapdb(0),
kdb(0),
log4jdbc(0),
xugu(0),
firebirdsql(0),
JSQLConnect(0),
JTurbo(0),
interbase(0),
pointbase(0),
edbc(0),
mimer(0),
taosdata(0),
sundb(0);
public final long mask;
public final long hashCode64;
private DbType(long mask) {
this.mask = mask;
this.hashCode64 = FnvHash.hashCode64(name());
}
public static long of(DbType... types) {
long value = 0;
for (DbType type : types) {
value |= type.mask;
}
return value;
}
public static DbType of(String name) {
if (name == null || name.isEmpty()) {
return null;
}
if ("aliyun_ads".equalsIgnoreCase(name)) {
return ads;
}
if ("maxcompute".equalsIgnoreCase(name)) {
return odps;
}
try {
return valueOf(name);
} catch (Exception e) {
return null;
}
}
public static boolean isPostgreSQLDbStyle(DbType dbType) {
return dbType == DbType.postgresql || dbType == DbType.edb || dbType == DbType.greenplum || dbType == DbType.hologres;
}
public final boolean equals(String other) {
return this == of(other);
}
}
| DbType |
java | apache__spark | examples/src/main/java/org/apache/spark/examples/ml/JavaBinarizerExample.java | {
"start": 1312,
"end": 2349
} | class ____ {
public static void main(String[] args) {
SparkSession spark = SparkSession
.builder()
.appName("JavaBinarizerExample")
.getOrCreate();
// $example on$
List<Row> data = Arrays.asList(
RowFactory.create(0, 0.1),
RowFactory.create(1, 0.8),
RowFactory.create(2, 0.2)
);
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
new StructField("feature", DataTypes.DoubleType, false, Metadata.empty())
});
Dataset<Row> continuousDataFrame = spark.createDataFrame(data, schema);
Binarizer binarizer = new Binarizer()
.setInputCol("feature")
.setOutputCol("binarized_feature")
.setThreshold(0.5);
Dataset<Row> binarizedDataFrame = binarizer.transform(continuousDataFrame);
System.out.println("Binarizer output with Threshold = " + binarizer.getThreshold());
binarizedDataFrame.show();
// $example off$
spark.stop();
}
}
| JavaBinarizerExample |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 37517,
"end": 37707
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return definesDdlType( dialect, SqlTypes.VECTOR_FLOAT32 );
}
}
public static | SupportsFloatVectorType |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/TestDescriptor.java | {
"start": 10000,
"end": 10951
} | interface ____ {
/**
* Combine the supplied {@code visitors} into a single {@code Visitor}.
*
* <p>If the supplied array contains only a single {@code Visitor}, that
* {@code Visitor} is returned as is.
*
* @param visitors the {@code Visitor}s to combine; never {@code null}
* or empty
* @return the combined {@code Visitor}
* @throws org.junit.platform.commons.PreconditionViolationException if
* {@code visitors} is {@code null}, contains {@code null} elements, or
* is empty
* @since 1.13
*/
@API(status = EXPERIMENTAL, since = "6.0")
static Visitor composite(Visitor... visitors) {
return CompositeTestDescriptorVisitor.from(visitors);
}
/**
* Visit a {@link TestDescriptor}.
*
* @param descriptor the {@code TestDescriptor} to visit; never {@code null}
*/
void visit(TestDescriptor descriptor);
}
/**
* Supported types for {@link TestDescriptor TestDescriptors}.
*/
| Visitor |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/value/enum2enum/OrderMapper.java | {
"start": 534,
"end": 1075
} | interface ____ {
OrderMapper INSTANCE = Mappers.getMapper( OrderMapper.class );
OrderDto orderEntityToDto(OrderEntity order);
@ValueMappings({
@ValueMapping(source = "EXTRA", target = "SPECIAL"),
@ValueMapping(source = "STANDARD", target = "DEFAULT"),
@ValueMapping(source = "NORMAL", target = "DEFAULT")
})
ExternalOrderType orderTypeToExternalOrderType(OrderType orderType);
@InheritInverseConfiguration
OrderType externalOrderTypeToOrderType(ExternalOrderType orderType);
}
| OrderMapper |
java | apache__avro | lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Foo.java | {
"start": 19659,
"end": 20945
} | class ____<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, zip_args, Void> {
public zip() {
super("zip");
}
public zip_args getEmptyArgsInstance() {
return new zip_args();
}
public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
public void onComplete(Void o) {
}
public void onError(java.lang.Exception e) {
if (e instanceof org.apache.thrift.transport.TTransportException) {
_LOGGER.error("TTransportException inside handler", e);
fb.close();
} else {
_LOGGER.error("Exception inside oneway handler", e);
}
}
};
}
protected boolean isOneway() {
return true;
}
public void start(I iface, zip_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
throws org.apache.thrift.TException {
iface.zip(resultHandler);
}
}
}
public static | zip |
java | google__guice | core/test/com/google/inject/errors/MissingImplementationErrorTest.java | {
"start": 7323,
"end": 8027
} | class ____ extends AbstractModule {
@Provides
List<? extends String> provideString() {
throw new RuntimeException("not reachable");
}
@Provides
Dao provideInteger(List<String> dep) {
throw new RuntimeException("not reachable");
}
}
@Test
public void testInjectionMissingExtendsClause() {
CreationException exception =
assertThrows(
CreationException.class,
() -> Guice.createInjector(new InjectionMissingExtendsClauseModule()));
assertGuiceErrorEqualsIgnoreLineNumber(
exception.getMessage(), "missing_implementation_missing_extends_clause_java.txt");
}
private static final | InjectionMissingExtendsClauseModule |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/MonoDelayUntilStressTest.java | {
"start": 4922,
"end": 6054
} | class ____ {
final StressSubscriber<Integer> subscriber = new StressSubscriber<Integer>(1L);
StressSubscription<Integer> subscriptionOuter;
StressSubscription<Integer> subscriptionInner;
{
new Mono<Integer>() {
@Override
public void subscribe(CoreSubscriber<? super Integer> actual) {
subscriptionOuter = new StressSubscription<>(actual);
actual.onSubscribe(subscriptionOuter);
}
}
.delayUntil(__ -> new Mono<Integer>() {
@Override
public void subscribe(CoreSubscriber<? super Integer> actual) {
subscriptionInner = new StressSubscription<>(actual);
actual.onSubscribe(subscriptionInner);
}
})
.subscribe(subscriber);
}
@Actor
public void nextOuter() {
subscriptionOuter.actual.onNext(1);
}
@Actor
public void cancelFromActual() {
subscriber.cancel();
}
@Arbiter
public void arbiter(IIII_Result r) {
r.r1 = subscriber.onNextDiscarded.get();
r.r2 = subscriber.onNextCalls.get();
r.r3 = subscriptionOuter.cancelled.get() ? 1 : 0;
r.r4 = subscriptionInner.cancelled.get() ? 1 : 0;
}
}
}
| OnNextVsCancelStressTest |
java | apache__logging-log4j2 | log4j-iostreams/src/main/java/org/apache/logging/log4j/io/internal/InternalBufferedInputStream.java | {
"start": 1216,
"end": 2688
} | class ____ extends BufferedInputStream {
private static final String FQCN = InternalBufferedInputStream.class.getName();
public InternalBufferedInputStream(
final InputStream in,
final Charset charset,
final ExtendedLogger logger,
final String fqcn,
final Level level,
final Marker marker) {
super(new InternalInputStream(in, charset, logger, fqcn == null ? FQCN : fqcn, level, marker));
}
public InternalBufferedInputStream(
final InputStream in,
final Charset charset,
final int size,
final ExtendedLogger logger,
final String fqcn,
final Level level,
final Marker marker) {
super(new InternalInputStream(in, charset, logger, fqcn == null ? FQCN : fqcn, level, marker), size);
}
@Override
public void close() throws IOException {
super.close();
}
@Override
public synchronized int read() throws IOException {
return super.read();
}
@Override
public int read(final byte[] b) throws IOException {
return super.read(b, 0, b.length);
}
@Override
public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
return super.read(b, off, len);
}
@Override
public String toString() {
return "{stream=" + this.in + '}';
}
}
| InternalBufferedInputStream |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/TableJoined.java | {
"start": 972,
"end": 1438
} | class ____ optional parameters that can be passed to
* {@link KTable#join(KTable, Function, ValueJoiner, TableJoined) KTable#join(KTable,Function,...)} and
* {@link KTable#leftJoin(KTable, Function, ValueJoiner, TableJoined) KTable#leftJoin(KTable,Function,...)}
* operations, for foreign key joins.
* @param <K> this key type ; key type for the left (primary) table
* @param <KO> other key type ; key type for the right (foreign key) table
*/
public | represents |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/NestedResourceTest.java | {
"start": 507,
"end": 2386
} | class ____ {
private static final ConfigurationSourceProvider resourceConfigurationSourceProvider = new ResourceConfigurationSourceProvider();
private static final DropwizardAppExtension<TestConfiguration> staticApp = new DropwizardAppExtension<>(
TestApplication.class, "test-config.yaml", resourceConfigurationSourceProvider);
private static final DropwizardClientExtension staticClient = new DropwizardClientExtension();
private static final DAOTestExtension staticDao = DAOTestExtension.newBuilder().build();
private static final ResourceExtension staticResources = ResourceExtension.builder().build();
private final DropwizardAppExtension<TestConfiguration> app = new DropwizardAppExtension<>(
TestApplication.class, "test-config.yaml", resourceConfigurationSourceProvider);
private final DropwizardClientExtension client = new DropwizardClientExtension();
private final DAOTestExtension dao = DAOTestExtension.newBuilder().build();
private final ResourceExtension resources = ResourceExtension.builder().build();
@Test
void staticApp() {
assertThat(staticApp.getEnvironment()).isNotNull();
}
@Test
void staticClient() {
assertThat(staticClient.baseUri()).isNotNull();
}
@Test
void staticDao() {
assertThat(staticDao.getSessionFactory()).isNotNull();
}
@Test
void staticResources() {
assertThat(staticResources.target("")).isNotNull();
}
@Test
void app() {
assertThat(app.getEnvironment()).isNotNull();
}
@Test
void client() {
assertThat(client.baseUri()).isNotNull();
}
@Test
void dao() {
assertThat(dao.getSessionFactory()).isNotNull();
}
@Test
void resources() {
assertThat(resources.target("")).isNotNull();
}
@Nested
| NestedResourceTest |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/app/ContextInjectionResource.java | {
"start": 396,
"end": 750
} | class ____ {
@GET
@Timed
public String getUriPath(@Context UriInfo uriInfo) {
return uriInfo.getPath();
}
@POST
public String getThis() {
throw new RuntimeException("Can't touch this");
}
@PATCH
public String echoPatch(String patchMessage) {
return patchMessage;
}
}
| ContextInjectionResource |
java | apache__hadoop | hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java | {
"start": 959,
"end": 2084
} | class ____ {
public static void main(String[] args) {
try {
if (args.length != 1) {
System.err.println("Usage: <URL>");
System.exit(-1);
}
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
URL url = new URL(args[0]);
HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
System.out.println();
System.out.println("Token value: " + token);
System.out.println("Status code: " + conn.getResponseCode() + " " + conn.getResponseMessage());
System.out.println();
if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
BufferedReader reader = new BufferedReader(
new InputStreamReader(
conn.getInputStream(), StandardCharsets.UTF_8));
String line = reader.readLine();
while (line != null) {
System.out.println(line);
line = reader.readLine();
}
reader.close();
}
System.out.println();
}
catch (Exception ex) {
System.err.println("ERROR: " + ex.getMessage());
System.exit(-1);
}
}
}
| WhoClient |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.