language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/deftyping/TestDefaultForScalars.java | {
"start": 1151,
"end": 6894
} | class ____ {
Object object;
protected ObjectWrapperForPoly() { }
public ObjectWrapperForPoly(final Object o) {
object = o;
}
public Object getObject() { return object; }
}
/*
/**********************************************************************
/* Test methods
/**********************************************************************
*/
private final ObjectMapper DEFAULT_TYPING_MAPPER = jsonMapperBuilder()
.activateDefaultTyping(NoCheckSubTypeValidator.instance)
.enable(DateTimeFeature.WRITE_DATES_AS_TIMESTAMPS)
.build();
/**
* Unit test to verify that limited number of core types do NOT include
* type information, even if declared as Object. This is only done for types
* that JSON scalar values natively map to: String, Integer and Boolean (and
* nulls never have type information)
*/
@Test
public void testNumericScalars() throws Exception
{
// no typing for Integer, Double, yes for others
assertEquals("[123]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { Integer.valueOf(123) }));
assertEquals("[[\"java.lang.Long\",37]]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { Long.valueOf(37) }));
assertEquals("[0.25]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { Double.valueOf(0.25) }));
assertEquals("[[\"java.lang.Float\",0.5]]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { Float.valueOf(0.5f) }));
}
@Test
public void testDateScalars() throws Exception
{
long ts = 12345678L;
assertEquals("[[\"java.util.Date\","+ts+"]]",
DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { new Date(ts) }));
// Calendar is trickier... hmmh. Need to ensure round-tripping
Calendar c = Calendar.getInstance();
c.setTimeInMillis(ts);
String json = DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { c });
assertEquals("[[\""+c.getClass().getName()+"\","+ts+"]]", json);
// and let's make sure it also comes back same way:
Object[] result = DEFAULT_TYPING_MAPPER.readValue(json, Object[].class);
assertEquals(1, result.length);
assertTrue(result[0] instanceof Calendar);
assertEquals(ts, ((Calendar) result[0]).getTimeInMillis());
}
@Test
public void testMiscScalars() throws Exception
{
// no typing for Strings, booleans
assertEquals("[\"abc\"]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Object[] { "abc" }));
assertEquals("[true,null,false]", DEFAULT_TYPING_MAPPER.writeValueAsString(new Boolean[] { true, null, false }));
}
/**
* Test for verifying that contents of "untyped" homogenous arrays are properly
* handled,
*/
@Test
public void testScalarArrays() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.activateDefaultTyping(NoCheckSubTypeValidator.instance,
DefaultTyping.JAVA_LANG_OBJECT)
.enable(DateTimeFeature.WRITE_DATES_AS_TIMESTAMPS)
.build();
Object[] input = new Object[] {
"abc", new Date(1234567), null, Integer.valueOf(456)
};
String json = mapper.writeValueAsString(input);
assertEquals("[\"abc\",[\"java.util.Date\",1234567],null,456]", json);
// and should deserialize back as well:
Object[] output = mapper.readValue(json, Object[].class);
assertArrayEquals(input, output);
}
// Loosely scalar
@Test
public void test417() throws Exception
{
Jackson417Bean input = new Jackson417Bean();
String json = DEFAULT_TYPING_MAPPER.writeValueAsString(input);
Jackson417Bean result = DEFAULT_TYPING_MAPPER.readValue(json, Jackson417Bean.class);
assertEquals(input.foo, result.foo);
assertEquals(input.bar, result.bar);
}
// [databind#1395]: prevent attempts at including type info for primitives
@Test
public void testDefaultTypingWithLong() throws Exception
{
Data data = new Data();
data.key = 1L;
Map<String, Object> mapData = new HashMap<String, Object>();
mapData.put("longInMap", 2L);
mapData.put("longAsField", data);
// Configure Jackson to preserve types
StdTypeResolverBuilder resolver = new StdTypeResolverBuilder(JsonTypeInfo.Id.CLASS,
JsonTypeInfo.As.PROPERTY, "__t");
ObjectMapper mapper = jsonMapperBuilder()
.enable(SerializationFeature.INDENT_OUTPUT)
.polymorphicTypeValidator(new NoCheckSubTypeValidator())
.setDefaultTyping(resolver)
.build();
// Serialize
String json = mapper.writeValueAsString(mapData);
// Deserialize
Map<?,?> result = mapper.readValue(json, Map.class);
assertNotNull(result);
assertEquals(2, result.size());
}
// [databind#2236]: do need type info for NaN
@Test
public void testDefaultTypingWithNaN() throws Exception
{
final ObjectWrapperForPoly INPUT = new ObjectWrapperForPoly(Double.POSITIVE_INFINITY);
final String json = DEFAULT_TYPING_MAPPER.writeValueAsString(INPUT);
final ObjectWrapperForPoly result = DEFAULT_TYPING_MAPPER.readValue(json, ObjectWrapperForPoly.class);
assertEquals(Double.class, result.getObject().getClass());
assertEquals(INPUT.getObject().toString(), result.getObject().toString());
assertTrue(((Double) result.getObject()).isInfinite());
}
}
| ObjectWrapperForPoly |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/IndexSettings.java | {
"start": 3731,
"end": 13292
} | class ____ {
public static final Setting<List<String>> DEFAULT_FIELD_SETTING = Setting.stringListSetting(
"index.query.default_field",
Collections.singletonList("*"),
Property.IndexScope,
Property.Dynamic,
Property.ServerlessPublic
);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = Setting.boolSetting(
"index.query_string.lenient",
false,
Property.IndexScope,
Property.ServerlessPublic
);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting(
"indices.query.query_string.analyze_wildcard",
false,
Property.NodeScope
);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting(
"indices.query.query_string.allowLeadingWildcard",
true,
Property.NodeScope
);
public static final Setting<Boolean> ALLOW_UNMAPPED = Setting.boolSetting(
"index.query.parse.allow_unmapped_fields",
true,
Property.IndexScope
);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting(
"index.translog.sync_interval",
TimeValue.timeValueSeconds(5),
TimeValue.timeValueMillis(100),
Property.Dynamic,
Property.IndexScope
);
public static final Setting<TimeValue> INDEX_SEARCH_IDLE_AFTER = Setting.timeSetting(
"index.search.idle.after",
TimeValue.timeValueSeconds(30),
TimeValue.timeValueMinutes(0),
Property.IndexScope,
Property.Dynamic
);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = Setting.enumSetting(
Translog.Durability.class,
"index.translog.durability",
Translog.Durability.REQUEST,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting(
"index.warmer.enabled",
true,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
return switch (s) {
case "false", "true", "checksum" -> s;
default -> throw new IllegalArgumentException(
"unknown value for [index.shard.check_on_startup] must be one of " + "[true, false, checksum] but was: " + s
);
};
}, Property.IndexScope);
/**
* Index setting describing the maximum value of from + size on a query.
* The Default maximum value of from + size on a query is 10,000. This was chosen as
* a conservative default as it is sure to not cause trouble. Users can
* certainly profile their cluster and decide to set it to 100,000
* safely. 1,000,000 is probably way to high for any cluster to set
* safely.
*/
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = Setting.intSetting(
"index.max_result_window",
10000,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum value of from + size on an individual inner hit definition or
* top hits aggregation. The default maximum of 100 is defensive for the reason that the number of inner hit responses
* and number of top hits buckets returned is unbounded. Profile your cluster when increasing this setting.
*/
public static final Setting<Integer> MAX_INNER_RESULT_WINDOW_SETTING = Setting.intSetting(
"index.max_inner_result_window",
100,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum value of allowed `script_fields`that can be retrieved
* per search request. The default maximum of 32 is defensive for the reason that retrieving
* script fields is a costly operation.
*/
public static final Setting<Integer> MAX_SCRIPT_FIELDS_SETTING = Setting.intSetting(
"index.max_script_fields",
32,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* A setting describing the maximum number of tokens that can be
* produced using _analyze API. The default maximum of 10000 is defensive
* to prevent generating too many token objects.
*/
public static final Setting<Integer> MAX_TOKEN_COUNT_SETTING = Setting.intSetting(
"index.analyze.max_token_count",
10000,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* A setting describing the maximum number of characters that will be analyzed for a highlight request.
* This setting is only applicable when highlighting is requested on a text that was indexed without
* offsets or term vectors.
* The default maximum of 1M characters is defensive as for highlighting larger texts,
* indexing with offsets or term vectors is recommended.
*/
public static final Setting<Integer> MAX_ANALYZED_OFFSET_SETTING = Setting.intSetting(
"index.highlight.max_analyzed_offset",
1000000,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting to enable/disable the {@link UnifiedHighlighter.HighlightFlag#WEIGHT_MATCHES}
* mode of the unified highlighter.
*/
public static final Setting<Boolean> WEIGHT_MATCHES_MODE_ENABLED_SETTING = Setting.boolSetting(
"index.highlight.weight_matches_mode.enabled",
true,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum number of terms that can be used in Terms Query.
* The default maximum of 65536 terms is defensive, as extra processing and memory is involved
* for each additional term, and a large number of terms degrade the cluster performance.
*/
public static final Setting<Integer> MAX_TERMS_COUNT_SETTING = Setting.intSetting(
"index.max_terms_count",
65536,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing for NGramTokenizer and NGramTokenFilter
* the maximum difference between
* max_gram (maximum length of characters in a gram) and
* min_gram (minimum length of characters in a gram).
* The default value is 1 as this is default difference in NGramTokenizer,
* and is defensive as it prevents generating too many index terms.
*/
public static final Setting<Integer> MAX_NGRAM_DIFF_SETTING = Setting.intSetting(
"index.max_ngram_diff",
1,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing for ShingleTokenFilter
* the maximum difference between
* max_shingle_size and min_shingle_size.
* The default value is 3 is defensive as it prevents generating too many tokens.
*/
public static final Setting<Integer> MAX_SHINGLE_DIFF_SETTING = Setting.intSetting(
"index.max_shingle_diff",
3,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum value of allowed `docvalue_fields`that can be retrieved
* per search request. The default maximum of 100 is defensive for the reason that retrieving
* doc values might incur a per-field per-document seek.
*/
public static final Setting<Integer> MAX_DOCVALUE_FIELDS_SEARCH_SETTING = Setting.intSetting(
"index.max_docvalue_fields_search",
100,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum size of the rescore window. Defaults to {@link #MAX_RESULT_WINDOW_SETTING}
* because they both do the same thing: control the size of the heap of hits.
*/
public static final Setting<Integer> MAX_RESCORE_WINDOW_SETTING = Setting.intSetting(
"index.max_rescore_window",
MAX_RESULT_WINDOW_SETTING,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Only intended for stateless.
*/
public static final Setting<Boolean> INDEX_FAST_REFRESH_SETTING = Setting.boolSetting(
"index.fast_refresh",
false,
Property.Final,
Property.IndexScope
);
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final Setting<TimeValue> NODE_DEFAULT_REFRESH_INTERVAL_SETTING = Setting.timeSetting(
"node._internal.default_refresh_interval",
DEFAULT_REFRESH_INTERVAL,
TimeValue.MINUS_ONE,
Property.NodeScope
); // TODO: remove setting
public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(5); // TODO: this value is still not final
public static TimeValue STATELESS_MIN_NON_FAST_REFRESH_INTERVAL = TimeValue.timeValueSeconds(5);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", (settings) -> {
if (EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless") && INDEX_FAST_REFRESH_SETTING.get(settings) == false) {
return STATELESS_DEFAULT_REFRESH_INTERVAL;
}
return DEFAULT_REFRESH_INTERVAL;
}, new RefreshIntervalValidator(), Property.Dynamic, Property.IndexScope, Property.ServerlessPublic);
static | IndexSettings |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/service/materializedtable/MaterializedTableManager.java | {
"start": 6862,
"end": 66296
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(MaterializedTableManager.class);
private final URLClassLoader userCodeClassLoader;
private final @Nullable WorkflowScheduler<? extends RefreshHandler> workflowScheduler;
private final String restEndpointUrl;
public MaterializedTableManager(
Configuration configuration, URLClassLoader userCodeClassLoader) {
this.userCodeClassLoader = userCodeClassLoader;
this.restEndpointUrl = buildRestEndpointUrl(configuration);
this.workflowScheduler = buildWorkflowScheduler(configuration, userCodeClassLoader);
}
private String buildRestEndpointUrl(Configuration configuration) {
Configuration restEndpointConfig =
Configuration.fromMap(
getEndpointConfig(configuration, SqlGatewayRestEndpointFactory.IDENTIFIER));
String address = restEndpointConfig.get(SqlGatewayRestOptions.ADDRESS);
int port = restEndpointConfig.get(SqlGatewayRestOptions.PORT);
return String.format("http://%s:%s", address, port);
}
private WorkflowScheduler<? extends RefreshHandler> buildWorkflowScheduler(
Configuration configuration, URLClassLoader userCodeClassLoader) {
return WorkflowSchedulerFactoryUtil.createWorkflowScheduler(
configuration, userCodeClassLoader);
}
public void open() throws Exception {
if (workflowScheduler != null) {
workflowScheduler.open();
}
}
public void close() throws Exception {
if (workflowScheduler != null) {
workflowScheduler.close();
}
}
public ResultFetcher callMaterializedTableOperation(
OperationExecutor operationExecutor,
OperationHandle handle,
MaterializedTableOperation op) {
if (op instanceof CreateMaterializedTableOperation) {
return callCreateMaterializedTableOperation(
operationExecutor, handle, (CreateMaterializedTableOperation) op);
} else if (op instanceof AlterMaterializedTableRefreshOperation) {
return callAlterMaterializedTableRefreshOperation(
operationExecutor, handle, (AlterMaterializedTableRefreshOperation) op);
} else if (op instanceof AlterMaterializedTableSuspendOperation) {
return callAlterMaterializedTableSuspend(
operationExecutor, handle, (AlterMaterializedTableSuspendOperation) op);
} else if (op instanceof AlterMaterializedTableResumeOperation) {
return callAlterMaterializedTableResume(
operationExecutor, handle, (AlterMaterializedTableResumeOperation) op);
} else if (op instanceof DropMaterializedTableOperation) {
return callDropMaterializedTableOperation(
operationExecutor, handle, (DropMaterializedTableOperation) op);
} else if (op instanceof AlterMaterializedTableChangeOperation) {
return callAlterMaterializedTableChangeOperation(
operationExecutor, handle, (AlterMaterializedTableChangeOperation) op);
}
throw new SqlExecutionException(
String.format(
"Unsupported Operation %s for materialized table.", op.asSummaryString()));
}
private ResultFetcher callCreateMaterializedTableOperation(
OperationExecutor operationExecutor,
OperationHandle handle,
CreateMaterializedTableOperation createMaterializedTableOperation) {
ResolvedCatalogMaterializedTable materializedTable =
createMaterializedTableOperation.getCatalogMaterializedTable();
if (CatalogMaterializedTable.RefreshMode.CONTINUOUS == materializedTable.getRefreshMode()) {
createMaterializedTableInContinuousMode(
operationExecutor, handle, createMaterializedTableOperation);
} else {
createMaterializedTableInFullMode(
operationExecutor, handle, createMaterializedTableOperation);
}
// Just return ok for unify different refresh job info of continuous and full mode, user
// should get the refresh job info via desc table.
return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
}
private void createMaterializedTableInContinuousMode(
OperationExecutor operationExecutor,
OperationHandle handle,
CreateMaterializedTableOperation createMaterializedTableOperation) {
// create materialized table first
operationExecutor.callExecutableOperation(handle, createMaterializedTableOperation);
ObjectIdentifier materializedTableIdentifier =
createMaterializedTableOperation.getTableIdentifier();
ResolvedCatalogMaterializedTable catalogMaterializedTable =
createMaterializedTableOperation.getCatalogMaterializedTable();
try {
executeContinuousRefreshJob(
operationExecutor,
handle,
catalogMaterializedTable,
materializedTableIdentifier,
Collections.emptyMap(),
Optional.empty());
} catch (Exception e) {
// drop materialized table while submit flink streaming job occur exception. Thus, weak
// atomicity is guaranteed
operationExecutor.callExecutableOperation(
handle, new DropMaterializedTableOperation(materializedTableIdentifier, true));
throw new SqlExecutionException(
String.format(
"Submit continuous refresh job for materialized table %s occur exception.",
materializedTableIdentifier),
e);
}
}
private void createMaterializedTableInFullMode(
OperationExecutor operationExecutor,
OperationHandle handle,
CreateMaterializedTableOperation createMaterializedTableOperation) {
if (workflowScheduler == null) {
throw new SqlExecutionException(
"The workflow scheduler must be configured when creating materialized table in full refresh mode.");
}
// create materialized table first
operationExecutor.callExecutableOperation(handle, createMaterializedTableOperation);
ObjectIdentifier materializedTableIdentifier =
createMaterializedTableOperation.getTableIdentifier();
ResolvedCatalogMaterializedTable catalogMaterializedTable =
createMaterializedTableOperation.getCatalogMaterializedTable();
final IntervalFreshness freshness = catalogMaterializedTable.getDefinitionFreshness();
String cronExpression = convertFreshnessToCron(freshness);
// create full refresh job
CreateRefreshWorkflow createRefreshWorkflow =
new CreatePeriodicRefreshWorkflow(
materializedTableIdentifier,
catalogMaterializedTable.getExpandedQuery(),
cronExpression,
getSessionInitializationConf(operationExecutor),
Collections.emptyMap(),
restEndpointUrl);
try {
RefreshHandler refreshHandler =
workflowScheduler.createRefreshWorkflow(createRefreshWorkflow);
RefreshHandlerSerializer refreshHandlerSerializer =
workflowScheduler.getRefreshHandlerSerializer();
byte[] serializedRefreshHandler = refreshHandlerSerializer.serialize(refreshHandler);
updateRefreshHandler(
operationExecutor,
handle,
materializedTableIdentifier,
catalogMaterializedTable,
CatalogMaterializedTable.RefreshStatus.ACTIVATED,
refreshHandler.asSummaryString(),
serializedRefreshHandler);
} catch (Exception e) {
// drop materialized table while create refresh workflow occur exception. Thus, weak
// atomicity is guaranteed
operationExecutor.callExecutableOperation(
handle, new DropMaterializedTableOperation(materializedTableIdentifier, true));
throw new SqlExecutionException(
String.format(
"Failed to create refresh workflow for materialized table %s.",
materializedTableIdentifier),
e);
}
}
private ResultFetcher callAlterMaterializedTableSuspend(
OperationExecutor operationExecutor,
OperationHandle handle,
AlterMaterializedTableSuspendOperation op) {
ObjectIdentifier tableIdentifier = op.getTableIdentifier();
ResolvedCatalogMaterializedTable materializedTable =
getCatalogMaterializedTable(operationExecutor, tableIdentifier);
// Initialization phase doesn't support resume operation.
if (CatalogMaterializedTable.RefreshStatus.INITIALIZING
== materializedTable.getRefreshStatus()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s is being initialized and does not support suspend operation.",
tableIdentifier));
}
if (CatalogMaterializedTable.RefreshMode.CONTINUOUS == materializedTable.getRefreshMode()) {
suspendContinuousRefreshJob(
operationExecutor, handle, tableIdentifier, materializedTable);
} else {
suspendRefreshWorkflow(operationExecutor, handle, tableIdentifier, materializedTable);
}
return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
}
private CatalogMaterializedTable suspendContinuousRefreshJob(
OperationExecutor operationExecutor,
OperationHandle handle,
ObjectIdentifier tableIdentifier,
CatalogMaterializedTable materializedTable) {
try {
ContinuousRefreshHandler refreshHandler =
deserializeContinuousHandler(materializedTable.getSerializedRefreshHandler());
if (CatalogMaterializedTable.RefreshStatus.SUSPENDED
== materializedTable.getRefreshStatus()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s continuous refresh job has been suspended, jobId is %s.",
tableIdentifier, refreshHandler.getJobId()));
}
String savepointPath = stopJobWithSavepoint(operationExecutor, handle, refreshHandler);
ContinuousRefreshHandler updateRefreshHandler =
new ContinuousRefreshHandler(
refreshHandler.getExecutionTarget(),
refreshHandler.getClusterId(),
refreshHandler.getJobId(),
savepointPath);
return updateRefreshHandler(
operationExecutor,
handle,
tableIdentifier,
materializedTable,
CatalogMaterializedTable.RefreshStatus.SUSPENDED,
updateRefreshHandler.asSummaryString(),
serializeContinuousHandler(updateRefreshHandler));
} catch (Exception e) {
throw new SqlExecutionException(
String.format(
"Failed to suspend the continuous refresh job for materialized table %s.",
tableIdentifier),
e);
}
}
private void suspendRefreshWorkflow(
OperationExecutor operationExecutor,
OperationHandle handle,
ObjectIdentifier tableIdentifier,
CatalogMaterializedTable materializedTable) {
if (CatalogMaterializedTable.RefreshStatus.SUSPENDED
== materializedTable.getRefreshStatus()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s refresh workflow has been suspended.",
tableIdentifier));
}
if (workflowScheduler == null) {
throw new SqlExecutionException(
"The workflow scheduler must be configured when suspending materialized table in full refresh mode.");
}
try {
RefreshHandlerSerializer<?> refreshHandlerSerializer =
workflowScheduler.getRefreshHandlerSerializer();
RefreshHandler refreshHandler =
refreshHandlerSerializer.deserialize(
materializedTable.getSerializedRefreshHandler(), userCodeClassLoader);
ModifyRefreshWorkflow modifyRefreshWorkflow =
new SuspendRefreshWorkflow(refreshHandler);
workflowScheduler.modifyRefreshWorkflow(modifyRefreshWorkflow);
updateRefreshHandler(
operationExecutor,
handle,
tableIdentifier,
materializedTable,
CatalogMaterializedTable.RefreshStatus.SUSPENDED,
refreshHandler.asSummaryString(),
materializedTable.getSerializedRefreshHandler());
} catch (Exception e) {
throw new SqlExecutionException(
String.format(
"Failed to suspend the refresh workflow for materialized table %s.",
tableIdentifier),
e);
}
}
private ResultFetcher callAlterMaterializedTableResume(
OperationExecutor operationExecutor,
OperationHandle handle,
AlterMaterializedTableResumeOperation op) {
ObjectIdentifier tableIdentifier = op.getTableIdentifier();
ResolvedCatalogMaterializedTable catalogMaterializedTable =
getCatalogMaterializedTable(operationExecutor, tableIdentifier);
// Initialization phase doesn't support resume operation.
if (CatalogMaterializedTable.RefreshStatus.INITIALIZING
== catalogMaterializedTable.getRefreshStatus()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s is being initialized and does not support resume operation.",
tableIdentifier));
}
if (CatalogMaterializedTable.RefreshMode.CONTINUOUS
== catalogMaterializedTable.getRefreshMode()) {
resumeContinuousRefreshJob(
operationExecutor,
handle,
tableIdentifier,
catalogMaterializedTable,
op.getDynamicOptions());
} else {
resumeRefreshWorkflow(
operationExecutor,
handle,
tableIdentifier,
catalogMaterializedTable,
op.getDynamicOptions());
}
return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
}
private void resumeContinuousRefreshJob(
OperationExecutor operationExecutor,
OperationHandle handle,
ObjectIdentifier tableIdentifier,
ResolvedCatalogMaterializedTable catalogMaterializedTable,
Map<String, String> dynamicOptions) {
ContinuousRefreshHandler refreshHandler =
deserializeContinuousHandler(
catalogMaterializedTable.getSerializedRefreshHandler());
// Repeated resume continuous refresh job is not supported
if (CatalogMaterializedTable.RefreshStatus.ACTIVATED
== catalogMaterializedTable.getRefreshStatus()) {
JobStatus jobStatus = getJobStatus(operationExecutor, handle, refreshHandler);
if (!jobStatus.isGloballyTerminalState()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s continuous refresh job has been resumed, jobId is %s.",
tableIdentifier, refreshHandler.getJobId()));
}
}
Optional<String> restorePath = refreshHandler.getRestorePath();
try {
executeContinuousRefreshJob(
operationExecutor,
handle,
catalogMaterializedTable,
tableIdentifier,
dynamicOptions,
restorePath);
} catch (Exception e) {
throw new SqlExecutionException(
String.format(
"Failed to resume the continuous refresh job for materialized table %s.",
tableIdentifier),
e);
}
}
private void resumeRefreshWorkflow(
OperationExecutor operationExecutor,
OperationHandle handle,
ObjectIdentifier tableIdentifier,
CatalogMaterializedTable catalogMaterializedTable,
Map<String, String> dynamicOptions) {
// Repeated resume refresh workflow is not supported
if (CatalogMaterializedTable.RefreshStatus.ACTIVATED
== catalogMaterializedTable.getRefreshStatus()) {
throw new SqlExecutionException(
String.format(
"Materialized table %s refresh workflow has been resumed.",
tableIdentifier));
}
if (workflowScheduler == null) {
throw new SqlExecutionException(
"The workflow scheduler must be configured when resuming materialized table in full refresh mode.");
}
try {
RefreshHandlerSerializer<?> refreshHandlerSerializer =
workflowScheduler.getRefreshHandlerSerializer();
RefreshHandler refreshHandler =
refreshHandlerSerializer.deserialize(
catalogMaterializedTable.getSerializedRefreshHandler(),
userCodeClassLoader);
ModifyRefreshWorkflow modifyRefreshWorkflow =
new ResumeRefreshWorkflow(refreshHandler, dynamicOptions);
workflowScheduler.modifyRefreshWorkflow(modifyRefreshWorkflow);
updateRefreshHandler(
operationExecutor,
handle,
tableIdentifier,
catalogMaterializedTable,
CatalogMaterializedTable.RefreshStatus.ACTIVATED,
refreshHandler.asSummaryString(),
catalogMaterializedTable.getSerializedRefreshHandler());
} catch (Exception e) {
throw new SqlExecutionException(
String.format(
"Failed to resume the refresh workflow for materialized table %s.",
tableIdentifier),
e);
}
}
private void executeContinuousRefreshJob(
OperationExecutor operationExecutor,
OperationHandle handle,
CatalogMaterializedTable catalogMaterializedTable,
ObjectIdentifier materializedTableIdentifier,
Map<String, String> dynamicOptions,
Optional<String> restorePath) {
// Set job name, runtime mode, checkpoint interval
// TODO: Set minibatch related optimization options.
Configuration customConfig = new Configuration();
String jobName =
String.format(
"Materialized_table_%s_continuous_refresh_job",
materializedTableIdentifier.asSerializableString());
customConfig.set(NAME, jobName);
customConfig.set(RUNTIME_MODE, STREAMING);
restorePath.ifPresent(s -> customConfig.set(SAVEPOINT_PATH, s));
// Do not override the user-defined checkpoint interval
if (!operationExecutor
.getSessionContext()
.getSessionConf()
.contains(CheckpointingOptions.CHECKPOINTING_INTERVAL)) {
final Duration freshness =
validateAndGetIntervalFreshness(catalogMaterializedTable).toDuration();
customConfig.set(CheckpointingOptions.CHECKPOINTING_INTERVAL, freshness);
}
String insertStatement =
getInsertStatement(
materializedTableIdentifier,
catalogMaterializedTable.getExpandedQuery(),
dynamicOptions);
JobExecutionResult result =
executeRefreshJob(insertStatement, customConfig, operationExecutor, handle);
ContinuousRefreshHandler continuousRefreshHandler =
new ContinuousRefreshHandler(
result.executionTarget, result.clusterId, result.jobId);
byte[] serializedBytes = serializeContinuousHandler(continuousRefreshHandler);
updateRefreshHandler(
operationExecutor,
handle,
materializedTableIdentifier,
catalogMaterializedTable,
CatalogMaterializedTable.RefreshStatus.ACTIVATED,
continuousRefreshHandler.asSummaryString(),
serializedBytes);
}
private ResultFetcher callAlterMaterializedTableRefreshOperation(
OperationExecutor operationExecutor,
OperationHandle handle,
AlterMaterializedTableRefreshOperation alterMaterializedTableRefreshOperation) {
ObjectIdentifier materializedTableIdentifier =
alterMaterializedTableRefreshOperation.getTableIdentifier();
Map<String, String> partitionSpec =
alterMaterializedTableRefreshOperation.getPartitionSpec();
return refreshMaterializedTable(
operationExecutor,
handle,
materializedTableIdentifier,
partitionSpec,
Collections.emptyMap(),
false,
null);
}
public ResultFetcher refreshMaterializedTable(
OperationExecutor operationExecutor,
OperationHandle handle,
ObjectIdentifier materializedTableIdentifier,
Map<String, String> staticPartitions,
Map<String, String> dynamicOptions,
boolean isPeriodic,
@Nullable String scheduleTime) {
ResolvedCatalogMaterializedTable materializedTable =
getCatalogMaterializedTable(operationExecutor, materializedTableIdentifier);
Map<String, String> refreshPartitions =
isPeriodic
? getPeriodRefreshPartition(
scheduleTime,
materializedTable.getDefinitionFreshness(),
materializedTableIdentifier,
materializedTable.getOptions(),
operationExecutor
.getTableEnvironment()
.getConfig()
.getLocalTimeZone())
: staticPartitions;
validatePartitionSpec(refreshPartitions, materializedTable);
// Set job name, runtime mode
Configuration customConfig = new Configuration();
String jobName =
isPeriodic
? String.format(
"Materialized_table_%s_periodic_refresh_job",
materializedTableIdentifier.asSerializableString())
: String.format(
"Materialized_table_%s_one_time_refresh_job",
materializedTableIdentifier.asSerializableString());
customConfig.set(NAME, jobName);
customConfig.set(RUNTIME_MODE, BATCH);
String insertStatement =
getRefreshStatement(
materializedTableIdentifier,
materializedTable.getExpandedQuery(),
refreshPartitions,
dynamicOptions);
try {
LOG.info(
"Begin to refreshing the materialized table {}, statement: {}",
materializedTableIdentifier,
insertStatement);
JobExecutionResult result =
executeRefreshJob(insertStatement, customConfig, operationExecutor, handle);
Map<StringData, StringData> clusterInfo = new HashMap<>();
clusterInfo.put(
StringData.fromString(TARGET.key()),
StringData.fromString(result.executionTarget));
Optional<String> clusterIdKeyName = getClusterIdKeyName(result.executionTarget);
clusterIdKeyName.ifPresent(
s ->
clusterInfo.put(
StringData.fromString(s),
StringData.fromString(result.clusterId)));
return ResultFetcher.fromResults(
handle,
ResolvedSchema.of(
Column.physical(JOB_ID, DataTypes.STRING()),
Column.physical(
CLUSTER_INFO,
DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()))),
Collections.singletonList(
GenericRowData.of(
StringData.fromString(result.jobId),
new GenericMapData(clusterInfo))));
} catch (Exception e) {
throw new SqlExecutionException(
String.format(
"Refreshing the materialized table %s occur exception.",
materializedTableIdentifier),
e);
}
}
/**
 * Derives the partition spec for a periodic refresh from the workflow schedule time.
 *
 * <p>Every table option whose key starts with {@code PARTITION_FIELDS} declares a
 * date-formatted partition field; its value is computed by shifting the schedule time
 * backwards by one freshness interval and rendering it with the field's own formatter.
 *
 * @param scheduleTime the schedule time passed by the workflow scheduler; must not be null
 * @param freshness the table's freshness interval, used as a negative offset
 * @param materializedTableIdentifier identifier used in error messages
 * @param materializedTableOptions table options possibly containing partition-field formatters
 * @param localZoneId session-local time zone used when formatting
 * @return partition field name to formatted value; empty if no partition-field options exist
 */
@VisibleForTesting
static Map<String, String> getPeriodRefreshPartition(
        String scheduleTime,
        IntervalFreshness freshness,
        ObjectIdentifier materializedTableIdentifier,
        Map<String, String> materializedTableOptions,
        ZoneId localZoneId) {
    if (scheduleTime == null) {
        throw new ValidationException(
                String.format(
                        "The scheduler time must not be null during the periodic refresh of the materialized table %s.",
                        materializedTableIdentifier));
    }
    Map<String, String> refreshPartitions = new HashMap<>();
    for (Map.Entry<String, String> option : materializedTableOptions.entrySet()) {
        String optionKey = option.getKey();
        if (!optionKey.startsWith(PARTITION_FIELDS)) {
            continue;
        }
        // Key layout assumed: "<PARTITION_FIELDS>.<field-name>.<DATE_FORMATTER>";
        // strip the prefix (plus dot) and the suffix (plus dot) to get the field name.
        String fieldName =
                optionKey.substring(
                        PARTITION_FIELDS.length() + 1,
                        optionKey.length() - (DATE_FORMATTER.length() + 1));
        String fieldValue =
                formatTimestampStringWithOffset(
                        scheduleTime,
                        SCHEDULE_TIME_DATE_FORMATTER_DEFAULT,
                        option.getValue(),
                        TimeZone.getTimeZone(localZoneId),
                        // shift one freshness interval into the past
                        -freshness.toDuration().toMillis());
        if (fieldValue == null) {
            throw new SqlExecutionException(
                    String.format(
                            "Failed to parse a valid partition value for the field '%s' in materialized table %s using the scheduler time '%s' based on the date format '%s'.",
                            fieldName,
                            materializedTableIdentifier.asSerializableString(),
                            scheduleTime,
                            SCHEDULE_TIME_DATE_FORMATTER_DEFAULT));
        }
        refreshPartitions.put(fieldName, fieldValue);
    }
    return refreshPartitions;
}
/**
 * Validates a user-supplied partition spec against the materialized table's schema.
 *
 * <p>Rejects keys that are not columns of the table, and keys whose column type is not in
 * the CHARACTER_STRING family (only char/varchar/string partition keys are supported).
 */
private void validatePartitionSpec(
        Map<String, String> partitionSpec, ResolvedCatalogMaterializedTable table) {
    ResolvedSchema schema = table.getResolvedSchema();
    Set<String> allPartitionKeys = new HashSet<>(table.getPartitionKeys());
    Set<String> unknownPartitionKeys = new HashSet<>();
    Set<String> nonStringPartitionKeys = new HashSet<>();
    for (String partitionKey : partitionSpec.keySet()) {
        // Single schema lookup per key; the original resolved the column twice.
        Optional<Column> column = schema.getColumn(partitionKey);
        if (!column.isPresent()) {
            unknownPartitionKeys.add(partitionKey);
        } else if (!column.get()
                .getDataType()
                .getLogicalType()
                .getTypeRoot()
                .getFamilies()
                .contains(LogicalTypeFamily.CHARACTER_STRING)) {
            nonStringPartitionKeys.add(partitionKey);
        }
    }
    if (!unknownPartitionKeys.isEmpty()) {
        throw new ValidationException(
                String.format(
                        "The partition spec contains unknown partition keys:\n\n%s\n\nAll known partition keys are:\n\n%s",
                        String.join("\n", unknownPartitionKeys),
                        String.join("\n", allPartitionKeys)));
    }
    if (!nonStringPartitionKeys.isEmpty()) {
        throw new ValidationException(
                String.format(
                        "Currently, refreshing materialized table only supports referring to char, varchar and string type"
                                + " partition keys. All specified partition keys in partition specs with unsupported types are:\n\n%s",
                        String.join("\n", nonStringPartitionKeys)));
    }
}
/**
 * Builds the INSERT OVERWRITE statement for a (manual or periodic) refresh.
 *
 * <p>The definition query is wrapped in a subquery; the partition spec (if any) is turned
 * into a WHERE clause of {@code key = 'value'} predicates joined with AND.
 *
 * @param tableIdentifier target materialized table
 * @param definitionQuery the table's expanded definition query
 * @param partitionSpec partitions to refresh; may be empty
 * @param dynamicOptions dynamic table options rendered as an OPTIONS hint
 * @return the complete INSERT OVERWRITE statement
 */
@VisibleForTesting
protected static String getRefreshStatement(
        ObjectIdentifier tableIdentifier,
        String definitionQuery,
        Map<String, String> partitionSpec,
        Map<String, String> dynamicOptions) {
    String tableIdentifierWithDynamicOptions =
            generateTableWithDynamicOptions(tableIdentifier, dynamicOptions);
    StringBuilder insertStatement =
            new StringBuilder(
                    String.format(
                            "INSERT OVERWRITE %s\n SELECT * FROM (%s)",
                            tableIdentifierWithDynamicOptions, definitionQuery));
    if (!partitionSpec.isEmpty()) {
        // Collectors.joining replaces the previous reduce(...).get(): same output for a
        // non-empty spec, without an unchecked Optional.get().
        insertStatement
                .append("\n WHERE ")
                .append(
                        partitionSpec.entrySet().stream()
                                .map(
                                        entry ->
                                                String.format(
                                                        "%s = '%s'",
                                                        entry.getKey(), entry.getValue()))
                                .collect(Collectors.joining(" AND ")));
    }
    return insertStatement.toString();
}
/**
 * Applies an ALTER on a materialized table while keeping its background refresh job
 * consistent with the new definition.
 *
 * <p>Dispatch by the current table state:
 * <ul>
 *   <li>FULL refresh mode: apply the change directly (no long-running job to coordinate).
 *   <li>CONTINUOUS + ACTIVATED: suspend the refresh job, apply the change, restart the job;
 *       on restart failure, roll back the catalog change and restore the old job.
 *   <li>CONTINUOUS + SUSPENDED: apply the change plus a handler change that resets the
 *       recorded savepoint path.
 *   <li>Anything else (still initializing): rejected.
 * </ul>
 */
private ResultFetcher callAlterMaterializedTableChangeOperation(
        OperationExecutor operationExecutor,
        OperationHandle handle,
        AlterMaterializedTableChangeOperation op) {
    ObjectIdentifier tableIdentifier = op.getTableIdentifier();
    ResolvedCatalogMaterializedTable oldMaterializedTable =
            getCatalogMaterializedTable(operationExecutor, tableIdentifier);
    if (CatalogMaterializedTable.RefreshMode.FULL == oldMaterializedTable.getRefreshMode()) {
        // directly apply the alter operation
        AlterMaterializedTableChangeOperation alterMaterializedTableChangeOperation =
                new AlterMaterializedTableChangeOperation(
                        tableIdentifier,
                        op.getTableChanges(),
                        op.getCatalogMaterializedTable());
        return operationExecutor.callExecutableOperation(
                handle, alterMaterializedTableChangeOperation);
    }
    if (CatalogMaterializedTable.RefreshStatus.ACTIVATED
            == oldMaterializedTable.getRefreshStatus()) {
        // 1. suspend the materialized table (see suspendContinuousRefreshJob — presumably
        // stops the running refresh job and records restore state in the handler)
        CatalogMaterializedTable suspendMaterializedTable =
                suspendContinuousRefreshJob(
                        operationExecutor, handle, tableIdentifier, oldMaterializedTable);
        // 2. alter materialized table schema & query definition, carrying over the
        // refresh status/handler captured at suspension time
        CatalogMaterializedTable updatedMaterializedTable =
                op.getCatalogMaterializedTable()
                        .copy(
                                suspendMaterializedTable.getRefreshStatus(),
                                suspendMaterializedTable
                                        .getRefreshHandlerDescription()
                                        .orElse(null),
                                suspendMaterializedTable.getSerializedRefreshHandler());
        AlterMaterializedTableChangeOperation alterMaterializedTableChangeOperation =
                new AlterMaterializedTableChangeOperation(
                        tableIdentifier, op.getTableChanges(), updatedMaterializedTable);
        operationExecutor.callExecutableOperation(
                handle, alterMaterializedTableChangeOperation);
        // 3. resume the materialized table
        try {
            executeContinuousRefreshJob(
                    operationExecutor,
                    handle,
                    updatedMaterializedTable,
                    tableIdentifier,
                    Collections.emptyMap(),
                    Optional.empty());
        } catch (Exception e) {
            // Roll back the changes to the materialized table and restore the continuous
            // refresh job
            LOG.warn(
                    "Failed to start the continuous refresh job for materialized table {} using new query {}, rollback to origin query {}.",
                    tableIdentifier,
                    op.getCatalogMaterializedTable().getExpandedQuery(),
                    suspendMaterializedTable.getExpandedQuery(),
                    e);
            // Undo the catalog change first...
            AlterMaterializedTableChangeOperation rollbackChangeOperation =
                    generateRollbackAlterMaterializedTableOperation(
                            suspendMaterializedTable, alterMaterializedTableChangeOperation);
            operationExecutor.callExecutableOperation(handle, rollbackChangeOperation);
            // ...then restart the old job from the restore path recorded at suspension.
            ContinuousRefreshHandler continuousRefreshHandler =
                    deserializeContinuousHandler(
                            suspendMaterializedTable.getSerializedRefreshHandler());
            executeContinuousRefreshJob(
                    operationExecutor,
                    handle,
                    suspendMaterializedTable,
                    tableIdentifier,
                    Collections.emptyMap(),
                    continuousRefreshHandler.getRestorePath());
            throw new SqlExecutionException(
                    String.format(
                            "Failed to start the continuous refresh job using new query %s when altering materialized table %s select query.",
                            op.getCatalogMaterializedTable().getExpandedQuery(),
                            tableIdentifier),
                    e);
        }
    } else if (CatalogMaterializedTable.RefreshStatus.SUSPENDED
            == oldMaterializedTable.getRefreshStatus()) {
        // alter schema & definition query & refresh handler (reset savepoint path of refresh
        // handler)
        List<MaterializedTableChange> tableChanges = new ArrayList<>(op.getTableChanges());
        TableChange.ModifyRefreshHandler modifyRefreshHandler =
                generateResetSavepointTableChange(
                        oldMaterializedTable.getSerializedRefreshHandler());
        tableChanges.add(modifyRefreshHandler);
        CatalogMaterializedTable updatedMaterializedTable =
                op.getCatalogMaterializedTable()
                        .copy(
                                oldMaterializedTable.getRefreshStatus(),
                                modifyRefreshHandler.getRefreshHandlerDesc(),
                                modifyRefreshHandler.getRefreshHandlerBytes());
        AlterMaterializedTableChangeOperation alterMaterializedTableChangeOperation =
                new AlterMaterializedTableChangeOperation(
                        tableIdentifier, tableChanges, updatedMaterializedTable);
        operationExecutor.callExecutableOperation(
                handle, alterMaterializedTableChangeOperation);
    } else {
        throw new SqlExecutionException(
                String.format(
                        "Materialized table %s is being initialized and does not support alter operation.",
                        tableIdentifier));
    }
    return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
}
/**
 * Builds the inverse ALTER operation for a failed materialized-table change so the catalog
 * can be restored to {@code oldMaterializedTable}.
 *
 * <p>Supported inversions: AddColumn → dropColumn, ModifyRefreshHandler → restore the old
 * handler, ModifyDefinitionQuery → restore the old expanded query. Any other change type is
 * rejected, since no inverse is defined for it here.
 */
private AlterMaterializedTableChangeOperation generateRollbackAlterMaterializedTableOperation(
        CatalogMaterializedTable oldMaterializedTable,
        AlterMaterializedTableChangeOperation op) {
    List<MaterializedTableChange> rollbackChanges = new ArrayList<>();
    for (TableChange change : op.getTableChanges()) {
        if (change instanceof TableChange.AddColumn) {
            // an added column is rolled back by dropping it again
            String columnName = ((TableChange.AddColumn) change).getColumn().getName();
            rollbackChanges.add(TableChange.dropColumn(columnName));
        } else if (change instanceof TableChange.ModifyRefreshHandler) {
            rollbackChanges.add(
                    TableChange.modifyRefreshHandler(
                            oldMaterializedTable.getRefreshHandlerDescription().orElse(null),
                            oldMaterializedTable.getSerializedRefreshHandler()));
        } else if (change instanceof TableChange.ModifyDefinitionQuery) {
            rollbackChanges.add(
                    TableChange.modifyDefinitionQuery(oldMaterializedTable.getExpandedQuery()));
        } else {
            throw new ValidationException(
                    String.format(
                            "Failed to generate rollback changes for materialized table '%s'. "
                                    + "Unsupported table change detected: %s. ",
                            op.getTableIdentifier(), change));
        }
    }
    return new AlterMaterializedTableChangeOperation(
            op.getTableIdentifier(), rollbackChanges, oldMaterializedTable);
}
/**
 * Produces a refresh-handler table change whose handler is rebuilt from execution target,
 * cluster id and job id only — i.e. without any restore (savepoint) path the serialized
 * handler may have carried.
 */
private TableChange.ModifyRefreshHandler generateResetSavepointTableChange(
        byte[] serializedContinuousHandler) {
    ContinuousRefreshHandler originalHandler =
            deserializeContinuousHandler(serializedContinuousHandler);
    ContinuousRefreshHandler resetHandler =
            new ContinuousRefreshHandler(
                    originalHandler.getExecutionTarget(),
                    originalHandler.getClusterId(),
                    originalHandler.getJobId());
    return TableChange.modifyRefreshHandler(
            resetHandler.asSummaryString(), serializeContinuousHandler(resetHandler));
}
/**
 * Drops a materialized table, tearing down its background refresh first.
 *
 * <p>For a non-existent table, honors IF EXISTS. For an ACTIVATED/SUSPENDED table: in FULL
 * refresh mode the external workflow is deleted; in CONTINUOUS mode a running (ACTIVATED)
 * refresh job is cancelled. A table still INITIALIZING cannot be dropped.
 */
private ResultFetcher callDropMaterializedTableOperation(
        OperationExecutor operationExecutor,
        OperationHandle handle,
        DropMaterializedTableOperation dropMaterializedTableOperation) {
    ObjectIdentifier tableIdentifier = dropMaterializedTableOperation.getTableIdentifier();
    boolean tableExists = operationExecutor.tableExists(tableIdentifier);
    if (!tableExists) {
        if (dropMaterializedTableOperation.isIfExists()) {
            LOG.info(
                    "Materialized table {} does not exists, skip the drop operation.",
                    tableIdentifier);
            return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
        } else {
            throw new ValidationException(
                    String.format(
                            "Materialized table with identifier %s does not exist.",
                            tableIdentifier));
        }
    }
    ResolvedCatalogMaterializedTable materializedTable =
            getCatalogMaterializedTable(operationExecutor, tableIdentifier);
    CatalogMaterializedTable.RefreshStatus refreshStatus = materializedTable.getRefreshStatus();
    if (CatalogMaterializedTable.RefreshStatus.ACTIVATED == refreshStatus
            || CatalogMaterializedTable.RefreshStatus.SUSPENDED == refreshStatus) {
        CatalogMaterializedTable.RefreshMode refreshMode = materializedTable.getRefreshMode();
        if (CatalogMaterializedTable.RefreshMode.FULL == refreshMode) {
            // full refresh mode: remove the periodic workflow from the external scheduler
            deleteRefreshWorkflow(tableIdentifier, materializedTable);
        } else if (CatalogMaterializedTable.RefreshMode.CONTINUOUS == refreshMode
                && CatalogMaterializedTable.RefreshStatus.ACTIVATED == refreshStatus) {
            // continuous mode: only an ACTIVATED table has a running job to cancel
            cancelContinuousRefreshJob(
                    operationExecutor, handle, tableIdentifier, materializedTable);
        }
    } else if (CatalogMaterializedTable.RefreshStatus.INITIALIZING == refreshStatus) {
        throw new ValidationException(
                String.format(
                        "Current refresh status of materialized table %s is initializing, skip the drop operation.",
                        tableIdentifier.asSerializableString()));
    }
    // Finally remove the table itself from the catalog.
    operationExecutor.callExecutableOperation(handle, dropMaterializedTableOperation);
    return ResultFetcher.fromTableResult(handle, TABLE_RESULT_OK, false);
}
/**
 * Best-effort cancellation of the continuous refresh job backing a materialized table.
 *
 * <p>If the job is already in a terminal state, nothing is done. Otherwise a cancel is
 * issued; if the cancel throws, the status is re-read — the failure is only surfaced when
 * the job is still not terminal afterwards (the cancel may have raced with the job ending
 * on its own).
 */
private void cancelContinuousRefreshJob(
        OperationExecutor operationExecutor,
        OperationHandle handle,
        ObjectIdentifier tableIdentifier,
        CatalogMaterializedTable materializedTable) {
    ContinuousRefreshHandler refreshHandler =
            deserializeContinuousHandler(materializedTable.getSerializedRefreshHandler());
    // get job running status
    JobStatus jobStatus = getJobStatus(operationExecutor, handle, refreshHandler);
    if (!jobStatus.isTerminalState()) {
        try {
            cancelJob(operationExecutor, handle, refreshHandler);
        } catch (Exception e) {
            // Re-check before deciding whether the failure is fatal: the job may have
            // reached a terminal state concurrently with our cancel attempt.
            jobStatus = getJobStatus(operationExecutor, handle, refreshHandler);
            if (!jobStatus.isTerminalState()) {
                throw new SqlExecutionException(
                        String.format(
                                "Failed to drop the materialized table %s because the continuous refresh job %s could not be canceled."
                                        + " The current status of the continuous refresh job is %s.",
                                tableIdentifier, refreshHandler.getJobId(), jobStatus),
                        e);
            } else {
                LOG.warn(
                        "An exception occurred while canceling the continuous refresh job {} for materialized table {},"
                                + " but since the job is in a terminal state, skip the cancel operation.",
                        refreshHandler.getJobId(),
                        tableIdentifier);
            }
        }
    } else {
        LOG.info(
                "No need to cancel the continuous refresh job {} for materialized table {} as it is not currently running.",
                refreshHandler.getJobId(),
                tableIdentifier);
    }
}
/**
 * Deletes the external periodic-refresh workflow of a FULL-refresh-mode table.
 *
 * <p>The table's serialized refresh handler is deserialized with the scheduler's own
 * serializer and handed to the scheduler as a delete request. A configured workflow
 * scheduler is mandatory — without one the stored workflow cannot be reached.
 */
private void deleteRefreshWorkflow(
        ObjectIdentifier tableIdentifier, CatalogMaterializedTable catalogMaterializedTable) {
    if (workflowScheduler == null) {
        throw new SqlExecutionException(
                "The workflow scheduler must be configured when dropping materialized table in full refresh mode.");
    }
    try {
        RefreshHandlerSerializer<?> serializer = workflowScheduler.getRefreshHandlerSerializer();
        RefreshHandler refreshHandler =
                serializer.deserialize(
                        catalogMaterializedTable.getSerializedRefreshHandler(),
                        userCodeClassLoader);
        workflowScheduler.deleteRefreshWorkflow(new DeleteRefreshWorkflow(refreshHandler));
    } catch (Exception e) {
        throw new SqlExecutionException(
                String.format(
                        "Failed to delete the refresh workflow for materialized table %s.",
                        tableIdentifier),
                e);
    }
}
/**
 * Computes the session configuration used to initialize the session that runs the periodic
 * refresh job.
 *
 * <p>Keeps only entries that differ from (or are absent in) the default context's Flink
 * config, then strips entries that are meaningless for the refresh job: the resources
 * download directory and all workflow-scheduler options.
 *
 * @param operationExecutor used to access the session context
 * @return the pruned session configuration map
 */
private Map<String, String> getSessionInitializationConf(OperationExecutor operationExecutor) {
    Map<String, String> sessionConf =
            operationExecutor.getSessionContext().getSessionConf().toMap();
    Map<String, String> defaultContextConf =
            operationExecutor.getSessionContext().getDefaultContext().getFlinkConfig().toMap();
    // Drop entries that merely repeat the default context.
    sessionConf
            .entrySet()
            .removeIf(
                    entry ->
                            defaultContextConf.containsKey(entry.getKey())
                                    && defaultContextConf
                                            .get(entry.getKey())
                                            .equals(entry.getValue()));
    // Drop options that are useless (or wrong) for the refresh job's session.
    sessionConf.remove(TableConfigOptions.RESOURCES_DOWNLOAD_DIR.key());
    sessionConf.keySet().removeIf(key -> key.startsWith(WORKFLOW_SCHEDULER_PREFIX));
    return sessionConf;
}
/**
 * Queries the current status of the continuous refresh job via a DESCRIBE JOB operation
 * against the cluster recorded in the refresh handler.
 */
private static JobStatus getJobStatus(
        OperationExecutor operationExecutor,
        OperationHandle handle,
        ContinuousRefreshHandler refreshHandler) {
    ResultFetcher resultFetcher =
            operationExecutor.callDescribeJobOperation(
                    getTableEnvironment(operationExecutor, refreshHandler),
                    handle,
                    new DescribeJobOperation(refreshHandler.getJobId()));
    List<RowData> result = fetchAllResults(resultFetcher);
    // Column index 2 of the single DESCRIBE JOB row is read as the status string —
    // assumes that schema position; verify against the DESCRIBE JOB result schema.
    String jobStatus = result.get(0).getString(2).toString();
    return JobStatus.valueOf(jobStatus);
}
/**
 * Cancels the continuous refresh job without a savepoint: issues a STOP JOB with
 * withSavepoint=false and withDrain=false for the handler's job id.
 */
private static void cancelJob(
        OperationExecutor operationExecutor,
        OperationHandle handle,
        ContinuousRefreshHandler refreshHandler) {
    operationExecutor.callStopJobOperation(
            getTableEnvironment(operationExecutor, refreshHandler),
            handle,
            new StopJobOperation(refreshHandler.getJobId(), false, false));
}
/**
 * Stops the continuous refresh job with a savepoint and returns the savepoint path.
 *
 * @throws ValidationException if no savepoint directory is configured in the session conf
 */
private static String stopJobWithSavepoint(
        OperationExecutor executor,
        OperationHandle handle,
        ContinuousRefreshHandler refreshHandler) {
    // check savepoint dir is configured
    Optional<String> savepointDir =
            executor.getSessionContext().getSessionConf().getOptional(SAVEPOINT_DIRECTORY);
    if (savepointDir.isEmpty()) {
        throw new ValidationException(
                "Savepoint directory is not configured, can't stop job with savepoint.");
    }
    String jobId = refreshHandler.getJobId();
    ResultFetcher resultFetcher =
            executor.callStopJobOperation(
                    getTableEnvironment(executor, refreshHandler),
                    handle,
                    new StopJobOperation(jobId, true, false));
    List<RowData> results = fetchAllResults(resultFetcher);
    // First column of the first row is read as the savepoint path — assumes the STOP JOB
    // result schema; verify against callStopJobOperation.
    return results.get(0).getString(0).toString();
}
/**
 * Builds a table environment pointed at the cluster recorded in the refresh handler, so
 * job operations (describe/stop) reach the cluster that actually runs the refresh job.
 */
private static TableEnvironmentInternal getTableEnvironment(
        OperationExecutor executor, ContinuousRefreshHandler refreshHandler) {
    String executionTarget = refreshHandler.getExecutionTarget();
    Configuration configuration = new Configuration();
    configuration.set(TARGET, executionTarget);
    // Session/application targets carry a cluster id under a target-specific key.
    getClusterIdKeyName(executionTarget)
            .ifPresent(key -> configuration.setString(key, refreshHandler.getClusterId()));
    return executor.getTableEnvironment(
            executor.getSessionContext().getSessionState().resourceManager, configuration);
}
/**
 * Deserializes the catalog-stored refresh handler bytes of a continuous-mode table using
 * the user code class loader.
 *
 * @throws SqlExecutionException if the bytes cannot be deserialized
 */
private ContinuousRefreshHandler deserializeContinuousHandler(byte[] serializedRefreshHandler) {
    try {
        return ContinuousRefreshHandlerSerializer.INSTANCE.deserialize(
                serializedRefreshHandler, userCodeClassLoader);
    } catch (IOException | ClassNotFoundException e) {
        throw new SqlExecutionException(
                "Deserialize ContinuousRefreshHandler occur exception.", e);
    }
}
/**
 * Serializes a continuous refresh handler for storage in the catalog.
 *
 * @throws SqlExecutionException if serialization fails
 */
private byte[] serializeContinuousHandler(ContinuousRefreshHandler refreshHandler) {
    try {
        return ContinuousRefreshHandlerSerializer.INSTANCE.serialize(refreshHandler);
    } catch (IOException e) {
        throw new SqlExecutionException(
                "Serialize ContinuousRefreshHandler occur exception.", e);
    }
}
/**
 * Resolves the identifier to a materialized table, rejecting any other table kind.
 *
 * @throws ValidationException if the resolved table is not a materialized table
 */
private ResolvedCatalogMaterializedTable getCatalogMaterializedTable(
        OperationExecutor operationExecutor, ObjectIdentifier tableIdentifier) {
    ResolvedCatalogBaseTable<?> baseTable = operationExecutor.getTable(tableIdentifier);
    if (baseTable.getTableKind() != MATERIALIZED_TABLE) {
        throw new ValidationException(
                String.format(
                        "Table %s is not a materialized table, does not support materialized table related operation.",
                        tableIdentifier));
    }
    return (ResolvedCatalogMaterializedTable) baseTable;
}
/**
 * Persists a new refresh status and refresh handler for a materialized table into the
 * catalog and returns the updated table snapshot.
 */
private CatalogMaterializedTable updateRefreshHandler(
        OperationExecutor operationExecutor,
        OperationHandle operationHandle,
        ObjectIdentifier materializedTableIdentifier,
        CatalogMaterializedTable catalogMaterializedTable,
        CatalogMaterializedTable.RefreshStatus refreshStatus,
        String refreshHandlerSummary,
        byte[] serializedRefreshHandler) {
    // Snapshot of the table carrying the new status and handler.
    CatalogMaterializedTable newTable =
            catalogMaterializedTable.copy(
                    refreshStatus, refreshHandlerSummary, serializedRefreshHandler);
    List<MaterializedTableChange> changes = new ArrayList<>();
    changes.add(TableChange.modifyRefreshStatus(refreshStatus));
    changes.add(
            TableChange.modifyRefreshHandler(refreshHandlerSummary, serializedRefreshHandler));
    // Write the change through to the catalog.
    operationExecutor.callExecutableOperation(
            operationHandle,
            new AlterMaterializedTableChangeOperation(
                    materializedTableIdentifier, changes, newTable));
    return newTable;
}
/**
 * Generates the INSERT INTO statement that feeds a materialized table from its definition
 * query, with dynamic options rendered as an OPTIONS hint on the target table.
 */
@VisibleForTesting
protected static String getInsertStatement(
        ObjectIdentifier materializedTableIdentifier,
        String definitionQuery,
        Map<String, String> dynamicOptions) {
    String targetTable =
            generateTableWithDynamicOptions(materializedTableIdentifier, dynamicOptions);
    return String.format("INSERT INTO %s\n%s", targetTable, definitionQuery);
}
/**
 * Renders a table reference with an optional {@code /*+ OPTIONS(...) *&#47;} hint built
 * from the given dynamic options.
 */
private static String generateTableWithDynamicOptions(
        ObjectIdentifier objectIdentifier, Map<String, String> dynamicOptions) {
    String serializedIdentifier = objectIdentifier.asSerializableString();
    if (dynamicOptions.isEmpty()) {
        return serializedIdentifier;
    }
    String hints =
            dynamicOptions.entrySet().stream()
                    .map(e -> String.format("'%s'='%s'", e.getKey(), e.getValue()))
                    .collect(Collectors.joining(", "));
    return serializedIdentifier + String.format(" /*+ OPTIONS(%s) */", hints);
}
/**
 * Drains a result fetcher into a single list, paging with tokens until the fetcher
 * signals completion with a null next token.
 */
private static List<RowData> fetchAllResults(ResultFetcher resultFetcher) {
    List<RowData> allRows = new ArrayList<>();
    Long nextToken = 0L;
    do {
        ResultSet resultSet = resultFetcher.fetchResults(nextToken, Integer.MAX_VALUE);
        allRows.addAll(resultSet.getData());
        nextToken = resultSet.getNextToken();
    } while (nextToken != null);
    return allRows;
}
/**
 * Dispatches the refresh statement to the right submission path based on the session's
 * execution target: application targets get a dedicated deployment, everything else is
 * submitted to the existing cluster. Local/unset targets are rejected.
 */
private static JobExecutionResult executeRefreshJob(
        String script,
        Configuration executionConfig,
        OperationExecutor operationExecutor,
        OperationHandle operationHandle) {
    String executeTarget = operationExecutor.getSessionContext().getSessionConf().get(TARGET);
    if (executeTarget == null || executeTarget.isEmpty() || "local".equals(executeTarget)) {
        // Fixed missing separator: the two concatenated fragments previously rendered
        // as "...detected: %s.Currently, ..." without a space.
        String errorMessage =
                String.format(
                        "Unsupported execution target detected: %s. "
                                + "Currently, only the following execution targets are supported: "
                                + "'remote', 'yarn-session', 'yarn-application', 'kubernetes-session', 'kubernetes-application'. ",
                        executeTarget);
        LOG.error(errorMessage);
        throw new ValidationException(errorMessage);
    }
    if (executeTarget.endsWith("application")) {
        return executeApplicationJob(script, executionConfig, operationExecutor);
    } else {
        return executeNonApplicationJob(
                script, executionConfig, operationExecutor, operationHandle);
    }
}
/**
 * Submits the refresh statement to the existing session/remote cluster and returns the
 * resulting job's identifying information.
 *
 * @throws ValidationException if no session cluster id is available for the target
 */
private static JobExecutionResult executeNonApplicationJob(
        String script,
        Configuration executionConfig,
        OperationExecutor operationExecutor,
        OperationHandle operationHandle) {
    String executeTarget = operationExecutor.getSessionContext().getSessionConf().get(TARGET);
    String clusterId =
            operationExecutor
                    .getSessionClusterId()
                    .orElseThrow(
                            () -> {
                                String errorMessage =
                                        String.format(
                                                "No cluster ID found when executing materialized table refresh job. Execution target is : %s",
                                                executeTarget);
                                LOG.error(errorMessage);
                                return new ValidationException(errorMessage);
                            });
    ResultFetcher resultFetcher =
            operationExecutor.executeStatement(operationHandle, executionConfig, script);
    List<RowData> results = fetchAllResults(resultFetcher);
    // First column of the first row is read as the submitted job id — assumes the
    // statement result schema puts the job id at index 0; verify against executeStatement.
    String jobId = results.get(0).getString(0).toString();
    return new JobExecutionResult(executeTarget, clusterId, jobId);
}
/**
 * Deploys the refresh script as a dedicated application-mode cluster running the SQL
 * driver, returning the target, new cluster id and pre-assigned job id.
 */
private static JobExecutionResult executeApplicationJob(
        String script, Configuration executionConfig, OperationExecutor operationExecutor) {
    // The script is handed to SqlDriver via its program arguments.
    List<String> arguments = new ArrayList<>();
    arguments.add("--" + SqlDriver.OPTION_SQL_SCRIPT.getLongOpt());
    arguments.add(script);
    Configuration mergedConfig =
            new Configuration(operationExecutor.getSessionContext().getSessionConf());
    mergedConfig.addAll(executionConfig);
    // Pre-assign the job id so it is known here without querying the new cluster.
    JobID jobId = new JobID();
    mergedConfig.set(PIPELINE_FIXED_JOB_ID, jobId.toString());
    ApplicationConfiguration applicationConfiguration =
            new ApplicationConfiguration(
                    arguments.toArray(new String[0]), SqlDriver.class.getName());
    try {
        String clusterId =
                new ApplicationClusterDeployer(new DefaultClusterClientServiceLoader())
                        .run(mergedConfig, applicationConfiguration)
                        .toString();
        return new JobExecutionResult(mergedConfig.get(TARGET), clusterId, jobId.toString());
    } catch (Throwable t) {
        LOG.error("Failed to deploy script {} to application cluster.", script, t);
        throw new SqlGatewayException("Failed to deploy script to cluster.", t);
    }
}
private static | MaterializedTableManager |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/web/ServletContextAwareBeanWacTests.java | {
"start": 926,
"end": 1018
} | class ____ in fact use JUnit to run JUnit. ;)
*
* @author Sam Brannen
* @since 4.0.2
*/
| does |
java | ReactiveX__RxJava | src/jmh/java/io/reactivex/rxjava3/xmapz/ObservableSwitchMapSinglePerf.java | {
"start": 1073,
"end": 2709
} | class ____ {
@Param({ "1", "10", "100", "1000", "10000", "100000", "1000000" })
public int count;
Observable<Integer> observableConvert;
Observable<Integer> observableDedicated;
Observable<Integer> observablePlain;
@Setup
public void setup() {
Integer[] sourceArray = new Integer[count];
Arrays.fill(sourceArray, 777);
Observable<Integer> source = Observable.fromArray(sourceArray);
observablePlain = source.switchMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.just(v);
}
});
observableConvert = source.switchMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Single.just(v).toObservable();
}
});
observableDedicated = source.switchMapSingle(new Function<Integer, Single<Integer>>() {
@Override
public Single<Integer> apply(Integer v) {
return Single.just(v);
}
});
}
@Benchmark
public Object observablePlain(Blackhole bh) {
return observablePlain.subscribeWith(new PerfConsumer(bh));
}
@Benchmark
public Object observableConvert(Blackhole bh) {
return observableConvert.subscribeWith(new PerfConsumer(bh));
}
@Benchmark
public Object observableDedicated(Blackhole bh) {
return observableDedicated.subscribeWith(new PerfConsumer(bh));
}
}
| ObservableSwitchMapSinglePerf |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/graph/instantiation/internal/ArgumentReader.java | {
"start": 631,
"end": 1656
} | class ____<A> implements DomainResultAssembler<A> {
private final DomainResultAssembler<A> delegateAssembler;
private final String alias;
public ArgumentReader(DomainResultAssembler<A> delegateAssembler, String alias) {
this.delegateAssembler = delegateAssembler;
this.alias = alias;
}
public String getAlias() {
return alias;
}
@Override
public @Nullable A assemble(RowProcessingState rowProcessingState) {
return delegateAssembler.assemble( rowProcessingState );
}
@Override
public JavaType<A> getAssembledJavaType() {
return delegateAssembler.getAssembledJavaType();
}
@Override
public void resolveState(RowProcessingState rowProcessingState) {
delegateAssembler.resolveState( rowProcessingState );
}
@Override
public @Nullable Initializer<?> getInitializer() {
return delegateAssembler.getInitializer();
}
@Override
public <X> void forEachResultAssembler(BiConsumer<Initializer<?>, X> consumer, X arg) {
delegateAssembler.forEachResultAssembler( consumer, arg );
}
}
| ArgumentReader |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/RedeliveryErrorHandlerAsyncDelayedTwoCamelContextIssueTest.java | {
"start": 3062,
"end": 3256
} | class ____ {
int counter;
public void doSomething() {
if (counter++ < 2) {
throw new RuntimeException();
}
}
}
}
| ProblematicBean |
java | quarkusio__quarkus | integration-tests/mongodb-panache/src/main/java/io/quarkus/it/mongodb/panache/test/TestImperativeRepository.java | {
"start": 184,
"end": 275
} | class ____ implements PanacheMongoRepository<TestImperativeEntity> {
}
| TestImperativeRepository |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/scheduler/BasicSchedulersStressTest.java | {
"start": 8184,
"end": 9451
} | class ____ {
private final ParallelScheduler scheduler =
new ParallelScheduler(2, Thread::new);
{
scheduler.init();
}
@Actor
public void disposeGracefully(IIZ_Result r) {
final CountDownLatch latch = new CountDownLatch(1);
scheduler.disposeGracefully().doFinally(sig -> latch.countDown()).subscribe();
r.r1 = scheduler.state.initialResource.hashCode();
try {
latch.await(5, TimeUnit.SECONDS);
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@Actor
public void dispose(IIZ_Result r) {
scheduler.dispose();
r.r2 = scheduler.state.initialResource.hashCode();
}
@Arbiter
public void arbiter(IIZ_Result r) {
// Validate both disposals left the Scheduler in consistent state,
// assuming the await process coordinates on the resources as identified
// by r.r1 and r.r2, which should be equal.
boolean consistentState = r.r1 == r.r2;
r.r3 = consistentState && scheduler.isDisposed();
if (consistentState) {
//when that condition is true, we erase the r1/r2 state. that should greatly limit
//the output of "interesting acceptable state" in the dump should and error occur
r.r1 = r.r2 = 0;
}
}
}
}
| ParallelSchedulerDisposeGracefullyAndDisposeStressTest |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/aot/hint/AuthorizeReturnObjectCoreHintsRegistrarTests.java | {
"start": 2876,
"end": 3103
} | class ____ {
@AuthorizeReturnObject
public MySubObject get() {
return new MySubObject();
}
@AuthorizeReturnObject
public MyInterface getInterface() {
return new MyImplementation();
}
}
public static | MyObject |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/server/TThreadedSelectorServer.java | {
"start": 3765,
"end": 10922
} | enum ____ {
/**
* Require accepted connection registration to be handled by the executor. If the worker pool
* is saturated, further accepts will be closed immediately. Slightly increases latency due to
* an extra scheduling.
*/
FAIR_ACCEPT,
/**
* Handle the accepts as fast as possible, disregarding the status of the executor service.
*/
FAST_ACCEPT
}
private AcceptPolicy acceptPolicy = AcceptPolicy.FAST_ACCEPT;
public Args(TNonblockingServerTransport transport) {
super(transport);
}
public Args selectorThreads(int i) {
selectorThreads = i;
return this;
}
public int getSelectorThreads() {
return selectorThreads;
}
public Args workerThreads(int i) {
workerThreads = i;
return this;
}
public int getWorkerThreads() {
return workerThreads;
}
public int getStopTimeoutVal() {
return stopTimeoutVal;
}
public Args stopTimeoutVal(int stopTimeoutVal) {
this.stopTimeoutVal = stopTimeoutVal;
return this;
}
public TimeUnit getStopTimeoutUnit() {
return stopTimeoutUnit;
}
public Args stopTimeoutUnit(TimeUnit stopTimeoutUnit) {
this.stopTimeoutUnit = stopTimeoutUnit;
return this;
}
public ExecutorService getExecutorService() {
return executorService;
}
public Args executorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
public int getAcceptQueueSizePerThread() {
return acceptQueueSizePerThread;
}
public Args acceptQueueSizePerThread(int acceptQueueSizePerThread) {
this.acceptQueueSizePerThread = acceptQueueSizePerThread;
return this;
}
public AcceptPolicy getAcceptPolicy() {
return acceptPolicy;
}
public Args acceptPolicy(AcceptPolicy acceptPolicy) {
this.acceptPolicy = acceptPolicy;
return this;
}
public void validate() {
if (selectorThreads <= 0) {
throw new IllegalArgumentException("selectorThreads must be positive.");
}
if (workerThreads < 0) {
throw new IllegalArgumentException("workerThreads must be non-negative.");
}
if (acceptQueueSizePerThread <= 0) {
throw new IllegalArgumentException("acceptQueueSizePerThread must be positive.");
}
}
}
// The thread handling all accepts
private AcceptThread acceptThread;
// Threads handling events on client transports
private final Set<SelectorThread> selectorThreads = new HashSet<>();
// This wraps all the functionality of queueing and thread pool management
// for the passing of Invocations from the selector thread(s) to the workers
// (if any).
private final ExecutorService invoker;
private final Args args;
/** Create the server with the specified Args configuration */
public TThreadedSelectorServer(Args args) {
super(args);
args.validate();
invoker = args.executorService == null ? createDefaultExecutor(args) : args.executorService;
this.args = args;
}
/**
* Start the accept and selector threads running to deal with clients.
*
* @return true if everything went ok, false if we couldn't start for some reason.
*/
@Override
protected boolean startThreads() {
try {
for (int i = 0; i < args.selectorThreads; ++i) {
selectorThreads.add(new SelectorThread(args.acceptQueueSizePerThread));
}
acceptThread =
new AcceptThread(
(TNonblockingServerTransport) serverTransport_,
createSelectorThreadLoadBalancer(selectorThreads));
for (SelectorThread thread : selectorThreads) {
thread.start();
}
acceptThread.start();
return true;
} catch (IOException e) {
LOGGER.error("Failed to start threads!", e);
return false;
}
}
/** Joins the accept and selector threads and shuts down the executor service. */
@Override
protected void waitForShutdown() {
try {
joinThreads();
} catch (InterruptedException e) {
// Non-graceful shutdown occurred
LOGGER.error("Interrupted while joining threads!", e);
}
gracefullyShutdownInvokerPool();
}
protected void joinThreads() throws InterruptedException {
// wait until the io threads exit
acceptThread.join();
for (SelectorThread thread : selectorThreads) {
thread.join();
}
}
/** Stop serving and shut everything down. */
@Override
public void stop() {
stopped_ = true;
// Stop queuing connect attempts asap
stopListening();
if (acceptThread != null) {
acceptThread.wakeupSelector();
}
for (SelectorThread thread : selectorThreads) {
if (thread != null) thread.wakeupSelector();
}
}
protected void gracefullyShutdownInvokerPool() {
// try to gracefully shut down the executor service
invoker.shutdown();
// Loop until awaitTermination finally does return without a interrupted
// exception. If we don't do this, then we'll shut down prematurely. We want
// to let the executorService clear it's task queue, closing client sockets
// appropriately.
long timeoutMS = args.stopTimeoutUnit.toMillis(args.stopTimeoutVal);
long now = System.currentTimeMillis();
while (timeoutMS >= 0) {
try {
invoker.awaitTermination(timeoutMS, TimeUnit.MILLISECONDS);
break;
} catch (InterruptedException ix) {
long newnow = System.currentTimeMillis();
timeoutMS -= (newnow - now);
now = newnow;
}
}
}
/**
* We override the standard invoke method here to queue the invocation for invoker service instead
* of immediately invoking. If there is no thread pool, handle the invocation inline on this
* thread
*/
@Override
protected boolean requestInvoke(FrameBuffer frameBuffer) {
Runnable invocation = getRunnable(frameBuffer);
if (invoker != null) {
try {
invoker.execute(invocation);
return true;
} catch (RejectedExecutionException rx) {
LOGGER.warn("ExecutorService rejected execution!", rx);
return false;
}
} else {
// Invoke on the caller's thread
invocation.run();
return true;
}
}
protected Runnable getRunnable(FrameBuffer frameBuffer) {
return new Invocation(frameBuffer);
}
/** Helper to create the invoker if one is not specified */
protected static ExecutorService createDefaultExecutor(Args options) {
return (options.workerThreads > 0) ? Executors.newFixedThreadPool(options.workerThreads) : null;
}
private static BlockingQueue<TNonblockingTransport> createDefaultAcceptQueue(int queueSize) {
if (queueSize == 0) {
// Unbounded queue
return new LinkedBlockingQueue<TNonblockingTransport>();
}
return new ArrayBlockingQueue<TNonblockingTransport>(queueSize);
}
/**
* The thread that selects on the server transport (listen socket) and accepts new connections to
* hand off to the IO selector threads
*/
protected | AcceptPolicy |
java | apache__flink | flink-core/src/test/java/org/apache/flink/testutils/ClassLoaderUtils.java | {
"start": 11434,
"end": 12090
} | class ____.
*
* <p>NOTE: Even though this method may throw IOExceptions, we do not declare those and rather
* wrap them in Runtime Exceptions. While this is generally discouraged, we do this here because
* it is merely a test utility and not production code, and it makes it easier to use this
* method during the initialization of variables and especially static variables.
*/
public static ObjectAndClassLoader<Exception> createExceptionObjectFromNewClassLoader() {
return createObjectFromNewClassLoader(
"TestExceptionForSerialization",
Exception.class,
"public | path |
java | quarkusio__quarkus | extensions/jackson/deployment/src/main/java/io/quarkus/jackson/deployment/JacksonProcessor.java | {
"start": 14637,
"end": 14933
} | class ____ be registered
// for reflection.
for (AnnotationInstance resolverInstance : index.getAnnotations(JSON_TYPE_ID_RESOLVER)) {
AnnotationValue value = resolverInstance.value("value");
if (value != null) {
// Add the type-id-resolver | must |
java | quarkusio__quarkus | extensions/vertx/deployment/src/test/java/io/quarkus/vertx/deployment/VertxCommonProducerTest.java | {
"start": 911,
"end": 1398
} | class ____ {
@Inject
Vertx vertx;
public void verify() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
vertx.fileSystem().readFile("files/lorem.txt", ar -> {
if (ar.failed()) {
ar.cause().printStackTrace();
} else {
latch.countDown();
}
});
latch.await(5, TimeUnit.SECONDS);
}
}
}
| BeanUsingBareVertx |
java | quarkusio__quarkus | test-framework/junit5-internal/src/main/java/io/quarkus/test/ExclusivityChecker.java | {
"start": 91,
"end": 998
} | class ____ {
public static final String IO_QUARKUS_TESTING_TYPE = "io.quarkus.testing.type";
public static void checkTestType(ExtensionContext extensionContext, Class<?> current) {
ExtensionContext.Store store = extensionContext.getRoot().getStore(ExtensionContext.Namespace.GLOBAL);
Class<?> testType = store.get(IO_QUARKUS_TESTING_TYPE, Class.class);
if (testType != null) {
if (testType != QuarkusUnitTest.class && testType != QuarkusDevModeTest.class
&& testType != QuarkusProdModeTest.class) {
throw new IllegalStateException(
"Cannot mix both " + current.getName() + " based tests and " + testType.getName()
+ " based tests in the same run");
}
} else {
store.put(IO_QUARKUS_TESTING_TYPE, current);
}
}
}
| ExclusivityChecker |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/stubbing/StrictStubbingEndToEndTest.java | {
"start": 967,
"end": 3632
} | class ____ {
JUnitCore junit = new JUnitCore();
@After
public void after() {
new StateMaster().clearMockitoListeners();
}
@Test
public void finish_mocking_exception_does_not_hide_the_exception_from_test() {
Result result = junit.run(UnnecessaryStubbing.class);
assertThat(result)
// both exceptions are reported to JUnit:
.fails("unnecessary_stubbing", IllegalStateException.class)
.fails("unnecessary_stubbing", UnnecessaryStubbingException.class);
}
@Test
public void does_not_report_unused_stubbing_if_mismatch_reported() {
Result result = junit.run(ReportMismatchButNotUnusedStubbing.class);
assertThat(result).fails(1, PotentialStubbingProblem.class);
}
@Test
public void strict_stubbing_does_not_leak_to_other_tests() {
Result result =
junit.run(
LenientStrictness1.class,
StrictStubsPassing.class,
LenientStrictness2.class);
// all tests pass, lenient test cases contain incorrect stubbing
assertThat(result).succeeds(5);
}
@Test
public void detects_unfinished_session() {
Result result = junit.run(UnfinishedMocking.class);
assertThat(result)
.fails(
UnfinishedMockingSessionException.class,
"\n"
+ "Unfinished mocking session detected.\n"
+ "Previous MockitoSession was not concluded with 'finishMocking()'.\n"
+ "For examples of correct usage see javadoc for MockitoSession class.");
}
@Test
public void concurrent_sessions_in_different_threads() throws Exception {
final Map<Class, Result> results = new ConcurrentHashMap<Class, Result>();
concurrently(
new Runnable() {
public void run() {
results.put(StrictStubsPassing.class, junit.run(StrictStubsPassing.class));
}
},
new Runnable() {
public void run() {
results.put(
ReportMismatchButNotUnusedStubbing.class,
junit.run(ReportMismatchButNotUnusedStubbing.class));
}
});
assertThat(results.get(StrictStubsPassing.class)).succeeds(1);
assertThat(results.get(ReportMismatchButNotUnusedStubbing.class)).fails(1);
}
public static | StrictStubbingEndToEndTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/UClassIdentTest.java | {
"start": 1103,
"end": 2233
} | class ____ extends AbstractUTreeTest {
@Test
public void equality() throws CouldNotResolveImportException {
new EqualsTester()
.addEqualityGroup(UClassIdent.create("java.util.List"))
.addEqualityGroup(UClassIdent.create("com.sun.tools.javac.util.List"))
.addEqualityGroup(
UClassIdent.create("java.lang.String"),
UClassIdent.create(inliner.resolveClass("java.lang.String")))
.testEquals();
}
@Test
public void serialization() {
SerializableTester.reserializeAndAssert(UClassIdent.create("java.math.BigInteger"));
}
@Test
public void inline() {
ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
context.put(PackageSymbol.class, Symtab.instance(context).rootPackage);
assertInlines("List", UClassIdent.create("java.util.List"));
assertInlines("Map.Entry", UClassIdent.create("java.util.Map.Entry"));
}
@Test
public void importConflicts() {
ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
context.put(PackageSymbol.class, Symtab.instance(context).rootPackage);
// Test fully qualified | UClassIdentTest |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/examples/BufferExamples.java | {
"start": 555,
"end": 1667
} | class ____ {
public void example1() {
Buffer buff = Buffer.buffer();
}
public void example2() {
Buffer buff = Buffer.buffer("some string");
}
public void example4() {
byte[] bytes = new byte[] {1, 3, 5};
Buffer buff = Buffer.buffer(bytes);
}
public void example3() {
Buffer buff = Buffer.buffer("some string", "UTF-16");
}
public void example5() {
Buffer buff = Buffer.buffer(10000);
}
public void example6(NetSocket socket) {
Buffer buff = Buffer.buffer();
buff.appendInt(123).appendString("hello\n");
socket.write(buff);
}
public void example7() {
Buffer buff = Buffer.buffer();
buff.setInt(1000, 123);
buff.setString(0, "hello");
}
public void example8() {
Buffer buff = Buffer.buffer();
for (int i = 0; i < buff.length(); i += 4) {
System.out.println("int value at " + i + " is " + buff.getInt(i));
}
}
public void example9() {
Buffer buff = Buffer.buffer(128);
int pos = 15;
buff.setUnsignedByte(pos, (short) 200);
System.out.println(buff.getUnsignedByte(pos));
}
}
| BufferExamples |
java | elastic__elasticsearch | x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java | {
"start": 5740,
"end": 50476
} | class ____ extends AcknowledgedTransportMasterNodeAction<DownsampleAction.Request> {
private static final Logger logger = LogManager.getLogger(TransportDownsampleAction.class);
private final Client client;
private final IndicesService indicesService;
private final MasterServiceTaskQueue<DownsampleClusterStateUpdateTask> taskQueue;
private final MetadataCreateIndexService metadataCreateIndexService;
private final IndexScopedSettings indexScopedSettings;
private final PersistentTasksService persistentTasksService;
private final DownsampleMetrics downsampleMetrics;
private final ProjectResolver projectResolver;
private final Supplier<Long> nowSupplier;
private static final Set<String> FORBIDDEN_SETTINGS = Set.of(
IndexSettings.DEFAULT_PIPELINE.getKey(),
IndexSettings.FINAL_PIPELINE.getKey(),
IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(),
LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey()
);
private static final Set<String> OVERRIDE_SETTINGS = Set.of(DataTier.TIER_PREFERENCE_SETTING.getKey());
/**
* This is the cluster state task executor for cluster state update actions.
* Visible for testing
*/
static final SimpleBatchedExecutor<DownsampleClusterStateUpdateTask, Void> STATE_UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() {
@Override
public Tuple<ClusterState, Void> executeTask(DownsampleClusterStateUpdateTask task, ClusterState clusterState) throws Exception {
return Tuple.tuple(task.execute(clusterState), null);
}
@Override
public void taskSucceeded(DownsampleClusterStateUpdateTask task, Void unused) {
task.listener.onResponse(AcknowledgedResponse.TRUE);
}
};
@Inject
public TransportDownsampleAction(
Client client,
IndicesService indicesService,
ClusterService clusterService,
TransportService transportService,
ThreadPool threadPool,
MetadataCreateIndexService metadataCreateIndexService,
ActionFilters actionFilters,
ProjectResolver projectResolver,
IndexScopedSettings indexScopedSettings,
PersistentTasksService persistentTasksService,
DownsampleMetrics downsampleMetrics
) {
this(
new OriginSettingClient(client, ClientHelper.ROLLUP_ORIGIN),
indicesService,
clusterService,
transportService,
threadPool,
metadataCreateIndexService,
actionFilters,
projectResolver,
indexScopedSettings,
persistentTasksService,
downsampleMetrics,
clusterService.createTaskQueue("downsample", Priority.URGENT, STATE_UPDATE_TASK_EXECUTOR),
() -> client.threadPool().relativeTimeInMillis()
);
}
// For testing
TransportDownsampleAction(
Client client,
IndicesService indicesService,
ClusterService clusterService,
TransportService transportService,
ThreadPool threadPool,
MetadataCreateIndexService metadataCreateIndexService,
ActionFilters actionFilters,
ProjectResolver projectResolver,
IndexScopedSettings indexScopedSettings,
PersistentTasksService persistentTasksService,
DownsampleMetrics downsampleMetrics,
MasterServiceTaskQueue<DownsampleClusterStateUpdateTask> taskQueue,
Supplier<Long> nowSupplier
) {
super(
DownsampleAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
DownsampleAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.client = client;
this.indicesService = indicesService;
this.metadataCreateIndexService = metadataCreateIndexService;
this.projectResolver = projectResolver;
this.indexScopedSettings = indexScopedSettings;
this.taskQueue = taskQueue;
this.persistentTasksService = persistentTasksService;
this.downsampleMetrics = downsampleMetrics;
this.nowSupplier = nowSupplier;
}
private void recordSuccessMetrics(long startTime) {
recordOperation(startTime, DownsampleMetrics.ActionStatus.SUCCESS);
}
private void recordFailureMetrics(long startTime) {
recordOperation(startTime, DownsampleMetrics.ActionStatus.FAILED);
}
private void recordInvalidConfigurationMetrics(long startTime) {
recordOperation(startTime, DownsampleMetrics.ActionStatus.INVALID_CONFIGURATION);
}
private void recordOperation(long startTime, DownsampleMetrics.ActionStatus status) {
downsampleMetrics.recordOperation(TimeValue.timeValueMillis(nowSupplier.get() - startTime).getMillis(), status);
}
@Override
protected void masterOperation(
Task task,
DownsampleAction.Request request,
ClusterState state,
ActionListener<AcknowledgedResponse> listener
) {
logger.debug(
"Starting downsampling [{}] with [{}] interval",
request.getSourceIndex(),
request.getDownsampleConfig().getFixedInterval()
);
long startTime = nowSupplier.get();
String sourceIndexName = request.getSourceIndex();
IndexNameExpressionResolver.assertExpressionHasNullOrDataSelector(sourceIndexName);
IndexNameExpressionResolver.assertExpressionHasNullOrDataSelector(request.getTargetIndex());
final IndicesAccessControl indicesAccessControl = AuthorizationServiceField.INDICES_PERMISSIONS_VALUE.get(
threadPool.getThreadContext()
);
if (indicesAccessControl != null) {
final IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(sourceIndexName);
if (indexPermissions != null) {
boolean hasDocumentLevelPermissions = indexPermissions.getDocumentPermissions().hasDocumentLevelPermissions();
boolean hasFieldLevelSecurity = indexPermissions.getFieldPermissions().hasFieldLevelSecurity();
if (hasDocumentLevelPermissions || hasFieldLevelSecurity) {
recordInvalidConfigurationMetrics(startTime);
listener.onFailure(
new ElasticsearchException(
"Rollup forbidden for index [" + sourceIndexName + "] with document level or field level security settings."
)
);
return;
}
}
}
final ProjectMetadata projectMetadata = projectResolver.getProjectMetadata(state);
// Assert source index exists
IndexMetadata sourceIndexMetadata = projectMetadata.index(sourceIndexName);
if (sourceIndexMetadata == null) {
recordInvalidConfigurationMetrics(startTime);
listener.onFailure(new IndexNotFoundException(sourceIndexName));
return;
}
// Assert source index is a time_series index
if (IndexSettings.MODE.get(sourceIndexMetadata.getSettings()) != IndexMode.TIME_SERIES) {
recordInvalidConfigurationMetrics(startTime);
listener.onFailure(
new ElasticsearchException(
"Rollup requires setting ["
+ IndexSettings.MODE.getKey()
+ "="
+ IndexMode.TIME_SERIES
+ "] for index ["
+ sourceIndexName
+ "]"
)
);
return;
}
// Assert source index is read-only
if (state.blocks().indexBlocked(projectMetadata.id(), ClusterBlockLevel.WRITE, sourceIndexName) == false) {
recordInvalidConfigurationMetrics(startTime);
listener.onFailure(
new ElasticsearchException(
"Downsample requires setting [" + IndexMetadata.SETTING_BLOCKS_WRITE + " = true] for index [" + sourceIndexName + "]"
)
);
return;
}
final TaskId parentTask = new TaskId(clusterService.localNode().getId(), task.getId());
// Short circuit if target index has been downsampled:
final String downsampleIndexName = request.getTargetIndex();
if (canShortCircuit(downsampleIndexName, parentTask, request.getWaitTimeout(), startTime, projectMetadata, listener)) {
logger.info("Skipping downsampling, because a previous execution already completed downsampling");
return;
}
try {
MetadataCreateIndexService.validateIndexName(downsampleIndexName, projectMetadata, state.routingTable(projectMetadata.id()));
} catch (ResourceAlreadyExistsException e) {
// ignore index already exists
}
// Downsample will perform the following tasks:
// 1. Extract source index mappings
// 2. Extract downsample config from index mappings
// 3. Create the downsample index
// 4. Run downsample indexer
// 5. Make downsample index read-only and set replicas
// 6. Refresh downsample index
// 7. Mark downsample index as "completed successfully"
// 8. Flush the downsample index to disk
// At any point if there is an issue, delete the downsample index
// 1. Extract source index mappings
final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(request.masterNodeTimeout()).indices(sourceIndexName);
getMappingsRequest.setParentTask(parentTask);
client.admin().indices().getMappings(getMappingsRequest, listener.delegateFailureAndWrap((delegate, getMappingsResponse) -> {
final Map<String, Object> sourceIndexMappings = getMappingsResponse.mappings()
.entrySet()
.stream()
.filter(entry -> sourceIndexName.equals(entry.getKey()))
.findFirst()
.map(mappingMetadata -> mappingMetadata.getValue().sourceAsMap())
.orElseThrow(() -> new IllegalArgumentException("No mapping found for downsample source index [" + sourceIndexName + "]"));
// 2. Extract downsample config from index mappings
final MapperService mapperService = indicesService.createIndexMapperServiceForValidation(sourceIndexMetadata);
final CompressedXContent sourceIndexCompressedXContent = new CompressedXContent(sourceIndexMappings);
mapperService.merge(MapperService.SINGLE_MAPPING_NAME, sourceIndexCompressedXContent, MapperService.MergeReason.INDEX_TEMPLATE);
// Validate downsampling interval
validateDownsamplingConfiguration(mapperService, request.getDownsampleConfig(), sourceIndexMetadata);
final List<String> dimensionFields = new ArrayList<>();
final List<String> metricFields = new ArrayList<>();
final List<String> labelFields = new ArrayList<>();
final TimeseriesFieldTypeHelper helper = new TimeseriesFieldTypeHelper.Builder(mapperService).build(
request.getDownsampleConfig().getTimestampField()
);
MappingVisitor.visitMapping(sourceIndexMappings, (field, mapping) -> {
var flattenedDimensions = helper.extractFlattenedDimensions(field, mapping);
if (flattenedDimensions != null) {
dimensionFields.addAll(flattenedDimensions);
} else if (helper.isTimeSeriesDimension(field, mapping)) {
dimensionFields.add(field);
} else if (helper.isTimeSeriesMetric(field, mapping)) {
metricFields.add(field);
} else if (helper.isTimeSeriesLabel(field, mapping)) {
labelFields.add(field);
}
});
ActionRequestValidationException validationException = new ActionRequestValidationException();
if (dimensionFields.isEmpty()) {
validationException.addValidationError("Index [" + sourceIndexName + "] does not contain any dimension fields");
}
if (validationException.validationErrors().isEmpty() == false) {
recordInvalidConfigurationMetrics(startTime);
delegate.onFailure(validationException);
return;
}
final String mapping;
try {
mapping = createDownsampleIndexMapping(helper, request.getDownsampleConfig(), mapperService, sourceIndexMappings);
} catch (IOException e) {
recordFailureMetrics(startTime);
delegate.onFailure(e);
return;
}
/*
* When creating the downsample index, we copy the index.number_of_shards from source index,
* and we set the index.number_of_replicas to 0, to avoid replicating the index being built.
* Also, we set the index.refresh_interval to -1.
* We will set the correct number of replicas and refresh the index later.
*
* We should note that there is a risk of losing a node during the downsample process. In this
* case downsample will fail.
*/
int minNumReplicas = clusterService.getSettings().getAsInt(Downsample.DOWNSAMPLE_MIN_NUMBER_OF_REPLICAS_NAME, 0);
// 3. Create downsample index
createDownsampleIndex(
projectMetadata.id(),
downsampleIndexName,
minNumReplicas,
sourceIndexMetadata,
mapping,
request,
ActionListener.wrap(createIndexResp -> {
if (createIndexResp.isAcknowledged()) {
performShardDownsampling(
projectMetadata.id(),
request,
delegate,
minNumReplicas,
sourceIndexMetadata,
downsampleIndexName,
parentTask,
startTime,
metricFields,
labelFields,
dimensionFields
);
} else {
recordFailureMetrics(startTime);
delegate.onFailure(new ElasticsearchException("Failed to create downsample index [" + downsampleIndexName + "]"));
}
}, e -> {
if (e instanceof ResourceAlreadyExistsException) {
if (canShortCircuit(
request.getTargetIndex(),
parentTask,
request.getWaitTimeout(),
startTime,
clusterService.state().metadata().getProject(projectMetadata.id()),
listener
)) {
logger.info("Downsample tasks are not created, because a previous execution already completed downsampling");
return;
}
performShardDownsampling(
projectMetadata.id(),
request,
delegate,
minNumReplicas,
sourceIndexMetadata,
downsampleIndexName,
parentTask,
startTime,
metricFields,
labelFields,
dimensionFields
);
} else {
recordFailureMetrics(startTime);
delegate.onFailure(e);
}
})
);
}));
}
/**
* Shortcircuit when another downsample api invocation already completed successfully.
*/
private boolean canShortCircuit(
String targetIndexName,
TaskId parentTask,
TimeValue waitTimeout,
long startTime,
ProjectMetadata projectMetadata,
ActionListener<AcknowledgedResponse> listener
) {
IndexMetadata targetIndexMetadata = projectMetadata.index(targetIndexName);
if (targetIndexMetadata == null) {
return false;
}
var downsampleStatus = IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(targetIndexMetadata.getSettings());
if (downsampleStatus == DownsampleTaskStatus.UNKNOWN) {
// This isn't a downsample index, so fail:
listener.onFailure(new ResourceAlreadyExistsException(targetIndexMetadata.getIndex()));
return true;
} else if (downsampleStatus == DownsampleTaskStatus.SUCCESS) {
listener.onResponse(AcknowledgedResponse.TRUE);
return true;
}
// In case the write block has been set on the target index means that the shard level downsampling itself was successful,
// but the previous invocation failed later performing settings update, refresh or force merge.
// The write block is used a signal to resume from the refresh part of the downsample api invocation.
if (targetIndexMetadata.getSettings().get(IndexMetadata.SETTING_BLOCKS_WRITE) != null) {
var refreshRequest = new RefreshRequest(targetIndexMetadata.getIndex().getName());
refreshRequest.setParentTask(parentTask);
client.admin()
.indices()
.refresh(
refreshRequest,
new RefreshDownsampleIndexActionListener(
projectMetadata.id(),
listener,
parentTask,
targetIndexMetadata.getIndex().getName(),
waitTimeout,
startTime
)
);
return true;
}
return false;
}
// 3. downsample index created or already exist (in case of retry). Run downsample indexer persistent task on each shard.
private void performShardDownsampling(
final ProjectId projectId,
DownsampleAction.Request request,
ActionListener<AcknowledgedResponse> listener,
int minNumReplicas,
IndexMetadata sourceIndexMetadata,
String downsampleIndexName,
TaskId parentTask,
long startTime,
List<String> metricFields,
List<String> labelFields,
List<String> dimensionFields
) {
final int numberOfShards = sourceIndexMetadata.getNumberOfShards();
final Index sourceIndex = sourceIndexMetadata.getIndex();
// NOTE: before we set the number of replicas to 0, as a result here we are
// only dealing with primary shards.
final AtomicInteger countDown = new AtomicInteger(numberOfShards);
final AtomicBoolean errorReported = new AtomicBoolean(false);
for (int shardNum = 0; shardNum < numberOfShards; shardNum++) {
final ShardId shardId = new ShardId(sourceIndex, shardNum);
final String persistentTaskId = createPersistentTaskId(
downsampleIndexName,
shardId,
request.getDownsampleConfig().getInterval()
);
final DownsampleShardTaskParams params = createPersistentTaskParams(
request.getDownsampleConfig(),
sourceIndexMetadata,
downsampleIndexName,
metricFields,
labelFields,
dimensionFields,
shardId
);
Predicate<PersistentTasksCustomMetadata.PersistentTask<?>> predicate = runningTask -> {
if (runningTask == null) {
// NOTE: don't need to wait if the persistent task completed and was removed
return true;
}
DownsampleShardPersistentTaskState runningPersistentTaskState = (DownsampleShardPersistentTaskState) runningTask.getState();
return runningPersistentTaskState != null && runningPersistentTaskState.done();
};
var taskListener = new PersistentTasksService.WaitForPersistentTaskListener<>() {
@Override
public void onResponse(PersistentTasksCustomMetadata.PersistentTask<PersistentTaskParams> persistentTask) {
if (persistentTask != null) {
var runningPersistentTaskState = (DownsampleShardPersistentTaskState) persistentTask.getState();
if (runningPersistentTaskState != null) {
if (runningPersistentTaskState.failed()) {
onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] failed"));
return;
} else if (runningPersistentTaskState.cancelled()) {
onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] cancelled"));
return;
}
}
}
logger.info("Downsampling task [" + persistentTaskId + " completed for shard " + params.shardId());
if (countDown.decrementAndGet() == 0) {
logger.info("All downsampling tasks completed [" + numberOfShards + "]");
updateTargetIndexSettingStep(
projectId,
request,
listener,
minNumReplicas,
sourceIndexMetadata,
downsampleIndexName,
parentTask,
startTime
);
}
}
@Override
public void onFailure(Exception e) {
logger.error("error while waiting for downsampling persistent task", e);
if (errorReported.getAndSet(true) == false) {
recordFailureMetrics(startTime);
}
listener.onFailure(e);
}
};
persistentTasksService.sendStartRequest(
persistentTaskId,
DownsampleShardTask.TASK_NAME,
params,
TimeValue.THIRTY_SECONDS /* TODO should this be configurable? longer by default? infinite? */,
ActionListener.wrap(
startedTask -> persistentTasksService.waitForPersistentTaskCondition(
projectId,
startedTask.getId(),
predicate,
request.getWaitTimeout(),
taskListener
),
e -> {
if (e instanceof ResourceAlreadyExistsException) {
logger.info("Task [" + persistentTaskId + "] already exists. Waiting.");
persistentTasksService.waitForPersistentTaskCondition(
projectId,
persistentTaskId,
predicate,
request.getWaitTimeout(),
taskListener
);
} else {
listener.onFailure(new ElasticsearchException("Task [" + persistentTaskId + "] failed starting", e));
}
}
)
);
}
}
// 4. Make downsample index read-only and set the correct number of replicas
private void updateTargetIndexSettingStep(
ProjectId projectId,
final DownsampleAction.Request request,
final ActionListener<AcknowledgedResponse> listener,
int minNumReplicas,
final IndexMetadata sourceIndexMetadata,
final String downsampleIndexName,
final TaskId parentTask,
final long startTime
) {
// 4. Make downsample index read-only and set the correct number of replicas
final Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true);
// Number of replicas had been previously set to 0 to speed up index population
if (sourceIndexMetadata.getNumberOfReplicas() > 0 && minNumReplicas == 0) {
settings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas());
}
// Setting index.hidden has been initially set to true. We revert this to the value of the
// source index
if (sourceIndexMetadata.isHidden() == false) {
if (sourceIndexMetadata.getSettings().keySet().contains(IndexMetadata.SETTING_INDEX_HIDDEN)) {
settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, false);
} else {
settings.putNull(IndexMetadata.SETTING_INDEX_HIDDEN);
}
}
UpdateSettingsRequest updateSettingsReq = new UpdateSettingsRequest(settings.build(), downsampleIndexName);
updateSettingsReq.setParentTask(parentTask);
client.admin()
.indices()
.updateSettings(
updateSettingsReq,
new UpdateDownsampleIndexSettingsActionListener(
projectId,
listener,
parentTask,
downsampleIndexName,
request.getWaitTimeout(),
startTime
)
);
}
private static DownsampleShardTaskParams createPersistentTaskParams(
final DownsampleConfig downsampleConfig,
final IndexMetadata sourceIndexMetadata,
final String targetIndexName,
final List<String> metricFields,
final List<String> labelFields,
final List<String> dimensionFields,
final ShardId shardId
) {
return new DownsampleShardTaskParams(
downsampleConfig,
targetIndexName,
parseTimestamp(sourceIndexMetadata, IndexSettings.TIME_SERIES_START_TIME),
parseTimestamp(sourceIndexMetadata, IndexSettings.TIME_SERIES_END_TIME),
shardId,
metricFields.toArray(new String[0]),
labelFields.toArray(new String[0]),
dimensionFields.toArray(new String[0])
);
}
private static long parseTimestamp(final IndexMetadata sourceIndexMetadata, final Setting<Instant> timestampSetting) {
return OffsetDateTime.parse(sourceIndexMetadata.getSettings().get(timestampSetting.getKey()), DateTimeFormatter.ISO_DATE_TIME)
.toInstant()
.toEpochMilli();
}
private static String createPersistentTaskId(final String targetIndex, final ShardId shardId, final DateHistogramInterval interval) {
return DOWNSAMPLED_INDEX_PREFIX + targetIndex + "-" + shardId.id() + "-" + interval;
}
@Override
protected ClusterBlockException checkBlock(DownsampleAction.Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
/**
* This method creates the mapping for the downsample index, based on the
* mapping (dimensions and metrics) from the source index, as well as the
* downsample configuration.
*
* @param config the downsample configuration
* @param sourceIndexMappings a map with the source index mapping
* @return the mapping of the downsample index
*/
public static String createDownsampleIndexMapping(
final TimeseriesFieldTypeHelper helper,
final DownsampleConfig config,
final MapperService mapperService,
final Map<String, Object> sourceIndexMappings
) throws IOException {
final XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
addDynamicTemplates(builder);
builder.startObject("properties");
addTimestampField(config, sourceIndexMappings, builder);
addMetricFieldOverwrites(config, helper, sourceIndexMappings, builder);
builder.endObject(); // match initial startObject
builder.endObject(); // match startObject("properties")
final CompressedXContent mappingDiffXContent = CompressedXContent.fromJSON(
XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON)
);
return mapperService.merge(MapperService.SINGLE_MAPPING_NAME, mappingDiffXContent, MapperService.MergeReason.INDEX_TEMPLATE)
.mappingSource()
.uncompressed()
.utf8ToString();
}
/**
* Adds metric mapping overwrites. When downsampling certain metrics change their mapping type. For example,
* when we are using the aggregate sampling method, the mapping of a gauge metric becomes an aggregate_metric_double.
*/
private static void addMetricFieldOverwrites(
final DownsampleConfig config,
final TimeseriesFieldTypeHelper helper,
final Map<String, Object> sourceIndexMappings,
final XContentBuilder builder
) {
// The last value sampling method preserves the source mapping.
if (config.getSamplingMethodOrDefault() == DownsampleConfig.SamplingMethod.LAST_VALUE) {
return;
}
MappingVisitor.visitMapping(sourceIndexMappings, (field, mapping) -> {
if (helper.isTimeSeriesMetric(field, mapping)) {
try {
addMetricFieldMapping(builder, field, mapping);
} catch (IOException e) {
throw new ElasticsearchException("Error while adding metric for field [" + field + "]");
}
}
});
}
private static void addTimestampField(
final DownsampleConfig config,
Map<String, Object> sourceIndexMappings,
final XContentBuilder builder
) throws IOException {
final String timestampField = config.getTimestampField();
final String dateIntervalType = config.getIntervalType();
final String dateInterval = config.getInterval().toString();
final String timezone = config.getTimeZone();
builder.startObject(timestampField);
MappingVisitor.visitMapping(sourceIndexMappings, (field, mapping) -> {
try {
if (timestampField.equals(field)) {
final String timestampType = String.valueOf(mapping.get("type"));
builder.field("type", timestampType != null ? timestampType : DateFieldMapper.CONTENT_TYPE);
if (mapping.get("format") != null) {
builder.field("format", mapping.get("format"));
}
if (mapping.get("ignore_malformed") != null) {
builder.field("ignore_malformed", mapping.get("ignore_malformed"));
}
}
} catch (IOException e) {
throw new ElasticsearchException("Unable to create timestamp field mapping for field [" + timestampField + "]", e);
}
});
builder.startObject("meta")
.field(dateIntervalType, dateInterval)
.field(DownsampleConfig.TIME_ZONE, timezone)
.endObject()
.endObject();
}
// public for testing
public record AggregateMetricDoubleFieldSupportedMetrics(String defaultMetric, List<String> supportedMetrics) {}
// public for testing
public static AggregateMetricDoubleFieldSupportedMetrics getSupportedMetrics(
final TimeSeriesParams.MetricType metricType,
final Map<String, ?> fieldProperties
) {
boolean sourceIsAggregate = fieldProperties.get("type").equals(AggregateMetricDoubleFieldMapper.CONTENT_TYPE);
List<String> supportedAggs = List.of(metricType.supportedAggs());
if (sourceIsAggregate) {
@SuppressWarnings("unchecked")
List<String> currentAggs = (List<String>) fieldProperties.get(AggregateMetricDoubleFieldMapper.Names.METRICS);
supportedAggs = supportedAggs.stream().filter(currentAggs::contains).toList();
}
assert supportedAggs.size() > 0;
String defaultMetric = "max";
if (supportedAggs.contains(defaultMetric) == false) {
defaultMetric = supportedAggs.get(0);
}
if (sourceIsAggregate) {
defaultMetric = Objects.requireNonNullElse(
(String) fieldProperties.get(AggregateMetricDoubleFieldMapper.Names.DEFAULT_METRIC),
defaultMetric
);
}
return new AggregateMetricDoubleFieldSupportedMetrics(defaultMetric, supportedAggs);
}
private static void addMetricFieldMapping(final XContentBuilder builder, final String field, final Map<String, ?> fieldProperties)
throws IOException {
final TimeSeriesParams.MetricType metricType = TimeSeriesParams.MetricType.fromString(
fieldProperties.get(TIME_SERIES_METRIC_PARAM).toString()
);
builder.startObject(field);
if (metricType == TimeSeriesParams.MetricType.COUNTER) {
// For counters, we keep the same field type, because they store
// only one value (the last value of the counter)
for (String fieldProperty : fieldProperties.keySet()) {
builder.field(fieldProperty, fieldProperties.get(fieldProperty));
}
} else {
var supported = getSupportedMetrics(metricType, fieldProperties);
builder.field("type", AggregateMetricDoubleFieldMapper.CONTENT_TYPE)
.stringListField(AggregateMetricDoubleFieldMapper.Names.METRICS, supported.supportedMetrics)
.field(AggregateMetricDoubleFieldMapper.Names.DEFAULT_METRIC, supported.defaultMetric)
.field(TIME_SERIES_METRIC_PARAM, metricType);
}
builder.endObject();
}
private static void validateDownsamplingConfiguration(
MapperService mapperService,
DownsampleConfig config,
IndexMetadata sourceIndexMetadata
) {
MappedFieldType timestampFieldType = mapperService.fieldType(config.getTimestampField());
assert timestampFieldType != null : "Cannot find timestamp field [" + config.getTimestampField() + "] in the mapping";
ActionRequestValidationException e = new ActionRequestValidationException();
Map<String, String> meta = timestampFieldType.meta();
if (meta.isEmpty() == false) {
String sourceInterval = meta.get(config.getIntervalType());
if (sourceInterval != null) {
try {
DownsampleConfig.validateSourceAndTargetIntervals(new DateHistogramInterval(sourceInterval), config.getFixedInterval());
} catch (IllegalArgumentException exception) {
e.addValidationError("Source index is a downsampled index. " + exception.getMessage());
}
DownsampleConfig.SamplingMethod sourceSamplingMethod = DownsampleConfig.SamplingMethod.fromIndexMetadata(
sourceIndexMetadata
);
if (Objects.equals(sourceSamplingMethod, config.getSamplingMethodOrDefault()) == false) {
e.addValidationError(
"Source index is a downsampled index. Downsampling method ["
+ config.getSamplingMethodOrDefault()
+ "] is not compatible with the source index downsampling method ["
+ sourceSamplingMethod
+ "]."
);
}
}
// Validate that timezones match
String sourceTimezone = meta.get(DownsampleConfig.TIME_ZONE);
if (sourceTimezone != null && sourceTimezone.equals(config.getTimeZone()) == false) {
e.addValidationError(
"Source index is a downsampled index. Downsampling timezone ["
+ config.getTimeZone()
+ "] cannot be different than the source index timezone ["
+ sourceTimezone
+ "]."
);
}
if (e.validationErrors().isEmpty() == false) {
throw e;
}
}
}
/**
* Copy index settings from the source index to the downsample index. Settings that
* have already been set in the downsample index will not be overridden.
*/
static IndexMetadata.Builder copyIndexMetadata(
final IndexMetadata sourceIndexMetadata,
final IndexMetadata downsampleIndexMetadata,
final IndexScopedSettings indexScopedSettings
) {
// Copy index settings from the source index, but do not override the settings
// that already have been set in the downsample index
final Settings.Builder targetSettings = Settings.builder().put(downsampleIndexMetadata.getSettings());
for (final String key : sourceIndexMetadata.getSettings().keySet()) {
final Setting<?> setting = indexScopedSettings.get(key);
if (setting == null) {
assert indexScopedSettings.isPrivateSetting(key) : "expected [" + key + "] to be private but it was not";
} else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) {
// we leverage the NotCopyableOnResize setting property for downsample, because
// the same rules with resize apply
continue;
}
// Do not copy index settings which are valid for the source index but not for the target index
if (FORBIDDEN_SETTINGS.contains(key)) {
continue;
}
if (OVERRIDE_SETTINGS.contains(key)) {
targetSettings.put(key, sourceIndexMetadata.getSettings().get(key));
}
// Do not override settings that have already been set in the downsample index.
if (targetSettings.keys().contains(key)) {
continue;
}
targetSettings.copy(key, sourceIndexMetadata.getSettings());
}
/*
* Add the origin index name and UUID to the downsample index metadata.
* If the origin index is a downsample index, we will add the name and UUID
* of the first index that we initially rolled up.
*/
Index sourceIndex = sourceIndexMetadata.getIndex();
if (IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.exists(sourceIndexMetadata.getSettings()) == false
|| IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.exists(sourceIndexMetadata.getSettings()) == false) {
targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey(), sourceIndex.getName())
.put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.getKey(), sourceIndex.getUUID());
}
targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY, sourceIndex.getName());
targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_UUID_KEY, sourceIndex.getUUID());
return IndexMetadata.builder(downsampleIndexMetadata).settings(targetSettings);
}
/**
* Configure the dynamic templates to always map strings to the keyword field type.
*/
private static void addDynamicTemplates(final XContentBuilder builder) throws IOException {
builder.startArray("dynamic_templates")
.startObject()
.startObject("strings")
.field("match_mapping_type", "string")
.startObject("mapping")
.field("type", "keyword")
.endObject()
.endObject()
.endObject()
.endArray();
}
private void createDownsampleIndex(
ProjectId projectId,
String downsampleIndexName,
int minNumReplicas,
IndexMetadata sourceIndexMetadata,
String mapping,
DownsampleAction.Request request,
ActionListener<AcknowledgedResponse> listener
) {
var downsampleInterval = request.getDownsampleConfig().getInterval().toString();
Settings.Builder builder = Settings.builder()
.put(IndexMetadata.SETTING_INDEX_HIDDEN, true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, sourceIndexMetadata.getNumberOfShards())
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, minNumReplicas)
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1")
.put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), DownsampleTaskStatus.STARTED)
.put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval)
.put(IndexMetadata.INDEX_DOWNSAMPLE_METHOD.getKey(), request.getDownsampleConfig().getSamplingMethodOrDefault().toString())
.put(IndexSettings.MODE.getKey(), sourceIndexMetadata.getIndexMode())
.putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), sourceIndexMetadata.getRoutingPaths())
.put(
IndexSettings.TIME_SERIES_START_TIME.getKey(),
sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_START_TIME.getKey())
)
.put(
IndexSettings.TIME_SERIES_END_TIME.getKey(),
sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_END_TIME.getKey())
);
if (sourceIndexMetadata.getTimeSeriesDimensions().isEmpty() == false) {
builder.putList(IndexMetadata.INDEX_DIMENSIONS.getKey(), sourceIndexMetadata.getTimeSeriesDimensions());
}
if (sourceIndexMetadata.getSettings().hasValue(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey())) {
builder.put(
MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(),
sourceIndexMetadata.getSettings().get(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey())
);
}
if (sourceIndexMetadata.getSettings().hasValue(FieldMapper.IGNORE_MALFORMED_SETTING.getKey())) {
builder.put(
FieldMapper.IGNORE_MALFORMED_SETTING.getKey(),
sourceIndexMetadata.getSettings().get(FieldMapper.IGNORE_MALFORMED_SETTING.getKey())
);
}
CreateIndexClusterStateUpdateRequest createIndexClusterStateUpdateRequest = new CreateIndexClusterStateUpdateRequest(
"downsample",
projectId,
downsampleIndexName,
downsampleIndexName
).settings(builder.build()).settingsSystemProvided(true).mappings(mapping).waitForActiveShards(ActiveShardCount.ONE);
var delegate = new AllocationActionListener<>(listener, threadPool.getThreadContext());
taskQueue.submitTask("create-downsample-index [" + downsampleIndexName + "]", new DownsampleClusterStateUpdateTask(listener) {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
logger.debug("Creating downsample index [{}]", downsampleIndexName);
return metadataCreateIndexService.applyCreateIndexRequest(
currentState,
createIndexClusterStateUpdateRequest,
true,
// Copy index metadata from source index to downsample index
(builder, indexMetadata) -> builder.put(copyIndexMetadata(sourceIndexMetadata, indexMetadata, indexScopedSettings)),
delegate.reroute()
);
}
}, request.masterNodeTimeout());
}
/**
* A specialized cluster state update task that always takes a listener handling an
* AcknowledgedResponse, as all template actions have simple acknowledged yes/no responses.
*/
abstract static | TransportDownsampleAction |
java | mockito__mockito | mockito-extensions/mockito-errorprone/src/test/java/org/mockito/errorprone/bugpatterns/MockitoInternalUsageTest.java | {
"start": 4314,
"end": 4649
} | class ____ {}")
.doTest();
}
@Test
public void testNegativeCases() {
compilationHelper
.addSourceLines(
"Test.java",
"package org.mockito;",
"import org.mockito.internal.MockitoCore;",
" | Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/generics/GenericEmbeddedIdentifierMappedSuperclassTest.java | {
"start": 11866,
"end": 11960
} | class ____ extends UserAccessReport {
}
@MappedSuperclass
public static | UserAccessReportEntity |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/MultipartUploadExtension.java | {
"start": 16298,
"end": 17154
} | class ____<R extends RequestBody>
implements RuntimeMessageHeaders<R, EmptyResponseBody, EmptyMessageParameters> {
@Override
public Class<EmptyResponseBody> getResponseClass() {
return EmptyResponseBody.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public String getDescription() {
return "";
}
@Override
public EmptyMessageParameters getUnresolvedMessageParameters() {
return EmptyMessageParameters.getInstance();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.POST;
}
}
/** Simple test {@link RequestBody}. */
protected static final | TestHeadersBase |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/unionsubclass/Being.java | {
"start": 286,
"end": 1325
} | class ____ {
private long id;
private String identity;
private Location location;
private List things = new ArrayList();
private Map info = new HashMap();
/**
* @return Returns the id.
*/
public long getId() {
return id;
}
/**
* @param id The id to set.
*/
public void setId(long id) {
this.id = id;
}
/**
* @return Returns the identity.
*/
public String getIdentity() {
return identity;
}
/**
* @param identity The identity to set.
*/
public void setIdentity(String identity) {
this.identity = identity;
}
/**
* @return Returns the location.
*/
public Location getLocation() {
return location;
}
/**
* @param location The location to set.
*/
public void setLocation(Location location) {
this.location = location;
}
public String getSpecies() {
return null;
}
public List getThings() {
return things;
}
public void setThings(List things) {
this.things = things;
}
public Map getInfo() {
return info;
}
public void setInfo(Map info) {
this.info = info;
}
}
| Being |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/GlobalRoutingTable.java | {
"start": 17549,
"end": 18676
} | class ____ {
private final ImmutableOpenMap.Builder<ProjectId, RoutingTable> projectRouting;
public Builder(GlobalRoutingTable init) {
this.projectRouting = ImmutableOpenMap.builder(init.routingTables);
}
public Builder() {
this.projectRouting = ImmutableOpenMap.builder();
}
public Builder put(ProjectId id, RoutingTable routing) {
this.projectRouting.put(id, routing);
return this;
}
public Builder put(ProjectId id, RoutingTable.Builder routing) {
return put(id, routing.build());
}
public Builder removeProject(ProjectId projectId) {
this.projectRouting.remove(projectId);
return this;
}
public Builder clear() {
this.projectRouting.clear();
return this;
}
public GlobalRoutingTable build() {
return new GlobalRoutingTable(projectRouting.build());
}
}
@Override
public String toString() {
return "global_routing_table{" + routingTables + "}";
}
}
| Builder |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/booleanarray/BooleanArrayAssert_contains_at_Index_Test.java | {
"start": 1016,
"end": 1416
} | class ____ extends BooleanArrayAssertBaseTest {
private final Index index = someIndex();
@Override
protected BooleanArrayAssert invoke_api_method() {
return assertions.contains(true, index);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContains(getInfo(assertions), getActual(assertions), true, index);
}
}
| BooleanArrayAssert_contains_at_Index_Test |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/sptests/Parameter.java | {
"start": 703,
"end": 1178
} | class ____ {
private Integer addend1;
private Integer addend2;
private Integer sum;
public Integer getAddend1() {
return addend1;
}
public void setAddend1(Integer addend1) {
this.addend1 = addend1;
}
public Integer getAddend2() {
return addend2;
}
public void setAddend2(Integer addend2) {
this.addend2 = addend2;
}
public Integer getSum() {
return sum;
}
public void setSum(Integer sum) {
this.sum = sum;
}
}
| Parameter |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializerSnapshotData.java | {
"start": 15954,
"end": 16899
} | class ____<T>
implements BiFunctionWithException<DataInputView, String, Class<T>, IOException> {
private final ClassLoader classLoader;
private ClassResolverByName(ClassLoader classLoader) {
this.classLoader = classLoader;
}
@SuppressWarnings("unchecked")
@Override
public Class<T> apply(DataInputView stream, String unused) throws IOException {
String className = stream.readUTF();
try {
return (Class<T>) Class.forName(className, false, classLoader);
} catch (ClassNotFoundException e) {
LOG.warn(
"Cannot find registered class "
+ className
+ " for Kryo serialization in classpath.",
e);
return null;
}
}
}
private static final | ClassResolverByName |
java | quarkusio__quarkus | integration-tests/opentelemetry-quickstart/src/test/java/io/quarkus/it/opentelemetry/OpenTelemetryDisabledIT.java | {
"start": 359,
"end": 537
} | class ____ extends OpenTelemetryDisabledTest {
@Override
protected void buildGlobalTelemetryInstance() {
// When running native tests the test | OpenTelemetryDisabledIT |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/strategy/ValidityAuditStrategyRevEndTestCustomRevEnt.java | {
"start": 2077,
"end": 17166
} | class ____ {
private final String revendTimestampColumName = "REVEND_TIMESTAMP";
private Integer p1_id;
private Integer p2_id;
private Integer c1_1_id;
private Integer c1_2_id;
private Integer c2_1_id;
private Integer c2_2_id;
private Map<Number, CustomDateRevEntity> revisions;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
EntityManager em = scope.getEntityManagerFactory().createEntityManager();
final var session = em.unwrap( SessionImplementor.class );
final var ddlTypeRegistry = session.getTypeConfiguration().getDdlTypeRegistry();
final var dialect = session.getDialect();
// We need first to modify the columns in the middle (join table) to
// allow null values. Hbm2ddl doesn't seem
// to allow this.
em.getTransaction().begin();
session.createNativeQuery( "DROP TABLE children" ).executeUpdate();
session.createNativeQuery( "DROP TABLE children_AUD" ).executeUpdate();
em.getTransaction().commit();
em.clear();
em.getTransaction().begin();
session.createNativeQuery(
"CREATE TABLE children ( parent_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) +
", child1_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) + dialect.getNullColumnString() +
", child2_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) + dialect.getNullColumnString() + " )"
)
.executeUpdate();
session.createNativeQuery(
"CREATE TABLE children_AUD ( REV " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) + " NOT NULL" +
", REVEND " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) +
", " + revendTimestampColumName + " " + ddlTypeRegistry.getTypeName( Types.TIMESTAMP, dialect ) +
", REVTYPE " + ddlTypeRegistry.getTypeName( Types.TINYINT, dialect ) +
", parent_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) +
", child1_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) + dialect.getNullColumnString() +
", child2_id " + ddlTypeRegistry.getTypeName( Types.INTEGER, dialect ) + dialect.getNullColumnString() + " )"
)
.executeUpdate();
em.getTransaction().commit();
em.clear();
ParentEntity p1 = new ParentEntity( "parent_1" );
ParentEntity p2 = new ParentEntity( "parent_2" );
Child1Entity c1_1 = new Child1Entity( "child1_1" );
Child1Entity c1_2 = new Child1Entity( "child1_2" );
Child2Entity c2_1 = new Child2Entity( "child2_1" );
Child2Entity c2_2 = new Child2Entity( "child2_2" );
// Revision 1
em.getTransaction().begin();
em.persist( p1 );
em.persist( p2 );
em.persist( c1_1 );
em.persist( c1_2 );
em.persist( c2_1 );
em.persist( c2_2 );
em.getTransaction().commit();
em.clear();
// Revision 2 - (p1: c1_1, p2: c2_1)
em.getTransaction().begin();
p1 = em.find( ParentEntity.class, p1.getId() );
p2 = em.find( ParentEntity.class, p2.getId() );
c1_1 = em.find( Child1Entity.class, c1_1.getId() );
c2_1 = em.find( Child2Entity.class, c2_1.getId() );
p1.getChildren1().add( c1_1 );
p2.getChildren2().add( c2_1 );
em.getTransaction().commit();
em.clear();
// Revision 3 - (p1: c1_1, c1_2, c2_2, p2: c1_1, c2_1)
em.getTransaction().begin();
p1 = em.find( ParentEntity.class, p1.getId() );
p2 = em.find( ParentEntity.class, p2.getId() );
c1_1 = em.find( Child1Entity.class, c1_1.getId() );
c1_2 = em.find( Child1Entity.class, c1_2.getId() );
c2_2 = em.find( Child2Entity.class, c2_2.getId() );
p1.getChildren1().add( c1_2 );
p1.getChildren2().add( c2_2 );
p2.getChildren1().add( c1_1 );
em.getTransaction().commit();
em.clear();
// Revision 4 - (p1: c1_2, c2_2, p2: c1_1, c2_1, c2_2)
em.getTransaction().begin();
p1 = em.find( ParentEntity.class, p1.getId() );
p2 = em.find( ParentEntity.class, p2.getId() );
c1_1 = em.find( Child1Entity.class, c1_1.getId() );
c2_2 = em.find( Child2Entity.class, c2_2.getId() );
p1.getChildren1().remove( c1_1 );
p2.getChildren2().add( c2_2 );
em.getTransaction().commit();
em.clear();
// Revision 5 - (p1: c2_2, p2: c1_1, c2_1)
em.getTransaction().begin();
p1 = em.find( ParentEntity.class, p1.getId() );
p2 = em.find( ParentEntity.class, p2.getId() );
c1_2 = em.find( Child1Entity.class, c1_2.getId() );
c2_2 = em.find( Child2Entity.class, c2_2.getId() );
c2_2.getParents().remove( p2 );
c1_2.getParents().remove( p1 );
em.getTransaction().commit();
em.clear();
//
p1_id = p1.getId();
p2_id = p2.getId();
c1_1_id = c1_1.getId();
c1_2_id = c1_2.getId();
c2_1_id = c2_1.getId();
c2_2_id = c2_2.getId();
Set<Number> revisionNumbers = new HashSet<Number>();
revisionNumbers.addAll( Arrays.asList( 1, 2, 3, 4, 5 ) );
revisions = AuditReaderFactory.get( em ).findRevisions(
CustomDateRevEntity.class,
revisionNumbers
);
assertEquals( 5, revisions.size() );
em.close();
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2, 3, 4 ),
auditReader.getRevisions( ParentEntity.class, p1_id ) );
assertEquals( Arrays.asList( 1, 2, 3, 4 ),
auditReader.getRevisions( ParentEntity.class, p2_id ) );
assertEquals( Arrays.asList( 1 ),
auditReader.getRevisions( Child1Entity.class, c1_1_id ) );
assertEquals( Arrays.asList( 1, 5 ),
auditReader.getRevisions( Child1Entity.class, c1_2_id ) );
assertEquals( Arrays.asList( 1 ),
auditReader.getRevisions( Child2Entity.class, c2_1_id ) );
assertEquals( Arrays.asList( 1, 5 ),
auditReader.getRevisions( Child2Entity.class, c2_2_id ) );
} );
}
@Test
public void testAllRevEndTimeStamps(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
List<Map<String, Object>> p1RevList = getRevisions(
em,
ParentEntity.class,
p1_id
);
List<Map<String, Object>> p2RevList = getRevisions(
em,
ParentEntity.class,
p2_id
);
List<Map<String, Object>> c1_1_List = getRevisions(
em,
Child1Entity.class,
c1_1_id
);
List<Map<String, Object>> c1_2_List = getRevisions(
em,
Child1Entity.class,
c1_2_id
);
List<Map<String, Object>> c2_1_List = getRevisions(
em,
Child2Entity.class,
c2_1_id
);
List<Map<String, Object>> c2_2_List = getRevisions(
em,
Child2Entity.class,
c2_2_id
);
verifyRevEndTimeStamps( "ParentEntity: " + p1_id, p1RevList );
verifyRevEndTimeStamps( "ParentEntity: " + p2_id, p2RevList );
verifyRevEndTimeStamps( "Child1Entity: " + c1_1_id, c1_1_List );
verifyRevEndTimeStamps( "Child1Entity: " + c1_2_id, c1_2_List );
verifyRevEndTimeStamps( "Child2Entity: " + c2_1_id, c2_1_List );
verifyRevEndTimeStamps( "Child2Entity: " + c2_2_id, c2_2_List );
} );
}
@Test
public void testHistoryOfParent1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
Child1Entity c1_1 = em.find( Child1Entity.class, c1_1_id );
Child1Entity c1_2 = em.find( Child1Entity.class, c1_2_id );
Child2Entity c2_2 = em.find( Child2Entity.class, c2_2_id );
ParentEntity rev1 = auditReader.find( ParentEntity.class, p1_id, 1 );
ParentEntity rev2 = auditReader.find( ParentEntity.class, p1_id, 2 );
ParentEntity rev3 = auditReader.find( ParentEntity.class, p1_id, 3 );
ParentEntity rev4 = auditReader.find( ParentEntity.class, p1_id, 4 );
ParentEntity rev5 = auditReader.find( ParentEntity.class, p1_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getChildren1() ) );
assertTrue( TestTools.checkCollection( rev2.getChildren1(), c1_1 ) );
assertTrue( TestTools.checkCollection( rev3.getChildren1(), c1_1, c1_2 ) );
assertTrue( TestTools.checkCollection( rev4.getChildren1(), c1_2 ) );
assertTrue( TestTools.checkCollection( rev5.getChildren1() ) );
assertTrue( TestTools.checkCollection( rev1.getChildren2() ) );
assertTrue( TestTools.checkCollection( rev2.getChildren2() ) );
assertTrue( TestTools.checkCollection( rev3.getChildren2(), c2_2 ) );
assertTrue( TestTools.checkCollection( rev4.getChildren2(), c2_2 ) );
assertTrue( TestTools.checkCollection( rev5.getChildren2(), c2_2 ) );
} );
}
@Test
public void testHistoryOfParent2(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
Child1Entity c1_1 = em.find( Child1Entity.class, c1_1_id );
Child2Entity c2_1 = em.find( Child2Entity.class, c2_1_id );
Child2Entity c2_2 = em.find( Child2Entity.class, c2_2_id );
ParentEntity rev1 = auditReader.find( ParentEntity.class, p2_id, 1 );
ParentEntity rev2 = auditReader.find( ParentEntity.class, p2_id, 2 );
ParentEntity rev3 = auditReader.find( ParentEntity.class, p2_id, 3 );
ParentEntity rev4 = auditReader.find( ParentEntity.class, p2_id, 4 );
ParentEntity rev5 = auditReader.find( ParentEntity.class, p2_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getChildren1() ) );
assertTrue( TestTools.checkCollection( rev2.getChildren1() ) );
assertTrue( TestTools.checkCollection( rev3.getChildren1(), c1_1 ) );
assertTrue( TestTools.checkCollection( rev4.getChildren1(), c1_1 ) );
assertTrue( TestTools.checkCollection( rev5.getChildren1(), c1_1 ) );
assertTrue( TestTools.checkCollection( rev1.getChildren2() ) );
assertTrue( TestTools.checkCollection( rev2.getChildren2(), c2_1 ) );
assertTrue( TestTools.checkCollection( rev3.getChildren2(), c2_1 ) );
assertTrue( TestTools.checkCollection( rev4.getChildren2(), c2_1, c2_2 ) );
assertTrue( TestTools.checkCollection( rev5.getChildren2(), c2_1 ) );
} );
}
@Test
public void testHistoryOfChild1_1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
ParentEntity p1 = em.find( ParentEntity.class, p1_id );
ParentEntity p2 = em.find( ParentEntity.class, p2_id );
Child1Entity rev1 = auditReader.find( Child1Entity.class, c1_1_id, 1 );
Child1Entity rev2 = auditReader.find( Child1Entity.class, c1_1_id, 2 );
Child1Entity rev3 = auditReader.find( Child1Entity.class, c1_1_id, 3 );
Child1Entity rev4 = auditReader.find( Child1Entity.class, c1_1_id, 4 );
Child1Entity rev5 = auditReader.find( Child1Entity.class, c1_1_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getParents() ) );
assertTrue( TestTools.checkCollection( rev2.getParents(), p1 ) );
assertTrue( TestTools.checkCollection( rev3.getParents(), p1, p2 ) );
assertTrue( TestTools.checkCollection( rev4.getParents(), p2 ) );
assertTrue( TestTools.checkCollection( rev5.getParents(), p2 ) );
} );
}
@Test
public void testHistoryOfChild1_2(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
ParentEntity p1 = em.find( ParentEntity.class, p1_id );
Child1Entity rev1 = auditReader.find( Child1Entity.class, c1_2_id, 1 );
Child1Entity rev2 = auditReader.find( Child1Entity.class, c1_2_id, 2 );
Child1Entity rev3 = auditReader.find( Child1Entity.class, c1_2_id, 3 );
Child1Entity rev4 = auditReader.find( Child1Entity.class, c1_2_id, 4 );
Child1Entity rev5 = auditReader.find( Child1Entity.class, c1_2_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getParents() ) );
assertTrue( TestTools.checkCollection( rev2.getParents() ) );
assertTrue( TestTools.checkCollection( rev3.getParents(), p1 ) );
assertTrue( TestTools.checkCollection( rev4.getParents(), p1 ) );
assertTrue( TestTools.checkCollection( rev5.getParents() ) );
} );
}
@Test
public void testHistoryOfChild2_1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
ParentEntity p2 = em.find( ParentEntity.class, p2_id );
Child2Entity rev1 = auditReader.find( Child2Entity.class, c2_1_id, 1 );
Child2Entity rev2 = auditReader.find( Child2Entity.class, c2_1_id, 2 );
Child2Entity rev3 = auditReader.find( Child2Entity.class, c2_1_id, 3 );
Child2Entity rev4 = auditReader.find( Child2Entity.class, c2_1_id, 4 );
Child2Entity rev5 = auditReader.find( Child2Entity.class, c2_1_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getParents() ) );
assertTrue( TestTools.checkCollection( rev2.getParents(), p2 ) );
assertTrue( TestTools.checkCollection( rev3.getParents(), p2 ) );
assertTrue( TestTools.checkCollection( rev4.getParents(), p2 ) );
assertTrue( TestTools.checkCollection( rev5.getParents(), p2 ) );
} );
}
@Test
public void testHistoryOfChild2_2(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
ParentEntity p1 = em.find( ParentEntity.class, p1_id );
ParentEntity p2 = em.find( ParentEntity.class, p2_id );
Child2Entity rev1 = auditReader.find( Child2Entity.class, c2_2_id, 1 );
Child2Entity rev2 = auditReader.find( Child2Entity.class, c2_2_id, 2 );
Child2Entity rev3 = auditReader.find( Child2Entity.class, c2_2_id, 3 );
Child2Entity rev4 = auditReader.find( Child2Entity.class, c2_2_id, 4 );
Child2Entity rev5 = auditReader.find( Child2Entity.class, c2_2_id, 5 );
assertTrue( TestTools.checkCollection( rev1.getParents() ) );
assertTrue( TestTools.checkCollection( rev2.getParents() ) );
assertTrue( TestTools.checkCollection( rev3.getParents(), p1 ) );
assertTrue( TestTools.checkCollection( rev4.getParents(), p1, p2 ) );
assertTrue( TestTools.checkCollection( rev5.getParents(), p1 ) );
} );
}
private List<Map<String, Object>> getRevisions(
EntityManager em, Class<?> originalEntityClazz, Integer originalEntityId) {
// Build the query:
// select auditEntity from
// org.hibernate.orm.test.envers.entities.manytomany.sametable.ParentEntity_AUD
// auditEntity where auditEntity.originalId.id = :originalEntityId
StringBuilder builder = new StringBuilder( "select auditEntity from " );
builder.append( originalEntityClazz.getName() )
.append( "_AUD auditEntity" );
builder.append( " where auditEntity.originalId.id = :originalEntityId" );
Query qry = em.createQuery( builder.toString() );
qry.setParameter( "originalEntityId", originalEntityId );
@SuppressWarnings("unchecked")
List<Map<String, Object>> resultList = qry.getResultList();
return resultList;
}
private void verifyRevEndTimeStamps(
String debugInfo,
List<Map<String, Object>> revisionEntities) {
for ( Map<String, Object> revisionEntity : revisionEntities ) {
Date revendTimestamp = (Date) revisionEntity
.get( revendTimestampColumName );
CustomDateRevEntity revEnd = (CustomDateRevEntity) revisionEntity
.get( "REVEND" );
if ( revendTimestamp == null ) {
assertNull( revEnd );
}
else {
assertEquals( revendTimestamp.getTime(), revEnd.getDateTimestamp().getTime() );
}
}
}
}
| ValidityAuditStrategyRevEndTestCustomRevEnt |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/server/TSaslNonblockingServer.java | {
"start": 12249,
"end": 12807
} | class ____ implements Runnable {
private final NonblockingSaslHandler statemachine;
private Computation(NonblockingSaslHandler statemachine) {
this.statemachine = statemachine;
}
@Override
public void run() {
try {
while (!statemachine.isCurrentPhaseDone()) {
statemachine.runCurrentPhase();
}
stateTransitions.add(statemachine);
wakeup();
} catch (Throwable e) {
LOGGER.error("Damn it!", e);
}
}
}
}
private | Computation |
java | quarkusio__quarkus | integration-tests/rest-client-reactive-http2/src/main/java/io/quarkus/it/rest/client/http2/multipart/MultipartClient.java | {
"start": 7136,
"end": 7387
} | class ____ {
@FormParam("file")
@PartType(MediaType.APPLICATION_OCTET_STREAM)
public File file;
@FormParam("fileName")
@PartType(MediaType.TEXT_PLAIN)
public String fileName;
}
| WithFileAsBinaryFile |
java | elastic__elasticsearch | x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/EndsWith.java | {
"start": 1052,
"end": 2076
} | class ____ extends BinaryComparisonCaseInsensitiveFunction {
public EndsWith(Source source, Expression input, Expression pattern, boolean caseInsensitive) {
super(source, input, pattern, caseInsensitive);
}
public Expression input() {
return left();
}
public Expression pattern() {
return right();
}
@Override
protected Pipe makePipe() {
return new EndsWithFunctionPipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right()), isCaseInsensitive());
}
@Override
public Object fold() {
return doProcess(left().fold(), right().fold(), isCaseInsensitive());
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, EndsWith::new, left(), right(), isCaseInsensitive());
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new EndsWith(source(), newChildren.get(0), newChildren.get(1), isCaseInsensitive());
}
}
| EndsWith |
java | playframework__playframework | documentation/manual/working/javaGuide/main/forms/code/javaguide/forms/JavaForms.java | {
"start": 14101,
"end": 15365
} | class ____ extends MockJavaAction {
private final MessagesApi messagesApi;
PartialFormSignupController(
JavaHandlerComponents javaHandlerComponents, MessagesApi messagesApi) {
super(javaHandlerComponents);
this.messagesApi = messagesApi;
}
public Result index(Http.Request request) {
// #partial-validate-signup
Form<PartialUserForm> form =
formFactory().form(PartialUserForm.class, SignUpCheck.class).bindFromRequest(request);
// #partial-validate-signup
Messages messages = this.messagesApi.preferred(request);
if (form.hasErrors()) {
return badRequest(javaguide.forms.html.view.render(form, messages));
} else {
PartialUserForm user = form.get();
return ok("Got user " + user);
}
}
}
@Test
public void partialFormLoginValidation() {
Result result =
call(
new PartialFormLoginController(
instanceOf(JavaHandlerComponents.class), instanceOf(MessagesApi.class)),
fakeRequest("POST", "/").bodyForm(ImmutableMap.of()),
mat);
// Run it through the template
assertThat(contentAsString(result)).contains("This field is required");
}
public | PartialFormSignupController |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateWithExecutionGraphTest.java | {
"start": 8281,
"end": 10109
} | class ____ extends StateWithExecutionGraph {
private final CompletableFuture<JobStatus> globallyTerminalStateFuture =
new CompletableFuture<>();
TestingStateWithExecutionGraph(
Context context,
ExecutionGraph executionGraph,
ExecutionGraphHandler executionGraphHandler,
OperatorCoordinatorHandler operatorCoordinatorHandler,
Logger logger,
ClassLoader userCodeClassLoader) {
super(
context,
executionGraph,
executionGraphHandler,
operatorCoordinatorHandler,
logger,
userCodeClassLoader,
new ArrayList<>());
}
public CompletableFuture<JobStatus> getGloballyTerminalStateFuture() {
return globallyTerminalStateFuture;
}
@Override
public void cancel() {}
@Override
public JobStatus getJobStatus() {
return getExecutionGraph().getState();
}
@Override
void onFailure(Throwable cause, CompletableFuture<Map<String, String>> failureLabels) {}
@Override
void onGloballyTerminalState(JobStatus globallyTerminalState) {
globallyTerminalStateFuture.complete(globallyTerminalState);
}
@Override
public void handleGlobalFailure(
Throwable cause, CompletableFuture<Map<String, String>> failureLabels) {}
@Override
boolean updateTaskExecutionState(
TaskExecutionStateTransition taskExecutionStateTransition,
CompletableFuture<Map<String, String>> failureLabels) {
return false;
}
}
}
| TestingStateWithExecutionGraph |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 19457,
"end": 19785
} | class ____ %s", name));
}
if (dim < 1) {
return className;
}
className = className.substring(dim);
if (className.startsWith("L")) {
if (!className.endsWith(";") || className.length() < 3) {
throw new IllegalArgumentException(String.format("Invalid | name |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/comment/jpa/CommentsTest.java | {
"start": 1135,
"end": 2775
} | class ____ {
private static final String TABLE_NAME = "TestEntity";
private static final String SEC_TABLE_NAME = "TestEntity2";
private static final String TABLE_COMMENT = "I am a table";
private static final String SEC_TABLE_COMMENT = "I am a table too";
@Test
@JiraKey(value = "HHH-4369")
public void testComments() {
StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
Metadata metadata = new MetadataSources(ssr).addAnnotatedClass(TestEntity.class).buildMetadata();
org.hibernate.mapping.Table table = StreamSupport.stream(metadata.getDatabase().getNamespaces().spliterator(), false)
.flatMap(namespace -> namespace.getTables().stream()).filter(t -> t.getName().equals(TABLE_NAME))
.findFirst().orElse(null);
assertThat(table.getComment(), is(TABLE_COMMENT));
assertThat(table.getColumns().size(), is(6));
for (org.hibernate.mapping.Column col : table.getColumns()) {
assertThat(col.getComment(), is("I am " + col.getName()));
}
table = StreamSupport.stream(metadata.getDatabase().getNamespaces().spliterator(), false)
.flatMap(namespace -> namespace.getTables().stream()).filter(t -> t.getName().equals(SEC_TABLE_NAME))
.findFirst().orElse(null);
assertThat(table.getComment(), is(SEC_TABLE_COMMENT));
assertThat(table.getColumns().size(), is(2));
long count = table.getColumns().stream().filter(col -> "This is a date".equalsIgnoreCase(col.getComment())).count();
assertThat(count, is(1L));
}
@Entity(name = "Person")
@Table(name = TABLE_NAME, comment = TABLE_COMMENT)
@SecondaryTable(name = SEC_TABLE_NAME, comment = SEC_TABLE_COMMENT)
public static | CommentsTest |
java | apache__maven | impl/maven-impl/src/test/java/org/apache/maven/impl/DefaultSourceRootTest.java | {
"start": 1798,
"end": 11349
} | class ____ {
@Mock
private Session session;
@BeforeEach
public void setup() {
LenientStubber stub = Mockito.lenient();
stub.when(session.requireProjectScope(eq("main"))).thenReturn(ProjectScope.MAIN);
stub.when(session.requireProjectScope(eq("test"))).thenReturn(ProjectScope.TEST);
stub.when(session.requireLanguage(eq("java"))).thenReturn(Language.JAVA_FAMILY);
stub.when(session.requireLanguage(eq("resources"))).thenReturn(Language.RESOURCES);
}
/**
* Returns the output directory relative to the base directory.
*/
private static Function<ProjectScope, String> outputDirectory() {
return (scope) -> {
if (scope == ProjectScope.MAIN) {
return "target/classes";
} else if (scope == ProjectScope.TEST) {
return "target/test-classes";
} else {
return "target";
}
};
}
@Test
void testMainJavaDirectory() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().build());
assertTrue(source.module().isEmpty());
assertEquals(ProjectScope.MAIN, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("myproject", "src", "main", "java"), source.directory());
assertTrue(source.targetVersion().isEmpty());
}
@Test
void testTestJavaDirectory() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().scope("test").build());
assertTrue(source.module().isEmpty());
assertEquals(ProjectScope.TEST, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("myproject", "src", "test", "java"), source.directory());
assertTrue(source.targetVersion().isEmpty());
}
@Test
void testTestResourceDirectory() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().scope("test").lang("resources").build());
assertTrue(source.module().isEmpty());
assertEquals(ProjectScope.TEST, source.scope());
assertEquals(Language.RESOURCES, source.language());
assertEquals(Path.of("myproject", "src", "test", "resources"), source.directory());
assertTrue(source.targetVersion().isEmpty());
}
@Test
void testModuleMainDirectory() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().module("org.foo.bar").build());
assertEquals("org.foo.bar", source.module().orElseThrow());
assertEquals(ProjectScope.MAIN, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("myproject", "src", "org.foo.bar", "main", "java"), source.directory());
assertTrue(source.targetVersion().isEmpty());
}
@Test
void testModuleTestDirectory() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().module("org.foo.bar").scope("test").build());
assertEquals("org.foo.bar", source.module().orElseThrow());
assertEquals(ProjectScope.TEST, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("myproject", "src", "org.foo.bar", "test", "java"), source.directory());
assertTrue(source.targetVersion().isEmpty());
}
/**
* Tests that relative target paths are stored as relative paths.
*/
@Test
void testRelativeMainTargetPath() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().targetPath("user-output").build());
assertEquals(ProjectScope.MAIN, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("user-output"), source.targetPath().orElseThrow());
}
/**
* Tests that relative target paths are stored as relative paths.
*/
@Test
void testRelativeTestTargetPath() {
var source = DefaultSourceRoot.fromModel(
session,
Path.of("myproject"),
outputDirectory(),
Source.newBuilder().targetPath("user-output").scope("test").build());
assertEquals(ProjectScope.TEST, source.scope());
assertEquals(Language.JAVA_FAMILY, source.language());
assertEquals(Path.of("user-output"), source.targetPath().orElseThrow());
}
/*MNG-11062*/
@Test
void testExtractsTargetPathFromResource() {
// Test the Resource constructor with relative targetPath
// targetPath should be kept as relative path
Resource resource = Resource.newBuilder()
.directory("src/test/resources")
.targetPath("test-output")
.build();
DefaultSourceRoot sourceRoot = new DefaultSourceRoot(Path.of("myproject"), ProjectScope.TEST, resource);
Optional<Path> targetPath = sourceRoot.targetPath();
assertTrue(targetPath.isPresent(), "targetPath should be present");
assertEquals(Path.of("test-output"), targetPath.get(), "targetPath should be relative to output directory");
assertEquals(Path.of("myproject", "src", "test", "resources"), sourceRoot.directory());
assertEquals(ProjectScope.TEST, sourceRoot.scope());
assertEquals(Language.RESOURCES, sourceRoot.language());
}
/*MNG-11062*/
@Test
void testHandlesNullTargetPathFromResource() {
// Test null targetPath handling
Resource resource =
Resource.newBuilder().directory("src/test/resources").build();
// targetPath is null by default
DefaultSourceRoot sourceRoot = new DefaultSourceRoot(Path.of("myproject"), ProjectScope.TEST, resource);
Optional<Path> targetPath = sourceRoot.targetPath();
assertFalse(targetPath.isPresent(), "targetPath should be empty when null");
}
/*MNG-11062*/
@Test
void testHandlesEmptyTargetPathFromResource() {
// Test empty string targetPath
Resource resource = Resource.newBuilder()
.directory("src/test/resources")
.targetPath("")
.build();
DefaultSourceRoot sourceRoot = new DefaultSourceRoot(Path.of("myproject"), ProjectScope.TEST, resource);
Optional<Path> targetPath = sourceRoot.targetPath();
assertFalse(targetPath.isPresent(), "targetPath should be empty for empty string");
}
/*MNG-11062*/
@Test
void testHandlesPropertyPlaceholderInTargetPath() {
// Test property placeholder preservation
Resource resource = Resource.newBuilder()
.directory("src/test/resources")
.targetPath("${project.build.directory}/custom")
.build();
DefaultSourceRoot sourceRoot = new DefaultSourceRoot(Path.of("myproject"), ProjectScope.MAIN, resource);
Optional<Path> targetPath = sourceRoot.targetPath();
assertTrue(targetPath.isPresent(), "Property placeholder targetPath should be present");
assertEquals(
Path.of("${project.build.directory}/custom"),
targetPath.get(),
"Property placeholder should be kept as-is (relative path)");
}
/*MNG-11062*/
@Test
void testResourceConstructorRequiresNonNullDirectory() {
// Test that null directory throws exception
Resource resource = Resource.newBuilder().build();
// directory is null by default
assertThrows(
IllegalArgumentException.class,
() -> new DefaultSourceRoot(Path.of("myproject"), ProjectScope.TEST, resource),
"Should throw exception for null directory");
}
/*MNG-11062*/
@Test
void testResourceConstructorPreservesOtherProperties() {
// Test that other Resource properties are correctly preserved
Resource resource = Resource.newBuilder()
.directory("src/test/resources")
.targetPath("test-classes")
.filtering("true")
.includes(List.of("*.properties"))
.excludes(List.of("*.tmp"))
.build();
DefaultSourceRoot sourceRoot = new DefaultSourceRoot(Path.of("myproject"), ProjectScope.TEST, resource);
// Verify all properties are preserved
assertEquals(
Path.of("test-classes"),
sourceRoot.targetPath().orElseThrow(),
"targetPath should be relative to output directory");
assertTrue(sourceRoot.stringFiltering(), "Filtering should be true");
assertEquals(1, sourceRoot.includes().size());
assertTrue(sourceRoot.includes().contains("*.properties"));
assertEquals(1, sourceRoot.excludes().size());
assertTrue(sourceRoot.excludes().contains("*.tmp"));
}
}
| DefaultSourceRootTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/fetch/subselect/SubselectOneToManyTest.java | {
"start": 1536,
"end": 4644
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final Parent parent1 = new Parent();
final Child child1 = new Child( parent1, "a" );
final Child child2 = new Child( parent1, "b" );
final Child child3 = new Child( parent1, "c" );
parent1.getChildren().addAll( List.of( child1, child2, child3 ) );
session.persist( parent1 );
final GrandParent grandParent = new GrandParent( "Luigi" );
final Parent parent2 = new Parent( grandParent );
final Child child4 = new Child( parent2, "d" );
final Child child5 = new Child( parent2, "e" );
parent2.getChildren().addAll( List.of( child4, child5 ) );
session.persist( grandParent );
session.persist( parent2 );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from Child" ).executeUpdate();
session.createMutationQuery( "delete from Parent" ).executeUpdate();
session.createMutationQuery( "delete from GrandParent" ).executeUpdate();
} );
}
@Test
public void testIsNull(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<Parent> query = cb.createQuery( Parent.class );
final Root<Parent> root = query.from( Parent.class );
query.select( root ).where( cb.isNull( root.get( "grandParent" ) ) );
final Parent parent = session.createQuery( query ).getSingleResult();
assertThat( parent.getId() ).isEqualTo( 1L );
assertThat( parent.getGrandParent() ).isNull();
assertThat( parent.getChildren() ).hasSize( 3 );
statementInspector.assertExecutedCount( 2 ); // 1 query for parent, 1 for children
statementInspector.assertNumberOfOccurrenceInQuery( 0, "join", 0 );
statementInspector.assertNumberOfOccurrenceInQuery( 1, "join", 0 );
} );
}
@Test
public void testShouldJoin(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<Parent> query = cb.createQuery( Parent.class );
final Root<Parent> root = query.from( Parent.class );
query.select( root ).where( cb.equal( root.get( "grandParent" ).get( "name" ), "Luigi" ) );
final Parent parent = session.createQuery( query ).getSingleResult();
assertThat( parent.getId() ).isEqualTo( 2L );
assertThat( parent.getGrandParent().getName() ).isEqualTo( "Luigi" );
assertThat( parent.getChildren() ).hasSize( 2 );
statementInspector.assertExecutedCount( 3 ); // 1 query for parent, 1 for grandparent, 1 for children
statementInspector.assertNumberOfOccurrenceInQuery( 0, "join", 1 );
statementInspector.assertNumberOfOccurrenceInQuery( 2, "join", 0 );
} );
}
@Entity( name = "GrandParent" )
public static | SubselectOneToManyTest |
java | apache__kafka | streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java | {
"start": 5594,
"end": 6262
} | interface ____ registering types that can be de/serialized with {@link JSONSerde}.
*/
@SuppressWarnings("DefaultAnnotationParam") // being explicit for the example
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "_t")
@JsonSubTypes({
@JsonSubTypes.Type(value = PageView.class, name = "pv"),
@JsonSubTypes.Type(value = UserProfile.class, name = "up"),
@JsonSubTypes.Type(value = PageViewByRegion.class, name = "pvbr"),
@JsonSubTypes.Type(value = WindowedPageViewByRegion.class, name = "wpvbr"),
@JsonSubTypes.Type(value = RegionCount.class, name = "rc")
})
public | for |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/auth/ListUserSubCommand.java | {
"start": 1546,
"end": 4855
} | class ____ implements SubCommand {
private static final String FORMAT = "%-16s %-22s %-22s %-22s%n";
@Override
public String commandName() {
return "listUser";
}
@Override
public String commandDesc() {
return "List user from cluster.";
}
@Override
public Options buildCommandlineOptions(Options options) {
OptionGroup optionGroup = new OptionGroup();
Option opt = new Option("b", "brokerAddr", true, "list user for which broker");
optionGroup.addOption(opt);
opt = new Option("c", "clusterName", true, "list user for specified cluster");
optionGroup.addOption(opt);
optionGroup.setRequired(true);
options.addOptionGroup(optionGroup);
opt = new Option("f", "filter", true, "the filter to list users");
opt.setRequired(false);
options.addOption(opt);
return options;
}
@Override
public void execute(CommandLine commandLine, Options options,
RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
String filter = StringUtils.trim(commandLine.getOptionValue('f'));
if (commandLine.hasOption('b')) {
String addr = StringUtils.trim(commandLine.getOptionValue('b'));
defaultMQAdminExt.start();
List<UserInfo> userInfos = defaultMQAdminExt.listUser(addr, filter);
if (CollectionUtils.isNotEmpty(userInfos)) {
printUsers(userInfos);
}
return;
} else if (commandLine.hasOption('c')) {
String clusterName = StringUtils.trim(commandLine.getOptionValue('c'));
defaultMQAdminExt.start();
Set<String> masterSet =
CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
if (CollectionUtils.isEmpty(masterSet)) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed, there is no broker in cluster.");
}
for (String masterAddr : masterSet) {
List<UserInfo> userInfos = defaultMQAdminExt.listUser(masterAddr, filter);
if (CollectionUtils.isNotEmpty(userInfos)) {
printUsers(userInfos);
System.out.printf("get user from %s success.%n", masterAddr);
break;
}
}
return;
}
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
private void printUsers(List<UserInfo> users) {
System.out.printf(FORMAT, "#UserName", "#Password", "#UserType", "#UserStatus");
users.forEach(user -> System.out.printf(FORMAT, user.getUsername(), user.getPassword(), user.getUserType(), user.getUserStatus()));
}
}
| ListUserSubCommand |
java | elastic__elasticsearch | modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java | {
"start": 21092,
"end": 21894
} | class ____ extends AbstractBuilder implements LongHistogramBuilder {
RecordingLongHistogramBuilder(AbstractBuilder other) {
super(other);
}
@Override
public LongHistogramBuilder setDescription(String description) {
innerSetDescription(description);
return this;
}
@Override
public LongHistogramBuilder setUnit(String unit) {
innerSetUnit(unit);
return this;
}
@Override
public LongHistogram build() {
LongHistogramRecorder histogram = new LongHistogramRecorder(name);
recorder.register(histogram, histogram.getInstrument(), name, description, unit);
return histogram;
}
}
private | RecordingLongHistogramBuilder |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java | {
"start": 27839,
"end": 28500
} | class ____<E> extends ArrayList<E> implements Comparable<ComparableList<E>> {
private static final long serialVersionUID = 1L;
@Override
public int compareTo(ComparableList<E> list) {
throw new UnsupportedOperationException();
}
}
ComparableList<String> emptyList = new ComparableList<String>();
GenericClassHairyBounds<ComparableList<String>, String> instance =
GenericClassHairyBounds.create(emptyList, ImmutableMap.of(emptyList, "23"));
assertEquals(instance, instance);
assertEquals(emptyList, instance.key());
assertEquals(ImmutableMap.of(emptyList, "23"), instance.map());
}
| ComparableList |
java | google__dagger | hilt-core/main/java/dagger/hilt/EntryPoints.java | {
"start": 1326,
"end": 1518
} | interface ____ is given.
*
* @param component The Hilt-generated component instance. For convenience, also takes component
* manager instances as well.
* @param entryPoint The | that |
java | hibernate__hibernate-orm | hibernate-agroal/src/test/java/org/hibernate/test/agroal/AgroalTransactionIsolationConfigTest.java | {
"start": 1026,
"end": 1291
} | class ____ extends BaseTransactionIsolationConfigTest {
@Override
protected ConnectionProvider getConnectionProviderUnderTest(ServiceRegistryScope registryScope) {
return new GradleParallelTestingAgroalConnectionProvider();
}
}
| AgroalTransactionIsolationConfigTest |
java | grpc__grpc-java | api/src/main/java/io/grpc/ServiceProviders.java | {
"start": 2391,
"end": 2996
} | class ____ to
// get a reliable result.
Collections.sort(list, Collections.reverseOrder(new Comparator<T>() {
@Override
public int compare(T f1, T f2) {
int pd = priorityAccessor.getPriority(f1) - priorityAccessor.getPriority(f2);
if (pd != 0) {
return pd;
}
return f1.getClass().getName().compareTo(f2.getClass().getName());
}
}));
return Collections.unmodifiableList(list);
}
/**
* Returns true if the {@link ClassLoader} is for android.
*/
static boolean isAndroid(ClassLoader cl) {
try {
// Specify a | names |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java | {
"start": 31217,
"end": 32622
} | class ____ extends OpenFileStats {
// Operation types
static final String OP_APPEND_NAME = "append";
public static final String APPEND_NEW_BLK = "-appendNewBlk";
static final String OP_APPEND_USAGE =
"-op " + OP_APPEND_NAME + OP_USAGE_ARGS + " [" + APPEND_NEW_BLK + ']';
private boolean appendNewBlk = false;
AppendFileStats(List<String> args) {
super(args);
}
@Override
String getOpName() {
return OP_APPEND_NAME;
}
@Override
void parseArguments(List<String> args) {
appendNewBlk = args.contains(APPEND_NEW_BLK);
if (this.appendNewBlk) {
args.remove(APPEND_NEW_BLK);
}
super.parseArguments(args);
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = Time.now();
String src = fileNames[daemonId][inputIdx];
EnumSetWritable<CreateFlag> enumSet = null;
if (appendNewBlk) {
enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.NEW_BLOCK));
} else {
enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
}
clientProto.append(src, "TestClient", enumSet);
long end = Time.now();
return end - start;
}
}
/**
* List file status statistics.
*
* Measure how many get-file-status calls the name-node can handle per second.
*/
| AppendFileStats |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java | {
"start": 101916,
"end": 104478
} | class
____ (value instanceof Tuple) {
Tuple t = (Tuple) value;
int numFields = t.getArity();
if (numFields != countFieldsInClass(value.getClass())) {
// not a tuple since it has more fields.
return analyzePojo(
value.getClass(),
new ArrayList<>(),
null,
null); // we immediately call analyze Pojo here, because
// there is currently no other type that can handle such a class.
}
TypeInformation<?>[] infos = new TypeInformation[numFields];
for (int i = 0; i < numFields; i++) {
Object field = t.getField(i);
if (field == null) {
throw new InvalidTypesException(
"Automatic type extraction is not possible on candidates with null values. "
+ "Please specify the types directly.");
}
infos[i] = privateGetForObject(field);
}
return new TupleTypeInfo(value.getClass(), infos);
} else if (value instanceof Row) {
Row row = (Row) value;
int arity = row.getArity();
for (int i = 0; i < arity; i++) {
if (row.getField(i) == null) {
LOG.warn(
"Cannot extract type of Row field, because of Row field["
+ i
+ "] is null. "
+ "Should define RowTypeInfo explicitly.");
return privateGetForClass((Class<X>) value.getClass(), new ArrayList<>());
}
}
TypeInformation<?>[] typeArray = new TypeInformation<?>[arity];
for (int i = 0; i < arity; i++) {
typeArray[i] = TypeExtractor.getForObject(row.getField(i));
}
return (TypeInformation<X>) new RowTypeInfo(typeArray);
} else {
return privateGetForClass((Class<X>) value.getClass(), new ArrayList<>());
}
}
// ------------------------------------------------------------------------
// Utilities to handle Hadoop's 'Writable' type via reflection
// ------------------------------------------------------------------------
// visible for testing
static boolean isHadoopWritable(Class<?> typeClass) {
// check if this is directly the writable | if |
java | apache__flink | flink-core/src/main/java/org/apache/flink/types/parser/LongParser.java | {
"start": 1023,
"end": 6449
} | class ____ extends FieldParser<Long> {
private long result;
@Override
public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Long reusable) {
if (startPos == limit) {
setErrorState(ParseErrorState.EMPTY_COLUMN);
return -1;
}
long val = 0;
boolean neg = false;
final int delimLimit = limit - delimiter.length + 1;
if (bytes[startPos] == '-') {
neg = true;
startPos++;
// check for empty field with only the sign
if (startPos == limit
|| (startPos < delimLimit && delimiterNext(bytes, startPos, delimiter))) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ORPHAN_SIGN);
return -1;
}
}
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
if (i == startPos) {
setErrorState(ParseErrorState.EMPTY_COLUMN);
return -1;
}
this.result = neg ? -val : val;
return i + delimiter.length;
}
if (bytes[i] < 48 || bytes[i] > 57) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ILLEGAL_CHARACTER);
return -1;
}
val *= 10;
val += bytes[i] - 48;
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
if (neg && val == Long.MIN_VALUE) {
this.result = Long.MIN_VALUE;
if (i + 1 >= limit) {
return limit;
} else if (i + 1 < delimLimit && delimiterNext(bytes, i + 1, delimiter)) {
return i + 1 + delimiter.length;
} else {
setErrorState(ParseErrorState.NUMERIC_VALUE_OVERFLOW_UNDERFLOW);
return -1;
}
} else {
setErrorState(ParseErrorState.NUMERIC_VALUE_OVERFLOW_UNDERFLOW);
return -1;
}
}
}
this.result = neg ? -val : val;
return limit;
}
@Override
public Long createValue() {
return Long.MIN_VALUE;
}
@Override
public Long getLastResult() {
return Long.valueOf(this.result);
}
/**
* Static utility to parse a field of type long from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @return The parsed value.
* @throws NumberFormatException Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
/**
* Static utility to parse a field of type long from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
* @throws NumberFormatException Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if (length == 0 || bytes[startPos] == delimiter) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return neg ? -val : val;
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
if (neg && val == Long.MIN_VALUE) {
if (length == 1 || bytes[startPos + 1] == delimiter) {
return Long.MIN_VALUE;
} else {
throw new NumberFormatException("value overflow");
}
} else {
throw new NumberFormatException("value overflow");
}
}
}
return neg ? -val : val;
}
}
| LongParser |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/lucene/store/BytesReferenceIndexInput.java | {
"start": 776,
"end": 4458
} | class ____ extends IndexInput {
private final BytesReference bytesReference;
private int filePointer;
private StreamInput streamInput;
public BytesReferenceIndexInput(String resourceDescription, BytesReference bytesReference) {
this(resourceDescription, bytesReference, 0);
}
private BytesReferenceIndexInput(String resourceDescription, BytesReference bytesReference, int filePointer) {
super(resourceDescription);
this.bytesReference = bytesReference;
this.filePointer = filePointer;
}
@Override
public void close() throws IOException {}
@Override
public long getFilePointer() {
return filePointer;
}
private StreamInput getOrOpenStreamInput() throws IOException {
if (streamInput == null) {
streamInput = bytesReference.slice(filePointer, bytesReference.length() - filePointer).streamInput();
}
return streamInput;
}
@Override
public void seek(long longPos) throws IOException {
if (longPos < 0) {
throw new IllegalArgumentException("Seeking to negative position: " + longPos);
} else if (longPos > bytesReference.length()) {
throw new EOFException("seek past EOF");
}
var pos = (int) longPos;
if (pos < filePointer) {
streamInput = null;
} else if (streamInput != null) {
final var toSkip = pos - filePointer;
final var skipped = streamInput.skip(toSkip);
assert skipped == toSkip;
}
filePointer = pos;
}
@Override
public long length() {
return bytesReference.length();
}
@Override
public IndexInput slice(String sliceDescription, long offset, long length) throws IOException {
if (offset >= 0L && length >= 0L && offset + length <= bytesReference.length()) {
return new BytesReferenceIndexInput(sliceDescription, bytesReference.slice((int) offset, (int) length));
} else {
throw new IllegalArgumentException(
Strings.format(
"slice() %s out of bounds: offset=%d,length=%d,fileLength=%d: %s",
sliceDescription,
offset,
length,
bytesReference.length(),
this
)
);
}
}
@Override
public byte readByte() throws IOException {
try {
return getOrOpenStreamInput().readByte();
} finally {
filePointer += 1;
}
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
getOrOpenStreamInput().readBytes(b, offset, len);
filePointer += len;
}
@Override
public short readShort() throws IOException {
try {
return Short.reverseBytes(getOrOpenStreamInput().readShort());
} finally {
filePointer += Short.BYTES;
}
}
@Override
public int readInt() throws IOException {
try {
return Integer.reverseBytes(getOrOpenStreamInput().readInt());
} finally {
filePointer += Integer.BYTES;
}
}
@Override
public long readLong() throws IOException {
try {
return Long.reverseBytes(getOrOpenStreamInput().readLong());
} finally {
filePointer += Long.BYTES;
}
}
@SuppressWarnings("MethodDoesntCallSuperMethod")
@Override
public IndexInput clone() {
return new BytesReferenceIndexInput(toString(), bytesReference, filePointer);
}
}
| BytesReferenceIndexInput |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/Sinks.java | {
"start": 56181,
"end": 60398
} | interface ____<T> extends Empty<T> {
/**
* Try to complete the {@link Mono} with an element, generating an {@link Subscriber#onNext(Object) onNext} signal
* immediately followed by an {@link Subscriber#onComplete() onComplete} signal. A {@code null} value
* will only trigger the onComplete. The result of the attempt is represented as an {@link EmitResult},
* which possibly indicates error cases.
* <p>
* See the list of failure {@link EmitResult} in {@link #emitValue(Object, EmitFailureHandler)} javadoc for an
* example of how each of these can be dealt with, to decide if the emit API would be a good enough fit instead.
* <p>
* Might throw an unchecked exception as a last resort (eg. in case of a fatal error downstream which cannot
* be propagated to any asynchronous handler, a bubbling exception, ...).
*
* @param value the value to emit and complete with, or {@code null} to only trigger an onComplete
* @return an {@link EmitResult}, which should be checked to distinguish different possible failures
* @see #emitValue(Object, Sinks.EmitFailureHandler)
* @see Subscriber#onNext(Object)
* @see Subscriber#onComplete()
*/
EmitResult tryEmitValue(@Nullable T value);
/**
* A simplified attempt at emitting a non-null element via the {@link #tryEmitValue(Object)} API, generating an
* {@link Subscriber#onNext(Object) onNext} signal immediately followed by an {@link Subscriber#onComplete()} signal.
* If the result of the attempt is not a {@link EmitResult#isSuccess() success}, implementations SHOULD retry the
* {@link #tryEmitValue(Object)} call IF the provided {@link EmitFailureHandler} returns {@code true}.
* Otherwise, failures are dealt with in a predefined way that might depend on the actual sink implementation
* (see below for the vanilla reactor-core behavior).
* <p>
* Generally, {@link #tryEmitValue(Object)} is preferable since it allows a custom handling
* of error cases, although this implies checking the returned {@link EmitResult} and correctly
* acting on it. This API is intended as a good default for convenience.
* <p>
* When the {@link EmitResult} is not a success, vanilla reactor-core operators have the following behavior:
* <ul>
* <li>
* {@link EmitResult#FAIL_ZERO_SUBSCRIBER}: no particular handling. should ideally discard the value but at that
* point there's no {@link Subscriber} from which to get a contextual discard handler.
* </li>
* <li>
* {@link EmitResult#FAIL_OVERFLOW}: discard the value ({@link Operators#onDiscard(Object, Context)})
* then call {@link #emitError(Throwable, Sinks.EmitFailureHandler)} with a {@link Exceptions#failWithOverflow(String)} exception.
* </li>
* <li>
* {@link EmitResult#FAIL_CANCELLED}: discard the value ({@link Operators#onDiscard(Object, Context)}).
* </li>
* <li>
* {@link EmitResult#FAIL_TERMINATED}: drop the value ({@link Operators#onNextDropped(Object, Context)}).
* </li>
* <li>
* {@link EmitResult#FAIL_NON_SERIALIZED}: throw an {@link EmissionException} mentioning RS spec rule 1.3.
* Note that {@link Sinks#unsafe()} never trigger this result. It would be possible for an {@link EmitFailureHandler}
* to busy-loop and optimistically wait for the contention to disappear to avoid this case for safe sinks...
* </li>
* </ul>
* <p>
* Might throw an unchecked exception as a last resort (eg. in case of a fatal error downstream which cannot
* be propagated to any asynchronous handler, a bubbling exception, a {@link EmitResult#FAIL_NON_SERIALIZED}
* as described above, ...).
*
* @param value the value to emit and complete with, a {@code null} is actually acceptable to only trigger an onComplete
* @param failureHandler the failure handler that allows retrying failed {@link EmitResult}.
* @throws EmissionException on non-serialized access
* @see #tryEmitValue(Object)
* @see Subscriber#onNext(Object)
* @see Subscriber#onComplete()
*/
void emitValue(@Nullable T value, EmitFailureHandler failureHandler);
}
}
| One |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/ResetIntegrationTest.java | {
"start": 2679,
"end": 21661
} | class ____ extends AbstractResetIntegrationTest {
private static final String NON_EXISTING_TOPIC = "nonExistingTopic";
public static final EmbeddedKafkaCluster CLUSTER;
static {
final Properties brokerProps = new Properties();
// we double the value passed to `time.sleep` in each iteration in one of the map functions, so we disable
// expiration of connections by the brokers to avoid errors when `AdminClient` sends requests after potentially
// very long sleep times
brokerProps.put(SocketServerConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG, -1L);
CLUSTER = new EmbeddedKafkaCluster(3, brokerProps);
}
@BeforeAll
public static void startCluster() throws IOException {
CLUSTER.start();
}
@AfterAll
public static void closeCluster() {
CLUSTER.stop();
}
@Override
Map<String, Object> getClientSslConfig() {
return null;
}
@BeforeEach
public void before(final TestInfo testInfo) throws Exception {
cluster = CLUSTER;
prepareTest(testInfo);
}
@AfterEach
public void after() throws Exception {
cleanupTest();
}
@Test
public void shouldNotAllowToResetWhileStreamsIsRunning(final TestInfo testInfo) throws Exception {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--input-topics", NON_EXISTING_TOPIC
};
final Properties cleanUpConfig = new Properties();
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(1, exitCode);
streams.close();
}
@Test
public void shouldNotAllowToResetWhenInputTopicAbsent(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--input-topics", NON_EXISTING_TOPIC
};
final Properties cleanUpConfig = new Properties();
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(1, exitCode);
}
@Test
public void shouldDefaultToClassicGroupProtocol(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--input-topics", INPUT_TOPIC
};
final Properties cleanUpConfig = new Properties();
// Set properties that are only allowed under the CLASSIC group protocol.
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(0, exitCode, "Resetter should use the CLASSIC group protocol");
}
@Test
public void shouldAllowGroupProtocolClassic(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--input-topics", INPUT_TOPIC
};
final Properties cleanUpConfig = new Properties();
// Protocol config CLASSIC not needed but allowed.
cleanUpConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name());
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(0, exitCode, "Resetter should allow setting group protocol to CLASSIC");
}
@Test
public void shouldOverwriteGroupProtocolOtherThanClassic(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--input-topics", INPUT_TOPIC
};
final Properties cleanUpConfig = new Properties();
// Protocol config other than CLASSIC allowed but overwritten to CLASSIC.
cleanUpConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name());
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(0, exitCode, "Resetter should overwrite the group protocol to CLASSIC");
}
@Test
public void shouldNotAllowToResetWhenIntermediateTopicAbsent(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--intermediate-topics", NON_EXISTING_TOPIC
};
final Properties cleanUpConfig = new Properties();
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(1, exitCode);
}
@Test
public void shouldNotAllowToResetWhenSpecifiedInternalTopicDoesNotExist(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--internal-topics", NON_EXISTING_TOPIC
};
final Properties cleanUpConfig = new Properties();
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(1, exitCode);
}
@Test
public void shouldNotAllowToResetWhenSpecifiedInternalTopicIsNotInternal(final TestInfo testInfo) {
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--internal-topics", INPUT_TOPIC
};
final Properties cleanUpConfig = new Properties();
cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT));
final int exitCode = new StreamsResetter().execute(parameters, cleanUpConfig);
assertEquals(1, exitCode);
}
@Test
public void testDeprecatedConfig(final TestInfo testInfo) throws IOException {
File configFile = TestUtils.tempFile("client.id=my-client");
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--internal-topics", INPUT_TOPIC,
"--config-file", configFile.getAbsolutePath()
};
try (final MockedStatic<Admin> mockedAdmin = Mockito.mockStatic(Admin.class, Mockito.CALLS_REAL_METHODS)) {
String output = ToolsTestUtils.captureStandardOut(() -> {
new StreamsResetter().execute(parameters);
});
assertTrue(output.contains("Option --config-file has been deprecated and will be removed in a future version. Use --command-config instead."));
ArgumentCaptor<Properties> argumentCaptor = ArgumentCaptor.forClass(Properties.class);
mockedAdmin.verify(() -> Admin.create(argumentCaptor.capture()));
final Properties actualProps = argumentCaptor.getValue();
assertEquals("my-client", actualProps.get(AdminClientConfig.CLIENT_ID_CONFIG));
}
}
@Test
public void testCommandConfig(final TestInfo testInfo) throws IOException {
File configFile = TestUtils.tempFile("client.id=my-client");
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--internal-topics", INPUT_TOPIC,
"--command-config", configFile.getAbsolutePath()
};
try (final MockedStatic<Admin> mockedAdmin = Mockito.mockStatic(Admin.class, Mockito.CALLS_REAL_METHODS)) {
new StreamsResetter().execute(parameters);
ArgumentCaptor<Properties> argumentCaptor = ArgumentCaptor.forClass(Properties.class);
mockedAdmin.verify(() -> Admin.create(argumentCaptor.capture()));
final Properties actualProps = argumentCaptor.getValue();
assertEquals("my-client", actualProps.get(AdminClientConfig.CLIENT_ID_CONFIG));
}
}
@Test
public void testCommandConfigAndDeprecatedConfigPresent(final TestInfo testInfo) throws IOException {
File configFile = TestUtils.tempFile("client.id=my-client");
final String appID = safeUniqueTestName(testInfo);
final String[] parameters = new String[] {
"--application-id", appID,
"--bootstrap-server", cluster.bootstrapServers(),
"--internal-topics", INPUT_TOPIC,
"--config-file", configFile.getAbsolutePath(),
"--command-config", configFile.getAbsolutePath()
};
try (final MockedStatic<Admin> mockedAdmin = Mockito.mockStatic(Admin.class, Mockito.CALLS_REAL_METHODS)) {
// Mock Exit because CommandLineUtils.checkInvalidArgs calls exit
Exit.setExitProcedure(new ToolsTestUtils.MockExitProcedure());
String output = ToolsTestUtils.captureStandardErr(() -> {
new StreamsResetter().execute(parameters);
});
assertTrue(output.contains(String.format("Option \"%s\" can't be used with option \"%s\"",
"[config-file]", "[command-config]")));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void testResetWhenLongSessionTimeoutConfiguredWithForceOption(final TestInfo testInfo) throws Exception {
final String appID = safeUniqueTestName(testInfo);
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
streamsConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(STREAMS_CONSUMER_TIMEOUT * 100));
// Run
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
// RESET
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
streams.cleanUp();
// Reset would fail since long session timeout has been configured
final boolean cleanResult = tryCleanGlobal(false, null, null, appID);
assertFalse(cleanResult);
// Reset will success with --force, it will force delete active members on broker side
cleanGlobal(false, "--force", null, appID);
assertTrue(isEmptyConsumerGroup(adminClient, appID), "Group is not empty after cleanGlobal");
assertInternalTopicsGotDeleted(null);
// RE-RUN
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
assertEquals(result, resultRerun);
cleanGlobal(false, "--force", null, appID);
}
@Test
public void testReprocessingFromFileAfterResetWithoutIntermediateUserTopic(final TestInfo testInfo) throws Exception {
final String appID = safeUniqueTestName(testInfo);
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
// RESET
final File resetFile = TestUtils.tempFile("reset", ".csv");
try (final BufferedWriter writer = new BufferedWriter(new FileWriter(resetFile))) {
writer.write(INPUT_TOPIC + ",0,1");
}
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
streams.cleanUp();
cleanGlobal(false, "--from-file", resetFile.getAbsolutePath(), appID);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
assertInternalTopicsGotDeleted(null);
resetFile.deleteOnExit();
// RE-RUN
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 5);
streams.close();
result.remove(0);
assertEquals(result, resultRerun);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
cleanGlobal(false, null, null, appID);
}
@Test
public void testReprocessingFromDateTimeAfterResetWithoutIntermediateUserTopic(final TestInfo testInfo) throws Exception {
final String appID = safeUniqueTestName(testInfo);
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
// RESET
final File resetFile = TestUtils.tempFile("reset", ".csv");
try (final BufferedWriter writer = new BufferedWriter(new FileWriter(resetFile))) {
writer.write(INPUT_TOPIC + ",0,1");
}
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
streams.cleanUp();
final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
final Calendar calendar = Calendar.getInstance();
calendar.add(Calendar.DATE, -1);
cleanGlobal(false, "--to-datetime", format.format(calendar.getTime()), appID);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
assertInternalTopicsGotDeleted(null);
resetFile.deleteOnExit();
// RE-RUN
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
assertEquals(result, resultRerun);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
cleanGlobal(false, null, null, appID);
}
@Test
public void testReprocessingByDurationAfterResetWithoutIntermediateUserTopic(final TestInfo testInfo) throws Exception {
final String appID = safeUniqueTestName(testInfo);
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
// RESET
final File resetFile = TestUtils.tempFile("reset", ".csv");
try (final BufferedWriter writer = new BufferedWriter(new FileWriter(resetFile))) {
writer.write(INPUT_TOPIC + ",0,1");
}
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
streams.cleanUp();
cleanGlobal(false, "--by-duration", "PT1M", appID);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
assertInternalTopicsGotDeleted(null);
resetFile.deleteOnExit();
// RE-RUN
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close();
assertEquals(result, resultRerun);
waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
cleanGlobal(false, null, null, appID);
}
}
| ResetIntegrationTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java | {
"start": 8340,
"end": 9237
} | class ____
extends UnmanagedApplicationManager {
public TestableUnmanagedApplicationManager(Configuration conf,
ApplicationId appId, String queueName, String submitter,
String appNameSuffix, boolean keepContainersAcrossApplicationAttempts,
String rmName, ApplicationSubmissionContext originalAppSubmissionContext) {
super(conf, appId, queueName, submitter, appNameSuffix,
keepContainersAcrossApplicationAttempts, rmName, originalAppSubmissionContext);
}
@Override
protected AMHeartbeatRequestHandler createAMHeartbeatRequestHandler(
Configuration conf, ApplicationId appId,
AMRMClientRelayer rmProxyRelayer) {
return new TestableAMRequestHandlerThread(conf, appId, rmProxyRelayer);
}
/**
* We override this method here to return a mock RM instances. The base
* | TestableUnmanagedApplicationManager |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_232.java | {
"start": 143,
"end": 763
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String source = "{\"code\": 0, \"data\": {\"country\": \"China\", \"country_id\": \"CN\", \"area\": \"East China\", \"area_id\": \"300000\", \"region\": \"Jiangsu Province \",\" region_id \":\" 320000 \",\" city \":\" Nanjing \",\" city_id \":\" 320100 \",\" county \":\" \",\" county_id \":\" - 1 \",\" isp \":\" China Unicom \",\" isp_id \":\" 100026 \",\" ip \":\" 58.240.65.50 \"}}";
JSONObject object = JSONObject.parseObject (source);
Assert.assertEquals(0, object.getIntValue("code"));
}
}
| Bug_for_issue_232 |
java | apache__spark | core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java | {
"start": 1574,
"end": 4993
} | class ____ extends InputStream {
private static final Cleaner CLEANER = Cleaner.create();
private static final int DEFAULT_BUFFER_SIZE_BYTES = 8192;
private final Cleaner.Cleanable cleanable;
private final ByteBuffer byteBuffer;
private final FileChannel fileChannel;
public NioBufferedFileInputStream(File file, int bufferSizeInBytes) throws IOException {
byteBuffer = Platform.allocateDirectBuffer(bufferSizeInBytes);
fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
byteBuffer.flip();
this.cleanable = CLEANER.register(this, new ResourceCleaner(fileChannel, byteBuffer));
}
public NioBufferedFileInputStream(File file) throws IOException {
this(file, DEFAULT_BUFFER_SIZE_BYTES);
}
/**
* Checks whether data is left to be read from the input stream.
* @return true if data is left, false otherwise
*/
private boolean refill() throws IOException {
if (!byteBuffer.hasRemaining()) {
byteBuffer.clear();
int nRead = 0;
while (nRead == 0) {
nRead = fileChannel.read(byteBuffer);
}
byteBuffer.flip();
if (nRead < 0) {
return false;
}
}
return true;
}
@Override
public synchronized int read() throws IOException {
if (!refill()) {
return -1;
}
return byteBuffer.get() & 0xFF;
}
@Override
public synchronized int read(byte[] b, int offset, int len) throws IOException {
if (offset < 0 || len < 0 || offset + len < 0 || offset + len > b.length) {
throw new IndexOutOfBoundsException();
}
if (!refill()) {
return -1;
}
len = Math.min(len, byteBuffer.remaining());
byteBuffer.get(b, offset, len);
return len;
}
@Override
public synchronized int available() throws IOException {
return byteBuffer.remaining();
}
@Override
public synchronized long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
if (byteBuffer.remaining() >= n) {
// The buffered content is enough to skip
byteBuffer.position(byteBuffer.position() + (int) n);
return n;
}
long skippedFromBuffer = byteBuffer.remaining();
long toSkipFromFileChannel = n - skippedFromBuffer;
// Discard everything we have read in the buffer.
byteBuffer.position(0);
byteBuffer.flip();
return skippedFromBuffer + skipFromFileChannel(toSkipFromFileChannel);
}
private long skipFromFileChannel(long n) throws IOException {
long currentFilePosition = fileChannel.position();
long size = fileChannel.size();
if (n > size - currentFilePosition) {
fileChannel.position(size);
return size - currentFilePosition;
} else {
fileChannel.position(currentFilePosition + n);
return n;
}
}
@Override
public synchronized void close() throws IOException {
try {
this.cleanable.clean();
} catch (UncheckedIOException re) {
if (re.getCause() != null) {
throw re.getCause();
} else {
throw re;
}
}
}
private record ResourceCleaner(
FileChannel fileChannel,
ByteBuffer byteBuffer) implements Runnable {
@Override
public void run() {
try {
fileChannel.close();
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
StorageUtils.dispose(byteBuffer);
}
}
}
}
| NioBufferedFileInputStream |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UngroupedOverloadsTest.java | {
"start": 20127,
"end": 20468
} | class ____ {
void foo() {}
/** doc */
void foo(int x) {}
void bar() {}
}
""")
.doTest(TestMode.TEXT_MATCH);
}
@Test
public void diagnostic() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/sftp/integration/SftpSimpleConsumeStreamingIT.java | {
"start": 1324,
"end": 2624
} | class ____ extends SftpServerTestSupport {
@Test
public void testSftpSimpleConsume() throws Exception {
String expected = "Hello World";
// create file using regular file
template.sendBodyAndHeader("file://" + service.getFtpRootDir(), expected, Exchange.FILE_NAME, "hello.txt");
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(Exchange.FILE_NAME, "hello.txt");
mock.expectedBodiesReceived(expected);
context.getRouteController().startRoute("foo");
MockEndpoint.assertIsSatisfied(context);
InputStream is = mock.getExchanges().get(0).getIn().getBody(InputStream.class);
assertNotNull(is);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("sftp://localhost:{{ftp.server.port}}/{{ftp.root.dir}}"
+ "?username=admin&password=admin&delay=10000&disconnect=true&streamDownload=true&knownHostsFile="
+ service.getKnownHostsFile()).routeId("foo")
.noAutoStartup().to("mock:result");
}
};
}
}
| SftpSimpleConsumeStreamingIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/type/contributor/usertype/ContributedUserTypeTest.java | {
"start": 1566,
"end": 3891
} | class ____ {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.dropData();
}
@Test
@JiraKey( "HHH-14408" )
public void test(SessionFactoryScope scope) {
final Type type = scope.getSessionFactory()
.getMappingMetamodel()
.getEntityDescriptor( StringWrapperTestEntity.class )
.getPropertyType( "stringWrapper" );
Assertions.assertInstanceOf( CustomType.class, type,
"Type was initialized too early i.e. before type-contributors were run" );
}
@Test
@JiraKey( "HHH-17181" )
public void testComposite(SessionFactoryScope scope) {
final Type type = scope.getSessionFactory()
.getMappingMetamodel()
.getEntityDescriptor( MyCompositeValueTestEntity.class )
.getPropertyType( "compositeValue" );
Assertions.assertInstanceOf( UserComponentType.class, type );
}
@Test
@JiraKey( "HHH-17100" )
public void testParameter(SessionFactoryScope scope) {
scope.inSession(
session -> {
session.createSelectionQuery( "from StringWrapperTestEntity e where e.stringWrapper = :p" )
.setParameter( "p", new StringWrapper( "abc" ) )
.getResultList();
}
);
}
@Test
@JiraKey( "HHH-17181" )
public void testCompositeParameter(SessionFactoryScope scope) {
scope.inSession(
session -> {
session.createSelectionQuery( "from MyCompositeValueTestEntity e where e.compositeValue = :c" )
.setParameter( "c", new MyCompositeValue( 1L, "1" ) )
.getResultList();
}
);
}
@Test
@Jira( value = "https://hibernate.atlassian.net/browse/HHH-17635" )
public void testServiceLoadedCustomUserType(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Wallet wallet = new Wallet();
wallet.setId( 1L );
wallet.setMoney( new MonetaryAmount( new BigDecimal( 1000 ), Currency.getInstance("EUR")) );
session.persist( wallet );
}
);
scope.inTransaction(
session -> {
Wallet w = session.createSelectionQuery( "from Wallet", Wallet.class ).getSingleResult();
MonetaryAmount amount = w.getMoney();
Assertions.assertNotNull( amount );
Assertions.assertEquals( 1000, amount.getAmount().intValue() );
Assertions.assertEquals( "EUR", amount.getCurrency().getCurrencyCode() );
}
);
}
@Entity( name = "StringWrapperTestEntity" )
public static | ContributedUserTypeTest |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/WindowRankTestPrograms.java | {
"start": 1207,
"end": 20919
} | class ____ {
static final Row[] BEFORE_DATA = {
Row.of("2020-04-15 08:00:05", new BigDecimal(4.00), "A", "supplier1"),
Row.of("2020-04-15 08:00:06", new BigDecimal(4.00), "C", "supplier2"),
Row.of("2020-04-15 08:00:07", new BigDecimal(2.00), "G", "supplier1"),
Row.of("2020-04-15 08:00:08", new BigDecimal(2.00), "B", "supplier3"),
Row.of("2020-04-15 08:00:09", new BigDecimal(5.00), "D", "supplier4"),
Row.of("2020-04-15 08:00:11", new BigDecimal(2.00), "B", "supplier3"),
Row.of("2020-04-15 08:00:13", new BigDecimal(1.00), "E", "supplier1"),
Row.of("2020-04-15 08:00:15", new BigDecimal(3.00), "H", "supplier2"),
Row.of("2020-04-15 08:00:17", new BigDecimal(6.00), "F", "supplier5")
};
static final Row[] AFTER_DATA = {
Row.of("2020-04-15 08:00:21", new BigDecimal(2.00), "B", "supplier7"),
Row.of("2020-04-15 08:00:23", new BigDecimal(1.00), "A", "supplier4"),
Row.of("2020-04-15 08:00:25", new BigDecimal(3.00), "C", "supplier3"),
Row.of("2020-04-15 08:00:28", new BigDecimal(6.00), "A", "supplier8")
};
static final SourceTestStep SOURCE =
SourceTestStep.newBuilder("bid_t")
.addSchema(
"ts STRING",
"price DECIMAL(10,2)",
"item STRING",
"supplier_id STRING",
"`bid_time` AS TO_TIMESTAMP(`ts`)",
"`proc_time` AS PROCTIME()",
"WATERMARK for `bid_time` AS `bid_time` - INTERVAL '1' SECOND")
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build();
static final String[] SINK_SCHEMA = {
"window_start TIMESTAMP(3)",
"window_end TIMESTAMP(3)",
"bid_time TIMESTAMP(3)",
"supplier_id STRING",
"price DECIMAL(10,2)",
"item STRING",
"row_num BIGINT"
};
static final String[] SINK_TVF_AGG_SCHEMA = {
"window_start TIMESTAMP(3)",
"window_end TIMESTAMP(3)",
"supplier_id STRING",
"total_price DECIMAL(10,2)",
"cnt BIGINT",
"row_num BIGINT"
};
static final String TUMBLE_TVF =
"TABLE(TUMBLE(TABLE bid_t, DESCRIPTOR(bid_time), INTERVAL '10' SECOND))";
static final String HOP_TVF =
"TABLE(HOP(TABLE bid_t, DESCRIPTOR(bid_time), INTERVAL '5' SECOND, INTERVAL '10' SECOND))";
static final String CUMULATE_TVF =
"TABLE(CUMULATE(TABLE bid_t, DESCRIPTOR(bid_time), INTERVAL '5' SECOND, INTERVAL '10' SECOND))";
static final String QUERY_TVF_TOP_N =
"INSERT INTO sink_t SELECT *\n"
+ " FROM (\n"
+ " SELECT\n"
+ " window_start,\n"
+ " window_end, \n"
+ " bid_time,\n"
+ " supplier_id,\n"
+ " price,\n"
+ " item,\n"
+ " ROW_NUMBER() OVER (PARTITION BY window_start, window_end ORDER BY price %s) AS row_num\n"
+ " FROM %s\n" // Window TVF
+ " ) WHERE row_num <= 3"; // row_num must be greater than 1
static final String QUERY_TVF_AGG_TOP_N =
"INSERT INTO sink_t SELECT *\n"
+ " FROM (\n"
+ " SELECT *, ROW_NUMBER() OVER (PARTITION BY window_start, window_end ORDER BY price %s) as row_num\n"
+ " FROM (\n"
+ " SELECT window_start, window_end, supplier_id, SUM(price) as price, COUNT(*) as cnt\n"
+ " FROM %s\n" // Window TVF
+ " GROUP BY window_start, window_end, supplier_id\n"
+ " )\n"
+ " ) WHERE row_num <= 3"; // row_num must be greater than 1
static final TableTestProgram WINDOW_RANK_TUMBLE_TVF_MIN_TOP_N =
TableTestProgram.of(
"window-rank-tumble-tvf-min-top-n",
"validates window min top-n follows after tumbling window")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:07, supplier1, 2.00, G, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:08, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:05, supplier1, 4.00, A, 3]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:13, supplier1, 1.00, E, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:11, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:15, supplier2, 3.00, H, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:23, supplier4, 1.00, A, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:21, supplier7, 2.00, B, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:25, supplier3, 3.00, C, 3]")
.build())
.runSql(String.format(QUERY_TVF_TOP_N, "ASC", TUMBLE_TVF))
.build();
static final TableTestProgram WINDOW_RANK_TUMBLE_TVF_AGG_MIN_TOP_N =
TableTestProgram.of(
"window-rank-tumble-tvf-agg-min-top-n",
"validates window min top-n with tumbling window follows after window aggregation")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_TVF_AGG_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier3, 2.00, 1, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier2, 4.00, 1, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier4, 5.00, 1, 3]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier1, 1.00, 1, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier3, 2.00, 1, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier2, 3.00, 1, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier4, 1.00, 1, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier7, 2.00, 1, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier3, 3.00, 1, 3]")
.build())
.runSql(String.format(QUERY_TVF_AGG_TOP_N, "ASC", TUMBLE_TVF))
.build();
static final TableTestProgram WINDOW_RANK_TUMBLE_TVF_MAX_TOP_N =
TableTestProgram.of(
"window-rank-tumble-tvf-max-top-n",
"validates window max top-n follows after tumbling window")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:09, supplier4, 5.00, D, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:05, supplier1, 4.00, A, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:06, supplier2, 4.00, C, 3]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:17, supplier5, 6.00, F, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:15, supplier2, 3.00, H, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:11, supplier3, 2.00, B, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:28, supplier8, 6.00, A, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:25, supplier3, 3.00, C, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:21, supplier7, 2.00, B, 3]")
.build())
.runSql(String.format(QUERY_TVF_TOP_N, "DESC", TUMBLE_TVF))
.build();
static final TableTestProgram WINDOW_RANK_TUMBLE_TVF_AGG_MAX_TOP_N =
TableTestProgram.of(
"window-rank-tumble-tvf-agg-max-top-n",
"validates window max top-n with tumbling window follows after window aggregation")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_TVF_AGG_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier1, 6.00, 2, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier4, 5.00, 1, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, supplier2, 4.00, 1, 3]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier5, 6.00, 1, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier2, 3.00, 1, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, supplier3, 2.00, 1, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier8, 6.00, 1, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier3, 3.00, 1, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, supplier7, 2.00, 1, 3]")
.build())
.runSql(String.format(QUERY_TVF_AGG_TOP_N, "DESC", TUMBLE_TVF))
.build();
static final TableTestProgram WINDOW_RANK_HOP_TVF_MIN_TOP_N =
TableTestProgram.of(
"window-rank-hop-tvf-min-top-n",
"validates window min top-n follows after hop window")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:07, supplier1, 2.00, G, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:08, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:05, supplier1, 4.00, A, 3]",
"+I[2020-04-15T08:00:05, 2020-04-15T08:00:15, 2020-04-15T08:00:13, supplier1, 1.00, E, 1]",
"+I[2020-04-15T08:00:05, 2020-04-15T08:00:15, 2020-04-15T08:00:07, supplier1, 2.00, G, 2]",
"+I[2020-04-15T08:00:05, 2020-04-15T08:00:15, 2020-04-15T08:00:08, supplier3, 2.00, B, 3]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:13, supplier1, 1.00, E, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:11, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:15, supplier2, 3.00, H, 3]",
"+I[2020-04-15T08:00:15, 2020-04-15T08:00:25, 2020-04-15T08:00:23, supplier4, 1.00, A, 1]",
"+I[2020-04-15T08:00:15, 2020-04-15T08:00:25, 2020-04-15T08:00:21, supplier7, 2.00, B, 2]",
"+I[2020-04-15T08:00:15, 2020-04-15T08:00:25, 2020-04-15T08:00:15, supplier2, 3.00, H, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:23, supplier4, 1.00, A, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:21, supplier7, 2.00, B, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:25, supplier3, 3.00, C, 3]",
"+I[2020-04-15T08:00:25, 2020-04-15T08:00:35, 2020-04-15T08:00:25, supplier3, 3.00, C, 1]",
"+I[2020-04-15T08:00:25, 2020-04-15T08:00:35, 2020-04-15T08:00:28, supplier8, 6.00, A, 2]")
.build())
.runSql(String.format(QUERY_TVF_TOP_N, "ASC", HOP_TVF))
.build();
static final TableTestProgram WINDOW_RANK_HOP_TVF_NAMED_MIN_TOP_1 =
TableTestProgram.of(
"window-rank-hop-tvf-named-min-top-n",
"validates window min top-n follows after hop window")
.setupTableSource(
SourceTestStep.newBuilder("bid_t")
.addSchema(
"ts STRING",
"price DECIMAL(10,2)",
"supplier_id STRING",
"`bid_time` AS TO_TIMESTAMP(`ts`)",
"WATERMARK for `bid_time` AS `bid_time` - INTERVAL '1' SECOND")
.producedValues(
Row.of(
"2020-04-15 08:00:05",
new BigDecimal(4.00),
"supplier1"))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("bid_time TIMESTAMP(3)", "supplier_id STRING")
.consumedValues(
"+I[2020-04-15T08:00:05, supplier1]",
"+I[2020-04-15T08:00:05, supplier1]")
.build())
.runSql(
"INSERT INTO sink_t(bid_time, supplier_id) "
+ "SELECT bid_time, supplier_id\n"
+ " FROM (\n"
+ " SELECT\n"
+ " bid_time,\n"
+ " supplier_id,\n"
+ " ROW_NUMBER() OVER (PARTITION BY window_start, window_end ORDER BY price ASC) AS row_num\n"
+ " FROM TABLE(HOP(\n"
+ " DATA => TABLE bid_t,\n"
+ " TIMECOL => DESCRIPTOR(`bid_time`),\n"
+ " SLIDE => INTERVAL '5' SECOND,\n"
+ " SIZE => INTERVAL '10' SECOND))\n"
+ " ) WHERE row_num <= 3")
.build();
static final TableTestProgram WINDOW_RANK_CUMULATE_TVF_MIN_TOP_N =
TableTestProgram.of(
"window-rank-cumulate-tvf-min-top-n",
"validates window min top-n follows after cumulate window")
.setupTableSource(SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_SCHEMA)
.consumedBeforeRestore(
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:07, supplier1, 2.00, G, 1]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:08, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00, 2020-04-15T08:00:10, 2020-04-15T08:00:05, supplier1, 4.00, A, 3]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:15, 2020-04-15T08:00:13, supplier1, 1.00, E, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:15, 2020-04-15T08:00:11, supplier3, 2.00, B, 2]")
.consumedAfterRestore(
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:13, supplier1, 1.00, E, 1]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:11, supplier3, 2.00, B, 2]",
"+I[2020-04-15T08:00:10, 2020-04-15T08:00:20, 2020-04-15T08:00:15, supplier2, 3.00, H, 3]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:25, 2020-04-15T08:00:23, supplier4, 1.00, A, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:25, 2020-04-15T08:00:21, supplier7, 2.00, B, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:23, supplier4, 1.00, A, 1]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:21, supplier7, 2.00, B, 2]",
"+I[2020-04-15T08:00:20, 2020-04-15T08:00:30, 2020-04-15T08:00:25, supplier3, 3.00, C, 3]")
.build())
.runSql(String.format(QUERY_TVF_TOP_N, "ASC", CUMULATE_TVF))
.build();
}
| WindowRankTestPrograms |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DataNodeUsageReport.java | {
"start": 3834,
"end": 5012
} | class ____ {
private long bytesWrittenPerSec;
private long bytesReadPerSec;
private long writeTime;
private long readTime;
private long blocksWrittenPerSec;
private long blocksReadPerSec;
private long timestamp;
public DataNodeUsageReport build() {
return new DataNodeUsageReport(this);
}
public Builder setBytesWrittenPerSec(long bWrittenPerSec) {
this.bytesWrittenPerSec = bWrittenPerSec;
return this;
}
public Builder setBytesReadPerSec(long bReadPerSec) {
this.bytesReadPerSec = bReadPerSec;
return this;
}
public Builder setWriteTime(long wTime) {
this.writeTime = wTime;
return this;
}
public Builder setReadTime(long rTime) {
this.readTime = rTime;
return this;
}
public Builder setBlocksWrittenPerSec(long wBlock) {
this.blocksWrittenPerSec = wBlock;
return this;
}
public Builder setBlocksReadPerSec(long rBlock) {
this.blocksReadPerSec = rBlock;
return this;
}
public Builder setTimestamp(long ts) {
this.timestamp = ts;
return this;
}
public Builder() {
}
}
}
| Builder |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/path/HelloResource.java | {
"start": 273,
"end": 617
} | class ____ base URI is the application path, see ApplicationPath.
* For an annotated method the base URI is the effective URI of the containing class. For the purposes of
* absolutizing a path against the base URI , a leading '/' in a path is ignored and base URIs are treated
* as if they ended in '/'.
* </quote>
*/
@Path("hello")
public | the |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/healthcheck/interceptor/HealthCheckEnableInterceptor.java | {
"start": 922,
"end": 1339
} | class ____ extends AbstractHealthCheckInterceptor {
@Override
public boolean intercept(NacosHealthCheckTask object) {
try {
return !ApplicationUtils.getBean(SwitchDomain.class).isHealthCheckEnabled();
} catch (Exception e) {
return true;
}
}
@Override
public int order() {
return Integer.MIN_VALUE;
}
}
| HealthCheckEnableInterceptor |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 94783,
"end": 95109
} | class ____ implements Converter<String, WithPublicObjectToObjectMethod> {
@Override
public WithPublicObjectToObjectMethod convert(String source) {
return new WithPublicObjectToObjectMethod(source);
}
}
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties
static | WithObjectToObjectMethodConverter |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java | {
"start": 14127,
"end": 14492
} | class ____ responsible for handling all of the RPC calls to the It is
* created, started, and stopped by {@link Router}. It implements the
* {@link ClientProtocol} to mimic a
* {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode} and proxies
* the requests to the active
* {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode}.
*/
public | is |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/orderupdates/OrderUpdateNestedEmbeddedIdTest.java | {
"start": 3206,
"end": 3671
} | class ____ {
@EmbeddedId
private Child1Id child1Id;
@OneToMany( cascade = CascadeType.ALL )
@JoinColumn( name = "child1_id", referencedColumnName = "id" )
@JoinColumn( name = "child1_version", referencedColumnName = "version" )
private List<Child2> child2s;
public Child1() {
}
public Child1(Child1Id child1Id, List<Child2> child2s) {
this.child1Id = child1Id;
this.child2s = child2s;
}
}
@Entity( name = "Child2" )
public static | Child1 |
java | google__guice | core/test/com/google/inject/DuplicateBindingsTest.java | {
"start": 20185,
"end": 21111
} | class ____ implements Provider<Object> {
private String equality;
private boolean throwOnEquals;
private boolean throwOnHashcode;
@Override
public boolean equals(Object obj) {
if (throwOnEquals) {
throw new RuntimeException();
} else if (obj instanceof HashEqualsTester) {
HashEqualsTester o = (HashEqualsTester) obj;
if (o.throwOnEquals) {
throw new RuntimeException();
}
if (equality == null && o.equality == null) {
return this == o;
} else {
return Objects.equal(equality, o.equality);
}
} else {
return false;
}
}
@Override
public int hashCode() {
if (throwOnHashcode) {
throw new RuntimeException();
} else {
return super.hashCode();
}
}
@Override
public Object get() {
return new Object();
}
}
}
| HashEqualsTester |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java | {
"start": 1054,
"end": 1249
} | class ____ {
/**
* Structural elements of an FSImage that may be encountered within the
* file. ImageVisitors are able to handle processing any of these elements.
*/
public | ImageVisitor |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/export/MBeanExporter.java | {
"start": 43299,
"end": 43674
} | class ____ the bean
* @param beanName the name of the bean
*/
boolean include(Class<?> beanClass, String beanName);
}
/**
* Extension of {@link LazyInitTargetSource} that will inject a
* {@link org.springframework.jmx.export.notification.NotificationPublisher}
* into the lazy resource as it is created if required.
*/
@SuppressWarnings("serial")
private | of |
java | apache__kafka | connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java | {
"start": 1385,
"end": 5404
} | class ____ extends MirrorConnectorsIntegrationBaseTest {
@BeforeEach
public void startClusters() throws Exception {
mm2Props.put(
PRIMARY_CLUSTER_ALIAS + "." + DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG,
DistributedConfig.ExactlyOnceSourceSupport.ENABLED.toString()
);
mm2Props.put(
BACKUP_CLUSTER_ALIAS + "." + DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG,
DistributedConfig.ExactlyOnceSourceSupport.ENABLED.toString()
);
for (Properties brokerProps : List.of(primaryBrokerProps, backupBrokerProps)) {
brokerProps.put("transaction.state.log.replication.factor", "1");
brokerProps.put("transaction.state.log.min.isr", "1");
}
super.startClusters();
}
@Override
@Test
public void testReplication() throws Exception {
super.testReplication();
// Augment the base replication test case with some extra testing of the offset management
// API introduced in KIP-875
// We do this only when exactly-once support is enabled in order to avoid having to worry about
// zombie tasks producing duplicate records and/or writing stale offsets to the offsets topic
String backupTopic1 = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS);
String backupTopic2 = remoteTopicName("test-topic-2", PRIMARY_CLUSTER_ALIAS);
stopMirrorMakerConnectors(backup, MirrorSourceConnector.class);
// Explicitly move back to offset 0
// Note that the connector treats the offset as the last-consumed offset,
// so it will start reading the topic partition from offset 1 when it resumes
alterMirrorMakerSourceConnectorOffsets(backup, n -> 0L, "test-topic-1");
// Reset the offsets for test-topic-2
resetSomeMirrorMakerSourceConnectorOffsets(backup, "test-topic-2");
resumeMirrorMakerConnectors(backup, MirrorSourceConnector.class);
int expectedRecordsTopic1 = NUM_RECORDS_PRODUCED + ((NUM_RECORDS_PER_PARTITION - 1) * NUM_PARTITIONS);
assertEquals(expectedRecordsTopic1, backup.kafka().consume(expectedRecordsTopic1, RECORD_TRANSFER_DURATION_MS, backupTopic1).count(),
"Records were not re-replicated to backup cluster after altering offsets.");
int expectedRecordsTopic2 = NUM_RECORDS_PER_PARTITION * 2;
assertEquals(expectedRecordsTopic2, backup.kafka().consume(expectedRecordsTopic2, RECORD_TRANSFER_DURATION_MS, backupTopic2).count(),
"New topic was not re-replicated to backup cluster after altering offsets.");
@SuppressWarnings("unchecked")
Class<? extends Connector>[] connectorsToReset = CONNECTOR_LIST.toArray(new Class[0]);
stopMirrorMakerConnectors(backup, connectorsToReset);
// Resetting the offsets for the heartbeat and checkpoint connectors doesn't have any effect
// on their behavior, but users may want to wipe offsets from them to prevent the offsets topic
// from growing infinitely. So, we include them in the list of connectors to reset as a sanity check
// to make sure that this action can be performed successfully
resetAllMirrorMakerConnectorOffsets(backup, connectorsToReset);
resumeMirrorMakerConnectors(backup, connectorsToReset);
expectedRecordsTopic1 += NUM_RECORDS_PRODUCED;
assertEquals(expectedRecordsTopic1, backup.kafka().consume(expectedRecordsTopic1, RECORD_TRANSFER_DURATION_MS, backupTopic1).count(),
"Records were not re-replicated to backup cluster after resetting offsets.");
expectedRecordsTopic2 += NUM_RECORDS_PER_PARTITION;
assertEquals(expectedRecordsTopic2, backup.kafka().consume(expectedRecordsTopic2, RECORD_TRANSFER_DURATION_MS, backupTopic2).count(),
"New topic was not re-replicated to backup cluster after resetting offsets.");
}
}
| MirrorConnectorsIntegrationExactlyOnceTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/fieldlocation/FieldLocation_hierarchyMatches_Test.java | {
"start": 1130,
"end": 2751
} | class ____ {
@ParameterizedTest(name = "{0} is or is a child of {1}")
@MethodSource
void hierarchyMatches_should_return_true(List<String> fieldPath, String other) {
// GIVEN
FieldLocation field = new FieldLocation(fieldPath);
// WHEN
boolean result = field.hierarchyMatches(other);
// THEN
then(result).as("%s is or is a child of %s", field, other).isTrue();
}
private static Stream<Arguments> hierarchyMatches_should_return_true() {
return Stream.of(arguments(list("name"), "name"),
arguments(list("name", "first"), "name"),
arguments(list("name", "first", "nickname"), "name"),
arguments(list("name", "first", "nickname"), "name.first"),
arguments(list("name", "first"), "name.first"));
}
@ParameterizedTest(name = "{0} is not nor a child of {1}")
@MethodSource
void hierarchyMatches_should_return_false(List<String> fieldPath, String other) {
// GIVEN
FieldLocation field = new FieldLocation(fieldPath);
// WHEN
boolean result = field.hierarchyMatches(other);
// THEN
then(result).as("%s is not nor a child of %s", field, other).isFalse();
}
private static Stream<Arguments> hierarchyMatches_should_return_false() {
return Stream.of(arguments(list("person", "name"), "name"),
arguments(list("names"), "name"),
arguments(list("nickname"), "name"),
arguments(list("name"), "nickname"),
arguments(list("first", "nickname"), "name"));
}
}
| FieldLocation_hierarchyMatches_Test |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/cache/ServicePool.java | {
"start": 6936,
"end": 9579
} | class ____ implements Pool<S> {
private final Endpoint endpoint;
private volatile S s;
SinglePool(Endpoint endpoint) {
this.endpoint = requireNonNull(endpoint);
}
@Override
public S acquire() throws Exception {
cleanupEvicts();
if (s == null) {
lock.lock();
try {
if (s == null) {
LOG.trace("Creating service from endpoint: {}", endpoint);
S tempS = creator.apply(endpoint);
endpoint.getCamelContext().addService(tempS, true, true);
s = tempS;
}
} finally {
lock.unlock();
}
}
LOG.trace("Acquired service: {}", s);
return s;
}
@Override
public void release(S s) {
cleanupEvicts();
// noop
LOG.trace("Released service: {}", s);
}
@Override
public int size() {
return s != null ? 1 : 0;
}
@Override
public void stop() {
S toStop;
lock.lock();
try {
toStop = s;
s = null;
} finally {
lock.unlock();
}
doStop(toStop);
pool.remove(endpoint);
}
@Override
public void evict(S s) {
singlePoolEvicted.putIfAbsent(endpoint, this);
}
@Override
public void cleanUp() {
cleanupEvicts();
}
private void cleanupEvicts() {
if (!singlePoolEvicted.isEmpty()) {
for (Map.Entry<Endpoint, Pool<S>> entry : singlePoolEvicted.entrySet()) {
Endpoint e = entry.getKey();
Pool<S> p = entry.getValue();
doStop(e);
p.stop();
singlePoolEvicted.remove(e);
}
}
}
void doStop(Service s) {
if (s != null) {
ServicePool.stop(s);
try {
endpoint.getCamelContext().removeService(s);
} catch (Exception e) {
LOG.debug("Error removing service: {}. This exception is ignored.", s, e);
}
}
}
}
/**
* Pool used for non-singleton producers or consumers which are not thread-safe and can only be used by one worker
* thread at any given time.
*/
private | SinglePool |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/JSONObjectTest_getDate.java | {
"start": 139,
"end": 421
} | class ____ extends TestCase {
public void test_get_empty() throws Exception {
JSONObject obj = new JSONObject();
obj.put("value", "");
Assert.assertEquals("", obj.get("value"));
Assert.assertNull(obj.getDate("value"));
}
}
| JSONObjectTest_getDate |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/longarray/AtomicLongArrayAssert_endsWith_Test.java | {
"start": 878,
"end": 1210
} | class ____ extends AtomicLongArrayAssertBaseTest {
@Override
protected AtomicLongArrayAssert invoke_api_method() {
return assertions.endsWith(6, 8);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertEndsWith(info(), internalArray(), arrayOf(6, 8));
}
}
| AtomicLongArrayAssert_endsWith_Test |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FunctionalInterfaceMethodChangedTest.java | {
"start": 882,
"end": 1332
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(FunctionalInterfaceMethodChanged.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"FunctionalInterfaceMethodChangedPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
public | FunctionalInterfaceMethodChangedTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/FormatFunction.java | {
"start": 6350,
"end": 26419
} | class ____<T> extends SelfRenderingSqmFunction<T> {
private final boolean supportsPatternLiterals;
private final TypeConfiguration typeConfiguration;
public FormatSqmFunction(
SqmFunctionDescriptor descriptor,
FunctionRenderer renderer,
List<? extends SqmTypedNode<?>> arguments,
ReturnableType<T> impliedResultType,
ArgumentsValidator argumentsValidator,
FunctionReturnTypeResolver returnTypeResolver,
boolean supportsPatternLiterals,
QueryEngine queryEngine) {
super(
descriptor,
renderer,
arguments,
impliedResultType,
argumentsValidator,
returnTypeResolver,
queryEngine.getCriteriaBuilder(),
"format"
);
this.supportsPatternLiterals = supportsPatternLiterals;
this.typeConfiguration = queryEngine.getTypeConfiguration();
}
@Override
public Expression convertToSqlAst(SqmToSqlAstConverter walker) {
final List<SqlAstNode> arguments = resolveSqlAstArguments( getArguments(), walker );
final ReturnableType<?> resultType = resolveResultType( walker );
final MappingModelExpressible<?> mappingModelExpressible =
resultType == null
? null
: getMappingModelExpressible( walker, resultType, arguments );
final SqlAstNode expression = arguments.get( 0 );
if ( expression instanceof SqlTupleContainer sqlTupleContainer ) {
// SqlTupleContainer means this is a composite temporal type i.e. uses `@TimeZoneStorage(COLUMN)`
// The support for this kind of type requires that we inject the offset from the second column
// as literal into the pattern, and apply the formatting on the date time part
final SqlTuple sqlTuple = sqlTupleContainer.getSqlTuple();
final FunctionRenderer timestampaddFunction = getFunction( walker, "timestampadd" );
final BasicType<Integer> integerType = typeConfiguration.getBasicTypeRegistry()
.resolve( StandardBasicTypes.INTEGER );
arguments.set( 0, getOffsetAdjusted( sqlTuple, timestampaddFunction, integerType ) );
if ( getArgumentsValidator() != null ) {
getArgumentsValidator().validateSqlTypes( arguments, getFunctionName() );
}
final Format format = (Format) arguments.get( 1 );
// If the format contains a time zone or offset, we must replace that with the offset column
if ( format.getFormat().contains( "x" ) || !supportsPatternLiterals ) {
final FunctionRenderer concatFunction = getFunction( walker, "concat" );
final FunctionRenderer substringFunction = getFunction( walker, "substring", 3 );
final BasicType<String> stringType = typeConfiguration.getBasicTypeRegistry()
.resolve( StandardBasicTypes.STRING );
final Dialect dialect = walker.getCreationContext().getDialect();
Expression formatExpression = null;
final StringBuilder sb = new StringBuilder();
final StringBuilderSqlAppender sqlAppender = new StringBuilderSqlAppender( sb );
final String delimiter;
if ( supportsPatternLiterals ) {
dialect.appendDatetimeFormat( sqlAppender, "'a'" );
delimiter = sb.substring( 0, sb.indexOf( "a" ) ).replace( "''", "'" );
}
else {
delimiter = "";
}
final String[] chunks = splitFull( "'", format.getFormat() );
final Expression offsetExpression = sqlTuple.getExpressions().get( 1 );
// Splitting by `'` will put actual format pattern parts to even indices and literal pattern parts
// to uneven indices. We will only replace the time zone and offset pattern in the format pattern parts
for ( int i = 0; i < chunks.length; i += 2 ) {
// The general idea is to replace the various patterns `xxx`, `xx` and `x` by concatenating
// the offset column as literal i.e. `HH:mmxxx` is translated to `HH:mm'''||offset||'''`
// xxx stands for the full offset i.e. `+01:00`
// xx stands for the medium offset i.e. `+0100`
// x stands for the small offset i.e. `+01`
final String[] fullParts = splitFull( "xxx", chunks[i] );
for ( int j = 0; j < fullParts.length; j++ ) {
if ( fullParts[j].isEmpty() ) {
continue;
}
final String[] mediumParts = splitFull( "xx", fullParts[j] );
for ( int k = 0; k < mediumParts.length; k++ ) {
if ( mediumParts[k].isEmpty() ) {
continue;
}
final String[] smallParts = splitFull( "x", mediumParts[k] );
for ( int l = 0; l < smallParts.length; l++ ) {
if ( smallParts[l].isEmpty() ) {
continue;
}
sb.setLength( 0 );
dialect.appendDatetimeFormat( sqlAppender, smallParts[l] );
final String formatPart = sb.toString();
if ( supportsPatternLiterals ) {
formatExpression = concat(
concatFunction,
stringType,
formatExpression,
new QueryLiteral<>( formatPart, stringType )
);
}
else {
formatExpression = concat(
concatFunction,
stringType,
formatExpression,
new SelfRenderingFunctionSqlAstExpression(
getFunctionName(),
getFunctionRenderer(),
List.of(
arguments.get( 0 ),
new QueryLiteral<>( formatPart, stringType )
),
resultType,
mappingModelExpressible
)
);
}
if ( l + 1 < smallParts.length ) {
// This is for `x` patterns, which require `+01`
// so we concat `substring(offset, 1, 4)`
// Since the offset is always in the full format
formatExpression = concatAsLiteral(
concatFunction,
stringType,
delimiter,
formatExpression,
createSmallOffset(
concatFunction,
substringFunction,
stringType,
integerType,
offsetExpression
)
);
}
}
if ( k + 1 < mediumParts.length ) {
// This is for `xx` patterns, which require `+0100`
// so we concat `substring(offset, 1, 4)||substring(offset, 4, 6)`
// Since the offset is always in the full format
formatExpression = concatAsLiteral(
concatFunction,
stringType,
delimiter,
formatExpression,
createMediumOffset(
concatFunction,
substringFunction,
stringType,
integerType,
offsetExpression
)
);
}
}
if ( j + 1 < fullParts.length ) {
formatExpression = concatAsLiteral(
concatFunction,
stringType,
delimiter,
formatExpression,
createFullOffset(
concatFunction,
stringType,
integerType,
offsetExpression
)
);
}
}
if ( i + 1 < chunks.length ) {
// Handle the pattern literal content
final String formatLiteralPart;
if ( supportsPatternLiterals ) {
sb.setLength( 0 );
dialect.appendDatetimeFormat( sqlAppender, "'" + chunks[i + 1] + "'" );
formatLiteralPart = sb.toString().replace( "''", "'" );
}
else {
formatLiteralPart = chunks[i + 1];
}
formatExpression = concat(
concatFunction,
stringType,
formatExpression,
new QueryLiteral<>(
formatLiteralPart,
stringType
)
);
}
}
if ( supportsPatternLiterals ) {
arguments.set( 1, formatExpression );
}
else {
return formatExpression;
}
}
}
else {
if ( getArgumentsValidator() != null ) {
getArgumentsValidator().validateSqlTypes( arguments, getFunctionName() );
}
if ( !supportsPatternLiterals ) {
final FunctionRenderer concatFunction = getFunction( walker, "concat" );
final BasicType<String> stringType = typeConfiguration.getBasicTypeRegistry()
.resolve( StandardBasicTypes.STRING );
Expression formatExpression = null;
final Format format = (Format) arguments.get( 1 );
final String[] chunks = splitFull( "'", format.getFormat() );
// Splitting by `'` will put actual format pattern parts to even indices and literal pattern parts
// to uneven indices. We need to apply the format parts and then concatenate because the pattern
// doesn't support literals
for ( int i = 0; i < chunks.length; i += 2 ) {
formatExpression = concat(
concatFunction,
stringType,
formatExpression,
new SelfRenderingFunctionSqlAstExpression(
getFunctionName(),
getFunctionRenderer(),
List.of( arguments.get( 0 ), new Format( chunks[i] ) ),
resultType,
mappingModelExpressible
)
);
if ( i + 1 < chunks.length ) {
// Handle the pattern literal content
formatExpression = concat(
concatFunction,
stringType,
formatExpression,
new QueryLiteral<>( chunks[i + 1], stringType )
);
}
}
return formatExpression;
}
}
return new SelfRenderingFunctionSqlAstExpression(
getFunctionName(),
getFunctionRenderer(),
arguments,
resultType,
mappingModelExpressible
);
}
private FunctionRenderer getFunction(SqmToSqlAstConverter walker, String name) {
return (FunctionRenderer)
walker.getCreationContext().getSqmFunctionRegistry().findFunctionDescriptor( name );
}
private FunctionRenderer getFunction(SqmToSqlAstConverter walker, String name, int argumentCount) {
final SqmFunctionDescriptor functionDescriptor =
walker.getCreationContext().getSqmFunctionRegistry()
.findFunctionDescriptor( name );
return functionDescriptor instanceof MultipatternSqmFunctionDescriptor multipatternSqmFunctionDescriptor
? (FunctionRenderer) multipatternSqmFunctionDescriptor.getFunction( argumentCount )
: (FunctionRenderer) functionDescriptor;
}
private SqlAstNode getOffsetAdjusted(
SqlTuple sqlTuple,
FunctionRenderer timestampaddFunction,
BasicType<Integer> integerType) {
final Expression instantExpression = sqlTuple.getExpressions().get( 0 );
final Expression offsetExpression = sqlTuple.getExpressions().get( 1 );
return new SelfRenderingFunctionSqlAstExpression(
"timestampadd",
timestampaddFunction,
List.of(
new DurationUnit( TemporalUnit.SECOND, integerType ),
offsetExpression,
instantExpression
),
(ReturnableType<?>) instantExpression.getExpressionType(),
instantExpression.getExpressionType()
);
}
private Expression createFullOffset(
FunctionRenderer concatFunction,
BasicType<String> stringType,
BasicType<Integer> integerType,
Expression offsetExpression) {
if ( offsetExpression.getExpressionType().getSingleJdbcMapping().getJdbcType().isString() ) {
return offsetExpression;
}
else {
// ZoneOffset as seconds
final CaseSearchedExpression caseSearchedExpression =
zoneOffsetSeconds( stringType, integerType, offsetExpression );
final Expression hours = getHours( integerType, offsetExpression );
final Expression minutes = getMinutes( integerType, offsetExpression );
final CaseSearchedExpression minuteStart = new CaseSearchedExpression( stringType );
minuteStart.getWhenFragments().add(
new CaseSearchedExpression.WhenFragment(
new BetweenPredicate(
minutes,
new QueryLiteral<>( -9, integerType ),
new QueryLiteral<>( 9, integerType ),
false,
null
),
new QueryLiteral<>( ":0", stringType )
)
);
minuteStart.otherwise( new QueryLiteral<>( ":", stringType ) );
return concat(
concatFunction,
stringType,
concat(
concatFunction,
stringType,
concat( concatFunction, stringType, caseSearchedExpression, hours ),
minuteStart
),
minutes
);
}
}
private Expression createMediumOffset(
FunctionRenderer concatFunction,
FunctionRenderer substringFunction,
BasicType<String> stringType,
BasicType<Integer> integerType,
Expression offsetExpression) {
if ( offsetExpression.getExpressionType().getSingleJdbcMapping().getJdbcType().isString() ) {
return concat(
concatFunction,
stringType,
createSmallOffset(
concatFunction,
substringFunction,
stringType,
integerType,
offsetExpression
),
new SelfRenderingFunctionSqlAstExpression(
"substring",
substringFunction,
List.of(
offsetExpression,
new QueryLiteral<>( 4, integerType ),
new QueryLiteral<>( 6, integerType )
),
stringType,
stringType
)
);
}
else {
// ZoneOffset as seconds
final CaseSearchedExpression caseSearchedExpression =
zoneOffsetSeconds( stringType, integerType, offsetExpression );
final Expression hours = getHours( integerType, offsetExpression );
final Expression minutes = getMinutes( integerType, offsetExpression );
final CaseSearchedExpression minuteStart = new CaseSearchedExpression( stringType );
minuteStart.getWhenFragments().add(
new CaseSearchedExpression.WhenFragment(
new BetweenPredicate(
minutes,
new QueryLiteral<>( -9, integerType ),
new QueryLiteral<>( 9, integerType ),
false,
null
),
new QueryLiteral<>( "0", stringType )
)
);
minuteStart.otherwise( new QueryLiteral<>( "", stringType ) );
return concat(
concatFunction,
stringType,
concat(
concatFunction,
stringType,
concat( concatFunction, stringType, caseSearchedExpression, hours ),
minuteStart
),
minutes
);
}
}
private Expression createSmallOffset(
FunctionRenderer concatFunction,
FunctionRenderer substringFunction,
BasicType<String> stringType,
BasicType<Integer> integerType,
Expression offsetExpression) {
if ( offsetExpression.getExpressionType().getSingleJdbcMapping().getJdbcType().isString() ) {
return new SelfRenderingFunctionSqlAstExpression(
"substring",
substringFunction,
List.of(
offsetExpression,
new QueryLiteral<>( 1, integerType ),
new QueryLiteral<>( 4, integerType )
),
stringType,
stringType
);
}
else {
// ZoneOffset as seconds
final CaseSearchedExpression caseSearchedExpression =
zoneOffsetSeconds( stringType, integerType, offsetExpression );
final Expression hours = getHours( integerType, offsetExpression );
return concat( concatFunction, stringType, caseSearchedExpression, hours );
}
}
private Expression concatAsLiteral(
FunctionRenderer concatFunction,
BasicType<String> stringType,
String delimiter,
Expression expression,
Expression expression2) {
return concat(
concatFunction,
stringType,
concat(
concatFunction,
stringType,
concat(
concatFunction,
stringType,
expression,
new QueryLiteral<>( delimiter, stringType )
),
expression2
),
new QueryLiteral<>( delimiter, stringType )
);
}
private Expression concat(
FunctionRenderer concatFunction,
BasicType<String> stringType,
Expression expression,
Expression expression2) {
if ( expression == null ) {
return expression2;
}
else if ( expression instanceof SelfRenderingFunctionSqlAstExpression<?> selfRenderingFunction
&& "concat".equals( selfRenderingFunction.getFunctionName() ) ) {
final List<SqlAstNode> list = (List<SqlAstNode>) selfRenderingFunction.getArguments();
final SqlAstNode lastOperand = list.get( list.size() - 1 );
if ( expression2 instanceof QueryLiteral<?> literal2
&& lastOperand instanceof QueryLiteral<?> literalOperand ) {
list.set(
list.size() - 1,
new QueryLiteral<>(
literalOperand.getLiteralValue().toString()
+ literal2.getLiteralValue().toString(),
stringType
)
);
}
else {
list.add( expression2 );
}
return expression;
}
else if ( expression2 instanceof SelfRenderingFunctionSqlAstExpression<?> selfRenderingFunction
&& "concat".equals( selfRenderingFunction.getFunctionName() ) ) {
final List<SqlAstNode> list = (List<SqlAstNode>) selfRenderingFunction.getArguments();
final SqlAstNode firstOperand = list.get( 0 );
if ( expression instanceof QueryLiteral<?> literal
&& firstOperand instanceof QueryLiteral<?> literalOperand ) {
list.set(
list.size() - 1,
new QueryLiteral<>(
literal.getLiteralValue().toString() +
literalOperand.getLiteralValue().toString(),
stringType
)
);
}
else {
list.add( 0, expression );
}
return expression2;
}
else if ( expression instanceof QueryLiteral<?> literal
&& expression2 instanceof QueryLiteral<?> literal2 ) {
return new QueryLiteral<>(
literal.getLiteralValue().toString() +
literal2.getLiteralValue().toString(),
stringType
);
}
else {
final List<Expression> list = new ArrayList<>( 2 );
list.add( expression );
list.add( expression2 );
return new SelfRenderingFunctionSqlAstExpression<>(
"concat",
concatFunction,
list,
stringType,
stringType
);
}
}
private Expression getHours(
BasicType<Integer> integerType,
Expression offsetExpression) {
return /*new SelfRenderingFunctionSqlAstExpression(
"cast",
castFunction,
List.of(*/
new BinaryArithmeticExpression(
offsetExpression,
DIVIDE_PORTABLE,
new QueryLiteral<>( 3600, integerType ),
integerType
)/*,
new CastTarget( integerType )
),
integerType,
integerType
)*/;
}
private Expression getMinutes(
BasicType<Integer> integerType,
Expression offsetExpression){
return /*new SelfRenderingFunctionSqlAstExpression(
"cast",
castFunction,
List.of(*/
new BinaryArithmeticExpression(
new BinaryArithmeticExpression(
offsetExpression,
MODULO,
new QueryLiteral<>( 3600, integerType ),
integerType
),
DIVIDE_PORTABLE,
new QueryLiteral<>( 60, integerType ),
integerType
)/*,
new CastTarget( integerType )
),
integerType,
integerType
)*/;
}
}
private static CaseSearchedExpression zoneOffsetSeconds(BasicType<String> stringType, BasicType<Integer> integerType, Expression offsetExpression) {
final CaseSearchedExpression caseSearchedExpression = new CaseSearchedExpression(stringType);
caseSearchedExpression.getWhenFragments().add(
new CaseSearchedExpression.WhenFragment(
new ComparisonPredicate(
offsetExpression,
LESS_THAN_OR_EQUAL,
new QueryLiteral<>( -36000, integerType)
),
new QueryLiteral<>( "-", stringType)
)
);
caseSearchedExpression.getWhenFragments().add(
new CaseSearchedExpression.WhenFragment(
new ComparisonPredicate(
offsetExpression,
LESS_THAN,
new QueryLiteral<>( 0, integerType)
),
new QueryLiteral<>( "-0", stringType)
)
);
caseSearchedExpression.getWhenFragments().add(
new CaseSearchedExpression.WhenFragment(
new ComparisonPredicate(
offsetExpression,
GREATER_THAN_OR_EQUAL,
new QueryLiteral<>( 36000, integerType)
),
new QueryLiteral<>( "+", stringType)
)
);
caseSearchedExpression.otherwise( new QueryLiteral<>( "+0", stringType) );
return caseSearchedExpression;
}
}
| FormatSqmFunction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/querycache/QueryCacheTest.java | {
"start": 23162,
"end": 24114
} | class ____ implements Interceptor {
private volatile CountDownLatch blockLatch;
private volatile CountDownLatch waitLatch;
@Override
public boolean onLoad(Object entity, Object id, Object[] state, String[] propertyNames, Type[] types){
onLoad();
return true;
}
private void onLoad() {
// Synchronize load and update activities
try {
if ( waitLatch != null ) {
waitLatch.countDown();
}
if ( blockLatch != null ) {
blockLatch.await();
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException( e );
}
}
public void blockOnLoad() {
blockLatch = new CountDownLatch( 1 );
waitLatch = new CountDownLatch( 1 );
}
public void waitOnLoad() throws InterruptedException {
waitLatch.await();
}
public void unblockOnLoad() {
if ( blockLatch != null ) {
blockLatch.countDown();
}
}
}
public static | DelayLoadOperations |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/injectionstrategy/jsr330/_default/Jsr330DefaultCompileOptionFieldMapperTest.java | {
"start": 1877,
"end": 3581
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource();
@Inject
@Named
private CustomerJsr330DefaultCompileOptionFieldMapper customerMapper;
private ConfigurableApplicationContext context;
@BeforeEach
public void springUp() {
context = new AnnotationConfigApplicationContext( getClass() );
context.getAutowireCapableBeanFactory().autowireBean( this );
}
@AfterEach
public void springDown() {
if ( context != null ) {
context.close();
}
}
@ProcessorTest
public void shouldConvertToTarget() {
// given
CustomerEntity customerEntity = new CustomerEntity();
customerEntity.setName( "Samuel" );
customerEntity.setGender( Gender.MALE );
// when
CustomerDto customerDto = customerMapper.asTarget( customerEntity );
// then
assertThat( customerDto ).isNotNull();
assertThat( customerDto.getName() ).isEqualTo( "Samuel" );
assertThat( customerDto.getGender() ).isEqualTo( GenderDto.M );
}
@ProcessorTest
public void shouldHaveFieldInjection() {
generatedSource.forMapper( CustomerJsr330DefaultCompileOptionFieldMapper.class )
.content()
.contains( "import javax.inject.Inject;" )
.contains( "import javax.inject.Named;" )
.contains( "import javax.inject.Singleton;" )
.contains( "@Inject" + lineSeparator() + " private GenderJsr330DefaultCompileOptionFieldMapper" )
.doesNotContain( "public CustomerJsr330DefaultCompileOptionFieldMapperImpl(" );
}
}
| Jsr330DefaultCompileOptionFieldMapperTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/action/CompositeAction.java | {
"start": 1009,
"end": 3066
} | class ____ extends AbstractAction {
/**
* Actions to perform.
*/
private final Action[] actions;
/**
* Stop on error.
*/
private final boolean stopOnError;
/**
* Construct a new composite action.
*
* @param actions list of actions, may not be null.
* @param stopOnError if true, stop on the first false return value or exception.
*/
public CompositeAction(final List<Action> actions, final boolean stopOnError) {
this.actions = new Action[actions.size()];
actions.toArray(this.actions);
this.stopOnError = stopOnError;
}
/**
* {@inheritDoc}
*/
@Override
public void run() {
try {
execute();
} catch (final IOException ex) {
LOGGER.warn("Exception during file rollover.", ex);
}
}
/**
* Execute sequence of actions.
*
* @return true if all actions were successful.
* @throws IOException on IO error.
*/
@Override
public boolean execute() throws IOException {
if (stopOnError) {
for (final Action action : actions) {
if (!action.execute()) {
return false;
}
}
return true;
}
boolean status = true;
IOException exception = null;
for (final Action action : actions) {
try {
status &= action.execute();
} catch (final IOException ex) {
status = false;
if (exception == null) {
exception = ex;
}
}
}
if (exception != null) {
throw exception;
}
return status;
}
@Override
public String toString() {
return CompositeAction.class.getSimpleName() + Arrays.toString(actions);
}
public Action[] getActions() {
return actions;
}
public boolean isStopOnError() {
return stopOnError;
}
}
| CompositeAction |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java | {
"start": 496558,
"end": 507845
} | class ____<T extends __> extends EImp<T> implements HamletSpec.CITE {
public CITE(String name, T parent, EnumSet<EOpt> opts) {
super(name, parent, opts);
}
@Override
public CITE<T> $id(String value) {
addAttr("id", value);
return this;
}
@Override
public CITE<T> $class(String value) {
addAttr("class", value);
return this;
}
@Override
public CITE<T> $title(String value) {
addAttr("title", value);
return this;
}
@Override
public CITE<T> $style(String value) {
addAttr("style", value);
return this;
}
@Override
public CITE<T> $lang(String value) {
addAttr("lang", value);
return this;
}
@Override
public CITE<T> $dir(Dir value) {
addAttr("dir", value);
return this;
}
@Override
public CITE<T> $onclick(String value) {
addAttr("onclick", value);
return this;
}
@Override
public CITE<T> $ondblclick(String value) {
addAttr("ondblclick", value);
return this;
}
@Override
public CITE<T> $onmousedown(String value) {
addAttr("onmousedown", value);
return this;
}
@Override
public CITE<T> $onmouseup(String value) {
addAttr("onmouseup", value);
return this;
}
@Override
public CITE<T> $onmouseover(String value) {
addAttr("onmouseover", value);
return this;
}
@Override
public CITE<T> $onmousemove(String value) {
addAttr("onmousemove", value);
return this;
}
@Override
public CITE<T> $onmouseout(String value) {
addAttr("onmouseout", value);
return this;
}
@Override
public CITE<T> $onkeypress(String value) {
addAttr("onkeypress", value);
return this;
}
@Override
public CITE<T> $onkeydown(String value) {
addAttr("onkeydown", value);
return this;
}
@Override
public CITE<T> $onkeyup(String value) {
addAttr("onkeyup", value);
return this;
}
@Override
public CITE<T> __(Object... lines) {
_p(true, lines);
return this;
}
@Override
public CITE<T> _r(Object... lines) {
_p(false, lines);
return this;
}
@Override
public B<CITE<T>> b() {
closeAttrs();
return b_(this, true);
}
@Override
public CITE<T> b(String cdata) {
return b().__(cdata).__();
}
@Override
public CITE<T> b(String selector, String cdata) {
return setSelector(b(), selector).__(cdata).__();
}
@Override
public I<CITE<T>> i() {
closeAttrs();
return i_(this, true);
}
@Override
public CITE<T> i(String cdata) {
return i().__(cdata).__();
}
@Override
public CITE<T> i(String selector, String cdata) {
return setSelector(i(), selector).__(cdata).__();
}
@Override
public SMALL<CITE<T>> small() {
closeAttrs();
return small_(this, true);
}
@Override
public CITE<T> small(String cdata) {
return small().__(cdata).__();
}
@Override
public CITE<T> small(String selector, String cdata) {
return setSelector(small(), selector).__(cdata).__();
}
@Override
public CITE<T> em(String cdata) {
return em().__(cdata).__();
}
@Override
public EM<CITE<T>> em() {
closeAttrs();
return em_(this, true);
}
@Override
public CITE<T> em(String selector, String cdata) {
return setSelector(em(), selector).__(cdata).__();
}
@Override
public STRONG<CITE<T>> strong() {
closeAttrs();
return strong_(this, true);
}
@Override
public CITE<T> strong(String cdata) {
return strong().__(cdata).__();
}
@Override
public CITE<T> strong(String selector, String cdata) {
return setSelector(strong(), selector).__(cdata).__();
}
@Override
public DFN<CITE<T>> dfn() {
closeAttrs();
return dfn_(this, true);
}
@Override
public CITE<T> dfn(String cdata) {
return dfn().__(cdata).__();
}
@Override
public CITE<T> dfn(String selector, String cdata) {
return setSelector(dfn(), selector).__(cdata).__();
}
@Override
public CODE<CITE<T>> code() {
closeAttrs();
return code_(this, true);
}
@Override
public CITE<T> code(String cdata) {
return code().__(cdata).__();
}
@Override
public CITE<T> code(String selector, String cdata) {
return setSelector(code(), selector).__(cdata).__();
}
@Override
public CITE<T> samp(String cdata) {
return samp().__(cdata).__();
}
@Override
public SAMP<CITE<T>> samp() {
closeAttrs();
return samp_(this, true);
}
@Override
public CITE<T> samp(String selector, String cdata) {
return setSelector(samp(), selector).__(cdata).__();
}
@Override
public KBD<CITE<T>> kbd() {
closeAttrs();
return kbd_(this, true);
}
@Override
public CITE<T> kbd(String cdata) {
return kbd().__(cdata).__();
}
@Override
public CITE<T> kbd(String selector, String cdata) {
return setSelector(kbd(), selector).__(cdata).__();
}
@Override
public VAR<CITE<T>> var() {
closeAttrs();
return var_(this, true);
}
@Override
public CITE<T> var(String cdata) {
return var().__(cdata).__();
}
@Override
public CITE<T> var(String selector, String cdata) {
return setSelector(var(), selector).__(cdata).__();
}
@Override
public CITE<CITE<T>> cite() {
closeAttrs();
return cite_(this, true);
}
@Override
public CITE<T> cite(String cdata) {
return cite().__(cdata).__();
}
@Override
public CITE<T> cite(String selector, String cdata) {
return setSelector(cite(), selector).__(cdata).__();
}
@Override
public ABBR<CITE<T>> abbr() {
closeAttrs();
return abbr_(this, true);
}
@Override
public CITE<T> abbr(String cdata) {
return abbr().__(cdata).__();
}
@Override
public CITE<T> abbr(String selector, String cdata) {
return setSelector(abbr(), selector).__(cdata).__();
}
@Override
public A<CITE<T>> a() {
closeAttrs();
return a_(this, true);
}
@Override
public A<CITE<T>> a(String selector) {
return setSelector(a(), selector);
}
@Override
public CITE<T> a(String href, String anchorText) {
return a().$href(href).__(anchorText).__();
}
@Override
public CITE<T> a(String selector, String href, String anchorText) {
return setSelector(a(), selector).$href(href).__(anchorText).__();
}
@Override
public IMG<CITE<T>> img() {
closeAttrs();
return img_(this, true);
}
@Override
public CITE<T> img(String src) {
return img().$src(src).__();
}
@Override
public OBJECT<CITE<T>> object() {
closeAttrs();
return object_(this, true);
}
@Override
public OBJECT<CITE<T>> object(String selector) {
return setSelector(object(), selector);
}
@Override
public SUB<CITE<T>> sub() {
closeAttrs();
return sub_(this, true);
}
@Override
public CITE<T> sub(String cdata) {
return sub().__(cdata).__();
}
@Override
public CITE<T> sub(String selector, String cdata) {
return setSelector(sub(), selector).__(cdata).__();
}
@Override
public SUP<CITE<T>> sup() {
closeAttrs();
return sup_(this, true);
}
@Override
public CITE<T> sup(String cdata) {
return sup().__(cdata).__();
}
@Override
public CITE<T> sup(String selector, String cdata) {
return setSelector(sup(), selector).__(cdata).__();
}
@Override
public MAP<CITE<T>> map() {
closeAttrs();
return map_(this, true);
}
@Override
public MAP<CITE<T>> map(String selector) {
return setSelector(map(), selector);
}
@Override
public CITE<T> q(String cdata) {
return q().__(cdata).__();
}
@Override
public CITE<T> q(String selector, String cdata) {
return setSelector(q(), selector).__(cdata).__();
}
@Override
public Q<CITE<T>> q() {
closeAttrs();
return q_(this, true);
}
@Override
public BR<CITE<T>> br() {
closeAttrs();
return br_(this, true);
}
@Override
public CITE<T> br(String selector) {
return setSelector(br(), selector).__();
}
@Override
public BDO<CITE<T>> bdo() {
closeAttrs();
return bdo_(this, true);
}
@Override
public CITE<T> bdo(Dir dir, String cdata) {
return bdo().$dir(dir).__(cdata).__();
}
@Override
public SPAN<CITE<T>> span() {
closeAttrs();
return span_(this, true);
}
@Override
public CITE<T> span(String cdata) {
return span().__(cdata).__();
}
@Override
public CITE<T> span(String selector, String cdata) {
return setSelector(span(), selector).__(cdata).__();
}
@Override
public SCRIPT<CITE<T>> script() {
closeAttrs();
return script_(this, true);
}
@Override
public CITE<T> script(String src) {
return setScriptSrc(script(), src).__();
}
@Override
public INS<CITE<T>> ins() {
closeAttrs();
return ins_(this, true);
}
@Override
public CITE<T> ins(String cdata) {
return ins().__(cdata).__();
}
@Override
public DEL<CITE<T>> del() {
closeAttrs();
return del_(this, true);
}
@Override
public CITE<T> del(String cdata) {
return del().__(cdata).__();
}
@Override
public LABEL<CITE<T>> label() {
closeAttrs();
return label_(this, true);
}
@Override
public CITE<T> label(String forId, String cdata) {
return label().$for(forId).__(cdata).__();
}
@Override
public INPUT<CITE<T>> input(String selector) {
return setSelector(input(), selector);
}
@Override
public INPUT<CITE<T>> input() {
closeAttrs();
return input_(this, true);
}
@Override
public SELECT<CITE<T>> select() {
closeAttrs();
return select_(this, true);
}
@Override
public SELECT<CITE<T>> select(String selector) {
return setSelector(select(), selector);
}
@Override
public TEXTAREA<CITE<T>> textarea(String selector) {
return setSelector(textarea(), selector);
}
@Override
public TEXTAREA<CITE<T>> textarea() {
closeAttrs();
return textarea_(this, true);
}
@Override
public CITE<T> textarea(String selector, String cdata) {
return setSelector(textarea(), selector).__(cdata).__();
}
@Override
public BUTTON<CITE<T>> button() {
closeAttrs();
return button_(this, true);
}
@Override
public BUTTON<CITE<T>> button(String selector) {
return setSelector(button(), selector);
}
@Override
public CITE<T> button(String selector, String cdata) {
return setSelector(button(), selector).__(cdata).__();
}
}
public | CITE |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/ExecNodeMetadata.java | {
"start": 1977,
"end": 2383
} | class ____ be annotated directly with multiple
* {@link ExecNodeMetadata} annotations, or with a single {@link MultipleExecNodeMetadata}
* annotation where the {@link MultipleExecNodeMetadata#value()} is an array of {@link
* ExecNodeMetadata} annotations.
*/
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(value = MultipleExecNodeMetadata.class)
@Internal
public @ | can |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/testing/utilities/testresttemplate/MySpringBootTestsConfiguration.java | {
"start": 1654,
"end": 1733
} | class ____ {
@RestController
private static final | MySpringBootTestsConfiguration |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/ide/IdeFileBuildItem.java | {
"start": 239,
"end": 496
} | class ____ extends SimpleBuildItem {
private final Set<Ide> detectedIDEs;
IdeFileBuildItem(Set<Ide> detectedIDEs) {
this.detectedIDEs = detectedIDEs;
}
Set<Ide> getDetectedIDEs() {
return detectedIDEs;
}
}
| IdeFileBuildItem |
java | google__guice | extensions/servlet/src/com/google/inject/servlet/LinkedFilterBindingImpl.java | {
"start": 861,
"end": 1588
} | class ____ extends AbstractServletModuleBinding<Key<? extends Filter>>
implements LinkedFilterBinding {
LinkedFilterBindingImpl(
Map<String, String> initParams,
Key<? extends Filter> target,
UriPatternMatcher patternMatcher) {
super(initParams, target, patternMatcher);
}
@Override
public Key<? extends Filter> getLinkedKey() {
return getTarget();
}
@Override
public String toString() {
return MoreObjects.toStringHelper(LinkedFilterBinding.class)
.add("pattern", getPattern())
.add("initParams", getInitParams())
.add("uriPatternType", getUriPatternType())
.add("linkedFilterKey", getLinkedKey())
.toString();
}
}
| LinkedFilterBindingImpl |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java | {
"start": 9606,
"end": 11159
} | class ____ implements JSONAware, JSONStreamAware {
Variable<Long> variable;
long[] values;
private AtomicLong sum;
private int last;
private boolean full;
void init(int size, Variable<Long> variable) {
this.variable = variable;
values = new long[size];
sum = new AtomicLong();
last = 0;
}
void sample() {
int index = last;
long valueGoingOut = values[last];
full = full || last == (values.length - 1);
last = (last + 1) % values.length;
values[index] = variable.getValue();
sum.addAndGet(-valueGoingOut + values[index]);
}
double getRate() {
return ((double) sum.get()) / ((full) ? values.length : ((last == 0) ? 1 : last));
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
JSONObject json = new JSONObject();
json.put("sampler", getRate());
json.put("size", (full) ? values.length : last);
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
out.write(toJSONString());
}
}
@Override
public void addSampler(String group, String name, int samplingSize, Variable<Long> variable) {
Sampler sampler = getToAdd(group, name, Sampler.class, samplerLock, samplers);
samplerLock.lock();
try {
sampler.init(samplingSize, variable);
samplersList.add(sampler);
} finally {
samplerLock.unlock();
}
}
| Sampler |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsHashCodeTest.java | {
"start": 2382,
"end": 2738
} | class ____ extends Super {
// BUG: Diagnostic contains:
public boolean equals(Object o) {
return false;
}
}
""")
.doTest();
}
@Test
public void inherited() {
compilationHelper
.addSourceLines(
"Super.java",
"""
| Test |
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/processor/channel/RemoteChannelConverter.java | {
"start": 864,
"end": 939
} | interface ____ {
RemoteChannel toRemoteChannel();
}
| RemoteChannelConverter |
java | spring-projects__spring-boot | module/spring-boot-restclient-test/src/test/java/org/springframework/boot/restclient/test/MockServerRestTemplateCustomizerTests.java | {
"start": 1799,
"end": 8283
} | class ____ {
private MockServerRestTemplateCustomizer customizer;
@BeforeEach
void setup() {
this.customizer = new MockServerRestTemplateCustomizer();
}
@Test
void createShouldUseSimpleRequestExpectationManager() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer();
customizer.customize(new RestTemplate());
assertThat(customizer.getServer()).extracting("expectationManager")
.isInstanceOf(SimpleRequestExpectationManager.class);
}
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenExpectationManagerClassIsNullShouldThrowException() {
Class<? extends RequestExpectationManager> expectationManager = null;
assertThatIllegalArgumentException().isThrownBy(() -> new MockServerRestTemplateCustomizer(expectationManager))
.withMessageContaining("'expectationManager' must not be null");
}
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenExpectationManagerSupplierIsNullShouldThrowException() {
Supplier<? extends RequestExpectationManager> expectationManagerSupplier = null;
assertThatIllegalArgumentException()
.isThrownBy(() -> new MockServerRestTemplateCustomizer(expectationManagerSupplier))
.withMessageContaining("'expectationManagerSupplier' must not be null");
}
@Test
void createShouldUseExpectationManagerClass() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer(
UnorderedRequestExpectationManager.class);
customizer.customize(new RestTemplate());
assertThat(customizer.getServer()).extracting("expectationManager")
.isInstanceOf(UnorderedRequestExpectationManager.class);
}
@Test
void createShouldUseSupplier() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer(
UnorderedRequestExpectationManager::new);
customizer.customize(new RestTemplate());
assertThat(customizer.getServer()).extracting("expectationManager")
.isInstanceOf(UnorderedRequestExpectationManager.class);
}
@Test
void detectRootUriShouldDefaultToTrue() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer(
UnorderedRequestExpectationManager.class);
customizer.customize(new RestTemplateBuilder().rootUri("https://example.com").build());
assertThat(customizer.getServer()).extracting("expectationManager")
.isInstanceOf(RootUriRequestExpectationManager.class);
}
@Test
void setDetectRootUriShouldDisableRootUriDetection() {
this.customizer.setDetectRootUri(false);
this.customizer.customize(new RestTemplateBuilder().rootUri("https://example.com").build());
assertThat(this.customizer.getServer()).extracting("expectationManager")
.isInstanceOf(SimpleRequestExpectationManager.class);
}
@Test
void bufferContentShouldDefaultToFalse() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer();
RestTemplate restTemplate = new RestTemplate();
customizer.customize(restTemplate);
assertThat(restTemplate.getRequestFactory()).isInstanceOf(ClientHttpRequestFactory.class);
}
@Test
void setBufferContentShouldEnableContentBuffering() {
MockServerRestTemplateCustomizer customizer = new MockServerRestTemplateCustomizer();
RestTemplate restTemplate = new RestTemplate();
customizer.setBufferContent(true);
customizer.customize(restTemplate);
assertThat(restTemplate.getRequestFactory()).isInstanceOf(BufferingClientHttpRequestFactory.class);
}
@Test
void customizeShouldBindServer() {
RestTemplate template = new RestTemplateBuilder(this.customizer).build();
this.customizer.getServer().expect(requestTo("/test")).andRespond(withSuccess());
template.getForEntity("/test", String.class);
this.customizer.getServer().verify();
}
@Test
void getServerWhenNoServersAreBoundShouldThrowException() {
assertThatIllegalStateException().isThrownBy(this.customizer::getServer)
.withMessageContaining("Unable to return a single MockRestServiceServer since "
+ "MockServerRestTemplateCustomizer has not been bound to a RestTemplate");
}
@Test
void getServerWhenMultipleServersAreBoundShouldThrowException() {
this.customizer.customize(new RestTemplate());
this.customizer.customize(new RestTemplate());
assertThatIllegalStateException().isThrownBy(this.customizer::getServer)
.withMessageContaining("Unable to return a single MockRestServiceServer since "
+ "MockServerRestTemplateCustomizer has been bound to more than one RestTemplate");
}
@Test
void getServerWhenSingleServerIsBoundShouldReturnServer() {
RestTemplate template = new RestTemplate();
this.customizer.customize(template);
assertThat(this.customizer.getServer()).isEqualTo(this.customizer.getServer(template));
}
@Test
void getServerWhenRestTemplateIsFoundShouldReturnServer() {
RestTemplate template1 = new RestTemplate();
RestTemplate template2 = new RestTemplate();
this.customizer.customize(template1);
this.customizer.customize(template2);
assertThat(this.customizer.getServer(template1)).isNotNull();
assertThat(this.customizer.getServer(template2)).isNotNull().isNotSameAs(this.customizer.getServer(template1));
}
@Test
void getServerWhenRestTemplateIsNotFoundShouldReturnNull() {
RestTemplate template1 = new RestTemplate();
RestTemplate template2 = new RestTemplate();
this.customizer.customize(template1);
assertThat(this.customizer.getServer(template1)).isNotNull();
assertThat(this.customizer.getServer(template2)).isNull();
}
@Test
void getServersShouldReturnServers() {
RestTemplate template1 = new RestTemplate();
RestTemplate template2 = new RestTemplate();
this.customizer.customize(template1);
this.customizer.customize(template2);
assertThat(this.customizer.getServers()).containsOnlyKeys(template1, template2);
}
@Test
void getExpectationManagersShouldReturnExpectationManagers() {
RestTemplate template1 = new RestTemplate();
RestTemplate template2 = new RestTemplate();
this.customizer.customize(template1);
this.customizer.customize(template2);
RequestExpectationManager manager1 = this.customizer.getExpectationManagers().get(template1);
RequestExpectationManager manager2 = this.customizer.getExpectationManagers().get(template2);
assertThat(this.customizer.getServer(template1)).extracting("expectationManager").isEqualTo(manager1);
assertThat(this.customizer.getServer(template2)).extracting("expectationManager").isEqualTo(manager2);
}
}
| MockServerRestTemplateCustomizerTests |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/json/JsonContentAssert.java | {
"start": 46386,
"end": 49038
} | class ____ {
private final String expression;
private final JsonPath jsonPath;
JsonPathValue(CharSequence expression, Object... args) {
org.springframework.util.Assert.hasText((expression != null) ? expression.toString() : null,
"'expression' must not be empty");
this.expression = String.format(expression.toString(), args);
this.jsonPath = JsonPath.compile(this.expression);
}
void assertHasEmptyValue() {
if (ObjectUtils.isEmpty(getValue(false)) || isIndefiniteAndEmpty()) {
return;
}
failWithMessage(getExpectedValueMessage("an empty value"));
}
void assertDoesNotHaveEmptyValue() {
if (!ObjectUtils.isEmpty(getValue(false))) {
return;
}
failWithMessage(getExpectedValueMessage("a non-empty value"));
}
void assertHasPath() {
try {
read();
}
catch (PathNotFoundException ex) {
failWithMessage("No JSON path \"%s\" found", this.expression);
}
}
void assertDoesNotHavePath() {
try {
read();
failWithMessage("Expecting no JSON path \"%s\"", this.expression);
}
catch (PathNotFoundException ex) {
// Ignore
}
}
void assertHasValue(@Nullable Class<?> type, String expectedDescription) {
Object value = getValue(true);
if (value == null || isIndefiniteAndEmpty()) {
failWithNoValueMessage();
}
if (type != null && !type.isInstance(value)) {
failWithMessage(getExpectedValueMessage(expectedDescription));
}
}
void assertDoesNotHaveValue() {
if (getValue(false) == null || isIndefiniteAndEmpty()) {
return;
}
failWithMessage(getExpectedValueMessage("no value"));
}
private boolean isIndefiniteAndEmpty() {
return !isDefinite() && isEmpty();
}
private boolean isDefinite() {
return this.jsonPath.isDefinite();
}
private boolean isEmpty() {
return ObjectUtils.isEmpty(getValue(false));
}
@Nullable Object getValue(boolean required) {
try {
return read();
}
catch (Exception ex) {
if (required) {
failWithNoValueMessage();
}
return null;
}
}
private void failWithNoValueMessage() {
failWithMessage("No value at JSON path \"%s\"", this.expression);
}
private Object read() {
CharSequence json = JsonContentAssert.this.actual;
return this.jsonPath.read((json != null) ? json.toString() : null, JsonContentAssert.this.configuration);
}
private String getExpectedValueMessage(String expectedDescription) {
return String.format("Expected %s at JSON path \"%s\" but found: %s", expectedDescription, this.expression,
ObjectUtils.nullSafeToString(StringUtils.quoteIfString(getValue(false))));
}
}
}
| JsonPathValue |
java | apache__logging-log4j2 | log4j-osgi-test/src/test/java/org/apache/logging/log4j/osgi/tests/CustomConfiguration.java | {
"start": 1633,
"end": 2551
} | class ____ extends AbstractConfiguration {
private final ListAppender appender = new ListAppender();
CustomConfiguration(final LoggerContext loggerContext) {
this(loggerContext, ConfigurationSource.NULL_SOURCE);
}
/**
* Constructor to create the default configuration.
*/
CustomConfiguration(final LoggerContext loggerContext, final ConfigurationSource source) {
super(loggerContext, source);
setName("Custom");
appender.start();
addAppender(appender);
final LoggerConfig root = getRootLogger();
root.addAppender(appender, null, null);
root.setLevel(Level.ALL);
}
@Override
protected void doConfigure() {}
public List<LogEvent> getEvents() {
return appender.getEvents();
}
public void clearEvents() {
appender.getEvents().clear();
}
private static final | CustomConfiguration |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/h12/BiStreamServerCallListener.java | {
"start": 1130,
"end": 2308
} | class ____ extends AbstractServerCallListener {
private StreamObserver<Object> requestObserver;
public BiStreamServerCallListener(
RpcInvocation invocation, Invoker<?> invoker, FlowControlStreamObserver<Object> responseObserver) {
super(invocation, invoker, responseObserver);
invocation.setArguments(new Object[] {responseObserver});
invoke();
}
@Override
@SuppressWarnings("unchecked")
public void onReturn(Object value) {
requestObserver = (StreamObserver<Object>) value;
}
@Override
public void onMessage(Object message) {
if (message instanceof Object[]) {
message = ((Object[]) message)[0];
}
requestObserver.onNext(message);
if (((FlowControlStreamObserver<Object>) responseObserver).isAutoRequestN()) {
((FlowControlStreamObserver<Object>) responseObserver).request(1);
}
}
@Override
public void onCancel(long code) {
requestObserver.onError(new HttpStatusException((int) code));
}
@Override
public void onComplete() {
requestObserver.onCompleted();
}
}
| BiStreamServerCallListener |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/artifact/installer/ArtifactInstaller.java | {
"start": 1011,
"end": 2339
} | interface ____ {
String ROLE = ArtifactInstaller.class.getName();
/**
* Install an artifact from a particular directory. The artifact handler is used to determine
* the filename of the source file.
*
* @param basedir the directory where the artifact is stored
* @param finalName the name of the artifact sans extension
* @param artifact the artifact definition
* @param localRepository the local repository to install into
* @throws ArtifactInstallationException if an error occurred installing the artifact
* @deprecated to be removed before 2.0 after the install/deploy plugins use the alternate
* method
*/
@Deprecated
void install(String basedir, String finalName, Artifact artifact, ArtifactRepository localRepository)
throws ArtifactInstallationException;
/**
* Install an artifact from a particular file.
*
* @param source the file to install
* @param artifact the artifact definition
* @param localRepository the local repository to install into
* @throws ArtifactInstallationException if an error occurred installing the artifact
*/
void install(File source, Artifact artifact, ArtifactRepository localRepository)
throws ArtifactInstallationException;
}
| ArtifactInstaller |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/NamespaceGlobalMethodSecurityExpressionHandlerTests.java | {
"start": 2003,
"end": 3114
} | class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired(required = false)
private MethodSecurityService service;
@Test
@WithMockUser
public void methodSecurityWhenUsingCustomPermissionEvaluatorThenPreAuthorizesAccordingly() {
this.spring.register(CustomAccessDecisionManagerConfig.class, MethodSecurityServiceConfig.class).autowire();
assertThat(this.service.hasPermission("granted")).isNull();
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> this.service.hasPermission("denied"));
}
@Test
@WithMockUser
public void methodSecurityWhenUsingCustomPermissionEvaluatorThenPostAuthorizesAccordingly() {
this.spring.register(CustomAccessDecisionManagerConfig.class, MethodSecurityServiceConfig.class).autowire();
assertThat(this.service.postHasPermission("granted")).isNull();
assertThatExceptionOfType(AccessDeniedException.class)
.isThrownBy(() -> this.service.postHasPermission("denied"));
}
@Configuration
@EnableGlobalMethodSecurity(prePostEnabled = true)
public static | NamespaceGlobalMethodSecurityExpressionHandlerTests |
java | apache__camel | components/camel-google/camel-google-sheets/src/test/java/org/apache/camel/component/google/sheets/SheetsSpreadsheetsIT.java | {
"start": 2113,
"end": 2408
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(SheetsSpreadsheetsIT.class);
private static final String PATH_PREFIX
= GoogleSheetsApiCollection.getCollection().getApiName(SheetsSpreadsheetsApiMethod.class).getName();
@Nested
| SheetsSpreadsheetsIT |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AutoValueImmutableFieldsTest.java | {
"start": 9107,
"end": 9511
} | class ____ {
@SuppressWarnings("mutable")
public abstract Collection<String> countries();
}
""")
.doTest();
}
@Test
public void matchesNonPublic() {
compilationHelper
.addSourceLines(
"in/Test.java",
"""
import com.google.auto.value.AutoValue;
@AutoValue
abstract | Test |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/MergedContextConfiguration.java | {
"start": 4406,
"end": 5089
} | class ____ which the configuration was merged
* @param locations the merged context resource locations
* @param classes the merged annotated classes
* @param activeProfiles the merged active bean definition profiles
* @param contextLoader the resolved {@code ContextLoader}
*/
public MergedContextConfiguration(Class<?> testClass, String @Nullable [] locations, Class<?> @Nullable [] classes,
String @Nullable [] activeProfiles, ContextLoader contextLoader) {
this(testClass, locations, classes, null, activeProfiles, contextLoader);
}
/**
* Create a new {@code MergedContextConfiguration} instance for the
* supplied parameters.
* @param testClass the test | for |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsDynamicFiltering.java | {
"start": 2318,
"end": 2749
} | interface ____ tells the source which fields can be
* applied for filtering and the source needs to pick the fields that can be supported and return
* them to planner. Then the planner will build the plan and construct the operator which will send
* the data to the source in runtime.
*
* <p>In the future, more flexible filtering can be pushed into the source connectors through this
* interface.
*/
@PublicEvolving
public | just |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketDecoderConfig.java | {
"start": 4222,
"end": 6355
} | class ____ {
private int maxFramePayloadLength;
private boolean expectMaskedFrames;
private boolean allowMaskMismatch;
private boolean allowExtensions;
private boolean closeOnProtocolViolation;
private boolean withUTF8Validator;
private Builder(WebSocketDecoderConfig decoderConfig) {
ObjectUtil.checkNotNull(decoderConfig, "decoderConfig");
maxFramePayloadLength = decoderConfig.maxFramePayloadLength();
expectMaskedFrames = decoderConfig.expectMaskedFrames();
allowMaskMismatch = decoderConfig.allowMaskMismatch();
allowExtensions = decoderConfig.allowExtensions();
closeOnProtocolViolation = decoderConfig.closeOnProtocolViolation();
withUTF8Validator = decoderConfig.withUTF8Validator();
}
public Builder maxFramePayloadLength(int maxFramePayloadLength) {
this.maxFramePayloadLength = maxFramePayloadLength;
return this;
}
public Builder expectMaskedFrames(boolean expectMaskedFrames) {
this.expectMaskedFrames = expectMaskedFrames;
return this;
}
public Builder allowMaskMismatch(boolean allowMaskMismatch) {
this.allowMaskMismatch = allowMaskMismatch;
return this;
}
public Builder allowExtensions(boolean allowExtensions) {
this.allowExtensions = allowExtensions;
return this;
}
public Builder closeOnProtocolViolation(boolean closeOnProtocolViolation) {
this.closeOnProtocolViolation = closeOnProtocolViolation;
return this;
}
public Builder withUTF8Validator(boolean withUTF8Validator) {
this.withUTF8Validator = withUTF8Validator;
return this;
}
public WebSocketDecoderConfig build() {
return new WebSocketDecoderConfig(
maxFramePayloadLength, expectMaskedFrames, allowMaskMismatch,
allowExtensions, closeOnProtocolViolation, withUTF8Validator);
}
}
}
| Builder |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/TableChange.java | {
"start": 25779,
"end": 26510
} | class ____ implements TableChange {
private final NamedReference[] clusteringColumns;
private ClusterBy(NamedReference[] clusteringColumns) {
this.clusteringColumns = clusteringColumns;
}
public NamedReference[] clusteringColumns() { return clusteringColumns; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterBy that = (ClusterBy) o;
return Arrays.equals(clusteringColumns, that.clusteringColumns());
}
@Override
public int hashCode() {
return Arrays.hashCode(clusteringColumns);
}
}
/** A TableChange to alter table and add a constraint. */
final | ClusterBy |
java | apache__camel | core/camel-management-api/src/main/java/org/apache/camel/api/management/mbean/ManagedAggregateProcessorMBean.java | {
"start": 973,
"end": 6621
} | interface ____ extends ManagedProcessorMBean {
@ManagedAttribute(description = "The language for the expression")
String getCorrelationExpressionLanguage();
@ManagedAttribute(description = "Correlation Expression")
String getCorrelationExpression();
@ManagedAttribute(description = "Completion timeout in millis")
long getCompletionTimeout();
@ManagedAttribute(description = "The language for the expression")
String getCompletionTimeoutLanguage();
@ManagedAttribute(description = "Completion timeout expression")
String getCompletionTimeoutExpression();
@ManagedAttribute(description = "Completion interval in millis")
long getCompletionInterval();
@ManagedAttribute(description = "Completion timeout checker interval in millis")
long getCompletionTimeoutCheckerInterval();
@ManagedAttribute(description = "Completion size")
int getCompletionSize();
@ManagedAttribute(description = "The language for the expression")
String getCompletionSizeExpressionLanguage();
@ManagedAttribute(description = "Completion size expression")
String getCompletionSizeExpression();
@ManagedAttribute(description = "Complete from batch consumers")
boolean isCompletionFromBatchConsumer();
@ManagedAttribute(description = "Complete all previous groups on new incoming correlation group")
boolean isCompletionOnNewCorrelationGroup();
@ManagedAttribute(description = "Ignore invalid correlation keys")
boolean isIgnoreInvalidCorrelationKeys();
@ManagedAttribute(description = "Whether to close the correlation group on completion if this value is > 0.")
Integer getCloseCorrelationKeyOnCompletion();
@ManagedAttribute(description = "Parallel mode")
boolean isParallelProcessing();
@ManagedAttribute(description = "Optimistic locking")
boolean isOptimisticLocking();
@ManagedAttribute(description = "Whether or not to eager check for completion when a new incoming Exchange has been received")
boolean isEagerCheckCompletion();
@ManagedAttribute(description = "The language for the predicate")
String getCompletionPredicateLanguage();
@ManagedAttribute(description = "A Predicate to indicate when an aggregated exchange is complete")
String getCompletionPredicate();
@ManagedAttribute(description = "Whether or not exchanges which complete due to a timeout should be discarded")
boolean isDiscardOnCompletionTimeout();
@ManagedAttribute(description = "Indicates to complete all current aggregated exchanges when the context is stopped")
boolean isForceCompletionOnStop();
@ManagedAttribute(description = "Indicates to wait to complete all current and partial (pending) aggregated exchanges when the context is stopped")
boolean isCompleteAllOnStop();
@ManagedAttribute(description = "Number of completed exchanges which are currently in-flight")
int getInProgressCompleteExchanges();
@ManagedOperation(description = "Number of groups currently in the aggregation repository")
int aggregationRepositoryGroups();
@ManagedOperation(description = "To force completing a specific group by its key")
int forceCompletionOfGroup(String key);
@ManagedOperation(description = "To force complete of all groups")
int forceCompletionOfAllGroups();
@ManagedOperation(description = "To force discarding a specific group by its key")
int forceDiscardingOfGroup(String key);
@ManagedOperation(description = "To force discarding of all groups")
int forceDiscardingOfAllGroups();
@ManagedAttribute(description = "Current number of closed correlation keys in the memory cache")
int getClosedCorrelationKeysCacheSize();
@ManagedOperation(description = "Clear all the closed correlation keys stored in the cache")
void clearClosedCorrelationKeysCache();
@ManagedAttribute(description = "Total number of exchanges arrived into the aggregator")
long getTotalIn();
@ManagedAttribute(description = "Total number of exchanges completed and outgoing from the aggregator")
long getTotalCompleted();
@ManagedAttribute(description = "Total number of exchanged completed by completion size trigger")
long getCompletedBySize();
@ManagedAttribute(description = "Total number of exchanged completed by completion aggregation strategy trigger")
long getCompletedByStrategy();
@ManagedAttribute(description = "Total number of exchanged completed by completion interval (timeout) trigger")
long getCompletedByInterval();
@ManagedAttribute(description = "Total number of exchanged completed by completion timeout trigger")
long getCompletedByTimeout();
@ManagedAttribute(description = "Total number of exchanged completed by completion predicate trigger")
long getCompletedByPredicate();
@ManagedAttribute(description = "Total number of exchanged completed by completion batch consumer trigger")
long getCompletedByBatchConsumer();
@ManagedAttribute(description = "Total number of exchanged completed by completion force trigger")
long getCompletedByForce();
@ManagedAttribute(description = "Total number of exchanged discarded")
long getDiscarded();
@ManagedOperation(description = " Reset the statistics counters")
void resetStatistics();
@Override
@ManagedAttribute(description = "Sets whether statistics is enabled")
boolean isStatisticsEnabled();
@Override
@ManagedAttribute(description = "Sets whether statistics is enabled")
void setStatisticsEnabled(boolean statisticsEnabled);
}
| ManagedAggregateProcessorMBean |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PingFrame.java | {
"start": 788,
"end": 1888
} | class ____ implements Http2PingFrame {
private final long content;
private final boolean ack;
public DefaultHttp2PingFrame(long content) {
this(content, false);
}
public DefaultHttp2PingFrame(long content, boolean ack) {
this.content = content;
this.ack = ack;
}
@Override
public boolean ack() {
return ack;
}
@Override
public String name() {
return "PING";
}
@Override
public long content() {
return content;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Http2PingFrame)) {
return false;
}
Http2PingFrame other = (Http2PingFrame) o;
return ack == other.ack() && content == other.content();
}
@Override
public int hashCode() {
int hash = super.hashCode();
hash = hash * 31 + (ack ? 1 : 0);
return hash;
}
@Override
public String toString() {
return StringUtil.simpleClassName(this) + "(content=" + content + ", ack=" + ack + ')';
}
}
| DefaultHttp2PingFrame |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/request/RequestContextListener.java | {
"start": 1961,
"end": 3554
} | class ____ implements ServletRequestListener {
private static final String REQUEST_ATTRIBUTES_ATTRIBUTE =
RequestContextListener.class.getName() + ".REQUEST_ATTRIBUTES";
@Override
public void requestInitialized(ServletRequestEvent requestEvent) {
if (!(requestEvent.getServletRequest() instanceof HttpServletRequest request)) {
throw new IllegalArgumentException(
"Request is not an HttpServletRequest: " + requestEvent.getServletRequest());
}
ServletRequestAttributes attributes = new ServletRequestAttributes(request);
request.setAttribute(REQUEST_ATTRIBUTES_ATTRIBUTE, attributes);
LocaleContextHolder.setLocale(request.getLocale());
RequestContextHolder.setRequestAttributes(attributes);
}
@Override
public void requestDestroyed(ServletRequestEvent requestEvent) {
ServletRequestAttributes attributes = null;
Object reqAttr = requestEvent.getServletRequest().getAttribute(REQUEST_ATTRIBUTES_ATTRIBUTE);
if (reqAttr instanceof ServletRequestAttributes servletRequestAttributes) {
attributes = servletRequestAttributes;
}
RequestAttributes threadAttributes = RequestContextHolder.getRequestAttributes();
if (threadAttributes != null) {
// We're assumably within the original request thread...
LocaleContextHolder.resetLocaleContext();
RequestContextHolder.resetRequestAttributes();
if (attributes == null && threadAttributes instanceof ServletRequestAttributes servletRequestAttributes) {
attributes = servletRequestAttributes;
}
}
if (attributes != null) {
attributes.requestCompleted();
}
}
}
| RequestContextListener |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.