language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/SessionKeySchemaTest.java | {
"start": 2415,
"end": 3945
} | class ____ {
private static final Map<SchemaType, KeySchema> SCHEMA_TYPE_MAP = mkMap(
mkEntry(SchemaType.SessionKeySchema, new SessionKeySchema()),
mkEntry(SchemaType.PrefixedKeyFirstSchema, new KeyFirstSessionKeySchema()),
mkEntry(SchemaType.PrefixedTimeFirstSchema, new TimeFirstSessionKeySchema())
);
private static final Map<SchemaType, Function<Windowed<Bytes>, Bytes>> WINDOW_TO_STORE_BINARY_MAP = mkMap(
mkEntry(SchemaType.SessionKeySchema, SessionKeySchema::toBinary),
mkEntry(SchemaType.PrefixedKeyFirstSchema, KeyFirstSessionKeySchema::toBinary),
mkEntry(SchemaType.PrefixedTimeFirstSchema, TimeFirstSessionKeySchema::toBinary)
);
private static final Map<SchemaType, Function<byte[], Long>> EXTRACT_END_TS_MAP = mkMap(
mkEntry(SchemaType.SessionKeySchema, SessionKeySchema::extractEndTimestamp),
mkEntry(SchemaType.PrefixedKeyFirstSchema, KeyFirstSessionKeySchema::extractEndTimestamp),
mkEntry(SchemaType.PrefixedTimeFirstSchema, TimeFirstSessionKeySchema::extractEndTimestamp)
);
private static final Map<SchemaType, Function<byte[], Long>> EXTRACT_START_TS_MAP = mkMap(
mkEntry(SchemaType.SessionKeySchema, SessionKeySchema::extractStartTimestamp),
mkEntry(SchemaType.PrefixedKeyFirstSchema, KeyFirstSessionKeySchema::extractStartTimestamp),
mkEntry(SchemaType.PrefixedTimeFirstSchema, TimeFirstSessionKeySchema::extractStartTimestamp)
);
@FunctionalInterface
| SessionKeySchemaTest |
java | apache__flink | flink-models/flink-model-openai/src/main/java/org/apache/flink/model/openai/OpenAIUtils.java | {
"start": 1282,
"end": 3032
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(OpenAIUtils.class);
private static final Object LOCK = new Object();
private static final Map<ReferenceKey, ReferenceValue> cache = new HashMap<>();
public static OpenAIClientAsync createAsyncClient(String baseUrl, String apiKey, int numRetry) {
synchronized (LOCK) {
ReferenceKey key = new ReferenceKey(baseUrl, apiKey);
ReferenceValue value = cache.get(key);
if (value != null) {
LOG.debug("Returning an existing OpenAI client.");
value.referenceCount.incrementAndGet();
return value.client;
}
LOG.debug("Building a new OpenAI client.");
OpenAIClientAsync client =
OpenAIOkHttpClientAsync.builder()
.apiKey(apiKey)
.baseUrl(baseUrl)
.maxRetries(numRetry)
.build();
cache.put(key, new ReferenceValue(client));
return client;
}
}
public static void releaseAsyncClient(String baseUrl, String apiKey) {
synchronized (LOCK) {
ReferenceKey key = new ReferenceKey(baseUrl, apiKey);
ReferenceValue value = cache.get(key);
Preconditions.checkNotNull(
value, "The creation and release of OpenAI client does not match.");
int count = value.referenceCount.decrementAndGet();
if (count == 0) {
LOG.debug("Closing the OpenAI client.");
cache.remove(key);
value.client.close();
}
}
}
private static | OpenAIUtils |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/jdbc/NullTest.java | {
"start": 929,
"end": 1156
} | class ____ {
@Test
void shouldGetTypeAndTypeHandlerForNullStringType() {
assertEquals(JdbcType.VARCHAR, Null.STRING.getJdbcType());
assertTrue(Null.STRING.getTypeHandler() instanceof StringTypeHandler);
}
}
| NullTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java | {
"start": 4332,
"end": 5210
} | class ____ extends JerseyTestBase {
private static File testRootDir;
private static File httpSpnegoKeytabFile = new File(
KerberosTestUtils.getKeytabFile());
private static String httpSpnegoPrincipal = KerberosTestUtils
.getServerPrincipal();
private static MiniKdc testMiniKDC;
private static MockRM rm;
private boolean isKerberosAuth = false;
private ResourceConfig config;
private HttpServletRequest request = mock(HttpServletRequest.class);
@Override
protected Application configure() {
config = new ResourceConfig();
config.register(RMWebServices.class);
config.register(GenericExceptionHandler.class);
config.register(TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class);
config.register(new JettisonFeature()).register(JAXBContextResolver.class);
return config;
}
private | TestRMWebServicesDelegationTokens |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/util/jartestprogram/UtilFunctionWrapper.java | {
"start": 1011,
"end": 1114
} | class ____ {
/** Static factory for a lambda filter function. */
public static | UtilFunctionWrapper |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java | {
"start": 1326,
"end": 9137
} | class ____ extends BaseNodesResponse<ClusterStatsNodeResponse> implements ToXContentFragment {
final ClusterStatsNodes nodesStats;
final ClusterStatsIndices indicesStats;
final ClusterHealthStatus status;
final ClusterSnapshotStats clusterSnapshotStats;
final RepositoryUsageStats repositoryUsageStats;
final CCSTelemetrySnapshot ccsMetrics;
final CCSTelemetrySnapshot esqlMetrics;
final long timestamp;
final String clusterUUID;
private final Map<String, RemoteClusterStats> remoteClustersStats;
public static final String CCS_TELEMETRY_FIELD_NAME = "_search";
public static final String ESQL_TELEMETRY_FIELD_NAME = "_esql";
public ClusterStatsResponse(
long timestamp,
String clusterUUID,
ClusterName clusterName,
List<ClusterStatsNodeResponse> nodes,
List<FailedNodeException> failures,
MappingStats mappingStats,
AnalysisStats analysisStats,
VersionStats versionStats,
ClusterSnapshotStats clusterSnapshotStats,
Map<String, RemoteClusterStats> remoteClustersStats,
boolean skipMRT
) {
super(clusterName, nodes, failures);
this.clusterUUID = clusterUUID;
this.timestamp = timestamp;
nodesStats = new ClusterStatsNodes(nodes);
indicesStats = new ClusterStatsIndices(nodes, mappingStats, analysisStats, versionStats);
ccsMetrics = new CCSTelemetrySnapshot(skipMRT == false);
esqlMetrics = new CCSTelemetrySnapshot(false);
ClusterHealthStatus status = null;
for (ClusterStatsNodeResponse response : nodes) {
// only the master node populates the status
if (response.clusterStatus() != null) {
status = response.clusterStatus();
break;
}
}
nodes.forEach(node -> {
ccsMetrics.add(node.getSearchCcsMetrics());
esqlMetrics.add(node.getEsqlCcsMetrics());
});
this.status = status;
this.clusterSnapshotStats = clusterSnapshotStats;
this.repositoryUsageStats = nodes.stream()
.map(ClusterStatsNodeResponse::repositoryUsageStats)
// only populated on snapshot nodes (i.e. master and data nodes)
.filter(r -> r.isEmpty() == false)
// stats should be the same on every node so just pick one of them
.findAny()
.orElse(RepositoryUsageStats.EMPTY);
this.remoteClustersStats = remoteClustersStats;
}
public String getClusterUUID() {
return this.clusterUUID;
}
public long getTimestamp() {
return this.timestamp;
}
public ClusterHealthStatus getStatus() {
return this.status;
}
public ClusterStatsNodes getNodesStats() {
return nodesStats;
}
public ClusterStatsIndices getIndicesStats() {
return indicesStats;
}
public CCSTelemetrySnapshot getCcsMetrics() {
return ccsMetrics;
}
public Map<String, RemoteClusterStats> getRemoteClustersStats() {
return remoteClustersStats;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
TransportAction.localOnly();
}
@Override
protected List<ClusterStatsNodeResponse> readNodesFrom(StreamInput in) throws IOException {
return TransportAction.localOnly();
}
@Override
protected void writeNodesTo(StreamOutput out, List<ClusterStatsNodeResponse> nodes) throws IOException {
TransportAction.localOnly();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("cluster_uuid", getClusterUUID());
builder.field("timestamp", getTimestamp());
if (status != null) {
builder.field("status", status.name().toLowerCase(Locale.ROOT));
}
builder.startObject("indices");
indicesStats.toXContent(builder, params);
builder.endObject();
builder.startObject("nodes");
nodesStats.toXContent(builder, params);
builder.endObject();
builder.field("snapshots");
clusterSnapshotStats.toXContent(builder, params);
builder.field("repositories");
repositoryUsageStats.toXContent(builder, params);
builder.startObject("ccs");
if (remoteClustersStats != null) {
builder.field("clusters", remoteClustersStats);
}
builder.startObject(CCS_TELEMETRY_FIELD_NAME);
ccsMetrics.toXContent(builder, params);
builder.endObject();
if (esqlMetrics.getTotalCount() > 0) {
builder.startObject(ESQL_TELEMETRY_FIELD_NAME);
esqlMetrics.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
return builder;
}
@Override
public String toString() {
return Strings.toString(this, true, true);
}
/**
* Represents the information about a remote cluster.
*/
public record RemoteClusterStats(
String clusterUUID,
String mode,
Optional<Boolean> skipUnavailable,
String transportCompress,
Set<String> versions,
String status,
long nodesCount,
long shardsCount,
long indicesCount,
long indicesBytes,
long heapBytes,
long memBytes
) implements ToXContentFragment {
public RemoteClusterStats(String mode, Optional<Boolean> skipUnavailable, String transportCompress) {
this(
"unavailable",
mode,
skipUnavailable,
transportCompress.toLowerCase(Locale.ROOT),
Set.of(),
"unavailable",
0,
0,
0,
0,
0,
0
);
}
public RemoteClusterStats acceptResponse(RemoteClusterStatsResponse remoteResponse) {
return new RemoteClusterStats(
remoteResponse.getClusterUUID(),
mode,
skipUnavailable,
transportCompress,
remoteResponse.getVersions(),
remoteResponse.getStatus().name().toLowerCase(Locale.ROOT),
remoteResponse.getNodesCount(),
remoteResponse.getShardsCount(),
remoteResponse.getIndicesCount(),
remoteResponse.getIndicesBytes(),
remoteResponse.getHeapBytes(),
remoteResponse.getMemBytes()
);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("cluster_uuid", clusterUUID);
builder.field("mode", mode);
if (skipUnavailable.isPresent()) {
builder.field("skip_unavailable", skipUnavailable.get());
}
builder.field("transport.compress", transportCompress);
builder.field("status", status);
builder.field("version", versions);
builder.field("nodes_count", nodesCount);
builder.field("shards_count", shardsCount);
builder.field("indices_count", indicesCount);
builder.humanReadableField("indices_total_size_in_bytes", "indices_total_size", ByteSizeValue.ofBytes(indicesBytes));
builder.humanReadableField("max_heap_in_bytes", "max_heap", ByteSizeValue.ofBytes(heapBytes));
builder.humanReadableField("mem_total_in_bytes", "mem_total", ByteSizeValue.ofBytes(memBytes));
builder.endObject();
return builder;
}
}
}
| ClusterStatsResponse |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 64637,
"end": 65048
} | class ____ {
@Bean
static PropertySourcesPlaceholderConfigurer configurer1() {
return new PropertySourcesPlaceholderConfigurer();
}
@Bean
static PropertySourcesPlaceholderConfigurer configurer2() {
return new PropertySourcesPlaceholderConfigurer();
}
}
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties
static | MultiplePropertySourcesPlaceholderConfigurerConfiguration |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/i18n/SessionLocaleResolver.java | {
"start": 2762,
"end": 8054
} | class ____ extends AbstractLocaleContextResolver {
/**
* Default name of the session attribute that holds the Locale.
* <p>Only used internally by this implementation.
* <p>Use {@code RequestContext(Utils).getLocale()}
* to retrieve the current locale in controllers or views.
* @see org.springframework.web.servlet.support.RequestContext#getLocale
* @see org.springframework.web.servlet.support.RequestContextUtils#getLocale
*/
public static final String LOCALE_SESSION_ATTRIBUTE_NAME = SessionLocaleResolver.class.getName() + ".LOCALE";
/**
* Default name of the session attribute that holds the TimeZone.
* <p>Only used internally by this implementation.
* <p>Use {@code RequestContext(Utils).getTimeZone()}
* to retrieve the current time zone in controllers or views.
* @see org.springframework.web.servlet.support.RequestContext#getTimeZone
* @see org.springframework.web.servlet.support.RequestContextUtils#getTimeZone
*/
public static final String TIME_ZONE_SESSION_ATTRIBUTE_NAME = SessionLocaleResolver.class.getName() + ".TIME_ZONE";
private String localeAttributeName = LOCALE_SESSION_ATTRIBUTE_NAME;
private String timeZoneAttributeName = TIME_ZONE_SESSION_ATTRIBUTE_NAME;
private Function<HttpServletRequest, Locale> defaultLocaleFunction = request -> {
Locale defaultLocale = getDefaultLocale();
return (defaultLocale != null ? defaultLocale : request.getLocale());
};
private Function<HttpServletRequest, @Nullable TimeZone> defaultTimeZoneFunction = request -> getDefaultTimeZone();
/**
* Specify the name of the corresponding attribute in the {@code HttpSession},
* holding the current {@link Locale} value.
* <p>The default is an internal {@link #LOCALE_SESSION_ATTRIBUTE_NAME}.
* @since 4.3.8
*/
public void setLocaleAttributeName(String localeAttributeName) {
this.localeAttributeName = localeAttributeName;
}
/**
* Specify the name of the corresponding attribute in the {@code HttpSession},
* holding the current {@link TimeZone} value.
* <p>The default is an internal {@link #TIME_ZONE_SESSION_ATTRIBUTE_NAME}.
* @since 4.3.8
*/
public void setTimeZoneAttributeName(String timeZoneAttributeName) {
this.timeZoneAttributeName = timeZoneAttributeName;
}
/**
* Set the function used to determine the default locale for the given request,
* called if no {@link Locale} session attribute has been found.
* <p>The default implementation returns the configured
* {@linkplain #setDefaultLocale(Locale) default locale}, if any, and otherwise
* falls back to the request's {@code Accept-Language} header locale or the
* default locale for the server.
* @param defaultLocaleFunction the function used to determine the default locale
* @since 6.0
* @see #setDefaultLocale
* @see jakarta.servlet.http.HttpServletRequest#getLocale()
*/
public void setDefaultLocaleFunction(Function<HttpServletRequest, Locale> defaultLocaleFunction) {
Assert.notNull(defaultLocaleFunction, "defaultLocaleFunction must not be null");
this.defaultLocaleFunction = defaultLocaleFunction;
}
/**
* Set the function used to determine the default time zone for the given request,
* called if no {@link TimeZone} session attribute has been found.
* <p>The default implementation returns the configured default time zone,
* if any, or {@code null} otherwise.
* @param defaultTimeZoneFunction the function used to determine the default time zone
* @since 6.0
* @see #setDefaultTimeZone
*/
public void setDefaultTimeZoneFunction(Function<HttpServletRequest, @Nullable TimeZone> defaultTimeZoneFunction) {
Assert.notNull(defaultTimeZoneFunction, "defaultTimeZoneFunction must not be null");
this.defaultTimeZoneFunction = defaultTimeZoneFunction;
}
@Override
public Locale resolveLocale(HttpServletRequest request) {
Locale locale = (Locale) WebUtils.getSessionAttribute(request, this.localeAttributeName);
if (locale == null) {
locale = this.defaultLocaleFunction.apply(request);
}
return locale;
}
@Override
public LocaleContext resolveLocaleContext(final HttpServletRequest request) {
return new TimeZoneAwareLocaleContext() {
@Override
public Locale getLocale() {
Locale locale = (Locale) WebUtils.getSessionAttribute(request, localeAttributeName);
if (locale == null) {
locale = defaultLocaleFunction.apply(request);
}
return locale;
}
@Override
public @Nullable TimeZone getTimeZone() {
TimeZone timeZone = (TimeZone) WebUtils.getSessionAttribute(request, timeZoneAttributeName);
if (timeZone == null) {
timeZone = defaultTimeZoneFunction.apply(request);
}
return timeZone;
}
};
}
@Override
public void setLocaleContext(HttpServletRequest request, @Nullable HttpServletResponse response,
@Nullable LocaleContext localeContext) {
Locale locale = null;
TimeZone timeZone = null;
if (localeContext != null) {
locale = localeContext.getLocale();
if (localeContext instanceof TimeZoneAwareLocaleContext timeZoneAwareLocaleContext) {
timeZone = timeZoneAwareLocaleContext.getTimeZone();
}
}
WebUtils.setSessionAttribute(request, this.localeAttributeName, locale);
WebUtils.setSessionAttribute(request, this.timeZoneAttributeName, timeZone);
}
}
| SessionLocaleResolver |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Issue1005.java | {
"start": 179,
"end": 1083
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
Model model = JSON.parseObject("{\"values\":[[1,2,3]]}", Model.class);
assertNotNull(model.values);
assertEquals(3, model.values[0].size());
assertEquals(Byte.class, model.values[0].get(0).getClass());
assertEquals(Byte.class, model.values[0].get(1).getClass());
assertEquals(Byte.class, model.values[0].get(2).getClass());
}
public void test_for_List() throws Exception {
Model2 model = JSON.parseObject("{\"values\":[1,2,3]}", Model2.class);
assertNotNull(model.values);
assertEquals(3, model.values.size());
assertEquals(Byte.class, model.values.get(0).getClass());
assertEquals(Byte.class, model.values.get(1).getClass());
assertEquals(Byte.class, model.values.get(2).getClass());
}
public static | Issue1005 |
java | spring-projects__spring-boot | module/spring-boot-pulsar/src/main/java/org/springframework/boot/pulsar/autoconfigure/PulsarProperties.java | {
"start": 17622,
"end": 19316
} | class ____ {
/**
* Maximum number of times that a message will be redelivered before being
* sent to the dead letter queue.
*/
private int maxRedeliverCount;
/**
* Name of the retry topic where the failing messages will be sent.
*/
private @Nullable String retryLetterTopic;
/**
* Name of the dead topic where the failing messages will be sent.
*/
private @Nullable String deadLetterTopic;
/**
* Name of the initial subscription of the dead letter topic. When not set,
* the initial subscription will not be created. However, when the property is
* set then the broker's 'allowAutoSubscriptionCreation' must be enabled or
* the DLQ producer will fail.
*/
private @Nullable String initialSubscriptionName;
public int getMaxRedeliverCount() {
return this.maxRedeliverCount;
}
public void setMaxRedeliverCount(int maxRedeliverCount) {
this.maxRedeliverCount = maxRedeliverCount;
}
public @Nullable String getRetryLetterTopic() {
return this.retryLetterTopic;
}
public void setRetryLetterTopic(@Nullable String retryLetterTopic) {
this.retryLetterTopic = retryLetterTopic;
}
public @Nullable String getDeadLetterTopic() {
return this.deadLetterTopic;
}
public void setDeadLetterTopic(@Nullable String deadLetterTopic) {
this.deadLetterTopic = deadLetterTopic;
}
public @Nullable String getInitialSubscriptionName() {
return this.initialSubscriptionName;
}
public void setInitialSubscriptionName(@Nullable String initialSubscriptionName) {
this.initialSubscriptionName = initialSubscriptionName;
}
}
}
public static | DeadLetterPolicy |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java | {
"start": 132634,
"end": 136591
} | class ____ extends FilterFileChannel {
private final FailSwitch fail;
private final boolean partialWrite;
private final boolean throwUnknownException;
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate)
throws MockDirectoryWrapper.FakeIOException {
super(delegate);
this.fail = fail;
this.partialWrite = partialWrite;
this.throwUnknownException = throwUnknownException;
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
}
@Override
public int read(ByteBuffer dst) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dst);
}
@Override
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dsts, offset, length);
}
@Override
public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int write(ByteBuffer src, long position) throws IOException {
if (fail.fail()) {
if (partialWrite) {
if (src.hasRemaining()) {
final int pos = src.position();
final int limit = src.limit();
src.limit(randomIntBetween(pos, limit));
super.write(src, position);
src.limit(limit);
src.position(pos);
throw new IOException("__FAKE__ no space left on device");
}
}
if (throwUnknownException) {
throw new UnknownException();
} else {
throw new MockDirectoryWrapper.FakeIOException();
}
}
return super.write(src, position);
}
@Override
public int write(ByteBuffer src) throws IOException {
if (fail.fail()) {
if (partialWrite) {
if (src.hasRemaining()) {
final int pos = src.position();
final int limit = src.limit();
src.limit(randomIntBetween(pos, limit));
super.write(src);
src.limit(limit);
src.position(pos);
throw new IOException("__FAKE__ no space left on device");
}
}
if (throwUnknownException) {
throw new UnknownException();
} else {
throw new MockDirectoryWrapper.FakeIOException();
}
}
return super.write(src);
}
@Override
public void force(boolean metadata) throws IOException {
if (fail.fail()) {
if (throwUnknownException) {
throw new UnknownException();
} else {
throw new MockDirectoryWrapper.FakeIOException();
}
}
super.force(metadata);
}
@Override
public long position() throws IOException {
if (fail.fail()) {
if (throwUnknownException) {
throw new UnknownException();
} else {
throw new MockDirectoryWrapper.FakeIOException();
}
}
return super.position();
}
}
private static final | ThrowingFileChannel |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/Order.java | {
"start": 5695,
"end": 8374
} | class ____ sorted by the
* attribute with the given name, in the given direction. If the
* named attribute is of textual type, with the specified
* precedence for null values. If the named attribute is of
* textual type, the ordering is case-sensitive.
*/
static <T> Order<T> by(Class<T> entityClass, String attributeName, SortDirection direction, Nulls nullPrecedence) {
return new NamedAttributeOrder<>( direction, nullPrecedence, entityClass, attributeName );
}
/**
* An order where the result set is sorted by the select item
* in the given position with smaller values first. If the
* item is of textual type, the ordering is case-sensitive.
*/
static Order<Object[]> asc(int element) {
return new ElementOrder<>( ASCENDING, Nulls.NONE, element );
}
/**
* An order where the result set is sorted by the select item
* in the given position with larger values first. If the
* item is of textual type, the ordering is case-sensitive.
*/
static Order<Object[]> desc(int element) {
return new ElementOrder<>( DESCENDING, Nulls.NONE, element );
}
/**
* An order where the result set is sorted by the select item
* in the given position, in the given direction. If the item
* is of textual type, the ordering is case-sensitive.
*/
static Order<Object[]> by(int element, SortDirection direction) {
return new ElementOrder<>( direction, Nulls.NONE, element );
}
/**
* An order where the result set is sorted by the select item
* in the given position in the given direction, with the specified
* case-sensitivity.
*/
static Order<Object[]> by(int element, SortDirection direction, boolean ignoreCase) {
return new ElementOrder<>( direction, Nulls.NONE, element, !ignoreCase );
}
/**
* An order where the result set is sorted by the select item
* in the given position in the given direction, with the specified
* precedence for null values. If the named attribute is of
* textual type, the ordering is case-sensitive.
*/
static Order<Object[]> by(int element, SortDirection direction, Nulls nullPrecedence) {
return new ElementOrder<>( direction, nullPrecedence, element );
}
/**
* The direction, {@linkplain SortDirection#ASCENDING ascending} or
* {@linkplain SortDirection#DESCENDING descending}, in which results
* are sorted.
*
* @since 7
*/
SortDirection direction();
/**
* The {@linkplain Nulls ordering of null values}.
*
* @since 7
*/
Nulls nullPrecedence();
/**
* For a lexicographic order based on textual values, whether case
* is significant.
*
* @since 7
*/
boolean caseSensitive();
/**
* For an order based on an entity attribute, the entity | is |
java | apache__flink | flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/row/ParquetRowDataWriterTest.java | {
"start": 3520,
"end": 21711
} | class ____ {
private static final RowType ROW_TYPE =
RowType.of(
new VarCharType(VarCharType.MAX_LENGTH),
new VarBinaryType(VarBinaryType.MAX_LENGTH),
new BooleanType(),
new TinyIntType(),
new SmallIntType(),
new IntType(),
new BigIntType(),
new FloatType(),
new DoubleType(),
new TimestampType(9),
new DecimalType(5, 0),
new DecimalType(15, 0),
new DecimalType(20, 0));
private static final RowType ROW_TYPE_COMPLEX =
RowType.of(
new ArrayType(true, new IntType()),
new MapType(
new VarCharType(VarCharType.MAX_LENGTH),
new VarCharType(VarCharType.MAX_LENGTH)),
RowType.of(new VarCharType(VarCharType.MAX_LENGTH), new IntType()));
private static final RowType MAP_ROW_TYPE =
RowType.of(
new MapType(
new VarCharType(true, VarCharType.MAX_LENGTH),
new VarCharType(VarCharType.MAX_LENGTH)));
private static final RowType NESTED_ARRAY_MAP_TYPE =
RowType.of(
new IntType(),
new ArrayType(true, new ArrayType(true, new IntType())),
new ArrayType(
true,
new MapType(
true,
new VarCharType(false, VarCharType.MAX_LENGTH),
new VarCharType(VarCharType.MAX_LENGTH))));
private static final RowType NESTED_ARRAY_ROW_TYPE =
RowType.of(new IntType(), new ArrayType(true, RowType.of(new IntType())));
@SuppressWarnings("unchecked")
private static final DataFormatConverters.DataFormatConverter<RowData, Row> CONVERTER_COMPLEX =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLogicalToDataType(ROW_TYPE_COMPLEX));
@SuppressWarnings("unchecked")
private static final DataFormatConverters.DataFormatConverter<RowData, Row> CONVERTER =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLogicalToDataType(ROW_TYPE));
@SuppressWarnings("unchecked")
private static final DataFormatConverters.DataFormatConverter<RowData, Row> MAP_CONVERTER =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLogicalToDataType(MAP_ROW_TYPE));
@SuppressWarnings("unchecked")
private static final DataFormatConverters.DataFormatConverter<RowData, Row>
NESTED_ARRAY_MAP_CONVERTER =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLogicalToDataType(NESTED_ARRAY_MAP_TYPE));
@SuppressWarnings("unchecked")
private static final DataFormatConverters.DataFormatConverter<RowData, Row>
NESTED_ARRAY_ROW_CONVERTER =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLogicalToDataType(NESTED_ARRAY_ROW_TYPE));
@Test
void testTypes(@TempDir java.nio.file.Path folder) throws Exception {
Configuration conf = new Configuration();
innerTest(folder, conf, true);
innerTest(folder, conf, false);
complexTypeTest(folder, conf, true);
complexTypeTest(folder, conf, false);
nestedArrayAndMapTest(folder, conf, true);
nestedArrayAndMapTest(folder, conf, false);
nestedArrayAndRowTest(folder, conf, true);
nestedArrayAndRowTest(folder, conf, false);
invalidTypeTest(folder, conf, true);
invalidTypeTest(folder, conf, false);
}
@Test
void testCompression(@TempDir java.nio.file.Path folder) throws Exception {
Configuration conf = new Configuration();
conf.set(ParquetOutputFormat.COMPRESSION, "GZIP");
innerTest(folder, conf, true);
innerTest(folder, conf, false);
complexTypeTest(folder, conf, true);
complexTypeTest(folder, conf, false);
nestedArrayAndMapTest(folder, conf, true);
nestedArrayAndMapTest(folder, conf, false);
nestedArrayAndRowTest(folder, conf, true);
nestedArrayAndRowTest(folder, conf, false);
invalidTypeTest(folder, conf, true);
invalidTypeTest(folder, conf, false);
}
@Test
public void testInt64Timestamp(@TempDir java.nio.file.Path folder) throws Exception {
Configuration conf = new Configuration();
conf.set(IDENTIFIER + "." + WRITE_INT64_TIMESTAMP.key(), "true");
conf.set(IDENTIFIER + "." + TIMESTAMP_TIME_UNIT.key(), "nanos");
innerTest(folder, conf, true);
innerTest(folder, conf, false);
complexTypeTest(folder, conf, true);
complexTypeTest(folder, conf, false);
invalidTypeTest(folder, conf, true);
invalidTypeTest(folder, conf, false);
}
private void innerTest(java.nio.file.Path folder, Configuration conf, boolean utcTimestamp)
throws IOException {
Path path = new Path(folder.toString(), UUID.randomUUID().toString());
int number = 1000;
List<Row> rows = new ArrayList<>(number);
for (int i = 0; i < number; i++) {
Integer v = i;
rows.add(
Row.of(
String.valueOf(v),
String.valueOf(v).getBytes(StandardCharsets.UTF_8),
v % 2 == 0,
v.byteValue(),
v.shortValue(),
v,
v.longValue(),
v.floatValue(),
v.doubleValue(),
toDateTime(v),
BigDecimal.valueOf(v),
BigDecimal.valueOf(v),
BigDecimal.valueOf(v)));
}
ParquetWriterFactory<RowData> factory =
ParquetRowDataBuilder.createWriterFactory(ROW_TYPE, conf, utcTimestamp);
BulkWriter<RowData> writer =
factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
for (int i = 0; i < number; i++) {
writer.addElement(CONVERTER.toInternal(rows.get(i)));
}
writer.flush();
writer.finish();
// verify
ParquetColumnarRowSplitReader reader =
ParquetSplitReaderUtil.genPartColumnarRowReader(
utcTimestamp,
true,
conf,
ROW_TYPE.getFieldNames().toArray(new String[0]),
ROW_TYPE.getChildren().stream()
.map(TypeConversions::fromLogicalToDataType)
.toArray(DataType[]::new),
new HashMap<>(),
IntStream.range(0, ROW_TYPE.getFieldCount()).toArray(),
50,
path,
0,
Long.MAX_VALUE);
int cnt = 0;
while (!reader.reachedEnd()) {
Row row = CONVERTER.toExternal(reader.nextRecord());
assertThat(row).isEqualTo(rows.get(cnt));
cnt++;
}
assertThat(cnt).isEqualTo(number);
}
public void complexTypeTest(java.nio.file.Path folder, Configuration conf, boolean utcTimestamp)
throws Exception {
Path path = new Path(folder.toString(), UUID.randomUUID().toString());
int number = 1000;
List<Row> rows = new ArrayList<>(number);
Map<String, String> mapData = new HashMap<>();
mapData.put("k1", "v1");
mapData.put("k2", null);
for (int i = 0; i < number; i++) {
Integer v = i;
rows.add(Row.of(new Integer[] {v}, mapData, Row.of(String.valueOf(v), v)));
}
ParquetWriterFactory<RowData> factory =
ParquetRowDataBuilder.createWriterFactory(ROW_TYPE_COMPLEX, conf, utcTimestamp);
BulkWriter<RowData> writer =
factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
for (int i = 0; i < number; i++) {
writer.addElement(CONVERTER_COMPLEX.toInternal(rows.get(i)));
}
writer.flush();
writer.finish();
File file = new File(path.getPath());
final List<Row> fileContent = readParquetFile(file);
assertThat(fileContent).isEqualTo(rows);
}
public void invalidTypeTest(java.nio.file.Path folder, Configuration conf, boolean utcTimestamp)
throws IOException {
Path path = new Path(folder.toString(), UUID.randomUUID().toString());
ParquetWriterFactory<RowData> factory =
ParquetRowDataBuilder.createWriterFactory(MAP_ROW_TYPE, conf, utcTimestamp);
final BulkWriter<RowData> rowDataBulkWriter =
factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
Map<String, String> mapData = new HashMap<>();
mapData.put(null, "v1");
final Row row = Row.of(mapData);
assertThatThrownBy(
() -> rowDataBulkWriter.addElement(MAP_CONVERTER.toInternal(row)),
"Parquet does not support null keys in a map. See https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#maps for more details.")
.isInstanceOf(RuntimeException.class);
}
public void nestedArrayAndMapTest(
java.nio.file.Path folder, Configuration conf, boolean utcTimestamp) throws Exception {
Path path = new Path(folder.toString(), UUID.randomUUID().toString());
int number = 1000;
List<Row> rows = new ArrayList<>(number);
for (int i = 0; i < number; i++) {
Integer v = i;
Map<String, String> mp1 = new HashMap<>();
Map<String, String> mp2 = new HashMap<>();
mp2.put("key_" + i, null);
mp2.put("key@" + i, "val@" + i);
rows.add(
Row.of(
v,
new Integer[][] {{i, i + 1, null}, {i, i + 2, null}, null},
new Map[] {null, mp1, mp2}));
}
ParquetWriterFactory<RowData> factory =
ParquetRowDataBuilder.createWriterFactory(
NESTED_ARRAY_MAP_TYPE, conf, utcTimestamp);
BulkWriter<RowData> writer =
factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
for (int i = 0; i < number; i++) {
writer.addElement(NESTED_ARRAY_MAP_CONVERTER.toInternal(rows.get(i)));
}
writer.flush();
writer.finish();
File file = new File(path.getPath());
final List<Row> fileContent = readNestedArrayAndMap(file);
assertThat(fileContent).isEqualTo(rows);
}
public void nestedArrayAndRowTest(
java.nio.file.Path folder, Configuration conf, boolean utcTimestamp) throws Exception {
Path path = new Path(folder.toString(), UUID.randomUUID().toString());
int number = 1000;
List<Row> rows = new ArrayList<>(number);
for (int i = 0; i < number; i++) {
Integer v = i;
Integer v1 = i + number + 1;
rows.add(Row.of(v, new Row[] {Row.of(v1)}));
}
ParquetWriterFactory<RowData> factory =
ParquetRowDataBuilder.createWriterFactory(
NESTED_ARRAY_ROW_TYPE, conf, utcTimestamp);
BulkWriter<RowData> writer =
factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
for (int i = 0; i < number; i++) {
writer.addElement(NESTED_ARRAY_ROW_CONVERTER.toInternal(rows.get(i)));
}
writer.flush();
writer.finish();
File file = new File(path.getPath());
final List<Row> fileContent = readNestedArrayAndRowParquetFile(file);
assertThat(fileContent).isEqualTo(rows);
}
private static List<Row> readParquetFile(File file) throws IOException {
InputFile inFile =
HadoopInputFile.fromPath(
new org.apache.hadoop.fs.Path(file.toURI()), new Configuration());
ArrayList<Row> results = new ArrayList<>();
try (ParquetReader<GenericRecord> reader =
AvroParquetReader.<GenericRecord>builder(inFile).build()) {
GenericRecord next;
while ((next = reader.read()) != null) {
Integer c0 = (Integer) ((ArrayList<GenericData.Record>) next.get(0)).get(0).get(0);
HashMap<Utf8, Utf8> map = ((HashMap<Utf8, Utf8>) next.get(1));
String c21 = ((GenericData.Record) next.get(2)).get(0).toString();
Integer c22 = (Integer) ((GenericData.Record) next.get(2)).get(1);
Map<String, String> c1 = new HashMap<>();
for (Utf8 key : map.keySet()) {
String k = key == null ? null : key.toString();
String v = map.get(key) == null ? null : map.get(key).toString();
c1.put(k, v);
}
Row row = Row.of(new Integer[] {c0}, c1, Row.of(c21, c22));
results.add(row);
}
}
return results;
}
// TODO: If parquet vectorized reader support nested array or map, remove this function
private static List<Row> readNestedArrayAndMap(File file) throws IOException {
InputFile inFile =
HadoopInputFile.fromPath(
new org.apache.hadoop.fs.Path(file.toURI()), new Configuration());
ArrayList<Row> results = new ArrayList<>();
try (ParquetReader<GenericRecord> reader =
AvroParquetReader.<GenericRecord>builder(inFile).build()) {
GenericRecord next;
while ((next = reader.read()) != null) {
Integer c0 = (Integer) next.get(0);
// read array<array<int>>
List<Integer[]> nestedArray = new ArrayList<>();
ArrayList<GenericData.Record> recordList =
(ArrayList<GenericData.Record>) next.get(1);
recordList.forEach(
record -> {
ArrayList<GenericData.Record> origVals =
(ArrayList<GenericData.Record>) record.get(0);
List<Integer> intArrays = (origVals == null) ? null : new ArrayList<>();
if (origVals != null) {
origVals.forEach(
r -> {
intArrays.add((Integer) r.get(0));
});
}
nestedArray.add(
origVals == null ? null : intArrays.toArray(new Integer[0]));
});
// read array<map<String, String>>
List<Map<String, String>> nestedMap = new ArrayList<>();
recordList = (ArrayList<GenericData.Record>) next.get(2);
recordList.forEach(
record -> {
Map<Utf8, Utf8> origMp = (Map<Utf8, Utf8>) record.get(0);
Map<String, String> mp = (origMp == null) ? null : new HashMap<>();
if (origMp != null) {
for (Utf8 key : origMp.keySet()) {
String k = key == null ? null : key.toString();
String v =
origMp.get(key) == null
? null
: origMp.get(key).toString();
mp.put(k, v);
}
}
nestedMap.add(mp);
});
Row row =
Row.of(
c0,
nestedArray.toArray(new Integer[0][0]),
nestedMap.toArray(new Map[0]));
results.add(row);
}
}
return results;
}
private static List<Row> readNestedArrayAndRowParquetFile(File file) throws IOException {
InputFile inFile =
HadoopInputFile.fromPath(
new org.apache.hadoop.fs.Path(file.toURI()), new Configuration());
ArrayList<Row> results = new ArrayList<>();
try (ParquetReader<GenericRecord> reader =
AvroParquetReader.<GenericRecord>builder(inFile).build()) {
GenericRecord next;
while ((next = reader.read()) != null) {
Integer c0 = (Integer) next.get(0);
List<Row> nestedArray = new ArrayList<>();
ArrayList<GenericData.Record> recordList =
(ArrayList<GenericData.Record>) next.get(1);
for (GenericData.Record record : recordList) {
nestedArray.add(Row.of(((GenericData.Record) record.get(0)).get(0)));
}
Row row = Row.of(c0, nestedArray.toArray(new Row[0]));
results.add(row);
}
}
return results;
}
private LocalDateTime toDateTime(Integer v) {
v = (v > 0 ? v : -v) % 1000;
return LocalDateTime.now().plusNanos(v).plusSeconds(v);
}
}
| ParquetRowDataWriterTest |
java | elastic__elasticsearch | x-pack/plugin/mapper-exponential-histogram/src/main/java/org/elasticsearch/xpack/exponentialhistogram/ExponentialHistogramFieldMapper.java | {
"start": 10452,
"end": 33935
} | class ____ extends MappedFieldType {
private final TimeSeriesParams.MetricType metricType;
// Visible for testing
public ExponentialHistogramFieldType(String name, Map<String, String> meta, TimeSeriesParams.MetricType metricType) {
super(name, IndexType.docValuesOnly(), false, meta);
this.metricType = metricType;
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
return SourceValueFetcher.identity(name(), context, format);
}
@Override
public boolean isSearchable() {
return false;
}
@Override
public boolean isAggregatable() {
return true;
}
@Override
public TimeSeriesParams.MetricType getMetricType() {
return metricType;
}
@Override
public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) {
return (cache, breakerService) -> new IndexExponentialHistogramFieldData(name()) {
@Override
public LeafExponentialHistogramFieldData load(LeafReaderContext context) {
return new LeafExponentialHistogramFieldData() {
@Override
public ExponentialHistogramValuesReader getHistogramValues() throws IOException {
return new DocValuesReader(context.reader(), fieldName);
}
@Override
public DocValuesScriptFieldFactory getScriptFieldFactory(String name) {
throw new UnsupportedOperationException("The [" + CONTENT_TYPE + "] field does not " + "support scripts");
}
@Override
public SortedBinaryDocValues getBytesValues() {
throw new UnsupportedOperationException(
"String representation of doc values " + "for [" + CONTENT_TYPE + "] fields is not supported"
);
}
@Override
public FormattedDocValues getFormattedValues(DocValueFormat format) {
return createFormattedDocValues(context.reader(), fieldName);
}
@Override
public long ramBytesUsed() {
return 0; // No dynamic allocations
}
};
}
@Override
public LeafExponentialHistogramFieldData loadDirect(LeafReaderContext context) throws Exception {
return load(context);
}
@Override
public SortField sortField(
Object missingValue,
MultiValueMode sortMode,
XFieldComparatorSource.Nested nested,
boolean reverse
) {
throw new IllegalArgumentException("can't sort on the [" + CONTENT_TYPE + "] field");
}
@Override
public BucketedSort newBucketedSort(
BigArrays bigArrays,
Object missingValue,
MultiValueMode sortMode,
XFieldComparatorSource.Nested nested,
SortOrder sortOrder,
DocValueFormat format,
int bucketSize,
BucketedSort.ExtraData extra
) {
throw new IllegalArgumentException("can't sort on the [" + CONTENT_TYPE + "] field");
}
};
}
@Override
public Query termQuery(Object value, SearchExecutionContext context) {
throw new IllegalArgumentException(
"[" + CONTENT_TYPE + "] field do not support searching, " + "use dedicated aggregations instead: [" + name() + "]"
);
}
@Override
public BlockLoader blockLoader(BlockLoaderContext blContext) {
DoublesBlockLoader minimaLoader = new DoublesBlockLoader(valuesMinSubFieldName(name()), NumericUtils::sortableLongToDouble);
DoublesBlockLoader maximaLoader = new DoublesBlockLoader(valuesMaxSubFieldName(name()), NumericUtils::sortableLongToDouble);
DoublesBlockLoader sumsLoader = new DoublesBlockLoader(valuesSumSubFieldName(name()), NumericUtils::sortableLongToDouble);
// we store the counts as integers for better compression, but the block requires doubles. So we simply cast the value
DoublesBlockLoader valueCountsLoader = new DoublesBlockLoader(valuesCountSubFieldName(name()), longVal -> (double) longVal);
DoublesBlockLoader zeroThresholdsLoader = new DoublesBlockLoader(
zeroThresholdSubFieldName(name()),
NumericUtils::sortableLongToDouble
);
BytesRefsFromBinaryBlockLoader bytesLoader = new BytesRefsFromBinaryBlockLoader(name());
return new BlockDocValuesReader.DocValuesBlockLoader() {
@Override
public Builder builder(BlockFactory factory, int expectedCount) {
return factory.exponentialHistogramBlockBuilder(expectedCount);
}
@Override
public AllReader reader(LeafReaderContext context) throws IOException {
AllReader bytesReader = bytesLoader.reader(context);
BlockLoader.AllReader minimaReader = minimaLoader.reader(context);
BlockLoader.AllReader maximaReader = maximaLoader.reader(context);
AllReader sumsReader = sumsLoader.reader(context);
AllReader valueCountsReader = valueCountsLoader.reader(context);
AllReader zeroThresholdsReader = zeroThresholdsLoader.reader(context);
return new AllReader() {
@Override
public boolean canReuse(int startingDocID) {
return minimaReader.canReuse(startingDocID)
&& maximaReader.canReuse(startingDocID)
&& sumsReader.canReuse(startingDocID)
&& valueCountsReader.canReuse(startingDocID)
&& zeroThresholdsReader.canReuse(startingDocID)
&& bytesReader.canReuse(startingDocID);
}
@Override
public String toString() {
return "BlockDocValuesReader.ExponentialHistogram";
}
@Override
public Block read(BlockFactory factory, Docs docs, int offset, boolean nullsFiltered) throws IOException {
Block minima = null;
Block maxima = null;
Block sums = null;
Block valueCounts = null;
Block zeroThresholds = null;
Block encodedBytes = null;
Block result;
boolean success = false;
try {
minima = minimaReader.read(factory, docs, offset, nullsFiltered);
maxima = maximaReader.read(factory, docs, offset, nullsFiltered);
sums = sumsReader.read(factory, docs, offset, nullsFiltered);
valueCounts = valueCountsReader.read(factory, docs, offset, nullsFiltered);
zeroThresholds = zeroThresholdsReader.read(factory, docs, offset, nullsFiltered);
encodedBytes = bytesReader.read(factory, docs, offset, nullsFiltered);
result = factory.buildExponentialHistogramBlockDirect(
minima,
maxima,
sums,
valueCounts,
zeroThresholds,
encodedBytes
);
success = true;
} finally {
if (success == false) {
Releasables.close(minima, maxima, sums, valueCounts, zeroThresholds, encodedBytes);
}
}
return result;
}
@Override
public void read(int docId, StoredFields storedFields, Builder builder) throws IOException {
ExponentialHistogramBuilder histogramBuilder = (ExponentialHistogramBuilder) builder;
minimaReader.read(docId, storedFields, histogramBuilder.minima());
maximaReader.read(docId, storedFields, histogramBuilder.maxima());
sumsReader.read(docId, storedFields, histogramBuilder.sums());
valueCountsReader.read(docId, storedFields, histogramBuilder.valueCounts());
zeroThresholdsReader.read(docId, storedFields, histogramBuilder.zeroThresholds());
bytesReader.read(docId, storedFields, histogramBuilder.encodedHistograms());
}
};
}
};
}
}
// Visible for testing
static FormattedDocValues createFormattedDocValues(LeafReader reader, String fieldName) {
return new FormattedDocValues() {
boolean hasNext = false;
ExponentialHistogramValuesReader delegate;
private ExponentialHistogramValuesReader lazyDelegate() throws IOException {
if (delegate == null) {
delegate = new DocValuesReader(reader, fieldName);
}
return delegate;
}
@Override
public boolean advanceExact(int docId) throws IOException {
hasNext = lazyDelegate().advanceExact(docId);
return hasNext;
}
@Override
public int docValueCount() throws IOException {
return 1; // no multivalue support, so always 1
}
@Override
public Object nextValue() throws IOException {
if (hasNext == false) {
throw new IllegalStateException("No value available, make sure to call advanceExact() first");
}
hasNext = false;
return lazyDelegate().histogramValue();
}
};
}
@Override
protected boolean supportsParsingObject() {
return true;
}
@Override
public void parse(DocumentParserContext context) throws IOException {
context.path().add(leafName());
boolean shouldStoreMalformedDataForSyntheticSource = context.mappingLookup().isSourceSynthetic() && ignoreMalformed();
XContentParser.Token token;
XContentSubParser subParser = null;
XContentBuilder malformedDataForSyntheticSource = null;
try {
token = context.parser().currentToken();
if (token == XContentParser.Token.VALUE_NULL) {
context.path().remove();
return;
}
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, context.parser());
if (shouldStoreMalformedDataForSyntheticSource) {
var copyingParser = new CopyingXContentParser(context.parser());
malformedDataForSyntheticSource = copyingParser.getBuilder();
subParser = new XContentSubParser(copyingParser);
} else {
subParser = new XContentSubParser(context.parser());
}
subParser.nextToken();
ExponentialHistogramParser.ParsedExponentialHistogram parsedHistogram;
if (coerce()
&& subParser.currentToken() == XContentParser.Token.FIELD_NAME
&& HistogramParser.isHistogramSubFieldName(subParser.currentName())) {
HistogramParser.ParsedHistogram parsedTDigest = HistogramParser.parse(fullPath(), subParser);
parsedHistogram = ParsedHistogramConverter.tDigestToExponential(parsedTDigest);
} else {
parsedHistogram = ExponentialHistogramParser.parse(fullPath(), subParser);
}
if (context.doc().getByKey(fieldType().name()) != null) {
throw new IllegalArgumentException(
"Field ["
+ fullPath()
+ "] of type ["
+ typeName()
+ "] doesn't support indexing multiple values for the same field in the same document"
);
}
long totalValueCount;
try {
totalValueCount = getTotalValueCount(parsedHistogram);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(
"Field [" + fullPath() + "] has a total value count exceeding the allowed maximum value of " + Long.MAX_VALUE
);
}
double sum = validateOrEstimateSum(parsedHistogram, subParser);
double min = validateOrEstimateMin(parsedHistogram, subParser);
double max = validateOrEstimateMax(parsedHistogram, subParser);
HistogramDocValueFields docValues = buildDocValueFields(
fullPath(),
parsedHistogram.scale(),
parsedHistogram.negativeBuckets(),
parsedHistogram.positiveBuckets(),
parsedHistogram.zeroThreshold(),
totalValueCount,
sum,
min,
max
);
docValues.addToDoc(context.doc());
} catch (Exception ex) {
if (ignoreMalformed.value() == false) {
throw new DocumentParsingException(
context.parser().getTokenLocation(),
"failed to parse field [" + fieldType().name() + "] of type [" + fieldType().typeName() + "]",
ex
);
}
if (subParser != null) {
// close the subParser so we advance to the end of the object
subParser.close();
} else if (shouldStoreMalformedDataForSyntheticSource) {
// We have a malformed value, but it's not an object given that `subParser` is null.
// So we just remember whatever it is.
malformedDataForSyntheticSource = XContentBuilder.builder(context.parser().contentType().xContent())
.copyCurrentStructure(context.parser());
}
if (malformedDataForSyntheticSource != null) {
context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), malformedDataForSyntheticSource));
}
context.addIgnoredField(fieldType().name());
}
context.path().remove();
}
// Visible for testing, to construct realistic doc values in tests
public static HistogramDocValueFields buildDocValueFields(
String fieldName,
int scale,
List<IndexWithCount> negativeBuckets,
List<IndexWithCount> positiveBuckets,
double zeroThreshold,
long totalValueCount,
double sum,
double min,
double max
) throws IOException {
BytesStreamOutput histogramBytesOutput = new BytesStreamOutput();
CompressedExponentialHistogram.writeHistogramBytes(
histogramBytesOutput,
scale,
IndexWithCount.asBuckets(scale, negativeBuckets).iterator(),
IndexWithCount.asBuckets(scale, positiveBuckets).iterator()
);
BytesRef histoBytes = histogramBytesOutput.bytes().toBytesRef();
BinaryDocValuesField histoField = new BinaryDocValuesField(fieldName, histoBytes);
long thresholdAsLong = NumericUtils.doubleToSortableLong(zeroThreshold);
NumericDocValuesField zeroThresholdField = new NumericDocValuesField(zeroThresholdSubFieldName(fieldName), thresholdAsLong);
NumericDocValuesField valuesCountField = new NumericDocValuesField(valuesCountSubFieldName(fieldName), totalValueCount);
// for empty histograms, we store null as sum so that SUM() / COUNT() in ESQL yields NULL without warnings
NumericDocValuesField sumField = null;
if (totalValueCount > 0) {
sumField = new NumericDocValuesField(valuesSumSubFieldName(fieldName), NumericUtils.doubleToSortableLong(sum));
} else {
// empty histogram must have a sum of 0.0
assert sum == 0.0;
}
NumericDocValuesField minField = null;
if (Double.isNaN(min) == false) {
minField = new NumericDocValuesField(valuesMinSubFieldName(fieldName), NumericUtils.doubleToSortableLong(min));
}
NumericDocValuesField maxField = null;
if (Double.isNaN(max) == false) {
maxField = new NumericDocValuesField(valuesMaxSubFieldName(fieldName), NumericUtils.doubleToSortableLong(max));
}
HistogramDocValueFields docValues = new HistogramDocValueFields(
histoField,
zeroThresholdField,
valuesCountField,
sumField,
minField,
maxField
);
return docValues;
}
// Visible for testing
public record HistogramDocValueFields(
BinaryDocValuesField histo,
NumericDocValuesField zeroThreshold,
NumericDocValuesField valuesCount,
@Nullable NumericDocValuesField sumField,
@Nullable NumericDocValuesField minField,
@Nullable NumericDocValuesField maxField
) {
public void addToDoc(LuceneDocument doc) {
doc.addWithKey(histo.name(), histo);
doc.add(zeroThreshold);
doc.add(valuesCount);
if (sumField != null) {
doc.add(sumField);
}
if (minField != null) {
doc.add(minField);
}
if (maxField != null) {
doc.add(maxField);
}
}
public List<IndexableField> fieldsAsList() {
List<IndexableField> fields = new ArrayList<>();
fields.add(histo);
fields.add(zeroThreshold);
fields.add(valuesCount);
if (sumField != null) {
fields.add(sumField);
}
if (minField != null) {
fields.add(minField);
}
if (maxField != null) {
fields.add(maxField);
}
return fields;
}
}
private static boolean isEmpty(ExponentialHistogramParser.ParsedExponentialHistogram histogram) {
return histogram.positiveBuckets().isEmpty() && histogram.negativeBuckets().isEmpty() && histogram.zeroCount() == 0;
}
private double validateOrEstimateSum(ExponentialHistogramParser.ParsedExponentialHistogram histogram, XContentSubParser subParser) {
if (histogram.sum() == null) {
return ExponentialHistogramUtils.estimateSum(
IndexWithCount.asBuckets(histogram.scale(), histogram.negativeBuckets()).iterator(),
IndexWithCount.asBuckets(histogram.scale(), histogram.positiveBuckets()).iterator()
);
}
if (isEmpty(histogram) && histogram.sum() != 0.0) {
throw new DocumentParsingException(
subParser.getTokenLocation(),
"error parsing field [" + fullPath() + "], sum field must be zero if the histogram is empty, but got " + histogram.sum()
);
}
return histogram.sum();
}
private double validateOrEstimateMin(ExponentialHistogramParser.ParsedExponentialHistogram histogram, XContentSubParser subParser) {
if (histogram.min() == null) {
OptionalDouble estimatedMin = ExponentialHistogramUtils.estimateMin(
ZeroBucket.create(histogram.zeroThreshold(), histogram.zeroCount()),
IndexWithCount.asBuckets(histogram.scale(), histogram.negativeBuckets()),
IndexWithCount.asBuckets(histogram.scale(), histogram.positiveBuckets())
);
return estimatedMin.isPresent() ? estimatedMin.getAsDouble() : Double.NaN;
}
if (isEmpty(histogram)) {
throw new DocumentParsingException(
subParser.getTokenLocation(),
"error parsing field [" + fullPath() + "], min field must be null if the histogram is empty, but got " + histogram.min()
);
}
return histogram.min();
}
private double validateOrEstimateMax(ExponentialHistogramParser.ParsedExponentialHistogram histogram, XContentSubParser subParser) {
if (histogram.max() == null) {
OptionalDouble estimatedMax = ExponentialHistogramUtils.estimateMax(
ZeroBucket.create(histogram.zeroThreshold(), histogram.zeroCount()),
IndexWithCount.asBuckets(histogram.scale(), histogram.negativeBuckets()),
IndexWithCount.asBuckets(histogram.scale(), histogram.positiveBuckets())
);
return estimatedMax.isPresent() ? estimatedMax.getAsDouble() : Double.NaN;
}
if (isEmpty(histogram)) {
throw new DocumentParsingException(
subParser.getTokenLocation(),
"error parsing field [" + fullPath() + "], max field must be null if the histogram is empty, but got " + histogram.max()
);
}
return histogram.max();
}
private static long getTotalValueCount(ExponentialHistogramParser.ParsedExponentialHistogram histogram) {
long totalValueCount = histogram.zeroCount();
for (IndexWithCount bucket : histogram.positiveBuckets()) {
totalValueCount = Math.addExact(totalValueCount, bucket.count());
}
for (IndexWithCount bucket : histogram.negativeBuckets()) {
totalValueCount = Math.addExact(totalValueCount, bucket.count());
}
return totalValueCount;
}
@Override
protected FieldMapper.SyntheticSourceSupport syntheticSourceSupport() {
return new FieldMapper.SyntheticSourceSupport.Native(
() -> new CompositeSyntheticFieldLoader(
leafName(),
fullPath(),
new ExponentialHistogramSyntheticFieldLoader(),
new CompositeSyntheticFieldLoader.MalformedValuesLayer(fullPath())
)
);
}
private static | ExponentialHistogramFieldType |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/support/spring/mock/testcase/FastJsonpHttpMessageConverter4Case3Test.java | {
"start": 2257,
"end": 8116
} | class ____ extends WebMvcConfigurerAdapter {
@Bean
public FastJsonpResponseBodyAdvice fastJsonpResponseBodyAdvice() {
return new FastJsonpResponseBodyAdvice();
}
@Override
public void extendMessageConverters(List<HttpMessageConverter<?>> converters) {
converters.add(0, new FastJsonpHttpMessageConverter4());
super.extendMessageConverters(converters);
}
}
@Before
public void setup() {
this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac) //
.addFilter(new CharacterEncodingFilter("UTF-8", true)) // 设置服务器端返回的字符集为:UTF-8
.build();
}
@Test
public void checkDefaultJSONPQueryParamNames() {
String[] expected = { "callback", "jsonp" };
Assert.assertArrayEquals(expected, FastJsonpResponseBodyAdvice.DEFAULT_JSONP_QUERY_PARAM_NAMES);
}
@Test
public void isInjectComponent() {
wac.getBean(FastJsonpResponseBodyAdvice.class);
}
@Test
public void test1() throws Exception {
JSONObject json = new JSONObject();
json.put("id", 123);
json.put("name", "哈哈哈");
mockMvc.perform(
(post("/fastjson/test1").characterEncoding("UTF-8").content(json.toJSONString())
.contentType(MediaType.APPLICATION_JSON))).andExpect(status().isOk()).andDo(print());
}
@Test
public void test1_2() throws Exception {
JSONObject json = new JSONObject();
json.put("id", 123);
json.put("name", "哈哈哈");
ResultActions actions = mockMvc.perform((post("/fastjson/test1?callback=fnUpdateSome").characterEncoding(
"UTF-8").content(json.toJSONString()).contentType(MediaType.APPLICATION_JSON)));
actions.andDo(print());
actions.andExpect(status().isOk()).andExpect(content().contentType(APPLICATION_JAVASCRIPT));
String content = actions.andReturn().getResponse().getContentAsString();
assertTrue(content.equals("/**/fnUpdateSome({\"name\":\"哈哈哈\",\"id\":123})")
|| content.equals("/**/fnUpdateSome({\"id\":123,\"name\":\"哈哈哈\"})")); }
@Test
public void test2() throws Exception {
String jsonStr = "[{\"name\":\"p1\",\"sonList\":[{\"name\":\"s1\"}]},{\"name\":\"p2\",\"sonList\":[{\"name\":\"s2\"},{\"name\":\"s3\"}]}]";
mockMvc.perform(
(post("/fastjson/test2").characterEncoding("UTF-8").content(jsonStr)
.contentType(MediaType.APPLICATION_JSON))).andExpect(status().isOk()).andDo(print());
}
@Test
public void test2_2() throws Exception {
String jsonStr = "[{\"name\":\"p1\",\"sonList\":[{\"name\":\"s1\"}]},{\"name\":\"p2\",\"sonList\":[{\"name\":\"s2\"},{\"name\":\"s3\"}]}]";
ResultActions actions = mockMvc.perform((post("/fastjson/test2?jsonp=fnUpdateSome").characterEncoding("UTF-8")
.content(jsonStr).contentType(MediaType.APPLICATION_JSON)));
actions.andDo(print());
actions.andExpect(status().isOk()).andExpect(content().contentType(APPLICATION_JAVASCRIPT));
String content = actions.andReturn().getResponse().getContentAsString();
assertTrue(content.equals("/**/fnUpdateSome({\"p1\":1,\"p2\":2})")
|| content.equals("/**/fnUpdateSome({\"p2\":2,\"p1\":1})")); }
@Test
public void test3() throws Exception {
List<Object> list = this.mockMvc.perform(post("/fastjson/test3")).andReturn().getResponse()
.getHeaderValues("Content-Length");
Assert.assertNotEquals(list.size(), 0);
}
@Test
public void test3_2() throws Exception {
ResultActions actions = this.mockMvc.perform(post("/fastjson/test3?jsonp=fnUpdateSome"));
actions.andDo(print());
actions.andExpect(status().isOk()).andExpect(content().contentType(APPLICATION_JAVASCRIPT))
.andExpect(content().string("/**/fnUpdateSome({})"));
}
@Test
public void test4() throws Exception {
String jsonStr = "{\"t\":{\"id\":123,\"name\":\"哈哈哈\"}}";
mockMvc.perform(
(post("/fastjson/test4").characterEncoding("UTF-8").content(jsonStr)
.contentType(MediaType.APPLICATION_JSON))).andDo(print());
}
@Test
public void test4_2() throws Exception {
String jsonStr = "{\"t\":{\"id\":123,\"name\":\"哈哈哈\"}}";
ResultActions actions = mockMvc.perform((post("/fastjson/test4?callback=myUpdate").characterEncoding("UTF-8")
.content(jsonStr).contentType(MediaType.APPLICATION_JSON)));
actions.andDo(print());
actions.andExpect(status().isOk())
.andExpect(content().contentType(APPLICATION_JAVASCRIPT))
.andExpect(content().string("/**/myUpdate(\"{\\\"t\\\":{\\\"id\\\":123,\\\"name\\\":\\\"哈哈哈\\\"}}\")"));
}
@Test
public void test5() throws Exception {
String jsonStr = "{\"packet\":{\"smsType\":\"USER_LOGIN\"}}";
mockMvc.perform(
(post("/fastjson/test5").characterEncoding("UTF-8").content(jsonStr)
.contentType(MediaType.APPLICATION_JSON))).andDo(print());
}
@Test
public void test5_2() throws Exception {
String jsonStr = "{\"packet\":{\"smsType\":\"USER_LOGIN\"}}";
ResultActions actions = mockMvc.perform((post("/fastjson/test5?callback=myUpdate").characterEncoding("UTF-8")
.content(jsonStr).contentType(MediaType.APPLICATION_JSON)));
actions.andDo(print());
actions.andExpect(status().isOk())
.andExpect(content().contentType(APPLICATION_JAVASCRIPT))
.andExpect(content().string("/**/myUpdate(\"{\\\"packet\\\":{\\\"smsType\\\":\\\"USER_LOGIN\\\"}}\")"));
}
}
| Config |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/FactoryCreatorTypeBinding2894Test.java | {
"start": 1543,
"end": 1931
} | class ____ {
public int x;
protected Value() { }
protected Value(int x0) { x = x0; }
@Override
public boolean equals(Object o) {
return (o instanceof Value) && ((Value) o).x == x;
}
@Override
public String toString() {
return "[Value x="+x+"]";
}
}
// [databind#2895]
static | Value |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 60080,
"end": 60278
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(IgnoreUnknownFieldsFalseIgnoreInvalidFieldsTrueProperties.class)
static | IgnoreUnknownFieldsFalseConfiguration |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleConstraint.java | {
"start": 905,
"end": 1460
} | interface ____ extends OracleSQLObject, SQLConstraint, SQLTableElement {
SQLName getExceptionsInto();
void setExceptionsInto(SQLName exceptionsInto);
Boolean getDeferrable();
void setDeferrable(Boolean enable);
Boolean getEnable();
void setEnable(Boolean enable);
Boolean getValidate();
void setValidate(Boolean validate);
Initially getInitially();
void setInitially(Initially value);
OracleUsingIndexClause getUsing();
void setUsing(OracleUsingIndexClause using);
public static | OracleConstraint |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java | {
"start": 642,
"end": 3060
} | class ____ extends InferenceConfigItemTestCase<TextSimilarityConfig> {
public static TextSimilarityConfig mutateForVersion(TextSimilarityConfig instance, TransportVersion version) {
return new TextSimilarityConfig(
instance.getVocabularyConfig(),
InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version),
instance.getResultsField(),
instance.getSpanScoreFunction().toString()
);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> field.isEmpty() == false;
}
@Override
protected TextSimilarityConfig doParseInstance(XContentParser parser) throws IOException {
return TextSimilarityConfig.fromXContentLenient(parser);
}
@Override
protected Writeable.Reader<TextSimilarityConfig> instanceReader() {
return TextSimilarityConfig::new;
}
@Override
protected TextSimilarityConfig createTestInstance() {
return createRandom();
}
@Override
protected TextSimilarityConfig mutateInstance(TextSimilarityConfig instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected TextSimilarityConfig mutateInstanceForVersion(TextSimilarityConfig instance, TransportVersion version) {
return mutateForVersion(instance, version);
}
public static TextSimilarityConfig createRandom() {
return new TextSimilarityConfig(
randomBoolean() ? null : VocabularyConfigTests.createRandom(),
randomBoolean()
? null
: randomFrom(
BertTokenizationTests.createRandomWithSpan(),
MPNetTokenizationTests.createRandomWithSpan(),
RobertaTokenizationTests.createRandomWithSpan()
),
randomBoolean() ? null : randomAlphaOfLength(7),
randomBoolean()
? null
: randomFrom(
Arrays.stream(TextSimilarityConfig.SpanScoreFunction.values())
.map(TextSimilarityConfig.SpanScoreFunction::toString)
.toArray(String[]::new)
)
);
}
}
| TextSimilarityConfigTests |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-cache/src/test/java/smoketest/cache/SampleCacheApplicationTests.java | {
"start": 1028,
"end": 1606
} | class ____ {
@Autowired
private CacheManager cacheManager;
@Autowired
private CountryRepository countryRepository;
@Test
void validateCache() {
Cache countries = this.cacheManager.getCache("countries");
assertThat(countries).isNotNull();
countries.clear(); // Simple test assuming the cache is empty
assertThat(countries.get("BE")).isNull();
Country be = this.countryRepository.findByCode("BE");
ValueWrapper belgium = countries.get("BE");
assertThat(belgium).isNotNull();
assertThat((Country) belgium.get()).isEqualTo(be);
}
}
| SampleCacheApplicationTests |
java | apache__camel | test-infra/camel-test-infra-hazelcast/src/main/java/org/apache/camel/test/infra/hazelcast/services/HazelcastEmbeddedInfraService.java | {
"start": 1118,
"end": 3172
} | class ____ implements HazelcastInfraService {
@Override
public void registerProperties() {
}
@Override
public void initialize() {
}
@Override
public void shutdown() {
}
@Override
public Config createConfiguration(String name, int port, String instanceName, String componentName) {
Config config = new Config();
if (componentName == "configuration") {
if (name == null) {
if (instanceName != null) {
config.setInstanceName(instanceName);
}
config.getNetworkConfig().setPort(port);
config.getNetworkConfig().getJoin().getAwsConfig().setEnabled(false);
config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
} else {
config.setInstanceName(name + "-" + instanceName);
config.getMetricsConfig().setEnabled(false);
config.getNetworkConfig().setPort(port);
config.getNetworkConfig().getJoin().getAutoDetectionConfig().setEnabled(false);
}
} else if ((componentName == "list") || (componentName == "seda") || (componentName == "set")) {
config.getNetworkConfig().getJoin().getAutoDetectionConfig().setEnabled(false);
} else if (componentName == "idempotent") {
config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config.getNetworkConfig().getJoin().getAutoDetectionConfig().setEnabled(false);
} else if (componentName == "aggregation") {
config.setInstanceName(instanceName);
config.getMetricsConfig().setEnabled(false);
config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
}
return config;
}
}
| HazelcastEmbeddedInfraService |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JavaLangClashTest.java | {
"start": 906,
"end": 1463
} | class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(JavaLangClash.class, getClass());
// TODO(b/67718586): javac 9 doesn't want to compile sources in java.lang
private static final ImmutableList<String> JAVA8_JAVACOPTS =
ImmutableList.of("-source", "8", "-target", "8");
@Test
public void positive() {
testHelper
.addSourceLines(
"foo/String.java",
"""
package foo;
// BUG: Diagnostic contains:
public | JavaLangClashTest |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/OptionalDoubleShouldHaveValueCloseToPercentage.java | {
"start": 995,
"end": 3897
} | class ____ extends BasicErrorMessageFactory {
private OptionalDoubleShouldHaveValueCloseToPercentage(double expected) {
super("%nExpecting an OptionalDouble with value:%n" +
" %s%n" +
"but was empty.",
expected);
}
private OptionalDoubleShouldHaveValueCloseToPercentage(OptionalDouble actual, double expected, Percentage percentage,
double expectedPercentage) {
super("%nExpecting actual:%n" +
" %s%n" +
"to be close to:%n" +
" %s%n" +
"by less than %s but difference was %s%%.%n" +
"(a difference of exactly %s being considered valid)",
actual, expected, percentage, expectedPercentage, percentage);
}
/**
* Indicates that the provided {@link java.util.OptionalDouble} is empty so it doesn't have the expected value.
*
* @param expectedValue the value we expect to be in an {@link java.util.OptionalDouble}.
* @return a error message factory.
*/
public static OptionalDoubleShouldHaveValueCloseToPercentage shouldHaveValueCloseToPercentage(double expectedValue) {
return new OptionalDoubleShouldHaveValueCloseToPercentage(expectedValue);
}
/**
* Indicates that the provided {@link java.util.OptionalDouble} has a value, but it is not within the given positive
* percentage.
*
* @param actual the {@link java.util.OptionalDouble} which has a value
* @param expectedValue the value we expect to be in the provided {@link java.util.OptionalDouble}
* @param percentage the given positive percentage
* @return an error message factory
*/
@SuppressWarnings("OptionalGetWithoutIsPresent")
public static OptionalDoubleShouldHaveValueCloseToPercentage shouldHaveValueCloseToPercentage(OptionalDouble actual,
double expectedValue,
Percentage percentage) {
return shouldHaveValueCloseToPercentage(actual, expectedValue, percentage, abs(expectedValue - actual.getAsDouble()));
}
private static OptionalDoubleShouldHaveValueCloseToPercentage shouldHaveValueCloseToPercentage(OptionalDouble optional,
double expectedValue,
Percentage percentage,
double difference) {
double actualPercentage = difference / expectedValue * 100d;
return new OptionalDoubleShouldHaveValueCloseToPercentage(optional, expectedValue, percentage, actualPercentage);
}
}
| OptionalDoubleShouldHaveValueCloseToPercentage |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/network/EndpointReadyFutures.java | {
"start": 5590,
"end": 8351
} | class ____ {
final String endpointName;
final TreeSet<String> incomplete;
final CompletableFuture<Void> future;
EndpointReadyFuture(Endpoint endpoint, Collection<String> stageNames) {
this.endpointName = endpoint.listener();
this.incomplete = new TreeSet<>(stageNames);
this.future = new CompletableFuture<>();
}
void completeStage(String stageName) {
boolean done = false;
synchronized (EndpointReadyFuture.this) {
if (incomplete.remove(stageName)) {
if (incomplete.isEmpty()) {
done = true;
} else {
log.info("{} completed for endpoint {}. Still waiting for {}.",
stageName, endpointName, incomplete);
}
}
}
if (done) {
if (future.complete(null)) {
log.info("{} completed for endpoint {}. Endpoint is now READY.",
stageName, endpointName);
}
}
}
void failStage(String what, Throwable exception) {
if (future.completeExceptionally(exception)) {
synchronized (EndpointReadyFuture.this) {
incomplete.clear();
}
log.warn("Endpoint {} will never become ready because we encountered an {} exception",
endpointName, what, exception);
}
}
}
private final Logger log;
private final Map<Endpoint, CompletableFuture<Void>> futures;
private EndpointReadyFutures(
LogContext logContext,
Map<Endpoint, List<EndpointCompletionStage>> endpointStages
) {
this.log = logContext.logger(EndpointReadyFutures.class);
Map<Endpoint, CompletableFuture<Void>> newFutures = new HashMap<>();
endpointStages.forEach((endpoint, stages) -> {
List<String> stageNames = new ArrayList<>();
stages.forEach(stage -> stageNames.add(stage.name));
EndpointReadyFuture readyFuture = new EndpointReadyFuture(endpoint, stageNames);
newFutures.put(endpoint, readyFuture.future);
stages.forEach(stage -> stage.future.whenComplete((__, exception) -> {
if (exception != null) {
readyFuture.failStage(stage.name, exception);
} else {
readyFuture.completeStage(stage.name);
}
}));
});
this.futures = newFutures;
}
public Map<Endpoint, CompletableFuture<Void>> futures() {
return futures;
}
}
| EndpointReadyFuture |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cdi/general/hibernatesearch/TheApplicationScopedBean.java | {
"start": 347,
"end": 850
} | class ____ {
@jakarta.inject.Inject
private TheNestedDependentBean nestedDependentBean;
public TheApplicationScopedBean() {
Monitor.theApplicationScopedBean().instantiated();
}
public void ensureInitialized() {
nestedDependentBean.ensureInitialized();
}
@PostConstruct
public void postConstruct() {
Monitor.theApplicationScopedBean().postConstructCalled();
}
@PreDestroy
public void preDestroy() {
Monitor.theApplicationScopedBean().preDestroyCalled();
}
}
| TheApplicationScopedBean |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/JsonValueSerializationTest.java | {
"start": 4513,
"end": 4636
} | class ____ {
@JsonValue
public String value() {
return "value";
}
}
static | Bean838 |
java | google__guava | android/guava/src/com/google/common/collect/CompactHashMap.java | {
"start": 25673,
"end": 26957
} | class ____ extends AbstractSet<K> {
@Override
public int size() {
return CompactHashMap.this.size();
}
@Override
public boolean contains(@Nullable Object o) {
return CompactHashMap.this.containsKey(o);
}
@Override
public boolean remove(@Nullable Object o) {
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.keySet().remove(o)
: CompactHashMap.this.removeHelper(o) != NOT_FOUND;
}
@Override
public Iterator<K> iterator() {
return keySetIterator();
}
@Override
public void clear() {
CompactHashMap.this.clear();
}
}
Iterator<K> keySetIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.keySet().iterator();
}
return new Itr<K>() {
@Override
@ParametricNullness
K getOutput(int entry) {
return key(entry);
}
};
}
@LazyInit private transient @Nullable Set<Entry<K, V>> entrySetView;
@Override
public Set<Entry<K, V>> entrySet() {
return (entrySetView == null) ? entrySetView = createEntrySet() : entrySetView;
}
Set<Entry<K, V>> createEntrySet() {
return new EntrySetView();
}
@WeakOuter
private final | KeySetView |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java | {
"start": 2652,
"end": 34514
} | class ____ extends Storage {
static final String TRASH_ROOT_DIR = "trash";
/**
* A marker file that is created on each root directory if a rolling upgrade
* is in progress. The NN does not inform the DN when a rolling upgrade is
* finalized. All the DN can infer is whether or not a rolling upgrade is
* currently in progress. When the rolling upgrade is not in progress:
* 1. If the marker file is present, then a rolling upgrade just completed.
* If a 'previous' directory exists, it can be deleted now.
* 2. If the marker file is absent, then a regular upgrade may be in
* progress. Do not delete the 'previous' directory.
*/
static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
private static final String BLOCK_POOL_ID_PATTERN_BASE =
Pattern.quote(File.separator) +
"BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
Pattern.quote(File.separator);
private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(.*)$");
private static final Pattern BLOCK_POOL_CURRENT_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(" + STORAGE_DIR_CURRENT + ")(.*)$");
private static final Pattern BLOCK_POOL_TRASH_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(" + TRASH_ROOT_DIR + ")(.*)$");
private String blockpoolID = ""; // id of the blockpool
private Daemon trashCleaner;
public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
super(storageInfo);
blockpoolID = bpid;
}
/**
* These maps are used as an optimization to avoid one filesystem operation
* per storage on each heartbeat response.
*/
private static Set<String> storagesWithRollingUpgradeMarker;
private static Set<String> storagesWithoutRollingUpgradeMarker;
BlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
String clusterId) {
super(NodeType.DATA_NODE);
this.namespaceID = namespaceID;
this.blockpoolID = bpID;
this.cTime = cTime;
this.clusterID = clusterId;
storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
}
private BlockPoolSliceStorage() {
super(NodeType.DATA_NODE);
storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
}
// Expose visibility for VolumeBuilder#commit().
public void addStorageDir(StorageDirectory sd) {
super.addStorageDir(sd);
}
/**
* Load one storage directory. Recover from previous transitions if required.
* @param nsInfo namespace information
* @param location the root path of the storage directory
* @param startOpt startup option
* @param callables list of callable storage directory
* @param conf configuration
* @return
* @throws IOException
*/
private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
StorageLocation location, StartupOption startOpt,
List<Callable<StorageDirectory>> callables, Configuration conf)
throws IOException {
StorageDirectory sd = new StorageDirectory(
nsInfo.getBlockPoolID(), null, true, location);
try {
StorageState curState = sd.analyzeStorage(startOpt, this, true);
// sd is locked but not opened
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
LOG.info("Block pool storage directory for location {} and block pool"
+ " id {} does not exist", location, nsInfo.getBlockPoolID());
throw new IOException("Storage directory for location " + location +
" and block pool id " + nsInfo.getBlockPoolID() +
" does not exist");
case NOT_FORMATTED: // format
LOG.info("Block pool storage directory for location {} and block pool"
+ " id {} is not formatted. Formatting ...", location,
nsInfo.getBlockPoolID());
format(sd, nsInfo);
break;
default: // recovery part is common
sd.doRecover(curState);
}
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
if (!doTransition(sd, nsInfo, startOpt, callables, conf)) {
// 3. Check CTime and update successfully loaded storage.
if (getCTime() != nsInfo.getCTime()) {
throw new IOException("Datanode CTime (=" + getCTime()
+ ") is not equal to namenode CTime (=" + nsInfo.getCTime() + ")");
}
setServiceLayoutVersion(getServiceLayoutVersion());
writeProperties(sd);
}
return sd;
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
/**
* Analyze and load storage directories. Recover from previous transitions if
* required.
*
* The block pool storages are either all analyzed or none of them is loaded.
* Therefore, a failure on loading any block pool storage results a faulty
* data volume.
*
* @param nsInfo namespace information
* @param location storage directories of block pool
* @param startOpt startup option
* @param callables list of callable storage directory
* @param conf configuration
* @return an array of loaded block pool directories.
* @throws IOException on error
*/
List<StorageDirectory> loadBpStorageDirectories(NamespaceInfo nsInfo,
StorageLocation location, StartupOption startOpt,
List<Callable<StorageDirectory>> callables, Configuration conf)
throws IOException {
List<StorageDirectory> succeedDirs = Lists.newArrayList();
try {
if (containsStorageDir(location, nsInfo.getBlockPoolID())) {
throw new IOException(
"BlockPoolSliceStorage.recoverTransitionRead: " +
"attempt to load an used block storage: " + location);
}
final StorageDirectory sd = loadStorageDirectory(
nsInfo, location, startOpt, callables, conf);
succeedDirs.add(sd);
} catch (IOException e) {
LOG.warn("Failed to analyze storage directories for block pool {}",
nsInfo.getBlockPoolID(), e);
throw e;
}
return succeedDirs;
}
/**
* Analyze storage directories. Recover from previous transitions if required.
*
* The block pool storages are either all analyzed or none of them is loaded.
* Therefore, a failure on loading any block pool storage results a faulty
* data volume.
*
* @param nsInfo namespace information
* @param location storage directories of block pool
* @param startOpt startup option
* @param callables list of callable storage directory
* @param conf configuration
* @throws IOException on error
*/
List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
StorageLocation location, StartupOption startOpt,
List<Callable<StorageDirectory>> callables, Configuration conf)
throws IOException {
LOG.info("Analyzing storage directories for bpid {}", nsInfo
.getBlockPoolID());
final List<StorageDirectory> loaded = loadBpStorageDirectories(
nsInfo, location, startOpt, callables, conf);
for (StorageDirectory sd : loaded) {
addStorageDir(sd);
}
return loaded;
}
/**
* Format a block pool slice storage.
* @param dnCurDir DataStorage current directory
* @param nsInfo the name space info
* @throws IOException Signals that an I/O exception has occurred.
*/
void format(File dnCurDir, NamespaceInfo nsInfo) throws IOException {
File curBpDir = getBpRoot(nsInfo.getBlockPoolID(), dnCurDir);
StorageDirectory bpSdir = new StorageDirectory(curBpDir);
format(bpSdir, nsInfo);
}
/**
* Format a block pool slice storage.
* @param bpSdir the block pool storage
* @param nsInfo the name space info
* @throws IOException Signals that an I/O exception has occurred.
*/
private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
LOG.info("Formatting block pool {} directory {}", blockpoolID, bpSdir
.getCurrentDir());
bpSdir.clearDirectory(); // create directory
this.layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
this.cTime = nsInfo.getCTime();
this.namespaceID = nsInfo.getNamespaceID();
this.blockpoolID = nsInfo.getBlockPoolID();
writeProperties(bpSdir);
}
/**
* Remove block pool level storage directory.
* @param absPathToRemove the absolute path of the root for the block pool
* level storage to remove.
*/
void remove(File absPathToRemove) {
Preconditions.checkArgument(absPathToRemove.isAbsolute());
LOG.info("Removing block level storage: {}", absPathToRemove);
for (Iterator<StorageDirectory> it = getStorageDirs().iterator();
it.hasNext(); ) {
StorageDirectory sd = it.next();
if (sd.getRoot().getAbsoluteFile().equals(absPathToRemove)) {
getStorageDirs().remove(sd);
break;
}
}
}
/**
* Set layoutVersion, namespaceID and blockpoolID into block pool storage
* VERSION file
*/
@Override
protected void setPropertiesFromFields(Properties props, StorageDirectory sd)
throws IOException {
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("namespaceID", String.valueOf(namespaceID));
props.setProperty("blockpoolID", blockpoolID);
props.setProperty("cTime", String.valueOf(cTime));
}
/** Validate and set block pool ID */
private void setBlockPoolID(File storage, String bpid)
throws InconsistentFSStateException {
if (bpid == null || bpid.equals("")) {
throw new InconsistentFSStateException(storage, "file "
+ STORAGE_FILE_VERSION + " is invalid.");
}
if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
throw new InconsistentFSStateException(storage,
"Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
}
blockpoolID = bpid;
}
@Override
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
throws IOException {
setLayoutVersion(props, sd);
setNamespaceID(props, sd);
setcTime(props, sd);
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
/**
* Analyze whether a transition of the BP state is required and
* perform it if necessary.
* <br>
* Rollback if:
* previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
* Upgrade if:
* this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime
* Regular startup if:
* this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
*
* @param sd storage directory @{literal <SD>/current/<bpid>}
* @param nsInfo namespace info
* @param startOpt startup option
* @param callables list of callable storage directory
* @param conf configuration
* @return true if the new properties has been written.
*/
private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
StartupOption startOpt, List<Callable<StorageDirectory>> callables,
Configuration conf) throws IOException {
if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
return false; // regular startup for PROVIDED storage directories
}
if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) {
Preconditions.checkState(!getTrashRootDir(sd).exists(),
sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " +
" both be present.");
doRollback(sd, nsInfo); // rollback if applicable
} else if (startOpt == StartupOption.ROLLBACK &&
!sd.getPreviousDir().exists()) {
// Restore all the files in the trash. The restored files are retained
// during rolling upgrade rollback. They are deleted during rolling
// upgrade downgrade.
int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
LOG.info("Restored {} block files from trash.", restored);
}
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
assert this.layoutVersion >= DataNodeLayoutVersion.getCurrentLayoutVersion()
: "Future version is not allowed";
if (getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
+ nsInfo.getNamespaceID() + "; datanode namespaceID = "
+ getNamespaceID());
}
if (!blockpoolID.equals(nsInfo.getBlockPoolID())) {
throw new IOException("Incompatible blockpoolIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode blockpoolID = "
+ nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
+ blockpoolID);
}
if (this.layoutVersion == DataNodeLayoutVersion.getCurrentLayoutVersion()
&& this.cTime == nsInfo.getCTime()) {
return false; // regular startup
}
if (this.layoutVersion > DataNodeLayoutVersion.getCurrentLayoutVersion()) {
int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
LOG.info("Restored {} block files from trash " +
"before the layout upgrade. These blocks will be moved to " +
"the previous directory during the upgrade", restored);
}
if (this.layoutVersion > DataNodeLayoutVersion.getCurrentLayoutVersion()
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo, callables, conf); // upgrade
return true;
}
// layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
// must shutdown
throw new IOException("Datanode state: LV = " + this.getLayoutVersion()
+ " CTime = " + this.getCTime()
+ " is newer than the namespace state: LV = "
+ nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
}
/**
* Upgrade to any release after 0.22 (0.22 included) release
* e.g. 0.22 => 0.23
* Upgrade procedure is as follows:
* <ol>
* <li>If {@literal <SD>/current/<bpid>/previous} exists then delete it</li>
* <li>Rename {@literal <SD>/current/<bpid>/current} to
* {@literal <SD>/current/bpid/current/previous.tmp}</li>
* <li>Create new {@literal <SD>current/<bpid>/current} directory</li>
* <li>Hard links for block files are created from previous.tmp to current</li>
* <li>Save new version file in current directory</li>
* <li>Rename previous.tmp to previous</li>
* </ol>
*
* @param bpSd storage directory {@literal <SD>/current/<bpid>}
* @param nsInfo Namespace Info from the namenode
* @throws IOException on error
*/
private void doUpgrade(final StorageDirectory bpSd,
final NamespaceInfo nsInfo,
final List<Callable<StorageDirectory>> callables,
final Configuration conf) throws IOException {
// Upgrading is applicable only to release with federation or after
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
return;
}
// no upgrades for storage directories that are PROVIDED
if (bpSd.getRoot() == null) {
return;
}
final int oldLV = getLayoutVersion();
LOG.info("Upgrading block pool storage directory {}.\n old LV = {}; old"
+ " CTime = {}.\n new LV = {}; new CTime = {}",
bpSd.getRoot(), oldLV, this.getCTime(),
DataNodeLayoutVersion.getCurrentLayoutVersion(), nsInfo.getCTime());
// get <SD>/previous directory
String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));
File dnPrevDir = dnSdStorage.getPreviousDir();
// If <SD>/previous directory exists delete it
if (dnPrevDir.exists()) {
deleteDir(dnPrevDir);
}
final File bpCurDir = bpSd.getCurrentDir();
final File bpPrevDir = bpSd.getPreviousDir();
assert bpCurDir.exists() : "BP level current directory must exist.";
cleanupDetachDir(new File(bpCurDir, DataStorage.STORAGE_DIR_DETACHED));
// 1. Delete <SD>/current/<bpid>/previous dir before upgrading
if (bpPrevDir.exists()) {
deleteDir(bpPrevDir);
}
final File bpTmpDir = bpSd.getPreviousTmp();
assert !bpTmpDir.exists() : "previous.tmp directory must not exist.";
// 2. Rename <SD>/current/<bpid>/current to
// <SD>/current/<bpid>/previous.tmp
rename(bpCurDir, bpTmpDir);
final String name = "block pool " + blockpoolID + " at " + bpSd.getRoot();
if (callables == null) {
doUpgrade(name, bpSd, nsInfo, bpPrevDir, bpTmpDir, bpCurDir, oldLV, conf);
} else {
callables.add(new Callable<StorageDirectory>() {
@Override
public StorageDirectory call() throws Exception {
doUpgrade(name, bpSd, nsInfo, bpPrevDir, bpTmpDir, bpCurDir, oldLV,
conf);
return bpSd;
}
});
}
}
private void doUpgrade(String name, final StorageDirectory bpSd,
NamespaceInfo nsInfo, final File bpPrevDir, final File bpTmpDir,
final File bpCurDir, final int oldLV, Configuration conf)
throws IOException {
// 3. Create new <SD>/current with block files hardlinks and VERSION
linkAllBlocks(bpTmpDir, bpCurDir, oldLV, conf);
this.layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
writeProperties(bpSd);
// 4.rename <SD>/current/<bpid>/previous.tmp to
// <SD>/current/<bpid>/previous
rename(bpTmpDir, bpPrevDir);
LOG.info("Upgrade of {} is complete", name);
}
/**
* Cleanup the detachDir.
*
* If the directory is not empty report an error; Otherwise remove the
* directory.
*
* @param detachDir detach directory
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
&& detachDir.exists() && detachDir.isDirectory()) {
if (FileUtil.list(detachDir).length != 0) {
throw new IOException("Detached directory " + detachDir
+ " is not empty. Please manually move each file under this "
+ "directory to the finalized directory if the finalized "
+ "directory tree does not have the file.");
} else if (!detachDir.delete()) {
throw new IOException("Cannot remove directory " + detachDir);
}
}
}
/**
* Restore all files from the trash directory to their corresponding
* locations under current/
*/
private int restoreBlockFilesFromTrash(File trashRoot)
throws IOException {
int filesRestored = 0;
File[] children = trashRoot.exists() ? trashRoot.listFiles() : null;
if (children == null) {
return 0;
}
File restoreDirectory = null;
for (File child : children) {
if (child.isDirectory()) {
// Recurse to process subdirectories.
filesRestored += restoreBlockFilesFromTrash(child);
continue;
}
if (restoreDirectory == null) {
restoreDirectory = new File(getRestoreDirectory(child));
if (!restoreDirectory.exists() && !restoreDirectory.mkdirs()) {
throw new IOException("Failed to create directory " + restoreDirectory);
}
}
final File newChild = new File(restoreDirectory, child.getName());
if (newChild.exists() && newChild.length() >= child.length()) {
// Failsafe - we should not hit this case but let's make sure
// we never overwrite a newer version of a block file with an
// older version.
LOG.info("Not overwriting {} with smaller file from " +
"trash directory. This message can be safely ignored.", newChild);
} else if (!child.renameTo(newChild)) {
throw new IOException("Failed to rename " + child + " to " + newChild);
} else {
++filesRestored;
}
}
FileUtil.fullyDelete(trashRoot);
return filesRestored;
}
/*
* Roll back to old snapshot at the block pool level
* If previous directory exists:
* <ol>
* <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
* <li>Rename * <SD>/current/<bpid>/previous to current</li>
* <li>Remove removed.tmp</li>
* </ol>
*
* Do nothing if previous directory does not exist.
* @param bpSd Block pool storage directory at <SD>/current/<bpid>
*/
void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
throws IOException {
File prevDir = bpSd.getPreviousDir();
// regular startup if previous dir does not exist
if (prevDir == null || !prevDir.exists()) {
return;
}
// read attributes out of the VERSION file of previous directory
BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
prevInfo.readPreviousVersionProperties(bpSd);
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
// In another word, we can only roll back when ( storedLV >= software LV)
// && ( DN.previousCTime <= NN.ctime)
if (!(prevInfo.getLayoutVersion() >=
DataNodeLayoutVersion.getCurrentLayoutVersion() &&
prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
throw new InconsistentFSStateException(bpSd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
+ prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+ " is newer than the namespace state: LV = "
+ DataNodeLayoutVersion.getCurrentLayoutVersion() + " CTime = "
+ nsInfo.getCTime());
}
LOG.info("Rolling back storage directory {}.\n target LV = {}; target "
+ "CTime = {}", bpSd.getRoot(), nsInfo.getLayoutVersion(),
nsInfo.getCTime());
File tmpDir = bpSd.getRemovedTmp();
assert !tmpDir.exists() : "removed.tmp directory must not exist.";
// 1. rename current to tmp
File curDir = bpSd.getCurrentDir();
assert curDir.exists() : "Current directory must exist.";
rename(curDir, tmpDir);
// 2. rename previous to current
rename(prevDir, curDir);
// 3. delete removed.tmp dir
deleteDir(tmpDir);
LOG.info("Rollback of {} is complete", bpSd.getRoot());
}
/*
* Finalize the block pool storage by deleting <BP>/previous directory
* that holds the snapshot.
*/
void doFinalize(File dnCurDir) throws IOException {
if (dnCurDir == null) {
return; //we do nothing if the directory is null
}
File bpRoot = getBpRoot(blockpoolID, dnCurDir);
StorageDirectory bpSd = new StorageDirectory(bpRoot);
// block pool level previous directory
File prevDir = bpSd.getPreviousDir();
if (!prevDir.exists()) {
return; // already finalized
}
final String dataDirPath = bpSd.getRoot().getCanonicalPath();
LOG.info("Finalizing upgrade for storage directory {}.\n cur LV = {}; "
+ "cur CTime = {}", dataDirPath, this.getLayoutVersion(),
this.getCTime());
assert bpSd.getCurrentDir().exists() : "Current directory must exist.";
// rename previous to finalized.tmp
final File tmpDir = bpSd.getFinalizedTmp();
rename(prevDir, tmpDir);
// delete finalized.tmp dir in a separate thread
new Daemon(new Runnable() {
@Override
public void run() {
try {
deleteDir(tmpDir);
} catch (IOException ex) {
LOG.error("Finalize upgrade for {} failed.", dataDirPath, ex);
}
LOG.info("Finalize upgrade for {} is complete.", dataDirPath);
}
@Override
public String toString() {
return "Finalize " + dataDirPath;
}
}).start();
}
/**
* Hardlink all finalized and RBW blocks in fromDir to toDir
*
* @param fromDir directory where the snapshot is stored
* @param toDir the current data directory
* @throws IOException if error occurs during hardlink
*/
private static void linkAllBlocks(File fromDir, File toDir,
int diskLayoutVersion, Configuration conf) throws IOException {
// do the link
// hardlink finalized blocks in tmpDir
HardLink hardLink = new HardLink();
DataStorage.linkBlocks(fromDir, toDir, DataStorage.STORAGE_DIR_FINALIZED,
diskLayoutVersion, hardLink, conf);
DataStorage.linkBlocks(fromDir, toDir, DataStorage.STORAGE_DIR_RBW,
diskLayoutVersion, hardLink, conf);
LOG.info("Linked blocks from {} to {}. {}", fromDir, toDir,
hardLink.linkStats.report());
}
/**
* gets the data node storage directory based on block pool storage
*/
private static String getDataNodeStorageRoot(String bpRoot) {
Matcher matcher = BLOCK_POOL_PATH_PATTERN.matcher(bpRoot);
if (matcher.matches()) {
// return the data node root directory
return matcher.group(1);
}
return bpRoot;
}
@Override
public String toString() {
return super.toString() + ";bpid=" + blockpoolID;
}
/**
* Get a block pool storage root based on data node storage root
* @param bpID block pool ID
* @param dnCurDir data node storage root directory
* @return root directory for block pool storage
*/
public static File getBpRoot(String bpID, File dnCurDir) {
return new File(dnCurDir, bpID);
}
@Override
public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
return false;
}
private File getTrashRootDir(StorageDirectory sd) {
return new File(sd.getRoot(), TRASH_ROOT_DIR);
}
/**
* Determine whether we can use trash for the given blockFile. Trash
* is disallowed if a 'previous' directory exists for the
* storage directory containing the block.
*/
@VisibleForTesting
public boolean isTrashAllowed(File blockFile) {
Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
String previousDir = matcher.replaceFirst("$1$2" + STORAGE_DIR_PREVIOUS);
return !(new File(previousDir)).exists();
}
/**
* Get a target subdirectory under trash/ for a given block file that is being
* deleted.
*
* The subdirectory structure under trash/ mirrors that under current/ to keep
* implicit memory of where the files are to be restored (if necessary).
*
* @return the trash directory for a given block file that is being deleted.
*/
public String getTrashDirectory(ReplicaInfo info) {
URI blockURI = info.getBlockURI();
try{
File blockFile = new File(blockURI);
return getTrashDirectory(blockFile);
} catch (IllegalArgumentException e) {
LOG.warn("Failed to get block file for replica {}", info, e);
}
return null;
}
private String getTrashDirectory(File blockFile) {
if (isTrashAllowed(blockFile)) {
Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4");
return trashDirectory;
}
return null;
}
/**
* Get a target subdirectory under current/ for a given block file that is
* being restored from trash.
*
* The subdirectory structure under trash/ mirrors that under current/ to keep
* implicit memory of where the files are to be restored.
* @param blockFile block file that is being restored from trash.
* @return the target directory to restore a previously deleted block file.
*/
@VisibleForTesting
String getRestoreDirectory(File blockFile) {
Matcher matcher = BLOCK_POOL_TRASH_PATH_PATTERN.matcher(blockFile.getParent());
String restoreDirectory = matcher.replaceFirst("$1$2" + STORAGE_DIR_CURRENT + "$4");
LOG.info("Restoring {} to {}", blockFile, restoreDirectory);
return restoreDirectory;
}
  /**
   * Delete all files and directories in the trash directories.
   *
   * Collects the trash root of every storage directory, then deletes them
   * asynchronously on a freshly started daemon thread (any previously
   * running cleaner is interrupted first).
   */
  public void clearTrash() {
    final List<File> trashRoots = new ArrayList<>();
    for (StorageDirectory sd : getStorageDirs()) {
      File trashRoot = getTrashRootDir(sd);
      // Trash and 'previous' are mutually exclusive: trash is only used when
      // no layout upgrade is in progress. Both existing indicates a bug
      // (the assert only fires when -ea is enabled); such dirs are skipped.
      if (trashRoot.exists() && sd.getPreviousDir().exists()) {
        LOG.error("Trash and PreviousDir shouldn't both exist for storage "
            + "directory {}", sd);
        assert false;
      } else {
        trashRoots.add(trashRoot);
      }
    }
    // Stop any in-flight cleaner before starting a new one.
    stopTrashCleaner();
    trashCleaner = new Daemon(new Runnable() {
      @Override
      public void run() {
        for(File trashRoot : trashRoots){
          FileUtil.fullyDelete(trashRoot);
          LOG.info("Cleared trash for storage directory {}", trashRoot);
        }
      }
      @Override
      public String toString() {
        return "clearTrash() for " + blockpoolID;
      }
    });
    trashCleaner.start();
  }
public void stopTrashCleaner() {
if (trashCleaner != null) {
trashCleaner.interrupt();
}
}
/** trash is enabled if at least one storage directory contains trash root */
@VisibleForTesting
public boolean trashEnabled() {
for (StorageDirectory sd : getStorageDirs()) {
if (getTrashRootDir(sd).exists()) {
return true;
}
}
return false;
}
/**
* Create a rolling upgrade marker file for each BP storage root, if it
* does not exist already.
* @param dnStorageDirs
*/
public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
throws IOException {
for (StorageDirectory sd : dnStorageDirs) {
if (sd.getCurrentDir() == null) {
return;
}
File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
if (!markerFile.exists() && markerFile.createNewFile()) {
LOG.info("Created {}", markerFile);
} else {
LOG.info("{} already exists.", markerFile);
}
storagesWithRollingUpgradeMarker.add(bpRoot.toString());
storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString());
}
}
}
  /**
   * Check whether the rolling upgrade marker file exists for each BP storage
   * root. If it does exist, then the marker file is cleared and more
   * importantly the layout upgrade is finalized.
   *
   * @param dnStorageDirs storage directories to inspect for marker files
   * @throws IOException if finalization or marker handling fails
   */
  public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
      throws IOException {
    for (StorageDirectory sd : dnStorageDirs) {
      if (sd.getCurrentDir() == null) {
        // Nothing to clear for a directory with no current dir.
        continue;
      }
      File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
      File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
      if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {
        if (markerFile.exists()) {
          LOG.info("Deleting {}", markerFile);
          // Finalize the layout upgrade before removing the marker, so a
          // crash between the two steps is retried rather than lost.
          doFinalize(sd.getCurrentDir());
          if (!markerFile.delete()) {
            LOG.warn("Failed to delete {}", markerFile);
          }
        }
        // Update bookkeeping sets regardless of whether a marker was found.
        storagesWithoutRollingUpgradeMarker.add(bpRoot.toString());
        storagesWithRollingUpgradeMarker.remove(bpRoot.toString());
      }
    }
  }
}
| BlockPoolSliceStorage |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrarTests.java | {
"start": 22641,
"end": 22814
} | class ____ {
private final ListenerRetry retry = new ListenerRetry();
public ListenerRetry getRetry() {
return this.retry;
}
}
public abstract static | Simple |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/result/MockMvcResultMatchers.java | {
"start": 1272,
"end": 1493
} | class ____ a Java editor favorite. To navigate to
* this setting, open the Preferences and type "favorites".
*
* @author Rossen Stoyanchev
* @author Brian Clozel
* @author Sam Brannen
* @since 3.2
*/
public abstract | as |
java | redisson__redisson | redisson/src/test/java/org/redisson/rx/RedissonTimeSeriesRxTest.java | {
"start": 318,
"end": 906
} | class ____ extends BaseRxTest {
@Test
public void testOrder() {
RTimeSeriesRx<String, Object> t = redisson.getTimeSeries("test");
sync(t.add(4, "40"));
sync(t.add(2, "20"));
sync(t.add(1, "10", 1, TimeUnit.SECONDS));
Collection<TimeSeriesEntry<String, Object>> r11 = sync(t.entryRange(1, 5));
assertThat(r11).containsExactly(new TimeSeriesEntry<>(1,"10"),
new TimeSeriesEntry<>(2, "20"),
new TimeSeriesEntry<>(4, "40"));
}
}
| RedissonTimeSeriesRxTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/SampleExec.java | {
"start": 784,
"end": 2663
} | class ____ extends UnaryExec {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
PhysicalPlan.class,
"SampleExec",
SampleExec::new
);
private final Expression probability;
public SampleExec(Source source, PhysicalPlan child, Expression probability) {
super(source, child);
this.probability = probability;
}
public SampleExec(StreamInput in) throws IOException {
this(
Source.readFrom((PlanStreamInput) in),
in.readNamedWriteable(PhysicalPlan.class), // child
in.readNamedWriteable(Expression.class) // probability
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
source().writeTo(out);
out.writeNamedWriteable(child());
out.writeNamedWriteable(probability);
}
@Override
public UnaryExec replaceChild(PhysicalPlan newChild) {
return new SampleExec(source(), newChild, probability);
}
@Override
protected NodeInfo<? extends PhysicalPlan> info() {
return NodeInfo.create(this, SampleExec::new, child(), probability);
}
/**
* Returns the name of the writeable object
*/
@Override
public String getWriteableName() {
return ENTRY.name;
}
public Expression probability() {
return probability;
}
@Override
public int hashCode() {
return Objects.hash(child(), probability);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
var other = (SampleExec) obj;
return Objects.equals(child(), other.child()) && Objects.equals(probability, other.probability);
}
}
| SampleExec |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/odps/OdpsSelectTest14.java | {
"start": 909,
"end": 2198
} | class ____ extends TestCase {
public void test_select() throws Exception {
String sql = "SELECT split_part(content, '\\001')[1] FROM dual;";
assertEquals("SELECT split_part(content, '\\001')[1]\n" +
"FROM dual;", SQLUtils.formatOdps(sql));
assertEquals("select split_part(content, '\\001')[1]\n" +
"from dual;", SQLUtils.formatOdps(sql, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION));
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ODPS);
SQLStatement stmt = statementList.get(0);
assertEquals(1, statementList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ODPS);
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(1, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getColumns().contains(new Column("abc", "name")));
}
}
| OdpsSelectTest14 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/QueryTest.java | {
"start": 1415,
"end": 4408
} | class ____ {
private static final float SALARY = 267.89f;
private static final float EXPECTED_NON_CONVERTED = 26789f;
@Test
public void testJpqlFloatLiteral(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Employee jDoe = session.createQuery( "from Employee e where e.salary = " + SALARY + "f", Employee.class ).uniqueResult();
assertNotNull( jDoe );
} );
}
@Test
public void testJpqlBooleanLiteral(SessionFactoryScope scope) {
scope.inTransaction( session -> {
assertNotNull( session.createQuery( "from Employee e where e.active = true", Employee.class ).uniqueResult() );
assertNull( session.createQuery( "from Employee e where e.active = false", Employee.class ).uniqueResult() );
} );
}
@Test
@JiraKey( "HHH-13082" )
public void testNativeQueryResult(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final NativeQuery<Object[]> query = session.createNativeQuery( "select id, salary from EMP", "emp_id_salary", Object[].class );
final List<Object[]> results = query.list();
assertThat( results ).hasSize( 1 );
final Object[] values = results.get( 0 );
assertThat( values[0] ).isEqualTo( 1 );
assertThat( values[1] ).isEqualTo( SALARY );
} );
}
@Test
public void testNativeQueryResultWithResultClass(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final NativeQuery<Object[]> query = session.createNativeQuery( "select id, salary from EMP", "emp_id_salary", Object[].class );
final List<Object[]> results = query.list();
assertThat( results ).hasSize( 1 );
final Object[] values = results.get( 0 );
assertThat( values[0] ).isEqualTo( 1 );
assertThat( values[1] ).isEqualTo( SALARY );
} );
}
@Test
@JiraKey( "HHH-14975" )
public void testAutoAppliedConverterAsNativeQueryResult(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
final NativeQuery<Object[]> query = session.createNativeQuery( "select id, salary from EMP", "emp_id_salary2", Object[].class );
final List<Object[]> results = query.list();
assertThat( results ).hasSize( 1 );
final Object[] values = results.get( 0 );
assertThat( values[0] ).isEqualTo( 1 );
assertThat( values[1] ).isEqualTo( EXPECTED_NON_CONVERTED );
} );
}
@BeforeEach
public void setUpTestData(SessionFactoryScope scope) {
scope.inTransaction( session -> session.persist(new Employee(1, new Name("John", "Q.", "Doe" ), SALARY)) );
}
@AfterEach
public void cleanUpTestData(SessionFactoryScope scope) {
scope.inTransaction( session -> scope.dropData() );
}
@Entity( name = "Employee" )
@Table( name = "EMP" )
@SqlResultSetMapping(
name = "emp_id_salary",
columns = {
@ColumnResult( name = "id" ),
@ColumnResult( name = "salary", type = SalaryConverter.class )
}
)
@SqlResultSetMapping(
name = "emp_id_salary2",
columns = {
@ColumnResult( name = "id" ),
@ColumnResult( name = "salary", type = Float.class )
}
)
public static | QueryTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java | {
"start": 900,
"end": 6447
} | class ____ implements Rescorer {
private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10;
public static final Rescorer INSTANCE = new QueryRescorer();
@Override
public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext rescoreContext) throws IOException {
assert rescoreContext != null;
if (topDocs == null || topDocs.scoreDocs.length == 0) {
return topDocs;
}
final QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext;
org.apache.lucene.search.Rescorer rescorer = new org.apache.lucene.search.QueryRescorer(rescore.parsedQuery().query()) {
int count = 0;
@Override
protected float combine(float firstPassScore, boolean secondPassMatches, float secondPassScore) {
if (count % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) {
rescore.checkCancellation();
}
count++;
if (secondPassMatches) {
return rescore.scoreMode.combine(
firstPassScore * rescore.queryWeight(),
secondPassScore * rescore.rescoreQueryWeight()
);
}
// TODO: shouldn't this be up to the ScoreMode? I.e., we should just invoke ScoreMode.combine, passing 0.0f for the
// secondary score?
return firstPassScore * rescore.queryWeight();
}
};
// First take top slice of incoming docs, to be rescored:
TopDocs topNFirstPass = Rescorer.topN(topDocs, rescoreContext.getWindowSize());
// Save doc IDs for which rescoring was applied to be used in score explanation
Set<Integer> topNDocIDs = Arrays.stream(topNFirstPass.scoreDocs).map(scoreDoc -> scoreDoc.doc).collect(toUnmodifiableSet());
rescoreContext.setRescoredDocs(topNDocIDs);
// Rescore them:
TopDocs rescored = rescorer.rescore(searcher, topNFirstPass, rescoreContext.getWindowSize());
// Splice back to non-topN hits and resort all of them:
return combine(topDocs, rescored, (QueryRescoreContext) rescoreContext);
}
@Override
public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreContext rescoreContext, Explanation sourceExplanation)
throws IOException {
if (sourceExplanation == null) {
// this should not happen but just in case
return Explanation.noMatch("nothing matched");
}
QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext;
float primaryWeight = rescore.queryWeight();
Explanation prim;
if (sourceExplanation.isMatch()) {
prim = Explanation.match(
sourceExplanation.getValue().floatValue() * primaryWeight,
"product of:",
sourceExplanation,
Explanation.match(primaryWeight, "primaryWeight")
);
} else {
prim = Explanation.noMatch("First pass did not match", sourceExplanation);
}
if (rescoreContext.isRescored(topLevelDocId)) {
Explanation rescoreExplain = searcher.explain(rescore.parsedQuery().query(), topLevelDocId);
// NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used.
// Maybe we should add QueryRescorer.explainCombine to Lucene?
if (rescoreExplain != null && rescoreExplain.isMatch()) {
float secondaryWeight = rescore.rescoreQueryWeight();
Explanation sec = Explanation.match(
rescoreExplain.getValue().floatValue() * secondaryWeight,
"product of:",
rescoreExplain,
Explanation.match(secondaryWeight, "secondaryWeight")
);
QueryRescoreMode scoreMode = rescore.scoreMode();
return Explanation.match(
scoreMode.combine(prim.getValue().floatValue(), sec.getValue().floatValue()),
scoreMode + " of:",
prim,
sec
);
}
}
return prim;
}
/** Modifies incoming TopDocs (in) by replacing the top hits with resorted's hits, and then resorting all hits. */
private static TopDocs combine(TopDocs in, TopDocs resorted, QueryRescoreContext ctx) {
System.arraycopy(resorted.scoreDocs, 0, in.scoreDocs, 0, resorted.scoreDocs.length);
if (in.scoreDocs.length > resorted.scoreDocs.length) {
// These hits were not rescored (beyond the rescore window), so we treat them the same as a hit that did get rescored but did
// not match the 2nd pass query:
for (int i = resorted.scoreDocs.length; i < in.scoreDocs.length; i++) {
// TODO: shouldn't this be up to the ScoreMode? I.e., we should just invoke ScoreMode.combine, passing 0.0f for the
// secondary score?
in.scoreDocs[i].score *= ctx.queryWeight();
}
// TODO: this is wrong, i.e. we are comparing apples and oranges at this point. It would be better if we always rescored all
// incoming first pass hits, instead of allowing recoring of just the top subset:
Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR);
}
return in;
}
public static | QueryRescorer |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/CountAggFunction.java | {
"start": 1710,
"end": 3162
} | class ____ extends DeclarativeAggregateFunction {
private final UnresolvedReferenceExpression count = unresolvedRef("count");
@Override
public int operandCount() {
return 1;
}
@Override
public UnresolvedReferenceExpression[] aggBufferAttributes() {
return new UnresolvedReferenceExpression[] {count};
}
@Override
public DataType[] getAggBufferTypes() {
return new DataType[] {DataTypes.BIGINT()};
}
@Override
public DataType getResultType() {
return DataTypes.BIGINT();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {/* count= */ literal(0L, getResultType().notNull())};
}
@Override
public Expression[] accumulateExpressions() {
return new Expression[] {
/* count= */ ifThenElse(isNull(operand(0)), count, plus(count, literal(1L)))
};
}
@Override
public Expression[] retractExpressions() {
return new Expression[] {
/* count= */ ifThenElse(isNull(operand(0)), count, minus(count, literal(1L)))
};
}
@Override
public Expression[] mergeExpressions() {
return new Expression[] {/* count= */ plus(count, mergeOperand(count))};
}
// If all input are nulls, count will be 0 and we will get result 0.
@Override
public Expression getValueExpression() {
return count;
}
}
| CountAggFunction |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/JpaListenerOnPrivateMethodOfApplicationScopedCdiBeanTest.java | {
"start": 2002,
"end": 2170
} | class ____ {
@PostPersist
private void postPersist(SomeEntity someEntity) {
fail("should not reach here");
}
}
}
| SomeEntityListener |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/BasicTypeSerializerUpgradeTestSpecifications.java | {
"start": 9653,
"end": 10099
} | class ____
implements TypeSerializerUpgradeTestBase.PreUpgradeSetup<ByteValue> {
@Override
public TypeSerializer<ByteValue> createPriorSerializer() {
return ByteValueSerializer.INSTANCE;
}
@Override
public ByteValue createTestData() {
return new ByteValue((byte) 42);
}
}
/** ByteValueSerializerVerifier. */
public static final | ByteValueSerializerSetup |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/testng/TimedTransactionalTestNGSpringContextTests.java | {
"start": 1154,
"end": 1515
} | class ____ extends AbstractTransactionalTestNGSpringContextTests {
@Test
void testWithoutTimeout() {
assertThatTransaction().isActive();
}
// TODO Enable TestNG test with timeout once we have a solution.
@Test(timeOut = 10000, enabled = false)
void testWithTimeout() {
assertThatTransaction().isActive();
}
}
| TimedTransactionalTestNGSpringContextTests |
java | apache__camel | components/camel-aws/camel-aws2-cw/src/generated/java/org/apache/camel/component/aws2/cw/Cw2EndpointConfigurer.java | {
"start": 734,
"end": 8983
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
Cw2Endpoint target = (Cw2Endpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": target.getConfiguration().setAccessKey(property(camelContext, java.lang.String.class, value)); return true;
case "amazoncwclient":
case "amazonCwClient": target.getConfiguration().setAmazonCwClient(property(camelContext, software.amazon.awssdk.services.cloudwatch.CloudWatchClient.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "name": target.getConfiguration().setName(property(camelContext, java.lang.String.class, value)); return true;
case "overrideendpoint":
case "overrideEndpoint": target.getConfiguration().setOverrideEndpoint(property(camelContext, boolean.class, value)); return true;
case "profilecredentialsname":
case "profileCredentialsName": target.getConfiguration().setProfileCredentialsName(property(camelContext, java.lang.String.class, value)); return true;
case "proxyhost":
case "proxyHost": target.getConfiguration().setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": target.getConfiguration().setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "proxyprotocol":
case "proxyProtocol": target.getConfiguration().setProxyProtocol(property(camelContext, software.amazon.awssdk.core.Protocol.class, value)); return true;
case "region": target.getConfiguration().setRegion(property(camelContext, java.lang.String.class, value)); return true;
case "secretkey":
case "secretKey": target.getConfiguration().setSecretKey(property(camelContext, java.lang.String.class, value)); return true;
case "sessiontoken":
case "sessionToken": target.getConfiguration().setSessionToken(property(camelContext, java.lang.String.class, value)); return true;
case "timestamp": target.getConfiguration().setTimestamp(property(camelContext, java.time.Instant.class, value)); return true;
case "trustallcertificates":
case "trustAllCertificates": target.getConfiguration().setTrustAllCertificates(property(camelContext, boolean.class, value)); return true;
case "unit": target.getConfiguration().setUnit(property(camelContext, java.lang.String.class, value)); return true;
case "uriendpointoverride":
case "uriEndpointOverride": target.getConfiguration().setUriEndpointOverride(property(camelContext, java.lang.String.class, value)); return true;
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": target.getConfiguration().setUseDefaultCredentialsProvider(property(camelContext, boolean.class, value)); return true;
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": target.getConfiguration().setUseProfileCredentialsProvider(property(camelContext, boolean.class, value)); return true;
case "usesessioncredentials":
case "useSessionCredentials": target.getConfiguration().setUseSessionCredentials(property(camelContext, boolean.class, value)); return true;
case "value": target.getConfiguration().setValue(property(camelContext, java.lang.Double.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"amazonCwClient"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": return java.lang.String.class;
case "amazoncwclient":
case "amazonCwClient": return software.amazon.awssdk.services.cloudwatch.CloudWatchClient.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "name": return java.lang.String.class;
case "overrideendpoint":
case "overrideEndpoint": return boolean.class;
case "profilecredentialsname":
case "profileCredentialsName": return java.lang.String.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "proxyprotocol":
case "proxyProtocol": return software.amazon.awssdk.core.Protocol.class;
case "region": return java.lang.String.class;
case "secretkey":
case "secretKey": return java.lang.String.class;
case "sessiontoken":
case "sessionToken": return java.lang.String.class;
case "timestamp": return java.time.Instant.class;
case "trustallcertificates":
case "trustAllCertificates": return boolean.class;
case "unit": return java.lang.String.class;
case "uriendpointoverride":
case "uriEndpointOverride": return java.lang.String.class;
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": return boolean.class;
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": return boolean.class;
case "usesessioncredentials":
case "useSessionCredentials": return boolean.class;
case "value": return java.lang.Double.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
Cw2Endpoint target = (Cw2Endpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": return target.getConfiguration().getAccessKey();
case "amazoncwclient":
case "amazonCwClient": return target.getConfiguration().getAmazonCwClient();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "name": return target.getConfiguration().getName();
case "overrideendpoint":
case "overrideEndpoint": return target.getConfiguration().isOverrideEndpoint();
case "profilecredentialsname":
case "profileCredentialsName": return target.getConfiguration().getProfileCredentialsName();
case "proxyhost":
case "proxyHost": return target.getConfiguration().getProxyHost();
case "proxyport":
case "proxyPort": return target.getConfiguration().getProxyPort();
case "proxyprotocol":
case "proxyProtocol": return target.getConfiguration().getProxyProtocol();
case "region": return target.getConfiguration().getRegion();
case "secretkey":
case "secretKey": return target.getConfiguration().getSecretKey();
case "sessiontoken":
case "sessionToken": return target.getConfiguration().getSessionToken();
case "timestamp": return target.getConfiguration().getTimestamp();
case "trustallcertificates":
case "trustAllCertificates": return target.getConfiguration().isTrustAllCertificates();
case "unit": return target.getConfiguration().getUnit();
case "uriendpointoverride":
case "uriEndpointOverride": return target.getConfiguration().getUriEndpointOverride();
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": return target.getConfiguration().isUseDefaultCredentialsProvider();
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": return target.getConfiguration().isUseProfileCredentialsProvider();
case "usesessioncredentials":
case "useSessionCredentials": return target.getConfiguration().isUseSessionCredentials();
case "value": return target.getConfiguration().getValue();
default: return null;
}
}
}
| Cw2EndpointConfigurer |
java | quarkusio__quarkus | extensions/amazon-lambda-rest/deployment/src/main/java/io/quarkus/amazon/lambda/http/deployment/AmazonLambdaHttpProcessor.java | {
"start": 2035,
"end": 5918
} | class ____ {
private static final DotName AWS_PROXY_REQUEST_CONTEXT = DotName.createSimple(AwsProxyRequestContext.class);
@BuildStep
public void setupCDI(BuildProducer<AdditionalBeanBuildItem> additionalBeans) {
AdditionalBeanBuildItem.Builder builder = AdditionalBeanBuildItem.builder();
builder.addBeanClasses(AwsHttpContextProducers.class).setUnremovable();
additionalBeans.produce(builder.build());
}
@BuildStep
public void setupSecurity(BuildProducer<AdditionalBeanBuildItem> additionalBeans,
LambdaHttpBuildTimeConfig config) {
if (!config.enableSecurity())
return;
AdditionalBeanBuildItem.Builder builder = AdditionalBeanBuildItem.builder().setUnremovable();
builder.addBeanClass(LambdaHttpAuthenticationMechanism.class)
.addBeanClass(DefaultLambdaIdentityProvider.class);
additionalBeans.produce(builder.build());
}
@BuildStep
@Record(ExecutionTime.RUNTIME_INIT)
public void setupConfig(LambdaHttpRecorder recorder) {
// force config to be set as static var in the recorder - TODO - rewrite this, it shouldn't use static vars
recorder.setConfig();
}
@BuildStep
public RequireVirtualHttpBuildItem requestVirtualHttp() {
return RequireVirtualHttpBuildItem.ALWAYS_VIRTUAL;
}
@BuildStep
public ProvidedAmazonLambdaHandlerBuildItem setHandler() {
return new ProvidedAmazonLambdaHandlerBuildItem(LambdaHttpHandler.class, "AWS Lambda HTTP");
}
@BuildStep
public void registerReflectionClasses(BuildProducer<ReflectiveClassBuildItem> reflectiveClassBuildItemBuildProducer) {
reflectiveClassBuildItemBuildProducer
.produce(ReflectiveClassBuildItem.builder(AlbContext.class,
ApiGatewayAuthorizerContext.class,
ApiGatewayRequestIdentity.class,
AwsProxyRequest.class,
AwsProxyRequestContext.class,
AwsProxyResponse.class,
CognitoAuthorizerClaims.class,
ErrorModel.class,
Headers.class,
MultiValuedTreeMap.class)
.reason(getClass().getName())
.methods().fields().build());
}
/**
* Lambda provides /tmp for temporary files. Set vertx cache dir
*/
@BuildStep(onlyIf = IsProduction.class)
void setTempDir(BuildProducer<SystemPropertyBuildItem> systemProperty) {
systemProperty.produce(new SystemPropertyBuildItem(CACHE_DIR_BASE_PROP_NAME, "/tmp/quarkus"));
}
@BuildStep
public void generateScripts(OutputTargetBuildItem target,
BuildProducer<ArtifactResultBuildItem> artifactResultProducer) throws Exception {
String lambdaName = LambdaUtil.artifactToLambda(target.getBaseName());
String output = LambdaUtil.copyResource("lambda/bootstrap-example.sh");
LambdaUtil.writeFile(target, "bootstrap-example.sh", output);
output = LambdaUtil.copyResource("http/sam.jvm.yaml")
.replace("${lambdaName}", lambdaName);
LambdaUtil.writeFile(target, "sam.jvm.yaml", output);
output = LambdaUtil.copyResource("http/sam.native.yaml")
.replace("${lambdaName}", lambdaName);
LambdaUtil.writeFile(target, "sam.native.yaml", output);
}
@BuildStep
public void resteasyReactiveIntegration(BuildProducer<ContextTypeBuildItem> contextTypeProducer,
BuildProducer<UnremovableBeanBuildItem> unremovableBeanProducer) {
contextTypeProducer.produce(new ContextTypeBuildItem(AWS_PROXY_REQUEST_CONTEXT));
unremovableBeanProducer.produce(UnremovableBeanBuildItem.beanTypes(AWS_PROXY_REQUEST_CONTEXT));
}
}
| AmazonLambdaHttpProcessor |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/gwt/GwtCompilationTest.java | {
"start": 11723,
"end": 15234
} | class ____"
+ "<K extends Comparable<K>, V extends K>"
+ " extends CustomFieldSerializer<AutoValue_Baz<K, V>> {",
"",
" public static <K extends Comparable<K>, V extends K> AutoValue_Baz<K, V>"
+ " instantiate(",
" SerializationStreamReader streamReader) throws SerializationException {",
" @SuppressWarnings(\"unchecked\")",
" Map<K, V> map = (Map<K, V>) streamReader.readObject();",
" @SuppressWarnings(\"unchecked\")",
" ImmutableMap<K, V> immutableMap = (ImmutableMap<K, V>) streamReader.readObject();",
" AutoValue_Baz.Builder<K, V> builder$ = new AutoValue_Baz.Builder<K, V>();",
" builder$.map(map);",
" builder$.immutableMapBuilder().putAll(immutableMap);",
" return (AutoValue_Baz<K, V>) builder$.build();",
" }",
"",
" public static <K extends Comparable<K>, V extends K> void serialize(",
" SerializationStreamWriter streamWriter,",
" AutoValue_Baz<K, V> instance) throws SerializationException {",
" streamWriter.writeObject(instance.map());",
" streamWriter.writeObject(instance.immutableMap());",
" }",
"",
" public static <K extends Comparable<K>, V extends K> void deserialize(",
" @SuppressWarnings(\"unused\") SerializationStreamReader streamReader,",
" @SuppressWarnings(\"unused\") AutoValue_Baz<K, V> instance) {",
" }",
"",
" @SuppressWarnings(\"unused\")",
" private int dummy_2865d9ec;",
"",
" @Override",
" public void deserializeInstance(",
" SerializationStreamReader streamReader,",
" AutoValue_Baz<K, V> instance) {",
" deserialize(streamReader, instance);",
" }",
"",
" @Override",
" public boolean hasCustomInstantiateInstance() {",
" return true;",
" }",
"",
" @Override",
" public AutoValue_Baz<K, V> instantiateInstance(",
" SerializationStreamReader streamReader) throws SerializationException {",
" return instantiate(streamReader);",
" }",
"",
" @Override",
" public void serializeInstance(",
" SerializationStreamWriter streamWriter,",
" AutoValue_Baz<K, V> instance) throws SerializationException {",
" serialize(streamWriter, instance);",
" }",
"}");
Compilation compilation =
javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject, GWT_COMPATIBLE);
assertThat(compilation).succeededWithoutWarnings();
assertThat(compilation)
.generatedSourceFile("foo.bar.AutoValue_Baz_CustomFieldSerializer")
.hasSourceEquivalentTo(expectedOutput);
}
private String generatedAnnotationType() {
return isJavaxAnnotationProcessingGeneratedAvailable()
? "javax.annotation.processing.Generated"
: "javax.annotation.Generated";
}
private boolean isJavaxAnnotationProcessingGeneratedAvailable() {
return SourceVersion.latestSupported().compareTo(SourceVersion.RELEASE_8) > 0;
}
}
| AutoValue_Baz_CustomFieldSerializer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/MapKeyJdbcType.java | {
"start": 671,
"end": 884
} | interface ____ {
/**
* The descriptor to use for the map-key column
*
* @see org.hibernate.annotations.JdbcType#value
*/
Class<? extends org.hibernate.type.descriptor.jdbc.JdbcType> value();
}
| MapKeyJdbcType |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WhatsAppEndpointBuilderFactory.java | {
"start": 10915,
"end": 12117
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final WhatsAppHeaderNameBuilder INSTANCE = new WhatsAppHeaderNameBuilder();
/**
* Phone Number ID taken from WhatsApp Meta for Developers Dashboard.
*
* The option is a: {@code Object} type.
*
* Group: producer
*
* @return the name of the header {@code WhatsAppPhoneNumberId}.
*/
public String whatsAppPhoneNumberId() {
return "CamelWhatsAppPhoneNumberId";
}
/**
* Recipient phone number associated with Phone Number ID.
*
* The option is a: {@code Object} type.
*
* Group: producer
*
* @return the name of the header {@code
* WhatsAppRecipientPhoneNumberId}.
*/
public String whatsAppRecipientPhoneNumberId() {
return "CamelWhatsAppRecipientPhoneNumberId";
}
}
static WhatsAppEndpointBuilder endpointBuilder(String componentName, String path) {
| WhatsAppHeaderNameBuilder |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/FullyFinishedOperatorStateTest.java | {
"start": 1631,
"end": 4242
} | class ____ {
@Test
void testFullyFinishedOperatorState() {
OperatorState operatorState =
new FullyFinishedOperatorState(null, null, new OperatorID(), 5, 256);
assertThat(operatorState.isFullyFinished()).isTrue();
assertThat(operatorState.getSubtaskStates()).isEmpty();
assertThat(operatorState.getStates()).isEmpty();
assertThat(operatorState.getNumberCollectedStates()).isZero();
assertThatThrownBy(() -> operatorState.putState(0, OperatorSubtaskState.builder().build()))
.as("Should not be able to put new subtask states for a fully finished state")
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
operatorState.setCoordinatorState(
new ByteStreamStateHandle("test", new byte[] {1, 2, 3, 4})))
.as("Should not be able to put new subtask states for a fully finished state")
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
void testGetDiscardables() throws IOException {
Tuple2<List<StateObject>, OperatorSubtaskState> opSubtaskStates1 =
generateSampleOperatorSubtaskState();
Tuple2<List<StateObject>, OperatorSubtaskState> opSubtaskStates2 =
generateSampleOperatorSubtaskState();
OperatorState operatorState = new OperatorState(null, null, new OperatorID(), 2, 256);
operatorState.putState(0, opSubtaskStates1.f1);
operatorState.putState(1, opSubtaskStates2.f1);
ByteStreamStateHandle coordinatorState =
new ByteStreamStateHandle("test", new byte[] {1, 2, 3, 4});
operatorState.setCoordinatorState(coordinatorState);
HashSet<StateObject> discardables = new HashSet<>();
discardables.addAll(opSubtaskStates1.f0.subList(0, 4));
discardables.add(((InputChannelStateHandle) opSubtaskStates1.f0.get(4)).getDelegate());
discardables.add(
((ResultSubpartitionStateHandle) opSubtaskStates1.f0.get(5)).getDelegate());
discardables.addAll(opSubtaskStates2.f0.subList(0, 4));
discardables.add(((InputChannelStateHandle) opSubtaskStates2.f0.get(4)).getDelegate());
discardables.add(
((ResultSubpartitionStateHandle) opSubtaskStates2.f0.get(5)).getDelegate());
discardables.add(coordinatorState);
assertThat(new HashSet<>(operatorState.getDiscardables())).isEqualTo(discardables);
}
}
| FullyFinishedOperatorStateTest |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/util/ReflectionUtils.java | {
"start": 24311,
"end": 24556
} | class ____ its <em>primitive name</em> or <em>fully qualified
* name</em>, using the supplied {@link ClassLoader}.
*
* <p>See {@link org.junit.platform.commons.support.ReflectionSupport#tryToLoadClass(String)}
* for details on support for | by |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/eventtime/TimestampAssignerSupplier.java | {
"start": 2160,
"end": 2703
} | class ____ be used to register new metrics with Flink and to create a
* nested hierarchy based on the group names. See {@link MetricGroup} for more information
* for the metrics system.
*
* @see MetricGroup
*/
MetricGroup getMetricGroup();
}
/**
* We need an actual class. Implementing this as a lambda in {@link
* #of(SerializableTimestampAssigner)} would not allow the {@link ClosureCleaner} to "reach"
* into the {@link SerializableTimestampAssigner}.
*/
| can |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/transform/AliasToEntityMapResultTransformer.java | {
"start": 552,
"end": 1552
} | class ____ implements ResultTransformer<Map<String,Object>>, TypedTupleTransformer<Map<String,Object>> {
public static final AliasToEntityMapResultTransformer INSTANCE = new AliasToEntityMapResultTransformer();
/**
* Disallow instantiation of AliasToEntityMapResultTransformer.
*/
private AliasToEntityMapResultTransformer() {
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public Class getTransformedType() {
return Map.class;
}
@Override
public Map<String,Object> transformTuple(Object[] tuple, String[] aliases) {
Map<String,Object> result = CollectionHelper.mapOfSize( tuple.length );
for ( int i = 0; i < tuple.length; i++ ) {
String alias = aliases[i];
if ( alias != null ) {
result.put( alias, tuple[i] );
}
}
return result;
}
/**
* Serialization hook for ensuring singleton uniqueing.
*
* @return The singleton instance : {@link #INSTANCE}
*/
private Object readResolve() {
return INSTANCE;
}
}
| AliasToEntityMapResultTransformer |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/spi/Module.java | {
"start": 273,
"end": 445
} | interface ____ {
ObjectDeserializer createDeserializer(ParserConfig config, Class type);
ObjectSerializer createSerializer(SerializeConfig config, Class type);
}
| Module |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/jdbc/internal/JdbcValuesMappingProducerProviderStandard.java | {
"start": 820,
"end": 2120
} | class ____ implements JdbcValuesMappingProducerProvider {
/**
* Singleton access
*/
public static final JdbcValuesMappingProducerProviderStandard INSTANCE = new JdbcValuesMappingProducerProviderStandard();
@Override
public JdbcValuesMappingProducer buildMappingProducer(
SelectStatement sqlAst,
SessionFactoryImplementor sessionFactory) {
return new JdbcValuesMappingProducerStandard( getSelections( sqlAst ), sqlAst.getDomainResultDescriptors() );
}
private static List<SqlSelection> getSelections(SelectStatement selectStatement) {
if ( selectStatement.getQueryPart() instanceof QueryGroup queryGroup ) {
for ( var queryPart : queryGroup.getQueryParts() ) {
final var selectClause = queryPart.getFirstQuerySpec().getSelectClause();
if ( !( selectClause.getSqlSelections().get( 0 )
.getExpressionType().getSingleJdbcMapping().getJdbcType()
instanceof NullJdbcType ) ) {
return selectClause.getSqlSelections();
}
}
}
return selectStatement.getQuerySpec().getSelectClause().getSqlSelections();
}
@Override
public ResultSetMapping buildResultSetMapping(
String name,
boolean isDynamic,
SessionFactoryImplementor sessionFactory) {
return new ResultSetMappingImpl( name, isDynamic );
}
}
| JdbcValuesMappingProducerProviderStandard |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/reactive/server/UserWebTestClientConfigurer.java | {
"start": 2249,
"end": 3232
} | class ____ implements WebTestClientConfigurer {
private final SslInfo info;
private UserWebTestClientConfigurer(SslInfo info) {
this.info = info;
}
@Override
public void afterConfigurerAdded(
WebTestClient.Builder builder, @Nullable WebHttpHandlerBuilder httpHandlerBuilder,
@Nullable ClientHttpConnector connector) {
Assert.state(httpHandlerBuilder != null, "This configurer is applicable only to a mock WebFlux server");
httpHandlerBuilder.filters(filters -> filters.add(0, new UserWebFilter()));
}
/**
* Create a configurer with the given {@link X509Certificate X509 certificate(s)}.
*/
public static UserWebTestClientConfigurer x509(X509Certificate... certificates) {
return sslInfo(SslInfo.from("1", certificates));
}
/**
* Create a configurer with the given {@link SslInfo}.
*/
public static UserWebTestClientConfigurer sslInfo(SslInfo info) {
return new UserWebTestClientConfigurer(info);
}
private final | UserWebTestClientConfigurer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ImpossibleNullComparisonTest.java | {
"start": 15641,
"end": 16388
} | class ____ {
void test() {
TestProtoMessage message = TestProtoMessage.newBuilder().build();
TestFieldProtoMessage field = message.getMessage();
assertNotNull("Message", message.getMessage());
assertThat(message.getMessage()).isNotNull();
}
}
""")
.setArgs(ImmutableList.of("-XepOpt:ProtoFieldNullComparison:MatchTestAssertions=false"))
.doTest();
}
@Test
public void optional() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.bugpatterns.proto.ProtoTest.TestProtoMessage;
import java.util.Optional;
| Test |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/assignment/TaskTopicPartition.java | {
"start": 987,
"end": 1206
} | class ____ during the assignment process to distinguish
* TopicPartitions type. Since the assignment logic can depend on the type of topic we're
* looking at, and the rack information of the partition, this container | used |
java | grpc__grpc-java | grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java | {
"start": 35427,
"end": 36401
} | class ____ {
private final GrpclbClientLoadRecorder loadRecorder;
private final String token;
DropEntry(GrpclbClientLoadRecorder loadRecorder, String token) {
this.loadRecorder = checkNotNull(loadRecorder, "loadRecorder");
this.token = checkNotNull(token, "token");
}
PickResult picked() {
loadRecorder.recordDroppedRequest(token);
return DROP_PICK_RESULT;
}
@Override
public String toString() {
// This is printed in logs. Only include useful information.
return "drop(" + token + ")";
}
@Override
public int hashCode() {
return Objects.hashCode(loadRecorder, token);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof DropEntry)) {
return false;
}
DropEntry that = (DropEntry) other;
return Objects.equal(loadRecorder, that.loadRecorder) && Objects.equal(token, that.token);
}
}
@VisibleForTesting
| DropEntry |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-graphql/src/main/java/smoketest/graphql/ProjectController.java | {
"start": 940,
"end": 1400
} | class ____ {
private final List<Project> projects;
public ProjectController() {
this.projects = Arrays.asList(new Project("spring-boot", "Spring Boot"),
new Project("spring-graphql", "Spring GraphQL"), new Project("spring-framework", "Spring Framework"));
}
@QueryMapping
public Optional<Project> project(@Argument String slug) {
return this.projects.stream().filter((project) -> project.getSlug().equals(slug)).findFirst();
}
}
| ProjectController |
java | apache__maven | its/core-it-suite/src/test/resources/mng-4091/plugin-dependency/maven-it-plugin-plugin-dependency/src/main/java/org/apache/maven/plugin/coreit/DerivedItMojo.java | {
"start": 1228,
"end": 1870
} | class ____ extends EvalMojo {
/**
* The path to the output file for the properties with the expression values. For each expression given by the
* parameter {@link #expressions}, a similar named properties key will be used to save the expression value. If an
* expression evaluated to <code>null</code>, there will be no corresponding key in the properties file.
*/
@Parameter
private File file;
@Override
public void execute() throws MojoFailureException, MojoExecutionException {
if (file != null) {
super.setOutputFile(file);
}
super.execute();
}
}
| DerivedItMojo |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/access/hierarchicalroles/RoleHierarchyImpl.java | {
"start": 1271,
"end": 2709
} | class ____ a role hierarchy for use with various access checking components.
*
* <p>
* Here is an example configuration of a role hierarchy (hint: read the ">" sign as
* "includes"):
*
* <pre>
* <property name="hierarchy">
* <value>
* ROLE_A > ROLE_B
* ROLE_B > ROLE_AUTHENTICATED
* ROLE_AUTHENTICATED > ROLE_UNAUTHENTICATED
* </value>
* </property>
* </pre>
*
* <p>
* Explanation of the above:
* <ul>
* <li>In effect every user with ROLE_A also has ROLE_B, ROLE_AUTHENTICATED and
* ROLE_UNAUTHENTICATED;</li>
* <li>every user with ROLE_B also has ROLE_AUTHENTICATED and ROLE_UNAUTHENTICATED;</li>
* <li>every user with ROLE_AUTHENTICATED also has ROLE_UNAUTHENTICATED.</li>
* </ul>
*
* <p>
* Hierarchical Roles will dramatically shorten your access rules (and also make the
* access rules much more elegant).
*
* <p>
* Consider this access rule for Spring Security's RoleVoter (background: every user that
* is authenticated should be able to log out):
* <pre>/logout.html=ROLE_A,ROLE_B,ROLE_AUTHENTICATED</pre>
*
* With hierarchical roles this can now be shortened to:
* <pre>/logout.html=ROLE_AUTHENTICATED</pre>
*
* In addition to shorter rules this will also make your access rules more readable and
* your intentions clearer.
*
* @author Michael Mayr
* @author Josh Cummings
*/
public final | defines |
java | spring-projects__spring-security | crypto/src/test/java/org/springframework/security/crypto/argon2/Argon2EncodingUtilsTests.java | {
"start": 977,
"end": 6765
} | class ____ {
private final Base64.Decoder decoder = Base64.getDecoder();
private TestDataEntry testDataEntry1 = new TestDataEntry(
"$argon2i$v=19$m=1024,t=3,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs",
new Argon2EncodingUtils.Argon2Hash(this.decoder.decode("cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"),
(new Argon2Parameters.Builder(Argon2Parameters.ARGON2_i)).withVersion(19)
.withMemoryAsKB(1024)
.withIterations(3)
.withParallelism(2)
.withSalt("cRdFbCw23gz2Mlxk".getBytes())
.build()));
private TestDataEntry testDataEntry2 = new TestDataEntry(
"$argon2id$v=19$m=333,t=5,p=2$JDR8N3k1QWx0$+PrEoHOHsWkU9lnsxqnOFrWTVEuOh7ZRIUIbe2yUG8FgTYNCWJfHQI09JAAFKzr2JAvoejEpTMghUt0WsntQYA",
new Argon2EncodingUtils.Argon2Hash(
this.decoder.decode(
"+PrEoHOHsWkU9lnsxqnOFrWTVEuOh7ZRIUIbe2yUG8FgTYNCWJfHQI09JAAFKzr2JAvoejEpTMghUt0WsntQYA"),
(new Argon2Parameters.Builder(Argon2Parameters.ARGON2_id)).withVersion(19)
.withMemoryAsKB(333)
.withIterations(5)
.withParallelism(2)
.withSalt("$4|7y5Alt".getBytes())
.build()));
@Test
public void decodeWhenValidEncodedHashWithIThenDecodeCorrectly() {
assertArgon2HashEquals(this.testDataEntry1.decoded, Argon2EncodingUtils.decode(this.testDataEntry1.encoded));
}
@Test
public void decodeWhenValidEncodedHashWithIDThenDecodeCorrectly() {
assertArgon2HashEquals(this.testDataEntry2.decoded, Argon2EncodingUtils.decode(this.testDataEntry2.encoded));
}
@Test
public void encodeWhenValidArgumentsWithIThenEncodeToCorrectHash() {
assertThat(Argon2EncodingUtils.encode(this.testDataEntry1.decoded.getHash(),
this.testDataEntry1.decoded.getParameters()))
.isEqualTo(this.testDataEntry1.encoded);
}
@Test
public void encodeWhenValidArgumentsWithID2ThenEncodeToCorrectHash() {
assertThat(Argon2EncodingUtils.encode(this.testDataEntry2.decoded.getHash(),
this.testDataEntry2.decoded.getParameters()))
.isEqualTo(this.testDataEntry2.encoded);
}
@Test
public void encodeWhenNonexistingAlgorithmThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils.encode(new byte[] { 0, 1, 2, 3 },
(new Argon2Parameters.Builder(3)).withVersion(19)
.withMemoryAsKB(333)
.withIterations(5)
.withParallelism(2)
.build()));
}
@Test
public void decodeWhenNotAnArgon2HashThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils.decode("notahash"));
}
@Test
public void decodeWhenNonexistingAlgorithmThenThrowException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> Argon2EncodingUtils.decode(
"$argon2x$v=19$m=1024,t=3,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"))
.withMessageContaining("argon2x");
}
@Test
public void decodeWhenIllegalVersionParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=x$m=1024,t=3,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenIllegalMemoryParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$m=x,t=3,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenIllegalIterationsParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$m=1024,t=x,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenIllegalParallelityParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$m=1024,t=3,p=x$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenMissingVersionParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$m=1024,t=3,p=x$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenMissingMemoryParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$t=3,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenMissingIterationsParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$m=1024,p=2$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
@Test
public void decodeWhenMissingParallelityParameterThenThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> Argon2EncodingUtils
.decode("$argon2i$v=19$m=1024,t=3$Y1JkRmJDdzIzZ3oyTWx4aw$cGE5Cbd/cx7micVhXVBdH5qTr66JI1iUyuNNVAnErXs"));
}
private void assertArgon2HashEquals(Argon2EncodingUtils.Argon2Hash expected,
Argon2EncodingUtils.Argon2Hash actual) {
assertThat(actual.getHash()).isEqualTo(expected.getHash());
assertThat(actual.getParameters().getSalt()).isEqualTo(expected.getParameters().getSalt());
assertThat(actual.getParameters().getType()).isEqualTo(expected.getParameters().getType());
assertThat(actual.getParameters().getVersion()).isEqualTo(expected.getParameters().getVersion());
assertThat(actual.getParameters().getMemory()).isEqualTo(expected.getParameters().getMemory());
assertThat(actual.getParameters().getIterations()).isEqualTo(expected.getParameters().getIterations());
assertThat(actual.getParameters().getLanes()).isEqualTo(expected.getParameters().getLanes());
}
private static | Argon2EncodingUtilsTests |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java | {
"start": 1943,
"end": 5388
} | class ____ {
public static final String MY_KEY = "myKey";
public static final String MY_CONNECTOR = "myConnector";
public static final String TEST_KEY = "testKey";
public static final String TEST_PATH = "testPath";
public static final String TEST_KEY_WITH_TTL = "testKeyWithTTL";
public static final String TEST_KEY_WITH_LONGER_TTL = "testKeyWithLongerTTL";
public static final String TEST_RESULT = "testResult";
public static final String TEST_RESULT_WITH_TTL = "testResultWithTTL";
public static final String TEST_RESULT_WITH_LONGER_TTL = "testResultWithLongerTTL";
@Mock
private Herder herder;
@Mock
private Worker worker;
@Mock
private HerderRequest requestId;
private WorkerConfigTransformer configTransformer;
@BeforeEach
public void setup() {
configTransformer = new WorkerConfigTransformer(worker, Map.of("test", new TestConfigProvider()));
}
@Test
public void testReplaceVariable() {
// Execution
Map<String, String> result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKey}"));
// Assertions
assertEquals(TEST_RESULT, result.get(MY_KEY));
}
@Test
public void testReplaceVariableWithTTL() {
// Execution
Map<String, String> props = new HashMap<>();
props.put(MY_KEY, "${test:testPath:testKeyWithTTL}");
props.put(CONFIG_RELOAD_ACTION_CONFIG, CONFIG_RELOAD_ACTION_NONE);
Map<String, String> result = configTransformer.transform(MY_CONNECTOR, props);
// Assertions
assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY));
}
@Test
public void testReplaceVariableWithTTLAndScheduleRestart() {
// Setup
when(worker.herder()).thenReturn(herder);
when(herder.restartConnector(eq(1L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId);
// Execution
Map<String, String> result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}"));
// Assertions
assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY));
verify(herder).restartConnector(eq(1L), eq(MY_CONNECTOR), notNull());
}
@Test
public void testReplaceVariableWithTTLFirstCancelThenScheduleRestart() {
// Setup
when(worker.herder()).thenReturn(herder);
when(herder.restartConnector(eq(1L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId);
when(herder.restartConnector(eq(10L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId);
// Execution
Map<String, String> result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}"));
// Assertions
assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY));
verify(herder).restartConnector(eq(1L), eq(MY_CONNECTOR), notNull());
// Execution
result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithLongerTTL}"));
// Assertions
assertEquals(TEST_RESULT_WITH_LONGER_TTL, result.get(MY_KEY));
verify(requestId, times(1)).cancel();
verify(herder).restartConnector(eq(10L), eq(MY_CONNECTOR), notNull());
}
@Test
public void testTransformNullConfiguration() {
assertNull(configTransformer.transform(MY_CONNECTOR, null));
}
public static | WorkerConfigTransformerTest |
java | apache__camel | components/camel-aws/camel-aws2-lambda/src/generated/java/org/apache/camel/component/aws2/lambda/Lambda2EndpointConfigurer.java | {
"start": 738,
"end": 8691
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
Lambda2Endpoint target = (Lambda2Endpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": target.getConfiguration().setAccessKey(property(camelContext, java.lang.String.class, value)); return true;
case "awslambdaclient":
case "awsLambdaClient": target.getConfiguration().setAwsLambdaClient(property(camelContext, software.amazon.awssdk.services.lambda.LambdaClient.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, org.apache.camel.component.aws2.lambda.Lambda2Operations.class, value)); return true;
case "overrideendpoint":
case "overrideEndpoint": target.getConfiguration().setOverrideEndpoint(property(camelContext, boolean.class, value)); return true;
case "pojorequest":
case "pojoRequest": target.getConfiguration().setPojoRequest(property(camelContext, boolean.class, value)); return true;
case "profilecredentialsname":
case "profileCredentialsName": target.getConfiguration().setProfileCredentialsName(property(camelContext, java.lang.String.class, value)); return true;
case "proxyhost":
case "proxyHost": target.getConfiguration().setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": target.getConfiguration().setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "proxyprotocol":
case "proxyProtocol": target.getConfiguration().setProxyProtocol(property(camelContext, software.amazon.awssdk.core.Protocol.class, value)); return true;
case "region": target.getConfiguration().setRegion(property(camelContext, java.lang.String.class, value)); return true;
case "secretkey":
case "secretKey": target.getConfiguration().setSecretKey(property(camelContext, java.lang.String.class, value)); return true;
case "sessiontoken":
case "sessionToken": target.getConfiguration().setSessionToken(property(camelContext, java.lang.String.class, value)); return true;
case "trustallcertificates":
case "trustAllCertificates": target.getConfiguration().setTrustAllCertificates(property(camelContext, boolean.class, value)); return true;
case "uriendpointoverride":
case "uriEndpointOverride": target.getConfiguration().setUriEndpointOverride(property(camelContext, java.lang.String.class, value)); return true;
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": target.getConfiguration().setUseDefaultCredentialsProvider(property(camelContext, boolean.class, value)); return true;
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": target.getConfiguration().setUseProfileCredentialsProvider(property(camelContext, boolean.class, value)); return true;
case "usesessioncredentials":
case "useSessionCredentials": target.getConfiguration().setUseSessionCredentials(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"awsLambdaClient"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": return java.lang.String.class;
case "awslambdaclient":
case "awsLambdaClient": return software.amazon.awssdk.services.lambda.LambdaClient.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "operation": return org.apache.camel.component.aws2.lambda.Lambda2Operations.class;
case "overrideendpoint":
case "overrideEndpoint": return boolean.class;
case "pojorequest":
case "pojoRequest": return boolean.class;
case "profilecredentialsname":
case "profileCredentialsName": return java.lang.String.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "proxyprotocol":
case "proxyProtocol": return software.amazon.awssdk.core.Protocol.class;
case "region": return java.lang.String.class;
case "secretkey":
case "secretKey": return java.lang.String.class;
case "sessiontoken":
case "sessionToken": return java.lang.String.class;
case "trustallcertificates":
case "trustAllCertificates": return boolean.class;
case "uriendpointoverride":
case "uriEndpointOverride": return java.lang.String.class;
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": return boolean.class;
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": return boolean.class;
case "usesessioncredentials":
case "useSessionCredentials": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
Lambda2Endpoint target = (Lambda2Endpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesskey":
case "accessKey": return target.getConfiguration().getAccessKey();
case "awslambdaclient":
case "awsLambdaClient": return target.getConfiguration().getAwsLambdaClient();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "operation": return target.getConfiguration().getOperation();
case "overrideendpoint":
case "overrideEndpoint": return target.getConfiguration().isOverrideEndpoint();
case "pojorequest":
case "pojoRequest": return target.getConfiguration().isPojoRequest();
case "profilecredentialsname":
case "profileCredentialsName": return target.getConfiguration().getProfileCredentialsName();
case "proxyhost":
case "proxyHost": return target.getConfiguration().getProxyHost();
case "proxyport":
case "proxyPort": return target.getConfiguration().getProxyPort();
case "proxyprotocol":
case "proxyProtocol": return target.getConfiguration().getProxyProtocol();
case "region": return target.getConfiguration().getRegion();
case "secretkey":
case "secretKey": return target.getConfiguration().getSecretKey();
case "sessiontoken":
case "sessionToken": return target.getConfiguration().getSessionToken();
case "trustallcertificates":
case "trustAllCertificates": return target.getConfiguration().isTrustAllCertificates();
case "uriendpointoverride":
case "uriEndpointOverride": return target.getConfiguration().getUriEndpointOverride();
case "usedefaultcredentialsprovider":
case "useDefaultCredentialsProvider": return target.getConfiguration().isUseDefaultCredentialsProvider();
case "useprofilecredentialsprovider":
case "useProfileCredentialsProvider": return target.getConfiguration().isUseProfileCredentialsProvider();
case "usesessioncredentials":
case "useSessionCredentials": return target.getConfiguration().isUseSessionCredentials();
default: return null;
}
}
}
| Lambda2EndpointConfigurer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/cascade/PersistOnLazyCollectionTests.java | {
"start": 1417,
"end": 4118
} | class ____ {
@Test
public void testMutation(SessionFactoryScope scope) {
final SQLStatementInspector sqlCollector = scope.getCollectingStatementInspector();
final Order detached = scope.fromTransaction( (session) -> {
sqlCollector.clear();
final Order order = session.find( Order.class, 1 );
// make sure lineItems is not initialized
assertThat( Hibernate.isInitialized( order.getLineItems() ) ).isFalse();
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
assertThat( sqlCollector.getSqlQueries().get( 0 ) ).doesNotContain( " line_items " );
// mutate order, which will trigger a flush before we return
order.setName( "Order 00001" );
return order;
} );
// make sure lineItems is still not initialized
assertThat( Hibernate.isInitialized( detached.getLineItems() ) ).isFalse();
try {
//noinspection ResultOfMethodCallIgnored
detached.getLineItems().size();
fail( "Should throw LazyInitializationException" );
}
catch (LazyInitializationException expected) {
}
}
@Test
public void testCascadePersist(SessionFactoryScope scope) {
final SQLStatementInspector sqlCollector = scope.getCollectingStatementInspector();
final Order detached = scope.fromTransaction( (session) -> {
sqlCollector.clear();
final Order order = session.find( Order.class, 1 );
// make sure lineItems is not initialized
assertThat( Hibernate.isInitialized( order.getLineItems() ) ).isFalse();
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
assertThat( sqlCollector.getSqlQueries().get( 0 ) ).doesNotContain( " line_items " );
// create a Payment and persist, which will cascade the persist to Order
// - we want to make sure this won't trigger initializing the lazy collection
final Payment payment = new Payment( 1, order, "123456789" );
session.persist( payment );
return order;
} );
// make sure lineItems is still not initialized
assertThat( Hibernate.isInitialized( detached.getLineItems() ) ).isFalse();
try {
//noinspection ResultOfMethodCallIgnored
detached.getLineItems().size();
fail( "Should throw LazyInitializationException" );
}
catch (LazyInitializationException expected) {
}
}
@BeforeEach
void createTestData(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
final Order order = new Order( 1, "Order 1" );
order.addLineItem( new LineItem( 1, "Line 1" ) );
order.addLineItem( new LineItem( 2, "Line 2" ) );
session.persist( order );
} );
}
@AfterEach
void dropTestData(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Entity
@Table(name = "orders")
public static | PersistOnLazyCollectionTests |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesCharSequenceToObjectConverterTests.java | {
"start": 4080,
"end": 4246
} | class ____ implements Converter<String, Long> {
@Override
public Long convert(String source) {
return Long.valueOf(source);
}
}
static | StringToLongConverter |
java | apache__camel | components/camel-fastjson/src/test/java/org/apache/camel/component/fastjson/TestPojo.java | {
"start": 856,
"end": 1351
} | class ____ {
private String name;
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
@Override
public boolean equals(Object obj) {
return this.name.equals(((TestPojo) obj).getName());
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public String toString() {
return "TestPojo[" + name + "]";
}
}
| TestPojo |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ToStringReturnsNull.java | {
"start": 2846,
"end": 3178
} | class ____ extends TreeScanner<Boolean, Void> {
@Override
public Boolean scan(Tree tree, Void unused) {
return Boolean.TRUE.equals(super.scan(tree, null));
}
@Override
public final Boolean reduce(Boolean a, Boolean b) {
return Boolean.TRUE.equals(a) || Boolean.TRUE.equals(b);
}
}
}
| BooleanScanner |
java | netty__netty | codec-redis/src/test/java/io/netty/handler/codec/redis/RedisCodecTestUtil.java | {
"start": 782,
"end": 1460
} | class ____ {
private RedisCodecTestUtil() {
}
static byte[] bytesOf(long value) {
return bytesOf(Long.toString(value));
}
static byte[] bytesOf(String s) {
return s.getBytes(CharsetUtil.UTF_8);
}
static byte[] bytesOf(ByteBuf buf) {
byte[] data = new byte[buf.readableBytes()];
buf.readBytes(data);
return data;
}
static String stringOf(ByteBuf buf) {
return new String(bytesOf(buf));
}
static ByteBuf byteBufOf(String s) {
return byteBufOf(bytesOf(s));
}
static ByteBuf byteBufOf(byte[] data) {
return Unpooled.wrappedBuffer(data);
}
}
| RedisCodecTestUtil |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/ext/ExternalTypeIdWithCreator3045Test.java | {
"start": 2242,
"end": 4403
} | class ____ {
public final long time;
public String type;
public Object data;
@JsonCreator
public MyJson3045(@JsonProperty("time") long t) {
time = t;
}
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.EXTERNAL_PROPERTY,
property = "type")
@JsonTypeIdResolver(ChildBaseByParentTypeResolver.class)
public void setData(Object data) {
this.data = data;
}
@Override
public String toString() {
return "[time="+time+", type="+type+", data="+data+"]";
}
}
// Need to explicitly allow `Object` as base type
private final ObjectMapper MAPPER = jsonMapperBuilder()
.polymorphicTypeValidator(NoCheckSubTypeValidator.instance)
.build();
public void testExternalIdWithAnySetter3045() throws Exception
{
// First cases where the last Creator argument comes last:
_testExternalIdWithAnySetter3045(a2q(
"{'type':'track','data':{'data-internal':'toto'},'time':345}"));
_testExternalIdWithAnySetter3045(a2q(
"{'data':{'data-internal':'toto'},'type':'track', 'time':345}"));
// then a case where it comes in the middle
_testExternalIdWithAnySetter3045(a2q(
"{'data':{'data-internal':'toto'},'time':345, 'type':'track'}"));
// and finally one where we'll start with it
_testExternalIdWithAnySetter3045(a2q(
"{'time':345, 'type':'track', 'data':{'data-internal':'toto'}}"));
}
private void _testExternalIdWithAnySetter3045(String input) throws Exception
{
MyJson3045 result = MAPPER.readValue(input, MyJson3045.class);
assertEquals(345, result.time);
if (result.data == null) {
fail("Expected non-null data; result object = "+result);
}
assertEquals("track", result.type);
assertEquals(MyData.class, result.data.getClass());
MyData data = (MyData) result.data;
assertEquals(1, data.size());
assertEquals("toto", data.find("data-internal"));
}
}
| MyJson3045 |
java | elastic__elasticsearch | x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java | {
"start": 4146,
"end": 55162
} | class ____ extends MlNativeDataFrameAnalyticsIntegTestCase {
static final String BOOLEAN_FIELD = "boolean-field";
static final String NUMERICAL_FIELD = "numerical-field";
static final String DISCRETE_NUMERICAL_FIELD = "discrete-numerical-field";
static final String TEXT_FIELD = "text-field";
static final String KEYWORD_FIELD = "keyword-field";
static final String NESTED_FIELD = "outer-field.inner-field";
static final String ALIAS_TO_KEYWORD_FIELD = "alias-to-keyword-field";
static final String ALIAS_TO_NESTED_FIELD = "alias-to-nested-field";
static final List<Boolean> BOOLEAN_FIELD_VALUES = List.of(false, true);
static final List<Double> NUMERICAL_FIELD_VALUES = List.of(1.0, 2.0);
static final List<Integer> DISCRETE_NUMERICAL_FIELD_VALUES = List.of(10, 20);
static final List<String> KEYWORD_FIELD_VALUES = List.of("cat", "dog");
private String jobId;
private String sourceIndex;
private String destIndex;
private boolean analysisUsesExistingDestIndex;
@Before
public void setupLogging() {
updateClusterSettings(
Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.process", "DEBUG")
.put("logger.org.elasticsearch.xpack.ml.dataframe", "DEBUG")
);
}
@After
public void cleanup() {
updateClusterSettings(
Settings.builder().putNull("logger.org.elasticsearch.xpack.ml.process").putNull("logger.org.elasticsearch.xpack.ml.dataframe")
);
cleanUp();
}
public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws Exception {
initialize("classification_single_numeric_feature_and_mixed_data_set");
String predictedClassField = KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 300, 50, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(
jobId,
sourceIndex,
destIndex,
null,
new Classification(
KEYWORD_FIELD,
BoostedTreeParams.builder().setNumTopFeatureImportanceValues(1).build(),
null,
null,
null,
null,
null,
null,
null
)
);
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertThat(getFieldValue(resultsObject, "is_training"), is(destDoc.containsKey(KEYWORD_FIELD)));
assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES);
@SuppressWarnings("unchecked")
List<Map<String, Object>> importanceArray = (List<Map<String, Object>>) resultsObject.get("feature_importance");
assertThat(importanceArray, hasSize(greaterThan(0)));
}
});
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testWithDatastreams() throws Exception {
initialize("classification_with_datastreams", true);
String predictedClassField = KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 300, 50, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(
jobId,
sourceIndex,
destIndex,
null,
new Classification(
KEYWORD_FIELD,
BoostedTreeParams.builder().setNumTopFeatureImportanceValues(1).build(),
null,
null,
null,
null,
null,
null,
null
)
);
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertThat(getFieldValue(resultsObject, "is_training"), is(destDoc.containsKey(KEYWORD_FIELD)));
assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES);
@SuppressWarnings("unchecked")
List<Map<String, Object>> importanceArray = (List<Map<String, Object>>) resultsObject.get("feature_importance");
assertThat(importanceArray, hasSize(greaterThan(0)));
}
});
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsHundred() throws Exception {
initialize("classification_only_training_data_and_training_percent_is_100");
String predictedClassField = KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 300, 0, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertThat(getFieldValue(resultsObject, "is_training"), is(true));
assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES);
}
});
GetDataFrameAnalyticsStatsAction.Response.Stats stats = getAnalyticsStats(jobId);
assertThat(stats.getDataCounts().getJobId(), equalTo(jobId));
assertThat(stats.getDataCounts().getTrainingDocsCount(), equalTo(300L));
assertThat(stats.getDataCounts().getTestDocsCount(), equalTo(0L));
assertThat(stats.getDataCounts().getSkippedDocsCount(), equalTo(0L));
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testWithCustomFeatureProcessors() throws Exception {
initialize("classification_with_custom_feature_processors");
String predictedClassField = KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 100, 0, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(
jobId,
sourceIndex,
destIndex,
null,
new Classification(
KEYWORD_FIELD,
BoostedTreeParams.builder().setNumTopFeatureImportanceValues(0).build(),
null,
null,
2,
10.0,
42L,
Arrays.asList(
new OneHotEncoding(
ALIAS_TO_KEYWORD_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom"),
true
),
new OneHotEncoding(
ALIAS_TO_NESTED_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom_1", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom_1"),
true
),
new OneHotEncoding(
NESTED_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom_2", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom_2"),
true
),
new OneHotEncoding(
TEXT_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom_3", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom_3"),
true
)
),
null
)
);
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES);
}
});
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
GetTrainedModelsAction.Response response = client().execute(
GetTrainedModelsAction.INSTANCE,
new GetTrainedModelsAction.Request(jobId + "*", Collections.emptyList(), Collections.singleton("definition"))
).actionGet();
assertThat(response.getResources().results().size(), equalTo(1));
TrainedModelConfig modelConfig = response.getResources().results().get(0);
modelConfig.ensureParsedDefinition(xContentRegistry());
assertThat(modelConfig.getModelDefinition().getPreProcessors().size(), greaterThan(0));
for (int i = 0; i < 4; i++) {
PreProcessor preProcessor = modelConfig.getModelDefinition().getPreProcessors().get(i);
assertThat(preProcessor.isCustom(), is(true));
}
for (int i = 4; i < modelConfig.getModelDefinition().getPreProcessors().size(); i++) {
PreProcessor preProcessor = modelConfig.getModelDefinition().getPreProcessors().get(i);
assertThat(preProcessor.isCustom(), is(false));
}
}
public <T> void testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
String jobId,
String dependentVariable,
List<T> dependentVariableValues,
String expectedMappingTypeForPredictedField
) throws Exception {
initialize(jobId);
String predictedClassField = dependentVariable + "_prediction";
indexData(sourceIndex, 300, 0, dependentVariable);
int numTopClasses = randomBoolean() ? 2 : -1; // Occasionally it's worth testing the special value -1.
int expectedNumTopClasses = 2;
DataFrameAnalyticsConfig config = buildAnalytics(
jobId,
sourceIndex,
destIndex,
null,
new Classification(dependentVariable, BoostedTreeParams.builder().build(), null, null, numTopClasses, 50.0, null, null, null)
);
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
int trainingRowsCount = 0;
int nonTrainingRowsCount = 0;
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(dependentVariableValues)));
assertTopClasses(resultsObject, expectedNumTopClasses, dependentVariable, dependentVariableValues);
// Let's just assert there's both training and non-training results
//
boolean isTraining = getFieldValue(resultsObject, "is_training");
if (isTraining) {
trainingRowsCount++;
} else {
nonTrainingRowsCount++;
}
}
assertThat(trainingRowsCount, greaterThan(0));
assertThat(nonTrainingRowsCount, greaterThan(0));
});
GetDataFrameAnalyticsStatsAction.Response.Stats stats = getAnalyticsStats(jobId);
assertThat(stats.getDataCounts().getJobId(), equalTo(jobId));
assertThat(stats.getDataCounts().getTrainingDocsCount(), greaterThan(0L));
assertThat(stats.getDataCounts().getTrainingDocsCount(), lessThan(300L));
assertThat(stats.getDataCounts().getTestDocsCount(), greaterThan(0L));
assertThat(stats.getDataCounts().getTestDocsCount(), lessThan(300L));
assertThat(stats.getDataCounts().getSkippedDocsCount(), equalTo(0L));
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, expectedMappingTypeForPredictedField);
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(dependentVariable, dependentVariableValues, "ml." + predictedClassField);
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsKeyword() throws Exception {
testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_keyword",
KEYWORD_FIELD,
KEYWORD_FIELD_VALUES,
"keyword"
);
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsInteger() throws Exception {
testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_integer",
DISCRETE_NUMERICAL_FIELD,
DISCRETE_NUMERICAL_FIELD_VALUES,
"integer"
);
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsDouble() {
ElasticsearchStatusException e = expectThrows(
ElasticsearchStatusException.class,
() -> testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_double",
NUMERICAL_FIELD,
NUMERICAL_FIELD_VALUES,
null
)
);
assertThat(e.getMessage(), startsWith("invalid types [double] for required field [numerical-field];"));
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsText() {
ElasticsearchStatusException e = expectThrows(
ElasticsearchStatusException.class,
() -> testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_text",
TEXT_FIELD,
KEYWORD_FIELD_VALUES,
null
)
);
assertThat(e.getMessage(), startsWith("field [text-field] of type [text] is non-aggregatable"));
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsTextAndKeyword() throws Exception {
testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_text_and_keyword",
TEXT_FIELD + ".keyword",
KEYWORD_FIELD_VALUES,
"keyword"
);
}
public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsBoolean() throws Exception {
testWithOnlyTrainingRowsAndTrainingPercentIsFifty(
"classification_training_percent_is_50_boolean",
BOOLEAN_FIELD,
BOOLEAN_FIELD_VALUES,
"boolean"
);
}
public void testStopAndRestart() throws Exception {
initialize("classification_stop_and_restart");
String predictedClassField = KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 350, 0, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
NodeAcknowledgedResponse response = startAnalytics(jobId);
assertThat(response.getNode(), not(emptyString()));
String phaseToWait = randomFrom("reindexing", "loading_data", "feature_selection", "fine_tuning_parameters");
waitUntilSomeProgressHasBeenMadeForPhase(jobId, phaseToWait);
stopAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
// Now let's start it again
try {
response = startAnalytics(jobId);
assertThat(response.getNode(), not(emptyString()));
} catch (Exception e) {
if (e.getMessage().equals("Cannot start because the job has already finished")) {
// That means the job had managed to complete
} else {
throw e;
}
}
waitUntilAnalyticsIsStopped(jobId);
assertResponse(prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000), sourceData -> {
for (SearchHit hit : sourceData.getHits()) {
Map<String, Object> destDoc = getDestDoc(config, hit);
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertThat(getFieldValue(resultsObject, "is_training"), is(true));
assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES);
}
});
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertAtLeastOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testDependentVariableCardinalityTooHighError() throws Exception {
initialize("cardinality_too_high");
indexData(sourceIndex, 6, 5, KEYWORD_FIELD);
// Index enough documents to have more classes than the allowed limit
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
for (int i = 0; i < Classification.MAX_DEPENDENT_VARIABLE_CARDINALITY - 1; i++) {
IndexRequest indexRequest = new IndexRequest(sourceIndex).source(KEYWORD_FIELD, "fox-" + i);
bulkRequestBuilder.add(indexRequest);
}
BulkResponse bulkResponse = bulkRequestBuilder.get();
if (bulkResponse.hasFailures()) {
fail("Failed to index data: " + bulkResponse.buildFailureMessage());
}
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> startAnalytics(jobId));
assertThat(e.status().getStatus(), equalTo(400));
assertThat(e.getMessage(), equalTo("Field [keyword-field] must have at most [100] distinct values but there were at least [101]"));
}
public void testDependentVariableCardinalityTooHighButWithQueryMakesItWithinRange() throws Exception {
initialize("cardinality_too_high_with_query");
indexData(sourceIndex, 6, 5, KEYWORD_FIELD);
// Index enough documents to have more classes than the allowed limit
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
for (int i = 0; i < Classification.MAX_DEPENDENT_VARIABLE_CARDINALITY - 1; i++) {
IndexRequest indexRequest = new IndexRequest(sourceIndex).source(KEYWORD_FIELD, "fox-" + i);
bulkRequestBuilder.add(indexRequest);
}
BulkResponse bulkResponse = bulkRequestBuilder.get();
if (bulkResponse.hasFailures()) {
fail("Failed to index data: " + bulkResponse.buildFailureMessage());
}
QueryBuilder query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery(KEYWORD_FIELD, KEYWORD_FIELD_VALUES));
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD), query);
putAnalytics(config);
// Should not throw
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
}
public void testDependentVariableIsNested() throws Exception {
initialize("dependent_variable_is_nested");
String predictedClassField = NESTED_FIELD + "_prediction";
indexData(sourceIndex, 100, 0, NESTED_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(NESTED_FIELD));
putAnalytics(config);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertEvaluation(NESTED_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testDependentVariableIsAliasToKeyword() throws Exception {
initialize("dependent_variable_is_alias");
String predictedClassField = ALIAS_TO_KEYWORD_FIELD + "_prediction";
indexData(sourceIndex, 100, 0, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(ALIAS_TO_KEYWORD_FIELD));
putAnalytics(config);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertEvaluation(ALIAS_TO_KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testDependentVariableIsAliasToNested() throws Exception {
initialize("dependent_variable_is_alias_to_nested");
String predictedClassField = ALIAS_TO_NESTED_FIELD + "_prediction";
indexData(sourceIndex, 100, 0, NESTED_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(ALIAS_TO_NESTED_FIELD));
putAnalytics(config);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertEvaluation(ALIAS_TO_NESTED_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testTwoJobsWithSameRandomizeSeedUseSameTrainingSet() throws Exception {
String sourceIndex = "classification_two_jobs_with_same_randomize_seed_source";
String dependentVariable = KEYWORD_FIELD;
createIndex(sourceIndex, false);
// We use 100 rows as we can't set this too low. If too low it is possible
// we only train with rows of one of the two classes which leads to a failure.
indexData(sourceIndex, 100, 0, dependentVariable);
String firstJobId = "classification_two_jobs_with_same_randomize_seed_1";
String firstJobDestIndex = firstJobId + "_dest";
BoostedTreeParams boostedTreeParams = BoostedTreeParams.builder()
.setLambda(1.0)
.setGamma(1.0)
.setEta(1.0)
.setFeatureBagFraction(1.0)
.setMaxTrees(1)
.build();
DataFrameAnalyticsConfig firstJob = buildAnalytics(
firstJobId,
sourceIndex,
firstJobDestIndex,
null,
new Classification(dependentVariable, boostedTreeParams, null, null, 1, 50.0, null, null, null)
);
putAnalytics(firstJob);
startAnalytics(firstJobId);
waitUntilAnalyticsIsStopped(firstJobId);
String secondJobId = "classification_two_jobs_with_same_randomize_seed_2";
String secondJobDestIndex = secondJobId + "_dest";
long randomizeSeed = ((Classification) firstJob.getAnalysis()).getRandomizeSeed();
DataFrameAnalyticsConfig secondJob = buildAnalytics(
secondJobId,
sourceIndex,
secondJobDestIndex,
null,
new Classification(dependentVariable, boostedTreeParams, null, null, 1, 50.0, randomizeSeed, null, null)
);
putAnalytics(secondJob);
startAnalytics(secondJobId);
waitUntilAnalyticsIsStopped(secondJobId);
// Now we compare they both used the same training rows
Set<String> firstRunTrainingRowsIds = getTrainingRowsIds(firstJobDestIndex);
Set<String> secondRunTrainingRowsIds = getTrainingRowsIds(secondJobDestIndex);
assertThat(secondRunTrainingRowsIds, equalTo(firstRunTrainingRowsIds));
}
public void testSetUpgradeMode_ExistingTaskGetsUnassigned() throws Exception {
initialize("classification_set_upgrade_mode");
indexData(sourceIndex, 300, 0, KEYWORD_FIELD);
assertThat(upgradeMode(), is(false));
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
startAnalytics(jobId);
assertThat(analyticsTaskList(), hasSize(1));
assertThat(analyticsAssignedTaskList(), hasSize(1));
setUpgradeModeTo(true);
assertThat(analyticsTaskList(), hasSize(1));
assertThat(analyticsAssignedTaskList(), is(empty()));
assertBusy(() -> {
try {
GetDataFrameAnalyticsStatsAction.Response.Stats analyticsStats = getAnalyticsStats(jobId);
assertThat(analyticsStats.getAssignmentExplanation(), is(equalTo(AWAITING_UPGRADE.getExplanation())));
assertThat(analyticsStats.getNode(), is(nullValue()));
} catch (ElasticsearchException e) {
logger.error(() -> "[" + jobId + "] Encountered exception while fetching analytics stats", e);
fail(e.getDetailedMessage());
}
});
setUpgradeModeTo(false);
assertThat(analyticsTaskList(), hasSize(1));
assertBusy(() -> assertThat(analyticsAssignedTaskList(), hasSize(1)));
assertBusy(() -> {
try {
GetDataFrameAnalyticsStatsAction.Response.Stats analyticsStats = getAnalyticsStats(jobId);
assertThat(analyticsStats.getAssignmentExplanation(), is(not(equalTo(AWAITING_UPGRADE.getExplanation()))));
} catch (ElasticsearchException e) {
logger.error(() -> "[" + jobId + "] Encountered exception while fetching analytics stats", e);
fail(e.getDetailedMessage());
}
});
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
}
public void testSetUpgradeMode_NewTaskDoesNotStart() throws Exception {
initialize("classification_set_upgrade_mode_task_should_not_start");
indexData(sourceIndex, 100, 0, KEYWORD_FIELD);
assertThat(upgradeMode(), is(false));
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
setUpgradeModeTo(true);
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> startAnalytics(config.getId()));
assertThat(e.status(), is(equalTo(RestStatus.TOO_MANY_REQUESTS)));
assertThat(
e.getMessage(),
is(equalTo("Cannot perform cluster:admin/xpack/ml/data_frame/analytics/start action while upgrade mode is enabled"))
);
assertThat(analyticsTaskList(), is(empty()));
assertThat(analyticsAssignedTaskList(), is(empty()));
}
public void testDeleteExpiredData_RemovesUnusedState() throws Exception {
initialize("classification_delete_expired_data");
indexData(sourceIndex, 100, 0, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
// Call _delete_expired_data API and check nothing was deleted
assertThat(deleteExpiredData().isDeleted(), is(true));
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
// Delete the config straight from the config index
DeleteResponse deleteResponse = client().prepareDelete(".ml-config", DataFrameAnalyticsConfig.documentId(jobId))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
assertThat(deleteResponse.status(), equalTo(RestStatus.OK));
// Now calling the _delete_expired_data API should remove unused state
assertThat(deleteExpiredData().isDeleted(), is(true));
assertHitCount(prepareSearch(".ml-state*"), 0);
}
public void testUpdateAnalytics() throws Exception {
initialize("update_analytics_description");
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
assertThat(getOnlyElement(getAnalytics(jobId)).getDescription(), is(nullValue()));
updateAnalytics(new DataFrameAnalyticsConfigUpdate.Builder(jobId).setDescription("updated-description-1").build());
assertThat(getOnlyElement(getAnalytics(jobId)).getDescription(), is(equalTo("updated-description-1")));
// Noop update
updateAnalytics(new DataFrameAnalyticsConfigUpdate.Builder(jobId).build());
assertThat(getOnlyElement(getAnalytics(jobId)).getDescription(), is(equalTo("updated-description-1")));
updateAnalytics(new DataFrameAnalyticsConfigUpdate.Builder(jobId).setDescription("updated-description-2").build());
assertThat(getOnlyElement(getAnalytics(jobId)).getDescription(), is(equalTo("updated-description-2")));
}
public void testTooLowConfiguredMemoryStillStarts() throws Exception {
initialize("low_memory_analysis");
indexData(sourceIndex, 10_000, 0, NESTED_FIELD);
DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder(
buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(NESTED_FIELD))
).setModelMemoryLimit(ByteSizeValue.ofKb(1)).build();
putAnalytics(config);
// Shouldn't throw
startAnalytics(jobId);
waitUntilAnalyticsIsFailed(jobId);
// It could be marked as failed...
forceStopAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
}
public void testWithSearchRuntimeMappings() throws Exception {
initialize("classification_with_search_runtime_mappings");
indexData(sourceIndex, 300, 50, KEYWORD_FIELD);
String numericRuntimeField = NUMERICAL_FIELD + "_runtime";
String dependentVariableRuntimeField = KEYWORD_FIELD + "_runtime";
String predictedClassField = dependentVariableRuntimeField + "_prediction";
Map<String, Object> numericRuntimeFieldMapping = new HashMap<>();
numericRuntimeFieldMapping.put("type", "double");
numericRuntimeFieldMapping.put("script", "emit(doc['" + NUMERICAL_FIELD + "'].value)");
Map<String, Object> dependentVariableRuntimeFieldMapping = new HashMap<>();
dependentVariableRuntimeFieldMapping.put("type", "keyword");
dependentVariableRuntimeFieldMapping.put(
"script",
"if (doc['" + KEYWORD_FIELD + "'].size() > 0) { emit(doc['" + KEYWORD_FIELD + "'].value); }"
);
Map<String, Object> runtimeFields = new HashMap<>();
runtimeFields.put(numericRuntimeField, numericRuntimeFieldMapping);
runtimeFields.put(dependentVariableRuntimeField, dependentVariableRuntimeFieldMapping);
DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder().setId(jobId)
.setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex }, null, null, runtimeFields))
.setDest(new DataFrameAnalyticsDest(destIndex, null))
.setAnalyzedFields(FetchSourceContext.of(true, new String[] { numericRuntimeField, dependentVariableRuntimeField }, null))
.setAnalysis(
new Classification(
dependentVariableRuntimeField,
BoostedTreeParams.builder().setNumTopFeatureImportanceValues(1).build(),
predictedClassField,
null,
null,
null,
null,
null,
null
)
)
.build();
putAnalytics(config);
assertIsStopped(jobId);
assertProgressIsZero(jobId);
startAnalytics(jobId);
waitUntilAnalyticsIsStopped(jobId);
client().admin().indices().refresh(new RefreshRequest(destIndex));
assertResponse(prepareSearch(destIndex).setTrackTotalHits(true).setSize(1000), destData -> {
for (SearchHit hit : destData.getHits()) {
Map<String, Object> destDoc = hit.getSourceAsMap();
Map<String, Object> resultsObject = getFieldValue(destDoc, "ml");
assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES)));
assertThat(getFieldValue(resultsObject, "is_training"), is(destDoc.containsKey(KEYWORD_FIELD)));
assertTopClasses(resultsObject, 2, dependentVariableRuntimeField, KEYWORD_FIELD_VALUES);
@SuppressWarnings("unchecked")
List<Map<String, Object>> importanceArray = (List<Map<String, Object>>) resultsObject.get("feature_importance");
assertThat(importanceArray, hasSize(1));
assertThat(importanceArray.get(0), hasEntry("feature_name", numericRuntimeField));
}
});
assertProgressComplete(jobId);
assertStoredProgressHits(jobId, 1);
assertModelStatePersisted(stateDocId());
assertExactlyOneInferenceModelPersisted(jobId);
assertMlResultsFieldMappings(destIndex, predictedClassField, "keyword");
assertThatAuditMessagesMatch(
jobId,
"Created analytics with type [classification]",
"Estimated memory usage [",
"Starting analytics on node",
"Started analytics",
expectedDestIndexAuditMessage(),
"Started reindexing to destination index [" + destIndex + "]",
"Finished reindexing to destination index [" + destIndex + "]",
"Started loading data",
"Started analyzing",
"Started writing results",
"Finished analysis"
);
assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField);
}
public void testPreview() throws Exception {
initialize("preview_analytics");
indexData(sourceIndex, 300, 50, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD));
putAnalytics(config);
List<Map<String, Object>> preview = previewDataFrame(jobId).getFeatureValues();
for (Map<String, Object> feature : preview) {
assertThat(
feature.keySet(),
containsInAnyOrder(
BOOLEAN_FIELD,
KEYWORD_FIELD,
NUMERICAL_FIELD,
DISCRETE_NUMERICAL_FIELD,
TEXT_FIELD + ".keyword",
NESTED_FIELD,
ALIAS_TO_KEYWORD_FIELD,
ALIAS_TO_NESTED_FIELD
)
);
}
}
public void testPreviewWithProcessors() throws Exception {
initialize("processed_preview_analytics");
indexData(sourceIndex, 300, 50, KEYWORD_FIELD);
DataFrameAnalyticsConfig config = buildAnalytics(
jobId,
sourceIndex,
destIndex,
null,
new Classification(
KEYWORD_FIELD,
BoostedTreeParams.builder().setNumTopFeatureImportanceValues(0).build(),
null,
null,
2,
10.0,
42L,
Arrays.asList(
new OneHotEncoding(
NESTED_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom_2", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom_2"),
true
),
new OneHotEncoding(
TEXT_FIELD,
Map.of(KEYWORD_FIELD_VALUES.get(0), "cat_column_custom_3", KEYWORD_FIELD_VALUES.get(1), "dog_column_custom_3"),
true
)
),
null
)
);
putAnalytics(config);
List<Map<String, Object>> preview = previewDataFrame(jobId).getFeatureValues();
for (Map<String, Object> feature : preview) {
assertThat(
feature.keySet(),
hasItems(
BOOLEAN_FIELD,
KEYWORD_FIELD,
NUMERICAL_FIELD,
DISCRETE_NUMERICAL_FIELD,
"cat_column_custom_2",
"dog_column_custom_2",
"cat_column_custom_3",
"dog_column_custom_3"
)
);
assertThat(feature.keySet(), not(hasItems(NESTED_FIELD, TEXT_FIELD)));
}
}
private static <T> T getOnlyElement(List<T> list) {
assertThat(list, hasSize(1));
return list.get(0);
}
private void initialize(String jobId) {
initialize(jobId, false);
}
private void initialize(String jobId, boolean isDatastream) {
this.jobId = jobId;
this.sourceIndex = jobId + "_source_index";
this.destIndex = sourceIndex + "_results";
this.analysisUsesExistingDestIndex = randomBoolean();
createIndex(sourceIndex, isDatastream);
if (analysisUsesExistingDestIndex) {
createIndex(destIndex, false);
}
}
static void createIndex(String index, boolean isDatastream) {
String mapping = Strings.format(
"""
{
"properties": {
"@timestamp": {
"type": "date"
},
"%s": {
"type": "boolean"
},
"%s": {
"type": "double"
},
"%s": {
"type": "integer"
},
"%s": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"%s": {
"type": "keyword"
},
"%s": {
"type": "keyword"
},
"%s": {
"type": "alias",
"path": "%s"
},
"%s": {
"type": "alias",
"path": "%s"
}
}
}""",
BOOLEAN_FIELD,
NUMERICAL_FIELD,
DISCRETE_NUMERICAL_FIELD,
TEXT_FIELD,
KEYWORD_FIELD,
NESTED_FIELD,
ALIAS_TO_KEYWORD_FIELD,
KEYWORD_FIELD,
ALIAS_TO_NESTED_FIELD,
NESTED_FIELD
);
if (isDatastream) {
try {
createDataStreamAndTemplate(index, mapping);
} catch (IOException ex) {
throw new ElasticsearchException(ex);
}
} else {
client().admin().indices().prepareCreate(index).setMapping(mapping).get();
}
}
static void indexData(String sourceIndex, int numTrainingRows, int numNonTrainingRows, String dependentVariable) {
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
for (int i = 0; i < numTrainingRows; i++) {
List<Object> source = List.of(
"@timestamp",
"2020-12-12",
BOOLEAN_FIELD,
BOOLEAN_FIELD_VALUES.get(i % BOOLEAN_FIELD_VALUES.size()),
NUMERICAL_FIELD,
NUMERICAL_FIELD_VALUES.get(i % NUMERICAL_FIELD_VALUES.size()),
DISCRETE_NUMERICAL_FIELD,
DISCRETE_NUMERICAL_FIELD_VALUES.get(i % DISCRETE_NUMERICAL_FIELD_VALUES.size()),
TEXT_FIELD,
KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size()),
KEYWORD_FIELD,
KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size()),
NESTED_FIELD,
KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())
);
IndexRequest indexRequest = new IndexRequest(sourceIndex).source(source.toArray()).opType(DocWriteRequest.OpType.CREATE);
bulkRequestBuilder.add(indexRequest);
}
for (int i = numTrainingRows; i < numTrainingRows + numNonTrainingRows; i++) {
List<Object> source = new ArrayList<>();
if (BOOLEAN_FIELD.equals(dependentVariable) == false) {
source.addAll(List.of(BOOLEAN_FIELD, BOOLEAN_FIELD_VALUES.get(i % BOOLEAN_FIELD_VALUES.size())));
}
if (NUMERICAL_FIELD.equals(dependentVariable) == false) {
source.addAll(List.of(NUMERICAL_FIELD, NUMERICAL_FIELD_VALUES.get(i % NUMERICAL_FIELD_VALUES.size())));
}
if (DISCRETE_NUMERICAL_FIELD.equals(dependentVariable) == false) {
source.addAll(
List.of(DISCRETE_NUMERICAL_FIELD, DISCRETE_NUMERICAL_FIELD_VALUES.get(i % DISCRETE_NUMERICAL_FIELD_VALUES.size()))
);
}
if (TEXT_FIELD.equals(dependentVariable) == false) {
source.addAll(List.of(TEXT_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())));
}
if (KEYWORD_FIELD.equals(dependentVariable) == false) {
source.addAll(List.of(KEYWORD_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())));
}
if (NESTED_FIELD.equals(dependentVariable) == false) {
source.addAll(List.of(NESTED_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())));
}
source.addAll(List.of("@timestamp", "2020-12-12"));
IndexRequest indexRequest = new IndexRequest(sourceIndex).source(source.toArray()).opType(DocWriteRequest.OpType.CREATE);
bulkRequestBuilder.add(indexRequest);
}
BulkResponse bulkResponse = bulkRequestBuilder.get();
if (bulkResponse.hasFailures()) {
fail("Failed to index data: " + bulkResponse.buildFailureMessage());
}
}
private static Map<String, Object> getDestDoc(DataFrameAnalyticsConfig config, SearchHit hit) {
GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get();
assertThat(destDocGetResponse.isExists(), is(true));
Map<String, Object> sourceDoc = hit.getSourceAsMap();
Map<String, Object> destDoc = destDocGetResponse.getSource();
for (String field : sourceDoc.keySet()) {
assertThat(destDoc, hasKey(field));
assertThat(destDoc.get(field), equalTo(sourceDoc.get(field)));
}
return destDoc;
}
private static <T> void assertTopClasses(
Map<String, Object> resultsObject,
int numTopClasses,
String dependentVariable,
List<T> dependentVariableValues
) {
List<Map<String, Object>> topClasses = getFieldValue(resultsObject, "top_classes");
assertThat(topClasses, hasSize(numTopClasses));
List<T> classNames = new ArrayList<>(topClasses.size());
List<Double> classProbabilities = new ArrayList<>(topClasses.size());
List<Double> classScores = new ArrayList<>(topClasses.size());
for (Map<String, Object> topClass : topClasses) {
classNames.add(getFieldValue(topClass, "class_name"));
classProbabilities.add(getFieldValue(topClass, "class_probability"));
classScores.add(getFieldValue(topClass, "class_score"));
}
// Assert that all the predicted | ClassificationIT |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/generics/DecoratorWithTypeVariableTest.java | {
"start": 2026,
"end": 2190
} | interface ____<K, V> {
String doSomething(MyParameterizedType<K, V> myParameterizedType);
}
@Decorator
@Priority(1)
public static | MyInterface |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/env/NodeMetadata.java | {
"start": 1198,
"end": 5142
} | class ____ {
static final String NODE_ID_KEY = "node_id";
static final String NODE_VERSION_KEY = "node_version";
static final String OLDEST_INDEX_VERSION_KEY = "oldest_index_version";
private final String nodeId;
private final BuildVersion nodeVersion;
private final BuildVersion previousNodeVersion;
private final IndexVersion oldestIndexVersion;
private NodeMetadata(
final String nodeId,
final BuildVersion buildVersion,
final BuildVersion previousBuildVersion,
final IndexVersion oldestIndexVersion
) {
this.nodeId = Objects.requireNonNull(nodeId);
this.nodeVersion = Objects.requireNonNull(buildVersion);
this.previousNodeVersion = Objects.requireNonNull(previousBuildVersion);
this.oldestIndexVersion = Objects.requireNonNull(oldestIndexVersion);
}
public NodeMetadata(final String nodeId, final BuildVersion buildVersion, final IndexVersion oldestIndexVersion) {
this(nodeId, buildVersion, buildVersion, oldestIndexVersion);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NodeMetadata that = (NodeMetadata) o;
return nodeId.equals(that.nodeId)
&& nodeVersion.equals(that.nodeVersion)
&& oldestIndexVersion.equals(that.oldestIndexVersion)
&& Objects.equals(previousNodeVersion, that.previousNodeVersion);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, nodeVersion, previousNodeVersion, oldestIndexVersion);
}
@Override
public String toString() {
return "NodeMetadata{"
+ "nodeId='"
+ nodeId
+ '\''
+ ", nodeVersion="
+ nodeVersion
+ ", previousNodeVersion="
+ previousNodeVersion
+ ", oldestIndexVersion="
+ oldestIndexVersion
+ '}';
}
public String nodeId() {
return nodeId;
}
public BuildVersion nodeVersion() {
return nodeVersion;
}
/**
* When a node starts we read the existing node metadata from disk (see NodeEnvironment@loadNodeMetadata), store a reference to the
* node version that we read from there in {@code previousNodeVersion} and then proceed to upgrade the version to
* the current version of the node ({@link NodeMetadata#upgradeToCurrentVersion()} before storing the node metadata again on disk.
* In doing so, {@code previousNodeVersion} refers to the previously last known version that this node was started on.
*/
public BuildVersion previousNodeVersion() {
return previousNodeVersion;
}
public IndexVersion oldestIndexVersion() {
return oldestIndexVersion;
}
public void verifyUpgradeToCurrentVersion() {
if (nodeVersion.onOrAfterMinimumCompatible() == false) {
throw new IllegalStateException(
"cannot upgrade a node from version ["
+ nodeVersion
+ "] directly to version ["
+ Build.current().version()
+ "], "
+ "upgrade to version ["
+ Build.current().minWireCompatVersion()
+ "] first."
);
}
if (nodeVersion.isFutureVersion()) {
throw new IllegalStateException(
"cannot downgrade a node from version [" + nodeVersion + "] to version [" + Build.current().version() + "]"
);
}
}
public NodeMetadata upgradeToCurrentVersion() {
verifyUpgradeToCurrentVersion();
return nodeVersion.equals(BuildVersion.current())
? this
: new NodeMetadata(nodeId, BuildVersion.current(), nodeVersion, oldestIndexVersion);
}
private static | NodeMetadata |
java | apache__camel | components/camel-base64/src/test/java/org/apache/camel/dataformat/base64/Base64DataFormatDefaultsTest.java | {
"start": 891,
"end": 3532
} | class ____ extends Base64DataFormatTestBase {
private static final String ENCODED = "IrRWhNZNjFxQ6WXJEIsehbnFdurtgacAq+t6Zh3uYlyclF3HAx995mbIydQlymM8V3yA+Yb1p3Ij\r\n"
+ "7AS1VQaUNHAljNpHUqrWR6EmASZV/EQvR5Gk8XDvRrrtkoDm+jdZ/XKfest2OIzhixZF1mcqyi1P\r\n"
+ "Hep/rFnVPclO9WOWtCCRhz+U2soBzNBtvTc6x1pz1gOZcoOEFKHSf2kmkq1/7hHFl5Cb9nbSBgyp\r\n"
+ "lFzsInVBfCkRxXAFixwbC3B+LB8e15zSMvoG6okyDs7C8QShIZCXGHlsuUiH96izUbfB8qpTQK80\r\n"
+ "PPAisxYhF/gb678wvO5e/03AmFmYbBqzwoNQ6PoZKFI8a4PUrLoCLrUnKQgwOXueb1y8d4bsVGrX\r\n"
+ "H5QUFgAE3yZEn2ZQtVv6bZnm3lvBe/LLRD4xIU2Pcm5e+DJUZhHcl/8MaioDWFgYPLftDKvEUwLB\r\n"
+ "3IFWLSKMKFoeXn2nkwxsCHrzhajhbkKl1+H9I7Gkd19DyAoPIriWOJScog+mcP0iqG9iMqYFko2n\r\n"
+ "rh2rr+jcyKFBhrRUuNw3W8+h+FOwZDLcBmuTv2lEOvUdaPgD+1e6fXpuxhiih4wf/zlakeVa031T\r\n"
+ "9c0/HN02z0cAhLT1vtEA0zDn6OzzhY//Mh332ZmC+xro+e9o2a6+dnwamDtLuRgDDd+EcoUQpfEL\r\n"
+ "XobX3ZSX7OQw1ZXxWiJLtSOc5yLRkdbxdLK/C6fkcY4cqc/RwBGYtXN7Z1ENG/s/LnrZnRU/ErMW\r\n"
+ "RtbRwehA/0a2KSbNOMwK8BpzDruXufLXZcGaDKRUektQfdX4XhhYESt1drewlQLVaEWrZBR8JOd5\r\n"
+ "mckulPhwHp2Q00YyoScEj6Rs/9siyv49/FSaRCbnfzl3CRnNvCOD1cvF4OneYbVJCMOY49ucFmN/\r\n"
+ "mBCyxLOtJ4Zz8EG1FC81QTg3Scw+FdFDsCgr7DqVrmPOLikqq6wJdLBjyHXuMiVP9Fq/aAxvXEgj\r\n"
+ "RuVnN20wn2tUOXeaN4XqziQ66M229HsY0BX5riJ00yXArDxd+I9mFDpw/UDnGBAE2P//1fU1ns1A\r\n"
+ "6zQ6hTv7axdlw3/FnOAdymEKqED9CPfbiDvJygcAcxv2fyORHQ+TiprMGxckAlnLZ2pGl+gOzbtZ\r\n"
+ "zJgecyFJHBbhtkubGD4zzQhuJJw8ypqppSxqDs8SAW2frj42UT9qRMeCBGXLa1wyISt4GI6iOnfw\r\n"
+ "TCRJ/SE7CVrEfmdmROlJpAJHfUlQIJq1aW3mTE5zTmAygypxRUDCmA+eY9wdCicFp6YptdCEK3P2\r\n"
+ "7QzZsSASAByd5jxHMiIBkdwGzj1501xZ7hFLJDXDTQ==\r\n";
public Base64DataFormatDefaultsTest() {
format = new Base64DataFormat();
}
@Test
void testEncode() throws Exception {
runEncoderTest(DECODED, ENCODED.getBytes());
}
@Test
void testDecode() throws Exception {
runDecoderTest(ENCODED.getBytes(), DECODED);
}
}
| Base64DataFormatDefaultsTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/internal/SystemUtils.java | {
"start": 964,
"end": 2087
} | class ____ {
private static final Logger LOGGER = StatusLogger.getLogger();
private static String getJavaVendor() {
try {
return System.getProperty("java.vendor");
} catch (final SecurityException e) {
LOGGER.warn("Unable to determine Java vendor.", e);
}
return "Unknown";
}
public static boolean isOsAndroid() {
return getJavaVendor().contains("Android");
}
/**
* Checks if the current runtime is GraalVM.
* <p>
* See <a href="https://www.graalvm.org/sdk/javadoc/org/graalvm/nativeimage/ImageInfo.html#PROPERTY_IMAGE_CODE_KEY">ImageInfo.PROPERTY_IMAGE_CODE_KEY</a>.
* </p>
* @return true if the current runtime is GraalVM, false otherwise.
*/
public static boolean isGraalVm() {
try {
return System.getProperty("org.graalvm.nativeimage.imagecode") != null;
} catch (final SecurityException e) {
LOGGER.debug("Unable to determine if the current runtime is GraalVM.", e);
return false;
}
}
private SystemUtils() {}
}
| SystemUtils |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/transport/DecodeHandler.java | {
"start": 1354,
"end": 2597
} | class ____ extends AbstractChannelHandlerDelegate {
private static final ErrorTypeAwareLogger log = LoggerFactory.getErrorTypeAwareLogger(DecodeHandler.class);
public DecodeHandler(ChannelHandler handler) {
super(handler);
}
@Override
public void received(Channel channel, Object message) throws RemotingException {
if (message instanceof Decodeable) {
decode(message);
}
if (message instanceof Request) {
decode(((Request) message).getData());
}
if (message instanceof Response) {
decode(((Response) message).getResult());
}
handler.received(channel, message);
}
private void decode(Object message) {
if (!(message instanceof Decodeable)) {
return;
}
try {
((Decodeable) message).decode();
if (log.isDebugEnabled()) {
log.debug("Decode decodeable message " + message.getClass().getName());
}
} catch (Throwable e) {
if (log.isWarnEnabled()) {
log.warn(TRANSPORT_FAILED_DECODE, "", "", "Call Decodeable.decode failed: " + e.getMessage(), e);
}
}
}
}
| DecodeHandler |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobAlreadyDoneException.java | {
"start": 1013,
"end": 1693
} | class ____ extends JobException {
private static final long serialVersionUID = 5906282500322681417L;
JobAlreadyDoneException(JobID jobId) {
super(
"The job ("
+ jobId
+ ") has already terminated, but we cannot tell the final "
+ "state and also not serve the job result. This can happen if Flink "
+ "fails over shortly after finishing a job and before reporting "
+ "the result. Please check the output of your job to see whether the "
+ "execution was successful or not.");
}
}
| JobAlreadyDoneException |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java | {
"start": 1463,
"end": 5665
} | class ____ extends ProtoBase<CounterGroupProto> implements CounterGroup {
CounterGroupProto proto = CounterGroupProto.getDefaultInstance();
CounterGroupProto.Builder builder = null;
boolean viaProto = false;
private Map<String, Counter> counters = null;
public CounterGroupPBImpl() {
builder = CounterGroupProto.newBuilder();
}
public CounterGroupPBImpl(CounterGroupProto proto) {
this.proto = proto;
viaProto = true;
}
public CounterGroupProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.counters != null) {
addContersToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CounterGroupProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public String getName() {
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasName()) {
return null;
}
return (p.getName());
}
@Override
public void setName(String name) {
maybeInitBuilder();
if (name == null) {
builder.clearName();
return;
}
builder.setName((name));
}
@Override
public String getDisplayName() {
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDisplayName()) {
return null;
}
return (p.getDisplayName());
}
@Override
public void setDisplayName(String displayName) {
maybeInitBuilder();
if (displayName == null) {
builder.clearDisplayName();
return;
}
builder.setDisplayName((displayName));
}
@Override
public Map<String, Counter> getAllCounters() {
initCounters();
return this.counters;
}
@Override
public Counter getCounter(String key) {
initCounters();
return this.counters.get(key);
}
private void initCounters() {
if (this.counters != null) {
return;
}
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
List<StringCounterMapProto> list = p.getCountersList();
this.counters = new HashMap<String, Counter>();
for (StringCounterMapProto c : list) {
this.counters.put(c.getKey(), convertFromProtoFormat(c.getValue()));
}
}
@Override
public void addAllCounters(final Map<String, Counter> counters) {
if (counters == null)
return;
initCounters();
this.counters.putAll(counters);
}
private void addContersToProto() {
maybeInitBuilder();
builder.clearCounters();
if (counters == null)
return;
Iterable<StringCounterMapProto> iterable = new Iterable<StringCounterMapProto>() {
@Override
public Iterator<StringCounterMapProto> iterator() {
return new Iterator<StringCounterMapProto>() {
Iterator<String> keyIter = counters.keySet().iterator();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public StringCounterMapProto next() {
String key = keyIter.next();
return StringCounterMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(counters.get(key))).build();
}
@Override
public boolean hasNext() {
return keyIter.hasNext();
}
};
}
};
builder.addAllCounters(iterable);
}
@Override
public void setCounter(String key, Counter val) {
initCounters();
this.counters.put(key, val);
}
@Override
public void removeCounter(String key) {
initCounters();
this.counters.remove(key);
}
@Override
public void clearCounters() {
initCounters();
this.counters.clear();
}
private CounterPBImpl convertFromProtoFormat(CounterProto p) {
return new CounterPBImpl(p);
}
private CounterProto convertToProtoFormat(Counter t) {
return ((CounterPBImpl)t).getProto();
}
}
| CounterGroupPBImpl |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/cisupport/CIDetector.java | {
"start": 953,
"end": 1038
} | interface ____ detect CI system process runs on, if any.
*
* @since 4.0.0
*/
public | to |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/scope/custom/SpecialBeanScope.java | {
"start": 384,
"end": 1231
} | class ____ implements CustomScope<SpecialBean> {
private final Map<BeanIdentifier, CreatedBean<?>> beans = new ConcurrentHashMap<>();
public Map<BeanIdentifier, CreatedBean<?>> getBeans() {
return beans;
}
@Override
public Class<SpecialBean> annotationType() {
return SpecialBean.class;
}
@Override
public <T> T getOrCreate(BeanCreationContext<T> context) {
return (T) beans.computeIfAbsent(context.id(), key -> context.create()).bean();
}
@Override
public <T> Optional<T> remove(BeanIdentifier identifier) {
final CreatedBean<?> createdBean = beans.remove(identifier);
if (createdBean != null) {
createdBean.close();
return (Optional<T>) Optional.of(createdBean.bean());
}
return Optional.empty();
}
}
| SpecialBeanScope |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletableUsing.java | {
"start": 3047,
"end": 6253
} | class ____<R>
extends AtomicReference<Object>
implements CompletableObserver, Disposable {
private static final long serialVersionUID = -674404550052917487L;
final CompletableObserver downstream;
final Consumer<? super R> disposer;
final boolean eager;
Disposable upstream;
UsingObserver(CompletableObserver actual, R resource, Consumer<? super R> disposer, boolean eager) {
super(resource);
this.downstream = actual;
this.disposer = disposer;
this.eager = eager;
}
@Override
public void dispose() {
if (eager) {
disposeResource();
upstream.dispose();
upstream = DisposableHelper.DISPOSED;
} else {
upstream.dispose();
upstream = DisposableHelper.DISPOSED;
disposeResource();
}
}
@SuppressWarnings("unchecked")
void disposeResource() {
Object resource = getAndSet(this);
if (resource != this) {
try {
disposer.accept((R)resource);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
RxJavaPlugins.onError(ex);
}
}
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@SuppressWarnings("unchecked")
@Override
public void onError(Throwable e) {
upstream = DisposableHelper.DISPOSED;
if (eager) {
Object resource = getAndSet(this);
if (resource != this) {
try {
disposer.accept((R)resource);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
e = new CompositeException(e, ex);
}
} else {
return;
}
}
downstream.onError(e);
if (!eager) {
disposeResource();
}
}
@SuppressWarnings("unchecked")
@Override
public void onComplete() {
upstream = DisposableHelper.DISPOSED;
if (eager) {
Object resource = getAndSet(this);
if (resource != this) {
try {
disposer.accept((R)resource);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
downstream.onError(ex);
return;
}
} else {
return;
}
}
downstream.onComplete();
if (!eager) {
disposeResource();
}
}
}
}
| UsingObserver |
java | elastic__elasticsearch | test/framework/src/test/java/org/elasticsearch/transport/DisruptableMockTransportTests.java | {
"start": 26863,
"end": 27759
} | class ____ extends TransportResponse {
private final RefCounted refCounted;
TestResponse() {
activeRequestCount++;
refCounted = AbstractRefCounted.of(() -> activeRequestCount--);
}
TestResponse(StreamInput in) {
activeRequestCount++;
refCounted = AbstractRefCounted.of(() -> activeRequestCount--);
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public void incRef() {
refCounted.incRef();
}
@Override
public boolean tryIncRef() {
return refCounted.tryIncRef();
}
@Override
public boolean decRef() {
return refCounted.decRef();
}
@Override
public boolean hasReferences() {
return refCounted.hasReferences();
}
}
}
| TestResponse |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/async/AsyncProcessorAwaitManagerInterruptWithRedeliveryTest.java | {
"start": 4872,
"end": 4989
} | class ____ {
public void callMe() throws Exception {
throw new Exception();
}
}
}
| MyBean |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java | {
"start": 1059,
"end": 1528
} | class ____ extends AbstractInternalProfileTree<QueryProfileBreakdown, Query> {
/** Rewrite time */
private final AtomicLong rewriteTime = new AtomicLong(0L);
@Override
protected QueryProfileBreakdown createProfileBreakdown() {
return new QueryProfileBreakdown();
}
@Override
protected String getTypeFromElement(Query query) {
// Anonymous classes won't have a name,
// we need to get the super | InternalQueryProfileTree |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java | {
"start": 14088,
"end": 15189
} | class ____/value map
configurationMemberVariables = new HashMap<>();
LOG_CONFIG.debug("Reading configuration classes\n");
for (Class c : configurationClasses) {
Field[] fields = c.getDeclaredFields();
Map<String, String> memberMap =
extractMemberVariablesFromConfigurationFields(fields);
if (memberMap != null) {
configurationMemberVariables.putAll(memberMap);
}
}
LOG_CONFIG.debug("\n=====\n");
// Create XML key/value map
LOG_XML.debug("Reading XML property files\n");
xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
LOG_XML.debug("\n=====\n");
// Create default configuration variable key/value map
LOG.debug("Reading Config property files for defaults\n");
configurationDefaultVariables = new HashMap<>();
Arrays.stream(configurationClasses)
.map(Class::getDeclaredFields)
.map(this::extractDefaultVariablesFromConfigurationFields)
.filter(Objects::nonNull)
.forEach(map -> configurationDefaultVariables.putAll(map));
LOG.debug("\n=====\n");
// Find | member |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/bootstrap/BootstrapRegistry.java | {
"start": 5408,
"end": 5723
} | enum ____ {
/**
* A singleton instance. The {@link InstanceSupplier} will be called only once and
* the same instance will be returned each time.
*/
SINGLETON,
/**
* A prototype instance. The {@link InstanceSupplier} will be called whenever an
* instance is needed.
*/
PROTOTYPE
}
}
| Scope |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableBackgroundEvent.java | {
"start": 1071,
"end": 1882
} | class ____<T> extends BackgroundEvent implements CompletableEvent<T> {
private final CompletableFuture<T> future;
private final long deadlineMs;
/**
* <em>Note</em>: the {@code deadlineMs} is the future time of expiration, <em>not</em> a timeout.
*/
protected CompletableBackgroundEvent(final Type type, final long deadlineMs) {
super(type);
this.future = new CompletableFuture<>();
this.deadlineMs = deadlineMs;
}
@Override
public CompletableFuture<T> future() {
return future;
}
@Override
public long deadlineMs() {
return deadlineMs;
}
@Override
protected String toStringBase() {
return super.toStringBase() + ", future=" + future + ", deadlineMs=" + deadlineMs;
}
}
| CompletableBackgroundEvent |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/from/SqmFromClauseContainer.java | {
"start": 247,
"end": 418
} | interface ____ {
/**
* Obtains this container's SqmFromClause.
*
* @return This container's SqmFromClause.
*/
SqmFromClause getFromClause();
}
| SqmFromClauseContainer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureReason.java | {
"start": 913,
"end": 3841
} | enum ____ {
PERIODIC_SCHEDULER_SHUTDOWN(true, "Periodic checkpoint scheduler is shut down."),
TOO_MANY_CHECKPOINT_REQUESTS(true, "The maximum number of queued checkpoint requests exceeded"),
MINIMUM_TIME_BETWEEN_CHECKPOINTS(
true,
"The minimum time between checkpoints is still pending. "
+ "Checkpoint will be triggered after the minimum time."),
NOT_ALL_REQUIRED_TASKS_RUNNING(true, "Not all required tasks are currently running."),
IO_EXCEPTION(
true, "An Exception occurred while triggering the checkpoint. IO-problem detected."),
BLOCKING_OUTPUT_EXIST(true, "Blocking output edge exists in running tasks."),
CHECKPOINT_ASYNC_EXCEPTION(false, "Asynchronous task checkpoint failed."),
CHANNEL_STATE_SHARED_STREAM_EXCEPTION(
false,
"The checkpoint was aborted due to exception of other subtasks sharing the ChannelState file."),
CHECKPOINT_EXPIRED(false, "Checkpoint expired before completing."),
CHECKPOINT_SUBSUMED(false, "Checkpoint has been subsumed."),
CHECKPOINT_DECLINED(false, "Checkpoint was declined."),
CHECKPOINT_DECLINED_TASK_NOT_READY(false, "Checkpoint was declined (tasks not ready)"),
CHECKPOINT_DECLINED_TASK_CLOSING(false, "Checkpoint was declined (task is closing)"),
CHECKPOINT_DECLINED_SUBSUMED(
false, "Checkpoint was canceled because a barrier from newer checkpoint was received."),
CHECKPOINT_DECLINED_ON_CANCELLATION_BARRIER(
false, "Task received cancellation from one of its inputs"),
CHECKPOINT_DECLINED_INPUT_END_OF_STREAM(
false, "Checkpoint was declined because one input stream is finished"),
CHECKPOINT_COORDINATOR_SHUTDOWN(false, "CheckpointCoordinator shutdown."),
CHECKPOINT_COORDINATOR_SUSPEND(false, "Checkpoint Coordinator is suspending."),
JOB_FAILOVER_REGION(false, "FailoverRegion is restarting."),
TASK_FAILURE(false, "Task has failed."),
TASK_CHECKPOINT_FAILURE(false, "Task local checkpoint failure."),
UNKNOWN_TASK_CHECKPOINT_NOTIFICATION_FAILURE(
false, "Unknown task for the checkpoint to notify."),
FINALIZE_CHECKPOINT_FAILURE(false, "Failure to finalize checkpoint."),
TRIGGER_CHECKPOINT_FAILURE(false, "Trigger checkpoint failure.");
// ------------------------------------------------------------------------
private final boolean preFlight;
private final String message;
CheckpointFailureReason(boolean isPreFlight, String message) {
this.preFlight = isPreFlight;
this.message = message;
}
public String message() {
return message;
}
/**
* @return true if this value indicates a failure reason happening before a checkpoint is passed
* to a job's tasks.
*/
public boolean isPreFlight() {
return preFlight;
}
}
| CheckpointFailureReason |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/ref/RefTest8.java | {
"start": 216,
"end": 1151
} | class ____ extends TestCase {
public void test_bug_for_juqkai() throws Exception {
C c = new C();
Map<String, Object> a = Collections.<String,Object>singletonMap("c", c);
Map<String, Object> b = Collections.<String,Object>singletonMap("c", c);
Map<String, Object> vo = new HashMap<String, Object>();
vo.put("a", a);
vo.put("b", b);
Object[] root = new Object[] { vo };
String text = JSON.toJSONString(root);
System.out.println(text);
VO[] array2 = JSON.parseObject(text, VO[].class);
Assert.assertEquals(1, array2.length);
Assert.assertNotNull(array2[0].getA());
Assert.assertNotNull(array2[0].getB());
Assert.assertNotNull(array2[0].getA().getC());
Assert.assertNotNull(array2[0].getB().getC());
Assert.assertSame(array2[0].getA().getC(), array2[0].getB().getC());
}
private static | RefTest8 |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 111890,
"end": 112127
} | interface ____ {
@RequestMapping(method = RequestMethod.GET)
String get(Model model);
@RequestMapping(method = RequestMethod.POST)
String post(@ModelAttribute("object1") Object object1);
}
static | MySessionAttributesControllerIfc |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/testutil/compilation/annotation/ExpectedNote.java | {
"start": 621,
"end": 816
} | interface ____ {
String value();
/**
* The notes in the order they are expected
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @ | ExpectedNote |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/impl/ConversionHelper.java | {
"start": 859,
"end": 3389
} | class ____ {
@SuppressWarnings("unchecked")
public static Object toObject(Object obj) {
if (obj instanceof Map) {
return toJsonObject((Map<String, Object>) obj);
} else if (obj instanceof List) {
return toJsonArray((List<Object>) obj);
} else if (obj instanceof CharSequence) {
return obj.toString();
}
return obj;
}
@SuppressWarnings("unchecked")
private static Object toJsonElement(Object obj) {
if (obj instanceof Map) {
return toJsonObject((Map<String, Object>) obj);
} else if (obj instanceof List) {
return toJsonArray((List<Object>) obj);
} else if (obj instanceof CharSequence) {
return obj.toString();
} else if (obj instanceof Buffer) {
return BASE64_ENCODER.encodeToString(((Buffer) obj).getBytes());
}
return obj;
}
public static JsonObject toJsonObject(Map<String, Object> map) {
if (map == null) {
return null;
}
map = new LinkedHashMap<>(map);
map.entrySet().forEach(e -> e.setValue(toJsonElement(e.getValue())));
return new JsonObject(map);
}
public static JsonArray toJsonArray(List<Object> list) {
if (list == null) {
return null;
}
list = new ArrayList<>(list);
for (int i = 0; i < list.size(); i++) {
list.set(i, toJsonElement(list.get(i)));
}
return new JsonArray(list);
}
@SuppressWarnings("unchecked")
public static <T> T fromObject(Object obj) {
if (obj instanceof JsonObject) {
return (T) fromJsonObject((JsonObject) obj);
} else if (obj instanceof JsonArray) {
return (T) fromJsonArray((JsonArray) obj);
} else if (obj instanceof Instant) {
return (T) ISO_INSTANT.format((Instant) obj);
} else if (obj instanceof byte[]) {
return (T) BASE64_ENCODER.encodeToString((byte[]) obj);
} else if (obj instanceof Enum) {
return (T) ((Enum) obj).name();
}
return (T) obj;
}
public static Map<String, Object> fromJsonObject(JsonObject json) {
if (json == null) {
return null;
}
Map<String, Object> map = new LinkedHashMap<>(json.getMap());
map.entrySet().forEach(entry -> {
entry.setValue(fromObject(entry.getValue()));
});
return map;
}
public static List<Object> fromJsonArray(JsonArray json) {
if (json == null) {
return null;
}
List<Object> list = new ArrayList<>(json.getList());
for (int i = 0; i < list.size(); i++) {
list.set(i, fromObject(list.get(i)));
}
return list;
}
}
| ConversionHelper |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/aspectj/annotation/AbstractAspectJAdvisorFactoryTests.java | {
"start": 29451,
"end": 29484
} | class ____ {
| AbstractMakeModifiable |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/ChunkComponentBuilderFactory.java | {
"start": 1375,
"end": 1828
} | interface ____ {
/**
* Chunk (camel-chunk)
* Transform messages using Chunk templating engine.
*
* Category: transformation
* Since: 2.15
* Maven coordinates: org.apache.camel:camel-chunk
*
* @return the dsl builder
*/
static ChunkComponentBuilder chunk() {
return new ChunkComponentBuilderImpl();
}
/**
* Builder for the Chunk component.
*/
| ChunkComponentBuilderFactory |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest16.java | {
"start": 967,
"end": 2417
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "select privilege#,level from sysauth$ connect by grantee#=prior privilege# and privilege#>0 start with grantee#=:1 and privilege#>0";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("sysauth$")));
assertEquals(2, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
| OracleSelectTest16 |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/checkpoints/CheckpointStatisticDetailsHeaders.java | {
"start": 1314,
"end": 2652
} | class ____
implements RuntimeMessageHeaders<
EmptyRequestBody, CheckpointStatistics, CheckpointMessageParameters> {
private static final CheckpointStatisticDetailsHeaders INSTANCE =
new CheckpointStatisticDetailsHeaders();
public static final String URL = "/jobs/:jobid/checkpoints/details/:checkpointid";
private CheckpointStatisticDetailsHeaders() {}
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public Class<CheckpointStatistics> getResponseClass() {
return CheckpointStatistics.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public CheckpointMessageParameters getUnresolvedMessageParameters() {
return new CheckpointMessageParameters();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
public static CheckpointStatisticDetailsHeaders getInstance() {
return INSTANCE;
}
@Override
public String getDescription() {
return "Returns details for a checkpoint.";
}
}
| CheckpointStatisticDetailsHeaders |
java | apache__spark | core/src/main/java/org/apache/spark/util/collection/TimSort.java | {
"start": 1090,
"end": 1300
} | class ____ package private. We put
* a simple Scala wrapper {@link org.apache.spark.util.collection.Sorter}, which is available to
* package org.apache.spark.
*
* The purpose of the port is to generalize the | is |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/chararrays/CharArrays_assertContainsExactlyInAnyOrder_Test.java | {
"start": 1872,
"end": 11753
} | class ____ extends CharArraysBaseTest {
@Test
void should_pass_if_actual_contains_given_values_exactly_in_any_order() {
arrays.assertContainsExactlyInAnyOrder(someInfo(), actual, arrayOf('a', 'b', 'c'));
}
@Test
void should_pass_if_actual_and_given_values_are_empty() {
arrays.assertContainsExactlyInAnyOrder(someInfo(), emptyArray(), emptyArray());
}
@Test
void should_pass_if_actual_contains_given_values_exactly_but_in_different_order() {
AssertionInfo info = someInfo();
arrays.assertContainsExactlyInAnyOrder(info, actual, arrayOf('a', 'c', 'b'));
}
@Test
void should_fail_if_arrays_have_different_sizes() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertContainsExactlyInAnyOrder(someInfo(), actual,
arrayOf('a', 'b')));
}
@Test
void should_fail_if_expected_empty_and_actual_is_not() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertContainsExactlyInAnyOrder(someInfo(), actual,
emptyArray()));
}
@Test
void should_throw_error_if_expected_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertContainsExactlyInAnyOrder(someInfo(), actual, null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertContainsExactlyInAnyOrder(someInfo(), null,
arrayOf('b')))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_contain_given_values_exactly() {
AssertionInfo info = someInfo();
char[] expected = { 'a', 'b', 'e' };
Throwable error = catchThrowable(() -> arrays.assertContainsExactlyInAnyOrder(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldContainExactlyInAnyOrder(actual, expected, newArrayList('e'), newArrayList('c'),
StandardComparisonStrategy.instance()));
}
@Test
void should_fail_if_actual_contains_all_given_values_but_size_differ() {
AssertionInfo info = someInfo();
char[] expected = { 'a', 'b' };
Throwable error = catchThrowable(() -> arrays.assertContainsExactlyInAnyOrder(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, emptyList(), newArrayList('c'),
StandardComparisonStrategy.instance()));
}
@Test
void should_fail_if_actual_contains_duplicates_and_expected_does_not() {
AssertionInfo info = someInfo();
actual = arrayOf('a', 'b', 'b');
char[] expected = { 'a', 'b' };
Throwable error = catchThrowable(() -> arrays.assertContainsExactlyInAnyOrder(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, emptyList(), newArrayList('b'),
StandardComparisonStrategy.instance()));
}
@Test
void should_fail_if_expected_contains_duplicates_and_actual_does_not() {
AssertionInfo info = someInfo();
actual = arrayOf('a', 'b');
char[] expected = { 'a', 'b', 'b' };
Throwable error = catchThrowable(() -> arrays.assertContainsExactlyInAnyOrder(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, newArrayList('b'), emptyList(),
StandardComparisonStrategy.instance()));
}
// ------------------------------------------------------------------------------------------------------------------
// tests using a custom comparison strategy
// ------------------------------------------------------------------------------------------------------------------
@Test
void should_pass_if_actual_contains_given_values_exactly_in_any_order_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(someInfo(), actual, arrayOf('a', 'B', 'c'));
}
@Test
void should_pass_if_actual_contains_given_values_exactly_in_different_order_according_to_custom_comparison_strategy() {
char[] expected = { 'A', 'c', 'b' };
arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(someInfo(), actual, expected);
}
@Test
void should_fail_if_expected_empty_and_actual_is_not_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(someInfo(),
actual,
emptyArray()));
}
@Test
void should_throw_error_if_expected_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(someInfo(),
actual,
null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(someInfo(),
null,
arrayOf('b')))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_contain_given_values_exactly_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
char[] expected = { 'a', 'B', 'e' };
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(info, actual,
expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldContainExactlyInAnyOrder(actual, expected, newArrayList('e'), newArrayList('c'),
caseInsensitiveComparisonStrategy));
}
@Test
void should_fail_if_actual_contains_all_given_values_but_size_differ_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
char[] expected = { 'a', 'b' };
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(info, actual,
expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, emptyList(), newArrayList('c'),
caseInsensitiveComparisonStrategy));
}
@Test
void should_fail_if_actual_contains_duplicates_and_expected_does_not_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
actual = arrayOf('a', 'b', 'b');
char[] expected = { 'a', 'b' };
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(info, actual,
expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, emptyList(), newArrayList('b'),
caseInsensitiveComparisonStrategy));
}
@Test
void should_fail_if_expected_contains_duplicates_and_actual_does_not_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
actual = arrayOf('a', 'b');
char[] expected = { 'a', 'b', 'b' };
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertContainsExactlyInAnyOrder(info, actual,
expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info,
shouldContainExactlyInAnyOrder(actual, expected, newArrayList('b'), emptyList(),
caseInsensitiveComparisonStrategy));
}
}
| CharArrays_assertContainsExactlyInAnyOrder_Test |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java | {
"start": 3377,
"end": 14832
} | class ____ {
private final KRaftControlRecordStateMachine partitionState;
private final RequestSender requestSender;
private final Time time;
private final Logger logger;
public AddVoterHandler(
KRaftControlRecordStateMachine partitionState,
RequestSender requestSender,
Time time,
LogContext logContext
) {
this.partitionState = partitionState;
this.requestSender = requestSender;
this.time = time;
this.logger = logContext.logger(AddVoterHandler.class);
}
public CompletableFuture<AddRaftVoterResponseData> handleAddVoterRequest(
LeaderState<?> leaderState,
ReplicaKey voterKey,
Endpoints voterEndpoints,
boolean ackWhenCommitted,
long currentTimeMs
) {
// Check if there are any pending voter change requests
if (leaderState.isOperationPending(currentTimeMs)) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.REQUEST_TIMED_OUT,
"Request timed out waiting for leader to handle previous voter change request"
)
);
}
// Check that the leader has established a HWM and committed the current epoch
Optional<Long> highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset);
if (highWatermark.isEmpty()) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.REQUEST_TIMED_OUT,
"Request timed out waiting for leader to establish HWM and fence previous voter changes"
)
);
}
// Check that the cluster supports kraft.version >= 1
KRaftVersion kraftVersion = partitionState.lastKraftVersion();
if (!kraftVersion.isReconfigSupported()) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.UNSUPPORTED_VERSION,
String.format(
"Cluster doesn't support adding voter because the %s feature is %s",
kraftVersion.featureName(),
kraftVersion.featureLevel()
)
)
);
}
// Check that there are no uncommitted VotersRecord
Optional<LogHistory.Entry<VoterSet>> votersEntry = partitionState.lastVoterSetEntry();
if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.REQUEST_TIMED_OUT,
String.format(
"Request timed out waiting for voters to commit the latest voter change at %s with HWM %d",
votersEntry.map(LogHistory.Entry::offset),
highWatermark.get()
)
)
);
}
// Check that the new voter id is not part of the current voter set
VoterSet voters = votersEntry.get().value();
if (voters.voterIds().contains(voterKey.id())) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.DUPLICATE_VOTER,
String.format(
"The voter id for %s is already part of the set of voters %s.",
voterKey,
voters.voterKeys()
)
)
);
}
// Send API_VERSIONS request to new voter to discover their supported kraft.version range
OptionalLong timeout = requestSender.send(
voterEndpoints
.address(requestSender.listenerName())
.map(address -> new Node(voterKey.id(), address.getHostName(), address.getPort()))
.orElseThrow(
() -> new IllegalArgumentException(
String.format(
"Provided listeners %s do not contain a listener for %s",
voterEndpoints,
requestSender.listenerName()
)
)
),
this::buildApiVersionsRequest,
currentTimeMs
);
if (timeout.isEmpty()) {
return CompletableFuture.completedFuture(
RaftUtil.addVoterResponse(
Errors.REQUEST_TIMED_OUT,
String.format("New voter %s is not ready to receive requests", voterKey)
)
);
}
AddVoterHandlerState state = new AddVoterHandlerState(
voterKey,
voterEndpoints,
ackWhenCommitted,
time.timer(timeout.getAsLong())
);
leaderState.resetAddVoterHandlerState(
Errors.UNKNOWN_SERVER_ERROR,
null,
Optional.of(state)
);
return state.future();
}
public boolean handleApiVersionsResponse(
LeaderState<?> leaderState,
Node source,
Errors error,
Optional<ApiVersionsResponseData.SupportedFeatureKey> supportedKraftVersions,
long currentTimeMs
) {
Optional<AddVoterHandlerState> handlerState = leaderState.addVoterHandlerState();
if (handlerState.isEmpty()) {
// There are no pending add operation just ignore the api response
return true;
}
// Check that the API_VERSIONS response matches the id of the voter getting added
AddVoterHandlerState current = handlerState.get();
if (!current.expectingApiResponse(source.id())) {
logger.info(
"API_VERSIONS response is not expected from {}: voterKey is {}, lastOffset is {}",
source,
current.voterKey(),
current.lastOffset()
);
return true;
}
// Abort operation if the API_VERSIONS returned an error
if (error != Errors.NONE) {
logger.info(
"Aborting add voter operation for {} at {} since API_VERSIONS returned an error {}",
current.voterKey(),
current.voterEndpoints(),
error
);
leaderState.resetAddVoterHandlerState(
Errors.REQUEST_TIMED_OUT,
String.format(
"Aborted add voter operation for since API_VERSIONS returned an error %s",
error
),
Optional.empty()
);
return false;
}
// Check that the new voter supports the kraft.version for reconfiguration
KRaftVersion kraftVersion = partitionState.lastKraftVersion();
if (!validVersionRange(kraftVersion, supportedKraftVersions)) {
logger.info(
"Aborting add voter operation for {} at {} since kraft.version range {} doesn't " +
"support reconfiguration",
current.voterKey(),
current.voterEndpoints(),
supportedKraftVersions
);
leaderState.resetAddVoterHandlerState(
Errors.INVALID_REQUEST,
String.format(
"Aborted add voter operation for %s since the %s range %s doesn't " +
"support the finalized version %s",
current.voterKey(),
KRaftVersion.FEATURE_NAME,
supportedKraftVersions
.map(
range -> String.format(
"(min: %s, max: %s",
range.minVersion(),
range.maxVersion()
)
)
.orElse("(min: 0, max: 0)"),
kraftVersion.featureLevel()
),
Optional.empty()
);
return true;
}
// Check that the new voter is caught up to the LEO to avoid delays in HWM increases
if (!leaderState.isReplicaCaughtUp(current.voterKey(), currentTimeMs)) {
logger.info(
"Aborting add voter operation for {} at {} since it is lagging behind: {}",
current.voterKey(),
current.voterEndpoints(),
leaderState.getReplicaState(current.voterKey())
);
leaderState.resetAddVoterHandlerState(
Errors.REQUEST_TIMED_OUT,
String.format(
"Aborted add voter operation for %s since it is lagging behind",
current.voterKey()
),
Optional.empty()
);
return true;
}
// Add the new voter to the set of voters and append the record to the log
VoterSet newVoters = partitionState
.lastVoterSet()
.addVoter(
VoterSet.VoterNode.of(
current.voterKey(),
current.voterEndpoints(),
new SupportedVersionRange(
supportedKraftVersions.get().minVersion(),
supportedKraftVersions.get().maxVersion()
)
)
)
.orElseThrow(() ->
new IllegalStateException(
String.format(
"Unable to add %s to the set of voters %s",
current.voterKey(),
partitionState.lastVoterSet()
)
)
);
current.setLastOffset(leaderState.appendVotersRecord(newVoters, currentTimeMs));
if (!current.ackWhenCommitted()) {
// complete the future to send response, but do not reset the state,
// since the new voter set is not yet committed
current.future().complete(RaftUtil.addVoterResponse(Errors.NONE, null));
}
return true;
}
public void highWatermarkUpdated(LeaderState<?> leaderState) {
leaderState.addVoterHandlerState().ifPresent(current ->
leaderState.highWatermark().ifPresent(highWatermark ->
current.lastOffset().ifPresent(lastOffset -> {
if (highWatermark.offset() > lastOffset) {
// VotersRecord with the added voter was committed; complete the RPC
leaderState.resetAddVoterHandlerState(Errors.NONE, null, Optional.empty());
}
})
)
);
}
private ApiVersionsRequestData buildApiVersionsRequest() {
return new ApiVersionsRequest.Builder().build().data();
}
private boolean validVersionRange(
KRaftVersion finalizedVersion,
Optional<ApiVersionsResponseData.SupportedFeatureKey> supportedKraftVersions
) {
return supportedKraftVersions.isPresent() &&
(supportedKraftVersions.get().minVersion() <= finalizedVersion.featureLevel() &&
supportedKraftVersions.get().maxVersion() >= finalizedVersion.featureLevel());
}
}
| AddVoterHandler |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/AdaptiveScheduler.java | {
"start": 11443,
"end": 11539
} | class ____ used to avoid passing around
* multiple config options.
*/
public static | is |
java | apache__camel | components/camel-mllp/src/test/java/org/apache/camel/test/mllp/PassthroughProcessor.java | {
"start": 1089,
"end": 1641
} | class ____ implements Processor {
String id;
Logger log = LoggerFactory.getLogger(this.getClass());
public PassthroughProcessor(String id) {
this.id = id;
}
@Override
public void process(Exchange exchange) {
String message = exchange.getIn().getBody(String.class);
if (null != message) {
String msh = message.substring(0, message.indexOf('\r'));
log.debug("Processing MSH {}: \n{}\n", id, msh);
}
log.debug("Null inbound message body");
}
}
| PassthroughProcessor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java | {
"start": 3858,
"end": 5371
} | interface ____ {
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
ShardRouting initialize(
String nodeId,
@Nullable String existingAllocationId,
long expectedShardSize,
RoutingChangesObserver routingChangesObserver
);
/**
* Removes and ignores the unassigned shard (will be ignored for this run, but
* will be added back to unassigned once the metadata is constructed again).
* Typically this is used when an allocation decision prevents a shard from being allocated such
* that subsequent consumers of this API won't try to allocate this shard again.
*
* @param attempt the result of the allocation attempt
*/
void removeAndIgnore(UnassignedInfo.AllocationStatus attempt, RoutingChangesObserver changes);
/**
* updates the unassigned info and recovery source on the current unassigned shard
*
* @param unassignedInfo the new unassigned info to use
* @param recoverySource the new recovery source to use
* @return the shard with unassigned info updated
*/
ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource, RoutingChangesObserver changes);
}
}
| UnassignedAllocationHandler |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/CountWindow.java | {
"start": 1489,
"end": 2703
} | class ____ extends Window {
private final long id;
public CountWindow(long id) {
this.id = id;
}
/** Gets the id (0-based) of the window. */
public long getId() {
return id;
}
@Override
public long maxTimestamp() {
return Long.MAX_VALUE;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
CountWindow window = (CountWindow) o;
return id == window.id;
}
@Override
public int hashCode() {
return MathUtils.longToIntWithBitMixing(id);
}
@Override
public String toString() {
return "CountWindow{" + "id=" + id + '}';
}
@Override
public int compareTo(Window o) {
CountWindow that = (CountWindow) o;
return Long.compare(this.id, that.id);
}
// ------------------------------------------------------------------------
// Serializer
// ------------------------------------------------------------------------
/** The serializer used to write the CountWindow type. */
public static | CountWindow |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.