language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/util/BigObjectArray.java | {
"start": 917,
"end": 3623
} | class ____<T> extends AbstractBigArray implements ObjectArray<T> {
private static final BigObjectArray<Long> ESTIMATOR = new BigObjectArray<Long>(0, BigArrays.NON_RECYCLING_INSTANCE);
private Object[][] pages;
/** Constructor. */
BigObjectArray(long size, BigArrays bigArrays) {
super(OBJECT_PAGE_SIZE, bigArrays, true);
this.size = size;
pages = new Object[numPages(size)][];
for (int i = 0; i < pages.length; ++i) {
pages[i] = newObjectPage(i);
}
}
@SuppressWarnings("unchecked")
@Override
public T get(long index) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
return (T) pages[pageIndex][indexInPage];
}
@Override
public void set(long index, T value) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
final Object[] page = pages[pageIndex];
page[indexInPage] = value;
}
@Override
public T getAndSet(long index, T value) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
final Object[] page = pages[pageIndex];
@SuppressWarnings("unchecked")
final T ret = (T) page[indexInPage];
page[indexInPage] = value;
return ret;
}
@Override
protected int numBytesPerElement() {
return Integer.BYTES;
}
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
final int numPages = numPages(newSize);
if (numPages > pages.length) {
pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
}
for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
pages[i] = newObjectPage(i);
}
for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
pages[i] = null;
releasePage(i);
}
this.size = newSize;
}
/** Estimates the number of bytes that would be consumed by an array of the given size. */
public static long estimateRamBytes(final long size) {
return ESTIMATOR.ramBytesEstimated(size);
}
private Object[] newObjectPage(int page) {
if (recycler != null) {
final Recycler.V<Object[]> v = recycler.objectPage();
return registerNewPage(v, page, PageCacheRecycler.OBJECT_PAGE_SIZE);
} else {
return new Object[PageCacheRecycler.OBJECT_PAGE_SIZE];
}
}
}
| BigObjectArray |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java | {
"start": 3988,
"end": 18281
} | class ____ extends SpatialRelations {
SpatialRelationsContains(SpatialCoordinateTypes spatialCoordinateType, CoordinateEncoder encoder, ShapeIndexer shapeIndexer) {
super(ShapeField.QueryRelation.CONTAINS, spatialCoordinateType, encoder, shapeIndexer);
}
@Override
protected boolean geometryRelatesGeometry(BytesRef left, BytesRef right) throws IOException {
Component2D[] rightComponent2Ds = asLuceneComponent2Ds(crsType, fromBytesRef(right));
return geometryRelatesGeometries(left, rightComponent2Ds);
}
@Override
protected void processSourceAndSource(BooleanBlock.Builder builder, int position, BytesRefBlock left, BytesRefBlock right)
throws IOException {
if (right.getValueCount(position) < 1) {
builder.appendNull();
} else {
processSourceAndConstant(builder, position, left, asLuceneComponent2Ds(crsType, right, position));
}
}
@Override
protected void processPointDocValuesAndSource(
BooleanBlock.Builder builder,
int position,
LongBlock leftValue,
BytesRefBlock rightValue
) throws IOException {
processPointDocValuesAndConstant(builder, position, leftValue, asLuceneComponent2Ds(crsType, rightValue, position));
}
private boolean geometryRelatesGeometries(BytesRef left, Component2D[] rightComponent2Ds) throws IOException {
Geometry leftGeom = fromBytesRef(left);
GeometryDocValueReader leftDocValueReader = asGeometryDocValueReader(coordinateEncoder, shapeIndexer, leftGeom);
return geometryRelatesGeometries(leftDocValueReader, rightComponent2Ds);
}
private boolean geometryRelatesGeometries(GeometryDocValueReader leftDocValueReader, Component2D[] rightComponent2Ds)
throws IOException {
for (Component2D rightComponent2D : rightComponent2Ds) {
// Every component of the right geometry must be contained within the left geometry for this to pass
if (geometryRelatesGeometry(leftDocValueReader, rightComponent2D) == false) {
return false;
}
}
return true;
}
private void processSourceAndConstant(BooleanBlock.Builder builder, int position, BytesRefBlock left, @Fixed Component2D[] right)
throws IOException {
if (left.getValueCount(position) < 1) {
builder.appendNull();
} else {
final GeometryDocValueReader reader = asGeometryDocValueReader(coordinateEncoder, shapeIndexer, left, position);
builder.appendBoolean(geometryRelatesGeometries(reader, right));
}
}
private void processPointDocValuesAndConstant(
BooleanBlock.Builder builder,
int position,
LongBlock left,
@Fixed Component2D[] right
) throws IOException {
if (left.getValueCount(position) < 1) {
builder.appendNull();
} else {
final GeometryDocValueReader reader = asGeometryDocValueReader(
coordinateEncoder,
shapeIndexer,
left,
position,
spatialCoordinateType::longAsPoint
);
builder.appendBoolean(geometryRelatesGeometries(reader, right));
}
}
}
@FunctionInfo(
returnType = { "boolean" },
description = """
Returns whether the first geometry contains the second geometry.
This is the inverse of the <<esql-st_within,ST_WITHIN>> function.""",
examples = @Example(file = "spatial_shapes", tag = "st_contains-airport_city_boundaries")
)
public SpatialContains(
Source source,
@Param(name = "geomA", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """
Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`.
If `null`, the function returns `null`.""") Expression left,
@Param(name = "geomB", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """
Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`.
If `null`, the function returns `null`.
The second parameter must also have the same coordinate system as the first.
This means it is not possible to combine `geo_*` and `cartesian_*` parameters.""") Expression right
) {
this(source, left, right, false, false);
}
SpatialContains(Source source, Expression left, Expression right, boolean leftDocValues, boolean rightDocValues) {
super(source, left, right, leftDocValues, rightDocValues);
}
private SpatialContains(StreamInput in) throws IOException {
super(in, false, false);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public ShapeRelation queryRelation() {
return ShapeRelation.CONTAINS;
}
@Override
public SpatialContains withDocValues(boolean foundLeft, boolean foundRight) {
// Only update the docValues flags if the field is found in the attributes
boolean leftDV = leftDocValues || foundLeft;
boolean rightDV = rightDocValues || foundRight;
return new SpatialContains(source(), left(), right(), leftDV, rightDV);
}
@Override
protected SpatialContains replaceChildren(Expression newLeft, Expression newRight) {
return new SpatialContains(source(), newLeft, newRight, leftDocValues, rightDocValues);
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, SpatialContains::new, left(), right());
}
@Override
public Object fold(FoldContext ctx) {
try {
GeometryDocValueReader docValueReader = asGeometryDocValueReader(ctx, crsType(), left());
Geometry rightGeom = makeGeometryFromLiteral(ctx, right());
Component2D[] components = asLuceneComponent2Ds(crsType(), rightGeom);
return (crsType() == SpatialCrsType.GEO)
? GEO.geometryRelatesGeometries(docValueReader, components)
: CARTESIAN.geometryRelatesGeometries(docValueReader, components);
} catch (IOException e) {
throw new IllegalArgumentException("Failed to fold constant fields: " + e.getMessage(), e);
}
}
@Override
Map<SpatialEvaluatorFactory.SpatialEvaluatorKey, SpatialEvaluatorFactory<?, ?>> evaluatorRules() {
return evaluatorMap;
}
/**
* To keep the number of evaluators to a minimum, we swap the arguments to get the WITHIN relation.
* This also makes other optimizations, like lucene-pushdown, simpler to develop.
*/
@Override
public SpatialRelatesFunction surrogate() {
if (left().foldable() && right().foldable() == false) {
return new SpatialWithin(source(), right(), left(), rightDocValues, leftDocValues);
}
return this;
}
private static final Map<SpatialEvaluatorFactory.SpatialEvaluatorKey, SpatialEvaluatorFactory<?, ?>> evaluatorMap = new HashMap<>();
static {
// Support geo_point and geo_shape from source and constant combinations
for (DataType spatialType : new DataType[] { GEO_POINT, GEO_SHAPE }) {
for (DataType otherType : new DataType[] { GEO_POINT, GEO_SHAPE }) {
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSources(spatialType, otherType),
new SpatialEvaluatorFactory.SpatialEvaluatorFactoryWithFields(SpatialContainsGeoSourceAndSourceEvaluator.Factory::new)
);
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSourceAndConstant(spatialType, otherType),
new SpatialEvaluatorFactory.SpatialEvaluatorWithConstantArrayFactory(
SpatialContainsGeoSourceAndConstantEvaluator.Factory::new
)
);
if (DataType.isSpatialPoint(spatialType)) {
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSources(spatialType, otherType).withLeftDocValues(),
new SpatialEvaluatorFactory.SpatialEvaluatorFactoryWithFields(
SpatialContainsGeoPointDocValuesAndSourceEvaluator.Factory::new
)
);
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSourceAndConstant(spatialType, otherType).withLeftDocValues(),
new SpatialEvaluatorFactory.SpatialEvaluatorWithConstantArrayFactory(
SpatialContainsGeoPointDocValuesAndConstantEvaluator.Factory::new
)
);
}
}
}
// Support cartesian_point and cartesian_shape from source and constant combinations
for (DataType spatialType : new DataType[] { CARTESIAN_POINT, CARTESIAN_SHAPE }) {
for (DataType otherType : new DataType[] { CARTESIAN_POINT, CARTESIAN_SHAPE }) {
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSources(spatialType, otherType),
new SpatialEvaluatorFactory.SpatialEvaluatorFactoryWithFields(
SpatialContainsCartesianSourceAndSourceEvaluator.Factory::new
)
);
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSourceAndConstant(spatialType, otherType),
new SpatialEvaluatorFactory.SpatialEvaluatorWithConstantArrayFactory(
SpatialContainsCartesianSourceAndConstantEvaluator.Factory::new
)
);
if (DataType.isSpatialPoint(spatialType)) {
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSources(spatialType, otherType).withLeftDocValues(),
new SpatialEvaluatorFactory.SpatialEvaluatorFactoryWithFields(
SpatialContainsCartesianPointDocValuesAndSourceEvaluator.Factory::new
)
);
evaluatorMap.put(
SpatialEvaluatorFactory.SpatialEvaluatorKey.fromSourceAndConstant(spatialType, otherType).withLeftDocValues(),
new SpatialEvaluatorFactory.SpatialEvaluatorWithConstantArrayFactory(
SpatialContainsCartesianPointDocValuesAndConstantEvaluator.Factory::new
)
);
}
}
}
}
@Evaluator(extraName = "GeoSourceAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processGeoSourceAndConstant(BooleanBlock.Builder results, @Position int p, BytesRefBlock left, @Fixed Component2D[] right)
throws IOException {
GEO.processSourceAndConstant(results, p, left, right);
}
@Evaluator(extraName = "GeoSourceAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processGeoSourceAndSource(BooleanBlock.Builder builder, @Position int p, BytesRefBlock left, BytesRefBlock right)
throws IOException {
GEO.processSourceAndSource(builder, p, left, right);
}
@Evaluator(extraName = "GeoPointDocValuesAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processGeoPointDocValuesAndConstant(
BooleanBlock.Builder builder,
@Position int p,
LongBlock left,
@Fixed Component2D[] right
) throws IOException {
GEO.processPointDocValuesAndConstant(builder, p, left, right);
}
@Evaluator(extraName = "GeoPointDocValuesAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processGeoPointDocValuesAndSource(BooleanBlock.Builder builder, @Position int p, LongBlock left, BytesRefBlock right)
throws IOException {
GEO.processPointDocValuesAndSource(builder, p, left, right);
}
@Evaluator(extraName = "CartesianSourceAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processCartesianSourceAndConstant(
BooleanBlock.Builder builder,
@Position int p,
BytesRefBlock left,
@Fixed Component2D[] right
) throws IOException {
CARTESIAN.processSourceAndConstant(builder, p, left, right);
}
@Evaluator(extraName = "CartesianSourceAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processCartesianSourceAndSource(BooleanBlock.Builder builder, @Position int p, BytesRefBlock left, BytesRefBlock right)
throws IOException {
CARTESIAN.processSourceAndSource(builder, p, left, right);
}
@Evaluator(extraName = "CartesianPointDocValuesAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processCartesianPointDocValuesAndConstant(
BooleanBlock.Builder builder,
@Position int p,
LongBlock left,
@Fixed Component2D[] right
) throws IOException {
CARTESIAN.processPointDocValuesAndConstant(builder, p, left, right);
}
@Evaluator(extraName = "CartesianPointDocValuesAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class })
static void processCartesianPointDocValuesAndSource(BooleanBlock.Builder builder, @Position int p, LongBlock left, BytesRefBlock right)
throws IOException {
CARTESIAN.processPointDocValuesAndSource(builder, p, left, right);
}
}
| SpatialRelationsContains |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeCurrentYearMonth.java | {
"start": 869,
"end": 1415
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldBeCurrentYearMonth}</code>.
* @param actual the actual value in the failed assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeCurrentYearMonth(YearMonth actual) {
return new ShouldBeCurrentYearMonth(actual);
}
private ShouldBeCurrentYearMonth(YearMonth actual) {
super("%nExpecting actual:%n %s%nto be the current YearMonth but was not.", actual);
}
}
| ShouldBeCurrentYearMonth |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/FeatureMetadata.java | {
"start": 1207,
"end": 4238
} | class ____ {
private final Map<String, FinalizedVersionRange> finalizedFeatures;
private final Optional<Long> finalizedFeaturesEpoch;
private final Map<String, SupportedVersionRange> supportedFeatures;
FeatureMetadata(final Map<String, FinalizedVersionRange> finalizedFeatures,
final Optional<Long> finalizedFeaturesEpoch,
final Map<String, SupportedVersionRange> supportedFeatures) {
this.finalizedFeatures = new HashMap<>(finalizedFeatures);
this.finalizedFeaturesEpoch = finalizedFeaturesEpoch;
this.supportedFeatures = new HashMap<>(supportedFeatures);
}
/**
* Returns a map of finalized feature versions. Each entry in the map contains a key being a
* feature name and the value being a range of version levels supported by every broker in the
* cluster.
*/
public Map<String, FinalizedVersionRange> finalizedFeatures() {
return new HashMap<>(finalizedFeatures);
}
/**
* The epoch for the finalized features.
* If the returned value is empty, it means the finalized features are absent/unavailable.
*/
public Optional<Long> finalizedFeaturesEpoch() {
return finalizedFeaturesEpoch;
}
/**
* Returns a map of supported feature versions. Each entry in the map contains a key being a
* feature name and the value being a range of versions supported by a particular broker in the
* cluster.
*/
public Map<String, SupportedVersionRange> supportedFeatures() {
return new HashMap<>(supportedFeatures);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof FeatureMetadata)) {
return false;
}
final FeatureMetadata that = (FeatureMetadata) other;
return Objects.equals(this.finalizedFeatures, that.finalizedFeatures) &&
Objects.equals(this.finalizedFeaturesEpoch, that.finalizedFeaturesEpoch) &&
Objects.equals(this.supportedFeatures, that.supportedFeatures);
}
@Override
public int hashCode() {
return Objects.hash(finalizedFeatures, finalizedFeaturesEpoch, supportedFeatures);
}
private static <ValueType> String mapToString(final Map<String, ValueType> featureVersionsMap) {
return String.format(
"{%s}",
featureVersionsMap
.entrySet()
.stream()
.map(entry -> String.format("(%s -> %s)", entry.getKey(), entry.getValue()))
.collect(joining(", "))
);
}
@Override
public String toString() {
return String.format(
"FeatureMetadata{finalizedFeatures:%s, finalizedFeaturesEpoch:%s, supportedFeatures:%s}",
mapToString(finalizedFeatures),
finalizedFeaturesEpoch.map(Object::toString).orElse("<none>"),
mapToString(supportedFeatures));
}
}
| FeatureMetadata |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/WorkerThreadPoolHierarchicalTestExecutorService.java | {
"start": 20393,
"end": 20602
} | enum ____ {
EXECUTED_BY_DIFFERENT_WORKER, RESOURCE_LOCK_UNAVAILABLE, EXECUTED_BY_THIS_WORKER;
private boolean isExecuted() {
return this != RESOURCE_LOCK_UNAVAILABLE;
}
}
private | WorkStealResult |
java | apache__camel | components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyHttpSSLSNITest.java | {
"start": 1597,
"end": 5674
} | class ____ extends BaseNettyTest {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testSSLAddsDefaultServerNameIndication() throws Exception {
getMockEndpoint("mock:output").expectedBodiesReceived("localhost");
context.getRegistry().bind("customSSLContextParameters", createSSLContextParameters(null));
context.addRoutes(nettyServerThatRespondsWithSNI());
context.addRoutes(new RouteBuilder() {
public void configure() {
from("direct:in")
.setHeader(Exchange.HTTP_METHOD, constant("GET"))
.to("netty-http:https://localhost:{{port}}?sslContextParameters=#customSSLContextParameters")
.to("mock:output");
}
});
context.start();
template.sendBody("direct:in", null);
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testSSLAddsCustomServerNameIndication() throws Exception {
String customSNI = "custom.host.name";
getMockEndpoint("mock:output").expectedBodiesReceived(customSNI);
SSLContextClientParameters customClientParameters = new SSLContextClientParameters();
customClientParameters.setSniHostName(customSNI);
context.getRegistry().bind("customSSLContextParameters", createSSLContextParameters(customClientParameters));
context.addRoutes(nettyServerThatRespondsWithSNI());
context.addRoutes(new RouteBuilder() {
public void configure() {
from("direct:in")
.setHeader(Exchange.HTTP_METHOD, constant("GET"))
.to("netty-http:https://localhost:{{port}}?sslContextParameters=#customSSLContextParameters")
.to("mock:output");
}
});
context.start();
template.sendBody("direct:in", null);
MockEndpoint.assertIsSatisfied(context);
}
private SSLContextParameters createSSLContextParameters(SSLContextClientParameters clientParameters) {
SSLContextParameters sslContextParameters = new SSLContextParameters();
sslContextParameters.setClientParameters(clientParameters);
KeyStoreParameters trustStoreParameters = new KeyStoreParameters();
trustStoreParameters.setResource("jsse/localhost.p12");
trustStoreParameters.setPassword("changeit");
TrustManagersParameters trustManagersParameters = new TrustManagersParameters();
trustManagersParameters.setKeyStore(trustStoreParameters);
sslContextParameters.setTrustManagers(trustManagersParameters);
return sslContextParameters;
}
private RouteBuilder nettyServerThatRespondsWithSNI() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty-http:https://localhost:{{port}}?ssl=true&passphrase=changeit&keyStoreResource=jsse/localhost.p12&trustStoreResource=jsse/localhost.p12")
.process(exchange -> {
ExtendedSSLSession extendedSSLSession
= exchange.getIn().getHeader(NettyConstants.NETTY_SSL_SESSION, ExtendedSSLSession.class);
if (extendedSSLSession != null) {
List<SNIServerName> serverNames = extendedSSLSession.getRequestedServerNames();
if (serverNames.size() == 1) {
exchange.getMessage().setBody(((SNIHostName) serverNames.get(0)).getAsciiName());
} else {
exchange.getMessage().setBody("SNI is missing or incorrect");
}
} else {
exchange.getMessage().setBody("Cannot determine success without ExtendedSSLSession");
}
});
}
};
}
}
| NettyHttpSSLSNITest |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/config/annotation/DubboReference.java | {
"start": 3549,
"end": 5212
} | interface ____
*/
@Deprecated
boolean generic() default false;
/**
* When enable, prefer to call local service in the same JVM if it's present, default value is true
*
* @deprecated using scope="local" or scope="remote" instead
*/
@Deprecated
boolean injvm() default true;
/**
* Check if service provider is available during boot up, default value is true
*/
boolean check() default true;
/**
* Whether eager initialize the reference bean when all properties are set, default value is true ( null as true)
*
* @see ReferenceConfigBase#shouldInit()
*/
boolean init() default true;
/**
* Whether to make connection when the client is created, the default value is false
*/
boolean lazy() default false;
/**
* Export an stub service for event dispatch, default value is false.
* <p>
* see org.apache.dubbo.rpc.Constants#STUB_EVENT_METHODS_KEY
*/
boolean stubevent() default false;
/**
* Whether to reconnect if connection is lost, if not specify, reconnect is enabled by default, and the interval
* for retry connecting is 2000 ms
* <p>
* see org.apache.dubbo.remoting.Constants#DEFAULT_RECONNECT_PERIOD
*/
String reconnect() default "";
/**
* Whether to stick to the same node in the cluster, the default value is false
* <p>
* see Constants#DEFAULT_CLUSTER_STICKY
*/
boolean sticky() default false;
/**
* How the proxy is generated, legal values include: jdk, javassist
*/
String proxy() default "";
/**
* Service stub name, use | class |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/model/ast/builder/MutationGroupBuilder.java | {
"start": 956,
"end": 4458
} | class ____ implements SelectableConsumer {
private final MutationType mutationType;
private final EntityMutationTarget mutationTarget;
private final Map<String, TableMutationBuilder<?>> tableMutationBuilderMap;
public MutationGroupBuilder(MutationType mutationType, EntityMutationTarget mutationTarget) {
this.mutationType = mutationType;
this.mutationTarget = mutationTarget;
this.tableMutationBuilderMap = new LinkedHashMap<>();
}
public MutationType getMutationType() {
return mutationType;
}
public EntityMutationTarget getMutationTarget() {
return mutationTarget;
}
public <B extends TableMutationBuilder<?>> B findTableDetailsBuilder(String name) {
//noinspection unchecked
return (B) tableMutationBuilderMap.get( name );
}
public <B extends TableMutationBuilder<?>> B getTableDetailsBuilder(String name) {
final B builder = findTableDetailsBuilder( name );
if ( builder == null ) {
throw new RuntimeException(
"Expecting already existing TableMutationBuilder : " + name
);
}
return builder;
}
public void addTableDetailsBuilder(TableMutationBuilder<?> builder) {
tableMutationBuilderMap.put( builder.getMutatingTable().getTableName(), builder );
}
public void forEachTableMutationBuilder(Consumer<TableMutationBuilder<?>> consumer) {
tableMutationBuilderMap.forEach( (name, mutationBuilder) -> consumer.accept( mutationBuilder ) );
}
@Override
public void accept(int selectionIndex, SelectableMapping selectableMapping) {
final EntityPersister entityPersister = mutationTarget.getTargetPart().getEntityPersister();
final String tableNameForMutation = entityPersister.physicalTableNameForMutation( selectableMapping );
final ColumnValuesTableMutationBuilder mutationBuilder = findTableDetailsBuilder( tableNameForMutation );
mutationBuilder.addValueColumn( selectableMapping );
}
public MutationGroup buildMutationGroup() {
if ( tableMutationBuilderMap.isEmpty() ) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"Mutation group contained no table mutations - %s : `%s`",
mutationType,
mutationTarget.getNavigableRole().getFullPath()
)
);
}
if ( tableMutationBuilderMap.size() == 1 ) {
final TableMutationBuilder<?> tableMutationBuilder = tableMutationBuilderMap.entrySet().iterator().next().getValue();
final TableMutation<?> mutation = tableMutationBuilder.buildMutation();
if ( mutation == null ) {
return new MutationGroupNone( mutationType, mutationTarget );
}
return new MutationGroupSingle( mutationType, mutationTarget, mutation );
}
final ArrayList<TableMutation<?>> tableMutations = new ArrayList<>( tableMutationBuilderMap.size() );
tableMutationBuilderMap.forEach( (name, tableDetailsBuilder) -> {
final TableMutation<?> tableMutation = tableDetailsBuilder.buildMutation();
if ( tableMutation != null ) {
tableMutations.add( tableMutation );
}
} );
if ( tableMutations.isEmpty() ) {
return new MutationGroupNone( mutationType, mutationTarget );
}
if ( tableMutations.size() == 1 ) {
return new MutationGroupSingle( mutationType, mutationTarget, tableMutations.get( 0 ) );
}
return new MutationGroupStandard( mutationType, mutationTarget, tableMutations );
}
@Override
public String toString() {
return String.format(
Locale.ROOT,
"MutationGroupBuilder( %s:`%s` )",
mutationType.name(),
mutationTarget.getNavigableRole().getFullPath()
);
}
}
| MutationGroupBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/enumeratedvalue/EnumeratedValueTests.java | {
"start": 1124,
"end": 4082
} | class ____ {
@DomainModel(annotatedClasses = Person.class)
@SessionFactory(useCollectingStatementInspector = true)
@Test
void testBasicUsage(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
session.persist( new Person( 1, "John", Gender.MALE, Status.ACTIVE ) );
} );
scope.inTransaction( (session) -> {
session.doWork( (connection) -> {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery( "select gender, status from persons" )) {
assertThat( resultSet.next() ).isTrue();
final String storedGender = resultSet.getString( 1 );
assertThat( storedGender ).isEqualTo( "M" );
final int storedStatus = resultSet.getInt( 2 );
assertThat( storedStatus ).isEqualTo( 200 );
}
}
} );
} );
}
@DomainModel(annotatedClasses = Person.class)
@SessionFactory(useCollectingStatementInspector = true)
@Test
void testNulls(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
session.persist( new Person( 1, "John", null, null ) );
} );
scope.inTransaction( (session) -> {
session.doWork( (connection) -> {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery( "select gender, status from persons" )) {
assertThat( resultSet.next() ).isTrue();
final String storedGender = resultSet.getString( 1 );
assertThat( resultSet.wasNull() ).isTrue();
assertThat( storedGender ).isNull();
final int storedStatus = resultSet.getInt( 2 );
assertThat( resultSet.wasNull() ).isTrue();
}
}
} );
} );
}
@DomainModel(annotatedClasses = Person.class)
@SessionFactory(useCollectingStatementInspector = true)
@RequiresDialectFeature( feature = DialectFeatureChecks.SupportsColumnCheck.class )
@Test
void verifyCheckConstraints(SessionFactoryScope scope) {
scope.inTransaction( (session) -> session.doWork( (connection) -> {
try (PreparedStatement statement = connection.prepareStatement( "insert into persons (id, gender) values (?, ?)" ) ) {
statement.setInt( 1, 100 );
// this would work without check constraints or with check constraints based solely on EnumType#STRING
statement.setString( 2, "MALE" );
statement.executeUpdate();
fail( "Expecting a failure" );
}
catch (SQLException expected) {
}
try (PreparedStatement statement = connection.prepareStatement( "insert into persons (id, status) values (?, ?)" ) ) {
statement.setInt( 1, 101 );
// this would work without check constraints or with check constraints based solely on EnumType#ORDINAL
statement.setInt( 2, 1 );
statement.executeUpdate();
fail( "Expecting a failure" );
}
catch (SQLException expected) {
}
} ) );
}
@AfterEach
void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
public | EnumeratedValueTests |
java | spring-projects__spring-security | docs/src/test/java/org/springframework/security/docs/reactive/configuration/customizerbeanordering/CustomizerBeanOrderingTests.java | {
"start": 1589,
"end": 2690
} | class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
private WebTestClient webTest;
@Test
void authorizationOrdered() throws Exception {
this.spring.register(
CustomizerBeanOrderingConfiguration.class).autowire();
// @formatter:off
this.webTest.mutateWith(mockUser("admin").roles("ADMIN"))
.get()
.uri("https://localhost/admins/1")
.exchange()
.expectStatus().isOk();
this.webTest.mutateWith(mockUser("user").roles("USER"))
.get()
.uri("https://localhost/admins/1")
.exchange()
.expectStatus().isForbidden();
this.webTest.mutateWith(mockUser("user").roles("USER"))
.get()
.uri("https://localhost/users/1")
.exchange()
.expectStatus().isOk();
this.webTest.mutateWith(mockUser("user").roles("OTHER"))
.get()
.uri("https://localhost/users/1")
.exchange()
.expectStatus().isForbidden();
this.webTest.mutateWith(mockUser("authenticated").roles("OTHER"))
.get()
.uri("https://localhost/other")
.exchange()
.expectStatus().isOk();
// @formatter:on
}
}
| CustomizerBeanOrderingTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/EnclosedByReverseHeuristicTest.java | {
"start": 3576,
"end": 3874
} | class ____ {
abstract void target(Object first, Object second);
void test(Object first, Object second) {
// BUG: Diagnostic contains: true
target(second, first);
}
}
""")
.doTest();
}
}
| Reverse |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java | {
"start": 1026,
"end": 1642
} | interface ____ {
/**
* @return The key associated with the bucket
*/
Object getKey();
/**
* @return The key associated with the bucket as a string
*/
String getKeyAsString();
/**
* @return The number of documents that fall within this bucket
*/
long getDocCount();
/**
* @return The sub-aggregations of this bucket
*/
InternalAggregations getAggregations();
}
/**
* @return The buckets of this aggregation.
*/
List<? extends Bucket> getBuckets();
}
| Bucket |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/JsonValueSerializationTest.java | {
"start": 1361,
"end": 1692
} | class ____<T>
extends ValueClass<T>
{
public ToStringValueClass(T value) { super(value); }
// Also, need to use this annotation to help
@JsonSerialize(using=ToStringSerializer.class)
@Override
@JsonValue T value() { return super.value(); }
}
final static | ToStringValueClass |
java | apache__camel | dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/DependencyDownloaderClassResolver.java | {
"start": 5472,
"end": 5938
} | class ____ implements ResourceResolverListener {
@Override
public void onLoadResourceAsStream(String uri) {
try {
if ("META-INF/services/org/apache/camel/knative/transport/http-consumer".equals(uri)) {
MainHttpServerFactory.setupHttpServer(getCamelContext(), silent);
}
} catch (Exception e) {
// ignore
}
}
}
}
| KNativeHttpServerFactory |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JooqEndpointBuilderFactory.java | {
"start": 1456,
"end": 1585
} | interface ____ {
/**
* Builder for endpoint consumers for the JOOQ component.
*/
public | JooqEndpointBuilderFactory |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_smoothrat6.java | {
"start": 249,
"end": 1005
} | class ____ extends TestCase {
public void test_set() throws Exception {
Set<Object> set = new LinkedHashSet<Object>();
set.add(3L);
set.add(4L);
Entity entity = new Entity();
entity.setValue(set);
String text = JSON.toJSONString(entity, SerializerFeature.WriteClassName);
System.out.println(text);
Assert.assertEquals("{\"@type\":\"com.alibaba.json.bvt.bug.Bug_for_smoothrat6$Entity\",\"value\":Set[3L,4L]}",
text);
Entity entity2 = JSON.parseObject(text, Entity.class);
Assert.assertEquals(set, entity2.getValue());
//Assert.assertEquals(set.getClass(), entity2.getValue().getClass());
}
public static | Bug_for_smoothrat6 |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/FNV32_CollisionTest_All.java | {
"start": 458,
"end": 3682
} | class ____ extends TestCase {
char[] digLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_".toCharArray();
//char[] digLetters = "0123456789".toCharArray();
Random r = new Random();
int[] powers = new int[10];
{
for (int i = 0; i < powers.length; ++i) {
powers[i] = (int) Math.pow(digLetters.length, i);
}
}
private BitSet[] bits = new BitSet[16];
private File file = new File("/Users/wenshao/Downloads/fnv/hash.bin");
FileOutputStream out;
protected void setUp() throws Exception {
out = new FileOutputStream(file);
for (int i = 0; i < bits.length; ++i) {
bits[i] = new BitSet(Integer.MAX_VALUE);
}
}
protected void tearDown() throws Exception {
out.close();
}
public void test_fnv_hash() throws Exception {
int collisionCount = 0;
long id_hash_64 = fnv_hash("name".toCharArray());
int id_hash_32 = Math.abs((int) id_hash_64);
//bitset.set(id_hash_32);
long v = 0;
long time = System.currentTimeMillis();
NumberFormat format = NumberFormat.getInstance();
byte[] b = new byte[8];
for (int len = 1; len <= 5; ++len){
char[] chars = new char[len];
long n = (long) Math.pow(digLetters.length, chars.length);
for (; v < n; ++v) {
long hash = fnv1a_64_magic_hashcode;
for (int i = 0; i < chars.length; ++i) {
int power = powers[chars.length - i - 1];
int d = (int) ((v / power) % digLetters.length);
char c = digLetters[d];
hash ^= c;
hash *= fnv1a_64_magic_prime;
}
b[7] = (byte) (hash );
b[6] = (byte) (hash >>> 8);
b[5] = (byte) (hash >>> 16);
b[4] = (byte) (hash >>> 24);
b[3] = (byte) (hash >>> 32);
b[2] = (byte) (hash >>> 40);
b[1] = (byte) (hash >>> 48);
b[0] = (byte) (hash >>> 56);
out.write(b);
if (v != 0 && v % (1000 * 1000 * 10) == 0) {
long now = System.currentTimeMillis();
long millis = now - time;
time = now;
System.out.println("millis : " + millis + ", collision " + format.format(collisionCount) + ", " + format.format(v));
}
}
System.out.println("end : " + len);
}
}
String build(long v, int len) {
char[] chars = new char[len];
for (int i = 0; i < chars.length; ++i) {
int power = powers[chars.length - i - 1];
int d = (int) ((v / power) % digLetters.length);
chars[i] = digLetters[d];
}
return new String(chars);
}
static long fnv_hash(char[] chars) {
long hash = fnv1a_64_magic_hashcode;
for (int i = 0; i < chars.length; ++i) {
char c = chars[i];
hash ^= c;
hash *= fnv1a_64_magic_prime;
}
return hash;
}
}
| FNV32_CollisionTest_All |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/PrivateSecurityManagerStackTraceUtil.java | {
"start": 3706,
"end": 3891
} | class ____ extends SecurityManager {
@Override
protected Class<?>[] getClassContext() {
return super.getClassContext();
}
}
}
| PrivateSecurityManager |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest148_national_char.java | {
"start": 609,
"end": 1730
} | class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "create temporary table if not exists `tb_kxipe` (\n" +
"\t col_vttevt national char(128),\n" +
"\t col_wqq national varchar(128)\n" +
") comment 'comment' ";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("CREATE TEMPORARY TABLE IF NOT EXISTS `tb_kxipe` (\n" +
"\tcol_vttevt national char(128),\n" +
"\tcol_wqq national varchar(128)\n" +
") COMMENT 'comment'", stmt.toString());
assertEquals("create temporary table if not exists `tb_kxipe` (\n" +
"\tcol_vttevt national char(128),\n" +
"\tcol_wqq national varchar(128)\n" +
") comment 'comment'", stmt.toLowerCaseString());
}
}
| MySqlCreateTableTest148_national_char |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FakeFSDataInputStream.java | {
"start": 1089,
"end": 1746
} | class ____
extends FilterInputStream implements Seekable, PositionedReadable {
public FakeFSDataInputStream(InputStream in) { super(in); }
public void seek(long pos) throws IOException { }
public long getPos() throws IOException { return -1; }
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
public int read(long position, byte[] buffer, int offset, int length)
throws IOException { return -1; }
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException { }
public void readFully(long position, byte[] buffer) throws IOException { }
}
| FakeFSDataInputStream |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java | {
"start": 17703,
"end": 20390
} | class ____ extends FilterFileSystem {
TargetFileSystem(FileSystem fs) {
super(fs);
}
void writeStreamToFile(InputStream in, PathData target,
boolean lazyPersist, boolean direct)
throws IOException {
FSDataOutputStream out = null;
try {
out = create(target, lazyPersist);
IOUtils.copyBytes(in, out, getConf(), true);
} finally {
if (!direct) {
deleteOnExit(target.path);
}
IOUtils.closeStream(out); // just in case copyBytes didn't
}
}
// tag created files as temp files
FSDataOutputStream create(PathData item, boolean lazyPersist)
throws IOException {
if (lazyPersist) {
long defaultBlockSize;
try {
defaultBlockSize = getDefaultBlockSize();
} catch (NotInMountpointException ex) {
// ViewFileSystem#getDefaultBlockSize() throws an exception as it
// needs a target FS to retrive the default block size from.
// Hence, for ViewFs, we should call getDefaultBlockSize with the
// target path.
defaultBlockSize = getDefaultBlockSize(item.path);
}
EnumSet<CreateFlag> createFlags =
EnumSet.of(CREATE, LAZY_PERSIST, OVERWRITE);
return create(item.path,
FsPermission.getFileDefault().applyUMask(
FsPermission.getUMask(getConf())),
createFlags,
getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT),
(short) 1,
defaultBlockSize,
null,
null);
} else {
return create(item.path, true);
}
}
void rename(PathData src, PathData target) throws IOException {
// the rename method with an option to delete the target is deprecated
if (target.exists && !delete(target.path, false)) {
// too bad we don't know why it failed
PathIOException e = new PathIOException(target.toString());
e.setOperation("delete");
throw e;
}
if (!rename(src.path, target.path)) {
// too bad we don't know why it failed
PathIOException e = new PathIOException(src.toString());
e.setOperation("rename");
e.setTargetPath(target.toString());
throw e;
}
// cancel delete on exit if rename is successful
cancelDeleteOnExit(src.path);
}
@Override
public void close() {
// purge any remaining temp files, but don't close underlying fs
processDeleteOnExit();
}
}
}
| TargetFileSystem |
java | apache__hadoop | hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java | {
"start": 1306,
"end": 5005
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(FileHandle.class);
private static final String HEXES = "0123456789abcdef";
private static final int HANDLE_LEN = 32;
private byte[] handle; // Opaque handle
private long fileId = -1;
private int namenodeId = -1;
public FileHandle() {
handle = null;
}
/**
* Handle is a 32 bytes number. For HDFS, the last 8 bytes is fileId
* For ViewFs, last 8 byte is fileId while 4 bytes before that is namenodeId
* @param v file id
* @param n namenode id
*/
public FileHandle(long v, int n) {
fileId = v;
namenodeId = n;
handle = new byte[HANDLE_LEN];
handle[0] = (byte)(v >>> 56);
handle[1] = (byte)(v >>> 48);
handle[2] = (byte)(v >>> 40);
handle[3] = (byte)(v >>> 32);
handle[4] = (byte)(v >>> 24);
handle[5] = (byte)(v >>> 16);
handle[6] = (byte)(v >>> 8);
handle[7] = (byte)(v >>> 0);
handle[8] = (byte) (n >>> 24);
handle[9] = (byte) (n >>> 16);
handle[10] = (byte) (n >>> 8);
handle[11] = (byte) (n >>> 0);
for (int i = 12; i < HANDLE_LEN; i++) {
handle[i] = (byte) 0;
}
}
public FileHandle(long v) {
this(v, 0);
}
public FileHandle(String s) {
MessageDigest digest;
try {
digest = MessageDigest.getInstance("MD5");
handle = new byte[HANDLE_LEN];
} catch (NoSuchAlgorithmException e) {
LOG.warn("MD5 MessageDigest unavailable.");
handle = null;
return;
}
byte[] in = s.getBytes(StandardCharsets.UTF_8);
digest.update(in);
byte[] digestbytes = digest.digest();
for (int i = 0; i < 16; i++) {
handle[i] = (byte) 0;
}
for (int i = 16; i < 32; i++) {
handle[i] = digestbytes[i - 16];
}
}
public boolean serialize(XDR out) {
out.writeInt(handle.length);
out.writeFixedOpaque(handle);
return true;
}
private long bytesToLong(byte[] data, int offset) {
ByteBuffer buffer = ByteBuffer.allocate(8);
for (int i = 0; i < 8; i++) {
buffer.put(data[i + offset]);
}
buffer.flip(); // need flip
return buffer.getLong();
}
private int bytesToInt(byte[] data, int offset) {
ByteBuffer buffer = ByteBuffer.allocate(4);
for (int i = 0; i < 4; i++) {
buffer.put(data[i + offset]);
}
buffer.flip(); // need flip
return buffer.getInt();
}
public boolean deserialize(XDR xdr) {
if (!XDR.verifyLength(xdr, 32)) {
return false;
}
int size = xdr.readInt();
handle = xdr.readFixedOpaque(size);
fileId = bytesToLong(handle, 0);
namenodeId = bytesToInt(handle, 8);
return true;
}
private static String hex(byte b) {
StringBuilder strBuilder = new StringBuilder();
strBuilder.append(HEXES.charAt((b & 0xF0) >> 4)).append(
HEXES.charAt((b & 0x0F)));
return strBuilder.toString();
}
public long getFileId() {
return fileId;
}
public int getNamenodeId() {
return namenodeId;
}
public byte[] getContent() {
return handle.clone();
}
@Override
public String toString() {
StringBuilder s = new StringBuilder();
for (int i = 0; i < handle.length; i++) {
s.append(hex(handle[i]));
}
return s.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof FileHandle)) {
return false;
}
FileHandle h = (FileHandle) o;
return Arrays.equals(handle, h.handle);
}
@Override
public int hashCode() {
return Arrays.hashCode(handle);
}
public String dumpFileHandle() {
return "fileId: " + fileId + " namenodeId: " + namenodeId;
}
}
| FileHandle |
java | apache__camel | components/camel-jaxb/src/test/java/org/apache/camel/example/JaxbConcurrentDataFormatTest.java | {
"start": 1314,
"end": 2912
} | class ____ extends CamelTestSupport {
@Test
public void testNoConcurrentProducers() throws Exception {
doSendMessages(1, 1);
}
@Test
public void testConcurrentProducers() throws Exception {
doSendMessages(10, 5);
}
private void doSendMessages(int files, int poolSize) throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(files);
getMockEndpoint("mock:result").assertNoDuplicates(body());
ExecutorService executor = Executors.newFixedThreadPool(poolSize);
for (int i = 0; i < files; i++) {
final int index = i;
executor.submit(new Callable<Object>() {
public Object call() {
PurchaseOrder bean = new PurchaseOrder();
bean.setName("Beer");
bean.setAmount(Double.valueOf(index));
bean.setPrice(Double.valueOf(index) * 2);
template.sendBody("direct:start", bean);
return null;
}
});
}
MockEndpoint.assertIsSatisfied(context);
executor.shutdownNow();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
DataFormat jaxb = new JaxbDataFormat("org.apache.camel.example");
from("direct:start").marshal(jaxb).to("direct:marshalled");
from("direct:marshalled").unmarshal(jaxb).to("mock:result");
}
};
}
}
| JaxbConcurrentDataFormatTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java | {
"start": 807,
"end": 2602
} | class ____ extends AbstractScalarFunctionTestCase {
public AsinTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
// values in range
List<TestCaseSupplier> suppliers = TestCaseSupplier.forUnaryCastingToDouble("AsinEvaluator", "val", Math::asin, -1d, 1d, List.of());
suppliers = anyNullIsNull(true, suppliers);
// Values out of range
suppliers.addAll(
TestCaseSupplier.forUnaryCastingToDouble(
"AsinEvaluator",
"val",
k -> null,
Double.NEGATIVE_INFINITY,
Math.nextDown(-1d),
List.of(
"Line 1:1: evaluation of [source] failed, treating result as null. Only first 20 failures recorded.",
"Line 1:1: java.lang.ArithmeticException: Asin input out of range"
)
)
);
suppliers.addAll(
TestCaseSupplier.forUnaryCastingToDouble(
"AsinEvaluator",
"val",
k -> null,
Math.nextUp(1d),
Double.POSITIVE_INFINITY,
List.of(
"Line 1:1: evaluation of [source] failed, treating result as null. Only first 20 failures recorded.",
"Line 1:1: java.lang.ArithmeticException: Asin input out of range"
)
)
);
return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Asin(source, args.get(0));
}
}
| AsinTests |
java | netty__netty | common/src/main/java/io/netty/util/ResourceLeakDetector.java | {
"start": 14497,
"end": 14725
} | interface ____ {
/**
* Will be called once a leak is detected.
*/
void onLeak(String resourceType, String records);
}
@SuppressWarnings("deprecation")
private static final | LeakListener |
java | google__dagger | javatests/dagger/functional/subcomponent/UnresolvableChildComponentModule.java | {
"start": 706,
"end": 1419
} | class ____ {
/**
* Provides a qualified version of the {@link UnresolvableChildComponent}'s builder. If the
* subcomponent were actually installed in a component, this would be a duplicate binding; but
* since that doesn't happen, this binding is OK.
*/
@Provides
@SomeQualifier
static UnresolvableChildComponent.Builder unresolvableChildComponentBuilder() {
return new UnresolvableChildComponent.Builder() {
@Override
public UnresolvableChildComponent build() {
return new UnresolvableChildComponent() {
@Override
public String unboundString() {
return "unbound";
}
};
}
};
}
}
| UnresolvableChildComponentModule |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/TMemoryInputTransport.java | {
"start": 897,
"end": 3361
} | class ____ extends TEndpointTransport {
private byte[] buf_;
private int pos_;
private int endPos_;
public TMemoryInputTransport() throws TTransportException {
this(new TConfiguration());
}
public TMemoryInputTransport(TConfiguration _configuration) throws TTransportException {
this(_configuration, new byte[0]);
}
public TMemoryInputTransport(byte[] buf) throws TTransportException {
this(new TConfiguration(), buf);
}
public TMemoryInputTransport(TConfiguration _configuration, byte[] buf)
throws TTransportException {
this(_configuration, buf, 0, buf.length);
}
public TMemoryInputTransport(byte[] buf, int offset, int length) throws TTransportException {
this(new TConfiguration(), buf, offset, length);
}
public TMemoryInputTransport(TConfiguration _configuration, byte[] buf, int offset, int length)
throws TTransportException {
super(_configuration);
reset(buf, offset, length);
updateKnownMessageSize(length);
}
public void reset(byte[] buf) {
reset(buf, 0, buf.length);
}
public void reset(byte[] buf, int offset, int length) {
buf_ = buf;
pos_ = offset;
endPos_ = offset + length;
try {
resetConsumedMessageSize(-1);
} catch (TTransportException e) {
// ignore
}
}
public void clear() {
buf_ = null;
try {
resetConsumedMessageSize(-1);
} catch (TTransportException e) {
// ignore
}
}
@Override
public void close() {}
@Override
public boolean isOpen() {
return true;
}
@Override
public void open() throws TTransportException {}
@Override
public int read(byte[] buf, int off, int len) throws TTransportException {
int bytesRemaining = getBytesRemainingInBuffer();
int amtToRead = (len > bytesRemaining ? bytesRemaining : len);
if (amtToRead > 0) {
System.arraycopy(buf_, pos_, buf, off, amtToRead);
consumeBuffer(amtToRead);
countConsumedMessageBytes(amtToRead);
}
return amtToRead;
}
@Override
public void write(byte[] buf, int off, int len) throws TTransportException {
throw new UnsupportedOperationException("No writing allowed!");
}
@Override
public byte[] getBuffer() {
return buf_;
}
public int getBufferPosition() {
return pos_;
}
public int getBytesRemainingInBuffer() {
return endPos_ - pos_;
}
public void consumeBuffer(int len) {
pos_ += len;
}
}
| TMemoryInputTransport |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionVertexCancelTest.java | {
"start": 2478,
"end": 11231
} | class ____ {
@RegisterExtension
static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE =
TestingUtils.defaultExecutorExtension();
// --------------------------------------------------------------------------------------------
// Canceling in different states
// --------------------------------------------------------------------------------------------
@Test
void testCancelFromCreated() {
try {
final ExecutionVertex vertex = getExecutionVertex();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CREATED);
vertex.cancel();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(vertex.getFailureInfo()).isNotPresent();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELED)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testCancelFromScheduled() {
try {
final ExecutionVertex vertex = getExecutionVertex();
setVertexState(vertex, ExecutionState.SCHEDULED);
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.SCHEDULED);
vertex.cancel();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(vertex.getFailureInfo()).isNotPresent();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELED)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testCancelFromRunning() {
try {
final ExecutionVertex vertex = getExecutionVertex();
LogicalSlot slot =
new TestingLogicalSlotBuilder()
.setTaskManagerGateway(
new CancelSequenceSimpleAckingTaskManagerGateway(1))
.createTestingLogicalSlot();
setVertexResource(vertex, slot);
setVertexState(vertex, ExecutionState.RUNNING);
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.RUNNING);
vertex.cancel();
vertex.getCurrentExecutionAttempt()
.completeCancelling(); // response by task manager once actually canceled
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(slot.isAlive()).isFalse();
assertThat(vertex.getFailureInfo()).isNotPresent();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELED)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testRepeatedCancelFromRunning() {
try {
final ExecutionVertex vertex = getExecutionVertex();
LogicalSlot slot =
new TestingLogicalSlotBuilder()
.setTaskManagerGateway(
new CancelSequenceSimpleAckingTaskManagerGateway(1))
.createTestingLogicalSlot();
setVertexResource(vertex, slot);
setVertexState(vertex, ExecutionState.RUNNING);
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.RUNNING);
vertex.cancel();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELING);
vertex.cancel();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELING);
// callback by TaskManager after canceling completes
vertex.getCurrentExecutionAttempt().completeCancelling();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(slot.isAlive()).isFalse();
assertThat(vertex.getFailureInfo()).isNotPresent();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELED)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testCancelFromRunningDidNotFindTask() {
// this may happen when the task finished or failed while the call was in progress
try {
final ExecutionVertex vertex = getExecutionVertex();
LogicalSlot slot =
new TestingLogicalSlotBuilder()
.setTaskManagerGateway(
new CancelSequenceSimpleAckingTaskManagerGateway(1))
.createTestingLogicalSlot();
setVertexResource(vertex, slot);
setVertexState(vertex, ExecutionState.RUNNING);
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.RUNNING);
vertex.cancel();
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELING);
assertThat(vertex.getFailureInfo()).isNotPresent();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testCancelCallFails() {
try {
final ExecutionVertex vertex = getExecutionVertex();
LogicalSlot slot =
new TestingLogicalSlotBuilder()
.setTaskManagerGateway(
new CancelSequenceSimpleAckingTaskManagerGateway(0))
.createTestingLogicalSlot();
setVertexResource(vertex, slot);
setVertexState(vertex, ExecutionState.RUNNING);
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.RUNNING);
vertex.cancel();
// Callback fails, leading to CANCELED
assertThat(vertex.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(slot.isAlive()).isFalse();
assertThat(vertex.getStateTimestamp(ExecutionState.CREATED)).isGreaterThan(0);
assertThat(vertex.getStateTimestamp(ExecutionState.CANCELING)).isGreaterThan(0);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
void testSendCancelAndReceiveFail() throws Exception {
final SchedulerBase scheduler =
SchedulerTestingUtils.createScheduler(
JobGraphTestUtils.streamingJobGraph(createNoOpVertex(10)),
ComponentMainThreadExecutorServiceAdapter.forMainThread(),
EXECUTOR_RESOURCE.getExecutor());
final ExecutionGraph graph = scheduler.getExecutionGraph();
scheduler.startScheduling();
ExecutionGraphTestUtils.switchAllVerticesToRunning(graph);
assertThat(graph.getState()).isEqualTo(JobStatus.RUNNING);
final ExecutionVertex[] vertices =
graph.getVerticesTopologically().iterator().next().getTaskVertices();
assertThat(graph.getRegisteredExecutions()).hasSize(vertices.length);
final Execution exec = vertices[3].getCurrentExecutionAttempt();
exec.cancel();
assertThat(exec.getState()).isEqualTo(ExecutionState.CANCELING);
exec.markFailed(new Exception("test"));
assertThat(
exec.getState() == ExecutionState.FAILED
|| exec.getState() == ExecutionState.CANCELED)
.isTrue();
assertThat(exec.getAssignedResource().isAlive()).isFalse();
assertThat(graph.getRegisteredExecutions()).hasSize(vertices.length - 1);
}
private static | ExecutionVertexCancelTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java | {
"start": 81391,
"end": 84864
} | class ____ implements Writeable, ToXContentObject {
private final String index;
private final float boost;
IndexBoost(String index, float boost) {
this.index = index;
this.boost = boost;
}
IndexBoost(StreamInput in) throws IOException {
index = in.readString();
boost = in.readFloat();
}
IndexBoost(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.START_OBJECT) {
token = parser.nextToken();
if (token == XContentParser.Token.FIELD_NAME) {
index = parser.currentName();
} else {
throw new ParsingException(
parser.getTokenLocation(),
"Expected [" + XContentParser.Token.FIELD_NAME + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]",
parser.getTokenLocation()
);
}
token = parser.nextToken();
if (token == XContentParser.Token.VALUE_NUMBER) {
boost = parser.floatValue();
} else {
throw new ParsingException(
parser.getTokenLocation(),
"Expected [" + XContentParser.Token.VALUE_NUMBER + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]",
parser.getTokenLocation()
);
}
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ParsingException(
parser.getTokenLocation(),
"Expected [" + XContentParser.Token.END_OBJECT + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]",
parser.getTokenLocation()
);
}
} else {
throw new ParsingException(
parser.getTokenLocation(),
"Expected [" + XContentParser.Token.START_OBJECT + "] in [" + parser.currentName() + "] but found [" + token + "]",
parser.getTokenLocation()
);
}
}
public String getIndex() {
return index;
}
public float getBoost() {
return boost;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeFloat(boost);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(index, boost);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(index, boost);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
IndexBoost other = (IndexBoost) obj;
return Objects.equals(index, other.index) && Objects.equals(boost, other.boost);
}
}
public static | IndexBoost |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng5668AfterPhaseExecutionTest.java | {
"start": 1093,
"end": 2470
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
void testAfterPhaseExecutionOnFailure() throws Exception {
File testDir = extractResources("/mng-5668-after-phase-execution");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
try {
verifier.addCliArgument("-b");
verifier.addCliArgument("concurrent");
verifier.addCliArgument("verify");
verifier.execute();
fail("Build should have failed");
} catch (VerificationException e) {
// expected
}
// Verify that marker files were created in the expected order
verifier.verifyFilePresent("target/before-verify.txt");
verifier.verifyFilePresent("target/verify-failed.txt");
verifier.verifyFilePresent("target/after-verify.txt");
// Verify the execution order through timestamps
long beforeTime = new File(testDir, "target/before-verify.txt").lastModified();
long failTime = new File(testDir, "target/verify-failed.txt").lastModified();
long afterTime = new File(testDir, "target/after-verify.txt").lastModified();
assertTrue(beforeTime <= failTime);
assertTrue(failTime <= afterTime);
}
}
| MavenITmng5668AfterPhaseExecutionTest |
java | apache__kafka | server/src/main/java/org/apache/kafka/server/SimpleApiVersionManager.java | {
"start": 1525,
"end": 3849
} | class ____ implements ApiVersionManager {
private final ApiMessageType.ListenerType listenerType;
private final Features<SupportedVersionRange> brokerFeatures;
private final boolean enableUnstableLastVersion;
private final Supplier<FinalizedFeatures> featuresProvider;
private final ApiVersionsResponseData.ApiVersionCollection apiVersions;
/**
* SimpleApiVersionManager constructor
* @param listenerType the listener type
* @param enableUnstableLastVersion whether to enable unstable last version, see
* {@link org.apache.kafka.server.config.ServerConfigs#UNSTABLE_API_VERSIONS_ENABLE_CONFIG}
* @param featuresProvider a provider to the finalized features supported
*/
public SimpleApiVersionManager(ApiMessageType.ListenerType listenerType,
boolean enableUnstableLastVersion,
Supplier<FinalizedFeatures> featuresProvider) {
this.listenerType = listenerType;
this.brokerFeatures = BrokerFeatures.defaultSupportedFeatures(enableUnstableLastVersion);
this.enableUnstableLastVersion = enableUnstableLastVersion;
this.featuresProvider = featuresProvider;
this.apiVersions = ApiVersionsResponse.collectApis(listenerType, ApiKeys.apisForListener(listenerType), enableUnstableLastVersion);
}
@Override
public boolean enableUnstableLastVersion() {
return enableUnstableLastVersion;
}
@Override
public ApiMessageType.ListenerType listenerType() {
return listenerType;
}
@Override
public ApiVersionsResponse apiVersionResponse(int throttleTimeMs, boolean alterFeatureLevel0) {
FinalizedFeatures currentFeatures = features();
return new ApiVersionsResponse.Builder()
.setThrottleTimeMs(throttleTimeMs)
.setApiVersions(apiVersions)
.setSupportedFeatures(brokerFeatures)
.setFinalizedFeatures(currentFeatures.finalizedFeatures())
.setFinalizedFeaturesEpoch(currentFeatures.finalizedFeaturesEpoch())
.setAlterFeatureLevel0(alterFeatureLevel0)
.build();
}
@Override
public FinalizedFeatures features() {
return featuresProvider.get();
}
}
| SimpleApiVersionManager |
java | apache__camel | core/camel-management/src/test/java/org/apache/camel/management/ManagedResetIncludeProcessorsTest.java | {
"start": 1355,
"end": 4769
} | class ____ extends ManagementTestSupport {
@Test
public void testReset() throws Exception {
// get the stats for the route
MBeanServer mbeanServer = getMBeanServer();
QueryExp queryExp = Query.match(new AttributeValueExp("RouteId"), new StringValueExp("first"));
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), queryExp);
assertEquals(1, set.size());
ObjectName on = set.iterator().next();
// send in 5 messages
template.sendBody("direct:start", "A");
template.sendBody("direct:start", "B");
template.sendBody("direct:start", "C");
template.sendBody("direct:start", "D");
template.sendBody("direct:start", "E");
// and 1 for the 2nd route
template.sendBody("direct:baz", "F");
assertMockEndpointsSatisfied();
// should be 5 on the route
Long completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(5, completed.longValue());
// and on the processors as well
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), queryExp);
assertEquals(3, set.size());
for (ObjectName name : set) {
completed = (Long) mbeanServer.getAttribute(name, "ExchangesCompleted");
assertEquals(5, completed.longValue());
}
// reset which should reset all processors also
mbeanServer.invoke(on, "reset", new Object[] { true }, new String[] { "boolean" });
// should be 0 on the route
completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(0, completed.longValue());
// and on the processors as well
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), queryExp);
assertEquals(3, set.size());
for (ObjectName name : set) {
completed = (Long) mbeanServer.getAttribute(name, "ExchangesCompleted");
assertEquals(0, completed.longValue());
}
// test that the 2nd route is untouched, as we only reset the first route
queryExp = Query.match(new AttributeValueExp("RouteId"), new StringValueExp("second"));
set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), queryExp);
assertEquals(1, set.size());
on = set.iterator().next();
completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(1, completed.longValue());
// and on the processors as well
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), queryExp);
assertEquals(1, set.size());
for (ObjectName name : set) {
completed = (Long) mbeanServer.getAttribute(name, "ExchangesCompleted");
assertEquals(1, completed.longValue());
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("first")
.to("log:foo").id("foo")
.to("log:bar").id("bar")
.to("mock:result").id("mock");
from("direct:baz").routeId("second")
.to("mock:baz").id("baz");
}
};
}
}
| ManagedResetIncludeProcessorsTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java | {
"start": 8447,
"end": 30500
} | class ____ implements BalancerMXBean {
static final Logger LOG = LoggerFactory.getLogger(Balancer.class);
static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
private static final String USAGE = "Usage: hdfs balancer"
+ "\n\t[-policy <policy>]\tthe balancing policy: "
+ BalancingPolicy.Node.INSTANCE.getName() + " or "
+ BalancingPolicy.Pool.INSTANCE.getName()
+ "\n\t[-threshold <threshold>]\tPercentage of disk capacity"
+ "\n\t[-exclude [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tExcludes the specified datanodes."
+ "\n\t[-include [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tIncludes only the specified datanodes."
+ "\n\t[-source [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tPick only the specified datanodes as source nodes."
+ "\n\t[-excludeSource [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tExcludes the specified datanodes to be selected as a source."
+ "\n\t[-target [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tPick only the specified datanodes as target nodes."
+ "\n\t[-excludeTarget [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tExcludes the specified datanodes from being selected as a target."
+ "\n\t[-blockpools <comma-separated list of blockpool ids>]"
+ "\tThe balancer will only run on blockpools included in this list."
+ "\n\t[-idleiterations <idleiterations>]"
+ "\tNumber of consecutive idle iterations (-1 for Infinite) before "
+ "exit."
+ "\n\t[-runDuringUpgrade]"
+ "\tWhether to run the balancer during an ongoing HDFS upgrade."
+ "This is usually not desired since it will not affect used space "
+ "on over-utilized machines."
+ "\n\t[-asService]\tRun as a long running service."
+ "\n\t[-sortTopNodes]"
+ "\tSort datanodes based on the utilization so "
+ "that highly utilized datanodes get scheduled first."
+ "\n\t[-limitOverUtilizedNum <specified maximum number of overUtilized datanodes>]"
+ "\tLimit the maximum number of overUtilized datanodes."
+ "\n\t[-hotBlockTimeInterval]\tprefer to move cold blocks.";
@VisibleForTesting
private static volatile boolean serviceRunning = false;
private static final AtomicInteger EXCEPTIONS_SINCE_LAST_BALANCE =
new AtomicInteger(0);
private static final AtomicInteger
FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE = new AtomicInteger(0);
private final Dispatcher dispatcher;
private final NameNodeConnector nnc;
private final BalancingPolicy policy;
private final Set<String> sourceNodes;
private final Set<String> excludedSourceNodes;
private final Set<String> targetNodes;
private final Set<String> excludedTargetNodes;
private final boolean runDuringUpgrade;
private final double threshold;
private final long maxSizeToMove;
private final long defaultBlockSize;
private final boolean sortTopNodes;
private final int limitOverUtilizedNum;
private final BalancerMetrics metrics;
private ObjectName balancerInfoBeanName;
// all data node lists
private final Collection<Source> overUtilized = new LinkedList<Source>();
private final Collection<Source> aboveAvgUtilized = new LinkedList<Source>();
private final Collection<StorageGroup> belowAvgUtilized
= new LinkedList<StorageGroup>();
private final Collection<StorageGroup> underUtilized
= new LinkedList<StorageGroup>();
/* Check that this Balancer is compatible with the Block Placement Policy
* used by the Namenode.
*/
private static void checkReplicationPolicyCompatibility(Configuration conf
) throws UnsupportedActionException {
BlockPlacementPolicies placementPolicies =
new BlockPlacementPolicies(conf, null, NetworkTopology.getInstance(conf), null);
if (!(placementPolicies.getPolicy(CONTIGUOUS) instanceof
BlockPlacementPolicyDefault)) {
throw new UnsupportedActionException(
"Balancer without BlockPlacementPolicyDefault");
}
}
static long getLong(Configuration conf, String key, long defaultValue) {
final long v = conf.getLong(key, defaultValue);
LOG.info(key + " = " + v + " (default=" + defaultValue + ")");
if (v <= 0) {
throw new HadoopIllegalArgumentException(key + " = " + v + " <= " + 0);
}
return v;
}
static long getLongBytes(Configuration conf, String key, long defaultValue) {
final long v = conf.getLongBytes(key, defaultValue);
LOG.info(key + " = " + v + " (default=" + defaultValue + ")");
if (v <= 0) {
throw new HadoopIllegalArgumentException(key + " = " + v + " <= " + 0);
}
return v;
}
static int getInt(Configuration conf, String key, int defaultValue) {
final int v = conf.getInt(key, defaultValue);
LOG.info(key + " = " + v + " (default=" + defaultValue + ")");
if (v <= 0) {
throw new HadoopIllegalArgumentException(key + " = " + v + " <= " + 0);
}
return v;
}
static int getExceptionsSinceLastBalance() {
return EXCEPTIONS_SINCE_LAST_BALANCE.get();
}
static int getFailedTimesSinceLastSuccessfulBalance() {
return FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE.get();
}
/**
* Construct a balancer.
* Initialize balancer. It sets the value of the threshold, and
* builds the communication proxies to
* namenode as a client and a secondary namenode and retry proxies
* when connection fails.
*/
Balancer(NameNodeConnector theblockpool, BalancerParameters p,
Configuration conf) {
// NameNode configuration parameters for balancing
getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
final long movedWinWidth = getLong(conf,
DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
final int moverThreads = getInt(conf,
DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT);
final int dispatcherThreads = getInt(conf,
DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
final long getBlocksSize = getLongBytes(conf,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
final long getBlocksMinBlockSize = getLongBytes(conf,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
final int blockMoveTimeout = conf.getInt(
DFSConfigKeys.DFS_BALANCER_BLOCK_MOVE_TIMEOUT,
DFSConfigKeys.DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT);
final int maxNoMoveInterval = conf.getInt(
DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY,
DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT);
final long maxIterationTime = conf.getLong(
DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
/**
* Balancer prefer to get blocks which are belong to the cold files
* created before this time period.
*/
final long hotBlockTimeInterval =
p.getHotBlockTimeInterval() != 0L ? p.getHotBlockTimeInterval() :
conf.getTimeDuration(
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
// DataNode configuration parameters for balancing
final int maxConcurrentMovesPerNode = getInt(conf,
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
this.nnc = theblockpool;
this.dispatcher =
new Dispatcher(theblockpool, p.getIncludedNodes(),
p.getExcludedNodes(), movedWinWidth, moverThreads,
dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval,
maxIterationTime, hotBlockTimeInterval, conf);
this.threshold = p.getThreshold();
this.policy = p.getBalancingPolicy();
this.sourceNodes = p.getSourceNodes();
this.excludedSourceNodes = p.getExcludedSourceNodes();
this.targetNodes = p.getTargetNodes();
this.excludedTargetNodes = p.getExcludedTargetNodes();
this.runDuringUpgrade = p.getRunDuringUpgrade();
this.sortTopNodes = p.getSortTopNodes();
this.limitOverUtilizedNum = p.getLimitOverUtilizedNum();
this.maxSizeToMove = getLongBytes(conf,
DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT);
this.defaultBlockSize = getLongBytes(conf,
DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
this.metrics = BalancerMetrics.create(this);
registerBalancerMXBean();
}
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
long capacity = 0L;
for(StorageReport r : report.getStorageReports()) {
if (r.getStorage().getStorageType() == t) {
capacity += r.getCapacity();
}
}
return capacity;
}
private long getRemaining(DatanodeStorageReport report, StorageType t) {
long remaining = 0L;
for(StorageReport r : report.getStorageReports()) {
if (r.getStorage().getStorageType() == t) {
if (r.getRemaining() >= defaultBlockSize) {
remaining += r.getRemaining();
}
}
}
return remaining;
}
/**
* Given a datanode storage set, build a network topology and decide
* over-utilized storages, above average utilized storages,
* below average utilized storages, and underutilized storages.
* The input datanode storage set is shuffled in order to randomize
* to the storage matching later on.
*
* @return the number of bytes needed to move in order to balance the cluster.
*/
private long init(List<DatanodeStorageReport> reports) {
// compute average utilization
for (DatanodeStorageReport r : reports) {
policy.accumulateSpaces(r);
}
policy.initAvgUtilization();
// Store the capacity % of over utilized nodes for sorting, if needed.
Map<Source, Double> overUtilizedPercentage = new HashMap<>();
// create network topology and classify utilization collections:
// over-utilized, above-average, below-average and under-utilized.
long overLoadedBytes = 0L, underLoadedBytes = 0L;
for(DatanodeStorageReport r : reports) {
final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
final boolean isValidSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo()) &&
!Util.isExcluded(excludedSourceNodes, dn.getDatanodeInfo());
final boolean isValidTarget = Util.isIncluded(targetNodes, dn.getDatanodeInfo()) &&
!Util.isExcluded(excludedTargetNodes, dn.getDatanodeInfo());
for(StorageType t : StorageType.getMovableTypes()) {
final Double utilization = policy.getUtilization(r, t);
if (utilization == null) { // datanode does not have such storage type
continue;
}
final double average = policy.getAvgUtilization(t);
if (utilization >= average && !isValidSource) {
LOG.info("{} [{}] utilization {} >= average {}, but it's either not specified"
+ " or excluded as a source; skipping.", dn, t, utilization, average);
continue;
}
if (utilization <= average && !isValidTarget) {
LOG.info("{} [{}] utilization {} <= average {}, but it's either not specified"
+ " or excluded as a target; skipping.",
dn, t, utilization, average);
continue;
}
final double utilizationDiff = utilization - average;
final long capacity = getCapacity(r, t);
final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
final long maxSize2Move = computeMaxSize2Move(capacity,
getRemaining(r, t), utilizationDiff, maxSizeToMove);
final StorageGroup g;
if (utilizationDiff > 0) {
final Source s = dn.addSource(t, maxSize2Move, dispatcher);
if (thresholdDiff <= 0) { // within threshold
aboveAvgUtilized.add(s);
} else {
overLoadedBytes += percentage2bytes(thresholdDiff, capacity);
overUtilized.add(s);
overUtilizedPercentage.put(s, utilization);
}
g = s;
} else {
g = dn.addTarget(t, maxSize2Move);
if (thresholdDiff <= 0) { // within threshold
belowAvgUtilized.add(g);
} else {
underLoadedBytes += percentage2bytes(thresholdDiff, capacity);
underUtilized.add(g);
}
}
dispatcher.getStorageGroupMap().put(g);
}
}
if (sortTopNodes) {
sortOverUtilized(overUtilizedPercentage);
}
// Limit the maximum number of overUtilized datanodes
// If excludedOverUtilizedNum is greater than 0, The overUtilized nodes num is limited
int excludedOverUtilizedNum = Math.max(overUtilized.size() - limitOverUtilizedNum, 0);
if (excludedOverUtilizedNum > 0) {
limitOverUtilizedNum();
}
logUtilizationCollections();
metrics.setNumOfOverUtilizedNodes(overUtilized.size());
metrics.setNumOfUnderUtilizedNodes(underUtilized.size());
Preconditions.checkState(dispatcher.getStorageGroupMap().size() - excludedOverUtilizedNum
== overUtilized.size() + underUtilized.size() + aboveAvgUtilized.size()
+ belowAvgUtilized.size(),
"Mismatched number of storage groups");
// return number of bytes to be moved in order to make the cluster balanced
return Math.max(overLoadedBytes, underLoadedBytes);
}
private void sortOverUtilized(Map<Source, Double> overUtilizedPercentage) {
Preconditions.checkState(overUtilized instanceof List,
"Collection overUtilized is not a List.");
LOG.info("Sorting over-utilized nodes by capacity" +
" to bring down top used datanode capacity faster");
List<Source> list = (List<Source>) overUtilized;
list.sort(
(Source source1, Source source2) ->
(Double.compare(overUtilizedPercentage.get(source2),
overUtilizedPercentage.get(source1)))
);
}
private void limitOverUtilizedNum() {
Preconditions.checkState(overUtilized instanceof LinkedList,
"Collection overUtilized is not a LinkedList.");
LinkedList<Source> list = (LinkedList<Source>) overUtilized;
LOG.info("Limiting over-utilized nodes num, if using the '-sortTopNodes' param," +
" the overUtilized nodes of top will be retained");
int size = overUtilized.size();
for (int i = 0; i < size - limitOverUtilizedNum; i++) {
list.removeLast();
}
}
private static long computeMaxSize2Move(final long capacity, final long remaining,
final double utilizationDiff, final long max) {
final double diff = Math.abs(utilizationDiff);
long maxSizeToMove = percentage2bytes(diff, capacity);
if (utilizationDiff < 0) {
maxSizeToMove = Math.min(remaining, maxSizeToMove);
}
return Math.min(max, maxSizeToMove);
}
private static long percentage2bytes(double percentage, long capacity) {
Preconditions.checkArgument(percentage >= 0, "percentage = %s < 0",
percentage);
return (long)(percentage * capacity / 100.0);
}
/* log the over utilized & under utilized nodes */
private void logUtilizationCollections() {
logUtilizationCollection("over-utilized", overUtilized);
if (LOG.isTraceEnabled()) {
logUtilizationCollection("above-average", aboveAvgUtilized);
logUtilizationCollection("below-average", belowAvgUtilized);
}
logUtilizationCollection("underutilized", underUtilized);
}
private static <T extends StorageGroup>
void logUtilizationCollection(String name, Collection<T> items) {
LOG.info(items.size() + " " + name + ": " + items);
}
/**
* Decide all <source, target> pairs and
* the number of bytes to move from a source to a target
* Maximum bytes to be moved per storage group is
* min(1 Band worth of bytes, MAX_SIZE_TO_MOVE).
* @return total number of bytes to move in this iteration
*/
private long chooseStorageGroups() {
// First, match nodes on the same node group if cluster is node group aware
if (dispatcher.getCluster().isNodeGroupAware()) {
chooseStorageGroups(Matcher.SAME_NODE_GROUP);
}
// Then, match nodes on the same rack
chooseStorageGroups(Matcher.SAME_RACK);
// At last, match all remaining nodes
chooseStorageGroups(Matcher.ANY_OTHER);
return dispatcher.bytesToMove();
}
/** Decide all <source, target> pairs according to the matcher. */
private void chooseStorageGroups(final Matcher matcher) {
/* first step: match each overUtilized datanode (source) to
* one or more underUtilized datanodes (targets).
*/
LOG.info("chooseStorageGroups for " + matcher + ": overUtilized => underUtilized");
chooseStorageGroups(overUtilized, underUtilized, matcher);
/* match each remaining overutilized datanode (source) to
* below average utilized datanodes (targets).
* Note only overutilized datanodes that haven't had that max bytes to move
* satisfied in step 1 are selected
*/
LOG.info("chooseStorageGroups for " + matcher + ": overUtilized => belowAvgUtilized");
chooseStorageGroups(overUtilized, belowAvgUtilized, matcher);
/* match each remaining underutilized datanode (target) to
* above average utilized datanodes (source).
* Note only underutilized datanodes that have not had that max bytes to
* move satisfied in step 1 are selected.
*/
LOG.info("chooseStorageGroups for " + matcher + ": underUtilized => aboveAvgUtilized");
chooseStorageGroups(underUtilized, aboveAvgUtilized, matcher);
}
/**
* For each datanode, choose matching nodes from the candidates. Either the
* datanodes or the candidates are source nodes with (utilization > Avg), and
* the others are target nodes with (utilization < Avg).
*/
private <G extends StorageGroup, C extends StorageGroup>
void chooseStorageGroups(Collection<G> groups, Collection<C> candidates,
Matcher matcher) {
for(final Iterator<G> i = groups.iterator(); i.hasNext();) {
final G g = i.next();
for(; choose4One(g, candidates, matcher); );
if (!g.hasSpaceForScheduling()) {
i.remove();
}
}
}
/**
* For the given datanode, choose a candidate and then schedule it.
* @return true if a candidate is chosen; false if no candidates is chosen.
*/
private <C extends StorageGroup> boolean choose4One(StorageGroup g,
Collection<C> candidates, Matcher matcher) {
final Iterator<C> i = candidates.iterator();
final C chosen = chooseCandidate(g, i, matcher);
if (chosen == null) {
return false;
}
if (g instanceof Source) {
matchSourceWithTargetToMove((Source)g, chosen);
} else {
matchSourceWithTargetToMove((Source)chosen, g);
}
if (!chosen.hasSpaceForScheduling()) {
i.remove();
}
return true;
}
private void matchSourceWithTargetToMove(Source source, StorageGroup target) {
long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove());
final Task task = new Task(target, size);
source.addTask(task);
target.incScheduledSize(task.getSize());
dispatcher.add(source, target);
LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
+ source.getDisplayName() + " to " + target.getDisplayName());
}
/** Choose a candidate for the given datanode. */
private <G extends StorageGroup, C extends StorageGroup>
C chooseCandidate(G g, Iterator<C> candidates, Matcher matcher) {
if (g.hasSpaceForScheduling()) {
for(; candidates.hasNext(); ) {
final C c = candidates.next();
if (!c.hasSpaceForScheduling()) {
candidates.remove();
} else if (matchStorageGroups(c, g, matcher)) {
return c;
}
}
}
return null;
}
private boolean matchStorageGroups(StorageGroup left, StorageGroup right,
Matcher matcher) {
return left.getStorageType() == right.getStorageType()
&& matcher.match(dispatcher.getCluster(),
left.getDatanodeInfo(), right.getDatanodeInfo());
}
/**
* Register BalancerMXBean.
*/
private void registerBalancerMXBean() {
balancerInfoBeanName = MBeans.register("Balancer", "BalancerInfo", this);
}
/* reset all fields in a balancer preparing for the next iteration */
void resetData(Configuration conf) {
this.overUtilized.clear();
this.aboveAvgUtilized.clear();
this.belowAvgUtilized.clear();
this.underUtilized.clear();
this.policy.reset();
this.dispatcher.reset(conf);
DefaultMetricsSystem.removeSourceName(metrics.getName());
if (balancerInfoBeanName != null) {
MBeans.unregister(balancerInfoBeanName);
balancerInfoBeanName = null;
}
}
NameNodeConnector getNnc() {
return nnc;
}
@Override
public String getVersion() {
return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
}
@Override
public String getSoftwareVersion() {
return VersionInfo.getVersion();
}
@Override
public String getCompileInfo() {
return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
+ VersionInfo.getBranch();
}
static | Balancer |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/AbstractNodeSelection.java | {
"start": 622,
"end": 770
} | interface ____ to invoke multi-node operations.
* @param <K> Key type.
* @param <V> Value type.
* @since 4.1
* @author Mark Paluch
*/
abstract | type |
java | apache__hadoop | hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobCreator.java | {
"start": 1474,
"end": 4861
} | enum ____ {
LOADJOB {
@Override
public GridmixJob createGridmixJob(
Configuration gridmixConf, long submissionMillis, JobStory jobdesc,
Path outRoot, UserGroupInformation ugi, int seq) throws IOException {
// Build configuration for this simulated job
Configuration conf = new Configuration(gridmixConf);
dce.configureDistCacheFiles(conf, jobdesc.getJobConf());
return new LoadJob(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
}
@Override
public boolean canEmulateDistCacheLoad() {
return true;
}
},
SLEEPJOB {
private String[] hosts;
@Override
public GridmixJob createGridmixJob(
Configuration conf, long submissionMillis, JobStory jobdesc, Path outRoot,
UserGroupInformation ugi, int seq) throws IOException {
int numLocations = conf.getInt(SLEEPJOB_RANDOM_LOCATIONS, 0);
if (numLocations < 0) numLocations = 0;
if (hosts == null) {
final JobClient client = new JobClient(new JobConf(conf));
ClusterStatus stat = client.getClusterStatus(true);
final int nTrackers = stat.getTaskTrackers();
final ArrayList<String> hostList = new ArrayList<String>(nTrackers);
final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
final Matcher m = trackerPattern.matcher("");
for (String tracker : stat.getActiveTrackerNames()) {
m.reset(tracker);
if (!m.find()) {
continue;
}
final String name = m.group(1);
hostList.add(name);
}
hosts = hostList.toArray(new String[hostList.size()]);
}
return new SleepJob(conf, submissionMillis, jobdesc, outRoot, ugi, seq,
numLocations, hosts);
}
@Override
public boolean canEmulateDistCacheLoad() {
return false;
}
};
public static final String GRIDMIX_JOB_TYPE = "gridmix.job.type";
public static final String SLEEPJOB_RANDOM_LOCATIONS =
"gridmix.sleep.fake-locations";
/**
* Create Gridmix simulated job.
* @param conf configuration of simulated job
* @param submissionMillis At what time submission of this simulated job be
* done
* @param jobdesc JobStory obtained from trace
* @param outRoot gridmix output directory
* @param ugi UGI of job submitter of this simulated job
* @param seq job sequence number
* @return the created simulated job
* @throws IOException
*/
public abstract GridmixJob createGridmixJob(
final Configuration conf, long submissionMillis, final JobStory jobdesc,
Path outRoot, UserGroupInformation ugi, final int seq) throws IOException;
public static JobCreator getPolicy(
Configuration conf, JobCreator defaultPolicy) {
return conf.getEnum(GRIDMIX_JOB_TYPE, defaultPolicy);
}
/**
* @return true if gridmix simulated jobs of this job type can emulate
* distributed cache load
*/
abstract boolean canEmulateDistCacheLoad();
DistributedCacheEmulator dce;
/**
* This method is to be called before calling any other method in JobCreator
* except canEmulateDistCacheLoad(), especially if canEmulateDistCacheLoad()
* returns true for that job type.
* @param e Distributed Cache Emulator
*/
void setDistCacheEmulator(DistributedCacheEmulator e) {
this.dce = e;
}
}
| JobCreator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java | {
"start": 8589,
"end": 13555
} | class ____ {
private final Map<IndexId, Map<Integer, ShardGeneration>> generations = new HashMap<>();
/**
* Filters out all generations that don't belong to any of the supplied {@code indices} and prunes all {@link #DELETED_SHARD_GEN}
* entries from the builder.
*
* @param indices indices to filter for
* @return builder that contains only the given {@code indices} and no {@link #DELETED_SHARD_GEN} entries
*/
public Builder retainIndicesAndPruneDeletes(Set<IndexId> indices) {
generations.keySet().retainAll(indices);
for (IndexId index : indices) {
final Map<Integer, ShardGeneration> shards = generations.getOrDefault(index, Collections.emptyMap());
final Iterator<Map.Entry<Integer, ShardGeneration>> iterator = shards.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Integer, ShardGeneration> entry = iterator.next();
final ShardGeneration generation = entry.getValue();
if (generation.equals(DELETED_SHARD_GEN)) {
iterator.remove();
}
}
if (shards.isEmpty()) {
generations.remove(index);
}
}
return this;
}
public Builder putAll(ShardGenerations shardGenerations) {
shardGenerations.shardGenerations.forEach((indexId, gens) -> {
for (int i = 0; i < gens.size(); i++) {
final ShardGeneration gen = gens.get(i);
if (gen != null) {
put(indexId, i, gen);
}
}
});
return this;
}
public Builder update(UpdatedShardGenerations updatedShardGenerations) {
putAll(updatedShardGenerations.liveIndices());
// For deleted indices, we only update the generations if they are present in the existing generations, i.e.
// they are referenced by other snapshots.
updateIfPresent(updatedShardGenerations.deletedIndices());
return this;
}
public Builder put(IndexId indexId, int shardId, SnapshotsInProgress.ShardSnapshotStatus status) {
// only track generations for successful shard status values
return put(indexId, shardId, status.state().failed() ? null : status.generation());
}
public Builder put(IndexId indexId, int shardId, ShardGeneration generation) {
assert noDuplicateIndicesWithSameName(indexId);
ShardGeneration existingGeneration = generations.computeIfAbsent(indexId, i -> new HashMap<>()).put(shardId, generation);
assert generation != null || existingGeneration == null
: "must not overwrite existing generation with null generation [" + existingGeneration + "]";
return this;
}
private void updateIfPresent(ShardGenerations shardGenerations) {
shardGenerations.shardGenerations.forEach((indexId, gens) -> {
final Map<Integer, ShardGeneration> existingShardGens = generations.get(indexId);
if (existingShardGens != null) {
for (int i = 0; i < gens.size(); i++) {
final ShardGeneration gen = gens.get(i);
if (gen != null) {
existingShardGens.put(i, gen);
}
}
}
});
}
private boolean noDuplicateIndicesWithSameName(IndexId newId) {
for (IndexId id : generations.keySet()) {
if (id.getName().equals(newId.getName()) && id.equals(newId) == false) {
assert false : Strings.format("Unable to add: %s. There's another index id with the same name: %s", newId, id);
}
}
return true;
}
public ShardGenerations build() {
if (generations.isEmpty()) {
return EMPTY;
}
return new ShardGenerations(generations.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
final Set<Integer> shardIds = entry.getValue().keySet();
assert shardIds.isEmpty() == false;
final int size = shardIds.stream().mapToInt(i -> i).max().getAsInt() + 1;
// Create a list that can hold the highest shard id as index and leave null values for shards that don't have
// a map entry.
final ShardGeneration[] gens = new ShardGeneration[size];
entry.getValue().forEach((shardId, generation) -> gens[shardId] = generation);
return Collections.unmodifiableList(Arrays.asList(gens));
})));
}
}
}
| Builder |
java | apache__kafka | test-common/test-common-internal-api/src/main/java/org/apache/kafka/common/test/api/ClusterFeature.java | {
"start": 1187,
"end": 1260
} | interface ____ {
Feature feature();
short version();
}
| ClusterFeature |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveSameFileSystemAs.java | {
"start": 911,
"end": 1454
} | class ____ extends BasicErrorMessageFactory {
private static final String PATH_SHOULD_HAVE_SAME_FILE_SYSTEM_AS_PATH = "%nExpecting path:%n %s%nto have the same file system as path:%n %s";
public static ErrorMessageFactory shouldHaveSameFileSystemAs(final Path actual, final Path expected) {
return new ShouldHaveSameFileSystemAs(actual, expected);
}
private ShouldHaveSameFileSystemAs(final Path actual, final Path expected) {
super(PATH_SHOULD_HAVE_SAME_FILE_SYSTEM_AS_PATH, actual, expected);
}
}
| ShouldHaveSameFileSystemAs |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceStaticTests.java | {
"start": 1869,
"end": 6051
} | class ____ extends ESTestCase {
public void testLimit() {
int textLength = (int) ScalarFunction.MAX_BYTES_REF_RESULT_SIZE / 10;
String text = randomAlphaOfLength((int) ScalarFunction.MAX_BYTES_REF_RESULT_SIZE / 10);
String regex = "^(.+)$";
// 10 times the original text + the remainder
String extraString = "a".repeat((int) ScalarFunction.MAX_BYTES_REF_RESULT_SIZE % 10);
assert textLength * 10 + extraString.length() == ScalarFunction.MAX_BYTES_REF_RESULT_SIZE;
String newStr = "$0$0$0$0$0$0$0$0$0$0" + extraString;
String result = process(text, regex, newStr);
assertThat(result, equalTo(newStr.replaceAll("\\$\\d", text)));
}
public void testTooBig() {
String textAndNewStr = randomAlphaOfLength((int) (ScalarFunction.MAX_BYTES_REF_RESULT_SIZE / 10));
String regex = ".";
String result = process(textAndNewStr, regex, textAndNewStr);
assertNull(result);
assertWarnings(
"Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
"Line -1:-1: java.lang.IllegalArgumentException: "
+ "Creating strings with more than ["
+ ScalarFunction.MAX_BYTES_REF_RESULT_SIZE
+ "] bytes is not supported"
);
}
public void testTooBigWithGroups() {
int textLength = (int) ScalarFunction.MAX_BYTES_REF_RESULT_SIZE / 10;
String text = randomAlphaOfLength(textLength);
String regex = "(.+)";
// 10 times the original text + the remainder + 1
String extraString = "a".repeat(1 + (int) ScalarFunction.MAX_BYTES_REF_RESULT_SIZE % 10);
assert textLength * 10 + extraString.length() == ScalarFunction.MAX_BYTES_REF_RESULT_SIZE + 1;
String newStr = "$0$1$0$1$0$1$0$1$0$1" + extraString;
String result = process(text, regex, newStr);
assertNull(result);
assertWarnings(
"Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
"Line -1:-1: java.lang.IllegalArgumentException: "
+ "Creating strings with more than ["
+ ScalarFunction.MAX_BYTES_REF_RESULT_SIZE
+ "] bytes is not supported"
);
}
public String process(String text, String regex, String newStr) {
try (
var eval = AbstractScalarFunctionTestCase.evaluator(
new Replace(
Source.EMPTY,
field("text", DataType.KEYWORD),
field("regex", DataType.KEYWORD),
field("newStr", DataType.KEYWORD)
)
).get(driverContext());
Block block = eval.eval(row(List.of(new BytesRef(text), new BytesRef(regex), new BytesRef(newStr))));
) {
return block.isNull(0) ? null : ((BytesRef) BlockUtils.toJavaObject(block, 0)).utf8ToString();
}
}
/**
* The following fields and methods were borrowed from AbstractScalarFunctionTestCase
*/
private final List<CircuitBreaker> breakers = Collections.synchronizedList(new ArrayList<>());
private static Page row(List<Object> values) {
return new Page(1, BlockUtils.fromListRow(TestBlockFactory.getNonBreakingInstance(), values));
}
private static FieldAttribute field(String name, DataType type) {
return new FieldAttribute(Source.synthetic(name), name, new EsField(name, type, Map.of(), true, EsField.TimeSeriesFieldType.NONE));
}
private DriverContext driverContext() {
BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking();
CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST);
breakers.add(breaker);
return new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays));
}
@After
public void allMemoryReleased() {
for (CircuitBreaker breaker : breakers) {
assertThat(breaker.getUsed(), equalTo(0L));
}
}
}
| ReplaceStaticTests |
java | redisson__redisson | redisson/src/test/java/org/redisson/RedissonRemoteServiceTest.java | {
"start": 1483,
"end": 1980
} | class ____ implements Serializable {
private String stringField;
public SerializablePojo() {
}
public SerializablePojo(String stringField) {
this.stringField = stringField;
}
public String getStringField() {
return stringField;
}
public void setStringField(String stringField) {
this.stringField = stringField;
}
}
@RRemoteAsync(RemoteInterface.class)
public | SerializablePojo |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/processor/aggregate/jdbc/JdbcAggregateRecoverTest.java | {
"start": 1155,
"end": 3706
} | class ____ extends AbstractJdbcAggregationTestSupport {
private static AtomicInteger counter = new AtomicInteger();
@Override
void configureJdbcAggregationRepository() {
super.configureJdbcAggregationRepository();
// enable recovery
repo.setUseRecovery(true);
// check faster
repo.setRecoveryInterval(500, TimeUnit.MILLISECONDS);
}
@Test
public void testJdbcAggregateRecover() throws Exception {
// should fail the first 2 times and then recover
getMockEndpoint("mock:aggregated").expectedMessageCount(3);
getMockEndpoint("mock:result").expectedBodiesReceived("ABCDE");
// should be marked as redelivered
getMockEndpoint("mock:result").message(0).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
// on the 2nd redelivery attempt we success
getMockEndpoint("mock:result").message(0).header(Exchange.REDELIVERY_COUNTER).isEqualTo(2);
template.sendBodyAndHeader("direct:start", "A", "id", 123);
template.sendBodyAndHeader("direct:start", "B", "id", 123);
template.sendBodyAndHeader("direct:start", "C", "id", 123);
template.sendBodyAndHeader("direct:start", "D", "id", 123);
template.sendBodyAndHeader("direct:start", "E", "id", 123);
MockEndpoint.assertIsSatisfied(context, 30, TimeUnit.SECONDS);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
configureJdbcAggregationRepository();
from("direct:start")
.aggregate(header("id"), new MyAggregationStrategy())
.completionSize(5).aggregationRepository(repo)
.log("aggregated exchange id ${exchangeId} with ${body}")
.to("mock:aggregated")
.delay(1000)
// simulate errors the first two times
.process(new Processor() {
public void process(Exchange exchange) {
int count = counter.incrementAndGet();
if (count <= 2) {
throw new IllegalArgumentException("Damn");
}
}
})
.to("mock:result")
.end();
}
};
}
}
| JdbcAggregateRecoverTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/CompileTimeConstantExpressionMatcherTest.java | {
"start": 4258,
"end": 5231
} | class ____ {
public void m1(final @CompileTimeConstant String s) {
// BUG: Diagnostic contains: true
String s1 = s;
}
public void m2(@CompileTimeConstant String s) {
s = null;
// BUG: Diagnostic contains: false
String s2 = s;
}
public void m3(final String s) {
// BUG: Diagnostic contains: false
String s3 = s;
}
public void m4(@CompileTimeConstant String s) {
// BUG: Diagnostic contains: true
String s4 = s;
}
}
""")
.doTest();
}
@Test
public void finalCompileTimeConstantConstructorParameters() {
testHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.CompileTimeConstant;
public | Test |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/merger/ArrayMerger.java | {
"start": 982,
"end": 2299
} | class ____ implements Merger<Object[]> {
public static final ArrayMerger INSTANCE = new ArrayMerger();
@Override
public Object[] merge(Object[]... items) {
if (ArrayUtils.isEmpty(items)) {
return new Object[0];
}
int i = 0;
while (i < items.length && items[i] == null) {
i++;
}
if (i == items.length) {
return new Object[0];
}
Class<?> type = items[i].getClass().getComponentType();
int totalLen = 0;
for (; i < items.length; i++) {
if (items[i] == null) {
continue;
}
Class<?> itemType = items[i].getClass().getComponentType();
if (itemType != type) {
throw new IllegalArgumentException("Arguments' types are different");
}
totalLen += items[i].length;
}
if (totalLen == 0) {
return new Object[0];
}
Object result = Array.newInstance(type, totalLen);
int index = 0;
for (Object[] array : items) {
if (array != null) {
System.arraycopy(array, 0, result, index, array.length);
index += array.length;
}
}
return (Object[]) result;
}
}
| ArrayMerger |
java | elastic__elasticsearch | libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java | {
"start": 25605,
"end": 38596
} | class ____ grids.
*
* Current digit -> direction -> new ap7 move (at coarser level).
*/
private static final CoordIJK.Direction[][] NEW_ADJUSTMENT_III = new CoordIJK.Direction[][] {
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.K_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.JK_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.K_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.J_AXES_DIGIT,
CoordIJK.Direction.J_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.IJ_AXES_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.JK_AXES_DIGIT,
CoordIJK.Direction.J_AXES_DIGIT,
CoordIJK.Direction.JK_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.I_AXES_DIGIT,
CoordIJK.Direction.IK_AXES_DIGIT,
CoordIJK.Direction.I_AXES_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.K_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.IK_AXES_DIGIT,
CoordIJK.Direction.IK_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT },
{
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.IJ_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.I_AXES_DIGIT,
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.IJ_AXES_DIGIT } };
private static final CoordIJK.Direction[] NEIGHBORSETCLOCKWISE = new CoordIJK.Direction[] {
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.JK_AXES_DIGIT,
CoordIJK.Direction.IJ_AXES_DIGIT,
CoordIJK.Direction.J_AXES_DIGIT,
CoordIJK.Direction.IK_AXES_DIGIT,
CoordIJK.Direction.K_AXES_DIGIT,
CoordIJK.Direction.I_AXES_DIGIT };
private static final CoordIJK.Direction[] NEIGHBORSETCOUNTERCLOCKWISE = new CoordIJK.Direction[] {
CoordIJK.Direction.CENTER_DIGIT,
CoordIJK.Direction.IK_AXES_DIGIT,
CoordIJK.Direction.JK_AXES_DIGIT,
CoordIJK.Direction.K_AXES_DIGIT,
CoordIJK.Direction.IJ_AXES_DIGIT,
CoordIJK.Direction.I_AXES_DIGIT,
CoordIJK.Direction.J_AXES_DIGIT };
/**
* Returns whether or not the provided H3Indexes are neighbors.
* @param origin The origin H3 index.
* @param destination The destination H3 index.
* @return true if the indexes are neighbors, false otherwise
*/
public static boolean areNeighbours(long origin, long destination) {
// Make sure they're hexagon indexes
if (H3Index.H3_get_mode(origin) != Constants.H3_CELL_MODE) {
throw new IllegalArgumentException("Invalid cell: " + origin);
}
if (H3Index.H3_get_mode(destination) != Constants.H3_CELL_MODE) {
throw new IllegalArgumentException("Invalid cell: " + destination);
}
// Hexagons cannot be neighbors with themselves
if (origin == destination) {
return false;
}
final int resolution = H3Index.H3_get_resolution(origin);
// Only hexagons in the same resolution can be neighbors
if (resolution != H3Index.H3_get_resolution(destination)) {
return false;
}
// H3 Indexes that share the same parent are very likely to be neighbors
// Child 0 is neighbor with all of its parent's 'offspring', the other
// children are neighbors with 3 of the 7 children. So a simple comparison
// of origin and destination parents and then a lookup table of the children
// is a super-cheap way to possibly determine they are neighbors.
if (resolution > 1) {
long originParent = H3.h3ToParent(origin);
long destinationParent = H3.h3ToParent(destination);
if (originParent == destinationParent) {
int originResDigit = H3Index.H3_get_index_digit(origin, resolution);
int destinationResDigit = H3Index.H3_get_index_digit(destination, resolution);
if (originResDigit == CoordIJK.Direction.CENTER_DIGIT.digit()
|| destinationResDigit == CoordIJK.Direction.CENTER_DIGIT.digit()) {
return true;
}
if (originResDigit >= CoordIJK.Direction.INVALID_DIGIT.digit()) {
// Prevent indexing off the end of the array below
throw new IllegalArgumentException("");
}
if ((originResDigit == CoordIJK.Direction.K_AXES_DIGIT.digit()
|| destinationResDigit == CoordIJK.Direction.K_AXES_DIGIT.digit()) && H3.isPentagon(originParent)) {
// If these are invalid cells, fail rather than incorrectly
// reporting neighbors. For pentagon cells that are actually
// neighbors across the deleted subsequence, they will fail the
// optimized check below, but they will be accepted by the
// gridDisk check below that.
throw new IllegalArgumentException("Undefined error checking for neighbors");
}
// These sets are the relevant neighbors in the clockwise
// and counter-clockwise
if (NEIGHBORSETCLOCKWISE[originResDigit].digit() == destinationResDigit
|| NEIGHBORSETCOUNTERCLOCKWISE[originResDigit].digit() == destinationResDigit) {
return true;
}
}
}
// Otherwise, we have to determine the neighbor relationship the "hard" way.
for (int i = 0; i < 6; i++) {
long neighbor = h3NeighborInDirection(origin, DIRECTIONS[i].digit());
if (neighbor != -1) {
// -1 is an expected case when trying to traverse off of
// pentagons.
if (destination == neighbor) {
return true;
}
}
}
return false;
}
/**
* Returns the hexagon index neighboring the origin, in the direction dir.
*
* Implementation note: The only reachable case where this returns -1 is if the
* origin is a pentagon and the translation is in the k direction. Thus,
* -1 can only be returned if origin is a pentagon.
*
* @param origin Origin index
* @param dir Direction to move in
* @return H3Index of the specified neighbor or -1 if there is no more neighbor
*/
static long h3NeighborInDirection(long origin, int dir) {
long current = origin;
int newRotations = 0;
int oldBaseCell = H3Index.H3_get_base_cell(current);
if (oldBaseCell < 0 || oldBaseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE
// Base cells less than zero can not be represented in an index
throw new IllegalArgumentException("Invalid base cell looking for neighbor");
}
int oldLeadingDigit = H3Index.h3LeadingNonZeroDigit(current);
// Adjust the indexing digits and, if needed, the base cell.
int r = H3Index.H3_get_resolution(current) - 1;
while (true) {
if (r == -1) {
current = H3Index.H3_set_base_cell(current, baseCellNeighbors[oldBaseCell][dir]);
newRotations = baseCellNeighbor60CCWRots[oldBaseCell][dir];
if (H3Index.H3_get_base_cell(current) == INVALID_BASE_CELL) {
// Adjust for the deleted k vertex at the base cell level.
// This edge actually borders a different neighbor.
current = H3Index.H3_set_base_cell(current, baseCellNeighbors[oldBaseCell][CoordIJK.Direction.IK_AXES_DIGIT.digit()]);
newRotations = baseCellNeighbor60CCWRots[oldBaseCell][CoordIJK.Direction.IK_AXES_DIGIT.digit()];
// perform the adjustment for the k-subsequence we're skipping
// over.
current = H3Index.h3Rotate60ccw(current);
}
break;
} else {
int oldDigit = H3Index.H3_get_index_digit(current, r + 1);
int nextDir;
if (oldDigit == CoordIJK.Direction.INVALID_DIGIT.digit()) {
// Only possible on invalid input
throw new IllegalArgumentException();
} else if (H3Index.isResolutionClassIII(r + 1)) {
current = H3Index.H3_set_index_digit(current, r + 1, NEW_DIGIT_II[oldDigit][dir].digit());
nextDir = NEW_ADJUSTMENT_II[oldDigit][dir].digit();
} else {
current = H3Index.H3_set_index_digit(current, r + 1, NEW_DIGIT_III[oldDigit][dir].digit());
nextDir = NEW_ADJUSTMENT_III[oldDigit][dir].digit();
}
if (nextDir != CoordIJK.Direction.CENTER_DIGIT.digit()) {
dir = nextDir;
r--;
} else {
// No more adjustment to perform
break;
}
}
}
int newBaseCell = H3Index.H3_get_base_cell(current);
if (BaseCells.isBaseCellPentagon(newBaseCell)) {
// force rotation out of missing k-axes sub-sequence
if (H3Index.h3LeadingNonZeroDigit(current) == CoordIJK.Direction.K_AXES_DIGIT.digit()) {
if (oldBaseCell != newBaseCell) {
// in this case, we traversed into the deleted
// k subsequence of a pentagon base cell.
// We need to rotate out of that case depending
// on how we got here.
// check for a cw/ccw offset face; default is ccw
if (BaseCells.baseCellIsCwOffset(newBaseCell, BaseCells.getBaseFaceIJK(oldBaseCell).face)) {
current = H3Index.h3Rotate60cw(current);
} else {
// See cwOffsetPent in testGridDisk.c for why this is
// unreachable.
current = H3Index.h3Rotate60ccw(current); // LCOV_EXCL_LINE
}
} else {
// In this case, we traversed into the deleted
// k subsequence from within the same pentagon
// base cell.
if (oldLeadingDigit == CoordIJK.Direction.CENTER_DIGIT.digit()) {
// Undefined: the k direction is deleted from here
return -1L;
} else if (oldLeadingDigit == CoordIJK.Direction.JK_AXES_DIGIT.digit()) {
// Rotate out of the deleted k subsequence
// We also need an additional change to the direction we're
// moving in
current = H3Index.h3Rotate60ccw(current);
} else if (oldLeadingDigit == CoordIJK.Direction.IK_AXES_DIGIT.digit()) {
// Rotate out of the deleted k subsequence
// We also need an additional change to the direction we're
// moving in
current = H3Index.h3Rotate60cw(current);
} else {
// Should never occur
throw new IllegalArgumentException("Undefined error looking for neighbor"); // LCOV_EXCL_LINE
}
}
}
for (int i = 0; i < newRotations; i++) {
current = H3Index.h3RotatePent60ccw(current);
}
} else {
for (int i = 0; i < newRotations; i++) {
current = H3Index.h3Rotate60ccw(current);
}
}
return current;
}
}
| III |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestPropertyTypeInfo.java | {
"start": 1189,
"end": 1497
} | class ____ {
@JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.WRAPPER_ARRAY)
public FieldWrapperBean[] beans;
public FieldWrapperBeanArray() { }
public FieldWrapperBeanArray(FieldWrapperBean[] beans) { this.beans = beans; }
}
static | FieldWrapperBeanArray |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/localdatetime/LocalDateTimeAssert_hasMonth_Test.java | {
"start": 1226,
"end": 2523
} | class ____ {
@Test
void should_fail_if_given_month_is_null() {
// GIVEN
LocalDateTime actual = LocalDateTime.now();
Month month = null;
// WHEN
ThrowingCallable code = () -> assertThat(actual).hasMonth(month);
// Then
assertThatIllegalArgumentException().isThrownBy(code)
.withMessage("The given Month should not be null");
}
@Test
void should_fail_if_actual_is_null() {
// GIVEN
LocalDateTime actual = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).hasMonth(Month.MAY));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_not_in_given_month() {
// GIVEN
LocalDateTime actual = LocalDateTime.of(2020, Month.FEBRUARY, 2, 3, 4, 5);
Month month = Month.JUNE;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).hasMonth(month));
// THEN
then(assertionError).hasMessage(shouldHaveMonth(actual, month).create());
}
@Test
void should_pass_if_actual_is_in_given_month() {
// GIVEN
LocalDateTime actual = LocalDateTime.of(2022, Month.APRIL, 16, 20, 18, 59);
// WHEN/THEN
then(actual).hasMonth(Month.APRIL);
}
}
| LocalDateTimeAssert_hasMonth_Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java | {
"start": 1144,
"end": 1619
} | interface ____ {
GeoGridAggregator<? extends InternalGeoGrid<?>> build(
String name,
AggregatorFactories factories,
ValuesSource valuesSource,
int precision,
GeoBoundingBox geoBoundingBox,
int requiredSize,
int shardSize,
AggregationContext context,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException;
}
| GeoGridAggregatorSupplier |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/RollingRandomAccessFileAppender.java | {
"start": 2652,
"end": 17281
} | class ____<B extends Builder<B>> extends AbstractOutputStreamAppender.Builder<B>
implements org.apache.logging.log4j.core.util.Builder<RollingRandomAccessFileAppender> {
public Builder() {
setBufferSize(RollingRandomAccessFileManager.DEFAULT_BUFFER_SIZE);
setIgnoreExceptions(true);
setImmediateFlush(true);
}
@PluginBuilderAttribute("fileName")
private String fileName;
@PluginBuilderAttribute("filePattern")
private String filePattern;
@PluginBuilderAttribute("append")
private boolean append = true;
@PluginElement("Policy")
private TriggeringPolicy policy;
@PluginElement("Strategy")
private RolloverStrategy strategy;
@PluginBuilderAttribute("advertise")
private boolean advertise;
@PluginBuilderAttribute("advertiseURI")
private String advertiseURI;
@PluginBuilderAttribute
private String filePermissions;
@PluginBuilderAttribute
private String fileOwner;
@PluginBuilderAttribute
private String fileGroup;
@Override
public RollingRandomAccessFileAppender build() {
final String name = getName();
if (name == null) {
LOGGER.error("No name provided for FileAppender");
return null;
}
if (strategy == null) {
if (fileName != null) {
strategy = DefaultRolloverStrategy.newBuilder()
.setCompressionLevelStr(String.valueOf(Deflater.DEFAULT_COMPRESSION))
.setConfig(getConfiguration())
.build();
} else {
strategy = DirectWriteRolloverStrategy.newBuilder()
.setCompressionLevelStr(String.valueOf(Deflater.DEFAULT_COMPRESSION))
.setConfig(getConfiguration())
.build();
}
} else if (fileName == null && !(strategy instanceof DirectFileRolloverStrategy)) {
LOGGER.error(
"RollingFileAppender '{}': When no file name is provided a DirectFileRolloverStrategy must be configured");
return null;
}
if (filePattern == null) {
LOGGER.error("No filename pattern provided for FileAppender with name " + name);
return null;
}
if (policy == null) {
LOGGER.error("A TriggeringPolicy must be provided");
return null;
}
final Layout<? extends Serializable> layout = getOrCreateLayout();
final boolean immediateFlush = isImmediateFlush();
final int bufferSize = getBufferSize();
final RollingRandomAccessFileManager manager =
RollingRandomAccessFileManager.getRollingRandomAccessFileManager(
fileName,
filePattern,
append,
immediateFlush,
bufferSize,
policy,
strategy,
advertiseURI,
layout,
filePermissions,
fileOwner,
fileGroup,
getConfiguration());
if (manager == null) {
return null;
}
manager.initialize();
return new RollingRandomAccessFileAppender(
name,
layout,
getFilter(),
manager,
fileName,
filePattern,
isIgnoreExceptions(),
immediateFlush,
bufferSize,
advertise ? getConfiguration().getAdvertiser() : null,
getPropertyArray());
}
/**
* @since 2.26.0
*/
public B setFileName(final String fileName) {
this.fileName = fileName;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setFilePattern(final String filePattern) {
this.filePattern = filePattern;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setAppend(final boolean append) {
this.append = append;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setPolicy(final TriggeringPolicy policy) {
this.policy = policy;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setStrategy(final RolloverStrategy strategy) {
this.strategy = strategy;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setAdvertise(final boolean advertise) {
this.advertise = advertise;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setAdvertiseURI(final String advertiseURI) {
this.advertiseURI = advertiseURI;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setFilePermissions(final String filePermissions) {
this.filePermissions = filePermissions;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setFileOwner(final String fileOwner) {
this.fileOwner = fileOwner;
return asBuilder();
}
/**
* @since 2.26.0
*/
public B setFileGroup(final String fileGroup) {
this.fileGroup = fileGroup;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setFileName(String)}.
*/
@Deprecated
public B withFileName(final String fileName) {
this.fileName = fileName;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setFilePattern(String)}.
*/
@Deprecated
public B withFilePattern(final String filePattern) {
this.filePattern = filePattern;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setAppend(boolean)}.
*/
@Deprecated
public B withAppend(final boolean append) {
this.append = append;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setPolicy(TriggeringPolicy)}.
*/
@Deprecated
public B withPolicy(final TriggeringPolicy policy) {
this.policy = policy;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setStrategy(RolloverStrategy)}.
*/
@Deprecated
public B withStrategy(final RolloverStrategy strategy) {
this.strategy = strategy;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setAdvertise(boolean)}.
*/
@Deprecated
public B withAdvertise(final boolean advertise) {
this.advertise = advertise;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setAdvertiseURI(String)}.
*/
@Deprecated
public B withAdvertiseURI(final String advertiseURI) {
this.advertiseURI = advertiseURI;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setFilePermissions(String)}.
*/
@Deprecated
public B withFilePermissions(final String filePermissions) {
this.filePermissions = filePermissions;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setFileOwner(String)}.
*/
@Deprecated
public B withFileOwner(final String fileOwner) {
this.fileOwner = fileOwner;
return asBuilder();
}
/**
* @deprecated since 2.26.0 use {@link #setFileGroup(String)}.
*/
@Deprecated
public B withFileGroup(final String fileGroup) {
this.fileGroup = fileGroup;
return asBuilder();
}
}
private final String fileName;
private final String filePattern;
private final Object advertisement;
private final Advertiser advertiser;
private RollingRandomAccessFileAppender(
final String name,
final Layout<? extends Serializable> layout,
final Filter filter,
final RollingRandomAccessFileManager manager,
final String fileName,
final String filePattern,
final boolean ignoreExceptions,
final boolean immediateFlush,
final int bufferSize,
final Advertiser advertiser,
final Property[] properties) {
super(name, layout, filter, ignoreExceptions, immediateFlush, properties, manager);
if (advertiser != null) {
final Map<String, String> configuration = new HashMap<>(layout.getContentFormat());
configuration.put("contentType", layout.getContentType());
configuration.put("name", name);
advertisement = advertiser.advertise(configuration);
} else {
advertisement = null;
}
this.fileName = fileName;
this.filePattern = filePattern;
this.advertiser = advertiser;
}
@Override
public boolean stop(final long timeout, final TimeUnit timeUnit) {
setStopping();
super.stop(timeout, timeUnit, false);
if (advertiser != null) {
advertiser.unadvertise(advertisement);
}
setStopped();
return true;
}
/**
* Write the log entry rolling over the file when required.
*
* @param event The LogEvent.
*/
@Override
public void append(final LogEvent event) {
final RollingRandomAccessFileManager manager = getManager();
manager.checkRollover(event);
// LOG4J2-1292 utilize gc-free Layout.encode() method: taken care of in superclass
super.append(event);
}
/**
* Returns the File name for the Appender.
*
* @return The file name.
*/
public String getFileName() {
return fileName;
}
/**
* Returns the file pattern used when rolling over.
*
* @return The file pattern.
*/
public String getFilePattern() {
return filePattern;
}
/**
* Returns the size of the file manager's buffer.
* @return the buffer size
*/
public int getBufferSize() {
return getManager().getBufferSize();
}
/**
* Create a RollingRandomAccessFileAppender.
*
* @param fileName The name of the file that is actively written to.
* (required).
* @param filePattern The pattern of the file name to use on rollover.
* (required).
* @param append If true, events are appended to the file. If false, the
* file is overwritten when opened. Defaults to "true"
* @param name The name of the Appender (required).
* @param immediateFlush When true, events are immediately flushed. Defaults
* to "true".
* @param bufferSizeStr The buffer size, defaults to {@value RollingRandomAccessFileManager#DEFAULT_BUFFER_SIZE}.
* @param policy The triggering policy. (required).
* @param strategy The rollover strategy. Defaults to
* DefaultRolloverStrategy.
* @param layout The layout to use (defaults to the default PatternLayout).
* @param filter The Filter or null.
* @param ignoreExceptions If {@code "true"} (default) exceptions encountered when appending events are logged; otherwise
* they are propagated to the caller.
* @param advertise "true" if the appender configuration should be
* advertised, "false" otherwise.
* @param advertiseURI The advertised URI which can be used to retrieve the
* file contents.
* @param configuration The Configuration.
* @return A RollingRandomAccessFileAppender.
* @deprecated Use {@link #newBuilder()}.
*/
@Deprecated
public static <B extends Builder<B>> RollingRandomAccessFileAppender createAppender(
final String fileName,
final String filePattern,
final String append,
final String name,
final String immediateFlush,
final String bufferSizeStr,
final TriggeringPolicy policy,
final RolloverStrategy strategy,
final Layout<? extends Serializable> layout,
final Filter filter,
final String ignoreExceptions,
final String advertise,
final String advertiseURI,
final Configuration configuration) {
final boolean isAppend = Booleans.parseBoolean(append, true);
final boolean isIgnoreExceptions = Booleans.parseBoolean(ignoreExceptions, true);
final boolean isImmediateFlush = Booleans.parseBoolean(immediateFlush, true);
final boolean isAdvertise = Boolean.parseBoolean(advertise);
final int bufferSize = Integers.parseInt(bufferSizeStr, RollingRandomAccessFileManager.DEFAULT_BUFFER_SIZE);
return RollingRandomAccessFileAppender.<B>newBuilder()
.setAdvertise(isAdvertise)
.setAdvertiseURI(advertiseURI)
.setAppend(isAppend)
.setBufferSize(bufferSize)
.setConfiguration(configuration)
.setFileName(fileName)
.setFilePattern(filePattern)
.setFilter(filter)
.setIgnoreExceptions(isIgnoreExceptions)
.setImmediateFlush(isImmediateFlush)
.setLayout(layout)
.setName(name)
.setPolicy(policy)
.setStrategy(strategy)
.build();
}
@PluginBuilderFactory
public static <B extends Builder<B>> B newBuilder() {
return new Builder<B>().asBuilder();
}
}
| Builder |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/integration/MockitoBeanWithCustomQualifierAnnotationByTypeTests.java | {
"start": 2166,
"end": 2643
} | class ____ {
@MockitoBean(enforceOverride = true)
@MyQualifier
ExampleService service;
@Autowired
ExampleServiceCaller caller;
@Test
void test(ApplicationContext context) {
assertIsMock(service);
assertMockName(service, "qualifiedService");
assertThat(service).isNotInstanceOf(QualifiedService.class);
// Since the 'service' field's type is ExampleService, the QualifiedService
// bean in the @Configuration | MockitoBeanWithCustomQualifierAnnotationByTypeTests |
java | elastic__elasticsearch | plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java | {
"start": 1168,
"end": 4079
} | class ____ extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(GceMetadataService.class);
// Forcing Google Token API URL as set in GCE SDK to
// http://metadata/computeMetadata/v1/instance/service-accounts/default/token
// See https://developers.google.com/compute/docs/metadata#metadataserver
// all settings just used for testing - not registered by default
public static final Setting<String> GCE_HOST = Setting.simpleString(
"cloud.gce.host",
"http://metadata.google.internal",
Setting.Property.NodeScope
);
private final Settings settings;
/** Global instance of the HTTP transport. */
private HttpTransport gceHttpTransport;
public GceMetadataService(Settings settings) {
this.settings = settings;
}
protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException {
if (gceHttpTransport == null) {
gceHttpTransport = GoogleNetHttpTransport.newTrustedTransport();
}
return gceHttpTransport;
}
public String metadata(String metadataPath) throws IOException, URISyntaxException {
// Forcing Google Token API URL as set in GCE SDK to
// http://metadata/computeMetadata/v1/instance/service-accounts/default/token
// See https://developers.google.com/compute/docs/metadata#metadataserver
final URI urlMetadataNetwork = new URI(GCE_HOST.get(settings)).resolve("/computeMetadata/v1/instance/").resolve(metadataPath);
logger.debug("get metadata from [{}]", urlMetadataNetwork);
HttpHeaders headers;
try {
// hack around code messiness in GCE code
// TODO: get this fixed
headers = new HttpHeaders();
GenericUrl genericUrl = new GenericUrl(urlMetadataNetwork);
// This is needed to query meta data: https://cloud.google.com/compute/docs/metadata
headers.put("Metadata-Flavor", "Google");
HttpResponse response = getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute();
String metadata = response.parseAsString();
logger.debug("metadata found [{}]", metadata);
return metadata;
} catch (Exception e) {
throw new IOException("failed to fetch metadata from [" + urlMetadataNetwork + "]", e);
}
}
@Override
protected void doStart() {
}
@Override
protected void doStop() {
if (gceHttpTransport != null) {
try {
gceHttpTransport.shutdown();
} catch (IOException e) {
logger.warn("unable to shutdown GCE Http Transport", e);
}
gceHttpTransport = null;
}
}
@Override
protected void doClose() {
}
}
| GceMetadataService |
java | apache__camel | components/camel-stitch/src/test/java/org/apache/camel/component/stitch/StitchProducerTest.java | {
"start": 7966,
"end": 8544
} | class ____ implements StitchClient {
@Override
public Mono<StitchResponse> batch(StitchRequestBody requestBody) {
final StitchResponse response = new StitchResponse(
400,
Collections.singletonMap("header-1", "test"),
"Error",
"Not good!");
final StitchException exception = new StitchException(response);
return Mono.error(exception);
}
@Override
public void close() {
// noop
}
}
}
| TestErrorClient |
java | elastic__elasticsearch | modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookups.java | {
"start": 14425,
"end": 17006
} | class ____ extends AbstractBase<GeolocationResult> {
Geolocation(final Set<Database.Property> properties) {
super(properties, GeolocationResult.class);
}
@Override
protected Map<String, Object> transform(final Result<GeolocationResult> result) {
GeolocationResult response = result.result();
Map<String, Object> data = new HashMap<>();
for (Database.Property property : this.properties) {
switch (property) {
case IP -> data.put("ip", result.ip());
case COUNTRY_ISO_CODE -> {
String countryIsoCode = response.country;
if (countryIsoCode != null) {
data.put("country_iso_code", countryIsoCode);
}
}
case REGION_NAME -> {
String subdivisionName = response.region;
if (subdivisionName != null) {
data.put("region_name", subdivisionName);
}
}
case CITY_NAME -> {
String cityName = response.city;
if (cityName != null) {
data.put("city_name", cityName);
}
}
case TIMEZONE -> {
String locationTimeZone = response.timezone;
if (locationTimeZone != null) {
data.put("timezone", locationTimeZone);
}
}
case POSTAL_CODE -> {
String postalCode = response.postalCode;
if (postalCode != null) {
data.put("postal_code", postalCode);
}
}
case LOCATION -> {
Double latitude = response.lat;
Double longitude = response.lng;
if (latitude != null && longitude != null) {
Map<String, Object> locationObject = new HashMap<>();
locationObject.put("lat", latitude);
locationObject.put("lon", longitude);
data.put("location", locationObject);
}
}
}
}
return data;
}
}
static | Geolocation |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/LocalizedFileBundleLocaleMergeTest.java | {
"start": 2386,
"end": 2576
} | interface ____ extends Messages {
@Message("Hello world!")
String hello_world();
@Message
String greetings();
}
@Localized("de")
public | EnMessages |
java | apache__camel | components/camel-azure/camel-azure-cosmosdb/src/generated/java/org/apache/camel/component/azure/cosmosdb/CosmosDbEndpointConfigurer.java | {
"start": 741,
"end": 14960
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
CosmosDbEndpoint target = (CosmosDbEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accountkey":
case "accountKey": target.getConfiguration().setAccountKey(property(camelContext, java.lang.String.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "changefeedprocessoroptions":
case "changeFeedProcessorOptions": target.getConfiguration().setChangeFeedProcessorOptions(property(camelContext, com.azure.cosmos.models.ChangeFeedProcessorOptions.class, value)); return true;
case "clienttelemetryenabled":
case "clientTelemetryEnabled": target.getConfiguration().setClientTelemetryEnabled(property(camelContext, boolean.class, value)); return true;
case "connectionsharingacrossclientsenabled":
case "connectionSharingAcrossClientsEnabled": target.getConfiguration().setConnectionSharingAcrossClientsEnabled(property(camelContext, boolean.class, value)); return true;
case "consistencylevel":
case "consistencyLevel": target.getConfiguration().setConsistencyLevel(property(camelContext, com.azure.cosmos.ConsistencyLevel.class, value)); return true;
case "containerpartitionkeypath":
case "containerPartitionKeyPath": target.getConfiguration().setContainerPartitionKeyPath(property(camelContext, java.lang.String.class, value)); return true;
case "contentresponseonwriteenabled":
case "contentResponseOnWriteEnabled": target.getConfiguration().setContentResponseOnWriteEnabled(property(camelContext, boolean.class, value)); return true;
case "cosmosasyncclient":
case "cosmosAsyncClient": target.getConfiguration().setCosmosAsyncClient(property(camelContext, com.azure.cosmos.CosmosAsyncClient.class, value)); return true;
case "createcontainerifnotexists":
case "createContainerIfNotExists": target.getConfiguration().setCreateContainerIfNotExists(property(camelContext, boolean.class, value)); return true;
case "createdatabaseifnotexists":
case "createDatabaseIfNotExists": target.getConfiguration().setCreateDatabaseIfNotExists(property(camelContext, boolean.class, value)); return true;
case "createleasecontainerifnotexists":
case "createLeaseContainerIfNotExists": target.getConfiguration().setCreateLeaseContainerIfNotExists(property(camelContext, boolean.class, value)); return true;
case "createleasedatabaseifnotexists":
case "createLeaseDatabaseIfNotExists": target.getConfiguration().setCreateLeaseDatabaseIfNotExists(property(camelContext, boolean.class, value)); return true;
case "credentialtype":
case "credentialType": target.getConfiguration().setCredentialType(property(camelContext, org.apache.camel.component.azure.cosmosdb.CredentialType.class, value)); return true;
case "databaseendpoint":
case "databaseEndpoint": target.getConfiguration().setDatabaseEndpoint(property(camelContext, java.lang.String.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "hostname":
case "hostName": target.getConfiguration().setHostName(property(camelContext, java.lang.String.class, value)); return true;
case "indexingpolicy":
case "indexingPolicy": target.getConfiguration().setIndexingPolicy(property(camelContext, com.azure.cosmos.models.IndexingPolicy.class, value)); return true;
case "itemid":
case "itemId": target.getConfiguration().setItemId(property(camelContext, java.lang.String.class, value)); return true;
case "itempartitionkey":
case "itemPartitionKey": target.getConfiguration().setItemPartitionKey(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "leasecontainername":
case "leaseContainerName": target.getConfiguration().setLeaseContainerName(property(camelContext, java.lang.String.class, value)); return true;
case "leasedatabasename":
case "leaseDatabaseName": target.getConfiguration().setLeaseDatabaseName(property(camelContext, java.lang.String.class, value)); return true;
case "multiplewriteregionsenabled":
case "multipleWriteRegionsEnabled": target.getConfiguration().setMultipleWriteRegionsEnabled(property(camelContext, boolean.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, org.apache.camel.component.azure.cosmosdb.CosmosDbOperationsDefinition.class, value)); return true;
case "preferredregions":
case "preferredRegions": target.getConfiguration().setPreferredRegions(property(camelContext, java.lang.String.class, value)); return true;
case "query": target.getConfiguration().setQuery(property(camelContext, java.lang.String.class, value)); return true;
case "queryrequestoptions":
case "queryRequestOptions": target.getConfiguration().setQueryRequestOptions(property(camelContext, com.azure.cosmos.models.CosmosQueryRequestOptions.class, value)); return true;
case "readrequestsfallbackenabled":
case "readRequestsFallbackEnabled": target.getConfiguration().setReadRequestsFallbackEnabled(property(camelContext, boolean.class, value)); return true;
case "throughputproperties":
case "throughputProperties": target.getConfiguration().setThroughputProperties(property(camelContext, com.azure.cosmos.models.ThroughputProperties.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"cosmosAsyncClient"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accountkey":
case "accountKey": return java.lang.String.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "changefeedprocessoroptions":
case "changeFeedProcessorOptions": return com.azure.cosmos.models.ChangeFeedProcessorOptions.class;
case "clienttelemetryenabled":
case "clientTelemetryEnabled": return boolean.class;
case "connectionsharingacrossclientsenabled":
case "connectionSharingAcrossClientsEnabled": return boolean.class;
case "consistencylevel":
case "consistencyLevel": return com.azure.cosmos.ConsistencyLevel.class;
case "containerpartitionkeypath":
case "containerPartitionKeyPath": return java.lang.String.class;
case "contentresponseonwriteenabled":
case "contentResponseOnWriteEnabled": return boolean.class;
case "cosmosasyncclient":
case "cosmosAsyncClient": return com.azure.cosmos.CosmosAsyncClient.class;
case "createcontainerifnotexists":
case "createContainerIfNotExists": return boolean.class;
case "createdatabaseifnotexists":
case "createDatabaseIfNotExists": return boolean.class;
case "createleasecontainerifnotexists":
case "createLeaseContainerIfNotExists": return boolean.class;
case "createleasedatabaseifnotexists":
case "createLeaseDatabaseIfNotExists": return boolean.class;
case "credentialtype":
case "credentialType": return org.apache.camel.component.azure.cosmosdb.CredentialType.class;
case "databaseendpoint":
case "databaseEndpoint": return java.lang.String.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "hostname":
case "hostName": return java.lang.String.class;
case "indexingpolicy":
case "indexingPolicy": return com.azure.cosmos.models.IndexingPolicy.class;
case "itemid":
case "itemId": return java.lang.String.class;
case "itempartitionkey":
case "itemPartitionKey": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "leasecontainername":
case "leaseContainerName": return java.lang.String.class;
case "leasedatabasename":
case "leaseDatabaseName": return java.lang.String.class;
case "multiplewriteregionsenabled":
case "multipleWriteRegionsEnabled": return boolean.class;
case "operation": return org.apache.camel.component.azure.cosmosdb.CosmosDbOperationsDefinition.class;
case "preferredregions":
case "preferredRegions": return java.lang.String.class;
case "query": return java.lang.String.class;
case "queryrequestoptions":
case "queryRequestOptions": return com.azure.cosmos.models.CosmosQueryRequestOptions.class;
case "readrequestsfallbackenabled":
case "readRequestsFallbackEnabled": return boolean.class;
case "throughputproperties":
case "throughputProperties": return com.azure.cosmos.models.ThroughputProperties.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
CosmosDbEndpoint target = (CosmosDbEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accountkey":
case "accountKey": return target.getConfiguration().getAccountKey();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "changefeedprocessoroptions":
case "changeFeedProcessorOptions": return target.getConfiguration().getChangeFeedProcessorOptions();
case "clienttelemetryenabled":
case "clientTelemetryEnabled": return target.getConfiguration().isClientTelemetryEnabled();
case "connectionsharingacrossclientsenabled":
case "connectionSharingAcrossClientsEnabled": return target.getConfiguration().isConnectionSharingAcrossClientsEnabled();
case "consistencylevel":
case "consistencyLevel": return target.getConfiguration().getConsistencyLevel();
case "containerpartitionkeypath":
case "containerPartitionKeyPath": return target.getConfiguration().getContainerPartitionKeyPath();
case "contentresponseonwriteenabled":
case "contentResponseOnWriteEnabled": return target.getConfiguration().isContentResponseOnWriteEnabled();
case "cosmosasyncclient":
case "cosmosAsyncClient": return target.getConfiguration().getCosmosAsyncClient();
case "createcontainerifnotexists":
case "createContainerIfNotExists": return target.getConfiguration().isCreateContainerIfNotExists();
case "createdatabaseifnotexists":
case "createDatabaseIfNotExists": return target.getConfiguration().isCreateDatabaseIfNotExists();
case "createleasecontainerifnotexists":
case "createLeaseContainerIfNotExists": return target.getConfiguration().isCreateLeaseContainerIfNotExists();
case "createleasedatabaseifnotexists":
case "createLeaseDatabaseIfNotExists": return target.getConfiguration().isCreateLeaseDatabaseIfNotExists();
case "credentialtype":
case "credentialType": return target.getConfiguration().getCredentialType();
case "databaseendpoint":
case "databaseEndpoint": return target.getConfiguration().getDatabaseEndpoint();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "hostname":
case "hostName": return target.getConfiguration().getHostName();
case "indexingpolicy":
case "indexingPolicy": return target.getConfiguration().getIndexingPolicy();
case "itemid":
case "itemId": return target.getConfiguration().getItemId();
case "itempartitionkey":
case "itemPartitionKey": return target.getConfiguration().getItemPartitionKey();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "leasecontainername":
case "leaseContainerName": return target.getConfiguration().getLeaseContainerName();
case "leasedatabasename":
case "leaseDatabaseName": return target.getConfiguration().getLeaseDatabaseName();
case "multiplewriteregionsenabled":
case "multipleWriteRegionsEnabled": return target.getConfiguration().isMultipleWriteRegionsEnabled();
case "operation": return target.getConfiguration().getOperation();
case "preferredregions":
case "preferredRegions": return target.getConfiguration().getPreferredRegions();
case "query": return target.getConfiguration().getQuery();
case "queryrequestoptions":
case "queryRequestOptions": return target.getConfiguration().getQueryRequestOptions();
case "readrequestsfallbackenabled":
case "readRequestsFallbackEnabled": return target.getConfiguration().isReadRequestsFallbackEnabled();
case "throughputproperties":
case "throughputProperties": return target.getConfiguration().getThroughputProperties();
default: return null;
}
}
}
| CosmosDbEndpointConfigurer |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/nullness/AddNullMarkedToPackageInfo.java | {
"start": 1395,
"end": 2435
} | class ____ extends BugChecker
implements CompilationUnitTreeMatcher {
@Override
public Description matchCompilationUnit(CompilationUnitTree unit, VisitorState state) {
if (!isPackageInfo(unit)) {
return NO_MATCH;
}
boolean nullMarkedAnnotationPresent =
unit.getPackageAnnotations().stream()
.anyMatch(
annotation -> ASTHelpers.getAnnotationName(annotation).contentEquals("NullMarked"));
if (nullMarkedAnnotationPresent) {
return NO_MATCH;
}
return describeMatch(
unit.getPackage(),
SuggestedFix.builder()
.prefixWith(unit.getPackage(), "@NullMarked ")
.addImport("org.jspecify.annotations.NullMarked")
.build());
}
private static boolean isPackageInfo(CompilationUnitTree tree) {
String name = ASTHelpers.getFileName(tree);
int idx = name.lastIndexOf('/');
if (idx != -1) {
name = name.substring(idx + 1);
}
return name.equals("package-info.java");
}
}
| AddNullMarkedToPackageInfo |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/records/ElementCollectionOfRecordsTest.java | {
"start": 1930,
"end": 2382
} | class ____ {
@Id
Long id;
@OrderColumn
@ElementCollection(fetch = FetchType.EAGER)
@OrderBy("longField")
List<Record> records = new ArrayList<>();
public void setId(Long id) {
this.id = id;
}
public void addRecord(Record r) {
this.records.add( r );
}
public List<Record> getRecords() {
return records;
}
}
@Embeddable
public record Record(String cField, String aField, String bField, Long longField) {}
}
| MainEntity |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IntLongMathTest.java | {
"start": 1858,
"end": 2335
} | interface ____ {
long f(int i);
}
F f =
i -> {
// BUG: Diagnostic contains: return i + 3L
return i + 3;
};
}
""")
.doTest();
}
@Test
public void refactoring() {
BugCheckerRefactoringTestHelper.newInstance(IntLongMath.class, getClass())
.addInputLines(
"in/Test.java",
"""
| F |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/AnnotationMirrorToString.java | {
"start": 1553,
"end": 2654
} | class ____ extends AbstractToString {
private static final TypePredicate TYPE_PREDICATE =
TypePredicates.isExactType("javax.lang.model.element.AnnotationMirror");
@Inject
AnnotationMirrorToString(ErrorProneFlags flags) {
super(flags);
}
@Override
protected TypePredicate typePredicate() {
return TYPE_PREDICATE;
}
@Override
protected Optional<Fix> implicitToStringFix(ExpressionTree tree, VisitorState state) {
return fix(tree, tree, state);
}
@Override
protected Optional<Fix> toStringFix(Tree parent, ExpressionTree tree, VisitorState state) {
return fix(parent, tree, state);
}
private static Optional<Fix> fix(Tree replace, Tree with, VisitorState state) {
SuggestedFix.Builder fix = SuggestedFix.builder();
return Optional.of(
fix.replace(
replace,
String.format(
"%s.toString(%s)",
qualifyType(state, fix, "com.google.auto.common.AnnotationMirrors"),
state.getSourceForNode(with)))
.build());
}
}
| AnnotationMirrorToString |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/DefaultLoginPageConfigurerTests.java | {
"start": 19562,
"end": 20038
} | class ____ {
static ObjectPostProcessor<Object> objectPostProcessor;
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.exceptionHandling(withDefaults())
.formLogin(withDefaults());
return http.build();
// @formatter:on
}
@Bean
static ObjectPostProcessor<Object> objectPostProcessor() {
return objectPostProcessor;
}
}
@Configuration
@EnableWebSecurity
static | ObjectPostProcessorConfig |
java | elastic__elasticsearch | test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java | {
"start": 2549,
"end": 3203
} | class ____ {
String skipReason = null;
String skipVersionRange = null;
List<String> skipOperatingSystems = new ArrayList<>();
List<KnownIssue> skipKnownIssues = new ArrayList<>();
String skipAwaitsFix = null;
Set<String> skipClusterFeatures = new HashSet<>();
List<CapabilitiesCheck> skipCapabilities = new ArrayList<>();
String requiresReason = null;
List<String> requiredYamlRunnerFeatures = new ArrayList<>();
Set<String> requiredClusterFeatures = new HashSet<>();
List<CapabilitiesCheck> requiredCapabilities = new ArrayList<>();
| PrerequisiteSectionBuilder |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/builder/ReflectionToStringBuilderCustomImplementationTest.java | {
"start": 1292,
"end": 2086
} | class ____ extends ReflectionToStringBuilder {
private static final String CUSTOM_PREFIX = "prefix:";
CustomReflectionToStringBuilder(final Object object, final ToStringStyle toStringStyle) {
super(object, toStringStyle);
}
@Override
protected Object getValue(final Field field) throws IllegalAccessException {
return CUSTOM_PREFIX + super.getValue(field);
}
}
@SuppressWarnings("unused") // Used indirectly by ReflectionToStringBuilder
private final String stringField = "string";
@Test
void testBuild() {
assertEquals("[stringField=prefix:string]",
new CustomReflectionToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE).build());
}
} | CustomReflectionToStringBuilder |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java | {
"start": 2877,
"end": 7575
} | class ____ implements
HAServiceProtocolPB {
private final HAServiceProtocol server;
private static final MonitorHealthResponseProto MONITOR_HEALTH_RESP =
MonitorHealthResponseProto.newBuilder().build();
private static final TransitionToActiveResponseProto TRANSITION_TO_ACTIVE_RESP =
TransitionToActiveResponseProto.newBuilder().build();
private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP =
TransitionToStandbyResponseProto.newBuilder().build();
private static final TransitionToObserverResponseProto
TRANSITION_TO_OBSERVER_RESP =
TransitionToObserverResponseProto.newBuilder().build();
private static final Logger LOG = LoggerFactory.getLogger(
HAServiceProtocolServerSideTranslatorPB.class);
public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
this.server = server;
}
@Override
public MonitorHealthResponseProto monitorHealth(RpcController controller,
MonitorHealthRequestProto request) throws ServiceException {
try {
server.monitorHealth();
return MONITOR_HEALTH_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
RequestSource src;
switch (proto.getReqSource()) {
case REQUEST_BY_USER:
src = RequestSource.REQUEST_BY_USER;
break;
case REQUEST_BY_USER_FORCED:
src = RequestSource.REQUEST_BY_USER_FORCED;
break;
case REQUEST_BY_ZKFC:
src = RequestSource.REQUEST_BY_ZKFC;
break;
default:
LOG.warn("Unknown request source: " + proto.getReqSource());
src = null;
}
return new StateChangeRequestInfo(src);
}
@Override
public TransitionToActiveResponseProto transitionToActive(
RpcController controller, TransitionToActiveRequestProto request)
throws ServiceException {
try {
server.transitionToActive(convert(request.getReqInfo()));
return TRANSITION_TO_ACTIVE_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
@Override
public TransitionToStandbyResponseProto transitionToStandby(
RpcController controller, TransitionToStandbyRequestProto request)
throws ServiceException {
try {
server.transitionToStandby(convert(request.getReqInfo()));
return TRANSITION_TO_STANDBY_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
@Override
public TransitionToObserverResponseProto transitionToObserver(
RpcController controller, TransitionToObserverRequestProto request)
throws ServiceException {
try {
server.transitionToObserver(convert(request.getReqInfo()));
return TRANSITION_TO_OBSERVER_RESP;
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetServiceStatusResponseProto getServiceStatus(RpcController controller,
GetServiceStatusRequestProto request) throws ServiceException {
HAServiceStatus s;
try {
s = server.getServiceStatus();
} catch(IOException e) {
throw new ServiceException(e);
}
HAServiceStateProto retState;
switch (s.getState()) {
case ACTIVE:
retState = HAServiceStateProto.ACTIVE;
break;
case STANDBY:
retState = HAServiceStateProto.STANDBY;
break;
case OBSERVER:
retState = HAServiceStateProto.OBSERVER;
break;
case INITIALIZING:
default:
retState = HAServiceStateProto.INITIALIZING;
break;
}
GetServiceStatusResponseProto.Builder ret =
GetServiceStatusResponseProto.newBuilder()
.setState(retState)
.setReadyToBecomeActive(s.isReadyToBecomeActive());
if (!s.isReadyToBecomeActive()) {
ret.setNotReadyReason(s.getNotReadyReason());
}
return ret.build();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(HAServiceProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
if (!protocol.equals(RPC.getProtocolName(HAServiceProtocolPB.class))) {
throw new IOException("Serverside implements " +
RPC.getProtocolName(HAServiceProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(HAServiceProtocolPB.class),
HAServiceProtocolPB.class);
}
}
| HAServiceProtocolServerSideTranslatorPB |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCompleteCheckpointStore.java | {
"start": 1188,
"end": 2614
} | class ____ implements CompletedCheckpointStore {
private final SharedStateRegistry sharedStateRegistry;
public AbstractCompleteCheckpointStore(SharedStateRegistry sharedStateRegistry) {
this.sharedStateRegistry = sharedStateRegistry;
}
@Override
public SharedStateRegistry getSharedStateRegistry() {
return sharedStateRegistry;
}
@Override
public void shutdown(JobStatus jobStatus, CheckpointsCleaner checkpointsCleaner)
throws Exception {
if (jobStatus.isGloballyTerminalState()) {
sharedStateRegistry.close();
}
}
/**
* Unregister shared states that are no longer in use. Should be called after completing a
* checkpoint (even if no checkpoint was subsumed, so that state added by an aborted checkpoints
* and not used later can be removed).
*/
protected void unregisterUnusedState(Deque<CompletedCheckpoint> unSubsumedCheckpoints) {
findLowest(unSubsumedCheckpoints).ifPresent(sharedStateRegistry::unregisterUnusedState);
}
protected static Optional<Long> findLowest(Deque<CompletedCheckpoint> unSubsumedCheckpoints) {
for (CompletedCheckpoint p : unSubsumedCheckpoints) {
if (!p.getProperties().isSavepoint()) {
return Optional.of(p.getCheckpointID());
}
}
return Optional.empty();
}
}
| AbstractCompleteCheckpointStore |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java | {
"start": 1607,
"end": 3445
} | class ____
extends TestCryptoStreams {
@BeforeAll
public static void init() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
OpensslAesCtrCryptoCodec.class.getName());
codec = CryptoCodec.getInstance(conf);
assertNotNull(codec, "Unable to instantiate codec " +
OpensslAesCtrCryptoCodec.class.getName() + ", is the required "
+ "version of OpenSSL installed?");
assertEquals(OpensslAesCtrCryptoCodec.class.getCanonicalName(),
codec.getClass().getCanonicalName());
}
@Test
public void testCodecClosesRandom() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, "AES/CTR/NoPadding");
conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
OpensslAesCtrCryptoCodec.class.getName());
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY,
OsSecureRandom.class.getName());
CryptoCodec codecWithRandom = CryptoCodec.getInstance(conf);
assertNotNull(
codecWithRandom, "Unable to instantiate codec " + OpensslAesCtrCryptoCodec.class
.getName() + ", is the required " + "version of OpenSSL installed?");
OsSecureRandom random = (OsSecureRandom)
((OpensslAesCtrCryptoCodec) codecWithRandom).getRandom();
// trigger the OsSecureRandom to create an internal FileInputStream
random.nextBytes(new byte[10]);
assertFalse(random.isClosed());
// verify closing the codec closes the codec's random's stream.
codecWithRandom.close();
assertTrue(random.isClosed());
}
}
| TestCryptoStreamsWithOpensslAesCtrCryptoCodec |
java | apache__dubbo | dubbo-metadata/dubbo-metadata-api/src/main/java/org/apache/dubbo/metadata/ParameterTypesComparator.java | {
"start": 871,
"end": 1603
} | class ____ {
private Class[] parameterTypes;
public ParameterTypesComparator(Class[] parameterTypes) {
this.parameterTypes = parameterTypes;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ParameterTypesComparator that = (ParameterTypesComparator) o;
return Arrays.equals(parameterTypes, that.parameterTypes);
}
@Override
public int hashCode() {
return Arrays.hashCode(parameterTypes);
}
public static ParameterTypesComparator getInstance(Class[] parameterTypes) {
return new ParameterTypesComparator(parameterTypes);
}
}
| ParameterTypesComparator |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/tck/MergeIterableTckTest.java | {
"start": 798,
"end": 1173
} | class ____ extends BaseTck<Long> {
@Override
public Publisher<Long> createPublisher(long elements) {
return
Flowable.merge(Arrays.asList(
Flowable.fromIterable(iterate(elements / 2)),
Flowable.fromIterable(iterate(elements - elements / 2))
)
)
;
}
}
| MergeIterableTckTest |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/ReactiveMessagingTest.java | {
"start": 507,
"end": 1902
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(SimpleBean.class, ChannelConsumer.class, EmitterExample.class));
@Inject
ChannelConsumer channelConsumer;
@Inject
EmitterExample emitterExample;
@Test
public void testSimpleBean() {
assertEquals(4, SimpleBean.RESULT.size());
assertTrue(SimpleBean.RESULT.contains("HELLO"));
assertTrue(SimpleBean.RESULT.contains("SMALLRYE"));
assertTrue(SimpleBean.RESULT.contains("REACTIVE"));
assertTrue(SimpleBean.RESULT.contains("MESSAGE"));
}
@Test
public void testChannelInjection() {
List<String> consumed = channelConsumer.consume();
assertEquals(5, consumed.size());
assertEquals("hello", consumed.get(0));
assertEquals("with", consumed.get(1));
assertEquals("SmallRye", consumed.get(2));
assertEquals("reactive", consumed.get(3));
assertEquals("message", consumed.get(4));
}
@Test
public void testEmitter() {
emitterExample.run();
List<String> list = emitterExample.list();
assertEquals(3, list.size());
assertEquals("a", list.get(0));
assertEquals("b", list.get(1));
assertEquals("c", list.get(2));
}
}
| ReactiveMessagingTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/number/NumberValueTest_error_0.java | {
"start": 190,
"end": 519
} | class ____ extends TestCase {
public void test_0() throws Exception {
Exception error = null;
try {
String text = "{\"value\":33e}";
JSON.parse(text);
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
}
| NumberValueTest_error_0 |
java | apache__flink | flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java | {
"start": 12377,
"end": 30444
} | class ____. These patterns are appended to \""
+ PLUGIN_ALWAYS_PARENT_FIRST_LOADER_PATTERNS.key()
+ "\".");
public static String[] getPluginParentFirstLoaderPatterns(Configuration config) {
List<String> base = config.get(PLUGIN_ALWAYS_PARENT_FIRST_LOADER_PATTERNS);
List<String> append = config.get(PLUGIN_ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL);
return mergeListsToArray(base, append);
}
@Internal
public static String[] mergeListsToArray(List<String> base, List<String> append) {
return Iterables.toArray(Iterables.concat(base, append), String.class);
}
// ------------------------------------------------------------------------
// process parameters
// ------------------------------------------------------------------------
public static final ConfigOption<String> FLINK_JAVA_HOME =
ConfigOptions.key("env.java.home")
.stringType()
.noDefaultValue()
.withDescription(
Description.builder()
.text(
"Location where Java is installed. If not specified,"
+ " Flink will use your default Java installation.")
.build());
public static final ConfigOption<String> FLINK_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.all")
.stringType()
.defaultValue("")
.withDeprecatedKeys("env.java.opts")
.withDescription(
Description.builder()
.text(
"Java options to start the JVM of all Flink processes with.")
.build());
public static final ConfigOption<String> FLINK_JM_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.jobmanager")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text("Java options to start the JVM of the JobManager with.")
.build());
public static final ConfigOption<String> FLINK_TM_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.taskmanager")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text("Java options to start the JVM of the TaskManager with.")
.build());
public static final ConfigOption<String> FLINK_HS_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.historyserver")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text(
"Java options to start the JVM of the HistoryServer with.")
.build());
public static final ConfigOption<String> FLINK_CLI_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.client")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text("Java options to start the JVM of the Flink Client with.")
.build());
public static final ConfigOption<String> FLINK_SQL_GATEWAY_JVM_OPTIONS =
ConfigOptions.key("env.java.opts.sql-gateway")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text(
"Java options to start the JVM of the Flink SQL Gateway with.")
.build());
public static final ConfigOption<String> FLINK_DEFAULT_JVM_OPTIONS =
ConfigOptions.key("env.java.default-opts.all")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text(
"A string of default JVM options to prepend to %s."
+ " This is intended to be set by administrators.",
code(FLINK_JVM_OPTIONS.key()))
.build());
public static final ConfigOption<String> FLINK_DEFAULT_JM_JVM_OPTIONS =
ConfigOptions.key("env.java.default-opts.jobmanager")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text(
"A string of default JVM options to prepend to %s."
+ " This is intended to be set by administrators.",
code(FLINK_JM_JVM_OPTIONS.key()))
.build());
public static final ConfigOption<String> FLINK_DEFAULT_TM_JVM_OPTIONS =
ConfigOptions.key("env.java.default-opts.taskmanager")
.stringType()
.defaultValue("")
.withDescription(
Description.builder()
.text(
"A string of default JVM options to prepend to %s."
+ " This is intended to be set by administrators.",
code(FLINK_TM_JVM_OPTIONS.key()))
.build());
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_LOG_DIR =
ConfigOptions.key("env.log.dir")
.stringType()
.noDefaultValue()
.withDescription(
"Defines the directory where the Flink logs are saved. It has to be an absolute path."
+ " (Defaults to the log directory under Flink’s home)");
/**
* The config parameter defining the directory for Flink PID file. see: {@code
* bin/config.sh#KEY_ENV_PID_DIR} and {@code bin/config.sh#DEFAULT_ENV_PID_DIR}
*/
public static final ConfigOption<String> FLINK_PID_DIR =
ConfigOptions.key("env.pid.dir")
.stringType()
.defaultValue("/tmp")
.withDescription(
"Defines the directory where the flink-<host>-<process>.pid files are saved.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<Integer> FLINK_LOG_MAX =
ConfigOptions.key("env.log.max")
.intType()
.defaultValue(10)
.withDescription("The maximum number of old log files to keep.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_LOG_LEVEL =
ConfigOptions.key("env.log.level")
.stringType()
.defaultValue("INFO")
.withDescription("Defines the level of the root logger.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<Boolean> FLINK_STD_REDIRECT_TO_FILE =
ConfigOptions.key("env.stdout-err.redirect-to-file")
.booleanType()
.defaultValue(false)
.withDescription(
"Whether redirect stdout and stderr to files when running foreground. "
+ "If enabled, logs won't append the console too. "
+ "Note that redirected files do not support rolling rotate.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_SSH_OPTIONS =
ConfigOptions.key("env.ssh.opts")
.stringType()
.noDefaultValue()
.withDescription(
"Additional command line options passed to SSH clients when starting or stopping JobManager,"
+ " TaskManager, and Zookeeper services (start-cluster.sh, stop-cluster.sh, start-zookeeper-quorum.sh,"
+ " stop-zookeeper-quorum.sh).");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_HADOOP_CONF_DIR =
ConfigOptions.key("env.hadoop.conf.dir")
.stringType()
.noDefaultValue()
.withDescription(
"Path to hadoop configuration directory. It is required to read HDFS and/or YARN"
+ " configuration. You can also set it via environment variable.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_YARN_CONF_DIR =
ConfigOptions.key("env.yarn.conf.dir")
.stringType()
.noDefaultValue()
.withDescription(
"Path to yarn configuration directory. It is required to run flink on YARN. You can also"
+ " set it via environment variable.");
/**
* This option is here only for documentation generation, it is only evaluated in the shell
* scripts.
*/
@SuppressWarnings("unused")
public static final ConfigOption<String> FLINK_HBASE_CONF_DIR =
ConfigOptions.key("env.hbase.conf.dir")
.stringType()
.noDefaultValue()
.withDescription(
"Path to hbase configuration directory. It is required to read HBASE configuration."
+ " You can also set it via environment variable.");
// ------------------------------------------------------------------------
// generic io
// ------------------------------------------------------------------------
/**
* The config parameter defining the directories for temporary files, separated by ",", "|", or
* the system's {@link java.io.File#pathSeparator}.
*/
@Documentation.OverrideDefault(
"'LOCAL_DIRS' on Yarn. System.getProperty(\"java.io.tmpdir\") in standalone.")
@Documentation.Section(Documentation.Sections.COMMON_MISCELLANEOUS)
public static final ConfigOption<String> TMP_DIRS =
key("io.tmp.dirs")
.stringType()
.defaultValue(System.getProperty("java.io.tmpdir"))
.withDeprecatedKeys("taskmanager.tmp.dirs")
.withDescription(
"Directories for temporary files, separated by\",\", \"|\", or the system's java.io.File.pathSeparator.");
// ------------------------------------------------------------------------
// program
// ------------------------------------------------------------------------
public static final ConfigOption<Integer> DEFAULT_PARALLELISM =
ConfigOptions.key("parallelism.default")
.intType()
.defaultValue(1)
.withDescription("Default parallelism for jobs.");
// ------------------------------------------------------------------------
// file systems
// ------------------------------------------------------------------------
/** The default filesystem scheme, used for paths that do not declare a scheme explicitly. */
@Documentation.Section(Documentation.Sections.COMMON_MISCELLANEOUS)
public static final ConfigOption<String> DEFAULT_FILESYSTEM_SCHEME =
ConfigOptions.key("fs.default-scheme")
.stringType()
.noDefaultValue()
.withDescription(
"The default filesystem scheme, used for paths that do not declare a scheme explicitly."
+ " May contain an authority, e.g. host:port in case of an HDFS NameNode.");
@Documentation.Section(Documentation.Sections.COMMON_MISCELLANEOUS)
public static final ConfigOption<String> ALLOWED_FALLBACK_FILESYSTEMS =
ConfigOptions.key("fs.allowed-fallback-filesystems")
.stringType()
.defaultValue("")
.withDescription(
"A (semicolon-separated) list of file schemes, for which Hadoop can be used instead "
+ "of an appropriate Flink plugin. (example: s3;wasb)");
/** Specifies whether file output writers should overwrite existing files by default. */
@Documentation.Section(Documentation.Sections.DEPRECATED_FILE_SINKS)
public static final ConfigOption<Boolean> FILESYTEM_DEFAULT_OVERRIDE =
key("fs.overwrite-files")
.booleanType()
.defaultValue(false)
.withDescription(
"Specifies whether file output writers should overwrite existing files by default. Set to"
+ " \"true\" to overwrite by default,\"false\" otherwise.");
/**
* Specifies whether the file systems should always create a directory for the output, even with
* a parallelism of one.
*/
@Documentation.Section(Documentation.Sections.DEPRECATED_FILE_SINKS)
public static final ConfigOption<Boolean> FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY =
key("fs.output.always-create-directory")
.booleanType()
.defaultValue(false)
.withDescription(
"File writers running with a parallelism larger than one create a directory for the output"
+ " file path and put the different result files (one per parallel writer task) into that directory."
+ " If this option is set to \"true\", writers with a parallelism of 1 will also create a"
+ " directory and place a single result file into it. If the option is set to \"false\","
+ " the writer will directly create the file directly at the output path, without creating a containing"
+ " directory.");
/**
* The total number of input plus output connections that a file system for the given scheme may
* open. Unlimited be default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimit(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.total").intType().defaultValue(-1);
}
/**
* The total number of input connections that a file system for the given scheme may open.
* Unlimited be default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimitIn(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.input").intType().defaultValue(-1);
}
/**
* The total number of output connections that a file system for the given scheme may open.
* Unlimited be default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimitOut(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.output").intType().defaultValue(-1);
}
/**
* If any connection limit is configured, this option can be optionally set to define after
* which time (in milliseconds) stream opening fails with a timeout exception, if no stream
* connection becomes available. Unlimited timeout be default.
*/
public static ConfigOption<Long> fileSystemConnectionLimitTimeout(String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.timeout").longType().defaultValue(0L);
}
/**
* If any connection limit is configured, this option can be optionally set to define after
* which time (in milliseconds) inactive streams are reclaimed. This option can help to prevent
* that inactive streams make up the full pool of limited connections, and no further
* connections can be established. Unlimited timeout be default.
*/
public static ConfigOption<Long> fileSystemConnectionLimitStreamInactivityTimeout(
String scheme) {
return ConfigOptions.key("fs." + scheme + ".limit.stream-timeout")
.longType()
.defaultValue(0L);
}
}
| name |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/event/EventListenerSupportTest.java | {
"start": 2180,
"end": 13401
} | class ____ extends AbstractLangTest {
private void addDeregisterListener(final EventListenerSupport<VetoableChangeListener> listenerSupport) {
listenerSupport.addListener(new VetoableChangeListener() {
@Override
public void vetoableChange(final PropertyChangeEvent e) {
listenerSupport.removeListener(this);
}
});
}
private VetoableChangeListener createListener(final List<VetoableChangeListener> calledListeners) {
return new VetoableChangeListener() {
@Override
public void vetoableChange(final PropertyChangeEvent e) {
calledListeners.add(this);
}
};
}
@Test
void testAddListenerNoDuplicates() {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
final VetoableChangeListener[] listeners = listenerSupport.getListeners();
assertEquals(0, listeners.length);
assertEquals(VetoableChangeListener.class, listeners.getClass().getComponentType());
final VetoableChangeListener[] empty = listeners;
// for fun, show that the same empty instance is used
assertSame(empty, listenerSupport.getListeners());
final VetoableChangeListener listener1 = EasyMock.createNiceMock(VetoableChangeListener.class);
listenerSupport.addListener(listener1);
assertEquals(1, listenerSupport.getListeners().length);
listenerSupport.addListener(listener1, false);
assertEquals(1, listenerSupport.getListeners().length);
listenerSupport.removeListener(listener1);
assertSame(empty, listenerSupport.getListeners());
}
@Test
void testAddNullListener() {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
assertNullPointerException(() -> listenerSupport.addListener(null));
}
@Test
void testCreateWithNonInterfaceParameter() {
assertIllegalArgumentException(() -> EventListenerSupport.create(String.class));
}
@Test
void testCreateWithNullParameter() {
assertNullPointerException(() -> EventListenerSupport.create(null));
}
@Test
void testEventDispatchOrder() throws PropertyVetoException {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
final List<VetoableChangeListener> calledListeners = new ArrayList<>();
final VetoableChangeListener listener1 = createListener(calledListeners);
final VetoableChangeListener listener2 = createListener(calledListeners);
listenerSupport.addListener(listener1);
listenerSupport.addListener(listener2);
listenerSupport.fire().vetoableChange(new PropertyChangeEvent(new Date(), "Day", 4, 5));
assertEquals(calledListeners.size(), 2);
assertSame(calledListeners.get(0), listener1);
assertSame(calledListeners.get(1), listener2);
}
@Test
void testGetListeners() {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
final VetoableChangeListener[] listeners = listenerSupport.getListeners();
assertEquals(0, listeners.length);
assertEquals(VetoableChangeListener.class, listeners.getClass().getComponentType());
final VetoableChangeListener[] empty = listeners;
// for fun, show that the same empty instance is used
assertSame(empty, listenerSupport.getListeners());
final VetoableChangeListener listener1 = EasyMock.createNiceMock(VetoableChangeListener.class);
listenerSupport.addListener(listener1);
assertEquals(1, listenerSupport.getListeners().length);
final VetoableChangeListener listener2 = EasyMock.createNiceMock(VetoableChangeListener.class);
listenerSupport.addListener(listener2);
assertEquals(2, listenerSupport.getListeners().length);
listenerSupport.removeListener(listener1);
assertEquals(1, listenerSupport.getListeners().length);
listenerSupport.removeListener(listener2);
assertSame(empty, listenerSupport.getListeners());
}
@Test
void testRemoveListenerDuringEvent() throws PropertyVetoException {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
for (int i = 0; i < 10; ++i) {
addDeregisterListener(listenerSupport);
}
assertEquals(listenerSupport.getListenerCount(), 10);
listenerSupport.fire().vetoableChange(new PropertyChangeEvent(new Date(), "Day", 4, 5));
assertEquals(listenerSupport.getListenerCount(), 0);
}
@Test
void testRemoveNullListener() {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
assertNullPointerException(() -> listenerSupport.removeListener(null));
}
@Test
void testSerialization() throws IOException, ClassNotFoundException, PropertyVetoException {
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
listenerSupport.addListener(Function.identity()::apply);
listenerSupport.addListener(EasyMock.createNiceMock(VetoableChangeListener.class));
// serialize:
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
try (ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream)) {
objectOutputStream.writeObject(listenerSupport);
}
// deserialize:
@SuppressWarnings("unchecked")
final EventListenerSupport<VetoableChangeListener> deserializedListenerSupport = (EventListenerSupport<VetoableChangeListener>) new ObjectInputStream(
new ByteArrayInputStream(outputStream.toByteArray())).readObject();
// make sure we get a listener array back, of the correct component type, and that it contains only the serializable mock
final VetoableChangeListener[] listeners = deserializedListenerSupport.getListeners();
assertEquals(VetoableChangeListener.class, listeners.getClass().getComponentType());
assertEquals(1, listeners.length);
// now verify that the mock still receives events; we can infer that the proxy was correctly reconstituted
final VetoableChangeListener listener = listeners[0];
final PropertyChangeEvent evt = new PropertyChangeEvent(new Date(), "Day", 7, 9);
listener.vetoableChange(evt);
EasyMock.replay(listener);
deserializedListenerSupport.fire().vetoableChange(evt);
EasyMock.verify(listener);
// remove listener and verify we get an empty array of listeners
deserializedListenerSupport.removeListener(listener);
assertEquals(0, deserializedListenerSupport.getListeners().length);
}
@Test
void testSubclassInvocationHandling() throws PropertyVetoException {
final EventListenerSupport<VetoableChangeListener> eventListenerSupport = new EventListenerSupport<VetoableChangeListener>(
VetoableChangeListener.class) {
private static final long serialVersionUID = 1L;
@Override
protected java.lang.reflect.InvocationHandler createInvocationHandler() {
return new ProxyInvocationHandler() {
@Override
public Object invoke(final Object proxy, final Method method, final Object[] args)
throws IllegalAccessException, IllegalArgumentException, InvocationTargetException {
return "vetoableChange".equals(method.getName()) && "Hour".equals(((PropertyChangeEvent) args[0]).getPropertyName()) ? null
: super.invoke(proxy, method, args);
}
};
}
};
final VetoableChangeListener listener = EasyMock.createNiceMock(VetoableChangeListener.class);
eventListenerSupport.addListener(listener);
final Object source = new Date();
final PropertyChangeEvent ignore = new PropertyChangeEvent(source, "Hour", 5, 6);
final PropertyChangeEvent respond = new PropertyChangeEvent(source, "Day", 6, 7);
listener.vetoableChange(respond);
EasyMock.replay(listener);
eventListenerSupport.fire().vetoableChange(ignore);
eventListenerSupport.fire().vetoableChange(respond);
EasyMock.verify(listener);
}
/**
* Tests that throwing an exception from a listener stops calling the remaining listeners.
*/
@Test
void testThrowingListener() {
final AtomicInteger count = new AtomicInteger();
final EventListenerSupport<VetoableChangeListener> listenerSupport = EventListenerSupport.create(VetoableChangeListener.class);
final int vetoLimit = 1;
final int listenerCount = 10;
for (int i = 0; i < listenerCount; ++i) {
listenerSupport.addListener(evt -> {
if (count.incrementAndGet() > vetoLimit) {
throw new PropertyVetoException(count.toString(), evt);
}
});
}
assertEquals(listenerCount, listenerSupport.getListenerCount());
assertEquals(0, count.get());
final Exception e = assertThrows(UndeclaredThrowableException.class,
() -> listenerSupport.fire().vetoableChange(new PropertyChangeEvent(new Date(), "Day", 0, 1)));
final Throwable rootCause = ExceptionUtils.getRootCause(e);
assertInstanceOf(PropertyVetoException.class, rootCause);
assertEquals(vetoLimit + 1, count.get());
}
/**
* Tests that throwing an exception from a listener continues calling the remaining listeners.
*/
@Test
void testThrowingListenerContinues() throws PropertyVetoException {
final AtomicInteger count = new AtomicInteger();
final EventListenerSupport<VetoableChangeListener> listenerSupport = new EventListenerSupport<VetoableChangeListener>(VetoableChangeListener.class) {
@Override
protected InvocationHandler createInvocationHandler() {
return new ProxyInvocationHandler(FailableConsumer.nop());
}
};
final int vetoLimit = 1;
final int listenerCount = 10;
for (int i = 0; i < listenerCount; ++i) {
listenerSupport.addListener(evt -> {
if (count.incrementAndGet() > vetoLimit) {
throw new PropertyVetoException(count.toString(), evt);
}
});
}
assertEquals(listenerCount, listenerSupport.getListenerCount());
assertEquals(0, count.get());
listenerSupport.fire().vetoableChange(new PropertyChangeEvent(new Date(), "Day", 0, 1));
assertEquals(listenerCount, count.get());
}
}
| EventListenerSupportTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java | {
"start": 3064,
"end": 9829
} | class ____ extends AbstractIdFieldType {
private final BooleanSupplier fieldDataEnabled;
IdFieldType(BooleanSupplier fieldDataEnabled) {
this.fieldDataEnabled = fieldDataEnabled;
}
@Override
public boolean mayExistInIndex(SearchExecutionContext context) {
return true;
}
@Override
public boolean isAggregatable() {
return fieldDataEnabled.getAsBoolean();
}
@Override
public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) {
if (fieldDataEnabled.getAsBoolean() == false) {
throw new IllegalArgumentException(
"Fielddata access on the _id field is disallowed, "
+ "you can re-enable it by updating the dynamic cluster setting: "
+ IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()
);
}
final IndexFieldData.Builder fieldDataBuilder = new PagedBytesIndexFieldData.Builder(
name(),
TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY,
TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY,
TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE,
CoreValuesSourceType.KEYWORD,
(dv, n) -> new DelegateDocValuesField(
new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(FieldData.toString(dv))),
n
)
);
return new IndexFieldData.Builder() {
@Override
public IndexFieldData<?> build(IndexFieldDataCache cache, CircuitBreakerService breakerService) {
deprecationLogger.warn(DeprecationCategory.AGGREGATIONS, "id_field_data", ID_FIELD_DATA_DEPRECATION_MESSAGE);
final IndexFieldData<?> fieldData = fieldDataBuilder.build(cache, breakerService);
return new IndexFieldData<>() {
@Override
public String getFieldName() {
return fieldData.getFieldName();
}
@Override
public ValuesSourceType getValuesSourceType() {
return fieldData.getValuesSourceType();
}
@Override
public LeafFieldData load(LeafReaderContext context) {
return wrap(fieldData.load(context));
}
@Override
public LeafFieldData loadDirect(LeafReaderContext context) throws Exception {
return wrap(fieldData.loadDirect(context));
}
@Override
public SortField sortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) {
XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);
return new SortField(getFieldName(), source, reverse);
}
@Override
public BucketedSort newBucketedSort(
BigArrays bigArrays,
Object missingValue,
MultiValueMode sortMode,
Nested nested,
SortOrder sortOrder,
DocValueFormat format,
int bucketSize,
BucketedSort.ExtraData extra
) {
throw new UnsupportedOperationException("can't sort on the [" + CONTENT_TYPE + "] field");
}
};
}
};
}
}
private static LeafFieldData wrap(LeafFieldData in) {
return new LeafFieldData() {
@Override
public void close() {
in.close();
}
@Override
public long ramBytesUsed() {
return in.ramBytesUsed();
}
@Override
public DocValuesScriptFieldFactory getScriptFieldFactory(String name) {
return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name);
}
@Override
public SortedBinaryDocValues getBytesValues() {
SortedBinaryDocValues inValues = in.getBytesValues();
return new SortedBinaryDocValues() {
@Override
public BytesRef nextValue() throws IOException {
BytesRef encoded = inValues.nextValue();
return new BytesRef(
Uid.decodeId(Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length))
);
}
@Override
public int docValueCount() {
final int count = inValues.docValueCount();
// If the count is not 1 then the impl is not correct as the binary representation
// does not preserve order. But id fields only have one value per doc so we are good.
assert count == 1;
return inValues.docValueCount();
}
@Override
public boolean advanceExact(int doc) throws IOException {
return inValues.advanceExact(doc);
}
};
}
};
}
public ProvidedIdFieldMapper(BooleanSupplier fieldDataEnabled) {
super(new IdFieldType(fieldDataEnabled));
}
@Override
public void preParse(DocumentParserContext context) {
if (context.sourceToParse().id() == null) {
throw new IllegalStateException("_id should have been set on the coordinating node");
}
context.id(context.sourceToParse().id());
context.doc().add(standardIdField(context.id()));
}
@Override
public String documentDescription(DocumentParserContext context) {
return "document with id '" + context.sourceToParse().id() + "'";
}
@Override
public String documentDescription(ParsedDocument parsedDocument) {
return "[" + parsedDocument.id() + "]";
}
@Override
public String reindexId(String id) {
return id;
}
}
| IdFieldType |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ImportAwareTests.java | {
"start": 7362,
"end": 7525
} | class ____ {
@Bean
public String otherImportedConfigBean() {
return "";
}
}
@Configuration
@Import(ImportedConfigLite.class)
static | OtherImportedConfig |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/matchers/Matchers.java | {
"start": 50168,
"end": 51508
} | class ____ for C*\/); // will not match }
*/
public static Matcher<ClassTree> isDirectImplementationOf(String clazz) {
Matcher<Tree> isProvidedType = isSameType(clazz);
return new IsDirectImplementationOf(isProvidedType);
}
/** Matches any node that is a direct extension of the given class. */
public static Matcher<ClassTree> isExtensionOf(String clazz) {
Matcher<Tree> isProvidedType = isSameType(clazz);
return new IsExtensionOf(isProvidedType);
}
@SafeVarargs
public static Matcher<Tree> hasAnyAnnotation(Class<? extends Annotation>... annotations) {
ArrayList<Matcher<Tree>> matchers = new ArrayList<>(annotations.length);
for (Class<? extends Annotation> annotation : annotations) {
matchers.add(hasAnnotation(annotation));
}
return anyOf(matchers);
}
public static Matcher<Tree> hasAnyAnnotation(List<? extends TypeMirror> mirrors) {
ArrayList<Matcher<Tree>> matchers = new ArrayList<>(mirrors.size());
for (TypeMirror mirror : mirrors) {
matchers.add(hasAnnotation(mirror));
}
return anyOf(matchers);
}
public static boolean methodCallInDeclarationOfThrowingRunnable(VisitorState state) {
return stream(state.getPath())
// Find the nearest definitional context for this method invocation
// (i.e.: the nearest surrounding | tree |
java | apache__maven | compat/maven-plugin-api/src/main/java/org/apache/maven/plugin/descriptor/MojoDescriptor.java | {
"start": 1659,
"end": 22257
} | class ____ extends ComponentDescriptor<Mojo> implements Cloneable {
/** The Plexus component type */
public static final String MAVEN_PLUGIN = "maven-plugin";
/** "once-per-session" execution strategy */
public static final String SINGLE_PASS_EXEC_STRATEGY = "once-per-session";
/** "always" execution strategy */
public static final String MULTI_PASS_EXEC_STRATEGY = "always";
private static final String DEFAULT_INSTANTIATION_STRATEGY = "per-lookup";
private static final String DEFAULT_LANGUAGE = "java";
private final ArrayList<Parameter> parameters;
/** By default, the execution strategy is "once-per-session" */
private String executionStrategy = SINGLE_PASS_EXEC_STRATEGY;
/**
* The goal name for the Mojo, that users will reference from the command line to execute the Mojo directly, or
* inside a POM in order to provide Mojo-specific configuration.
*/
private String goal;
/**
* Defines a default phase to bind a mojo execution to if the user does not explicitly set a phase in the POM.
* <i>Note:</i> This will not automagically make a mojo run when the plugin declaration is added to the POM. It
* merely enables the user to omit the <code><phase></code> element from the surrounding
* <code><execution></code> element.
*/
private String phase;
/** Specify the version when the Mojo was added to the API. Similar to Javadoc since. */
private String since;
/** Reference the invocation phase of the Mojo. */
private String executePhase;
/** Reference the invocation goal of the Mojo. */
private String executeGoal;
/** Reference the invocation lifecycle of the Mojo. */
private String executeLifecycle;
/**
* Description with reason of Mojo deprecation. Similar to Javadoc {@code @deprecated}.
* This will trigger a warning when a user tries to use a Mojo marked as deprecated.
*/
private String deprecated;
/**
* Flags this Mojo to run it in a multi-module way, i.e. aggregate the build with the set of projects listed as
* modules. By default, no need to aggregate the Maven project and its child modules
*/
private boolean aggregator = false;
// ----------------------------------------------------------------------
//
// ----------------------------------------------------------------------
/** Specify the required dependencies in a specified scope */
private String dependencyResolutionRequired = null;
/**
* The scope of (transitive) dependencies that should be collected but not resolved.
* @since 3.0-alpha-3
*/
private String dependencyCollectionRequired;
/** By default, the Mojo needs a Maven project to be executed */
private boolean projectRequired = true;
/** By default, the Mojo is assumed to work offline as well */
private boolean onlineRequired = false;
/** Plugin configuration */
private PlexusConfiguration mojoConfiguration;
/** Plugin descriptor */
private PluginDescriptor pluginDescriptor;
/** By default, the Mojo is inherited */
private boolean inheritedByDefault = true;
/** By default, the Mojo cannot be invoked directly */
private boolean directInvocationOnly = false;
/** By default, the Mojo don't need reports to run */
private boolean requiresReports = false;
/**
* By default, mojos are not threadsafe
* @since 3.0-beta-2
*/
private boolean threadSafe = false;
private boolean v4Api = false;
/**
* Default constructor.
*/
public MojoDescriptor() {
this.parameters = new ArrayList<>();
setInstantiationStrategy(DEFAULT_INSTANTIATION_STRATEGY);
setComponentFactory(DEFAULT_LANGUAGE);
}
public MojoDescriptor(PluginDescriptor pd, org.apache.maven.api.plugin.descriptor.MojoDescriptor md) {
this();
this.setPluginDescriptor(pd);
this.setGoal(md.getGoal());
this.setExecuteGoal(md.getExecuteGoal());
this.setExecuteLifecycle(md.getExecuteLifecycle());
this.setExecutePhase(md.getExecutePhase());
this.setDeprecated(md.getDeprecated());
this.setLanguage(md.getLanguage());
this.setAggregator(md.isAggregator());
this.setDependencyCollectionRequired(md.getDependencyCollection());
this.setDependencyResolutionRequired(md.getDependencyResolution());
this.setComponentConfigurator(md.getConfigurator());
this.setInheritedByDefault(md.isInheritedByDefault());
this.setPhase(md.getPhase());
this.setOnlineRequired(md.isOnlineRequired());
this.setProjectRequired(md.isProjectRequired());
this.setSince(md.getSince());
this.setThreadSafe(true);
this.setImplementation(md.getImplementation());
try {
this.setParameters(md.getParameters().stream().map(Parameter::new).collect(Collectors.toList()));
} catch (DuplicateParameterException e) {
throw new IllegalArgumentException(e);
}
this.mojoDescriptorV4 = md;
this.v4Api = true;
}
// ----------------------------------------------------------------------
//
// ----------------------------------------------------------------------
/**
* @return the language of this Mojo, i.e. <code>java</code>
*/
public String getLanguage() {
return getComponentFactory();
}
/**
* @param language the new language
*/
public void setLanguage(String language) {
setComponentFactory(language);
}
/**
* @return Description with reason of a Mojo deprecation.
*/
public String getDeprecated() {
return deprecated;
}
/**
* @param deprecated Description with reason of a Mojo deprecation.
*/
public void setDeprecated(String deprecated) {
this.deprecated = deprecated;
}
/**
* @return the list of parameters copy. Any change to returned list is NOT reflected on this instance. To add
* parameters, use {@link #addParameter(Parameter)} method.
*/
public List<Parameter> getParameters() {
return new ArrayList<>(parameters);
}
/**
* @param parameters the new list of parameters
* @throws DuplicateParameterException if any
*/
public void setParameters(List<Parameter> parameters) throws DuplicateParameterException {
this.parameters.clear();
for (Parameter parameter : parameters) {
addParameter(parameter);
}
}
/**
* @param parameter add a new parameter
* @throws DuplicateParameterException if any
*/
public void addParameter(Parameter parameter) throws DuplicateParameterException {
if (parameters.contains(parameter)) {
throw new DuplicateParameterException(parameter.getName()
+ " has been declared multiple times in mojo with goal: " + getGoal() + " (implementation: "
+ getImplementation() + ")");
}
parameters.add(parameter);
}
/**
* @return the list parameters as a Map (keyed by {@link Parameter#getName()}) that is built from
* {@link #parameters} list on each call. In other words, the map returned is built on fly and is a copy.
* Any change to this map is NOT reflected on list and other way around!
*/
public Map<String, Parameter> getParameterMap() {
LinkedHashMap<String, Parameter> parameterMap = new LinkedHashMap<>();
for (Parameter pd : parameters) {
parameterMap.put(pd.getName(), pd);
}
return parameterMap;
}
// ----------------------------------------------------------------------
// Dependency requirement
// ----------------------------------------------------------------------
/**
* @param requiresDependencyResolution the new required dependencies in a specified scope
*/
public void setDependencyResolutionRequired(String requiresDependencyResolution) {
this.dependencyResolutionRequired = requiresDependencyResolution;
}
public String getDependencyResolutionRequired() {
return dependencyResolutionRequired;
}
/**
* @return the required dependencies in a specified scope
* TODO the name is not intelligible
*/
@Deprecated
public String isDependencyResolutionRequired() {
return dependencyResolutionRequired;
}
/**
* @since 3.0-alpha-3
*/
public void setDependencyCollectionRequired(String requiresDependencyCollection) {
this.dependencyCollectionRequired = requiresDependencyCollection;
}
/**
* Gets the scope of (transitive) dependencies that should be collected. Dependency collection refers to the process
* of calculating the complete dependency tree in terms of artifact coordinates. In contrast to dependency
* resolution, this does not include the download of the files for the dependency artifacts. It is meant for mojos
* that only want to analyze the set of transitive dependencies, in particular during early lifecycle phases where
* full dependency resolution might fail due to projects which haven't been built yet.
*
* @return The scope of (transitive) dependencies that should be collected or {@code null} if none.
* @since 3.0-alpha-3
*/
public String getDependencyCollectionRequired() {
return dependencyCollectionRequired;
}
// ----------------------------------------------------------------------
// Project requirement
// ----------------------------------------------------------------------
/**
* @param requiresProject <code>true</code> if the Mojo needs a Maven project to be executed, <code>false</code>
* otherwise.
*/
public void setProjectRequired(boolean requiresProject) {
this.projectRequired = requiresProject;
}
/**
* @return <code>true</code> if the Mojo needs a Maven project to be executed, <code>false</code> otherwise.
*/
public boolean isProjectRequired() {
return projectRequired;
}
// ----------------------------------------------------------------------
// Online vs. Offline requirement
// ----------------------------------------------------------------------
/**
* @param requiresOnline <code>true</code> if the Mojo is online, <code>false</code> otherwise.
*/
public void setOnlineRequired(boolean requiresOnline) {
this.onlineRequired = requiresOnline;
}
/**
* @return <code>true</code> if the Mojo is online, <code>false</code> otherwise.
*/
// blech! this isn't even intelligible as a method name. provided for
// consistency...
public boolean isOnlineRequired() {
return onlineRequired;
}
/**
* @return <code>true</code> if the Mojo is online, <code>false</code> otherwise.
*/
// more english-friendly method...keep the code clean! :)
public boolean requiresOnline() {
return onlineRequired;
}
/**
* @return the bound phase name of the Mojo
*/
public String getPhase() {
return phase;
}
/**
* @param phase the new bound phase name of the Mojo
*/
public void setPhase(String phase) {
this.phase = phase;
}
/**
* @return the version when the Mojo was added to the API
*/
public String getSince() {
return since;
}
/**
* @param since the new version when the Mojo was added to the API
*/
public void setSince(String since) {
this.since = since;
}
/**
* @return The goal name of the Mojo
*/
public String getGoal() {
return goal;
}
/**
* @param goal The new goal name of the Mojo
*/
public void setGoal(String goal) {
this.goal = goal;
}
/**
* @return the invocation phase of the Mojo
*/
public String getExecutePhase() {
return executePhase;
}
/**
* @param executePhase the new invocation phase of the Mojo
*/
public void setExecutePhase(String executePhase) {
this.executePhase = executePhase;
}
/**
* @return <code>true</code> if the Mojo uses <code>always</code> for the <code>executionStrategy</code>
*/
public boolean alwaysExecute() {
return MULTI_PASS_EXEC_STRATEGY.equals(executionStrategy);
}
/**
* @return the execution strategy
*/
public String getExecutionStrategy() {
return executionStrategy;
}
/**
* @param executionStrategy the new execution strategy
*/
public void setExecutionStrategy(String executionStrategy) {
this.executionStrategy = executionStrategy;
}
/**
* @return the mojo configuration
*/
public PlexusConfiguration getMojoConfiguration() {
if (mojoConfiguration == null) {
mojoConfiguration = new XmlPlexusConfiguration("configuration");
}
return mojoConfiguration;
}
/**
* @param mojoConfiguration a new mojo configuration
*/
public void setMojoConfiguration(PlexusConfiguration mojoConfiguration) {
this.mojoConfiguration = mojoConfiguration;
}
/** {@inheritDoc} */
@Override
public String getRole() {
return isV4Api() ? "org.apache.maven.api.plugin.Mojo" : Mojo.ROLE;
}
/** {@inheritDoc} */
@Override
public String getRoleHint() {
return getId();
}
/**
* @return the id of the mojo, based on the goal name
*/
public String getId() {
return getPluginDescriptor().getId() + ":" + getGoal();
}
/**
* @return the full goal name
* @see PluginDescriptor#getGoalPrefix()
* @see #getGoal()
*/
public String getFullGoalName() {
return getPluginDescriptor().getGoalPrefix() + ":" + getGoal();
}
/** {@inheritDoc} */
@Override
public String getComponentType() {
return MAVEN_PLUGIN;
}
/**
* @return the plugin descriptor
*/
public PluginDescriptor getPluginDescriptor() {
return pluginDescriptor;
}
/**
* @param pluginDescriptor the new plugin descriptor
*/
public void setPluginDescriptor(PluginDescriptor pluginDescriptor) {
this.pluginDescriptor = pluginDescriptor;
}
/**
* @return <code>true</code> if the Mojo is inherited, <code>false</code> otherwise.
*/
public boolean isInheritedByDefault() {
return inheritedByDefault;
}
/**
* @param inheritedByDefault <code>true</code> if the Mojo is inherited, <code>false</code> otherwise.
*/
public void setInheritedByDefault(boolean inheritedByDefault) {
this.inheritedByDefault = inheritedByDefault;
}
/** {@inheritDoc} */
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object instanceof MojoDescriptor other) {
return Objects.equals(getPluginDescriptor(), other.getPluginDescriptor())
&& Objects.equals(getGoal(), other.getGoal());
}
return false;
}
/** {@inheritDoc} */
@Override
public int hashCode() {
return Objects.hash(getGoal(), getPluginDescriptor());
}
/**
* @return the invocation lifecycle of the Mojo
*/
public String getExecuteLifecycle() {
return executeLifecycle;
}
/**
* @param executeLifecycle the new invocation lifecycle of the Mojo
*/
public void setExecuteLifecycle(String executeLifecycle) {
this.executeLifecycle = executeLifecycle;
}
/**
* @param aggregator <code>true</code> if the Mojo uses the Maven project and its child modules,
* <code>false</code> otherwise.
*/
public void setAggregator(boolean aggregator) {
this.aggregator = aggregator;
}
/**
* @return <code>true</code> if the Mojo uses the Maven project and its child modules,
* <code>false</code> otherwise.
*/
public boolean isAggregator() {
return aggregator;
}
/**
* @return <code>true</code> if the Mojo cannot be invoked directly, <code>false</code> otherwise.
*/
public boolean isDirectInvocationOnly() {
return directInvocationOnly;
}
/**
* @param directInvocationOnly <code>true</code> if the Mojo cannot be invoked directly,
* <code>false</code> otherwise.
*/
public void setDirectInvocationOnly(boolean directInvocationOnly) {
this.directInvocationOnly = directInvocationOnly;
}
/**
* @return <code>true</code> if the Mojo needs reports to run, <code>false</code> otherwise.
*/
public boolean isRequiresReports() {
return requiresReports;
}
/**
* @param requiresReports <code>true</code> if the Mojo needs reports to run, <code>false</code> otherwise.
*/
public void setRequiresReports(boolean requiresReports) {
this.requiresReports = requiresReports;
}
/**
* @param executeGoal the new invocation goal of the Mojo
*/
public void setExecuteGoal(String executeGoal) {
this.executeGoal = executeGoal;
}
/**
* @return the invocation goal of the Mojo
*/
public String getExecuteGoal() {
return executeGoal;
}
/**
* @return True if the <code>Mojo</code> is thread-safe and can be run safely in parallel
* @since 3.0-beta-2
*/
public boolean isThreadSafe() {
return threadSafe;
}
/**
* @param threadSafe indicates that the mojo is thread-safe and can be run safely in parallel
* @since 3.0-beta-2
*/
public void setThreadSafe(boolean threadSafe) {
this.threadSafe = threadSafe;
}
/**
* @return {@code true} if this mojo forks either a goal or the lifecycle, {@code false} otherwise.
*/
public boolean isForking() {
return (getExecuteGoal() != null && !getExecuteGoal().isEmpty())
|| (getExecutePhase() != null && !getExecutePhase().isEmpty());
}
public boolean isV4Api() {
return v4Api;
}
/**
* Creates a shallow copy of this mojo descriptor.
*/
@Override
public MojoDescriptor clone() {
try {
return (MojoDescriptor) super.clone();
} catch (CloneNotSupportedException e) {
throw new UnsupportedOperationException(e);
}
}
private volatile org.apache.maven.api.plugin.descriptor.MojoDescriptor mojoDescriptorV4;
public org.apache.maven.api.plugin.descriptor.MojoDescriptor getMojoDescriptorV4() {
if (mojoDescriptorV4 == null) {
synchronized (this) {
if (mojoDescriptorV4 == null) {
mojoDescriptorV4 = org.apache.maven.api.plugin.descriptor.MojoDescriptor.newBuilder()
.goal(goal)
.description(getDescription())
.implementation(getImplementation())
.language(getLanguage())
.phase(phase)
.executeGoal(executeGoal)
.executeLifecycle(executeLifecycle)
.executePhase(executePhase)
.aggregator(aggregator)
.dependencyResolution(dependencyResolutionRequired)
.dependencyCollection(dependencyCollectionRequired)
.projectRequired(projectRequired)
.onlineRequired(onlineRequired)
.inheritedByDefault(inheritedByDefault)
.since(since)
.deprecated(deprecated)
.configurator(getComponentConfigurator())
.parameters(getParameters().stream()
.filter(p -> p.getRequirement() == null)
.map(Parameter::getParameterV4)
.collect(Collectors.toList()))
.id(getId())
.fullGoalName(getFullGoalName())
.build();
}
}
}
return mojoDescriptorV4;
}
}
| MojoDescriptor |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/EnumUtils.java | {
"start": 21136,
"end": 21683
} | enum ____, may be null.
* @return the new stream, empty of {@code clazz} is null.
* @since 3.18.0
* @see Class#getEnumConstants()
*/
public static <T> Stream<T> stream(final Class<T> clazz) {
return clazz != null ? Streams.of(clazz.getEnumConstants()) : Stream.empty();
}
/**
* This constructor is public to permit tools that require a JavaBean
* instance to operate.
*
* @deprecated TODO Make private in 4.0.
*/
@Deprecated
public EnumUtils() {
// empty
}
}
| values |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/insert/MySqlInsertTest_3.java | {
"start": 1033,
"end": 1881
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "INSERT INTO tbl_name (col1,col2) VALUES(col2*2,15);";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
SQLInsertStatement insertStmt = (SQLInsertStatement) stmt;
assertEquals(2, insertStmt.getValues().getValues().size());
assertEquals(2, insertStmt.getColumns().size());
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals("INSERT INTO tbl_name (col1, col2)" +
"\nVALUES (col2 * 2, 15);", SQLUtils.toMySqlString(insertStmt));
}
}
| MySqlInsertTest_3 |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/DataTypeFactory.java | {
"start": 1866,
"end": 1915
} | interface ____ {
/** Returns the | DataTypeFactory |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java | {
"start": 28665,
"end": 29872
} | class ____ extends
ResourceTrackerService {
public CustomedResourceTrackerService(RMContext rmContext,
NodesListManager nodesListManager,
NMLivelinessMonitor nmLivelinessMonitor,
RMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInRM nmTokenSecretManager) {
super(rmContext, nodesListManager, nmLivelinessMonitor,
containerTokenSecretManager, nmTokenSecretManager);
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
resetStartFailoverFlag(true);
// make sure failover has been triggered
assertTrue(waittingForFailOver());
return super.registerNodeManager(request);
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
resetStartFailoverFlag(true);
// make sure failover has been triggered
assertTrue(waittingForFailOver());
return super.nodeHeartbeat(request);
}
}
private | CustomedResourceTrackerService |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoAnnotationCompilationTest.java | {
"start": 19825,
"end": 20464
} | interface ____ {}",
" @AutoAnnotation static Empty newEmpty() {}",
" @NotAutoAnnotation Empty notNewEmpty() {}",
"}");
Compilation compilation =
javac().withProcessors(new AutoAnnotationProcessor()).compile(erroneousJavaFileObject);
assertThat(compilation)
.hadErrorContaining("NotAutoAnnotation")
.inFile(erroneousJavaFileObject)
.onLineContaining("@NotAutoAnnotation");
assertThat(
compilation.errors().stream()
.map(diag -> diag.getMessage(null))
.filter(m -> m.contains("static")))
.isEmpty();
}
}
| Empty |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/discovery/SeedHostsProvider.java | {
"start": 679,
"end": 1144
} | interface ____ {
/**
* Returns a list of seed hosts to use for discovery. Called repeatedly while discovery is active (i.e. while there is no master)
* so that this list may be dynamic.
*/
List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver);
/**
* Helper object that allows to resolve a list of hosts to a list of transport addresses.
* Each host is resolved into a transport address
*/
| SeedHostsProvider |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/results/internal/dynamic/DynamicFetchBuilderContainer.java | {
"start": 290,
"end": 988
} | interface ____ {
/**
* Locate an explicit fetch definition for the named fetchable
*/
FetchBuilder findFetchBuilder(Fetchable fetchable);
/**
* Add a property mapped to a single column.
*/
DynamicFetchBuilderContainer addProperty(Fetchable fetchable, String columnAlias);
/**
* Add a property mapped to multiple columns
*/
DynamicFetchBuilderContainer addProperty(Fetchable fetchable, String... columnAliases);
/**
* Add a property whose columns can later be defined using {@link DynamicFetchBuilder#addColumnAlias}
*/
DynamicFetchBuilder addProperty(Fetchable fetchable);
void addFetchBuilder(Fetchable fetchable, FetchBuilder fetchBuilder);
}
| DynamicFetchBuilderContainer |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStepTests.java | {
"start": 1865,
"end": 17580
} | class ____ extends AbstractStepTestCase<WaitForSnapshotStep> {
private final ClusterAdminClient clusterAdminClient = mock(ClusterAdminClient.class);
@Before
public void setupClusterClient() {
Mockito.when(adminClient.cluster()).thenReturn(clusterAdminClient);
}
@Override
protected WaitForSnapshotStep createRandomInstance() {
return new WaitForSnapshotStep(randomStepKey(), randomStepKey(), client, randomAlphaOfLengthBetween(1, 10));
}
@Override
protected WaitForSnapshotStep mutateInstance(WaitForSnapshotStep instance) {
Step.StepKey key = instance.getKey();
Step.StepKey nextKey = instance.getNextStepKey();
String policy = instance.getPolicy();
switch (between(0, 2)) {
case 0 -> key = new Step.StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new Step.StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
case 2 -> policy = randomValueOtherThan(policy, () -> randomAlphaOfLengthBetween(1, 10));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new WaitForSnapshotStep(key, nextKey, client, policy);
}
@Override
protected WaitForSnapshotStep copyInstance(WaitForSnapshotStep instance) {
return new WaitForSnapshotStep(instance.getKey(), instance.getNextStepKey(), client, instance.getPolicy());
}
public void testNoSlmPolicies() {
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, Map.of("action_time", Long.toString(randomLong())))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForSnapshotStep instance = createRandomInstance();
SetOnce<Exception> error = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
instance.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean conditionMet, ToXContentObject info) {
logger.warn("expected an error got unexpected response {}", conditionMet);
throw new AssertionError("unexpected method call");
}
@Override
public void onFailure(Exception e) {
error.set(e);
}
}, MASTER_TIMEOUT);
assertThat(error.get().getMessage(), containsString("'" + instance.getPolicy() + "' not found"));
}
public void testSlmPolicyNotExecuted() {
WaitForSnapshotStep instance = createRandomInstance();
SnapshotLifecyclePolicyMetadata slmPolicy = SnapshotLifecyclePolicyMetadata.builder()
.setModifiedDate(randomLong())
.setPolicy(new SnapshotLifecyclePolicy("", "", "", "", null, null))
.build();
SnapshotLifecycleMetadata smlMetadata = new SnapshotLifecycleMetadata(
Map.of(instance.getPolicy(), slmPolicy),
OperationMode.RUNNING,
null
);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, Map.of("action_time", Long.toString(randomLong())))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
SetOnce<Boolean> isConditionMet = new SetOnce<>();
SetOnce<ToXContentObject> informationContext = new SetOnce<>();
final var state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(indexMetadata, true)
.putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata)
);
instance.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean conditionMet, ToXContentObject info) {
isConditionMet.set(conditionMet);
informationContext.set(info);
}
@Override
public void onFailure(Exception e) {
logger.warn("unexpected onFailure call", e);
throw new AssertionError("unexpected method call");
}
}, MASTER_TIMEOUT);
assertThat(isConditionMet.get(), is(false));
assertTrue(toString(informationContext.get()).contains("to be executed"));
}
public void testSlmPolicyExecutedBeforeStep() throws IOException {
// The snapshot was started and finished before the phase time, so we do not expect the step to finish:
assertSlmPolicyExecuted(false, false);
}
public void testSlmPolicyExecutedAfterStep() {
final var projectId = randomProjectIdOrDefault();
String repoName = randomAlphaOfLength(10);
String snapshotName = randomAlphaOfLength(10);
String indexName = randomAlphaOfLength(10);
// The snapshot was started and finished after the phase time, so we do expect the step to finish:
GetSnapshotsResponse response = new GetSnapshotsResponse(
List.of(
new SnapshotInfo(
new Snapshot(projectId, randomAlphaOfLength(10), new SnapshotId(snapshotName, randomAlphaOfLength(10))),
List.of(indexName),
List.of(),
List.of(),
SnapshotState.SUCCESS
)
),
null,
0,
0
);
Mockito.doAnswer(invocationOnMock -> {
GetSnapshotsRequest request = (GetSnapshotsRequest) invocationOnMock.getArguments()[0];
assertGetSnapshotRequest(repoName, snapshotName, request);
@SuppressWarnings("unchecked")
ActionListener<GetSnapshotsResponse> listener = (ActionListener<GetSnapshotsResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(response);
return null;
}).when(clusterAdminClient).getSnapshots(any(), any());
assertSlmPolicyExecuted(projectId, repoName, snapshotName, indexName, true, true);
}
public void testSlmPolicyNotExecutedWhenStartIsBeforePhaseTime() throws IOException {
// The snapshot was started before the phase time and finished after, so we do expect the step to finish:
assertSlmPolicyExecuted(false, true);
}
public void testIndexNotBackedUpYet() {
final var projectId = randomProjectIdOrDefault();
String repoName = randomAlphaOfLength(10);
String snapshotName = randomAlphaOfLength(10);
String indexName = randomAlphaOfLength(10);
// The latest snapshot does not contain the index we are interested in
GetSnapshotsResponse response = new GetSnapshotsResponse(
List.of(
new SnapshotInfo(
new Snapshot(projectId, randomAlphaOfLength(10), new SnapshotId(snapshotName, randomAlphaOfLength(10))),
List.of(),
List.of(),
List.of(),
SnapshotState.SUCCESS
)
),
null,
0,
0
);
Mockito.doAnswer(invocationOnMock -> {
GetSnapshotsRequest request = (GetSnapshotsRequest) invocationOnMock.getArguments()[0];
assertGetSnapshotRequest(repoName, snapshotName, request);
@SuppressWarnings("unchecked")
ActionListener<GetSnapshotsResponse> listener = (ActionListener<GetSnapshotsResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(response);
return null;
}).when(clusterAdminClient).getSnapshots(any(), any());
long phaseTime = randomLongBetween(100, 100000);
long actionTime = phaseTime + randomLongBetween(100, 100000);
WaitForSnapshotStep instance = createRandomInstance();
SnapshotLifecyclePolicyMetadata slmPolicy = SnapshotLifecyclePolicyMetadata.builder()
.setModifiedDate(randomLong())
.setPolicy(new SnapshotLifecyclePolicy("", "", "", repoName, null, null))
.setLastSuccess(new SnapshotInvocationRecord(snapshotName, actionTime + 10, actionTime + 100, ""))
.build();
SnapshotLifecycleMetadata smlMetadata = new SnapshotLifecycleMetadata(
Map.of(instance.getPolicy(), slmPolicy),
OperationMode.RUNNING,
null
);
IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, Map.of("action_time", Long.toString(actionTime)))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
SetOnce<Exception> error = new SetOnce<>();
final var state = projectStateFromProject(
ProjectMetadata.builder(projectId).put(indexMetadata, true).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata)
);
instance.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean conditionMet, ToXContentObject info) {
logger.warn("expected an error got unexpected response {}", conditionMet);
throw new AssertionError("unexpected method call");
}
@Override
public void onFailure(Exception e) {
error.set(e);
}
}, MASTER_TIMEOUT);
assertThat(error.get().getMessage(), containsString("does not include index '" + indexName + "'"));
}
private void assertSlmPolicyExecuted(boolean startTimeAfterPhaseTime, boolean finishTimeAfterPhaseTime) {
assertSlmPolicyExecuted(
randomProjectIdOrDefault(),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
startTimeAfterPhaseTime,
finishTimeAfterPhaseTime
);
}
private void assertSlmPolicyExecuted(
ProjectId projectId,
String repoName,
String snapshotName,
String indexName,
boolean startTimeAfterPhaseTime,
boolean finishTimeAfterPhaseTime
) {
long phaseTime = randomLong();
WaitForSnapshotStep instance = createRandomInstance();
SnapshotLifecyclePolicyMetadata slmPolicy = SnapshotLifecyclePolicyMetadata.builder()
.setModifiedDate(randomLong())
.setPolicy(new SnapshotLifecyclePolicy("", "", "", repoName, null, null))
.setLastSuccess(
new SnapshotInvocationRecord(
snapshotName,
phaseTime + (startTimeAfterPhaseTime ? 10 : -100),
phaseTime + (finishTimeAfterPhaseTime ? 100 : -10),
""
)
)
.build();
SnapshotLifecycleMetadata smlMetadata = new SnapshotLifecycleMetadata(
Map.of(instance.getPolicy(), slmPolicy),
OperationMode.RUNNING,
null
);
IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, Map.of("action_time", Long.toString(phaseTime)))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
SetOnce<Boolean> isConditionMet = new SetOnce<>();
SetOnce<ToXContentObject> informationContext = new SetOnce<>();
final var state = projectStateFromProject(
ProjectMetadata.builder(projectId).put(indexMetadata, true).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata)
);
instance.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean conditionMet, ToXContentObject info) {
isConditionMet.set(conditionMet);
informationContext.set(info);
}
@Override
public void onFailure(Exception e) {
logger.warn("unexpected onFailure call", e);
throw new AssertionError("unexpected method call");
}
}, MASTER_TIMEOUT);
if (startTimeAfterPhaseTime) {
assertThat(isConditionMet.get(), is(true));
assertThat(informationContext.get(), is(EmptyInfo.INSTANCE));
} else {
assertThat(isConditionMet.get(), is(false));
assertThat(toString(informationContext.get()), containsString("to be executed"));
}
}
private void assertGetSnapshotRequest(String repoName, String snapshotName, GetSnapshotsRequest request) {
assertThat(request.repositories().length, is(1));
assertThat(request.repositories()[0], equalTo(repoName));
assertThat(request.snapshots().length, is(1));
assertThat(request.snapshots()[0], equalTo(snapshotName));
assertThat(request.includeIndexNames(), is(true));
assertThat(request.verbose(), is(false));
}
public void testNullStartTime() {
long phaseTime = randomLong();
WaitForSnapshotStep instance = createRandomInstance();
SnapshotLifecyclePolicyMetadata slmPolicy = SnapshotLifecyclePolicyMetadata.builder()
.setModifiedDate(randomLong())
.setPolicy(new SnapshotLifecyclePolicy("", "", "", "", null, null))
.setLastSuccess(new SnapshotInvocationRecord("", null, phaseTime + 100, ""))
.build();
SnapshotLifecycleMetadata smlMetadata = new SnapshotLifecycleMetadata(
Map.of(instance.getPolicy(), slmPolicy),
OperationMode.RUNNING,
null
);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, Map.of("phase_time", Long.toString(phaseTime)))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
SetOnce<Exception> error = new SetOnce<>();
final var state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(indexMetadata, true)
.putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata)
);
instance.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean conditionMet, ToXContentObject info) {
logger.warn("expected an error got unexpected response {}", conditionMet);
throw new AssertionError("unexpected method call");
}
@Override
public void onFailure(Exception e) {
error.set(e);
}
}, MASTER_TIMEOUT);
assertThat(error.get().getMessage(), containsString("no information about ILM action start"));
}
private String toString(ToXContentObject info) {
return Strings.toString(info);
}
}
| WaitForSnapshotStepTests |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/processor/resume/kafka/SingleNodeKafkaResumeStrategy.java | {
"start": 2649,
"end": 17038
} | class ____ implements KafkaResumeStrategy, CamelContextAware {
private static final Logger LOG = LoggerFactory.getLogger(SingleNodeKafkaResumeStrategy.class);
private Consumer<byte[], byte[]> consumer;
private Producer<byte[], byte[]> producer;
private Duration pollDuration = Duration.ofSeconds(1);
private boolean subscribed;
private ResumeAdapter adapter;
private KafkaResumeStrategyConfiguration resumeStrategyConfiguration;
private ExecutorService executorService;
private final ReentrantLock writeLock = new ReentrantLock();
private CountDownLatch initLatch;
private CamelContext camelContext;
public SingleNodeKafkaResumeStrategy() {
}
/**
* Builds an instance of this class
*
* @param resumeStrategyConfiguration the configuration to use for this strategy instance
*/
public SingleNodeKafkaResumeStrategy(KafkaResumeStrategyConfiguration resumeStrategyConfiguration) {
this.resumeStrategyConfiguration = resumeStrategyConfiguration;
}
/**
* Builds an instance of this class
*
* @param resumeStrategyConfiguration the configuration to use for this strategy instance
*/
public SingleNodeKafkaResumeStrategy(KafkaResumeStrategyConfiguration resumeStrategyConfiguration,
ExecutorService executorService) {
this.resumeStrategyConfiguration = resumeStrategyConfiguration;
this.executorService = executorService;
}
/**
* Sends data to a topic. The records will always be sent asynchronously. If there's an error, a producer error
* counter will be increased.
*
* @param message the message to send
*
*/
protected void produce(byte[] key, byte[] message, UpdateCallBack updateCallBack) {
ProducerRecord<byte[], byte[]> producerRecord
= new ProducerRecord<>(resumeStrategyConfiguration.getTopic(), key, message);
producer.send(producerRecord, (recordMetadata, e) -> {
if (e != null) {
LOG.error("Failed to send message {}", e.getMessage(), e);
}
if (updateCallBack != null) {
updateCallBack.onUpdate(e);
}
});
}
protected void doAdd(OffsetKey<?> key, Offset<?> offsetValue) {
if (adapter instanceof Cacheable cacheable) {
cacheable.add(key, offsetValue);
}
}
@Override
public <T extends Resumable> void updateLastOffset(T offset) throws Exception {
updateLastOffset(offset, null);
}
@Override
public <T extends Resumable> void updateLastOffset(T offset, UpdateCallBack updateCallBack) throws Exception {
OffsetKey<?> key = offset.getOffsetKey();
Offset<?> offsetValue = offset.getLastOffset();
if (LOG.isDebugEnabled()) {
LOG.debug("Updating offset on Kafka with key {} to {}", key.getValue(), offsetValue.getValue());
}
updateLastOffset(key, offsetValue);
}
@Override
public void updateLastOffset(OffsetKey<?> offsetKey, Offset<?> offset) throws Exception {
updateLastOffset(offsetKey, offset, null);
}
@Override
public void updateLastOffset(OffsetKey<?> offsetKey, Offset<?> offset, UpdateCallBack updateCallBack) throws Exception {
ByteBuffer keyBuffer = offsetKey.serialize();
ByteBuffer valueBuffer = offset.serialize();
try {
writeLock.lock();
produce(keyBuffer.array(), valueBuffer.array(), updateCallBack);
} finally {
writeLock.unlock();
}
doAdd(offsetKey, offset);
}
/**
* Loads the existing data into the cache
*/
@Override
public void loadCache() {
if (!(adapter instanceof Deserializable)) {
throw new RuntimeCamelException("Cannot load data for an adapter that is not deserializable");
}
initLatch = new CountDownLatch(resumeStrategyConfiguration.getMaxInitializationRetries());
if (executorService == null) {
executorService
= camelContext.getExecutorServiceManager().newSingleThreadExecutor(this, "SingleNodeKafkaResumeStrategy");
}
executorService.submit(() -> refresh(initLatch));
}
private void waitForInitialization() {
try {
LOG.trace("Waiting for kafka resume strategy async initialization");
if (!initLatch.await(resumeStrategyConfiguration.getMaxInitializationDuration().toMillis(),
TimeUnit.MILLISECONDS)) {
LOG.debug("The initialization timed out");
}
LOG.trace("Kafka resume strategy initialization complete");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
* Launch a thread to refresh the offsets periodically
*/
private void refresh(CountDownLatch latch) {
LOG.trace("Creating a offset cache refresher");
try {
consumer = createConsumer();
subscribe(consumer);
LOG.debug("Loading records from topic {}", resumeStrategyConfiguration.getTopic());
consumer.subscribe(Collections.singletonList(resumeStrategyConfiguration.getTopic()));
poll(consumer, latch);
} catch (WakeupException e) {
LOG.info("Kafka consumer was interrupted during a blocking call");
} catch (Exception e) {
LOG.error("Error while refreshing the local cache: {}", e.getMessage(), e);
} finally {
if (consumer != null) {
consumer.unsubscribe();
try {
consumer.close(Duration.ofSeconds(5));
} catch (Exception e) {
LOG.warn("Error closing the consumer: {} (this error will be ignored)", e.getMessage(), e);
}
}
}
}
protected void poll(Consumer<byte[], byte[]> consumer, CountDownLatch latch) {
Deserializable deserializable = (Deserializable) adapter;
boolean initialized = false;
do {
ConsumerRecords<byte[], byte[]> records = consume(consumer);
for (ConsumerRecord<byte[], byte[]> consumerRecord : records) {
byte[] value = consumerRecord.value();
if (LOG.isTraceEnabled()) {
LOG.trace("Read from Kafka at {} ({}): {}", Instant.ofEpochMilli(consumerRecord.timestamp()),
consumerRecord.timestampType(), value);
}
if (!deserializable.deserialize(ByteBuffer.wrap(consumerRecord.key()),
ByteBuffer.wrap(consumerRecord.value()))) {
LOG.warn("Deserializer indicates that this is the last record to deserialize");
}
}
if (!initialized) {
if (latch.getCount() == 1) {
initialized = true;
}
latch.countDown();
}
} while (true);
}
/**
* Subscribe to the topic if not subscribed yet
*
* @param topic the topic to consume the messages from
*/
protected void checkAndSubscribe(Consumer<byte[], byte[]> consumer, String topic) {
if (!subscribed) {
consumer.subscribe(Collections.singletonList(topic));
subscribed = true;
}
}
/**
* Subscribe to the topic if not subscribed yet
*
* @param topic the topic to consume the messages from
* @param remaining the number of messages to rewind from the last offset position (used to fill the cache)
*/
public void checkAndSubscribe(Consumer<byte[], byte[]> consumer, String topic, long remaining) {
if (!subscribed) {
consumer.subscribe(Collections.singletonList(topic), getConsumerRebalanceListener(consumer, remaining));
subscribed = true;
}
}
private ConsumerRebalanceListener getConsumerRebalanceListener(Consumer<byte[], byte[]> consumer, long remaining) {
return new ConsumerRebalanceListener() {
@Override
public void onPartitionsRevoked(Collection<TopicPartition> collection) {
// NO-OP
}
@Override
public void onPartitionsAssigned(Collection<TopicPartition> assignments) {
for (TopicPartition assignment : assignments) {
final long endPosition = consumer.position(assignment);
final long startPosition = endPosition - remaining;
if (startPosition >= 0) {
consumer.seek(assignment, startPosition);
} else {
LOG.info(
"Ignoring the seek command because the initial offset is negative (the topic is likely empty)");
}
}
}
};
}
/**
* Consumes message from the topic previously setup
*
* @return An instance of the consumer records
*/
protected ConsumerRecords<byte[], byte[]> consume(Consumer<byte[], byte[]> consumer) {
ConsumerRecords<byte[], byte[]> records = consumer.poll(pollDuration);
if (!records.isEmpty()) {
return records;
}
return ConsumerRecords.empty();
}
/**
* Consumes message from the topic previously setup
*
* @param retries how many times to retry consuming data from the topic
* @param consumer the kafka consumer object instance to use
* @return An instance of the consumer records
*/
protected ConsumerRecords<byte[], byte[]> consume(int retries, Consumer<byte[], byte[]> consumer) {
while (retries > 0) {
ConsumerRecords<byte[], byte[]> records = consumer.poll(pollDuration);
if (!records.isEmpty()) {
return records;
}
retries--;
}
return ConsumerRecords.empty();
}
private void subscribe(Consumer<byte[], byte[]> consumer) {
if (adapter instanceof Cacheable cacheable) {
ResumeCache<?> cache = cacheable.getCache();
if (cache.capacity() >= 1) {
checkAndSubscribe(consumer, resumeStrategyConfiguration.getTopic(), cache.capacity());
} else {
checkAndSubscribe(consumer, resumeStrategyConfiguration.getTopic());
}
} else {
checkAndSubscribe(consumer, resumeStrategyConfiguration.getTopic());
}
}
@Override
public ResumeAdapter getAdapter() {
if (adapter == null) {
waitForInitialization();
}
return adapter;
}
@Override
public void setAdapter(ResumeAdapter adapter) {
this.adapter = adapter;
}
@Override
public void build() {
// NO-OP
}
@Override
public void init() {
LOG.debug("Initializing the Kafka resume strategy");
}
private void createProducer() {
if (producer == null) {
producer = new KafkaProducer<>(resumeStrategyConfiguration.getProducerProperties());
}
}
private Consumer<byte[], byte[]> createConsumer() {
return new KafkaConsumer<>(resumeStrategyConfiguration.getConsumerProperties());
}
@Override
public void stop() {
try {
LOG.trace("Trying to obtain a lock for closing the producer");
if (!writeLock.tryLock(1, TimeUnit.SECONDS)) {
LOG.warn("Failed to obtain a lock for closing the producer. Force closing the producer ...");
}
LOG.info("Closing the Kafka producer");
IOHelper.close(producer, "Kafka producer", LOG);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
LOG.warn("Error closing the Kafka producer: {} (this error will be ignored)", e.getMessage(), e);
} finally {
writeLock.unlock();
}
try {
LOG.info("Closing the Kafka consumer");
consumer.wakeup();
if (executorService != null) {
executorService.shutdown();
if (!executorService.awaitTermination(2, TimeUnit.SECONDS)) {
LOG.warn("Kafka consumer did not shutdown within 2 seconds");
executorService.shutdownNow();
}
} else {
// This may happen if the start up has failed in some other part
LOG.trace("There's no executor service to shutdown");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@Override
public void close() throws IOException {
stop();
}
@Override
public void start() {
LOG.info("Starting the kafka resume strategy");
createProducer();
}
public Duration getPollDuration() {
return pollDuration;
}
public void setPollDuration(Duration pollDuration) {
this.pollDuration = Objects.requireNonNull(pollDuration, "The poll duration cannot be null");
}
protected Producer<byte[], byte[]> getProducer() {
return producer;
}
@Override
public void setResumeStrategyConfiguration(ResumeStrategyConfiguration resumeStrategyConfiguration) {
if (resumeStrategyConfiguration instanceof KafkaResumeStrategyConfiguration kafkaResumeStrategyConfiguration) {
this.resumeStrategyConfiguration = kafkaResumeStrategyConfiguration;
} else {
throw new RuntimeCamelException(
"Invalid resume strategy configuration of type " +
ObjectHelper.className(resumeStrategyConfiguration));
}
}
@Override
public ResumeStrategyConfiguration getResumeStrategyConfiguration() {
return resumeStrategyConfiguration;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
}
| SingleNodeKafkaResumeStrategy |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/scheduling/PipelinedRegionSchedulingITCase.java | {
"start": 9865,
"end": 10963
} | class ____ extends AbstractInvokable {
public Receiver(Environment environment) {
super(environment);
}
@Override
public void invoke() throws Exception {
if (getEnvironment().getAllInputGates().length < 2) {
throw new IllegalStateException();
}
final String[] tmpDirs = getEnvironment().getTaskManagerInfo().getTmpDirectories();
final List<RecordReader<IntValue>> readers =
Arrays.asList(getEnvironment().getAllInputGates()).stream()
.map(
inputGate ->
new RecordReader<>(inputGate, IntValue.class, tmpDirs))
.collect(Collectors.toList());
for (RecordReader<IntValue> reader : readers) {
while (reader.hasNext()) {
reader.next();
}
}
}
}
/** Invokable which fails exactly once with a {@link PartitionException}. */
public static | Receiver |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/reflection/Jdk.java | {
"start": 800,
"end": 2107
} | class ____ {
/**
* <code>true</code> if <code>java.lang.reflect.Parameter</code> is available.
*
* @deprecated Since 3.5.0, Will remove this field at feature(next major version up)
*/
@Deprecated
public static final boolean parameterExists;
static {
boolean available = false;
try {
Resources.classForName("java.lang.reflect.Parameter");
available = true;
} catch (ClassNotFoundException e) {
// ignore
}
parameterExists = available;
}
/**
* @deprecated Since 3.5.0, Will remove this field at feature(next major version up)
*/
@Deprecated
public static final boolean dateAndTimeApiExists;
static {
boolean available = false;
try {
Resources.classForName("java.time.Clock");
available = true;
} catch (ClassNotFoundException e) {
// ignore
}
dateAndTimeApiExists = available;
}
/**
* @deprecated Since 3.5.0, Will remove this field at feature(next major version up)
*/
@Deprecated
public static final boolean optionalExists;
static {
boolean available = false;
try {
Resources.classForName("java.util.Optional");
available = true;
} catch (ClassNotFoundException e) {
// ignore
}
optionalExists = available;
}
private Jdk() {
}
}
| Jdk |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingWithJoinedTableMultiLevelInheritance.java | {
"start": 5016,
"end": 5167
} | class ____ extends Person {
private boolean working;
@ManyToOne
private Office office;
}
@Entity(name = "President")
public static | AnotherPerson |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/number/NumberValueTest.java | {
"start": 187,
"end": 688
} | class ____ extends TestCase {
public void test_0() throws Exception {
String text = "{\"value\":3D}";
JSONObject obj = (JSONObject) JSON.parse(text);
Assert.assertTrue(3D == ((Double)obj.get("value")).doubleValue());
}
public void test_1() throws Exception {
String text = "{\"value\":3.e3D}";
JSONObject obj = (JSONObject) JSON.parse(text);
Assert.assertTrue(3.e3D == ((Double)obj.get("value")).doubleValue());
}
}
| NumberValueTest |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java | {
"start": 21766,
"end": 23979
} | interface ____ {
ParseField SCHEME = new ParseField("scheme");
ParseField HOST = new ParseField("host");
ParseField PORT = new ParseField("port");
ParseField METHOD = new ParseField("method");
ParseField PATH = new ParseField("path");
ParseField PARAMS = new ParseField("params");
ParseField HEADERS = new ParseField("headers");
ParseField AUTH = new ParseField("auth");
ParseField BODY = new ParseField("body");
ParseField CONNECTION_TIMEOUT = new ParseField("connection_timeout_in_millis");
ParseField CONNECTION_TIMEOUT_HUMAN = new ParseField("connection_timeout");
ParseField READ_TIMEOUT = new ParseField("read_timeout_millis");
ParseField READ_TIMEOUT_HUMAN = new ParseField("read_timeout");
ParseField PROXY = new ParseField("proxy");
ParseField URL = new ParseField("url");
}
/**
* Write a request via toXContent, but filter certain parts of it - this is needed to not expose secrets
*
* @param request The HttpRequest object to serialize
* @param xContentType The XContentType from the parent outputstream builder
* @param params The ToXContentParams from the parent write
* @param excludeField The field to exclude
* @return A bytearrayinputstream that contains the serialized request
* @throws IOException if an IOException is triggered in the underlying toXContent method
*/
public static InputStream filterToXContent(HttpRequest request, XContentType xContentType, Params params, String excludeField)
throws IOException {
try (
ByteArrayOutputStream bos = new ByteArrayOutputStream();
XContentBuilder filteredBuilder = new XContentBuilder(
xContentType.xContent(),
bos,
Collections.emptySet(),
Collections.singleton(excludeField),
xContentType.toParsedMediaType()
)
) {
request.toXContent(filteredBuilder, params);
filteredBuilder.flush();
return new ByteArrayInputStream(bos.toByteArray());
}
}
}
| Field |
java | alibaba__nacos | plugin/control/src/main/java/com/alibaba/nacos/plugin/control/tps/barrier/RateCounter.java | {
"start": 858,
"end": 3115
} | class ____ {
/**
* rate count name.
*/
private String name;
/**
* rate period.
*/
private TimeUnit period;
public RateCounter(String name, TimeUnit period) {
this.name = name;
this.period = period;
}
public TimeUnit getPeriod() {
return period;
}
/**
* add count for the second of timestamp.
*
* @param timestamp timestamp.
* @param count count.
* @return
*/
public abstract long add(long timestamp, long count);
/**
* add intercepted count for the second of timestamp.
*
* @param timestamp timestamp
* @param countDelta count
* @param upperLimit upperLimit
* @return
*/
public abstract boolean tryAdd(long timestamp, long countDelta, long upperLimit);
/**
* get count of the second of timestamp.
*
* @param timestamp timestamp.
* @return
*/
public abstract long getCount(long timestamp);
public String getName() {
return name;
}
/**
* get trim mills of second.
*
* @param timeStamp timestamp milliseconds.
* @return
*/
public static long getTrimMillsOfMinute(long timeStamp) {
String millString = String.valueOf(timeStamp);
String substring = millString.substring(0, millString.length() - 3);
return Long.valueOf(Long.valueOf(substring) / 60 * 60 + "000");
}
/**
* get trim mills of second.
*
* @param timeStamp timestamp milliseconds.
* @return
*/
public static long getTrimMillsOfSecond(long timeStamp) {
String millString = String.valueOf(timeStamp);
String substring = millString.substring(0, millString.length() - 3);
return Long.valueOf(substring + "000");
}
/**
* get trim mills of second.
*
* @param timeStamp timestamp milliseconds.
* @return
*/
public static long getTrimMillsOfHour(long timeStamp) {
String millString = String.valueOf(timeStamp);
String substring = millString.substring(0, millString.length() - 3);
return Long.valueOf(Long.valueOf(substring) / (60 * 60) * (60 * 60) + "000");
}
}
| RateCounter |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/remote/ClientConnectionEventListenerRegistry.java | {
"start": 1004,
"end": 2944
} | class ____ {
final List<ClientConnectionEventListener> clientConnectionEventListeners = new ArrayList<>();
/**
* notify where a new client connected.
*
* @param connection connection that new created.
*/
public void notifyClientConnected(final Connection connection) {
for (ClientConnectionEventListener clientConnectionEventListener : clientConnectionEventListeners) {
try {
clientConnectionEventListener.clientConnected(connection);
} catch (Throwable throwable) {
Loggers.REMOTE
.info("[NotifyClientConnected] failed for listener {}", clientConnectionEventListener.getName(),
throwable);
}
}
}
/**
* notify where a new client disconnected.
*
* @param connection connection that disconnected.
*/
public void notifyClientDisConnected(final Connection connection) {
for (ClientConnectionEventListener clientConnectionEventListener : clientConnectionEventListeners) {
try {
clientConnectionEventListener.clientDisConnected(connection);
} catch (Throwable throwable) {
Loggers.REMOTE.info("[NotifyClientDisConnected] failed for listener {}",
clientConnectionEventListener.getName(), throwable);
}
}
}
/**
* register ClientConnectionEventListener.
*
* @param listener listener.
*/
public void registerClientConnectionEventListener(ClientConnectionEventListener listener) {
Loggers.REMOTE.info("[ClientConnectionEventListenerRegistry] registry listener - " + listener.getClass()
.getSimpleName());
this.clientConnectionEventListeners.add(listener);
}
}
| ClientConnectionEventListenerRegistry |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1700/Issue1701.java | {
"start": 2171,
"end": 3283
} | class ____ {
@PostMapping(path = "/download", produces = "application/octet-stream;charset=UTF-8")
public @ResponseBody
ResponseEntity<byte[]> download(@RequestBody TestBean testBean) {
byte[] body = new byte[0];
InputStream in;
try {
in = Issue1701.class.getClassLoader().getResourceAsStream(testBean.getName());
body = new byte[in.available()];
in.read(body);
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
HttpHeaders headers = new HttpHeaders();
headers.add("Content-Disposition", "attachment;filename=1.txt");
HttpStatus statusCode = HttpStatus.OK;
ResponseEntity<byte[]> response = new ResponseEntity<byte[]>(body, headers, statusCode);
return response;
}
}
@ComponentScan(basePackages = "com.alibaba.json.bvt.issue_1700")
@Configuration
@EnableWebMvc
public static | BeanController |
java | quarkusio__quarkus | integration-tests/keycloak-authorization/src/main/java/io/quarkus/it/keycloak/AdminClientResource.java | {
"start": 742,
"end": 3756
} | class ____ {
@Inject
Keycloak keycloak;
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("realm")
public String getRealm() {
return keycloak.realm("quarkus").toRepresentation().getRealm();
}
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("realms")
public String getRealms() {
return keycloak.realms().findAll().stream().map(r -> r.getRealm()).sorted().collect(Collectors.joining("-"));
}
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("roles")
public String getRoles() {
return keycloak.realm("quarkus").roles().list().stream().map(r -> r.getName()).sorted()
.collect(Collectors.joining("-"));
}
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("users")
public String getUsers() {
return keycloak.realm("quarkus").users().list().stream().map(r -> r.getUsername()).sorted()
.collect(Collectors.joining("-"));
}
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("newrealm")
public String createRealm() {
RealmRepresentation newRealm = createRealm("quarkus2");
newRealm.getClients().add(createClient("quarkus-app2"));
newRealm.getUsers().add(createUser("alice", "user"));
keycloak.realms().create(newRealm);
return keycloak.realm("quarkus2").toRepresentation().getRealm();
}
private static RealmRepresentation createRealm(String name) {
RealmRepresentation realm = new RealmRepresentation();
realm.setRealm(name);
realm.setEnabled(true);
realm.setUsers(new ArrayList<>());
realm.setClients(new ArrayList<>());
RolesRepresentation roles = new RolesRepresentation();
List<RoleRepresentation> realmRoles = new ArrayList<>();
roles.setRealm(realmRoles);
realm.setRoles(roles);
realm.getRoles().getRealm().add(new RoleRepresentation("user", null, false));
return realm;
}
private static ClientRepresentation createClient(String clientId) {
ClientRepresentation client = new ClientRepresentation();
client.setClientId(clientId);
client.setRedirectUris(Arrays.asList("*"));
client.setPublicClient(false);
client.setSecret("secret");
client.setDirectAccessGrantsEnabled(true);
client.setEnabled(true);
return client;
}
private static UserRepresentation createUser(String username, String... realmRoles) {
UserRepresentation user = new UserRepresentation();
user.setUsername(username);
user.setEnabled(true);
user.setCredentials(new ArrayList<>());
user.setRealmRoles(Arrays.asList(realmRoles));
CredentialRepresentation credential = new CredentialRepresentation();
credential.setType(CredentialRepresentation.PASSWORD);
credential.setValue(username);
credential.setTemporary(false);
user.getCredentials().add(credential);
return user;
}
}
| AdminClientResource |
java | quarkusio__quarkus | extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/test/jaxrs/OpenApiResourceSecuredAtClassLevel.java | {
"start": 613,
"end": 1499
} | class ____ {
@SuppressWarnings("unused")
private ResourceBean resourceBean;
@GET
@Path("/test-security/classLevel/1")
@RolesAllowed("user1")
public String secureEndpoint1() {
return "secret";
}
@GET
@Path("/test-security/classLevel/2")
@RolesAllowed("user2")
public String secureEndpoint2() {
return "secret";
}
@GET
@Path("/test-security/classLevel/3")
@SecurityRequirement(name = "MyOwnName")
public String secureEndpoint3() {
return "secret";
}
@APIResponses({
@APIResponse(responseCode = "401", description = "Who are you?"),
@APIResponse(responseCode = "403", description = "You cannot do that.")
})
@GET
@Path("/test-security/classLevel/4")
public String secureEndpoint4() {
return "secret";
}
}
| OpenApiResourceSecuredAtClassLevel |
java | quarkusio__quarkus | extensions/observability-devservices/common/src/main/java/io/quarkus/observability/common/config/GrafanaConfig.java | {
"start": 218,
"end": 620
} | interface ____ extends ContainerConfig {
/**
* The username.
*/
@WithDefault("admin")
String username();
/**
* The password.
*/
@WithDefault("admin")
String password();
/**
* The port of the Grafana container.
*/
OptionalInt grafanaPort();
/**
* The timeout.
*/
@WithDefault("PT3M")
Duration timeout();
}
| GrafanaConfig |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/maven-extension-manipulating-pom/app/app/src/main/java/org/acme/Application.java | {
"start": 232,
"end": 559
} | class ____ implements QuarkusApplication {
@Inject
Greeter greeter;
@Override
public int run(String... args) throws Exception {
String msg = greeter.getGreeting();
if(args.length > 0) {
msg += ", " + args[0] + "!";
}
Log.info(msg);
return 0;
}
}
| Application |
java | dropwizard__dropwizard | dropwizard-core/src/test/java/io/dropwizard/core/ApplicationTest.java | {
"start": 774,
"end": 2463
} | class ____<C extends FakeConfiguration> extends Application<C> {
private final Application<C> application;
private WrapperApplication(Application<C> application) {
this.application = application;
}
@Override
public void initialize(Bootstrap<C> bootstrap) {
this.application.initialize(bootstrap);
}
@Override
public void run(C configuration, Environment environment) throws Exception {
this.application.run(configuration, environment);
}
}
@Test
void hasAReferenceToItsTypeParameter() throws Exception {
assertThat(new FakeApplication().getConfigurationClass())
.isSameAs(FakeConfiguration.class);
}
@Test
void canDetermineConfiguration() throws Exception {
assertThat(new PoserApplication().getConfigurationClass())
.isSameAs(FakeConfiguration.class);
}
@Test
void canDetermineWrappedConfiguration() throws Exception {
final PoserApplication application = new PoserApplication();
assertThat(new WrapperApplication<>(application).getConfigurationClass())
.isSameAs(FakeConfiguration.class);
}
@Test
void exitWithFatalErrorWhenCommandFails() throws Exception {
final File configFile = File.createTempFile("dropwizard-invalid-config", ".yml");
try {
final FakeApplication application = new FakeApplication();
application.run("server", configFile.getAbsolutePath());
assertThat(application.fatalError).isTrue();
} finally {
configFile.delete();
}
}
}
| WrapperApplication |
java | spring-projects__spring-boot | test-support/spring-boot-test-support/src/main/java/org/springframework/boot/testsupport/junit/DisabledOnOsCondition.java | {
"start": 1231,
"end": 2598
} | class ____ implements ExecutionCondition {
@Override
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
if (context.getElement().isEmpty()) {
return ConditionEvaluationResult.enabled("No element for @DisabledOnOs found");
}
MergedAnnotation<DisabledOnOs> annotation = MergedAnnotations
.from(context.getElement().get(), SearchStrategy.TYPE_HIERARCHY)
.get(DisabledOnOs.class);
if (!annotation.isPresent()) {
return ConditionEvaluationResult.enabled("No @DisabledOnOs found");
}
return evaluate(annotation.synthesize());
}
private ConditionEvaluationResult evaluate(DisabledOnOs annotation) {
String architecture = System.getProperty("os.arch");
String os = System.getProperty("os.name");
boolean onDisabledOs = Arrays.stream(annotation.os()).anyMatch(OS::isCurrentOs);
boolean onDisabledArchitecture = Arrays.asList(annotation.architecture()).contains(architecture);
if (onDisabledOs && onDisabledArchitecture) {
String reason = annotation.disabledReason().isEmpty()
? String.format("Disabled on OS = %s, architecture = %s", os, architecture)
: annotation.disabledReason();
return ConditionEvaluationResult.disabled(reason);
}
return ConditionEvaluationResult
.enabled(String.format("Enabled on OS = %s, architecture = %s", os, architecture));
}
}
| DisabledOnOsCondition |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/TestContextManager.java | {
"start": 8629,
"end": 10047
} | class ____</em> — for example, methods annotated with
* JUnit Jupiter's {@link org.junit.jupiter.api.BeforeAll @BeforeAll}.
* <p>An attempt will be made to give each registered
* {@link TestExecutionListener} a chance to pre-process the test class
* execution. If a listener throws an exception, however, the remaining
* registered listeners will <strong>not</strong> be called.
* @throws Exception if a registered TestExecutionListener throws an
* exception
* @since 3.0
* @see #getTestExecutionListeners()
*/
public void beforeTestClass() throws Exception {
try {
Class<?> testClass = getTestContext().getTestClass();
if (logger.isTraceEnabled()) {
logger.trace("beforeTestClass(): class [" + typeName(testClass) + "]");
}
getTestContext().updateState(null, null, null);
for (TestExecutionListener testExecutionListener : getTestExecutionListeners()) {
try {
testExecutionListener.beforeTestClass(getTestContext());
}
catch (Throwable ex) {
logException(ex, "beforeTestClass", testExecutionListener, testClass);
ReflectionUtils.rethrowException(ex);
}
}
}
finally {
resetMethodInvoker();
}
}
/**
* Hook for preparing a test instance prior to execution of any individual
* test methods — for example, to inject dependencies.
* <p>This method should be called immediately after instantiation of the test
* | methods |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java | {
"start": 2859,
"end": 12830
} | class ____<K, V> implements StoreBuilder<TimeOrderedKeyValueBuffer<K, V, V>> {
private final String storeName;
private final Serde<K> keySerde;
private final Serde<V> valueSerde;
private boolean loggingEnabled = true;
private Map<String, String> logConfig = new HashMap<>();
private final Duration grace;
private final String topic;
public Builder(
final String storeName,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final Duration grace,
final String topic
) {
this.storeName = storeName;
this.keySerde = keySerde;
this.valueSerde = valueSerde;
this.grace = grace;
this.topic = topic;
}
/**
* As of 2.1, there's no way for users to directly interact with the buffer,
* so this method is implemented solely to be called by Streams (which
* it will do based on the {@code cache.max.bytes.buffering} config.
* <p>
* It's currently a no-op.
*/
@Override
public StoreBuilder<TimeOrderedKeyValueBuffer<K, V, V>> withCachingEnabled() {
return this;
}
/**
* As of 2.1, there's no way for users to directly interact with the buffer,
* so this method is implemented solely to be called by Streams (which
* it will do based on the {@code cache.max.bytes.buffering} config.
* <p>
* It's currently a no-op.
*/
@Override
public StoreBuilder<TimeOrderedKeyValueBuffer<K, V, V>> withCachingDisabled() {
return this;
}
@Override
public StoreBuilder<TimeOrderedKeyValueBuffer<K, V, V>> withLoggingEnabled(final Map<String, String> config) {
logConfig = config;
return this;
}
@Override
public StoreBuilder<TimeOrderedKeyValueBuffer<K, V, V>> withLoggingDisabled() {
loggingEnabled = false;
return this;
}
@Override
public TimeOrderedKeyValueBuffer<K, V, V> build() {
return new RocksDBTimeOrderedKeyValueBuffer<>(
new RocksDBTimeOrderedKeyValueBytesStoreSupplier(storeName).get(),
keySerde,
valueSerde,
grace,
topic,
loggingEnabled);
}
@Override
public Map<String, String> logConfig() {
return loggingEnabled() ? Collections.unmodifiableMap(logConfig) : Collections.emptyMap();
}
@Override
public boolean loggingEnabled() {
return loggingEnabled;
}
@Override
public String name() {
return storeName;
}
}
public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueBytesStore store,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final Duration gracePeriod,
final String topic,
final boolean loggingEnabled) {
this.store = store;
this.keySerde = keySerde;
this.valueSerde = valueSerde;
this.gracePeriod = gracePeriod.toMillis();
minTimestamp = store.minTimestamp();
minValid = false;
numRecords = 0;
bufferSize = 0;
seqnum = 0;
this.topic = topic;
this.loggingEnabled = loggingEnabled;
}
@SuppressWarnings("unchecked")
@Override
public void setSerdesIfNull(final SerdeGetter getter) {
keySerde = keySerde == null ? (Serde<K>) getter.keySerde() : keySerde;
valueSerde = valueSerde == null ? (Serde<V>) getter.valueSerde() : valueSerde;
}
private long observedStreamTime() {
return store.observedStreamTime;
}
@Override
public String name() {
return store.name();
}
@Override
public void init(final StateStoreContext stateStoreContext, final StateStore root) {
store.init(stateStoreContext, root);
iternalContext = ProcessorContextUtils.asInternalProcessorContext(stateStoreContext);
partition = stateStoreContext.taskId().partition();
if (loggingEnabled) {
changelogTopic = ProcessorContextUtils.changelogFor(stateStoreContext, name(), Boolean.TRUE);
}
}
@Override
public void flush() {
store.flush();
}
@Override
public void close() {
store.close();
}
@Override
public boolean persistent() {
return store.persistent();
}
@Override
public boolean isOpen() {
return store.isOpen();
}
@Override
public void evictWhile(final Supplier<Boolean> predicate, final Consumer<Eviction<K, V>> callback) {
    // Drains expired records — those older than observedStreamTime - gracePeriod —
    // in timestamp order, invoking the callback for each, until the predicate
    // says to stop or nothing expired remains. Updates numRecords/bufferSize/
    // minTimestamp bookkeeping as it goes.
    KeyValue<Bytes, byte[]> keyValue;
    if (predicate.get()) {
        long start = 0;
        if (minValid) {
            // The cached minimum is trustworthy, so skip the range below it.
            start = minTimestamp();
        }
        try (final KeyValueIterator<Bytes, byte[]> iterator = store
            .fetchAll(start, observedStreamTime() - gracePeriod)) {
            while (iterator.hasNext() && predicate.get()) {
                keyValue = iterator.next();
                final BufferValue bufferValue = BufferValue.deserialize(ByteBuffer.wrap(keyValue.value));
                final K key = keySerde.deserializer().deserialize(topic,
                    PrefixedWindowKeySchemas.TimeFirstWindowKeySchema.extractStoreKeyBytes(keyValue.key.get()));
                // Sanity check: the store iterates in time order, so no record may
                // precede the cached minimum while that cache is marked valid.
                if (bufferValue.context().timestamp() < minTimestamp && minValid) {
                    throw new IllegalStateException(
                        "minTimestamp [" + minTimestamp + "] did not match the actual min timestamp [" +
                        bufferValue.context().timestamp() + "]"
                    );
                }
                minTimestamp = bufferValue.context().timestamp();
                minValid = true;
                final V value = valueSerde.deserializer().deserialize(topic, bufferValue.newValue());
                callback.accept(new Eviction<>(key, value, bufferValue.context()));
                // Physically remove the evicted entry and mirror the deletion to
                // the changelog (tombstone) when logging is enabled.
                store.remove(keyValue.key);
                if (loggingEnabled) {
                    logTombstone(keyValue.key);
                }
                numRecords--;
                bufferSize = bufferSize - computeRecordSize(keyValue.key, bufferValue);
            }
            // Re-derive the minimum after draining: empty buffer uses the
            // MAX_VALUE sentinel; otherwise every surviving record is at least
            // one tick inside the grace window.
            if (numRecords == 0) {
                minTimestamp = Long.MAX_VALUE;
            } else {
                minTimestamp = observedStreamTime() - gracePeriod + 1;
            }
        }
    }
}
/** This buffer does not track prior values, so the answer is always undefined. */
@Override
public Maybe<ValueAndTimestamp<V>> priorValueForBuffered(final K key) {
    return Maybe.undefined();
}
@Override
public boolean put(final long time, final Record<K, V> record, final ProcessorRecordContext recordContext) {
    // Buffers a record under a (timestamp, key, seqnum) store key. Returns false
    // when the record is already past the grace period (would be evicted
    // immediately), true when it was stored.
    requireNonNull(record.value(), "value cannot be null");
    requireNonNull(record.key(), "key cannot be null");
    requireNonNull(recordContext, "recordContext cannot be null");
    if (observedStreamTime() - gracePeriod > record.timestamp()) {
        // Too late relative to observed stream time; drop it.
        return false;
    }
    // Advance the sequence number so same-timestamp/same-key records don't collide.
    maybeUpdateSeqnumForDups();
    final Bytes serializedKey = Bytes.wrap(
        PrefixedWindowKeySchemas.TimeFirstWindowKeySchema.toStoreKeyBinary(keySerde.serializer().serialize(topic, record.key()),
            record.timestamp(),
            seqnum).get());
    final byte[] valueBytes = valueSerde.serializer().serialize(topic, record.value());
    // The two nulls are the untracked prior/old values of BufferValue.
    final BufferValue buffered = new BufferValue(null, null, valueBytes, recordContext);
    store.put(serializedKey, buffered.serialize(0).array());
    if (loggingEnabled) {
        final BufferKey key = new BufferKey(0L, serializedKey);
        logValue(serializedKey, key, buffered);
    }
    // Bookkeeping: size, count, and the running minimum timestamp.
    bufferSize += computeRecordSize(serializedKey, buffered);
    numRecords++;
    minTimestamp = Math.min(minTimestamp(), record.timestamp());
    return true;
}
/** Number of records currently held in the buffer. */
@Override
public int numRecords() {
    return this.numRecords;
}
/** Estimated total size in bytes of the buffered records. */
@Override
public long bufferSize() {
    return this.bufferSize;
}
/** Smallest record timestamp currently tracked (Long.MAX_VALUE when empty). */
@Override
public long minTimestamp() {
    return this.minTimestamp;
}
/**
 * Estimates the footprint of one buffered entry: the raw key length plus the
 * value's resident-memory estimate (a null value contributes nothing).
 */
private static long computeRecordSize(final Bytes key, final BufferValue value) {
    final long valueSize = value == null ? 0L : value.residentMemorySizeEstimate();
    return key.get().length + valueSize;
}
/** Advances the per-record sequence number, wrapping so it stays non-negative. */
private void maybeUpdateSeqnumForDups() {
    // Masking with 0x7FFFFFFF (Integer.MAX_VALUE) wraps at 2^31 - 1 instead of
    // letting the counter go negative on overflow.
    seqnum = (seqnum + 1) & 0x7FFFFFFF;
}
private void logValue(final Bytes key, final BufferKey bufferKey, final BufferValue value) {
    // Appends the buffered value to the changelog topic. The payload is the
    // serialized BufferValue with the buffer time appended as a trailing long
    // (serialize() reserves sizeOfBufferTime extra bytes for it).
    final int sizeOfBufferTime = Long.BYTES;
    final ByteBuffer buffer = value.serialize(sizeOfBufferTime);
    buffer.putLong(bufferKey.time());
    final byte[] array = buffer.array();
    // NOTE(review): "iternalContext" appears to be a typo of internalContext;
    // the field is declared outside this view, so the name must be kept here.
    // The null arguments presumably stand for headers, timestamp, and the
    // processor-node/context parameters — confirm against RecordCollector.send.
    ((RecordCollector.Supplier) iternalContext).recordCollector().send(
        changelogTopic,
        key,
        array,
        null,
        partition,
        null,
        KEY_SERIALIZER,
        VALUE_SERIALIZER,
        null,
        null);
}
private void logTombstone(final Bytes key) {
    // Sends a null value (tombstone) for the key to the changelog topic so the
    // eviction is replayed on restore.
    // NOTE(review): "iternalContext" appears to be a typo of internalContext;
    // the field is declared outside this view, so the name must be kept here.
    ((RecordCollector.Supplier) iternalContext).recordCollector().send(
        changelogTopic,
        key,
        null,
        null,
        partition,
        null,
        KEY_SERIALIZER,
        VALUE_SERIALIZER,
        null,
        null);
}
}
| Builder |
java | quarkusio__quarkus | integration-tests/gradle/src/test/java/io/quarkus/gradle/TestResourcesInBuildStepsTest.java | {
"start": 218,
"end": 1109
} | class ____ extends QuarkusGradleWrapperTestBase {
@Test
public void testBasicMultiModuleBuild() throws Exception {
    // Build the sample multi-module project with the Gradle wrapper.
    final File projectDir = getProjectDir("test-resources-in-build-steps");
    runGradleWrapper(projectDir, "build");

    final Path buildDir = projectDir.toPath().resolve("application").resolve("build");

    // The runtime module jar must land in the fast-jar lib/main directory.
    final Path mainLibDir = buildDir.resolve("quarkus-app").resolve("lib").resolve("main");
    assertThat(mainLibDir).exists();
    assertThat(mainLibDir.resolve("org.acme.runtime-1.0-SNAPSHOT.jar")).exists();

    // The build steps record which resource directory was visible per launch mode.
    assertThat(buildDir.resolve(LaunchMode.NORMAL + "-resources.txt")).hasContent("main");
    assertThat(buildDir.resolve(LaunchMode.TEST + "-resources.txt")).hasContent("test");
}
}
| TestResourcesInBuildStepsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.