language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/exceptions/misusing/UnfinishedVerificationException.java
|
{
"start": 223,
"end": 436
}
|
class ____ extends MockitoException {
private static final long serialVersionUID = 1L;
public UnfinishedVerificationException(String message) {
super(message);
}
}
|
UnfinishedVerificationException
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/clientproxy/bridgemethod/SameDescriptorDifferentReturnTypeMethodTest.java
|
{
"start": 1674,
"end": 1824
}
|
interface ____ extends SuperLoop {
// Since JDK8+ a "Serializable next()" bridge method is also generated
Integer next();
}
|
Loop
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/support/AbstractGenericContextLoader.java
|
{
"start": 2769,
"end": 19131
}
|
class ____ extends AbstractContextLoader implements AotContextLoader {
protected static final Log logger = LogFactory.getLog(AbstractGenericContextLoader.class);
/**
* Load a {@link GenericApplicationContext} for the supplied
* {@link MergedContextConfiguration}.
* <p>Implementation details:
* <ul>
* <li>Calls {@link #validateMergedContextConfiguration(MergedContextConfiguration)}
* to allow subclasses to validate the supplied configuration before proceeding.</li>
* <li>Calls {@link #createContext()} to create a {@link GenericApplicationContext}
* instance.</li>
* <li>If the supplied {@code MergedContextConfiguration} references a
* {@linkplain MergedContextConfiguration#getParent() parent configuration},
* the corresponding {@link MergedContextConfiguration#getParentApplicationContext()
* ApplicationContext} will be retrieved and
* {@linkplain GenericApplicationContext#setParent(ApplicationContext) set as the parent}
* for the context created by this method.</li>
* <li>Calls {@link #prepareContext(GenericApplicationContext)} for backwards
* compatibility with the {@link org.springframework.test.context.ContextLoader
* ContextLoader} SPI.</li>
* <li>Calls {@link #prepareContext(ConfigurableApplicationContext, MergedContextConfiguration)}
* to allow for customizing the context before bean definitions are loaded.</li>
* <li>Calls {@link #customizeBeanFactory(DefaultListableBeanFactory)} to allow for customizing the
* context's {@code DefaultListableBeanFactory}.</li>
* <li>Delegates to {@link #loadBeanDefinitions(GenericApplicationContext, MergedContextConfiguration)}
* to populate the context from the locations or classes in the supplied
* {@code MergedContextConfiguration}.</li>
* <li>Delegates to {@link AnnotationConfigUtils} for
* {@link AnnotationConfigUtils#registerAnnotationConfigProcessors registering}
* annotation configuration processors.</li>
* <li>Calls {@link #customizeContext(GenericApplicationContext)} to allow for customizing the context
* before it is refreshed.</li>
* <li>Calls {@link #customizeContext(ConfigurableApplicationContext, MergedContextConfiguration)} to
* allow for customizing the context before it is refreshed.</li>
* <li>{@link ConfigurableApplicationContext#refresh Refreshes} the
* context and registers a JVM shutdown hook for it.</li>
* </ul>
* @param mergedConfig the merged context configuration to use to load the
* application context
* @return a new application context
* @see org.springframework.test.context.SmartContextLoader#loadContext(MergedContextConfiguration)
*/
@Override
public final ApplicationContext loadContext(MergedContextConfiguration mergedConfig) throws Exception {
return loadContext(mergedConfig, false);
}
/**
* Load a {@link GenericApplicationContext} for AOT build-time processing based
* on the supplied {@link MergedContextConfiguration}.
* <p>In contrast to {@link #loadContext(MergedContextConfiguration)}, this
* method does not
* {@linkplain org.springframework.context.ConfigurableApplicationContext#refresh()
* refresh} the {@code ApplicationContext} or
* {@linkplain org.springframework.context.ConfigurableApplicationContext#registerShutdownHook()
* register a JVM shutdown hook} for it. Otherwise, this method implements
* behavior identical to {@link #loadContext(MergedContextConfiguration)}.
* @param mergedConfig the merged context configuration to use to load the
* application context
* @return a new application context
* @throws Exception if context loading failed
* @since 6.0
* @see AotContextLoader#loadContextForAotProcessing(MergedContextConfiguration)
* @deprecated as of Spring Framework 6.2.4, in favor of
* {@link #loadContextForAotProcessing(MergedContextConfiguration, RuntimeHints)};
* to be removed in Spring Framework 8.0
*/
@Deprecated(since = "6.2.4", forRemoval = true)
@Override
@SuppressWarnings("removal")
public final GenericApplicationContext loadContextForAotProcessing(MergedContextConfiguration mergedConfig)
throws Exception {
return loadContext(mergedConfig, true);
}
/**
* Load a {@link GenericApplicationContext} for AOT build-time processing based
* on the supplied {@link MergedContextConfiguration}.
* <p>In contrast to {@link #loadContext(MergedContextConfiguration)}, this
* method does not
* {@linkplain org.springframework.context.ConfigurableApplicationContext#refresh()
* refresh} the {@code ApplicationContext} or
* {@linkplain org.springframework.context.ConfigurableApplicationContext#registerShutdownHook()
* register a JVM shutdown hook} for it. Otherwise, this method implements
* behavior identical to {@link #loadContext(MergedContextConfiguration)}.
* @param mergedConfig the merged context configuration to use to load the
* application context
* @param runtimeHints the runtime hints
* @return a new application context
* @throws Exception if context loading failed
* @since 6.2.4
* @see AotContextLoader#loadContextForAotProcessing(MergedContextConfiguration, RuntimeHints)
*/
@Override
public final GenericApplicationContext loadContextForAotProcessing(MergedContextConfiguration mergedConfig,
RuntimeHints runtimeHints) throws Exception {
return loadContext(mergedConfig, true);
}
/**
* Load a {@link GenericApplicationContext} for AOT run-time execution based on
* the supplied {@link MergedContextConfiguration} and
* {@link ApplicationContextInitializer}.
* @param mergedConfig the merged context configuration to use to load the
* application context
* @param initializer the {@code ApplicationContextInitializer} that should
* be applied to the context in order to recreate bean definitions
* @return a new application context
* @throws Exception if context loading failed
* @since 6.0
* @see AotContextLoader#loadContextForAotRuntime(MergedContextConfiguration, ApplicationContextInitializer)
*/
@Override
public final GenericApplicationContext loadContextForAotRuntime(MergedContextConfiguration mergedConfig,
ApplicationContextInitializer<ConfigurableApplicationContext> initializer) throws Exception {
Assert.notNull(mergedConfig, "MergedContextConfiguration must not be null");
Assert.notNull(initializer, "ApplicationContextInitializer must not be null");
if (logger.isTraceEnabled()) {
logger.trace("Loading ApplicationContext for AOT runtime for " + mergedConfig);
}
else if (logger.isDebugEnabled()) {
logger.debug("Loading ApplicationContext for AOT runtime for test class " +
mergedConfig.getTestClass().getName());
}
validateMergedContextConfiguration(mergedConfig);
GenericApplicationContext context = createContext();
try {
prepareContext(context);
prepareContext(context, mergedConfig);
initializer.initialize(context);
customizeContext(context);
customizeContext(context, mergedConfig);
context.refresh();
return context;
}
catch (Exception ex) {
throw new ContextLoadException(context, ex);
}
}
/**
* Load a {@link GenericApplicationContext} for the supplied
* {@link MergedContextConfiguration}.
* @param mergedConfig the merged context configuration to use to load the
* application context
* @param forAotProcessing {@code true} if the context is being loaded for
* AOT processing, meaning not to refresh the {@code ApplicationContext} or
* register a JVM shutdown hook for it
* @return a new application context
*/
private GenericApplicationContext loadContext(
MergedContextConfiguration mergedConfig, boolean forAotProcessing) throws Exception {
if (logger.isTraceEnabled()) {
logger.trace("Loading ApplicationContext %sfor %s".formatted(
(forAotProcessing ? "for AOT processing " : ""), mergedConfig));
}
else if (logger.isDebugEnabled()) {
logger.debug("Loading ApplicationContext %sfor test class %s".formatted(
(forAotProcessing ? "for AOT processing " : ""), mergedConfig.getTestClass().getName()));
}
validateMergedContextConfiguration(mergedConfig);
GenericApplicationContext context = createContext();
try {
ApplicationContext parent = mergedConfig.getParentApplicationContext();
if (parent != null) {
context.setParent(parent);
}
prepareContext(context);
prepareContext(context, mergedConfig);
customizeBeanFactory(context.getDefaultListableBeanFactory());
loadBeanDefinitions(context, mergedConfig);
AnnotationConfigUtils.registerAnnotationConfigProcessors(context);
customizeContext(context);
customizeContext(context, mergedConfig);
if (!forAotProcessing) {
context.refresh();
context.registerShutdownHook();
}
return context;
}
catch (Exception ex) {
throw new ContextLoadException(context, ex);
}
}
/**
* Validate the supplied {@link MergedContextConfiguration} with respect to
* what this context loader supports.
* <p>The default implementation is a <em>no-op</em> but can be overridden by
* subclasses as appropriate.
* @param mergedConfig the merged configuration to validate
* @throws IllegalStateException if the supplied configuration is not valid
* for this context loader
* @since 4.0.4
*/
protected void validateMergedContextConfiguration(MergedContextConfiguration mergedConfig) {
// no-op
}
/**
* Load a Spring ApplicationContext from the supplied {@code locations}.
* <p>Implementation details:
* <ul>
* <li>Calls {@link #createContext()} to create a {@link GenericApplicationContext}
* instance.</li>
* <li>Calls {@link #prepareContext(GenericApplicationContext)} to allow for customizing the context
* before bean definitions are loaded.</li>
* <li>Calls {@link #customizeBeanFactory(DefaultListableBeanFactory)} to allow for customizing the
* context's {@code DefaultListableBeanFactory}.</li>
* <li>Delegates to {@link #createBeanDefinitionReader(GenericApplicationContext)} to create a
* {@link BeanDefinitionReader} which is then used to populate the context
* from the specified locations.</li>
* <li>Delegates to {@link AnnotationConfigUtils} for
* {@link AnnotationConfigUtils#registerAnnotationConfigProcessors registering}
* annotation configuration processors.</li>
* <li>Calls {@link #customizeContext(GenericApplicationContext)} to allow for customizing the context
* before it is refreshed.</li>
* <li>{@link ConfigurableApplicationContext#refresh Refreshes} the
* context and registers a JVM shutdown hook for it.</li>
* </ul>
* <p><b>Note</b>: this method does not provide a means to set active bean definition
* profiles for the loaded context. See {@link #loadContext(MergedContextConfiguration)}
* and {@link AbstractContextLoader#prepareContext(ConfigurableApplicationContext, MergedContextConfiguration)}
* for an alternative.
* @return a new application context
* @since 2.5
* @see org.springframework.test.context.ContextLoader#loadContext
* @see GenericApplicationContext
* @see #loadContext(MergedContextConfiguration)
* @deprecated as of Spring Framework 6.0, in favor of {@link #loadContext(MergedContextConfiguration)}
*/
@Deprecated(since = "6.0")
@Override
public final ConfigurableApplicationContext loadContext(String... locations) throws Exception {
if (logger.isDebugEnabled()) {
logger.debug("Loading ApplicationContext for locations " + Arrays.toString(locations));
}
GenericApplicationContext context = createContext();
prepareContext(context);
customizeBeanFactory(context.getDefaultListableBeanFactory());
createBeanDefinitionReader(context).loadBeanDefinitions(locations);
AnnotationConfigUtils.registerAnnotationConfigProcessors(context);
customizeContext(context);
context.refresh();
context.registerShutdownHook();
return context;
}
/**
* Factory method for creating the {@link GenericApplicationContext} used by
* this {@code ContextLoader}.
* <p>The default implementation creates a {@code GenericApplicationContext}
* using the default constructor. This method may be overridden — for
* example, to use a custom context subclass or to create a
* {@code GenericApplicationContext} with a custom
* {@link DefaultListableBeanFactory} implementation.
* @return a newly instantiated {@code GenericApplicationContext}
* @since 5.2.9
*/
protected GenericApplicationContext createContext() {
return new GenericApplicationContext();
}
/**
* Prepare the {@link GenericApplicationContext} created by this {@code ContextLoader}.
* Called <i>before</i> bean definitions are read.
* <p>The default implementation is empty. Can be overridden in subclasses to
* customize {@code GenericApplicationContext}'s standard settings.
* @param context the context that should be prepared
* @since 2.5
* @see #loadContext(MergedContextConfiguration)
* @see GenericApplicationContext#setAllowBeanDefinitionOverriding
* @see GenericApplicationContext#setResourceLoader
* @see GenericApplicationContext#setId
* @see #prepareContext(ConfigurableApplicationContext, MergedContextConfiguration)
*/
protected void prepareContext(GenericApplicationContext context) {
}
/**
* Customize the internal bean factory of the ApplicationContext created by
* this {@code ContextLoader}.
* <p>The default implementation is empty but can be overridden in subclasses
* to customize {@code DefaultListableBeanFactory}'s standard settings.
* @param beanFactory the bean factory created by this {@code ContextLoader}
* @since 2.5
* @see #loadContext(MergedContextConfiguration)
* @see DefaultListableBeanFactory#setAllowBeanDefinitionOverriding
* @see DefaultListableBeanFactory#setAllowEagerClassLoading
* @see DefaultListableBeanFactory#setAllowCircularReferences
* @see DefaultListableBeanFactory#setAllowRawInjectionDespiteWrapping
*/
protected void customizeBeanFactory(DefaultListableBeanFactory beanFactory) {
}
/**
* Load bean definitions into the supplied {@link GenericApplicationContext context}
* from the locations or classes in the supplied {@code MergedContextConfiguration}.
* <p>The default implementation delegates to the {@link BeanDefinitionReader}
* returned by {@link #createBeanDefinitionReader(GenericApplicationContext)} to
* {@link BeanDefinitionReader#loadBeanDefinitions(String) load} the
* bean definitions.
* <p>Subclasses must provide an appropriate implementation of
* {@link #createBeanDefinitionReader(GenericApplicationContext)}. Alternatively subclasses
* may provide a <em>no-op</em> implementation of {@code createBeanDefinitionReader()}
* and override this method to provide a custom strategy for loading or
* registering bean definitions.
* @param context the context into which the bean definitions should be loaded
* @param mergedConfig the merged context configuration
* @since 3.1
* @see #loadContext(MergedContextConfiguration)
*/
protected void loadBeanDefinitions(GenericApplicationContext context, MergedContextConfiguration mergedConfig) {
createBeanDefinitionReader(context).loadBeanDefinitions(mergedConfig.getLocations());
}
/**
* Factory method for creating a new {@link BeanDefinitionReader} for loading
* bean definitions into the supplied {@link GenericApplicationContext context}.
* @param context the context for which the {@code BeanDefinitionReader}
* should be created
* @return a {@code BeanDefinitionReader} for the supplied context
* @since 2.5
* @see #loadContext(MergedContextConfiguration)
* @see #loadBeanDefinitions
* @see BeanDefinitionReader
*/
protected abstract BeanDefinitionReader createBeanDefinitionReader(GenericApplicationContext context);
/**
* Customize the {@link GenericApplicationContext} created by this
* {@code ContextLoader} <i>after</i> bean definitions have been
* loaded into the context but <i>before</i> the context is refreshed.
* <p>The default implementation is empty but can be overridden in subclasses
* to customize the application context.
* @param context the newly created application context
* @since 2.5
* @see #loadContext(MergedContextConfiguration)
* @see #customizeContext(ConfigurableApplicationContext, MergedContextConfiguration)
*/
protected void customizeContext(GenericApplicationContext context) {
}
}
|
AbstractGenericContextLoader
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/inheritance/Tomato.java
|
{
"start": 630,
"end": 810
}
|
class ____ extends Vegetable {
private int size;
@Column(name="tom_size")
public int getSize() {
return size;
}
public void setSize(int size) {
this.size = size;
}
}
|
Tomato
|
java
|
elastic__elasticsearch
|
modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java
|
{
"start": 3165,
"end": 22288
}
|
class ____ extends BaseGeoParsingTestCase {
private static XContentBuilder toWKTContent(ShapeBuilder<?, ?, ?> builder, boolean generateMalformed) throws IOException {
String wkt = builder.toWKT();
if (generateMalformed) {
// malformed - extra paren
// TODO generate more malformed WKT
wkt += GeoWKTParser.RPAREN;
}
if (randomBoolean()) {
// test comments
wkt = "# " + wkt + "\n" + wkt;
}
return XContentFactory.jsonBuilder().value(wkt);
}
private void assertExpected(Object expected, ShapeBuilder<?, ?, ?> builder, boolean useJTS) throws IOException, ParseException {
XContentBuilder xContentBuilder = toWKTContent(builder, false);
assertGeometryEquals(expected, xContentBuilder, useJTS);
}
private void assertMalformed(ShapeBuilder<?, ?, ?> builder) throws IOException {
XContentBuilder xContentBuilder = toWKTContent(builder, true);
assertValidException(xContentBuilder, ElasticsearchParseException.class);
}
@Override
public void testParsePoint() throws IOException, ParseException {
GeoPoint p = RandomShapeGenerator.randomPoint(random());
Coordinate c = new Coordinate(p.lon(), p.lat());
Point expected = GEOMETRY_FACTORY.createPoint(c);
assertExpected(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c), true);
assertExpected(new org.elasticsearch.geometry.Point(p.lon(), p.lat()), new PointBuilder().coordinate(c), false);
assertMalformed(new PointBuilder().coordinate(c));
}
@Override
public void testParseMultiPoint() throws IOException, ParseException {
int numPoints = randomIntBetween(0, 100);
List<Coordinate> coordinates = new ArrayList<>(numPoints);
for (int i = 0; i < numPoints; ++i) {
coordinates.add(new Coordinate(GeoTestUtil.nextLongitude(), GeoTestUtil.nextLatitude()));
}
List<org.elasticsearch.geometry.Point> points = new ArrayList<>(numPoints);
for (int i = 0; i < numPoints; ++i) {
Coordinate c = coordinates.get(i);
points.add(new org.elasticsearch.geometry.Point(c.x, c.y));
}
Geometry expectedGeom;
MultiPointBuilder actual;
if (numPoints == 0) {
expectedGeom = MultiPoint.EMPTY;
actual = new MultiPointBuilder();
} else if (numPoints == 1) {
expectedGeom = points.get(0);
actual = new MultiPointBuilder(coordinates);
} else {
expectedGeom = new MultiPoint(points);
actual = new MultiPointBuilder(coordinates);
}
assertExpected(expectedGeom, actual, false);
assertMalformed(actual);
assumeTrue("JTS test path cannot handle empty multipoints", numPoints > 1);
Shape[] shapes = new Shape[numPoints];
for (int i = 0; i < numPoints; ++i) {
Coordinate c = coordinates.get(i);
shapes[i] = SPATIAL_CONTEXT.makePoint(c.x, c.y);
}
ShapeCollection<?> expected = shapeCollection(shapes);
assertExpected(expected, new MultiPointBuilder(coordinates), true);
}
private List<Coordinate> randomLineStringCoords() {
int numPoints = randomIntBetween(2, 100);
List<Coordinate> coordinates = new ArrayList<>(numPoints);
GeoPoint p;
for (int i = 0; i < numPoints; ++i) {
p = RandomShapeGenerator.randomPointIn(random(), -90d, -90d, 90d, 90d);
coordinates.add(new Coordinate(p.lon(), p.lat()));
}
return coordinates;
}
@Override
public void testParseLineString() throws IOException, ParseException {
List<Coordinate> coordinates = randomLineStringCoords();
LineString expected = GEOMETRY_FACTORY.createLineString(coordinates.toArray(new Coordinate[coordinates.size()]));
assertExpected(jtsGeom(expected), new LineStringBuilder(coordinates), true);
double[] lats = new double[coordinates.size()];
double[] lons = new double[lats.length];
for (int i = 0; i < lats.length; ++i) {
lats[i] = coordinates.get(i).y;
lons[i] = coordinates.get(i).x;
}
assertExpected(new Line(lons, lats), new LineStringBuilder(coordinates), false);
}
@Override
public void testParseMultiLineString() throws IOException, ParseException {
int numLineStrings = randomIntBetween(0, 8);
List<LineString> lineStrings = new ArrayList<>(numLineStrings);
MultiLineStringBuilder builder = new MultiLineStringBuilder();
for (int j = 0; j < numLineStrings; ++j) {
List<Coordinate> lsc = randomLineStringCoords();
Coordinate[] coords = lsc.toArray(new Coordinate[lsc.size()]);
lineStrings.add(GEOMETRY_FACTORY.createLineString(coords));
builder.linestring(new LineStringBuilder(lsc));
}
List<Line> lines = new ArrayList<>(lineStrings.size());
for (int j = 0; j < lineStrings.size(); ++j) {
Coordinate[] c = lineStrings.get(j).getCoordinates();
lines.add(new Line(Arrays.stream(c).mapToDouble(i -> i.x).toArray(), Arrays.stream(c).mapToDouble(i -> i.y).toArray()));
}
Geometry expectedGeom;
if (lines.isEmpty()) {
expectedGeom = MultiLine.EMPTY;
} else if (lines.size() == 1) {
expectedGeom = new Line(lines.get(0).getX(), lines.get(0).getY());
} else {
expectedGeom = new MultiLine(lines);
}
assertExpected(expectedGeom, builder, false);
assertMalformed(builder);
MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString(lineStrings.toArray(new LineString[lineStrings.size()]));
assumeTrue("JTS test path cannot handle empty multilinestrings", numLineStrings > 1);
assertExpected(jtsGeom(expected), builder, true);
}
@Override
public void testParsePolygon() throws IOException, ParseException {
PolygonBuilder builder = PolygonBuilder.class.cast(
RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.POLYGON)
);
Coordinate[] coords = builder.coordinates()[0][0];
LinearRing shell = GEOMETRY_FACTORY.createLinearRing(coords);
Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null);
assertExpected(jtsGeom(expected), builder, true);
assertMalformed(builder);
}
@Override
public void testParseMultiPolygon() throws IOException, ParseException {
int numPolys = randomIntBetween(0, 8);
MultiPolygonBuilder builder = new MultiPolygonBuilder();
PolygonBuilder pb;
Coordinate[] coordinates;
Polygon[] shapes = new Polygon[numPolys];
LinearRing shell;
for (int i = 0; i < numPolys; ++i) {
pb = PolygonBuilder.class.cast(RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.POLYGON));
builder.polygon(pb);
coordinates = pb.coordinates()[0][0];
shell = GEOMETRY_FACTORY.createLinearRing(coordinates);
shapes[i] = GEOMETRY_FACTORY.createPolygon(shell, null);
}
assumeTrue("JTS test path cannot handle empty multipolygon", numPolys > 1);
Shape expected = shapeCollection(shapes);
assertExpected(expected, builder, true);
assertMalformed(builder);
}
public void testParsePolygonWithHole() throws IOException, ParseException {
// add 3d point to test ISSUE #10501
List<Coordinate> shellCoordinates = new ArrayList<>();
shellCoordinates.add(new Coordinate(100, 0));
shellCoordinates.add(new Coordinate(101, 0));
shellCoordinates.add(new Coordinate(101, 1));
shellCoordinates.add(new Coordinate(100, 1));
shellCoordinates.add(new Coordinate(100, 0));
List<Coordinate> holeCoordinates = new ArrayList<>();
holeCoordinates.add(new Coordinate(100.2, 0.2));
holeCoordinates.add(new Coordinate(100.8, 0.2));
holeCoordinates.add(new Coordinate(100.8, 0.8));
holeCoordinates.add(new Coordinate(100.2, 0.8));
holeCoordinates.add(new Coordinate(100.2, 0.2));
PolygonBuilder polygonWithHole = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates));
polygonWithHole.hole(new LineStringBuilder(holeCoordinates));
LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
LinearRing[] holes = new LinearRing[1];
holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes);
assertExpected(jtsGeom(expected), polygonWithHole, true);
org.elasticsearch.geometry.LinearRing hole = new org.elasticsearch.geometry.LinearRing(
new double[] { 100.2d, 100.8d, 100.8d, 100.2d, 100.2d },
new double[] { 0.8d, 0.8d, 0.2d, 0.2d, 0.8d }
);
org.elasticsearch.geometry.Polygon p = new org.elasticsearch.geometry.Polygon(
new org.elasticsearch.geometry.LinearRing(new double[] { 101d, 101d, 100d, 100d, 101d }, new double[] { 0d, 1d, 1d, 0d, 0d }),
Collections.singletonList(hole)
);
assertExpected(p, polygonWithHole, false);
assertMalformed(polygonWithHole);
}
public void testParseMixedDimensionPolyWithHole() throws IOException, ParseException {
List<Coordinate> shellCoordinates = new ArrayList<>();
shellCoordinates.add(new Coordinate(100, 0));
shellCoordinates.add(new Coordinate(101, 0));
shellCoordinates.add(new Coordinate(101, 1));
shellCoordinates.add(new Coordinate(100, 1));
shellCoordinates.add(new Coordinate(100, 0));
// add 3d point to test ISSUE #10501
List<Coordinate> holeCoordinates = new ArrayList<>();
holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0));
holeCoordinates.add(new Coordinate(100.8, 0.2));
holeCoordinates.add(new Coordinate(100.8, 0.8));
holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0));
holeCoordinates.add(new Coordinate(100.2, 0.2));
PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates));
builder.hole(new LineStringBuilder(holeCoordinates));
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT());
XContentParser parser = createParser(xContentBuilder);
parser.nextToken();
final LegacyGeoShapeFieldMapper mapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", IndexVersion.current(), false, true)
.build(MapperBuilderContext.root(false, false));
// test store z disabled
ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> ShapeParser.parse(parser, mapperBuilder));
assertThat(e, hasToString(containsString("coordinate dimensions do not match")));
}
public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException {
List<Coordinate> shellCoordinates = new ArrayList<>();
shellCoordinates.add(new Coordinate(100, 0));
shellCoordinates.add(new Coordinate(101, 0));
shellCoordinates.add(new Coordinate(101, 1));
shellCoordinates.add(new Coordinate(100, 1));
shellCoordinates.add(new Coordinate(100, 0));
// add 3d point to test ISSUE #10501
List<Coordinate> holeCoordinates = new ArrayList<>();
holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0));
holeCoordinates.add(new Coordinate(100.8, 0.2));
holeCoordinates.add(new Coordinate(100.8, 0.8));
holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0));
holeCoordinates.add(new Coordinate(100.2, 0.2));
PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates));
builder.hole(new LineStringBuilder(holeCoordinates));
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT());
XContentParser parser = createParser(xContentBuilder);
parser.nextToken();
final IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0);
final LegacyGeoShapeFieldMapper mapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", version, false, true).build(
MapperBuilderContext.root(false, false)
);
// test store z disabled
ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> ShapeParser.parse(parser, mapperBuilder));
assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match")));
}
public void testParsePolyWithStoredZ() throws IOException {
List<Coordinate> shellCoordinates = new ArrayList<>();
shellCoordinates.add(new Coordinate(100, 0, 0));
shellCoordinates.add(new Coordinate(101, 0, 0));
shellCoordinates.add(new Coordinate(101, 1, 0));
shellCoordinates.add(new Coordinate(100, 1, 5));
shellCoordinates.add(new Coordinate(100, 0, 5));
PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates));
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT());
XContentParser parser = createParser(xContentBuilder);
parser.nextToken();
final IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0);
final LegacyGeoShapeFieldMapper mapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", version, false, true).build(
MapperBuilderContext.root(false, false)
);
ShapeBuilder<?, ?, ?> shapeBuilder = ShapeParser.parse(parser, mapperBuilder);
assertEquals(shapeBuilder.numDimensions(), 3);
}
public void testParseOpenPolygon() throws IOException {
String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))";
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(openPolygon);
XContentParser parser = createParser(xContentBuilder);
parser.nextToken();
final IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0);
final LegacyGeoShapeFieldMapper defaultMapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", version, false, true).coerce(
false
).build(MapperBuilderContext.root(false, false));
ElasticsearchParseException exception = expectThrows(
ElasticsearchParseException.class,
() -> ShapeParser.parse(parser, defaultMapperBuilder)
);
assertEquals("invalid LinearRing found (coordinates are not closed)", exception.getMessage());
final LegacyGeoShapeFieldMapper coercingMapperBuilder = new LegacyGeoShapeFieldMapper.Builder(
"test",
IndexVersion.current(),
false,
true
).coerce(true).build(MapperBuilderContext.root(false, false));
ShapeBuilder<?, ?, ?> shapeBuilder = ShapeParser.parse(parser, coercingMapperBuilder);
assertNotNull(shapeBuilder);
assertEquals("polygon ((100.0 5.0, 100.0 10.0, 90.0 10.0, 90.0 5.0, 100.0 5.0))", shapeBuilder.toWKT());
}
public void testParseSelfCrossingPolygon() throws IOException {
// test self crossing ccw poly not crossing dateline
List<Coordinate> shellCoordinates = new ArrayList<>();
shellCoordinates.add(new Coordinate(176, 15));
shellCoordinates.add(new Coordinate(-177, 10));
shellCoordinates.add(new Coordinate(-177, -10));
shellCoordinates.add(new Coordinate(176, -15));
shellCoordinates.add(new Coordinate(-177, 15));
shellCoordinates.add(new Coordinate(172, 0));
shellCoordinates.add(new Coordinate(176, 15));
PolygonBuilder poly = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates));
XContentBuilder builder = XContentFactory.jsonBuilder().value(poly.toWKT());
assertValidException(builder, InvalidShapeException.class);
}
public void testMalformedWKT() throws IOException {
// malformed points in a polygon is a common typo
String malformedWKT = "POLYGON ((100, 5) (100, 10) (90, 10), (90, 5), (100, 5)";
XContentBuilder builder = XContentFactory.jsonBuilder().value(malformedWKT);
assertValidException(builder, ElasticsearchParseException.class);
}
@Override
public void testParseEnvelope() throws IOException, ParseException {
org.apache.lucene.geo.Rectangle r = GeoTestUtil.nextBox();
EnvelopeBuilder builder = new EnvelopeBuilder(new Coordinate(r.minLon, r.maxLat), new Coordinate(r.maxLon, r.minLat));
Rectangle expected = SPATIAL_CONTEXT.makeRectangle(r.minLon, r.maxLon, r.minLat, r.maxLat);
assertExpected(expected, builder, true);
assertExpected(new org.elasticsearch.geometry.Rectangle(r.minLon, r.maxLon, r.maxLat, r.minLat), builder, false);
assertMalformed(builder);
}
public void testInvalidGeometryType() throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder().value("UnknownType (-1 -2)");
assertValidException(builder, IllegalArgumentException.class);
}
@Override
public void testParseGeometryCollection() throws IOException, ParseException {
if (rarely()) {
// assert empty shape collection
GeometryCollectionBuilder builder = new GeometryCollectionBuilder();
Shape[] expected = new Shape[0];
if (randomBoolean()) {
assertEquals(shapeCollection(expected).isEmpty(), builder.buildS4J().isEmpty());
} else {
assertEquals(shapeCollection(expected).isEmpty(), builder.buildGeometry().size() == 0);
}
} else {
GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(random());
assertExpected(gcb.buildS4J(), gcb, true);
assertExpected(GeometryNormalizer.apply(Orientation.CCW, gcb.buildGeometry()), gcb, false);
}
}
public void testUnexpectedShapeException() throws IOException {
XContentBuilder builder = toWKTContent(new PointBuilder(-1, 2), false);
XContentParser parser = createParser(builder);
parser.nextToken();
ElasticsearchParseException e = expectThrows(
ElasticsearchParseException.class,
() -> GeoWKTParser.parseExpectedType(parser, GeoShapeType.POLYGON)
);
assertThat(e, hasToString(containsString("Expected geometry type [polygon] but found [point]")));
}
}
|
GeoWKTShapeParserTests
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentValidationTest.java
|
{
"start": 5216,
"end": 5768
}
|
interface ____ {",
" @Provides",
" static String provideString() { return \"test\"; }",
"}");
CompilerTests.daggerCompiler(componentFile, subcomponentFile, moduleFile)
.compile(subject -> subject.hasErrorCount(0));
}
@Test public void componentOnEnum() {
Source componentFile =
CompilerTests.javaSource(
"test.NotAComponent",
"package test;",
"",
"import dagger.Component;",
"",
"@Component",
"
|
TestModule
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/refaster/testdata/input/NonJdkTypeTemplateExample.java
|
{
"start": 794,
"end": 929
}
|
class ____ {
ImmutableList<Integer> example() {
return ImmutableList.copyOf(Stream.of(1).iterator());
}
}
|
NonJdkTypeTemplateExample
|
java
|
apache__camel
|
tooling/maven/camel-package-maven-plugin/src/test/java/org/apache/camel/maven/packaging/endpoint/SomeEndpoint.java
|
{
"start": 1078,
"end": 1405
}
|
class ____ {
@UriPath(description = "Hostname of the Foo server")
@Metadata(required = true)
private String host;
public String getHost() {
return host;
}
/**
* Hostname of the Foo server
*/
public void setHost(String host) {
this.host = host;
}
public
|
SomeEndpoint
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_filteredOn_predicate_Test.java
|
{
"start": 1033,
"end": 1662
}
|
class ____ extends AtomicReferenceArrayAssert_filtered_baseTest {
@Test
void should_filter_iterable_under_test_on_predicate() {
assertThat(employees).filteredOn(employee -> employee.getAge() > 100)
.containsOnly(yoda, obiwan);
}
@Test
void should_fail_if_given_predicate_is_null() {
Predicate<? super Employee> predicate = null;
assertThatIllegalArgumentException().isThrownBy(() -> assertThat(employees).filteredOn(predicate))
.withMessage("The filter predicate should not be null");
}
}
|
AtomicReferenceArrayAssert_filteredOn_predicate_Test
|
java
|
apache__camel
|
test-infra/camel-test-infra-openai-mock/src/test/java/org/apache/camel/test/infra/openai/mock/OpenAIMockReplyWithAfterToolTest.java
|
{
"start": 1292,
"end": 4560
}
|
class ____ {
@RegisterExtension
public OpenAIMock openAIMock = new OpenAIMock().builder()
.when("What is the weather in london?")
.invokeTool("FindsTheLatitudeAndLongitudeOfAGivenCity")
.withParam("name", "London")
.replyWith("the latitude of london is 1")
.end()
.build();
@Test
public void testReplyWithAfterTool() throws Exception {
HttpClient client = HttpClient.newHttpClient();
// First request - should trigger tool call
HttpRequest request1 = HttpRequest.newBuilder()
.uri(URI.create(openAIMock.getBaseUrl() + "/v1/chat/completions"))
.header("Content-Type", "application/json")
.POST(HttpRequest.BodyPublishers
.ofString("{\"messages\": [{\"role\": \"user\", \"content\": \"What is the weather in london?\"}]}"))
.build();
HttpResponse<String> response1 = client.send(request1, HttpResponse.BodyHandlers.ofString());
String responseBody1 = response1.body();
ObjectMapper objectMapper = new ObjectMapper();
JsonNode responseJson1 = objectMapper.readTree(responseBody1);
JsonNode choice1 = responseJson1.path("choices").get(0);
JsonNode message1 = choice1.path("message");
assertEquals("assistant", message1.path("role").asText());
assertEquals("tool_calls", choice1.path("finish_reason").asText());
JsonNode toolCalls = message1.path("tool_calls");
assertEquals(1, toolCalls.size());
JsonNode toolCall = toolCalls.get(0);
String toolCallId = toolCall.path("id").asText();
assertEquals("function", toolCall.path("type").asText());
assertEquals("FindsTheLatitudeAndLongitudeOfAGivenCity", toolCall.path("function").path("name").asText());
assertEquals("{\"name\":\"London\"}", toolCall.path("function").path("arguments").asText());
// Second request with tool result - should return custom reply
String secondRequestBody = String.format(
"{\"messages\": [{\"role\": \"user\", \"content\": \"What is the weather in london?\"}, {\"role\":\"tool\", \"tool_call_id\":\"%s\", \"content\":\"{\\\"latitude\\\": \\\"51.5074\\\", \\\"longitude\\\": \\\"-0.1278\\\"}\"}]}",
toolCallId);
HttpRequest request2 = HttpRequest.newBuilder()
.uri(URI.create(openAIMock.getBaseUrl() + "/v1/chat/completions"))
.header("Content-Type", "application/json")
.POST(HttpRequest.BodyPublishers.ofString(secondRequestBody))
.build();
HttpResponse<String> response2 = client.send(request2, HttpResponse.BodyHandlers.ofString());
String responseBody2 = response2.body();
JsonNode responseJson2 = objectMapper.readTree(responseBody2);
JsonNode choice2 = responseJson2.path("choices").get(0);
JsonNode message2 = choice2.path("message");
assertEquals("assistant", message2.path("role").asText());
assertEquals("stop", choice2.path("finish_reason").asText());
assertEquals("the latitude of london is 1", message2.path("content").asText());
}
}
|
OpenAIMockReplyWithAfterToolTest
|
java
|
google__guice
|
core/src/com/google/inject/internal/ExposedKeyFactory.java
|
{
"start": 873,
"end": 2680
}
|
class ____<T> extends InternalFactory<T> implements CreationListener {
private final Key<T> key;
private final Object source;
private final PrivateElements privateElements;
private InternalFactory<T> delegate;
ExposedKeyFactory(Key<T> key, Object source, PrivateElements privateElements) {
this.key = key;
this.source = source;
this.privateElements = privateElements;
}
@Override
public void notify(Errors errors) {
InjectorImpl privateInjector = (InjectorImpl) privateElements.getInjector();
BindingImpl<T> explicitBinding = privateInjector.getBindingData().getExplicitBinding(key);
// validate that the child injector has its own factory. If the getInternalFactory() returns
// this, then that child injector doesn't have a factory (and getExplicitBinding has returned
// its parent's binding instead
if (explicitBinding.getInternalFactory() == this) {
errors.withSource(explicitBinding.getSource()).exposedButNotBound(key);
return;
}
@SuppressWarnings("unchecked") // safe because InternalFactory<T> is covariant
InternalFactory<T> delegate = (InternalFactory<T>) explicitBinding.getInternalFactory();
this.delegate = delegate;
}
@Override
public T get(InternalContext context, Dependency<?> dependency, boolean linked)
throws InternalProvisionException {
try {
return delegate.get(context, dependency, linked);
} catch (InternalProvisionException ipe) {
throw ipe.addSource(source);
}
}
@Override
MethodHandleResult makeHandle(LinkageContext context, boolean linked) {
return makeCachableOnLinkedSetting(
InternalMethodHandles.catchInternalProvisionExceptionAndRethrowWithSource(
this.delegate.getHandle(context, linked), source));
}
}
|
ExposedKeyFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/spi/PropertyData.java
|
{
"start": 1228,
"end": 1371
}
|
class ____ itself or the element type if an array
*/
String getClassOrElementName() throws MappingException;
/**
* Returns the returned
|
name
|
java
|
bumptech__glide
|
library/src/test/java/com/bumptech/glide/request/target/CustomViewTargetTest.java
|
{
"start": 18953,
"end": 19887
}
|
class ____ extends CustomViewTarget<View, Object> {
TestViewTarget(View view) {
super(view);
}
@Override
protected void onResourceCleared(@Nullable Drawable placeholder) {
// Intentionally Empty.
}
// We're intentionally avoiding the super call.
@SuppressWarnings("MissingSuperCall")
@Override
public void onResourceReady(
@NonNull Object resource, @Nullable Transition<? super Object> transition) {
// Avoid calling super.
}
// We're intentionally avoiding the super call.
@SuppressWarnings("MissingSuperCall")
@Override
public void onResourceLoading(@Nullable Drawable placeholder) {
// Avoid calling super.
}
// We're intentionally avoiding the super call.
@SuppressWarnings("MissingSuperCall")
@Override
public void onLoadFailed(@Nullable Drawable errorDrawable) {
// Avoid calling super.
}
}
}
|
TestViewTarget
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/operations/SqlNodeToCallOperationTest.java
|
{
"start": 11516,
"end": 11901
}
|
class ____ implements Procedure {
public @DataTypeHint("TIMESTAMP(3)") LocalDateTime[] call(
ProcedureContext procedureContext,
@DataTypeHint("TIMESTAMP(3)") LocalDateTime localDateTime,
@DataTypeHint("TIMESTAMP(3)") TimestampData timestampData) {
return null;
}
}
/** A simple pojo
|
TimeStampArgProcedure
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-observation/src/test/java/org/springframework/boot/micrometer/observation/autoconfigure/ObservationHandlerGroupsTests.java
|
{
"start": 4431,
"end": 4832
}
|
class ____ implements ObservationHandler<Observation.Context> {
private final String name;
NamedObservationHandler(String name) {
this.name = name;
}
@Override
public boolean supportsContext(Context context) {
return true;
}
@Override
public String toString() {
return getClass().getSimpleName() + "{name='" + this.name + "'}";
}
}
private static
|
NamedObservationHandler
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1700/Issue1785.java
|
{
"start": 117,
"end": 280
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.parseObject("\"2006-8-9\"", java.sql.Timestamp.class);
}
}
|
Issue1785
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
|
{
"start": 898,
"end": 8084
}
|
class ____ extends ClusterStateUpdateTask {
private static final Logger logger = LogManager.getLogger(StartupSelfGeneratedLicenseTask.class);
static final String TASK_SOURCE = "maybe generate license for cluster";
/**
* Max number of nodes licensed by generated trial license
*/
private static final int selfGeneratedLicenseMaxNodes = 1000;
private final Settings settings;
private final Clock clock;
private final ClusterService clusterService;
public StartupSelfGeneratedLicenseTask(Settings settings, Clock clock, ClusterService clusterService) {
this.settings = settings;
this.clock = clock;
this.clusterService = clusterService;
}
@Override
public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
LicensesMetadata licensesMetadata = newState.metadata().custom(LicensesMetadata.TYPE);
if (logger.isDebugEnabled()) {
logger.debug("registered self generated license: {}", licensesMetadata);
}
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
XPackPlugin.checkReadyForXPackCustomMetadata(currentState);
final Metadata metadata = currentState.metadata();
final LicensesMetadata currentLicensesMetadata = metadata.custom(LicensesMetadata.TYPE);
// do not generate a license if any license is present
if (currentLicensesMetadata == null) {
License.LicenseType type = SelfGeneratedLicense.validateSelfGeneratedType(
LicenseSettings.SELF_GENERATED_LICENSE_TYPE.get(settings)
);
return updateWithLicense(currentState, type);
} else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetadata.getLicense())) {
return updateLicenseSignature(currentState, currentLicensesMetadata);
} else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetadata.getLicense())) {
return extendBasic(currentState, currentLicensesMetadata);
} else {
return currentState;
}
}
private ClusterState updateLicenseSignature(ClusterState currentState, LicensesMetadata currentLicenseMetadata) {
License license = currentLicenseMetadata.getLicense();
Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata());
String type = license.type();
long issueDate = license.issueDate();
long expiryDate = LicenseUtils.getExpiryDate(license);
// extend the basic license expiration date if needed since extendBasic will not be called now
if (License.LicenseType.isBasic(type) && expiryDate != LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) {
expiryDate = LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS;
}
License.Builder specBuilder = License.builder()
.uid(license.uid())
.issuedTo(license.issuedTo())
.maxNodes(selfGeneratedLicenseMaxNodes)
.issueDate(issueDate)
.type(type)
.expiryDate(expiryDate);
License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
TrialLicenseVersion trialVersion = currentLicenseMetadata.getMostRecentTrialVersion();
LicensesMetadata newLicenseMetadata = new LicensesMetadata(selfGeneratedLicense, trialVersion);
mdBuilder.putCustom(LicensesMetadata.TYPE, newLicenseMetadata);
logger.info(
"Updating existing license to the new version.\n\nOld license:\n {}\n\n New license:\n{}",
license,
newLicenseMetadata.getLicense()
);
return ClusterState.builder(currentState).metadata(mdBuilder).build();
}
@Override
public void onFailure(@Nullable Exception e) {
var state = clusterService.lifecycleState();
if (state == Lifecycle.State.STOPPED || state == Lifecycle.State.CLOSED) {
logger.debug("node shutdown during [" + TASK_SOURCE + "]", e);
} else {
logger.error("unexpected failure during [" + TASK_SOURCE + "]", e);
}
}
private ClusterState extendBasic(ClusterState currentState, LicensesMetadata currentLicenseMetadata) {
License license = currentLicenseMetadata.getLicense();
Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata());
LicensesMetadata newLicenseMetadata = createBasicLicenseFromExistingLicense(currentLicenseMetadata);
mdBuilder.putCustom(LicensesMetadata.TYPE, newLicenseMetadata);
logger.info("""
Existing basic license has an expiration. Basic licenses no longer expire.Regenerating license.
Old license:
{}
New license:
{}""", license, newLicenseMetadata.getLicense());
return ClusterState.builder(currentState).metadata(mdBuilder).build();
}
private LicensesMetadata createBasicLicenseFromExistingLicense(LicensesMetadata currentLicenseMetadata) {
License currentLicense = currentLicenseMetadata.getLicense();
License.Builder specBuilder = License.builder()
.uid(currentLicense.uid())
.issuedTo(currentLicense.issuedTo())
.maxNodes(selfGeneratedLicenseMaxNodes)
.issueDate(currentLicense.issueDate())
.type(License.LicenseType.BASIC)
.expiryDate(LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentLicense.version());
TrialLicenseVersion trialVersion = currentLicenseMetadata.getMostRecentTrialVersion();
return new LicensesMetadata(selfGeneratedLicense, trialVersion);
}
private ClusterState updateWithLicense(ClusterState currentState, License.LicenseType type) {
long issueDate = clock.millis();
Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata());
long expiryDate;
if (type == License.LicenseType.BASIC) {
expiryDate = LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS;
} else {
expiryDate = issueDate + LicenseSettings.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis();
}
License.Builder specBuilder = License.builder()
.uid(UUID.randomUUID().toString())
.issuedTo(clusterService.getClusterName().value())
.maxNodes(selfGeneratedLicenseMaxNodes)
.issueDate(issueDate)
.type(type)
.expiryDate(expiryDate);
License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
LicensesMetadata licensesMetadata;
if (License.LicenseType.TRIAL.equals(type)) {
licensesMetadata = new LicensesMetadata(selfGeneratedLicense, TrialLicenseVersion.CURRENT);
} else {
licensesMetadata = new LicensesMetadata(selfGeneratedLicense, null);
}
mdBuilder.putCustom(LicensesMetadata.TYPE, licensesMetadata);
return ClusterState.builder(currentState).metadata(mdBuilder).build();
}
}
|
StartupSelfGeneratedLicenseTask
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLQueryExpr.java
|
{
"start": 1112,
"end": 3436
}
|
class ____ extends SQLExprImpl implements Serializable {
private static final long serialVersionUID = 1L;
public SQLSelect subQuery;
public SQLQueryExpr() {
}
public SQLQueryExpr(SQLSelect select) {
setSubQuery(select);
}
public SQLSelect getSubQuery() {
return this.subQuery;
}
public void setSubQuery(SQLSelect subQuery) {
if (subQuery != null) {
subQuery.setParent(this);
}
this.subQuery = subQuery;
}
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
if (this.subQuery != null) {
this.subQuery.accept(visitor);
}
}
visitor.endVisit(this);
}
public List<SQLObject> getChildren() {
return Collections.<SQLObject>singletonList(subQuery);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((subQuery == null) ? 0 : subQuery.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SQLQueryExpr other = (SQLQueryExpr) obj;
if (subQuery == null) {
if (other.subQuery != null) {
return false;
}
} else if (!subQuery.equals(other.subQuery)) {
return false;
}
return true;
}
public SQLQueryExpr clone() {
SQLQueryExpr x = new SQLQueryExpr();
if (subQuery != null) {
x.setSubQuery(subQuery.clone());
}
x.parenthesized = this.parenthesized;
return x;
}
public SQLDataType computeDataType() {
if (subQuery == null) {
return null;
}
SQLSelectQueryBlock queryBlock = subQuery.getFirstQueryBlock();
if (queryBlock == null) {
return null;
}
List<SQLSelectItem> selectList = queryBlock.getSelectList();
if (selectList.size() == 1) {
return selectList.get(0).computeDataType();
}
return null;
}
}
|
SQLQueryExpr
|
java
|
spring-projects__spring-boot
|
module/spring-boot-cloudfoundry/src/test/java/org/springframework/boot/cloudfoundry/autoconfigure/actuate/endpoint/servlet/CloudFoundryHealthEndpointWebExtensionTests.java
|
{
"start": 2925,
"end": 4589
}
|
class ____ {
private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner()
.withPropertyValues("VCAP_APPLICATION={}")
.withConfiguration(
AutoConfigurations.of(SecurityAutoConfiguration.class, ServletWebSecurityAutoConfiguration.class,
WebMvcAutoConfiguration.class, JacksonAutoConfiguration.class,
DispatcherServletAutoConfiguration.class, HttpMessageConvertersAutoConfiguration.class,
PropertyPlaceholderAutoConfiguration.class, RestTemplateAutoConfiguration.class,
ManagementContextAutoConfiguration.class, ServletManagementContextAutoConfiguration.class,
EndpointAutoConfiguration.class, WebEndpointAutoConfiguration.class,
HealthContributorAutoConfiguration.class, HealthContributorRegistryAutoConfiguration.class,
HealthEndpointAutoConfiguration.class, CloudFoundryActuatorAutoConfiguration.class))
.withUserConfiguration(TestHealthIndicator.class);
@Test
void healthComponentsAlwaysPresent() {
this.contextRunner.run((context) -> {
CloudFoundryHealthEndpointWebExtension extension = context
.getBean(CloudFoundryHealthEndpointWebExtension.class);
HealthDescriptor descriptor = extension.health(ApiVersion.V3).getBody();
assertThat(descriptor).isNotNull();
Map<String, HealthDescriptor> components = ((CompositeHealthDescriptor) descriptor).getComponents();
assertThat(components).isNotNull();
HealthDescriptor component = components.entrySet().iterator().next().getValue();
assertThat(((IndicatedHealthDescriptor) component).getDetails()).containsEntry("spring", "boot");
});
}
private static final
|
CloudFoundryHealthEndpointWebExtensionTests
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/dependency/ErroneousAddressMapperWithUnknownPropertyInDependsOn.java
|
{
"start": 304,
"end": 638
}
|
interface ____ {
ErroneousAddressMapperWithUnknownPropertyInDependsOn INSTANCE = Mappers.getMapper(
ErroneousAddressMapperWithUnknownPropertyInDependsOn.class
);
@Mapping(target = "lastName", dependsOn = "doesnotexist")
PersonDto personToDto(Person person);
}
|
ErroneousAddressMapperWithUnknownPropertyInDependsOn
|
java
|
elastic__elasticsearch
|
libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/NioFilesActions.java
|
{
"start": 1900,
"end": 18347
}
|
class ____ {
@EntitlementTest(expectedAccess = PLUGINS)
static void filesGetOwner() throws IOException {
Files.getOwner(readFile());
}
@EntitlementTest(expectedAccess = PLUGINS)
static void filesProbeContentType() throws IOException {
Files.probeContentType(readFile());
}
@EntitlementTest(expectedAccess = PLUGINS)
static void filesSetOwner() throws IOException {
UserPrincipal owner = EntitledActions.getFileOwner(readWriteFile());
Files.setOwner(readWriteFile(), owner); // set to existing owner, just trying to execute the method
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewInputStream() throws IOException {
Files.newInputStream(readFile()).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewOutputStream() throws IOException {
Files.newOutputStream(readWriteFile()).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewByteChannelRead() throws IOException {
Files.newByteChannel(readFile(), Set.of(StandardOpenOption.READ)).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewByteChannelWrite() throws IOException {
Files.newByteChannel(readWriteFile(), Set.of(StandardOpenOption.WRITE)).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewByteChannelReadVarargs() throws IOException {
Files.newByteChannel(readFile(), StandardOpenOption.READ).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewByteChannelWriteVarargs() throws IOException {
Files.newByteChannel(readWriteFile(), StandardOpenOption.WRITE).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewDirectoryStream() throws IOException {
Files.newDirectoryStream(FileCheckActions.readDir()).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewDirectoryStreamGlob() throws IOException {
Files.newDirectoryStream(FileCheckActions.readDir(), "*").close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesNewDirectoryStreamFilter() throws IOException {
Files.newDirectoryStream(FileCheckActions.readDir(), entry -> false).close();
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateFile() throws IOException {
Files.createFile(readWriteDir().resolve("file.txt"));
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateDirectory() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
Files.createDirectory(directory.resolve("subdir"));
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateDirectories() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
Files.createDirectories(directory.resolve("subdir").resolve("subsubdir"));
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateTempFileInDir() throws IOException {
Files.createTempFile(readWriteDir(), "prefix", "suffix");
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateTempDirectoryInDir() throws IOException {
Files.createTempDirectory(readWriteDir(), "prefix");
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateSymbolicLink() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
try {
Files.createSymbolicLink(directory.resolve("link"), readFile());
} catch (UnsupportedOperationException | FileSystemException e) {
// OK not to implement symbolic link in the filesystem
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateRelativeSymbolicLink() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
try {
Files.createSymbolicLink(directory.resolve("link"), Path.of("target"));
} catch (UnsupportedOperationException | FileSystemException e) {
// OK not to implement symbolic link in the filesystem
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateLink() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
try {
Files.createLink(directory.resolve("link"), readFile());
} catch (UnsupportedOperationException | FileSystemException e) {
// OK not to implement symbolic link in the filesystem
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCreateRelativeLink() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
var target = directory.resolve("target");
try {
Files.createLink(directory.resolve("link"), Path.of("target"));
} catch (UnsupportedOperationException | FileSystemException e) {
// OK not to implement symbolic link in the filesystem
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesDelete() throws IOException {
var file = EntitledActions.createTempFileForWrite();
Files.delete(file);
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesDeleteIfExists() throws IOException {
var file = EntitledActions.createTempFileForWrite();
Files.deleteIfExists(file);
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesReadSymbolicLink() throws IOException {
var link = EntitledActions.createTempSymbolicLink();
Files.readSymbolicLink(link);
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesCopy() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
Files.copy(readFile(), directory.resolve("copied"));
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesMove() throws IOException {
var directory = EntitledActions.createTempDirectoryForWrite();
var file = EntitledActions.createTempFileForWrite();
Files.move(file, directory.resolve("moved"));
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesIsSameFile() throws IOException {
Files.isSameFile(readWriteFile(), readFile());
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesMismatch() throws IOException {
Files.mismatch(readWriteFile(), readFile());
}
@SuppressForbidden(reason = "testing entitlements on this API specifically")
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesIsHidden() throws IOException {
Files.isHidden(readFile());
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesGetFileStore() throws IOException {
var file = EntitledActions.createTempFileForRead();
Files.getFileStore(file);
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void checkFilesGetFileAttributeView() {
Files.getFileAttributeView(readFile(), FileOwnerAttributeView.class);
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesReadAttributesWithClass() throws IOException {
Files.readAttributes(readFile(), BasicFileAttributes.class);
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesReadAttributesWithString() throws IOException {
Files.readAttributes(readFile(), "*");
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesGetAttribute() throws IOException {
try {
Files.getAttribute(readFile(), "dos:hidden");
} catch (UnsupportedOperationException | IllegalArgumentException | FileSystemException e) {
// OK if the file does not have/does not support the attribute
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesSetAttribute() throws IOException {
var file = EntitledActions.createTempFileForWrite();
try {
Files.setAttribute(file, "dos:hidden", true);
} catch (UnsupportedOperationException | IllegalArgumentException | FileSystemException e) {
// OK if the file does not have/does not support the attribute
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesGetPosixFilePermissions() throws IOException {
try {
Files.getPosixFilePermissions(readFile());
} catch (UnsupportedOperationException | IllegalArgumentException | FileSystemException e) {
// OK if the file does not have/does not support the attribute
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void checkFilesSetPosixFilePermissions() throws IOException {
var file = EntitledActions.createTempFileForWrite();
try {
Files.setPosixFilePermissions(file, Set.of());
} catch (UnsupportedOperationException | IllegalArgumentException | FileSystemException e) {
// OK if the file does not have/does not support the attribute
}
}
    // --- Read-only attribute/status probes ---------------------------------
    // Each check performs a metadata query against a path supplied by the test
    // harness (readFile()/readDir(), defined elsewhere in this class); the
    // expectedAccess = PLUGINS annotation marks the operation as one plugins
    // are entitled to perform.
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsSymbolicLink() {
        Files.isSymbolicLink(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsDirectory() {
        Files.isDirectory(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsRegularFile() {
        Files.isRegularFile(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesGetLastModifiedTime() throws IOException {
        Files.getLastModifiedTime(readFile());
    }

    // Mutating probe: uses a writable temp file rather than the read-only fixture.
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesSetLastModifiedTime() throws IOException {
        var file = EntitledActions.createTempFileForWrite();
        Files.setLastModifiedTime(file, FileTime.from(Instant.now()));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesSize() throws IOException {
        Files.size(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesExists() {
        Files.exists(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesNotExists() {
        Files.notExists(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsReadable() {
        Files.isReadable(readFile());
    }

    // Note: method name says "Writeable" but it probes Files.isWritable (JDK spelling).
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsWriteable() {
        Files.isWritable(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesIsExecutable() {
        Files.isExecutable(readFile());
    }

    // --- Directory traversal ------------------------------------------------
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWalkFileTree() throws IOException {
        Files.walkFileTree(readDir(), dummyVisitor());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWalkFileTreeWithOptions() throws IOException {
        Files.walkFileTree(readDir(), Set.of(FileVisitOption.FOLLOW_LINKS), 2, dummyVisitor());
    }

    // Visitor that prunes every subtree: the walk call itself (not the
    // visiting logic) is what these checks exercise.
    private static FileVisitor<Path> dummyVisitor() {
        return new FileVisitor<>() {
            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
                return FileVisitResult.SKIP_SUBTREE;
            }

            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                return FileVisitResult.SKIP_SUBTREE;
            }

            @Override
            public FileVisitResult visitFileFailed(Path file, IOException exc) {
                return FileVisitResult.SKIP_SUBTREE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
                return FileVisitResult.SKIP_SUBTREE;
            }
        };
    }
    // --- Content access: readers/writers ------------------------------------
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesNewBufferedReader() throws IOException {
        Files.newBufferedReader(readFile()).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesNewBufferedReaderWithCharset() throws IOException {
        Files.newBufferedReader(readFile(), Charset.defaultCharset()).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesNewBufferedWriter() throws IOException {
        Files.newBufferedWriter(readWriteFile(), StandardOpenOption.WRITE).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesNewBufferedWriterWithCharset() throws IOException {
        Files.newBufferedWriter(readWriteFile(), Charset.defaultCharset(), StandardOpenOption.WRITE).close();
    }

    // --- Copy helpers --------------------------------------------------------
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesCopyInputStream() throws IOException {
        var directory = EntitledActions.createTempDirectoryForWrite();
        Files.copy(new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)), directory.resolve("copied"));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesCopyOutputStream() throws IOException {
        Files.copy(readFile(), new ByteArrayOutputStream());
    }

    // --- Whole-file reads ----------------------------------------------------
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesReadAllBytes() throws IOException {
        Files.readAllBytes(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesReadString() throws IOException {
        Files.readString(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesReadStringWithCharset() throws IOException {
        Files.readString(readFile(), Charset.defaultCharset());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesReadAllLines() throws IOException {
        Files.readAllLines(readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesReadAllLinesWithCharset() throws IOException {
        Files.readAllLines(readFile(), Charset.defaultCharset());
    }

    // --- Whole-file writes ---------------------------------------------------
    // NOTE(review): this calls Files.writeString, not Files.write — it is
    // identical to checkFilesWriteString below; confirm whether
    // Files.write(Path, byte[]) was the API intended to be under test here.
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWrite() throws IOException {
        var directory = EntitledActions.createTempDirectoryForWrite();
        Files.writeString(directory.resolve("file"), "foo");
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWriteLines() throws IOException {
        var directory = EntitledActions.createTempDirectoryForWrite();
        Files.write(directory.resolve("file"), List.of("foo"));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWriteString() throws IOException {
        var directory = EntitledActions.createTempDirectoryForWrite();
        Files.writeString(directory.resolve("file"), "foo");
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWriteStringWithCharset() throws IOException {
        var directory = EntitledActions.createTempDirectoryForWrite();
        Files.writeString(directory.resolve("file"), "foo", Charset.defaultCharset());
    }

    // --- Lazy stream-returning APIs ------------------------------------------
    // The returned streams are closed immediately: only opening them is the
    // entitlement-sensitive operation being checked.
    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesList() throws IOException {
        Files.list(readDir()).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWalk() throws IOException {
        Files.walk(readDir()).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesWalkWithDepth() throws IOException {
        Files.walk(readDir(), 2).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesFind() throws IOException {
        Files.find(readDir(), 2, (path, basicFileAttributes) -> false).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesLines() throws IOException {
        Files.lines(readFile()).close();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkFilesLinesWithCharset() throws IOException {
        Files.lines(readFile(), Charset.defaultCharset()).close();
    }

    // Static utility holder: no instances.
    private NioFilesActions() {}
}
|
NioFilesActions
|
java
|
apache__camel
|
components/camel-jgroups/src/main/java/org/apache/camel/component/jgroups/JGroupsComponent.java
|
{
"start": 1227,
"end": 2759
}
|
class ____ extends DefaultComponent {

    @Metadata
    private JChannel channel;
    @Metadata
    private String channelProperties;
    @Metadata(label = "consumer")
    private boolean enableViewMessages;

    public JGroupsComponent() {
    }

    @Override
    protected Endpoint createEndpoint(String uri, String clusterName, Map<String, Object> parameters) throws Exception {
        // Build the endpoint from the component-level defaults, then let Camel
        // bind any remaining URI options onto it.
        JGroupsEndpoint answer
                = new JGroupsEndpoint(uri, this, channel, clusterName, channelProperties, enableViewMessages);
        setProperties(answer, parameters);
        return answer;
    }

    public boolean isEnableViewMessages() {
        return enableViewMessages;
    }

    /**
     * If set to true, the consumer endpoint will receive org.jgroups.View messages as well (not only
     * org.jgroups.Message instances). By default only regular messages are consumed by the endpoint.
     */
    public void setEnableViewMessages(boolean enableViewMessages) {
        this.enableViewMessages = enableViewMessages;
    }

    public JChannel getChannel() {
        return channel;
    }

    /**
     * Channel to use
     */
    public void setChannel(JChannel channel) {
        this.channel = channel;
    }

    public String getChannelProperties() {
        return channelProperties;
    }

    /**
     * Specifies configuration properties of the JChannel used by the endpoint.
     */
    public void setChannelProperties(String channelProperties) {
        this.channelProperties = channelProperties;
    }
}
|
JGroupsComponent
|
java
|
dropwizard__dropwizard
|
.mvn/wrapper/MavenWrapperDownloader.java
|
{
"start": 726,
"end": 4941
}
|
class ____ {

    private static final String WRAPPER_VERSION = "0.5.6";

    /**
     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
     */
    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
        + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";

    /**
     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
     * use instead of the default one.
     */
    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
            ".mvn/wrapper/maven-wrapper.properties";

    /**
     * Path where the maven-wrapper.jar will be saved to.
     */
    private static final String MAVEN_WRAPPER_JAR_PATH =
            ".mvn/wrapper/maven-wrapper.jar";

    /**
     * Name of the property which should be used to override the default download url for the wrapper.
     */
    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

    /**
     * Entry point. {@code args[0]} is the project base directory. Resolves the
     * download URL (the default, optionally overridden by the wrapperUrl
     * property in maven-wrapper.properties), downloads maven-wrapper.jar into
     * .mvn/wrapper, and exits 0 on success or 1 on any download failure.
     */
    public static void main(String[] args) {
        System.out.println("- Downloader started");
        File baseDirectory = new File(args[0]);
        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());

        // If the maven-wrapper.properties exists, read it and check if it contains a custom
        // wrapperUrl parameter.
        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
        String url = DEFAULT_DOWNLOAD_URL;
        if (mavenWrapperPropertyFile.exists()) {
            // try-with-resources guarantees the stream is closed even if load()
            // throws, replacing the original manual close() in a finally block.
            try (FileInputStream in = new FileInputStream(mavenWrapperPropertyFile)) {
                Properties mavenWrapperProperties = new Properties();
                mavenWrapperProperties.load(in);
                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
            } catch (IOException e) {
                // Best-effort: fall back to the default URL on a bad properties file.
                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
            }
        }
        System.out.println("- Downloading from: " + url);

        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
        if (!outputFile.getParentFile().exists()) {
            if (!outputFile.getParentFile().mkdirs()) {
                System.out.println(
                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
            }
        }
        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
        try {
            downloadFileFromURL(url, outputFile);
            System.out.println("Done");
            System.exit(0);
        } catch (Throwable e) {
            System.out.println("- Error downloading");
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Downloads {@code urlString} to {@code destination}. If both MVNW_USERNAME
     * and MVNW_PASSWORD are set in the environment they are installed as the
     * default HTTP authenticator credentials before the transfer.
     *
     * @throws Exception on any connection or I/O failure
     */
    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
            String username = System.getenv("MVNW_USERNAME");
            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
            Authenticator.setDefault(new Authenticator() {
                @Override
                protected PasswordAuthentication getPasswordAuthentication() {
                    return new PasswordAuthentication(username, password);
                }
            });
        }
        URL website = new URL(urlString);
        // try-with-resources closes both channel and stream even when
        // transferFrom fails; the original leaked them on any I/O error.
        try (ReadableByteChannel rbc = Channels.newChannel(website.openStream());
                FileOutputStream fos = new FileOutputStream(destination)) {
            fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
        }
    }
}
|
MavenWrapperDownloader
|
java
|
apache__camel
|
test-infra/camel-test-infra-ftp/src/main/java/org/apache/camel/test/infra/ftp/services/embedded/FtpsUtil.java
|
{
"start": 1066,
"end": 2114
}
|
class ____ {

    private static final Logger LOG = LoggerFactory.getLogger(FtpsUtil.class);

    // Result of the one-time algorithm probe, computed lazily on first use.
    private static boolean checked;
    private static boolean hasRequiredAlgorithms;

    private FtpsUtil() {
    }

    /**
     * Reports whether the JVM provides the SunX509 key/trust manager
     * algorithms required by the FTPS tests. The probe runs at most once;
     * subsequent calls return the cached answer.
     */
    public static boolean hasRequiredAlgorithms() {
        if (checked) {
            return hasRequiredAlgorithms;
        }
        hasRequiredAlgorithms = doCheck();
        return hasRequiredAlgorithms;
    }

    // Performs the actual probe and marks the check as done regardless of outcome.
    private static boolean doCheck() {
        LOG.debug("Checking if the system has the required algorithms for the test execution");

        try {
            KeyManagerFactory.getInstance("SunX509");
            TrustManagerFactory.getInstance("SunX509");
            return true;
        } catch (NoSuchAlgorithmException e) {
            String name = System.getProperty("os.name");
            String message = e.getMessage();

            LOG.warn("SunX509 is not available on this platform [{}] Testing is skipped! Real cause: {}", name, message, e);
            return false;
        } finally {
            checked = true;
        }
    }
}
|
FtpsUtil
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
|
{
"start": 3593,
"end": 18717
}
|
class ____ {

  private static final Logger LOG = LoggerFactory
      .getLogger(UnmanagedAMLauncher.class);

  private Configuration conf;

  // Handle to talk to the Resource Manager/Applications Manager
  protected YarnClient rmClient;
  // Application master specific info to register a new Application with RM/ASM
  private String appName = "";
  // App master priority
  private int amPriority = 0;
  // Queue for App master
  private String amQueue = "";
  // cmd to start AM
  private String amCmd = null;
  // set the classpath explicitly
  private String classpath = null;

  // Set once the forked AM process has exited; monitorApplication() uses it to
  // bound how long it keeps waiting for the final ApplicationReport.
  private volatile boolean amCompleted = false;

  private static final long AM_STATE_WAIT_TIMEOUT_MS = 10000;

  /**
   * Entry point: parses the command line, then submits and monitors the
   * unmanaged AM. Exits 0 on a clean run, 1 on any error.
   *
   * @param args
   *          Command line arguments
   */
  public static void main(String[] args) {
    try {
      UnmanagedAMLauncher client = new UnmanagedAMLauncher();
      LOG.info("Initializing Client");
      boolean doRun = client.init(args);
      if (!doRun) {
        System.exit(0);
      }
      client.run();
    } catch (Throwable t) {
      LOG.error("Error running Client", t);
      System.exit(1);
    }
  }

  /**
   * Creates a launcher using the given Hadoop configuration.
   */
  public UnmanagedAMLauncher(Configuration conf) throws Exception {
    // Set up RPC
    this.conf = conf;
  }

  public UnmanagedAMLauncher() throws Exception {
    this(new Configuration());
  }

  private void printUsage(Options opts) {
    new HelpFormatter().printHelp("Client", opts);
  }

  /**
   * Parses command line options into fields and creates the YarnClient.
   *
   * @return true if the launcher should proceed; false if only help was printed
   * @throws ParseException on malformed command line arguments
   */
  public boolean init(String[] args) throws ParseException {

    Options opts = new Options();
    opts.addOption("appname", true,
        "Application Name. Default value - UnmanagedAM");
    opts.addOption("priority", true, "Application Priority. Default 0");
    opts.addOption("queue", true,
        "RM Queue in which this application is to be submitted");
    // NOTE(review): "master_memory" is accepted here but its value is never
    // read anywhere in this class — confirm whether it should be applied to
    // the submission context or removed.
    opts.addOption("master_memory", true,
        "Amount of memory in MB to be requested to run the application master");
    opts.addOption("cmd", true, "command to start unmanaged AM (required)");
    opts.addOption("classpath", true, "additional classpath");
    opts.addOption("help", false, "Print usage");
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
      printUsage(opts);
      throw new IllegalArgumentException(
          "No args specified for client to initialize");
    }

    if (cliParser.hasOption("help")) {
      printUsage(opts);
      return false;
    }

    appName = cliParser.getOptionValue("appname", "UnmanagedAM");
    amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
    amQueue = cliParser.getOptionValue("queue", "default");
    classpath = cliParser.getOptionValue("classpath", () ->null);

    amCmd = cliParser.getOptionValue("cmd");
    if (amCmd == null) {
      printUsage(opts);
      throw new IllegalArgumentException(
          "No cmd specified for application master");
    }

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    rmClient = YarnClient.createYarnClient();
    rmClient.init(yarnConf);

    return true;
  }

  /**
   * Runs the AM command as a child process of this launcher, wiring up the
   * environment a container-launched AM would normally receive (AMRM token
   * file, container id, NM host/ports), and pumps its stdout/stderr to this
   * process's streams until it exits.
   *
   * @param attemptId application attempt to launch the unmanaged AM for
   */
  public void launchAM(ApplicationAttemptId attemptId)
      throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token =
        rmClient.getAMRMToken(attemptId.getApplicationId());
    // Service will be empty but that's okay, we are just passing down only
    // AMRMToken down to the real AM which eventually sets the correct
    // service-address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken","",
        new File(System.getProperty("user.dir")));
    try {
      // Restrict the token file to the owner before credentials are written.
      FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
      throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    try (DataOutputStream os = new DataOutputStream(
        new FileOutputStream(tokenFile, true))) {
      credentials.writeTokenStorageToStream(os);
    }

    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    // Propagate this process's environment, appending the extra classpath
    // (if configured) to an existing CLASSPATH entry.
    for (Map.Entry<String, String> entry : env.entrySet()) {
      String key = entry.getKey();
      String value = entry.getValue();
      if(key.equals("CLASSPATH")) {
        setClasspath = true;
        if(classpath != null) {
          value = value + File.pathSeparator + classpath;
        }
      }
      envAMList.add(key + "=" + value);
    }

    if(!setClasspath && classpath!=null) {
      envAMList.add("CLASSPATH="+classpath);
    }
    ContainerId containerId = ContainerId.newContainerId(attemptId, 0);

    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    // NOTE(review): the value is "= /tmp" (leading space after '=') — confirm
    // whether the space is intentional.
    envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp");

    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "="
        + System.currentTimeMillis());

    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" +
        tokenFile.getAbsolutePath());

    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));

    final BufferedReader errReader =
        new BufferedReader(new InputStreamReader(
            amProc.getErrorStream(), StandardCharsets.UTF_8));
    final BufferedReader inReader =
        new BufferedReader(new InputStreamReader(
            amProc.getInputStream(), StandardCharsets.UTF_8));

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new SubjectInheritingThread() {
      @Override
      public void work() {
        try {
          String line = errReader.readLine();
          while((line != null) && !isInterrupted()) {
            System.err.println(line);
            line = errReader.readLine();
          }
        } catch(IOException ioe) {
          LOG.warn("Error reading the error stream", ioe);
        }
      }
    };
    Thread outThread = new SubjectInheritingThread() {
      @Override
      public void work() {
        try {
          String line = inReader.readLine();
          while((line != null) && !isInterrupted()) {
            System.out.println(line);
            line = inReader.readLine();
          }
        } catch(IOException ioe) {
          LOG.warn("Error reading the out stream", ioe);
        }
      }
    };
    try {
      errThread.start();
      outThread.start();
    } catch (IllegalStateException ise) {
      // NOTE(review): start() failures are silently ignored — the AM would
      // then run without its output being drained; confirm this is intended.
    }

    // wait for the process to finish and check the exit code
    try {
      int exitCode = amProc.waitFor();
      LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
      e.printStackTrace();
    } finally {
      // Signals monitorApplication() that the child process has terminated.
      amCompleted = true;
    }

    try {
      // make sure that the error thread exits
      // on Windows these threads sometimes get stuck and hang the execution
      // timeout and join later after destroying the process.
      errThread.join();
      outThread.join();
      errReader.close();
      inReader.close();
    } catch (InterruptedException ie) {
      LOG.info("ShellExecutor: Interrupted while reading the error/out stream",
          ie);
    } catch (IOException ioe) {
      LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
  }

  /**
   * Submits the application as an unmanaged AM, launches the AM process once
   * the attempt reaches LAUNCHED, then waits for a terminal application state.
   *
   * @return true if the application FINISHED with SUCCEEDED final status
   */
  public boolean run() throws IOException, YarnException {
    LOG.info("Starting Client");

    // Connect to ResourceManager
    rmClient.start();
    try {
      // Create launch context for app master
      LOG.info("Setting up application submission context for ASM");
      ApplicationSubmissionContext appContext = rmClient.createApplication()
          .getApplicationSubmissionContext();
      ApplicationId appId = appContext.getApplicationId();

      // set the application name
      appContext.setApplicationName(appName);

      // Set the priority for the application master
      Priority pri = Records.newRecord(Priority.class);
      pri.setPriority(amPriority);
      appContext.setPriority(pri);

      // Set the queue to which this application is to be submitted in the RM
      appContext.setQueue(amQueue);

      // Set up the container launch context for the application master
      ContainerLaunchContext amContainer = Records
          .newRecord(ContainerLaunchContext.class);
      appContext.setAMContainerSpec(amContainer);

      // unmanaged AM
      appContext.setUnmanagedAM(true);
      LOG.info("Setting unmanaged AM");

      // Submit the application to the applications manager
      LOG.info("Submitting application to ASM");
      rmClient.submitApplication(appContext);

      ApplicationReport appReport =
          monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED,
              YarnApplicationState.KILLED, YarnApplicationState.FAILED,
              YarnApplicationState.FINISHED));

      if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
        // Monitor the application attempt to wait for launch state
        ApplicationAttemptReport attemptReport =
            monitorCurrentAppAttempt(appId,
                YarnApplicationAttemptState.LAUNCHED);
        ApplicationAttemptId attemptId =
            attemptReport.getApplicationAttemptId();
        LOG.info("Launching AM with application attempt id " + attemptId);
        // launch AM
        launchAM(attemptId);
        // Monitor the application for end state
        appReport =
            monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED,
                YarnApplicationState.FAILED, YarnApplicationState.FINISHED));
      }

      YarnApplicationState appState = appReport.getYarnApplicationState();
      FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus();

      LOG.info("App ended with state: " + appReport.getYarnApplicationState()
          + " and status: " + appStatus);

      boolean success;
      if (YarnApplicationState.FINISHED == appState
          && FinalApplicationStatus.SUCCEEDED == appStatus) {
        LOG.info("Application has completed successfully.");
        success = true;
      } else {
        LOG.info("Application did finished unsuccessfully." + " YarnState="
            + appState.toString() + ", FinalStatus=" + appStatus.toString());
        success = false;
      }
      return success;
    } finally {
      rmClient.stop();
    }
  }

  /**
   * Polls every second until the current attempt of {@code appId} reaches
   * {@code attemptState}, failing with a RuntimeException after
   * {@link #AM_STATE_WAIT_TIMEOUT_MS} milliseconds.
   */
  private ApplicationAttemptReport monitorCurrentAppAttempt(
      ApplicationId appId, YarnApplicationAttemptState attemptState)
      throws YarnException, IOException {
    long startTime = System.currentTimeMillis();
    ApplicationAttemptId attemptId = null;
    while (true) {
      if (attemptId == null) {
        attemptId =
            rmClient.getApplicationReport(appId)
                .getCurrentApplicationAttemptId();
      }
      ApplicationAttemptReport attemptReport = null;
      if (attemptId != null) {
        attemptReport = rmClient.getApplicationAttemptReport(attemptId);
        if (attemptState.equals(attemptReport.getYarnApplicationAttemptState())) {
          return attemptReport;
        }
      }
      LOG.info("Current attempt state of " + appId + " is " + (attemptReport == null
          ? " N/A " : attemptReport.getYarnApplicationAttemptState())
          + ", waiting for current attempt to reach " + attemptState);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOG.warn("Interrupted while waiting for current attempt of " + appId
            + " to reach " + attemptState);
      }
      if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) {
        String errmsg =
            "Timeout for waiting current attempt of " + appId + " to reach "
                + attemptState;
        LOG.error(errmsg);
        throw new RuntimeException(errmsg);
      }
    }
  }

  /**
   * Monitor the submitted application until it reaches one of the given
   * states. Once the AM process has exited (amCompleted), waits at most
   * {@link #AM_STATE_WAIT_TIMEOUT_MS} ms more for the report to catch up
   * before failing.
   *
   * @param appId
   *          Application Id of application to be monitored
   * @return the application report once a state in {@code finalState} is seen
   * @throws YarnException
   * @throws IOException
   */
  private ApplicationReport monitorApplication(ApplicationId appId,
      Set<YarnApplicationState> finalState) throws YarnException,
      IOException {

    long foundAMCompletedTime = 0;
    StringBuilder expectedFinalState = new StringBuilder();
    boolean first = true;
    for (YarnApplicationState state : finalState) {
      if (first) {
        first = false;
        expectedFinalState.append(state.name());
      } else {
        expectedFinalState.append("," + state.name());
      }
    }

    while (true) {

      // Check app status every 1 second.
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOG.debug("Thread sleep in monitoring loop interrupted");
      }

      // Get application report for the appId we are interested in
      ApplicationReport report = rmClient.getApplicationReport(appId);

      LOG.info("Got application report from ASM for" + ", appId="
          + appId.getId() + ", appAttemptId="
          + report.getCurrentApplicationAttemptId() + ", clientToAMToken="
          + report.getClientToAMToken() + ", appDiagnostics="
          + report.getDiagnostics() + ", appMasterHost=" + report.getHost()
          + ", appQueue=" + report.getQueue() + ", appMasterRpcPort="
          + report.getRpcPort() + ", appStartTime=" + report.getStartTime()
          + ", yarnAppState=" + report.getYarnApplicationState().toString()
          + ", distributedFinalState="
          + report.getFinalApplicationStatus().toString() + ", appTrackingUrl="
          + report.getTrackingUrl() + ", appUser=" + report.getUser());

      YarnApplicationState state = report.getYarnApplicationState();
      if (finalState.contains(state)) {
        return report;
      }

      // wait for 10 seconds after process has completed for app report to
      // come back
      if (amCompleted) {
        if (foundAMCompletedTime == 0) {
          foundAMCompletedTime = System.currentTimeMillis();
        } else if ((System.currentTimeMillis() - foundAMCompletedTime)
            > AM_STATE_WAIT_TIMEOUT_MS) {
          LOG.warn("Waited " + AM_STATE_WAIT_TIMEOUT_MS/1000
              + " seconds after process completed for AppReport"
              + " to reach desired final state. Not waiting anymore."
              + "CurrentState = " + state
              + ", ExpectedStates = " + expectedFinalState.toString());
          throw new RuntimeException("Failed to receive final expected state"
              + " in ApplicationReport"
              + ", CurrentState=" + state
              + ", ExpectedStates=" + expectedFinalState.toString());
        }
      }
    }
  }
}
|
UnmanagedAMLauncher
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/beanmanager/BeanManagerTest.java
|
{
"start": 12670,
"end": 13050
}
|
class ____ {

        // Flipped in @PreDestroy; presumably asserted by the enclosing test to
        // verify the bean was destroyed — confirm against the test methods.
        static final AtomicBoolean DESTROYED = new AtomicBoolean();

        @Inject
        BeanManager beanManager;

        public BeanManager getBeanManager() {
            return beanManager;
        }

        @PreDestroy
        void destroy() {
            DESTROYED.set(true);
        }
    }
@Priority(1)
@Alternative
@Dependent
static
|
Legacy
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/interceptor/CustomInterceptorRouteWithChildOutputTest.java
|
{
"start": 2981,
"end": 3475
}
|
class ____ implements InterceptStrategy {

        // Records every node definition this strategy was asked to wrap,
        // in the order the wrap calls arrived.
        private final List<ProcessorDefinition> defs = new ArrayList<>();

        @Override
        public Processor wrapProcessorInInterceptors(
                CamelContext context, NamedNode definition, Processor target, Processor nextTarget) {
            ProcessorDefinition<?> def = (ProcessorDefinition<?>) definition;
            defs.add(def);
            // Pass-through strategy: the original processor is returned unwrapped.
            return target;
        }

        public List<ProcessorDefinition> getDefs() {
            return defs;
        }
    }
}
|
MyInterceptor
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/authentication/OAuth2DeviceAuthorizationRequestAuthenticationProvider.java
|
{
"start": 3601,
"end": 9951
}
|
class ____ implements AuthenticationProvider {
    private static final String ERROR_URI = "https://datatracker.ietf.org/doc/html/rfc6749#section-5.2";

    // Token types used in authenticate() to build the generation contexts for
    // the device code and user code respectively.
    static final OAuth2TokenType DEVICE_CODE_TOKEN_TYPE = new OAuth2TokenType(OAuth2ParameterNames.DEVICE_CODE);

    static final OAuth2TokenType USER_CODE_TOKEN_TYPE = new OAuth2TokenType(OAuth2ParameterNames.USER_CODE);

    private final Log logger = LogFactory.getLog(getClass());

    private final OAuth2AuthorizationService authorizationService;

    // Default generators; replaceable via setDeviceCodeGenerator/setUserCodeGenerator.
    private OAuth2TokenGenerator<OAuth2DeviceCode> deviceCodeGenerator = new OAuth2DeviceCodeGenerator();

    private OAuth2TokenGenerator<OAuth2UserCode> userCodeGenerator = new OAuth2UserCodeGenerator();

    /**
     * Constructs an {@code OAuth2DeviceAuthorizationRequestAuthenticationProvider} using
     * the provided parameters.
     * @param authorizationService the authorization service
     */
    public OAuth2DeviceAuthorizationRequestAuthenticationProvider(OAuth2AuthorizationService authorizationService) {
        Assert.notNull(authorizationService, "authorizationService cannot be null");
        this.authorizationService = authorizationService;
    }
    /**
     * Authenticates a device authorization request: validates the client's
     * grant types and requested scopes, generates a device code and user code,
     * stores them on a new {@code OAuth2Authorization}, and returns the
     * populated authentication token.
     */
    @Override
    public Authentication authenticate(Authentication authentication) throws AuthenticationException {
        OAuth2DeviceAuthorizationRequestAuthenticationToken deviceAuthorizationRequestAuthentication = (OAuth2DeviceAuthorizationRequestAuthenticationToken) authentication;

        // Resolve the authenticated client; throws invalid_client otherwise.
        OAuth2ClientAuthenticationToken clientPrincipal = OAuth2AuthenticationProviderUtils
            .getAuthenticatedClientElseThrowInvalidClient(deviceAuthorizationRequestAuthentication);
        RegisteredClient registeredClient = clientPrincipal.getRegisteredClient();

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Retrieved registered client");
        }

        // The client must be registered for the device_code grant.
        if (!registeredClient.getAuthorizationGrantTypes().contains(AuthorizationGrantType.DEVICE_CODE)) {
            if (this.logger.isDebugEnabled()) {
                this.logger.debug(LogMessage.format(
                        "Invalid request: requested grant_type is not allowed" + " for registered client '%s'",
                        registeredClient.getId()));
            }
            throwError(OAuth2ErrorCodes.UNAUTHORIZED_CLIENT, OAuth2ParameterNames.CLIENT_ID);
        }

        // Every requested scope must be registered for the client, and the
        // openid scope is rejected for this grant.
        Set<String> requestedScopes = deviceAuthorizationRequestAuthentication.getScopes();
        if (!CollectionUtils.isEmpty(requestedScopes)) {
            for (String requestedScope : requestedScopes) {
                if (!registeredClient.getScopes().contains(requestedScope)) {
                    throwError(OAuth2ErrorCodes.INVALID_SCOPE, OAuth2ParameterNames.SCOPE);
                }
            }
            if (requestedScopes.contains(OidcScopes.OPENID)) {
                throwError(OAuth2ErrorCodes.INVALID_SCOPE, OAuth2ParameterNames.SCOPE);
            }
        }

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Validated device authorization request parameters");
        }

        // @formatter:off
        DefaultOAuth2TokenContext.Builder tokenContextBuilder = DefaultOAuth2TokenContext.builder()
                .registeredClient(registeredClient)
                .principal(clientPrincipal)
                .authorizationServerContext(AuthorizationServerContextHolder.getContext())
                .authorizationGrantType(AuthorizationGrantType.DEVICE_CODE)
                .authorizationGrant(deviceAuthorizationRequestAuthentication);
        // @formatter:on

        // Generate a high-entropy string to use as the device code
        OAuth2TokenContext tokenContext = tokenContextBuilder.tokenType(DEVICE_CODE_TOKEN_TYPE).build();
        OAuth2DeviceCode deviceCode = this.deviceCodeGenerator.generate(tokenContext);
        if (deviceCode == null) {
            OAuth2Error error = new OAuth2Error(OAuth2ErrorCodes.SERVER_ERROR,
                    "The token generator failed to generate the device code.", ERROR_URI);
            throw new OAuth2AuthenticationException(error);
        }

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Generated device code");
        }

        // Generate a low-entropy string to use as the user code
        tokenContext = tokenContextBuilder.tokenType(USER_CODE_TOKEN_TYPE).build();
        OAuth2UserCode userCode = this.userCodeGenerator.generate(tokenContext);
        if (userCode == null) {
            OAuth2Error error = new OAuth2Error(OAuth2ErrorCodes.SERVER_ERROR,
                    "The token generator failed to generate the user code.", ERROR_URI);
            throw new OAuth2AuthenticationException(error);
        }

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Generated user code");
        }

        // Persist both codes and the requested scopes on a new authorization
        // so the subsequent device-grant token request can look them up.
        // @formatter:off
        OAuth2Authorization authorization = OAuth2Authorization.withRegisteredClient(registeredClient)
                .principalName(clientPrincipal.getName())
                .authorizationGrantType(AuthorizationGrantType.DEVICE_CODE)
                .token(deviceCode)
                .token(userCode)
                .attribute(OAuth2ParameterNames.SCOPE, new HashSet<>(requestedScopes))
                .build();
        // @formatter:on
        this.authorizationService.save(authorization);

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Saved authorization");
        }

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Authenticated device authorization request");
        }

        return new OAuth2DeviceAuthorizationRequestAuthenticationToken(clientPrincipal, requestedScopes, deviceCode,
                userCode);
    }
    @Override
    public boolean supports(Class<?> authentication) {
        // This provider only handles device authorization request tokens.
        return OAuth2DeviceAuthorizationRequestAuthenticationToken.class.isAssignableFrom(authentication);
    }

    /**
     * Sets the {@link OAuth2TokenGenerator} that generates the {@link OAuth2DeviceCode}.
     * @param deviceCodeGenerator the {@link OAuth2TokenGenerator} that generates the
     * {@link OAuth2DeviceCode}
     */
    public void setDeviceCodeGenerator(OAuth2TokenGenerator<OAuth2DeviceCode> deviceCodeGenerator) {
        Assert.notNull(deviceCodeGenerator, "deviceCodeGenerator cannot be null");
        this.deviceCodeGenerator = deviceCodeGenerator;
    }

    /**
     * Sets the {@link OAuth2TokenGenerator} that generates the {@link OAuth2UserCode}.
     * @param userCodeGenerator the {@link OAuth2TokenGenerator} that generates the
     * {@link OAuth2UserCode}
     */
    public void setUserCodeGenerator(OAuth2TokenGenerator<OAuth2UserCode> userCodeGenerator) {
        Assert.notNull(userCodeGenerator, "userCodeGenerator cannot be null");
        this.userCodeGenerator = userCodeGenerator;
    }

    // Builds and throws an OAuth2AuthenticationException naming the offending
    // parameter, pointing at RFC 6749 section 5.2.
    private static void throwError(String errorCode, String parameterName) {
        OAuth2Error error = new OAuth2Error(errorCode, "OAuth 2.0 Parameter: " + parameterName, ERROR_URI);
        throw new OAuth2AuthenticationException(error);
    }
private static final
|
OAuth2DeviceAuthorizationRequestAuthenticationProvider
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
|
{
"start": 67542,
"end": 70304
}
|
class ____<T extends ClusterStateTaskListener> extends AbstractRunnable {

        private final TimeValue timeout;
        private final String source;
        private final AtomicReference<T> taskHolder; // atomically read and set to null by at most one of {execute, timeout}

        private TaskTimeoutHandler(TimeValue timeout, String source, AtomicReference<T> taskHolder) {
            this.timeout = timeout;
            this.source = source;
            this.taskHolder = taskHolder;
        }

        @Override
        public void onRejection(Exception e) {
            // Only expected when the scheduler is shutting down; still fail the task.
            assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e;
            completeTask(e);
        }

        @Override
        public void onFailure(Exception e) {
            logger.error("unexpected failure executing task timeout handler", e);
            assert false : e;
            completeTask(e);
        }

        @Override
        public boolean isForceExecution() {
            return true;
        }

        @Override
        protected void doRun() {
            // Timeout fired before the task executed: fail it with a timeout exception.
            completeTask(new ProcessClusterEventTimeoutException(timeout, source));
        }

        private void completeTask(Exception e) {
            // getAndSet(null) guarantees exactly one of {execute, timeout}
            // delivers a result to the task (see taskHolder field comment).
            final var task = taskHolder.getAndSet(null);
            if (task != null) {
                logger.trace("timing out [{}][{}] after [{}]", source, task, timeout);
                task.onFailure(e);
            }
        }

        @Override
        public String toString() {
            return getTimeoutTaskDescription(source, taskHolder.get(), timeout);
        }
    }
static String getTimeoutTaskDescription(String source, Object task, TimeValue timeout) {
return Strings.format("master service timeout handler for [%s][%s] after [%s]", source, task, timeout);
}
/**
* Actual implementation of {@link MasterServiceTaskQueue} exposed to clients. Conceptually, each entry in each {@link PerPriorityQueue}
* is a {@link BatchingTaskQueue} representing a batch of tasks to be executed. Clients may add more tasks to each of these queues prior
* to their execution.
*
* Works similarly to {@link PerPriorityQueue} in that the queue size is tracked in a threadsafe fashion so that we can detect
* transitions between empty and nonempty queues and arrange to process the queue if and only if it's nonempty. There is only ever one
* active processor for each such queue.
*
* Works differently from {@link PerPriorityQueue} in that each time the queue is processed it will drain all the pending items at once
* and process them in a single batch.
*
* Also handles that tasks may time out before being processed.
*/
private static
|
TaskTimeoutHandler
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/AutoConfigurationsTests.java
|
{
"start": 940,
"end": 2397
}
|
class ____ {
@Test
void ofShouldCreateOrderedConfigurations() {
Configurations configurations = AutoConfigurations.of(AutoConfigureA.class, AutoConfigureB.class);
assertThat(Configurations.getClasses(configurations)).containsExactly(AutoConfigureB.class,
AutoConfigureA.class);
}
@Test
void whenHasReplacementForAutoConfigureAfterShouldCreateOrderedConfigurations() {
Configurations configurations = new AutoConfigurations(this::replaceB,
Arrays.asList(AutoConfigureA.class, AutoConfigureB2.class));
assertThat(Configurations.getClasses(configurations)).containsExactly(AutoConfigureB2.class,
AutoConfigureA.class);
}
@Test
void whenHasReplacementForClassShouldReplaceClass() {
Configurations configurations = new AutoConfigurations(this::replaceB,
Arrays.asList(AutoConfigureA.class, AutoConfigureB.class));
assertThat(Configurations.getClasses(configurations)).containsExactly(AutoConfigureB2.class,
AutoConfigureA.class);
}
@Test
void getBeanNameShouldUseClassName() {
Configurations configurations = AutoConfigurations.of(AutoConfigureA.class, AutoConfigureB.class);
assertThat(configurations.getBeanName(AutoConfigureA.class)).isEqualTo(AutoConfigureA.class.getName());
}
private String replaceB(String className) {
return (!AutoConfigureB.class.getName().equals(className)) ? className : AutoConfigureB2.class.getName();
}
@AutoConfigureAfter(AutoConfigureB.class)
static
|
AutoConfigurationsTests
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
|
{
"start": 1664,
"end": 1743
}
|
class ____ extends DeferableBucketAggregator {
/**
* This
|
TermsAggregator
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringRedeliverToSubRouteTest.java
|
{
"start": 1044,
"end": 1316
}
|
class ____ extends RedeliverToSubRouteTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/RedeliverToSubRouteTest.xml");
}
}
|
SpringRedeliverToSubRouteTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_2200/Issue2264.java
|
{
"start": 161,
"end": 4905
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
String oID="{\"sys\":\"ROC\",\"code\":0,\"messages\":\"分页获取信息成功!\",\"data\":{\"pageNum\":1,\"pageSize\":10,\"totalPages\":11,\"total\":110,\"records\":[{\"id\":\"64e72850-d149-46d6-8bd7-5f1d332d2a16\",\"tenantCode\":\"clouds_dianmo\",\"name\":\"asr_t1\",\"operatorId\":\"38ba5660-ef6e-4b66-9673-b0236832f179\",\"createTime\":\"2019-01-25 14:21:03\",\"updateTime\":\"2019-01-25 14:21:03\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"liyiwan\",\"insRcuCnt\":0,\"distRcuCnt\":0},{\"id\":\"4f6a0975-3980-4fd9-b27c-09aa258f4e36\",\"tenantCode\":\"cloudminds\",\"name\":\"xianglong\",\"operatorId\":\"b9bf937f-01c6-4fe8-86f8-43ce7a08167a\",\"createTime\":\"2019-01-25 11:48:03\",\"updateTime\":\"2019-01-25 13:03:00\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"zhangxianglong\",\"insRcuCnt\":0,\"distRcuCnt\":1},{\"id\":\"b209b3b8-7b41-49dd-a087-fb7f6b5bfa51\",\"tenantCode\":\"cloudminds\",\"name\":\"cloud_pu\",\"operatorId\":\"21d08412-9c19-49c0-9428-a6a5ad1bb548\",\"createTime\":\"2019-01-25 11:45:14\",\"updateTime\":\"2019-01-25 11:45:14\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"dian\",\"insRcuCnt\":0,\"distRcuCnt\":1},{\"id\":\"a35e468d-3ff5-48e4-a0e9-b86249167ee5\",\"tenantCode\":\"CloudPepper_Test\",\"name\":\"welcome\",\"operatorId\":\"ca69a720-8b8e-4ee5-8b12-63a20e897ef1\",\"createTime\":\"2019-01-25 11:05:42\",\"updateTime\":\"2019-01-25 14:07:05\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"duwei\",\"insRcuCnt\":0,\"distRcuCnt\":1},{\"id\":\"25243f56-b31d-4b58-bd96-c6920628b06c\",\"tenantCode\":\"roc\",\"name\":\"士大夫撒点\",\"operatorId\":\"06f82222-48a4-4a6a-b1cc-52148ed27651\",\"createTime\":\"2019-01-25 
11:02:02\",\"updateTime\":\"2019-01-25 11:02:02\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"admin\",\"insRcuCnt\":0,\"distRcuCnt\":0},{\"id\":\"229d9c33-0606-4cda-a4d5-8c1feba2a5ed\",\"tenantCode\":\"cloudminds\",\"name\":\"LocalAsr\",\"operatorId\":\"38ba5660-ef6e-4b66-9673-b0236832f179\",\"createTime\":\"2019-01-25 10:51:43\",\"updateTime\":\"2019-01-25 10:51:43\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"liyiwan\",\"insRcuCnt\":0,\"distRcuCnt\":0},{\"id\":\"3aedd158-24b8-4021-a9a3-d6effc91a32a\",\"tenantCode\":\"cloudminds\",\"name\":\"cloudAsr\",\"operatorId\":\"38ba5660-ef6e-4b66-9673-b0236832f179\",\"createTime\":\"2019-01-25 10:27:59\",\"updateTime\":\"2019-01-25 10:27:59\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"liyiwan\",\"insRcuCnt\":0,\"distRcuCnt\":1},{\"id\":\"53065639-a467-4872-8333-73e085c99e43\",\"tenantCode\":\"CloudPepper_Test\",\"name\":\"asrtest\",\"operatorId\":\"394e0148-ba95-4c39-a9f9-973abb2c718a\",\"createTime\":\"2019-01-25 10:17:36\",\"updateTime\":\"2019-01-25 13:12:01\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"liuyanan\",\"insRcuCnt\":0,\"distRcuCnt\":1},{\"id\":\"da2db833-c065-49dd-bdb7-939c2026faa3\",\"tenantCode\":\"CloudPepper_Test\",\"name\":\"testwqeq\",\"operatorId\":\"bb5cd865-baea-42a0-a36d-b9e354b88f27\",\"createTime\":\"2019-01-24 19:20:04\",\"updateTime\":\"2019-01-24 
19:20:27\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"cqtest01\",\"insRcuCnt\":0,\"distRcuCnt\":0},{\"id\":\"da672b14-d968-4776-97ba-b7c1addaa3b3\",\"tenantCode\":\"CloudPepper_Test\",\"name\":\"cqtestASR\",\"operatorId\":\"bb5cd865-baea-42a0-a36d-b9e354b88f27\",\"createTime\":\"2019-01-24 16:46:40\",\"updateTime\":\"2019-01-24 18:14:15\",\"status\":0,\"robotType\":1,\"policyType\":0,\"policyVersion\":null,\"description\":null,\"extensionJson\":null,\"operatorCode\":\"cqtest01\",\"insRcuCnt\":0,\"distRcuCnt\":2}]},\"errors\":null,\"action\":0,\"script\":\"\"}";
JSONObject json = JSONObject.parseObject(oID);
String par="$..records[?(@.name=='asr_t1')].operatorId";
Object source = JSONPath.eval(json,par);
String device_udid=JSONObject.toJSONString(source);
assertEquals("[\"38ba5660-ef6e-4b66-9673-b0236832f179\"]", device_udid);
}
}
|
Issue2264
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/CompositeIdGenerationTypeTest.java
|
{
"start": 4098,
"end": 4350
}
|
class ____ {
private Long id;
private String uuid;
public IdClassPK() {
}
public IdClassPK(Long id, String uuid) {
this.id = id;
this.uuid = uuid;
}
}
@Entity( name = "MultipleIdClass" )
@IdClass( IdClassPK.class )
static
|
IdClassPK
|
java
|
apache__kafka
|
connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java
|
{
"start": 1049,
"end": 2453
}
|
class ____ extends MirrorSourceConfig {
private static final String TASK_TOPIC_PARTITIONS_DOC = "Topic-partitions assigned to this task to replicate.";
public MirrorSourceTaskConfig(Map<String, String> props) {
super(TASK_CONFIG_DEF, props);
}
Set<TopicPartition> taskTopicPartitions() {
List<String> fields = getList(TASK_TOPIC_PARTITIONS);
return fields.stream()
.map(MirrorUtils::decodeTopicPartition)
.collect(Collectors.toSet());
}
MirrorSourceMetrics metrics() {
MirrorSourceMetrics metrics = new MirrorSourceMetrics(this);
metricsReporters().forEach(metrics::addReporter);
return metrics;
}
@Override
String entityLabel() {
return super.entityLabel() + "-" + (getInt(TASK_INDEX) == null ? "?" : getInt(TASK_INDEX));
}
protected static final ConfigDef TASK_CONFIG_DEF = new ConfigDef(CONNECTOR_CONFIG_DEF)
.define(
TASK_TOPIC_PARTITIONS,
ConfigDef.Type.LIST,
ConfigDef.NO_DEFAULT_VALUE,
ConfigDef.ValidList.anyNonDuplicateValues(false, false),
ConfigDef.Importance.LOW,
TASK_TOPIC_PARTITIONS_DOC)
.define(TASK_INDEX,
ConfigDef.Type.INT,
null,
ConfigDef.Importance.LOW,
"The index of the task");
}
|
MirrorSourceTaskConfig
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/scan/ScannedFactoryBeanConfiguration.java
|
{
"start": 1067,
"end": 1215
}
|
class ____ {
@Bean
public FactoryBean<ScanBean> exampleBeanFactoryBean() {
return new ScanFactoryBean("foo");
}
}
|
ScannedFactoryBeanConfiguration
|
java
|
grpc__grpc-java
|
examples/android/clientcache/app/src/main/java/io/grpc/clientcacheexample/SafeMethodCachingInterceptor.java
|
{
"start": 3048,
"end": 10615
}
|
interface ____ {
void put(Key key, Value value);
Value get(Key key);
void remove(Key key);
void clear();
}
/**
* Obtain a new cache with a least-recently used eviction policy and the specified size limit. The
* backing caching implementation is provided by {@link LruCache}. It is safe for a single cache
* to be shared across multiple {@link SafeMethodCachingInterceptor}s without synchronization.
*/
public static Cache newLruCache(final int cacheSizeInBytes) {
return new Cache() {
private final LruCache<Key, Value> lruCache =
new LruCache<Key, Value>(cacheSizeInBytes) {
protected int sizeOf(Key key, Value value) {
return value.response.getSerializedSize();
}
};
@Override
public void put(Key key, Value value) {
lruCache.put(key, value);
}
@Override
public Value get(Key key) {
return lruCache.get(key);
}
@Override
public void remove(Key key) {
lruCache.remove(key);
}
@Override
public void clear() {
lruCache.evictAll();
}
};
}
public static SafeMethodCachingInterceptor newSafeMethodCachingInterceptor(Cache cache) {
return newSafeMethodCachingInterceptor(cache, DEFAULT_MAX_AGE_SECONDS);
}
public static SafeMethodCachingInterceptor newSafeMethodCachingInterceptor(
Cache cache, int defaultMaxAge) {
return new SafeMethodCachingInterceptor(cache, defaultMaxAge);
}
private static int DEFAULT_MAX_AGE_SECONDS = 3600;
private static final Metadata.Key<String> CACHE_CONTROL_KEY =
Metadata.Key.of("cache-control", Metadata.ASCII_STRING_MARSHALLER);
private static final Splitter CACHE_CONTROL_SPLITTER =
Splitter.on(',').trimResults().omitEmptyStrings();
private final Cache internalCache;
private final int defaultMaxAge;
private SafeMethodCachingInterceptor(Cache cache, int defaultMaxAge) {
this.internalCache = cache;
this.defaultMaxAge = defaultMaxAge;
}
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
final MethodDescriptor<ReqT, RespT> method, final CallOptions callOptions, Channel next) {
// Currently only unary methods can be marked safe, but check anyways.
if (!method.isSafe() || method.getType() != MethodDescriptor.MethodType.UNARY) {
return next.newCall(method, callOptions);
}
final String fullMethodName = method.getFullMethodName();
return new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(
next.newCall(method, callOptions)) {
private Listener<RespT> interceptedListener;
private Key requestKey;
private boolean cacheResponse = true;
private volatile String cacheOptionsErrorMsg;
@Override
public void start(Listener<RespT> responseListener, Metadata headers) {
interceptedListener =
new ForwardingClientCallListener.SimpleForwardingClientCallListener<RespT>(
responseListener) {
private Deadline deadline;
private int maxAge = -1;
@Override
public void onHeaders(Metadata headers) {
Iterable<String> cacheControlHeaders = headers.getAll(CACHE_CONTROL_KEY);
if (cacheResponse && cacheControlHeaders != null) {
for (String cacheControlHeader : cacheControlHeaders) {
for (String directive : CACHE_CONTROL_SPLITTER.split(cacheControlHeader)) {
if (directive.equalsIgnoreCase("no-cache")) {
cacheResponse = false;
break;
} else if (directive.equalsIgnoreCase("no-store")) {
cacheResponse = false;
break;
} else if (directive.equalsIgnoreCase("no-transform")) {
cacheResponse = false;
break;
} else if (directive.toLowerCase(Locale.US).startsWith("max-age")) {
String[] parts = directive.split("=");
if (parts.length == 2) {
try {
maxAge = Integer.parseInt(parts[1]);
} catch (NumberFormatException e) {
Log.e(TAG, "max-age directive failed to parse", e);
continue;
}
}
}
}
}
}
if (cacheResponse) {
if (maxAge > -1) {
deadline = Deadline.after(maxAge, TimeUnit.SECONDS);
} else {
deadline = Deadline.after(defaultMaxAge, TimeUnit.SECONDS);
}
}
super.onHeaders(headers);
}
@Override
public void onMessage(RespT message) {
if (cacheResponse && !deadline.isExpired()) {
Value value = new Value((MessageLite) message, deadline);
internalCache.put(requestKey, value);
}
super.onMessage(message);
}
@Override
public void onClose(Status status, Metadata trailers) {
if (cacheOptionsErrorMsg != null) {
// UNAVAILABLE is the canonical gRPC mapping for HTTP response code 504 (as used
// by the built-in Android HTTP request cache).
super.onClose(
Status.UNAVAILABLE.withDescription(cacheOptionsErrorMsg), new Metadata());
} else {
super.onClose(status, trailers);
}
}
};
delegate().start(interceptedListener, headers);
}
@Override
public void sendMessage(ReqT message) {
boolean noCache = callOptions.getOption(NO_CACHE_CALL_OPTION);
boolean onlyIfCached = callOptions.getOption(ONLY_IF_CACHED_CALL_OPTION);
if (noCache) {
if (onlyIfCached) {
cacheOptionsErrorMsg = "Unsatisfiable Request (no-cache and only-if-cached conflict)";
super.cancel(cacheOptionsErrorMsg, null);
return;
}
cacheResponse = false;
super.sendMessage(message);
return;
}
// Check the cache
requestKey = new Key(fullMethodName, (MessageLite) message);
Value cachedResponse = internalCache.get(requestKey);
if (cachedResponse != null) {
if (cachedResponse.maxAgeDeadline.isExpired()) {
internalCache.remove(requestKey);
} else {
cacheResponse = false; // already cached
interceptedListener.onMessage((RespT) cachedResponse.response);
Metadata metadata = new Metadata();
interceptedListener.onClose(Status.OK, metadata);
return;
}
}
if (onlyIfCached) {
cacheOptionsErrorMsg =
"Unsatisfiable Request (only-if-cached set, but value not in cache)";
super.cancel(cacheOptionsErrorMsg, null);
return;
}
super.sendMessage(message);
}
@Override
public void halfClose() {
if (cacheOptionsErrorMsg != null) {
// already canceled
return;
}
super.halfClose();
}
};
}
}
|
Cache
|
java
|
processing__processing4
|
app/src/processing/app/ui/ColorChooser.java
|
{
"start": 16490,
"end": 17017
}
|
class ____ extends JTextField {
public boolean allowHex;
public NumberField(int cols, boolean allowHex) {
super(cols);
this.allowHex = allowHex;
}
protected Document createDefaultModel() {
return new NumberDocument(this);
}
public Dimension getMinimumSize() {
return getPreferredSize();
}
public Dimension getMaximumSize() {
return getPreferredSize();
}
}
/**
* Document model to go with JTextField that only allows numbers.
*/
static
|
NumberField
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/shareddata/impl/Checker.java
|
{
"start": 1059,
"end": 3426
}
|
class ____ {
private static final Logger log = LoggerFactory.getLogger(Checker.class);
private static final Set<Class<?>> IMMUTABLE_TYPES = Stream.<Class<?>>builder()
.add(String.class)
.add(Integer.class)
.add(Long.class)
.add(Boolean.class)
.add(Double.class)
.add(Float.class)
.add(Short.class)
.add(Byte.class)
.add(Character.class)
.add(BigInteger.class)
.add(BigDecimal.class)
.build()
.collect(toSet());
static void checkType(Object obj) {
Objects.requireNonNull(obj, "null not allowed for shareddata data structure");
// All immutables and byte arrays are Serializable by the platform
if (!(obj instanceof Serializable || obj instanceof Shareable || obj instanceof ClusterSerializable)) {
throw new IllegalArgumentException("Invalid type for shareddata data structure: " + obj.getClass().getName());
}
}
@SuppressWarnings("unchecked")
static <T> T copyIfRequired(T obj) {
Object result;
if (obj == null) {
// Happens with putIfAbsent
result = null;
} else if (IMMUTABLE_TYPES.contains(obj.getClass())) {
result = obj;
} else if (obj instanceof byte[]) {
result = copyByteArray((byte[]) obj);
} else if (obj instanceof Shareable) {
result = ((Shareable) obj).copy();
} else if (obj instanceof ClusterSerializable) {
result = copyClusterSerializable((ClusterSerializable) obj);
} else if (obj instanceof Serializable) {
result = copySerializable(obj);
} else {
throw new IllegalStateException();
}
return (T) result;
}
private static byte[] copyByteArray(byte[] bytes) {
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return copy;
}
private static ClusterSerializable copyClusterSerializable(ClusterSerializable obj) {
logDeveloperInfo(obj);
return ClusterSerializableUtils.copy(obj);
}
private static void logDeveloperInfo(Object obj) {
if (log.isDebugEnabled()) {
log.debug("Copying " + obj.getClass() + " for shared data. Consider implementing " + Shareable.class + " for better performance.");
}
}
private static Object copySerializable(Object obj) {
logDeveloperInfo(obj);
return SerializableUtils.fromBytes(SerializableUtils.toBytes(obj), ObjectInputStream::new);
}
}
|
Checker
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
|
{
"start": 1636,
"end": 4229
}
|
class ____ {
static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
{
GenericTestUtils.setLogLevel(DataNode.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.TRACE);
}
/** Test lease recovery Triggered by DFSClient. */
@Test
public void testClientTriggeredLeaseRecovery() throws Exception {
final int REPLICATION = 3;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
final FileSystem fs = cluster.getFileSystem();
final Path dir = new Path("/wrwelkj");
SlowWriter[] slowwriters = new SlowWriter[10];
for(int i = 0; i < slowwriters.length; i++) {
slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
}
try {
for(int i = 0; i < slowwriters.length; i++) {
slowwriters[i].start();
}
Thread.sleep(1000); // let writers get started
//stop a datanode, it should have least recover.
cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
//let the slow writer writes a few more seconds
System.out.println("Wait a few seconds");
Thread.sleep(5000);
}
finally {
for(int i = 0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].running = false;
slowwriters[i].interrupt();
}
}
for(int i = 0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].join();
}
}
}
//Verify the file
System.out.println("Verify the file");
for(int i = 0; i < slowwriters.length; i++) {
System.out.println(slowwriters[i].filepath + ": length="
+ fs.getFileStatus(slowwriters[i].filepath).getLen());
FSDataInputStream in = null;
try {
in = fs.open(slowwriters[i].filepath);
for(int j = 0, x; (x = in.read()) != -1; j++) {
assertEquals(j, x);
}
}
finally {
IOUtils.closeStream(in);
}
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
static
|
TestFileCreationClient
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/StackLocatorUtil.java
|
{
"start": 3737,
"end": 3886
}
|
class ____ which to begin searching
* @param callerPredicate Predicate checked after the sentinelClass is found
* @return the first matching
|
at
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectL1.java
|
{
"start": 121,
"end": 1631
}
|
class ____ {
List<ObjectL1_A> a;
int b;
int c;
int d;
long e;
long f;
List<ObjectL2_B> g;
List<CommonObject> h;
HashMap<Integer, HashMap<Integer, ObjectL2_C>> i;
boolean j = false;
public List<ObjectL1_A> getA() {
return a;
}
public void setA(List<ObjectL1_A> a) {
this.a = a;
}
public int getB() {
return b;
}
public void setB(int b) {
this.b = b;
}
public int getC() {
return c;
}
public void setC(int c) {
this.c = c;
}
public int getD() {
return d;
}
public void setD(int d) {
this.d = d;
}
public long getE() {
return e;
}
public void setE(long e) {
this.e = e;
}
public long getF() {
return f;
}
public void setF(long f) {
this.f = f;
}
public List<ObjectL2_B> getG() {
return g;
}
public void setG(List<ObjectL2_B> g) {
this.g = g;
}
public List<CommonObject> getH() {
return h;
}
public void setH(List<CommonObject> h) {
this.h = h;
}
public HashMap<Integer, HashMap<Integer, ObjectL2_C>> getI() {
return i;
}
public void setI(HashMap<Integer, HashMap<Integer, ObjectL2_C>> i) {
this.i = i;
}
public boolean isJ() {
return j;
}
public void setJ(boolean j) {
this.j = j;
}
}
|
ObjectL1
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/basics/sql/MapKeyJdbcTypeTests.java
|
{
"start": 4270,
"end": 4663
}
|
class ____ {
@Id
private Integer id;
@ElementCollection
private Map<Integer,String> baseMap;
@ElementCollection
@JdbcTypeCode( Types.NVARCHAR )
@MapKeyJdbcTypeCode( Types.TINYINT )
private Map<Integer,String> sqlTypeCodeMap;
@ElementCollection
@JdbcTypeCode( Types.NVARCHAR )
@MapKeyJdbcType( TinyIntJdbcType.class )
private Map<Integer,String> sqlTypeMap;
}
}
|
MyEntity
|
java
|
apache__rocketmq
|
test/src/test/java/org/apache/rocketmq/test/container/SlaveBrokerIT.java
|
{
"start": 1200,
"end": 5372
}
|
class ____ extends ContainerIntegrationTestBase {
@Test
public void reAddSlaveBroker() throws Exception {
await().atMost(Duration.ofMinutes(1)).until(() -> {
ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo();
if (clusterInfo.getClusterAddrTable().get(master1With3Replicas.getBrokerConfig().getBrokerClusterName()).size() != 3) {
return false;
}
if (clusterInfo.getBrokerAddrTable().get(master1With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() != 3) {
return false;
}
if (clusterInfo.getBrokerAddrTable().get(master2With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() != 3) {
return false;
}
if (clusterInfo.getBrokerAddrTable().get(master3With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() != 3) {
return false;
}
return true;
});
// Remove one replicas from each broker group
removeSlaveBroker(1, brokerContainer1, master3With3Replicas);
removeSlaveBroker(1, brokerContainer2, master1With3Replicas);
removeSlaveBroker(1, brokerContainer3, master2With3Replicas);
await().atMost(Duration.ofMinutes(1)).until(() -> {
// Test cluster info again
ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo();
assertThat(clusterInfo.getBrokerAddrTable().get(master1With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size())
.isEqualTo(2);
assertThat(clusterInfo.getBrokerAddrTable().get(master2With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size())
.isEqualTo(2);
assertThat(clusterInfo.getBrokerAddrTable().get(master3With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size())
.isEqualTo(2);
return true;
});
// ReAdd the slave broker
createAndAddSlave(1, brokerContainer1, master3With3Replicas);
createAndAddSlave(1, brokerContainer2, master1With3Replicas);
createAndAddSlave(1, brokerContainer3, master2With3Replicas);
// Trigger a register action
//for (final SlaveBrokerController slaveBrokerController : brokerContainer1.getSlaveBrokers()) {
// slaveBrokerController.registerBrokerAll(false, false, true);
//}
//
//for (final SlaveBrokerController slaveBrokerController : brokerContainer2.getSlaveBrokers()) {
// slaveBrokerController.registerBrokerAll(false, false, true);
//}
await().atMost(Duration.ofMinutes(1)).until(() -> {
ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo();
return clusterInfo.getBrokerAddrTable()
.get(master1With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() == 3
&& clusterInfo.getBrokerAddrTable()
.get(master2With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() == 3
&& clusterInfo.getBrokerAddrTable()
.get(master2With3Replicas.getBrokerConfig().getBrokerName()).getBrokerAddrs().size() == 3;
});
}
@Test
public void reAddSlaveBroker_ConnectionCheck() throws Exception {
await().atMost(100, TimeUnit.SECONDS)
.until(() -> ((DefaultMessageStore) master3With3Replicas.getMessageStore()).getHaService().getConnectionCount().get() == 2);
removeSlaveBroker(1, brokerContainer1, master3With3Replicas);
createAndAddSlave(1, brokerContainer1, master3With3Replicas);
await().atMost(100, TimeUnit.SECONDS)
.until(() -> ((DefaultMessageStore) master3With3Replicas.getMessageStore()).getHaService().getConnectionCount().get() == 2);
await().atMost(100, TimeUnit.SECONDS)
.until(() -> ((DefaultMessageStore) master3With3Replicas.getMessageStore()).getHaService().inSyncReplicasNums(0) == 3);
Thread.sleep(1000 * 101);
}
}
|
SlaveBrokerIT
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/hamcrest/HamcrestArgumentMatcher.java
|
{
"start": 321,
"end": 1218
}
|
class ____<T> implements ArgumentMatcher<T> {
private final Matcher<T> matcher;
private final Class<?> type;
public HamcrestArgumentMatcher(Matcher<T> matcher) {
this(Void.class, matcher);
}
public HamcrestArgumentMatcher(Matcher<T> matcher, Class<T> type) {
this(type, matcher);
}
private HamcrestArgumentMatcher(Class<?> type, Matcher<T> matcher) {
this.type = requireNonNull(type, "type");
this.matcher = requireNonNull(matcher, "matcher");
}
@Override
public boolean matches(Object argument) {
return this.matcher.matches(argument);
}
@Override
public String toString() {
// TODO SF add unit tests and integ test coverage for toString()
return StringDescription.toString(matcher);
}
@Override
public Class<?> type() {
return type;
}
}
|
HamcrestArgumentMatcher
|
java
|
quarkusio__quarkus
|
core/builder/src/main/java/io/quarkus/builder/Execution.java
|
{
"start": 778,
"end": 7406
}
|
class ____ {
static final Logger log = Logger.getLogger("io.quarkus.builder");
private final BuildChain chain;
private final ConcurrentHashMap<ItemId, BuildItem> singles;
private final ConcurrentHashMap<ItemId, List<BuildItem>> multis;
private final Set<ItemId> finalIds;
private final ConcurrentHashMap<StepInfo, BuildContext> contextCache = new ConcurrentHashMap<>();
private final EnhancedQueueExecutor executor;
private final List<Diagnostic> diagnostics = Collections.synchronizedList(new ArrayList<>());
private final String buildTargetName;
private final AtomicBoolean errorReported = new AtomicBoolean();
private final AtomicInteger lastStepCount = new AtomicInteger();
private volatile Thread runningThread;
private volatile boolean done;
private final BuildMetrics metrics;
static {
try {
Class.forName("org.jboss.threads.EnhancedQueueExecutor$1", false, Execution.class.getClassLoader());
} catch (ClassNotFoundException ignored) {
}
}
Execution(final BuildExecutionBuilder builder, final Set<ItemId> finalIds) {
chain = builder.getChain();
this.singles = new ConcurrentHashMap<>(builder.getInitialSingle());
this.multis = new ConcurrentHashMap<>(builder.getInitialMulti());
this.finalIds = finalIds;
final EnhancedQueueExecutor.Builder executorBuilder = new EnhancedQueueExecutor.Builder();
executorBuilder.setRegisterMBean(false);
executorBuilder.setQueueLimited(false);
final int availableProcessors = ProcessorInfo.availableProcessors();
final int corePoolSize = defineCorePoolSize(availableProcessors);
final int maxPoolSize = defineMaxPoolSize(availableProcessors, corePoolSize);
executorBuilder.setMaximumPoolSize(maxPoolSize);
executorBuilder.setCorePoolSize(corePoolSize);
executorBuilder.setExceptionHandler(JBossExecutors.loggingExceptionHandler());
executorBuilder.setThreadFactory(new JBossThreadFactory(new ThreadGroup("build group"), Boolean.FALSE, null, "build-%t",
JBossExecutors.loggingExceptionHandler(), null));
buildTargetName = builder.getBuildTargetName();
executor = executorBuilder.build();
lastStepCount.set(builder.getChain().getEndStepCount());
if (lastStepCount.get() == 0)
done = true;
metrics = new BuildMetrics(buildTargetName);
}
private static int defineMaxPoolSize(final int availableProcessors, final int corePoolSize) {
//We used to have a hard limit of 1024, but we think now that was too high.
//Now we default to twice the number of available processors, and allow to configure it so that people can experiment.
Integer integer = Integer.getInteger("io.quarkus.builder.execution.maxPoolSize");
return integer != null ? integer : Math.max(availableProcessors * 2, corePoolSize);
}
private static int defineCorePoolSize(final int availableProcessors) {
//The default core pool size is 8 but we allow to tune this to experiment with different values.
return Integer.getInteger("io.quarkus.builder.execution.corePoolSize", 8);
}
List<Diagnostic> getDiagnostics() {
return diagnostics;
}
BuildContext getBuildContext(StepInfo stepInfo) {
return contextCache.computeIfAbsent(stepInfo, si -> new BuildContext(chain.getClassLoader(), si, this));
}
void removeBuildContext(StepInfo stepInfo, BuildContext buildContext) {
contextCache.remove(stepInfo, buildContext);
}
BuildResult run() throws BuildException {
final long start = System.nanoTime();
metrics.buildStarted();
runningThread = Thread.currentThread();
// run the build
final List<StepInfo> startSteps = chain.getStartSteps();
for (StepInfo startStep : startSteps) {
executor.execute(getBuildContext(startStep)::run);
}
// wait for the wrap-up
boolean intr = false;
try {
for (;;) {
if (Thread.interrupted())
intr = true;
if (done)
break;
park(this);
}
} finally {
if (intr)
Thread.currentThread().interrupt();
runningThread = null;
}
executor.shutdown();
for (;;)
try {
executor.awaitTermination(1000L, TimeUnit.DAYS);
break;
} catch (InterruptedException e) {
intr = true;
} finally {
if (intr)
Thread.currentThread().interrupt();
}
for (Diagnostic diagnostic : diagnostics) {
if (diagnostic.getLevel() == Diagnostic.Level.ERROR) {
BuildException failed = new BuildException("Build failed due to errors", diagnostic.getThrown(),
Collections.unmodifiableList(diagnostics));
for (Diagnostic i : diagnostics) {
if (i.getThrown() != null && i.getThrown() != diagnostic.getThrown()) {
failed.addSuppressed(i.getThrown());
}
}
throw failed;
}
}
if (lastStepCount.get() > 0)
throw new BuildException("Extra steps left over", Collections.emptyList());
long duration = max(0, System.nanoTime() - start);
metrics.buildFinished(TimeUnit.NANOSECONDS.toMillis(duration));
return new BuildResult(singles, multis, finalIds, Collections.unmodifiableList(diagnostics),
duration, metrics);
}
EnhancedQueueExecutor getExecutor() {
return executor;
}
String getBuildTargetName() {
return buildTargetName;
}
void setErrorReported() {
errorReported.compareAndSet(false, true);
}
boolean isErrorReported() {
return errorReported.get();
}
ConcurrentHashMap<ItemId, BuildItem> getSingles() {
return singles;
}
ConcurrentHashMap<ItemId, List<BuildItem>> getMultis() {
return multis;
}
BuildChain getBuildChain() {
return chain;
}
BuildMetrics getMetrics() {
return metrics;
}
void depFinished() {
final int count = lastStepCount.decrementAndGet();
log.tracef("End step completed; %d remaining", count);
if (count == 0) {
done = true;
unpark(runningThread);
}
}
}
|
Execution
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java
|
{
"start": 2156,
"end": 20211
}
|
class ____ extends ESTestCase {
private AnomalyDetectionAuditor anomalyDetectionAuditor;
private DataFrameAnalyticsAuditor dataFrameAnalyticsAuditor;
private ClusterService clusterService;
private ThreadPool threadPool;
@Before
public void setupMocks() {
anomalyDetectionAuditor = mock(AnomalyDetectionAuditor.class);
dataFrameAnalyticsAuditor = mock(DataFrameAnalyticsAuditor.class);
clusterService = mock(ClusterService.class);
threadPool = mock(ThreadPool.class);
ExecutorService executorService = mock(ExecutorService.class);
doAnswer(invocation -> {
((Runnable) invocation.getArguments()[0]).run();
return null;
}).when(executorService).execute(any(Runnable.class));
when(threadPool.executor(anyString())).thenReturn(executorService);
}
public void testClusterChanged_assign() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
ClusterState previous = ClusterState.builder(new ClusterName("_name"))
.metadata(
Metadata.builder()
.putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, Collections.emptyMap()))
)
.build();
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", "_node_id", null, tasksBuilder);
Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build();
ClusterState newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
if (anomalyDetectionAuditor.includeNodeInfo()) {
verify(anomalyDetectionAuditor, times(1)).info("job_id", "Opening job on node [_node_id]");
} else {
verify(anomalyDetectionAuditor, times(1)).info("job_id", "Opening job");
}
// no longer master
newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300)))
.localNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
if (anomalyDetectionAuditor.includeNodeInfo()) {
verifyNoMoreInteractions(anomalyDetectionAuditor);
}
}
public void testClusterChanged_unassign() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", "_node_id", null, tasksBuilder);
Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build();
ClusterState previous = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", null, null, tasksBuilder);
metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build();
ClusterState newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
if (anomalyDetectionAuditor.includeNodeInfo()) {
verify(anomalyDetectionAuditor, times(1)).info("job_id", "Job unassigned from node [_node_id]");
} else {
verify(anomalyDetectionAuditor, times(1)).info("job_id", "Job relocating.");
}
verify(anomalyDetectionAuditor, times(2)).includeNodeInfo();
// no longer master
newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
verifyNoMoreInteractions(anomalyDetectionAuditor);
}
public void testClusterChanged_multipleProjects() {
final Clock clock = mock(Clock.class);
final Instant startInstant = Instant.now();
when(clock.instant()).thenReturn(startInstant);
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService,
clock
);
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", null, JobState.OPENED, tasksBuilder);
final ProjectId projectId = randomProjectIdOrDefault();
final Metadata.Builder metadataBuilder = Metadata.builder()
.put(ProjectMetadata.builder(projectId).putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build());
for (int p = randomIntBetween(1, 5); p > 0; p--) {
metadataBuilder.put(ProjectMetadata.builder(randomUniqueProjectId()));
}
final ClusterState previous = ClusterState.builder(ClusterName.DEFAULT).metadata(metadataBuilder.build()).build();
final ClusterState newState = ClusterState.builder(previous)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
// Force the event to happen far enough in the future to trigger the unassigned checker
// This will track the current state (which jobs are unassigned) but won't trigger a warning
final Instant firstEventInstant = startInstant.plus(MlAssignmentNotifier.MIN_CHECK_UNASSIGNED_INTERVAL).plusSeconds(1);
when(clock.instant()).thenReturn(firstEventInstant);
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
final MockLog log = MockLog.capture(MlAssignmentNotifier.class);
log.addExpectation(
new MockLog.PatternSeenEventExpectation(
"expect-warning-log",
MlAssignmentNotifier.class.getName(),
Level.WARN,
Pattern.quote("In project [" + projectId + "] ML persistent tasks unassigned for a long time [") + ".*"
)
);
// Force an event in the future that will trigger a warning message
final Instant secondEventInstant = firstEventInstant.plus(MlAssignmentNotifier.LONG_TIME_UNASSIGNED_INTERVAL).plusSeconds(1);
when(clock.instant()).thenReturn(secondEventInstant);
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
log.assertAllExpectationsMatched();
}
public void testClusterChanged_noPersistentTaskChanges() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", null, null, tasksBuilder);
Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build();
ClusterState previous = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build();
ClusterState newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
verifyNoMoreInteractions(anomalyDetectionAuditor);
// no longer master
newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
)
.build();
notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous));
}
public void testAuditUnassignedMlTasks() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job_id", null, null, tasksBuilder);
Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build();
ClusterState newState = ClusterState.builder(new ClusterName("_name"))
.metadata(metadata)
// set local node master
.nodes(
DiscoveryNodes.builder()
.add(DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200)))
.localNodeId("_node_id")
.masterNodeId("_node_id")
)
.build();
notifier.auditUnassignedMlTasks(
Metadata.DEFAULT_PROJECT_ID,
newState.nodes(),
newState.metadata().getProject().custom(PersistentTasksCustomMetadata.TYPE)
);
if (anomalyDetectionAuditor.includeNodeInfo()) {
verify(anomalyDetectionAuditor, times(1)).warning("job_id", "No node found to open job. Reasons [test assignment]");
} else {
// need to account for includeNodeInfo being called here, in the test, and also in anomalyDetectionAuditor
verify(anomalyDetectionAuditor, times(2)).includeNodeInfo();
}
}
public void testFindLongTimeUnassignedTasks() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
Instant now = Instant.now();
Instant eightHoursAgo = now.minus(Duration.ofHours(8));
Instant sevenHoursAgo = eightHoursAgo.plus(Duration.ofHours(1));
Instant twoHoursAgo = sevenHoursAgo.plus(Duration.ofHours(5));
Instant tomorrow = now.plus(Duration.ofHours(24));
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", "node1", JobState.OPENED, tasksBuilder);
addJobTask("job2", "node1", JobState.OPENED, tasksBuilder);
addJobTask("job3", null, JobState.OPENED, tasksBuilder);
addJobTask("job4", null, JobState.OPENED, tasksBuilder);
addJobTask("job5", null, JobState.OPENED, tasksBuilder);
List<String> itemsToReport = notifier.findLongTimeUnassignedTasks(eightHoursAgo, tasksBuilder.build());
// Nothing reported because unassigned jobs only just detected
assertThat(itemsToReport, empty());
tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, JobState.OPENED, tasksBuilder);
addJobTask("job2", "node1", JobState.OPENED, tasksBuilder);
addJobTask("job3", null, JobState.OPENED, tasksBuilder);
addJobTask("job4", "node2", JobState.OPENED, tasksBuilder);
addJobTask("job5", null, JobState.OPENED, tasksBuilder);
itemsToReport = notifier.findLongTimeUnassignedTasks(sevenHoursAgo, tasksBuilder.build());
// Jobs 3 and 5 still unassigned so should get reported, job 4 now assigned, job 1 only just detected unassigned
assertThat(
itemsToReport,
containsInAnyOrder("[xpack/ml/job]/[job3] unassigned for [3600] seconds", "[xpack/ml/job]/[job5] unassigned for [3600] seconds")
);
tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, JobState.OPENED, tasksBuilder);
addJobTask("job2", null, JobState.OPENED, tasksBuilder);
addJobTask("job3", null, JobState.OPENED, tasksBuilder);
addJobTask("job4", "node2", JobState.OPENED, tasksBuilder);
addJobTask("job5", null, JobState.OPENED, tasksBuilder);
itemsToReport = notifier.findLongTimeUnassignedTasks(twoHoursAgo, tasksBuilder.build());
// Jobs 3 and 5 still unassigned but reported less than 6 hours ago, job 1 still unassigned so gets reported now,
// job 2 only just detected unassigned
assertThat(itemsToReport, contains("[xpack/ml/job]/[job1] unassigned for [18000] seconds"));
tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, JobState.OPENED, tasksBuilder);
addJobTask("job2", null, JobState.OPENED, tasksBuilder);
addJobTask("job3", null, JobState.OPENED, tasksBuilder);
addJobTask("job4", null, JobState.OPENED, tasksBuilder);
addJobTask("job5", "node1", JobState.OPENED, tasksBuilder);
itemsToReport = notifier.findLongTimeUnassignedTasks(now, tasksBuilder.build());
// Job 3 still unassigned and reported more than 6 hours ago, job 1 still unassigned but reported less than 6 hours ago,
// job 2 still unassigned so gets reported now, job 4 only just detected unassigned, job 5 now assigned
assertThat(
itemsToReport,
containsInAnyOrder(
"[xpack/ml/job]/[job2] unassigned for [7200] seconds",
"[xpack/ml/job]/[job3] unassigned for [28800] seconds"
)
);
tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, JobState.FAILED, tasksBuilder);
addJobTask("job2", null, JobState.FAILED, tasksBuilder);
addJobTask("job3", null, JobState.FAILED, tasksBuilder);
addJobTask("job4", null, JobState.FAILED, tasksBuilder);
addJobTask("job5", "node1", JobState.FAILED, tasksBuilder);
itemsToReport = notifier.findLongTimeUnassignedTasks(tomorrow, tasksBuilder.build());
// We still have unassigned jobs, but now all the jobs are failed, so none should be reported as unassigned
// as it doesn't make any difference whether they're assigned or not and autoscaling will ignore them
assertThat(itemsToReport, empty());
}
public void testFindLongTimeUnassignedTasks_WithNullState() {
MlAssignmentNotifier notifier = new MlAssignmentNotifier(
anomalyDetectionAuditor,
dataFrameAnalyticsAuditor,
threadPool,
clusterService
);
var now = Instant.now();
var sevenHoursAgo = now.minus(Duration.ofHours(7));
var eightHoursAgo = now.minus(Duration.ofHours(8));
{
// run once with valid state to add unassigned job to the history
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, JobState.OPENED, tasksBuilder);
List<String> itemsToReport = notifier.findLongTimeUnassignedTasks(eightHoursAgo, tasksBuilder.build());
// Nothing reported because unassigned jobs only just detected
assertThat(itemsToReport, empty());
}
{
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask("job1", null, null, tasksBuilder); // this time the job has no state
// one hour later the job would be detected as unassigned if not for the missing state
List<String> itemsToReport = notifier.findLongTimeUnassignedTasks(sevenHoursAgo, tasksBuilder.build());
assertThat(itemsToReport, empty());
}
}
}
|
MlAssignmentNotifierTests
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FilesEndpointBuilderFactory.java
|
{
"start": 159014,
"end": 166117
}
|
interface ____
extends
AdvancedFilesEndpointConsumerBuilder,
AdvancedFilesEndpointProducerBuilder {
default FilesEndpointBuilder basic() {
return (FilesEndpointBuilder) this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder autoCreate(boolean autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder autoCreate(String autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Sets the connect timeout for waiting for a connection to be
* established Used by both FTPClient and JSCH.
*
* The option is a: <code>int</code> type.
*
* Default: 10000
* Group: advanced
*
* @param connectTimeout the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder connectTimeout(int connectTimeout) {
doSetProperty("connectTimeout", connectTimeout);
return this;
}
/**
* Sets the connect timeout for waiting for a connection to be
* established Used by both FTPClient and JSCH.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 10000
* Group: advanced
*
* @param connectTimeout the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder connectTimeout(String connectTimeout) {
doSetProperty("connectTimeout", connectTimeout);
return this;
}
/**
* Specifies the maximum reconnect attempts Camel performs when it tries
* to connect to the remote FTP server. Use 0 to disable this behavior.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*
* @param maximumReconnectAttempts the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder maximumReconnectAttempts(int maximumReconnectAttempts) {
doSetProperty("maximumReconnectAttempts", maximumReconnectAttempts);
return this;
}
/**
* Specifies the maximum reconnect attempts Camel performs when it tries
* to connect to the remote FTP server. Use 0 to disable this behavior.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*
* @param maximumReconnectAttempts the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder maximumReconnectAttempts(String maximumReconnectAttempts) {
doSetProperty("maximumReconnectAttempts", maximumReconnectAttempts);
return this;
}
/**
* Delay in millis Camel will wait before performing a reconnect
* attempt.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param reconnectDelay the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder reconnectDelay(long reconnectDelay) {
doSetProperty("reconnectDelay", reconnectDelay);
return this;
}
/**
* Delay in millis Camel will wait before performing a reconnect
* attempt.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param reconnectDelay the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder reconnectDelay(String reconnectDelay) {
doSetProperty("reconnectDelay", reconnectDelay);
return this;
}
/**
* Sets the data timeout for waiting for reply Used only by FTPClient.
*
* The option is a: <code>int</code> type.
*
* Default: 30000
* Group: advanced
*
* @param timeout the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder timeout(int timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Sets the data timeout for waiting for reply Used only by FTPClient.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 30000
* Group: advanced
*
* @param timeout the value to set
* @return the dsl builder
*/
default AdvancedFilesEndpointBuilder timeout(String timeout) {
doSetProperty("timeout", timeout);
return this;
}
}
public
|
AdvancedFilesEndpointBuilder
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/AS2EndpointBuilderFactory.java
|
{
"start": 35896,
"end": 38901
}
|
interface ____
extends
EndpointConsumerBuilder {
default AS2EndpointConsumerBuilder basic() {
return (AS2EndpointConsumerBuilder) this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedAS2EndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedAS2EndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedAS2EndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedAS2EndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the AS2 component.
*/
public
|
AdvancedAS2EndpointConsumerBuilder
|
java
|
spring-projects__spring-boot
|
module/spring-boot-http-client/src/test/java/org/springframework/boot/http/client/ReactorClientHttpRequestFactoryBuilderTests.java
|
{
"start": 1446,
"end": 3957
}
|
class ____
extends AbstractClientHttpRequestFactoryBuilderTests<ReactorClientHttpRequestFactory> {
ReactorClientHttpRequestFactoryBuilderTests() {
super(ReactorClientHttpRequestFactory.class, ClientHttpRequestFactoryBuilder.reactor());
}
@Test
void withHttpClientFactory() {
boolean[] called = new boolean[1];
Supplier<HttpClient> httpClientFactory = () -> {
called[0] = true;
return HttpClient.create();
};
ClientHttpRequestFactoryBuilder.reactor().withHttpClientFactory(httpClientFactory).build();
assertThat(called).containsExactly(true);
}
@Test
void withReactorResourceFactory() {
ReactorResourceFactory resourceFactory = spy(new ReactorResourceFactory());
ClientHttpRequestFactoryBuilder.reactor().withReactorResourceFactory(resourceFactory).build();
then(resourceFactory).should().getConnectionProvider();
then(resourceFactory).should().getLoopResources();
}
@Test
void withCustomizers() {
List<HttpClient> httpClients = new ArrayList<>();
UnaryOperator<HttpClient> httpClientCustomizer1 = (httpClient) -> {
httpClients.add(httpClient);
return httpClient;
};
UnaryOperator<HttpClient> httpClientCustomizer2 = (httpClient) -> {
httpClients.add(httpClient);
return httpClient;
};
ClientHttpRequestFactoryBuilder.reactor()
.withHttpClientCustomizer(httpClientCustomizer1)
.withHttpClientCustomizer(httpClientCustomizer2)
.build();
assertThat(httpClients).hasSize(2);
}
@Test
void with() {
boolean[] called = new boolean[1];
Supplier<HttpClient> httpClientFactory = () -> {
called[0] = true;
return HttpClient.create();
};
ClientHttpRequestFactoryBuilder.reactor()
.with((builder) -> builder.withHttpClientFactory(httpClientFactory))
.build();
assertThat(called).containsExactly(true);
}
@Override
protected long connectTimeout(ReactorClientHttpRequestFactory requestFactory) {
HttpClient httpClient = (HttpClient) ReflectionTestUtils.getField(requestFactory, "httpClient");
assertThat(httpClient).isNotNull();
Object connectTimeout = httpClient.configuration().options().get(ChannelOption.CONNECT_TIMEOUT_MILLIS);
assertThat(connectTimeout).isNotNull();
return (int) connectTimeout;
}
@Override
protected long readTimeout(ReactorClientHttpRequestFactory requestFactory) {
Duration readTimeout = (Duration) ReflectionTestUtils.getField(requestFactory, "readTimeout");
assertThat(readTimeout).isNotNull();
return readTimeout.toMillis();
}
}
|
ReactorClientHttpRequestFactoryBuilderTests
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/tck/BaseTck.java
|
{
"start": 2404,
"end": 2687
}
|
class ____ implements Iterable<Long> {
final long end;
FiniteRange(long end) {
this.end = end;
}
@Override
public Iterator<Long> iterator() {
return new FiniteRangeIterator(end);
}
static final
|
FiniteRange
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/client/RedisNoReplicasException.java
|
{
"start": 751,
"end": 972
}
|
class ____ extends RedisRetryException {
private static final long serialVersionUID = -5658453331593029252L;
public RedisNoReplicasException(String message) {
super(message);
}
}
|
RedisNoReplicasException
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/StringBuilderFormattable.java
|
{
"start": 878,
"end": 988
}
|
interface ____ be converted to text, ideally without allocating temporary objects.
*
* @since 2.6
*/
public
|
can
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/FilteredKeyListMultimap.java
|
{
"start": 932,
"end": 1714
}
|
class ____<K extends @Nullable Object, V extends @Nullable Object>
extends FilteredKeyMultimap<K, V> implements ListMultimap<K, V> {
FilteredKeyListMultimap(ListMultimap<K, V> unfiltered, Predicate<? super K> keyPredicate) {
super(unfiltered, keyPredicate);
}
@Override
public ListMultimap<K, V> unfiltered() {
return (ListMultimap<K, V>) super.unfiltered();
}
@Override
public List<V> get(@ParametricNullness K key) {
return (List<V>) super.get(key);
}
@Override
public List<V> removeAll(@Nullable Object key) {
return (List<V>) super.removeAll(key);
}
@Override
public List<V> replaceValues(@ParametricNullness K key, Iterable<? extends V> values) {
return (List<V>) super.replaceValues(key, values);
}
}
|
FilteredKeyListMultimap
|
java
|
apache__flink
|
flink-metrics/flink-metrics-core/src/main/java/org/apache/flink/metrics/MetricGroup.java
|
{
"start": 1173,
"end": 1394
}
|
class ____ be used to register new metrics with Flink and to create a nested
* hierarchy based on the group names.
*
* <p>A MetricGroup is uniquely identified by it's place in the hierarchy and name.
*/
@Public
public
|
can
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/runtime/AbstractGeoPointScriptFieldQueryTestCase.java
|
{
"start": 623,
"end": 963
}
|
class ____<T extends AbstractGeoPointScriptFieldQuery> extends
AbstractScriptFieldQueryTestCase<T> {
protected final GeoPointFieldScript.LeafFactory leafFactory = mock(GeoPointFieldScript.LeafFactory.class);
@Override
public final void testVisit() {
assertEmptyVisit();
}
}
|
AbstractGeoPointScriptFieldQueryTestCase
|
java
|
apache__camel
|
components/camel-huawei/camel-huaweicloud-common/src/main/java/org/apache/camel/component/huaweicloud/common/models/ServiceKeys.java
|
{
"start": 873,
"end": 1449
}
|
class ____ {
private String accessKey;
private String secretKey;
public ServiceKeys() {
}
public ServiceKeys(String accessKey, String secretKey) {
this.accessKey = accessKey;
this.secretKey = secretKey;
}
public String getAccessKey() {
return accessKey;
}
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
}
|
ServiceKeys
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
|
{
"start": 37583,
"end": 37718
}
|
interface ____ {
CompletableEventReaper build(final LogContext logContext);
}
// auxiliary
|
CompletableEventReaperFactory
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/erroneous/interceptedBean/InterceptedBeanNotUnboundWildcardTest.java
|
{
"start": 944,
"end": 1434
}
|
class ____ {
@RegisterExtension
ArcTestContainer container = ArcTestContainer.builder().beanClasses(InterceptedBeanNotUnboundWildcardTest.class,
MyInterceptor.class, Binding.class).shouldFail().build();
@Test
public void testExceptionThrown() {
Throwable error = container.getFailure();
assertThat(error).isInstanceOf(DefinitionException.class);
}
@Interceptor
@Priority(1)
@Binding
static
|
InterceptedBeanNotUnboundWildcardTest
|
java
|
apache__kafka
|
metadata/src/test/java/org/apache/kafka/controller/PeriodicTaskControlManagerTest.java
|
{
"start": 2527,
"end": 10784
}
|
class ____ implements PeriodicTaskControlManager.QueueAccessor {
final MockTime time;
final PeriodicTaskControlManager manager;
final TreeMap<Long, List<TrackedTask>> tasks;
int numCalls = 10_000;
PeriodicTaskControlManagerTestEnv() {
this.time = new MockTime(0, 0, 0);
this.manager = new PeriodicTaskControlManager.Builder().
setTime(time).
setQueueAccessor(this).
build();
this.tasks = new TreeMap<>();
}
@Override
public void scheduleDeferred(
String tag,
long deadlineNs,
Supplier<ControllerResult<Void>> op
) {
if (numCalls <= 0) {
throw new RuntimeException("too many deferred calls.");
}
numCalls--;
cancelDeferred(tag);
TrackedTask task = new TrackedTask(tag, deadlineNs, op);
tasks.computeIfAbsent(deadlineNs, __ -> new ArrayList<>()).add(task);
}
@Override
public void cancelDeferred(String tag) {
Iterator<Map.Entry<Long, List<TrackedTask>>> iter = tasks.entrySet().iterator();
boolean foundTask = false;
while (iter.hasNext() && (!foundTask)) {
Map.Entry<Long, List<TrackedTask>> entry = iter.next();
Iterator<TrackedTask> taskIter = entry.getValue().iterator();
while (taskIter.hasNext()) {
TrackedTask task = taskIter.next();
if (task.tag.equals(tag)) {
taskIter.remove();
foundTask = true;
break;
}
}
if (entry.getValue().isEmpty()) {
iter.remove();
}
}
}
int numDeferred() {
int count = 0;
for (List<TrackedTask> taskList : tasks.values()) {
count += taskList.size();
}
return count;
}
void advanceTime(long ms) {
time.sleep(ms);
while (true) {
Iterator<Map.Entry<Long, List<TrackedTask>>> iter = tasks.entrySet().iterator();
if (!iter.hasNext()) {
return;
}
Map.Entry<Long, List<TrackedTask>> entry = iter.next();
if (time.nanoseconds() < entry.getKey()) {
return;
}
if (!entry.getValue().isEmpty()) {
Iterator<TrackedTask> taskIter = entry.getValue().iterator();
TrackedTask task = taskIter.next();
taskIter.remove();
try {
task.op.get();
} catch (Exception e) {
// discard exception
}
continue;
}
iter.remove();
}
}
}
@Test
public void testActivate() {
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
assertFalse(env.manager.active());
env.manager.activate();
assertTrue(env.manager.active());
assertEquals(0, env.numDeferred());
}
@Test
public void testDeactivate() {
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
assertFalse(env.manager.active());
env.manager.activate();
env.manager.deactivate();
assertFalse(env.manager.active());
assertEquals(0, env.numDeferred());
}
@Test
public void testRegisterTaskWhenDeactivated() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.registerTask(foo.task);
assertEquals(0, env.numDeferred());
}
@Test
public void testRegisterTaskWhenActivated() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
assertEquals(1, env.numDeferred());
}
@Test
public void testRegisterTaskWhenActivatedThenDeactivate() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
env.manager.deactivate();
assertEquals(0, env.numDeferred());
}
@Test
public void testRegisterTaskAndAdvanceTime() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
FakePeriodicTask bar = new FakePeriodicTask("bar", MILLISECONDS.toNanos(50));
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
env.manager.registerTask(bar.task);
assertEquals(2, env.numDeferred());
env.advanceTime(50);
assertEquals(0, foo.numCalls.get());
assertEquals(1, bar.numCalls.get());
assertEquals(2, env.numDeferred());
env.advanceTime(50);
assertEquals(1, foo.numCalls.get());
assertEquals(2, bar.numCalls.get());
assertEquals(2, env.numDeferred());
env.manager.deactivate();
}
@Test
public void testContinuation() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
FakePeriodicTask bar = new FakePeriodicTask("bar", MILLISECONDS.toNanos(50));
bar.continuation.set(true);
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
env.manager.registerTask(bar.task);
assertEquals(2, env.numDeferred());
env.advanceTime(50);
assertEquals(0, foo.numCalls.get());
assertEquals(1, bar.numCalls.get());
assertEquals(2, env.numDeferred());
env.advanceTime(10);
assertEquals(2, bar.numCalls.get());
env.advanceTime(40);
assertEquals(1, foo.numCalls.get());
assertEquals(2, bar.numCalls.get());
assertEquals(2, env.numDeferred());
env.advanceTime(10);
assertEquals(3, bar.numCalls.get());
env.manager.deactivate();
}
@Test
public void testRegisterTaskAndUnregister() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
FakePeriodicTask bar = new FakePeriodicTask("bar", MILLISECONDS.toNanos(50));
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
env.manager.registerTask(bar.task);
assertEquals(2, env.numDeferred());
env.advanceTime(50);
assertEquals(0, foo.numCalls.get());
assertEquals(1, bar.numCalls.get());
env.manager.unregisterTask(foo.task.name());
assertEquals(1, env.numDeferred());
env.manager.unregisterTask(bar.task.name());
assertEquals(0, env.numDeferred());
env.advanceTime(200);
assertEquals(0, foo.numCalls.get());
assertEquals(1, bar.numCalls.get());
env.manager.deactivate();
}
@Test
public void testReschedulingAfterFailure() {
FakePeriodicTask foo = new FakePeriodicTask("foo", MILLISECONDS.toNanos(100));
foo.shouldFail.set(true);
PeriodicTaskControlManagerTestEnv env = new PeriodicTaskControlManagerTestEnv();
env.manager.activate();
env.manager.registerTask(foo.task);
assertEquals(1, env.numDeferred());
env.advanceTime(100);
assertEquals(1, foo.numCalls.get());
env.advanceTime(300000);
assertEquals(2, foo.numCalls.get());
env.manager.deactivate();
}
}
|
PeriodicTaskControlManagerTestEnv
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/RedissonHyperLogLogReactiveTest.java
|
{
"start": 153,
"end": 1363
}
|
class ____ extends BaseReactiveTest {
@Test
public void testAdd() {
RHyperLogLogReactive<Integer> log = redisson.getHyperLogLog("log");
sync(log.add(1));
sync(log.add(2));
sync(log.add(3));
Assertions.assertEquals(3L, sync(log.count()).longValue());
}
@Test
public void testMerge() {
RHyperLogLogReactive<String> hll1 = redisson.getHyperLogLog("hll1");
Assertions.assertTrue(sync(hll1.add("foo")));
Assertions.assertTrue(sync(hll1.add("bar")));
Assertions.assertTrue(sync(hll1.add("zap")));
Assertions.assertTrue(sync(hll1.add("a")));
RHyperLogLogReactive<String> hll2 = redisson.getHyperLogLog("hll2");
Assertions.assertTrue(sync(hll2.add("a")));
Assertions.assertTrue(sync(hll2.add("b")));
Assertions.assertTrue(sync(hll2.add("c")));
Assertions.assertTrue(sync(hll2.add("foo")));
Assertions.assertFalse(sync(hll2.add("c")));
RHyperLogLogReactive<String> hll3 = redisson.getHyperLogLog("hll3");
sync(hll3.mergeWith("hll1", "hll2"));
Assertions.assertEquals(6L, sync(hll3.count()).longValue());
}
}
|
RedissonHyperLogLogReactiveTest
|
java
|
apache__camel
|
components/camel-clickup/src/main/java/org/apache/camel/component/clickup/model/Event.java
|
{
"start": 1331,
"end": 1629
}
|
class ____ {
@JsonProperty("webhook_id")
protected String webhookId;
public String getWebhookId() {
return webhookId;
}
@Override
public String toString() {
return "Event{" +
"webhookId='" + webhookId + '\'' +
'}';
}
}
|
Event
|
java
|
apache__camel
|
components/camel-web3j/src/generated/java/org/apache/camel/component/web3j/Web3jEndpointConfigurer.java
|
{
"start": 732,
"end": 13987
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
Web3jEndpoint target = (Web3jEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "address": target.getConfiguration().setAddress(property(camelContext, java.lang.String.class, value)); return true;
case "addresses": target.getConfiguration().setAddresses(property(camelContext, java.lang.String.class, value)); return true;
case "atblock":
case "atBlock": target.getConfiguration().setAtBlock(property(camelContext, java.lang.String.class, value)); return true;
case "blockhash":
case "blockHash": target.getConfiguration().setBlockHash(property(camelContext, java.lang.String.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "clientid":
case "clientId": target.getConfiguration().setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "data": target.getConfiguration().setData(property(camelContext, java.lang.String.class, value)); return true;
case "databasename":
case "databaseName": target.getConfiguration().setDatabaseName(property(camelContext, java.lang.String.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "filterid":
case "filterId": target.getConfiguration().setFilterId(property(camelContext, java.math.BigInteger.class, value)); return true;
case "fromaddress":
case "fromAddress": target.getConfiguration().setFromAddress(property(camelContext, java.lang.String.class, value)); return true;
case "fromblock":
case "fromBlock": target.getConfiguration().setFromBlock(property(camelContext, java.lang.String.class, value)); return true;
case "fulltransactionobjects":
case "fullTransactionObjects": target.getConfiguration().setFullTransactionObjects(property(camelContext, boolean.class, value)); return true;
case "gaslimit":
case "gasLimit": target.getConfiguration().setGasLimit(property(camelContext, java.math.BigInteger.class, value)); return true;
case "gasprice":
case "gasPrice": target.getConfiguration().setGasPrice(property(camelContext, java.math.BigInteger.class, value)); return true;
case "hashrate": target.getConfiguration().setHashrate(property(camelContext, java.lang.String.class, value)); return true;
case "headerpowhash":
case "headerPowHash": target.getConfiguration().setHeaderPowHash(property(camelContext, java.lang.String.class, value)); return true;
case "index": target.getConfiguration().setIndex(property(camelContext, java.math.BigInteger.class, value)); return true;
case "keyname":
case "keyName": target.getConfiguration().setKeyName(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "mixdigest":
case "mixDigest": target.getConfiguration().setMixDigest(property(camelContext, java.lang.String.class, value)); return true;
case "nonce": target.getConfiguration().setNonce(property(camelContext, java.lang.String.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "position": target.getConfiguration().setPosition(property(camelContext, java.math.BigInteger.class, value)); return true;
case "priority": target.getConfiguration().setPriority(property(camelContext, java.math.BigInteger.class, value)); return true;
case "privatefor":
case "privateFor": target.getConfiguration().setPrivateFor(property(camelContext, java.lang.String.class, value)); return true;
case "quorumapi":
case "quorumAPI": target.getConfiguration().setQuorumAPI(property(camelContext, boolean.class, value)); return true;
case "sha3hashofdatatosign":
case "sha3HashOfDataToSign": target.getConfiguration().setSha3HashOfDataToSign(property(camelContext, java.lang.String.class, value)); return true;
case "signedtransactiondata":
case "signedTransactionData": target.getConfiguration().setSignedTransactionData(property(camelContext, java.lang.String.class, value)); return true;
case "sourcecode":
case "sourceCode": target.getConfiguration().setSourceCode(property(camelContext, java.lang.String.class, value)); return true;
case "toaddress":
case "toAddress": target.getConfiguration().setToAddress(property(camelContext, java.lang.String.class, value)); return true;
case "toblock":
case "toBlock": target.getConfiguration().setToBlock(property(camelContext, java.lang.String.class, value)); return true;
case "topics": target.getConfiguration().setTopics(property(camelContext, java.lang.String.class, value)); return true;
case "transactionhash":
case "transactionHash": target.getConfiguration().setTransactionHash(property(camelContext, java.lang.String.class, value)); return true;
case "ttl": target.getConfiguration().setTtl(property(camelContext, java.math.BigInteger.class, value)); return true;
case "value": target.getConfiguration().setValue(property(camelContext, java.math.BigInteger.class, value)); return true;
case "web3j": target.getConfiguration().setWeb3j(property(camelContext, org.web3j.protocol.Web3j.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "address": return java.lang.String.class;
case "addresses": return java.lang.String.class;
case "atblock":
case "atBlock": return java.lang.String.class;
case "blockhash":
case "blockHash": return java.lang.String.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "data": return java.lang.String.class;
case "databasename":
case "databaseName": return java.lang.String.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "filterid":
case "filterId": return java.math.BigInteger.class;
case "fromaddress":
case "fromAddress": return java.lang.String.class;
case "fromblock":
case "fromBlock": return java.lang.String.class;
case "fulltransactionobjects":
case "fullTransactionObjects": return boolean.class;
case "gaslimit":
case "gasLimit": return java.math.BigInteger.class;
case "gasprice":
case "gasPrice": return java.math.BigInteger.class;
case "hashrate": return java.lang.String.class;
case "headerpowhash":
case "headerPowHash": return java.lang.String.class;
case "index": return java.math.BigInteger.class;
case "keyname":
case "keyName": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "mixdigest":
case "mixDigest": return java.lang.String.class;
case "nonce": return java.lang.String.class;
case "operation": return java.lang.String.class;
case "position": return java.math.BigInteger.class;
case "priority": return java.math.BigInteger.class;
case "privatefor":
case "privateFor": return java.lang.String.class;
case "quorumapi":
case "quorumAPI": return boolean.class;
case "sha3hashofdatatosign":
case "sha3HashOfDataToSign": return java.lang.String.class;
case "signedtransactiondata":
case "signedTransactionData": return java.lang.String.class;
case "sourcecode":
case "sourceCode": return java.lang.String.class;
case "toaddress":
case "toAddress": return java.lang.String.class;
case "toblock":
case "toBlock": return java.lang.String.class;
case "topics": return java.lang.String.class;
case "transactionhash":
case "transactionHash": return java.lang.String.class;
case "ttl": return java.math.BigInteger.class;
case "value": return java.math.BigInteger.class;
case "web3j": return org.web3j.protocol.Web3j.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
Web3jEndpoint target = (Web3jEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "address": return target.getConfiguration().getAddress();
case "addresses": return target.getConfiguration().getAddresses();
case "atblock":
case "atBlock": return target.getConfiguration().getAtBlock();
case "blockhash":
case "blockHash": return target.getConfiguration().getBlockHash();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "clientid":
case "clientId": return target.getConfiguration().getClientId();
case "data": return target.getConfiguration().getData();
case "databasename":
case "databaseName": return target.getConfiguration().getDatabaseName();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "filterid":
case "filterId": return target.getConfiguration().getFilterId();
case "fromaddress":
case "fromAddress": return target.getConfiguration().getFromAddress();
case "fromblock":
case "fromBlock": return target.getConfiguration().getFromBlock();
case "fulltransactionobjects":
case "fullTransactionObjects": return target.getConfiguration().isFullTransactionObjects();
case "gaslimit":
case "gasLimit": return target.getConfiguration().getGasLimit();
case "gasprice":
case "gasPrice": return target.getConfiguration().getGasPrice();
case "hashrate": return target.getConfiguration().getHashrate();
case "headerpowhash":
case "headerPowHash": return target.getConfiguration().getHeaderPowHash();
case "index": return target.getConfiguration().getIndex();
case "keyname":
case "keyName": return target.getConfiguration().getKeyName();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "mixdigest":
case "mixDigest": return target.getConfiguration().getMixDigest();
case "nonce": return target.getConfiguration().getNonce();
case "operation": return target.getConfiguration().getOperation();
case "position": return target.getConfiguration().getPosition();
case "priority": return target.getConfiguration().getPriority();
case "privatefor":
case "privateFor": return target.getConfiguration().getPrivateFor();
case "quorumapi":
case "quorumAPI": return target.getConfiguration().isQuorumAPI();
case "sha3hashofdatatosign":
case "sha3HashOfDataToSign": return target.getConfiguration().getSha3HashOfDataToSign();
case "signedtransactiondata":
case "signedTransactionData": return target.getConfiguration().getSignedTransactionData();
case "sourcecode":
case "sourceCode": return target.getConfiguration().getSourceCode();
case "toaddress":
case "toAddress": return target.getConfiguration().getToAddress();
case "toblock":
case "toBlock": return target.getConfiguration().getToBlock();
case "topics": return target.getConfiguration().getTopics();
case "transactionhash":
case "transactionHash": return target.getConfiguration().getTransactionHash();
case "ttl": return target.getConfiguration().getTtl();
case "value": return target.getConfiguration().getValue();
case "web3j": return target.getConfiguration().getWeb3j();
default: return null;
}
}
}
|
Web3jEndpointConfigurer
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/test/TestBestPracticesArchTest.java
|
{
"start": 1142,
"end": 2132
}
|
class ____ {
static JavaClasses TEST_CLASSES = new ClassFileImporter()
.withImportOption(ImportOption.Predefined.DO_NOT_INCLUDE_JARS)
.withImportOption(location -> !ImportOption.Predefined.DO_NOT_INCLUDE_TESTS.includes(location))
.importPackages("reactor");
@Test
void parameterizedTestsIncludeDisplayNameInPattern() {
DescribedPredicate<JavaAnnotation<?>> parameterizedTestWithDisplayName =
DescribedPredicate.describe("ParameterizedTest with {displayName}, or perhaps consider @ParameterizedTestWithName instead",
javaAnnotation -> {
if (!javaAnnotation.getRawType().isAssignableFrom(ParameterizedTest.class)) {
return false;
}
return javaAnnotation.get("name").map(String::valueOf).orElse("NOPE")
.contains(ParameterizedTest.DISPLAY_NAME_PLACEHOLDER);
});
methods()
.that().areAnnotatedWith(ParameterizedTest.class)
.should().beAnnotatedWith(parameterizedTestWithDisplayName)
.check(TEST_CLASSES);
}
}
|
TestBestPracticesArchTest
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/completable/CompletableSubscribeTest.java
|
{
"start": 889,
"end": 1356
}
|
class ____ extends RxJavaTest {
@Test
public void subscribeAlreadyCancelled() {
PublishProcessor<Integer> pp = PublishProcessor.create();
pp.ignoreElements().test(true);
assertFalse(pp.hasSubscribers());
}
@Test
public void methodTestNoCancel() {
PublishSubject<Integer> ps = PublishSubject.create();
ps.ignoreElements().test(false);
assertTrue(ps.hasObservers());
}
}
|
CompletableSubscribeTest
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableReplay.java
|
{
"start": 1385,
"end": 10020
}
|
class ____<T> extends ConnectableFlowable<T> implements HasUpstreamPublisher<T> {
/** The source observable. */
final Flowable<T> source;
/** Holds the current subscriber that is, will be or just was subscribed to the source observable. */
final AtomicReference<ReplaySubscriber<T>> current;
/** A factory that creates the appropriate buffer for the ReplaySubscriber. */
final Supplier<? extends ReplayBuffer<T>> bufferFactory;
final Publisher<T> onSubscribe;
@SuppressWarnings("rawtypes")
static final Supplier DEFAULT_UNBOUNDED_FACTORY = new DefaultUnboundedFactory();
/**
* Given a connectable observable factory, it multicasts over the generated
* ConnectableObservable via a selector function.
* @param <U> the connectable observable type
* @param <R> the result type
* @param connectableFactory the factory that returns a ConnectableFlowable for each individual subscriber
* @param selector the function that receives a Flowable and should return another Flowable that will be subscribed to
* @return the new Observable instance
*/
public static <U, R> Flowable<R> multicastSelector(
final Supplier<? extends ConnectableFlowable<U>> connectableFactory,
final Function<? super Flowable<U>, ? extends Publisher<R>> selector) {
return new MulticastFlowable<>(connectableFactory, selector);
}
/**
* Creates a replaying ConnectableObservable with an unbounded buffer.
* @param <T> the value type
* @param source the source Publisher to use
* @return the new ConnectableObservable instance
*/
@SuppressWarnings("unchecked")
public static <T> ConnectableFlowable<T> createFrom(Flowable<? extends T> source) {
return create(source, DEFAULT_UNBOUNDED_FACTORY);
}
/**
* Creates a replaying ConnectableObservable with a size bound buffer.
* @param <T> the value type
* @param source the source Flowable to use
* @param bufferSize the maximum number of elements to hold
* @param eagerTruncate if true, the head reference is refreshed to avoid unwanted item retention
* @return the new ConnectableObservable instance
*/
public static <T> ConnectableFlowable<T> create(Flowable<T> source,
final int bufferSize, boolean eagerTruncate) {
if (bufferSize == Integer.MAX_VALUE) {
return createFrom(source);
}
return create(source, new ReplayBufferSupplier<>(bufferSize, eagerTruncate));
}
/**
* Creates a replaying ConnectableObservable with a time bound buffer.
* @param <T> the value type
* @param source the source Flowable to use
* @param maxAge the maximum age of entries
* @param unit the unit of measure of the age amount
* @param scheduler the target scheduler providing the current time
* @param eagerTruncate if true, the head reference is refreshed to avoid unwanted item retention
* @return the new ConnectableObservable instance
*/
public static <T> ConnectableFlowable<T> create(Flowable<T> source,
long maxAge, TimeUnit unit, Scheduler scheduler, boolean eagerTruncate) {
return create(source, maxAge, unit, scheduler, Integer.MAX_VALUE, eagerTruncate);
}
/**
* Creates a replaying ConnectableObservable with a size and time bound buffer.
* @param <T> the value type
* @param source the source Flowable to use
* @param maxAge the maximum age of entries
* @param unit the unit of measure of the age amount
* @param scheduler the target scheduler providing the current time
* @param bufferSize the maximum number of elements to hold
* @param eagerTruncate if true, the head reference is refreshed to avoid unwanted item retention
* @return the new ConnectableFlowable instance
*/
public static <T> ConnectableFlowable<T> create(Flowable<T> source,
final long maxAge, final TimeUnit unit, final Scheduler scheduler, final int bufferSize, boolean eagerTruncate) {
return create(source, new ScheduledReplayBufferSupplier<>(bufferSize, maxAge, unit, scheduler, eagerTruncate));
}
/**
* Creates a OperatorReplay instance to replay values of the given source {@code Flowable}.
* @param <T> the value type
* @param source the source {@code Flowable} to use
* @param bufferFactory the factory to instantiate the appropriate buffer when the {@code Flowable} becomes active
* @return the {@code ConnectableFlowable} instance
*/
static <T> ConnectableFlowable<T> create(Flowable<T> source,
final Supplier<? extends ReplayBuffer<T>> bufferFactory) {
// the current connection to source needs to be shared between the operator and its onSubscribe call
final AtomicReference<ReplaySubscriber<T>> curr = new AtomicReference<>();
Publisher<T> onSubscribe = new ReplayPublisher<>(curr, bufferFactory);
return RxJavaPlugins.onAssembly(new FlowableReplay<>(onSubscribe, source, curr, bufferFactory));
}
private FlowableReplay(Publisher<T> onSubscribe, Flowable<T> source,
final AtomicReference<ReplaySubscriber<T>> current,
final Supplier<? extends ReplayBuffer<T>> bufferFactory) {
this.onSubscribe = onSubscribe;
this.source = source;
this.current = current;
this.bufferFactory = bufferFactory;
}
@Override
public Publisher<T> source() {
return source;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
onSubscribe.subscribe(s);
}
@Override
public void reset() {
ReplaySubscriber<T> conn = current.get();
if (conn != null && conn.isDisposed()) {
current.compareAndSet(conn, null);
}
}
@Override
public void connect(Consumer<? super Disposable> connection) {
boolean doConnect;
ReplaySubscriber<T> ps;
// we loop because concurrent connect/disconnect and termination may change the state
for (;;) {
// retrieve the current subscriber-to-source instance
ps = current.get();
// if there is none yet or the current was disposed
if (ps == null || ps.isDisposed()) {
ReplayBuffer<T> buf;
try {
buf = bufferFactory.get();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
throw ExceptionHelper.wrapOrThrow(ex);
}
// create a new subscriber-to-source
ReplaySubscriber<T> u = new ReplaySubscriber<>(buf, current);
// try setting it as the current subscriber-to-source
if (!current.compareAndSet(ps, u)) {
// did not work, perhaps a new subscriber arrived
// and created a new subscriber-to-source as well, retry
continue;
}
ps = u;
}
// if connect() was called concurrently, only one of them should actually
// connect to the source
doConnect = !ps.shouldConnect.get() && ps.shouldConnect.compareAndSet(false, true);
break; // NOPMD
}
/*
* Notify the callback that we have a (new) connection which it can dispose
* but since ps is unique to a connection, multiple calls to connect() will return the
* same Subscription and even if there was a connect-disconnect-connect pair, the older
* references won't disconnect the newer connection.
* Synchronous source consumers have the opportunity to disconnect via dispose on the
* Disposable as unsafeSubscribe may never return in its own.
*
* Note however, that asynchronously disconnecting a running source might leave
* child-subscribers without any terminal event; ReplaySubject does not have this
* issue because the cancellation was always triggered by the child-subscribers
* themselves.
*/
try {
connection.accept(ps);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
if (doConnect) {
ps.shouldConnect.compareAndSet(true, false);
}
Exceptions.throwIfFatal(ex);
throw ExceptionHelper.wrapOrThrow(ex);
}
if (doConnect) {
source.subscribe(ps);
}
}
@SuppressWarnings("rawtypes")
static final
|
FlowableReplay
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java
|
{
"start": 141200,
"end": 141282
}
|
interface ____ {
}
@DefaultOverrideMetaMetaMeta
static
|
DefaultOverrideMetaMetaMeta
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/association/Incident.java
|
{
"start": 338,
"end": 812
}
|
class ____ {
@Id
private String id;
@OneToOne(cascade = CascadeType.ALL)
private IncidentStatus incidentStatus;
public Incident() {
}
public Incident(String id) {
this.id = id;
}
public IncidentStatus getIncidentStatus() {
return incidentStatus;
}
public void setIncidentStatus(IncidentStatus incidentStatus) {
this.incidentStatus = incidentStatus;
}
@Override
public String toString() {
return "Incident: " + id + " " + incidentStatus;
}
}
|
Incident
|
java
|
grpc__grpc-java
|
core/src/main/java/io/grpc/internal/DnsNameResolver.java
|
{
"start": 2184,
"end": 10710
}
|
class ____ extends NameResolver {
private static final Logger logger = Logger.getLogger(DnsNameResolver.class.getName());
private static final String SERVICE_CONFIG_CHOICE_CLIENT_LANGUAGE_KEY = "clientLanguage";
private static final String SERVICE_CONFIG_CHOICE_PERCENTAGE_KEY = "percentage";
private static final String SERVICE_CONFIG_CHOICE_CLIENT_HOSTNAME_KEY = "clientHostname";
private static final String SERVICE_CONFIG_CHOICE_SERVICE_CONFIG_KEY = "serviceConfig";
// From https://github.com/grpc/proposal/blob/master/A2-service-configs-in-dns.md
static final String SERVICE_CONFIG_PREFIX = "grpc_config=";
private static final Set<String> SERVICE_CONFIG_CHOICE_KEYS =
Collections.unmodifiableSet(
new HashSet<>(
Arrays.asList(
SERVICE_CONFIG_CHOICE_CLIENT_LANGUAGE_KEY,
SERVICE_CONFIG_CHOICE_PERCENTAGE_KEY,
SERVICE_CONFIG_CHOICE_CLIENT_HOSTNAME_KEY,
SERVICE_CONFIG_CHOICE_SERVICE_CONFIG_KEY)));
// From https://github.com/grpc/proposal/blob/master/A2-service-configs-in-dns.md
private static final String SERVICE_CONFIG_NAME_PREFIX = "_grpc_config.";
private static final String JNDI_PROPERTY =
System.getProperty("io.grpc.internal.DnsNameResolverProvider.enable_jndi", "true");
private static final String JNDI_LOCALHOST_PROPERTY =
System.getProperty("io.grpc.internal.DnsNameResolverProvider.enable_jndi_localhost", "false");
private static final String JNDI_TXT_PROPERTY =
System.getProperty("io.grpc.internal.DnsNameResolverProvider.enable_service_config", "false");
/**
* Java networking system properties name for caching DNS result.
*
* <p>Default value is -1 (cache forever) if security manager is installed. If security manager is
* not installed, the ttl value is {@code null} which falls back to {@link
* #DEFAULT_NETWORK_CACHE_TTL_SECONDS gRPC default value}.
*
* <p>For android, gRPC doesn't attempt to cache; this property value will be ignored.
*/
@VisibleForTesting
static final String NETWORKADDRESS_CACHE_TTL_PROPERTY = "networkaddress.cache.ttl";
/** Default DNS cache duration if network cache ttl value is not specified ({@code null}). */
@VisibleForTesting
static final long DEFAULT_NETWORK_CACHE_TTL_SECONDS = 30;
@VisibleForTesting
static boolean enableJndi = Boolean.parseBoolean(JNDI_PROPERTY);
@VisibleForTesting
static boolean enableJndiLocalhost = Boolean.parseBoolean(JNDI_LOCALHOST_PROPERTY);
@VisibleForTesting
protected static boolean enableTxt = Boolean.parseBoolean(JNDI_TXT_PROPERTY);
private static final ResourceResolverFactory resourceResolverFactory =
getResourceResolverFactory(DnsNameResolver.class.getClassLoader());
@VisibleForTesting
final ProxyDetector proxyDetector;
/** Access through {@link #getLocalHostname}. */
private static String localHostname;
private final Random random = new Random();
protected volatile AddressResolver addressResolver = JdkAddressResolver.INSTANCE;
private final AtomicReference<ResourceResolver> resourceResolver = new AtomicReference<>();
private final String authority;
private final String host;
private final int port;
private final ObjectPool<Executor> executorPool;
private final long cacheTtlNanos;
private final SynchronizationContext syncContext;
private final ServiceConfigParser serviceConfigParser;
// Following fields must be accessed from syncContext
private final Stopwatch stopwatch;
protected boolean resolved;
private boolean shutdown;
private Executor executor;
private boolean resolving;
// The field must be accessed from syncContext, although the methods on an Listener2 can be called
// from any thread.
private NameResolver.Listener2 listener;
protected DnsNameResolver(
@Nullable String nsAuthority,
String name,
Args args,
Resource<Executor> executorResource,
Stopwatch stopwatch,
boolean isAndroid) {
checkNotNull(args, "args");
// TODO: if a DNS server is provided as nsAuthority, use it.
// https://www.captechconsulting.com/blogs/accessing-the-dusty-corners-of-dns-with-java
// Must prepend a "//" to the name when constructing a URI, otherwise it will be treated as an
// opaque URI, thus the authority and host of the resulted URI would be null.
URI nameUri = URI.create("//" + checkNotNull(name, "name"));
Preconditions.checkArgument(nameUri.getHost() != null, "Invalid DNS name: %s", name);
authority = Preconditions.checkNotNull(nameUri.getAuthority(),
"nameUri (%s) doesn't have an authority", nameUri);
host = nameUri.getHost();
if (nameUri.getPort() == -1) {
port = args.getDefaultPort();
} else {
port = nameUri.getPort();
}
this.proxyDetector = checkNotNull(args.getProxyDetector(), "proxyDetector");
Executor offloadExecutor = args.getOffloadExecutor();
if (offloadExecutor != null) {
this.executorPool = new FixedObjectPool<>(offloadExecutor);
} else {
this.executorPool = SharedResourcePool.forResource(executorResource);
}
this.cacheTtlNanos = getNetworkAddressCacheTtlNanos(isAndroid);
this.stopwatch = checkNotNull(stopwatch, "stopwatch");
this.syncContext = checkNotNull(args.getSynchronizationContext(), "syncContext");
this.serviceConfigParser = checkNotNull(args.getServiceConfigParser(), "serviceConfigParser");
}
@Override
public String getServiceAuthority() {
return authority;
}
@VisibleForTesting
protected String getHost() {
return host;
}
@Override
public void start(Listener2 listener) {
Preconditions.checkState(this.listener == null, "already started");
executor = executorPool.getObject();
this.listener = checkNotNull(listener, "listener");
resolve();
}
@Override
public void refresh() {
Preconditions.checkState(listener != null, "not started");
resolve();
}
private List<EquivalentAddressGroup> resolveAddresses() throws Exception {
List<? extends InetAddress> addresses = addressResolver.resolveAddress(host);
// Each address forms an EAG
List<EquivalentAddressGroup> servers = new ArrayList<>(addresses.size());
for (InetAddress inetAddr : addresses) {
servers.add(new EquivalentAddressGroup(new InetSocketAddress(inetAddr, port)));
}
return Collections.unmodifiableList(servers);
}
@Nullable
private ConfigOrError resolveServiceConfig() {
List<String> txtRecords = Collections.emptyList();
ResourceResolver resourceResolver = getResourceResolver();
if (resourceResolver != null) {
try {
txtRecords = resourceResolver.resolveTxt(SERVICE_CONFIG_NAME_PREFIX + host);
} catch (Exception e) {
logger.log(Level.FINE, "ServiceConfig resolution failure", e);
}
}
if (!txtRecords.isEmpty()) {
ConfigOrError rawServiceConfig = parseServiceConfig(txtRecords, random, getLocalHostname());
if (rawServiceConfig != null) {
if (rawServiceConfig.getError() != null) {
return ConfigOrError.fromError(rawServiceConfig.getError());
}
@SuppressWarnings("unchecked")
Map<String, ?> verifiedRawServiceConfig = (Map<String, ?>) rawServiceConfig.getConfig();
return serviceConfigParser.parseServiceConfig(verifiedRawServiceConfig);
}
} else {
logger.log(Level.FINE, "No TXT records found for {0}", new Object[]{host});
}
return null;
}
@Nullable
private EquivalentAddressGroup detectProxy() throws IOException {
InetSocketAddress destination =
InetSocketAddress.createUnresolved(host, port);
ProxiedSocketAddress proxiedAddr = proxyDetector.proxyFor(destination);
if (proxiedAddr != null) {
return new EquivalentAddressGroup(proxiedAddr);
}
return null;
}
/**
* Main logic of name resolution.
*/
protected InternalResolutionResult doResolve(boolean forceTxt) {
InternalResolutionResult result = new InternalResolutionResult();
try {
result.addresses = resolveAddresses();
} catch (Exception e) {
logger.log(Level.FINE, "Address resolution failure", e);
if (!forceTxt) {
result.error =
Status.UNAVAILABLE.withDescription("Unable to resolve host " + host).withCause(e);
return result;
}
}
if (enableTxt) {
result.config = resolveServiceConfig();
}
return result;
}
private final
|
DnsNameResolver
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
|
{
"start": 4201,
"end": 5364
}
|
class ____<T extends ContainerRequest> extends AMRMClient<T> {
private static final Logger LOG =
LoggerFactory.getLogger(AMRMClientImpl.class);
private static final List<String> ANY_LIST =
Collections.singletonList(ResourceRequest.ANY);
private int lastResponseId = 0;
protected String appHostName;
protected int appHostPort;
protected String appTrackingUrl;
protected String newTrackingUrl;
protected ApplicationMasterProtocol rmClient;
protected Resource clusterAvailableResources;
protected int clusterNodeCount;
// blacklistedNodes is required for keeping history of blacklisted nodes that
// are sent to RM. On RESYNC command from RM, blacklistedNodes are used to get
// current blacklisted nodes and send back to RM.
protected final Set<String> blacklistedNodes = new HashSet<String>();
protected final Set<String> blacklistAdditions = new HashSet<String>();
protected final Set<String> blacklistRemovals = new HashSet<String>();
private Map<Set<String>, PlacementConstraint> placementConstraints =
new HashMap<>();
protected Map<String, Resource> resourceProfilesMap;
static
|
AMRMClientImpl
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/builditem/DevServicesRegistryBuildItem.java
|
{
"start": 1241,
"end": 4889
}
|
class ____ extends SimpleBuildItem {
private static final Logger log = Logger.getLogger(DevServicesRegistryBuildItem.class);
private final UUID uuid;
private final DevServicesConfig globalConfig;
private final LaunchMode launchMode;
public DevServicesRegistryBuildItem(UUID uuid, DevServicesConfig devServicesConfig, LaunchMode launchMode) {
this.launchMode = launchMode;
this.uuid = uuid;
this.globalConfig = devServicesConfig;
}
public RunningService getRunningServices(String featureName, String configName, Object identifyingConfig) {
DevServiceOwner owner = new DevServiceOwner(featureName, launchMode.name(), configName);
ComparableDevServicesConfig key = new ComparableDevServicesConfig(uuid, owner, globalConfig, identifyingConfig);
return RunningDevServicesRegistry.INSTANCE.getRunningServices(key);
}
public void addRunningService(String featureName, String configName, Object identifyingConfig,
RunningService service) {
DevServiceOwner owner = new DevServiceOwner(featureName, launchMode.name(), configName);
ComparableDevServicesConfig key = new ComparableDevServicesConfig(uuid, owner, globalConfig, identifyingConfig);
RunningDevServicesRegistry.INSTANCE.addRunningService(key, service);
}
public void closeAllRunningServices(String featureName, String configName) {
DevServiceOwner owner = new DevServiceOwner(featureName, launchMode.name(), configName);
RunningDevServicesRegistry.INSTANCE.closeAllRunningServices(owner);
}
public void closeAllRunningServices() {
RunningDevServicesRegistry.INSTANCE.closeAllRunningServices(launchMode.name());
}
public void closeRemainingRunningServices(Collection<DevServicesResultBuildItem> services) {
Set<DevServiceOwner> ownersToKeep = services.stream()
.map(s -> new DevServiceOwner(s.getName(), launchMode.name(), s.getServiceName()))
.collect(Collectors.toSet());
RunningDevServicesRegistry.INSTANCE.closeRemainingRunningServices(uuid, launchMode.name(), ownersToKeep);
}
public Map<String, String> getConfigForAllRunningServices() {
Map<String, String> config = new HashMap<>();
for (RunningService service : RunningDevServicesRegistry.INSTANCE.getAllRunningServices(launchMode.name())) {
config.putAll(service.configs());
}
return config;
}
public void startAll(Collection<DevServicesResultBuildItem> services,
List<DevServicesCustomizerBuildItem> customizers,
ClassLoader augmentClassLoader) {
closeRemainingRunningServices(services);
CompletableFuture.allOf(services.stream()
.filter(DevServicesResultBuildItem::isStartable)
.map(serv -> CompletableFuture.runAsync(() -> {
// We need to set the context classloader to the augment classloader, so that the dev services can be started with the right classloader
if (augmentClassLoader != null) {
Thread.currentThread().setContextClassLoader(augmentClassLoader);
} else {
Thread.currentThread().setContextClassLoader(serv.getClass().getClassLoader());
}
this.start(serv, customizers);
}))
.toArray(CompletableFuture[]::new)).join();
}
public void start(DevServicesResultBuildItem request, List<DevServicesCustomizerBuildItem> customizers) {
// RunningService
|
DevServicesRegistryBuildItem
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mappedBy/OneToManyMappedByTypeTest.java
|
{
"start": 3637,
"end": 3825
}
|
class ____ {
@Id
private Long id;
@ManyToOne
private EntityACorrect parent;
}
@Entity( name = "EntityA2Correct" )
@Table( name = "entity_a_correct" )
public static
|
EntityBCorrect
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/UndertowBasicAuthHandler.java
|
{
"start": 1530,
"end": 2909
}
|
class ____ implements CamelUndertowHttpHandler {
private HttpHandler next;
private HttpHandler securityHandler;
private IdentityManager identityManager;
@Override
public void handleRequest(HttpServerExchange exchange) throws Exception {
if (identityManager == null) {
buildIdMgr();
}
if (securityHandler == null) {
buildSecurityHandler();
}
this.securityHandler.handleRequest(exchange);
}
private void buildSecurityHandler() {
HttpHandler handler = this.next;
handler = new AuthenticationCallHandler(handler);
handler = new AuthenticationConstraintHandler(handler);
final List<AuthenticationMechanism> mechanisms
= Collections.<AuthenticationMechanism> singletonList(new BasicAuthenticationMechanism("My Realm"));
handler = new AuthenticationMechanismsHandler(handler, mechanisms);
this.securityHandler = new SecurityInitialHandler(AuthenticationMode.PRO_ACTIVE, identityManager, handler);
}
private void buildIdMgr() {
final Map<String, char[]> users = new HashMap<>(1);
users.put("guest", "secret".toCharArray());
identityManager = new MapIdentityManager(users);
}
public void setNext(HttpHandler nextHandler) {
this.next = nextHandler;
}
}
|
UndertowBasicAuthHandler
|
java
|
spring-projects__spring-security
|
ldap/src/test/java/org/springframework/security/ldap/aot/hint/LdapSecurityRuntimeHintsTests.java
|
{
"start": 1242,
"end": 1907
}
|
class ____ {
private final RuntimeHints hints = new RuntimeHints();
@BeforeEach
void setup() {
SpringFactoriesLoader.forResourceLocation("META-INF/spring/aot.factories")
.load(RuntimeHintsRegistrar.class)
.forEach((registrar) -> registrar.registerHints(this.hints, ClassUtils.getDefaultClassLoader()));
}
@Test
void ldifResourcesHasHints() {
assertThat(RuntimeHintsPredicates.resource().forResource("users.ldif")).accepts(this.hints);
}
@Test
void ldapCtxFactoryHasHints() {
assertThat(RuntimeHintsPredicates.reflection().onType(TypeReference.of("com.sun.jndi.ldap.LdapCtxFactory")))
.accepts(this.hints);
}
}
|
LdapSecurityRuntimeHintsTests
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/cluster/topology/ClusterTopologyRefreshUnitTests.java
|
{
"start": 2963,
"end": 28820
}
|
class ____ {
private static final String NODE_1_VIEW = "1 127.0.0.1:7380 master,myself - 0 1401258245007 2 disconnected 8000-11999\n"
+ "2 127.0.0.1:7381 master - 111 1401258245007 222 connected 7000 12000 12002-16383\n";
private static final String NODE_2_VIEW = "1 127.0.0.1:7380 master - 0 1401258245007 2 disconnected 8000-11999\n"
+ "2 127.0.0.1:7381 master,myself - 111 1401258245007 222 connected 7000 12000 12002-16383\n";
private DefaultClusterTopologyRefresh sut;
@Mock
private RedisClusterClient client;
@Mock
private StatefulRedisConnection<String, String> connection;
@Mock
private ClientResources clientResources;
@Mock
private NodeConnectionFactory nodeConnectionFactory;
@Mock
private StatefulRedisConnection<String, String> connection1;
@Mock
private RedisAsyncCommands<String, String> asyncCommands1;
@Mock
private StatefulRedisConnection<String, String> connection2;
@Mock
private RedisAsyncCommands<String, String> asyncCommands2;
@Mock
private EventExecutorGroup eventExecutors;
@BeforeEach
void before() {
io.netty.util.Timer timer = mock(io.netty.util.Timer.class);
when(timer.newTimeout(any(), anyLong(), any())).thenReturn(mock(Timeout.class));
when(clientResources.timer()).thenReturn(timer);
when(clientResources.socketAddressResolver()).thenReturn(SocketAddressResolver.create(DnsResolver.unresolved()));
when(clientResources.eventExecutorGroup()).thenReturn(eventExecutors);
doAnswer(invocation -> {
((Runnable) invocation.getArgument(0)).run();
return null;
}).when(eventExecutors).execute(any(Runnable.class));
when(connection1.async()).thenReturn(asyncCommands1);
when(connection2.async()).thenReturn(asyncCommands2);
when(connection1.closeAsync()).thenReturn(CompletableFuture.completedFuture(null));
when(connection2.closeAsync()).thenReturn(CompletableFuture.completedFuture(null));
when(connection1.dispatch(any(RedisCommand.class))).thenAnswer(invocation -> {
TimedAsyncCommand command = (TimedAsyncCommand) invocation.getArguments()[0];
if (command.getType() == CommandType.CLUSTER) {
command.getOutput().set(ByteBuffer.wrap(NODE_1_VIEW.getBytes()));
command.complete();
}
if (command.getType() == CommandType.CLIENT) {
command.getOutput().set(ByteBuffer.wrap("c1\nc2\n".getBytes()));
command.complete();
}
if (command.getType() == CommandType.INFO) {
command.getOutput().set(ByteBuffer.wrap(
"# Clients\nconnected_clients:2\nclient_longest_output_list:0\nclient_biggest_input_buf:0\nblocked_clients:0"
.getBytes()));
command.complete();
}
command.encodedAtNs = 10;
command.completedAtNs = 50;
return command;
});
when(connection2.dispatch(any(RedisCommand.class))).thenAnswer(invocation -> {
TimedAsyncCommand command = (TimedAsyncCommand) invocation.getArguments()[0];
if (command.getType() == CommandType.CLUSTER) {
command.getOutput().set(ByteBuffer.wrap(NODE_2_VIEW.getBytes()));
command.complete();
}
if (command.getType() == CommandType.CLIENT) {
command.getOutput().set(ByteBuffer.wrap("".getBytes()));
command.complete();
}
if (command.getType() == CommandType.INFO) {
command.getOutput().set(ByteBuffer.wrap(
"# Clients\nconnected_clients:2\nclient_longest_output_list:0\nclient_biggest_input_buf:0\nblocked_clients:0"
.getBytes()));
command.complete();
}
command.encodedAtNs = 10;
command.completedAtNs = 20;
return command;
});
sut = new DefaultClusterTopologyRefresh(nodeConnectionFactory, clientResources);
}
@Test
void getNodeTopologyView() throws Exception {
Requests requestedTopology = createClusterNodesRequests(1, NODE_1_VIEW);
Requests requestedClients = createClientListRequests(1,
"# Clients\r\nconnected_clients:2438\r\nclient_longest_output_list:0\r\nclient_biggest_input_buf:0\r\nblocked_clients:0");
RedisURI redisURI = RedisURI.create("redis://localhost:1");
NodeTopologyView nodeTopologyView = NodeTopologyView.from(redisURI, requestedTopology, requestedClients);
assertThat(nodeTopologyView.getConnectedClients()).isEqualTo(2438);
}
@Test
void getNodeSpecificViewsNode1IsFasterThanNode2() throws Exception {
Requests requests = createClusterNodesRequests(1, NODE_1_VIEW);
requests = createClusterNodesRequests(2, NODE_2_VIEW).mergeWith(requests);
Requests clientRequests = createClientListRequests(1, "c1\nc2\n").mergeWith(createClientListRequests(2, "c1\nc2\n"));
NodeTopologyViews nodeSpecificViews = sut.getNodeSpecificViews(requests, clientRequests);
Collection<Partitions> values = nodeSpecificViews.toMap().values();
assertThat(values).hasSize(2);
for (Partitions value : values) {
assertThat(value).extracting("nodeId").containsSequence("1", "2");
}
}
@Test
void shouldNotRequestTopologyIfExecutorShutsDown() {
when(eventExecutors.isShuttingDown()).thenReturn(true);
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verifyNoInteractions(nodeConnectionFactory);
}
@Test
void partitionsReturnedAsReported() throws Exception {
System.setProperty("io.lettuce.core.topology.sort", "none");
String NODE_1_VIEW = "2 127.0.0.1:7381 master - 111 1401258245007 222 connected 7000 12000 12002-16383\n"
+ "1 127.0.0.1:7380 master,myself - 0 1401258245007 2 disconnected 8000-11999\n";
String NODE_2_VIEW = "2 127.0.0.1:7381 master,myself - 111 1401258245007 222 connected 7000 12000 12002-16383\n"
+ "1 127.0.0.1:7380 master - 0 1401258245007 2 disconnected 8000-11999\n";
Requests requests = createClusterNodesRequests(1, NODE_1_VIEW);
requests = createClusterNodesRequests(2, NODE_2_VIEW).mergeWith(requests);
Requests clientRequests = createClientListRequests(1, "c1\nc2\n").mergeWith(createClientListRequests(2, "c1\nc2\n"));
NodeTopologyViews nodeSpecificViews = sut.getNodeSpecificViews(requests, clientRequests);
Collection<Partitions> values = nodeSpecificViews.toMap().values();
assertThat(values).hasSize(2);
for (Partitions value : values) {
assertThat(value).extracting("nodeId").containsSequence("2", "1");
}
System.getProperties().remove("io.lettuce.core.topology.sort");
}
@Test
void getNodeSpecificViewTestingNoAddrFilter() throws Exception {
String nodes1 = "n1 10.37.110.63:7000 slave n3 0 1452553664848 43 connected\n"
+ "n2 10.37.110.68:7000 slave n6 0 1452553664346 45 connected\n"
+ "badSlave :0 slave,fail,noaddr n5 1449160058028 1449160053146 46 disconnected\n"
+ "n3 10.37.110.69:7000 master - 0 1452553662842 43 connected 3829-6787 7997-9999\n"
+ "n4 10.37.110.62:7000 slave n3 0 1452553663844 43 connected\n"
+ "n5 10.37.110.70:7000 myself,master - 0 0 46 connected 10039-14999\n"
+ "n6 10.37.110.65:7000 master - 0 1452553663844 45 connected 0-3828 6788-7996 10000-10038 15000-16383";
Requests clusterNodesRequests = createClusterNodesRequests(1, nodes1);
Requests clientRequests = createClientListRequests(1,
"# Clients\r\nconnected_clients:2\r\nclient_longest_output_list:0\r\nclient_biggest_input_buf:0\r\nblocked_clients:0");
NodeTopologyViews nodeSpecificViews = sut.getNodeSpecificViews(clusterNodesRequests, clientRequests);
List<Partitions> values = new ArrayList<>(nodeSpecificViews.toMap().values());
assertThat(values).hasSize(1);
for (Partitions value : values) {
assertThat(value).extracting("nodeId").containsOnly("n1", "n2", "n3", "n4", "n5", "n6");
}
RedisClusterNodeSnapshot firstPartition = (RedisClusterNodeSnapshot) values.get(0).getPartition(0);
RedisClusterNodeSnapshot selfPartition = (RedisClusterNodeSnapshot) values.get(0).getPartition(4);
assertThat(firstPartition.getConnectedClients()).isEqualTo(2);
assertThat(selfPartition.getConnectedClients()).isNull();
}
@Test
void getNodeSpecificViewsNode2IsFasterThanNode1() {
Requests clusterNodesRequests = createClusterNodesRequests(5, NODE_1_VIEW);
clusterNodesRequests = createClusterNodesRequests(1, NODE_2_VIEW).mergeWith(clusterNodesRequests);
Requests clientRequests = createClientListRequests(5, "c1\nc2\n").mergeWith(createClientListRequests(1, "c1\nc2\n"));
NodeTopologyViews nodeSpecificViews = sut.getNodeSpecificViews(clusterNodesRequests, clientRequests);
List<Partitions> values = new ArrayList<>(nodeSpecificViews.toMap().values());
assertThat(values).hasSize(2);
for (Partitions value : values) {
assertThat(value).extracting("nodeId").containsExactly("2", "1");
}
}
@Test
void shouldAttemptToConnectOnlyOnce() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedWithException(new RedisException("connection failed")));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381)));
}
@Test
void shouldNotConnectToSeedNodeTwice() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("foobar", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("foobar", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("foobar", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381)));
}
@Test
void shouldFailIfNoNodeConnects() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedWithException(new RedisException("connection failed")));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedWithException(new RedisException("connection failed")));
try {
sut.loadViews(seed, Duration.ofSeconds(1), true).toCompletableFuture().join();
fail("Missing RedisConnectionException");
} catch (Exception e) {
assertThat(e.getCause()).hasMessageStartingWith("Cannot retrieve cluster partitions from ");
assertThat(e.getCause().getSuppressed()).hasSize(2);
}
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381)));
}
@Test
void shouldShouldDiscoverNodes() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381)));
}
@Test
void shouldShouldNotDiscoverNodes() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
sut.loadViews(seed, Duration.ofSeconds(1), false);
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verifyNoMoreInteractions(nodeConnectionFactory);
}
@Test
void shouldNotFailOnDuplicateSeedNodes() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381),
RedisURI.create("127.0.0.1", 7381));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380)));
verify(nodeConnectionFactory).connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381)));
}
@Test
void shouldCloseConnections() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
sut.loadViews(seed, Duration.ofSeconds(1), true);
verify(connection1).closeAsync();
verify(connection2).closeAsync();
}
/**
* @see <a href="https://github.com/redis/lettuce/issues/3240">Issue link</a>
*/
@Test
@org.junit.jupiter.api.Timeout(value = 5, unit = TimeUnit.SECONDS, threadMode = SEPARATE_THREAD)
void shouldHandleInvalidUrisWithoutDeadlock() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("redis://localhost:$(INVALID_DATA):CONFIG"),
RedisURI.create("redis://localhost:$(INVALID_DATA):CONFIG"));
CompletionException completionException = Assertions.assertThrows(CompletionException.class,
() -> sut.loadViews(seed, Duration.ofSeconds(1), true).toCompletableFuture().join());
assertThat(completionException)
.hasRootCauseInstanceOf(DefaultClusterTopologyRefresh.CannotRetrieveClusterPartitions.class);
}
@Test
void undiscoveredAdditionalNodesShouldBeLastUsingClientCount() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
Map<RedisURI, Partitions> partitionsMap = sut.loadViews(seed, Duration.ofSeconds(1), false).toCompletableFuture()
.join();
Partitions partitions = partitionsMap.values().iterator().next();
List<RedisClusterNode> nodes = TopologyComparators.sortByClientCount(partitions);
assertThat(nodes).hasSize(2).extracting(RedisClusterNode::getUri).containsSequence(seed.get(0),
RedisURI.create("127.0.0.1", 7381));
}
@Test
void discoveredAdditionalNodesShouldBeOrderedUsingClientCount() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
Map<RedisURI, Partitions> partitionsMap = sut.loadViews(seed, Duration.ofSeconds(1), true).toCompletableFuture().join();
Partitions partitions = partitionsMap.values().iterator().next();
List<RedisClusterNode> nodes = TopologyComparators.sortByClientCount(partitions);
assertThat(nodes).hasSize(2).extracting(RedisClusterNode::getUri).contains(RedisURI.create("127.0.0.1", 7381),
seed.get(0));
}
@Test
void undiscoveredAdditionalNodesShouldBeLastUsingLatency() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
Map<RedisURI, Partitions> partitionsMap = sut.loadViews(seed, Duration.ofSeconds(1), false).toCompletableFuture()
.join();
Partitions partitions = partitionsMap.values().iterator().next();
List<RedisClusterNode> nodes = TopologyComparators.sortByLatency(partitions);
assertThat(nodes).hasSize(2).extracting(RedisClusterNode::getUri).containsSequence(seed.get(0),
RedisURI.create("127.0.0.1", 7381));
}
@Test
void discoveredAdditionalNodesShouldBeOrderedUsingLatency() {
List<RedisURI> seed = Collections.singletonList(RedisURI.create("127.0.0.1", 7380));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
Map<RedisURI, Partitions> partitionsMap = sut.loadViews(seed, Duration.ofSeconds(1), true).toCompletableFuture().join();
Partitions partitions = partitionsMap.values().iterator().next();
List<RedisClusterNode> nodes = TopologyComparators.sortByLatency(partitions);
assertThat(nodes).hasSize(2).extracting(RedisClusterNode::getUri).contains(RedisURI.create("127.0.0.1", 7381),
seed.get(0));
}
@Test
void shouldPropagateCommandFailures() {
List<RedisURI> seed = Arrays.asList(RedisURI.create("127.0.0.1", 7380), RedisURI.create("127.0.0.1", 7381));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7380))))
.thenReturn(completedFuture((StatefulRedisConnection) connection1));
when(nodeConnectionFactory.connectToNodeAsync(any(RedisCodec.class),
eq(InetSocketAddress.createUnresolved("127.0.0.1", 7381))))
.thenReturn(completedFuture((StatefulRedisConnection) connection2));
reset(connection1, connection2);
when(connection1.async()).thenReturn(asyncCommands1);
when(connection2.async()).thenReturn(asyncCommands2);
when(connection1.closeAsync()).thenReturn(CompletableFuture.completedFuture(null));
when(connection2.closeAsync()).thenReturn(CompletableFuture.completedFuture(null));
when(connection1.dispatch(any(RedisCommand.class))).thenAnswer(invocation -> {
TimedAsyncCommand command = invocation.getArgument(0);
command.completeExceptionally(new RedisException("AUTH"));
return command;
});
RedisException nestedException = new RedisException("NESTED");
when(connection2.dispatch(any(RedisCommand.class))).thenAnswer(invocation -> {
TimedAsyncCommand command = invocation.getArgument(0);
command.completeExceptionally(nestedException);
return command;
});
CompletionStage<Map<RedisURI, Partitions>> actual = sut.loadViews(seed, Duration.ofSeconds(1), true);
assertThat(actual).isCompletedExceptionally();
try {
actual.toCompletableFuture().join();
fail("Missing CompletionException");
} catch (CompletionException e) {
assertThat(e.getCause()).hasSuppressedException(nestedException);
}
}
Requests createClusterNodesRequests(int duration, String nodes) {
RedisURI redisURI = RedisURI.create("redis://localhost:" + duration);
Connections connections = new Connections(clientResources, new HashMap<>());
connections.addConnection(redisURI, connection);
Requests requests = connections.requestTopology(100, TimeUnit.SECONDS);
TimedAsyncCommand<String, String, String> command = requests.getRequest(redisURI);
command.getOutput().set(ByteBuffer.wrap(nodes.getBytes()));
command.complete();
command.encodedAtNs = 0;
command.completedAtNs = duration;
return requests;
}
Requests createClientListRequests(int duration, String response) {
RedisURI redisURI = RedisURI.create("redis://localhost:" + duration);
Connections connections = new Connections(clientResources, new HashMap<>());
connections.addConnection(redisURI, connection);
Requests requests = connections.requestTopology(100, TimeUnit.SECONDS);
TimedAsyncCommand<String, String, String> command = requests.getRequest(redisURI);
command.getOutput().set(ByteBuffer.wrap(response.getBytes()));
command.complete();
return requests;
}
private static <T> ConnectionFuture<T> completedFuture(T value) {
return ConnectionFuture.from(InetSocketAddress.createUnresolved(TestSettings.host(), TestSettings.port()),
CompletableFuture.completedFuture(value));
}
private static <T> ConnectionFuture<T> completedWithException(Exception e) {
CompletableFuture<T> future = new CompletableFuture<>();
future.completeExceptionally(e);
return ConnectionFuture.from(InetSocketAddress.createUnresolved(TestSettings.host(), TestSettings.port()), future);
}
}
|
ClusterTopologyRefreshUnitTests
|
java
|
google__truth
|
extensions/proto/src/main/java/com/google/common/truth/extensions/proto/UnknownFieldDescriptor.java
|
{
"start": 859,
"end": 939
}
|
class ____ type information for unknown fields. */
@AutoValue
abstract
|
encapsulating
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/BugTest2.java
|
{
"start": 154,
"end": 405
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
JSONObject obj = new JSONObject();
obj.put("a", new A());
String text = obj.toString();
System.out.println(text);
}
public static
|
BugTest2
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/AsyncLookupJoinHarnessTest.java
|
{
"start": 15664,
"end": 18017
}
|
class ____ extends AbstractRichFunction
implements AsyncFunction<RowData, RowData> {
private static final long serialVersionUID = 4018474964018227081L;
private static final Map<Integer, List<RowData>> data = new HashMap<>();
private final Random random = new Random();
static {
data.put(1, Collections.singletonList(GenericRowData.of(1, fromString("Julian"))));
data.put(
3,
Arrays.asList(
GenericRowData.of(3, fromString("Jark")),
GenericRowData.of(3, fromString("Jackson"))));
data.put(4, Collections.singletonList(GenericRowData.of(4, fromString("Fabian"))));
}
private transient ExecutorService executor;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
// generate unordered result for async lookup
this.executor = Executors.newFixedThreadPool(2);
}
@Override
public void asyncInvoke(RowData input, ResultFuture<RowData> resultFuture)
throws Exception {
int id = input.getInt(0);
CompletableFuture.supplyAsync(
(Supplier<Collection<RowData>>)
() -> {
try {
Thread.sleep(random.nextInt(5));
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return data.get(id);
},
executor)
.thenAcceptAsync(resultFuture::complete, executor);
}
@Override
public void close() throws Exception {
super.close();
if (null != executor && !executor.isShutdown()) {
executor.shutdown();
}
}
}
/**
* The {@link TestingFetcherResultFuture} is a simple implementation of {@link
* TableFunctionCollector} which forwards the collected collection.
*/
public static final
|
TestingFetcherFunction
|
java
|
resilience4j__resilience4j
|
resilience4j-ratelimiter/src/test/java/io/github/resilience4j/ratelimiter/internal/SemaphoreBasedRateLimiterImplTest.java
|
{
"start": 1686,
"end": 12413
}
|
class ____ extends RateLimitersImplementationTest {
private static final int LIMIT = 2;
private static final Duration TIMEOUT = Duration.ofMillis(100);
private static final Duration REFRESH_PERIOD = Duration.ofMillis(100);
private static final String CONFIG_MUST_NOT_BE_NULL = "Config must not be null";
private static final String NAME_MUST_NOT_BE_NULL = "Name must not be null";
private static final Object O = new Object();
@Rule
public ExpectedException exception = ExpectedException.none();
private RateLimiterConfig config;
private static ConditionFactory awaitImpatiently() {
return await()
.pollDelay(1, TimeUnit.MICROSECONDS)
.pollInterval(2, TimeUnit.MILLISECONDS);
}
@Override
protected RateLimiter buildRateLimiter(RateLimiterConfig config) {
return new SemaphoreBasedRateLimiter("test", config, Executors.newScheduledThreadPool(1));
}
@Before
public void init() {
config = RateLimiterConfig.custom()
.timeoutDuration(TIMEOUT)
.limitRefreshPeriod(REFRESH_PERIOD)
.limitForPeriod(LIMIT)
.build();
}
@Test
public void rateLimiterCreationWithProvidedScheduler() throws Exception {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiterConfig configSpy = spy(config);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", configSpy,
scheduledExecutorService);
ArgumentCaptor<Runnable> refreshLimitRunnableCaptor = ArgumentCaptor
.forClass(Runnable.class);
verify(scheduledExecutorService)
.scheduleAtFixedRate(
refreshLimitRunnableCaptor.capture(),
eq(config.getLimitRefreshPeriod().toNanos()),
eq(config.getLimitRefreshPeriod().toNanos()),
eq(TimeUnit.NANOSECONDS)
);
Runnable refreshLimitRunnable = refreshLimitRunnableCaptor.getValue();
then(limit.acquirePermission()).isTrue();
then(limit.acquirePermission()).isTrue();
then(limit.acquirePermission()).isFalse();
Thread.sleep(REFRESH_PERIOD.toMillis() * 2);
verify(configSpy, times(1)).getLimitForPeriod();
refreshLimitRunnable.run();
verify(configSpy, times(2)).getLimitForPeriod();
then(limit.acquirePermission()).isTrue();
then(limit.acquirePermission()).isTrue();
then(limit.acquirePermission()).isFalse();
}
@Test
public void acquirePermissionAndMetrics() throws Exception {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiterConfig configSpy = spy(config);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", configSpy,
scheduledExecutorService);
RateLimiter.Metrics detailedMetrics = limit.getMetrics();
SynchronousQueue<Object> synchronousQueue = new SynchronousQueue<>();
Thread thread = new Thread(() -> run(() -> {
for (int i = 0; i < LIMIT; i++) {
synchronousQueue.put(O);
limit.acquirePermission();
}
limit.acquirePermission();
}));
thread.setDaemon(true);
thread.start();
for (int i = 0; i < LIMIT; i++) {
synchronousQueue.take();
}
awaitImpatiently()
.atMost(100, TimeUnit.MILLISECONDS)
.until(detailedMetrics::getAvailablePermissions, equalTo(0));
awaitImpatiently()
.atMost(2, TimeUnit.SECONDS).until(thread::getState, equalTo(TIMED_WAITING));
then(detailedMetrics.getAvailablePermissions()).isZero();
limit.refreshLimit();
awaitImpatiently()
.atMost(100, TimeUnit.MILLISECONDS)
.until(detailedMetrics::getAvailablePermissions, equalTo(1));
awaitImpatiently()
.atMost(2, TimeUnit.SECONDS).until(thread::getState, equalTo(TERMINATED));
then(detailedMetrics.getAvailablePermissions()).isEqualTo(1);
limit.changeLimitForPeriod(3);
limit.refreshLimit();
then(detailedMetrics.getAvailablePermissions()).isEqualTo(3);
}
@Test
public void changeDefaultTimeoutDuration() {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiter rateLimiter = new SemaphoreBasedRateLimiter("some", config,
scheduledExecutorService);
RateLimiterConfig rateLimiterConfig = rateLimiter.getRateLimiterConfig();
then(rateLimiterConfig.getTimeoutDuration()).isEqualTo(TIMEOUT);
then(rateLimiterConfig.getLimitForPeriod()).isEqualTo(LIMIT);
then(rateLimiterConfig.getLimitRefreshPeriod()).isEqualTo(REFRESH_PERIOD);
rateLimiter.changeTimeoutDuration(Duration.ofSeconds(1));
then(rateLimiterConfig != rateLimiter.getRateLimiterConfig()).isTrue();
rateLimiterConfig = rateLimiter.getRateLimiterConfig();
then(rateLimiterConfig.getTimeoutDuration()).isEqualTo(Duration.ofSeconds(1));
then(rateLimiterConfig.getLimitForPeriod()).isEqualTo(LIMIT);
then(rateLimiterConfig.getLimitRefreshPeriod()).isEqualTo(REFRESH_PERIOD);
}
@Test
public void changeLimitForPeriod() {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiter rateLimiter = new SemaphoreBasedRateLimiter("some", config,
scheduledExecutorService);
RateLimiterConfig rateLimiterConfig = rateLimiter.getRateLimiterConfig();
then(rateLimiterConfig.getTimeoutDuration()).isEqualTo(TIMEOUT);
then(rateLimiterConfig.getLimitForPeriod()).isEqualTo(LIMIT);
then(rateLimiterConfig.getLimitRefreshPeriod()).isEqualTo(REFRESH_PERIOD);
rateLimiter.changeLimitForPeriod(LIMIT * 2);
then(rateLimiterConfig != rateLimiter.getRateLimiterConfig()).isTrue();
rateLimiterConfig = rateLimiter.getRateLimiterConfig();
then(rateLimiterConfig.getTimeoutDuration()).isEqualTo(TIMEOUT);
then(rateLimiterConfig.getLimitForPeriod()).isEqualTo(LIMIT * 2);
then(rateLimiterConfig.getLimitRefreshPeriod()).isEqualTo(REFRESH_PERIOD);
}
@Test
public void acquirePermissionInterruption() {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiterConfig configSpy = spy(config);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", configSpy,
scheduledExecutorService);
assertThat(limit.getName()).isEqualTo("test");
limit.acquirePermission();
limit.acquirePermission();
Thread thread = new Thread(() -> {
limit.acquirePermission();
while (true) {
Function.identity().apply(1);
}
});
thread.setDaemon(true);
thread.start();
awaitImpatiently()
.atMost(2, TimeUnit.SECONDS).until(thread::getState, equalTo(TIMED_WAITING));
thread.interrupt();
awaitImpatiently()
.atMost(2, TimeUnit.SECONDS).until(thread::getState, equalTo(RUNNABLE));
awaitImpatiently()
.atMost(100, TimeUnit.MILLISECONDS).until(thread::isInterrupted);
}
@Test
public void getName() {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", config, scheduler);
then(limit.getName()).isEqualTo("test");
}
@Test
public void getMetrics() {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", config, scheduler);
RateLimiter.Metrics metrics = limit.getMetrics();
then(metrics.getNumberOfWaitingThreads()).isZero();
}
@Test
public void getRateLimiterConfig() {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", config, scheduler);
then(limit.getRateLimiterConfig()).isEqualTo(config);
}
@Test
public void isUpperLimitedForPermissions() {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", config, scheduler);
RateLimiter.Metrics metrics = limit.getMetrics();
then(metrics.getAvailablePermissions()).isEqualTo(2);
limit.refreshLimit();
then(metrics.getAvailablePermissions()).isEqualTo(2);
}
@Test
public void getDetailedMetrics() {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", config, scheduler);
RateLimiter.Metrics metrics = limit.getMetrics();
then(metrics.getNumberOfWaitingThreads()).isZero();
then(metrics.getAvailablePermissions()).isEqualTo(2);
}
@Test
public void constructionWithNullName() {
exception.expect(NullPointerException.class);
exception.expectMessage(NAME_MUST_NOT_BE_NULL);
new SemaphoreBasedRateLimiter(null, config, (ScheduledExecutorService) null);
}
@Test
public void constructionWithNullConfig() {
exception.expect(NullPointerException.class);
exception.expectMessage(CONFIG_MUST_NOT_BE_NULL);
new SemaphoreBasedRateLimiter("test", null, (ScheduledExecutorService) null);
}
@Test
public void shutdownRateLimiter() throws InterruptedException {
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
RateLimiterConfig configSpy = spy(config);
ScheduledFuture<?> future = mock(ScheduledFuture.class);
doReturn(future).when(scheduledExecutorService).scheduleAtFixedRate(any(Runnable.class), any(Long.class), any(Long.class),
any(TimeUnit.class));
SemaphoreBasedRateLimiter limit = new SemaphoreBasedRateLimiter("test", configSpy,
scheduledExecutorService);
then(limit.acquirePermission(1)).isTrue();
then(limit.acquirePermission(1)).isTrue();
then(limit.acquirePermission(1)).isFalse();
limit.shutdown();
Thread.sleep(REFRESH_PERIOD.toMillis() * 2);
verify(future, times(1)).isCancelled();
then(limit.acquirePermission(1)).isFalse();
}
}
|
SemaphoreBasedRateLimiterImplTest
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-it/src/test/java/org/apache/camel/dsl/jbang/it/CustomJarsITCase.java
|
{
"start": 1018,
"end": 1889
}
|
class ____ extends JBangTestSupport {
@Test
public void testCustomJars() throws IOException {
copyResourceInDataFolder(TestResources.CIRCUIT_BREAKER);
Assertions
.assertThatCode(() -> execute(String.format("run %s/CircuitBreakerRoute.java --dep=camel-timer", mountPoint())))
.as("the application without dependency will cause error")
.hasStackTraceContaining("Failed to create route: circuitBreaker")
.hasStackTraceContaining(
"Cannot find camel-resilience4j or camel-microprofile-fault-tolerance on the classpath.");
executeBackground(String.format("run %s/CircuitBreakerRoute.java --dep=camel-timer,camel-resilience4j", mountPoint()));
checkLogContains("timer called");
checkLogContains("Fallback message", 10);
}
}
|
CustomJarsITCase
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/parameters/converters/RuntimeResolvedConverter.java
|
{
"start": 396,
"end": 1721
}
|
class ____ implements ParameterConverter {
private volatile ParamConverter<?> runtimeConverter;
private final ParameterConverter quarkusConverter;
public RuntimeResolvedConverter(ParameterConverter quarkusConverter) {
this.quarkusConverter = quarkusConverter;
}
@Override
public Object convert(Object parameter) {
if (runtimeConverter != null)
return runtimeConverter.fromString(parameter != null ? parameter.toString() : null);
return quarkusConverter.convert(parameter);
}
@Override
public void init(ParamConverterProviders deployment, Class<?> rawType, Type genericType, Annotation[] annotations) {
for (ResourceParamConverterProvider i : deployment.getParamConverterProviders()) {
ParamConverterProvider instance = i.getFactory().createInstance().getInstance();
ParamConverter<?> converter = instance.getConverter(rawType, genericType, annotations);
if (converter != null) {
this.runtimeConverter = converter;
break;
}
}
if (runtimeConverter == null && quarkusConverter == null) {
throw new RuntimeException("Unable to create param converter for parameter " + genericType);
}
}
public static
|
RuntimeResolvedConverter
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/util/Loader.java
|
{
"start": 5338,
"end": 5752
}
|
class ____. Hence the
// code below.
LOGGER.trace("Trying to find [{}] using ClassLoader.getSystemResource().", resource);
return ClassLoader.getSystemResource(resource);
}
/**
* This method will search for {@code resource} in different
* places. The search order is as follows:
*
* <ol>
* <li>Search for {@code resource} using the thread context
*
|
loader
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/AbstractThreadedActionListener.java
|
{
"start": 762,
"end": 887
}
|
class ____ action listeners that wrap another action listener and dispatch its completion to an executor.
*/
public abstract
|
for
|
java
|
apache__kafka
|
connect/api/src/main/java/org/apache/kafka/connect/health/ConnectorType.java
|
{
"start": 944,
"end": 1296
}
|
enum ____ {
/**
* Identifies a source connector
*/
SOURCE,
/**
* Identifies a sink connector
*/
SINK,
/**
* Identifies a connector whose type could not be inferred
*/
UNKNOWN;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
}
|
ConnectorType
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java
|
{
"start": 1836,
"end": 4057
}
|
class ____ extends AcknowledgedRequest<Request> implements IndicesRequest, ToXContentObject {
// The actual DataStreamAction don't support wildcards, so supporting it doesn't make sense.
// Also supporting wildcards it would prohibit this api from removing broken references to backing indices. (in case of bugs).
// For this case, when removing broken backing indices references that don't exist, we need to allow ignore_unavailable and
// allow_no_indices. Otherwise, the data stream can't be repaired.
private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(
true,
true,
false,
false,
false,
false,
true,
false
);
private final List<DataStreamAction> actions;
public Request(StreamInput in) throws IOException {
super(in);
actions = in.readCollectionAsList(DataStreamAction::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeCollection(actions);
}
public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, List<DataStreamAction> actions) {
super(masterNodeTimeout, ackTimeout);
this.actions = Collections.unmodifiableList(actions);
}
public List<DataStreamAction> getActions() {
return actions;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startArray("actions");
for (DataStreamAction action : actions) {
action.toXContent(builder, params);
}
builder.endArray();
builder.endObject();
return builder;
}
@Override
public ActionRequestValidationException validate() {
if (actions.isEmpty()) {
return addValidationError("must specify at least one data stream modification action", null);
}
return null;
}
public
|
Request
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebappAuthentication.java
|
{
"start": 2598,
"end": 9888
}
|
class ____ {
private static MockRM rm;
private static Configuration simpleConf;
private static Configuration kerberosConf;
private static File testRootDir = new File("target",
TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root");
private static File httpSpnegoKeytabFile = new File(KerberosTestUtils.getKeytabFile());
private static boolean miniKDCStarted = false;
private static MiniKdc testMiniKDC;
static {
simpleConf = new Configuration();
simpleConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
simpleConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
ResourceScheduler.class);
simpleConf.setBoolean("mockrm.webapp.enabled", true);
kerberosConf = new Configuration();
kerberosConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
kerberosConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
ResourceScheduler.class);
kerberosConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
kerberosConf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
kerberosConf.set(YarnConfiguration.RM_KEYTAB,
httpSpnegoKeytabFile.getAbsolutePath());
kerberosConf.setBoolean("mockrm.webapp.enabled", true);
}
public static Collection params() {
return Arrays.asList(new Object[][]{{1, simpleConf},
{2, kerberosConf}});
}
public void initTestRMWebappAuthentication(int run, Configuration conf) {
setupAndStartRM(conf);
}
@BeforeAll
public static void setUp() {
try {
testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
setupKDC();
} catch (Exception e) {
assertTrue(false, "Couldn't create MiniKDC");
}
}
@AfterAll
public static void tearDown() {
if (testMiniKDC != null) {
testMiniKDC.stop();
}
}
private static void setupKDC() throws Exception {
if (!miniKDCStarted) {
testMiniKDC.start();
getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost",
"client", UserGroupInformation.getLoginUser().getShortUserName());
miniKDCStarted = true;
}
}
private static MiniKdc getKdc() {
return testMiniKDC;
}
private static void setupAndStartRM(Configuration conf) {
UserGroupInformation.setConfiguration(conf);
rm = new MockRM(conf);
}
// ensure that in a non-secure cluster users can access
// the web pages as earlier and submit apps as anonymous
// user or by identifying themselves
@MethodSource("params")
@ParameterizedTest
public void testSimpleAuth(int run, Configuration conf) throws Exception {
initTestRMWebappAuthentication(run, conf);
rm.start();
// ensure users can access web pages
// this should work for secure and non-secure clusters
URL url = new URL("http://localhost:8088/cluster");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
try {
conn.getInputStream();
assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
} catch (Exception e) {
fail("Fetching url failed");
}
if (UserGroupInformation.isSecurityEnabled()) {
testAnonymousKerberosUser();
} else {
testAnonymousSimpleUser();
}
rm.stop();
}
private void testAnonymousKerberosUser() throws Exception {
ApplicationSubmissionContextInfo app =
new ApplicationSubmissionContextInfo();
String appid = "application_123_0";
app.setApplicationId(appid);
String requestBody =
TestRMWebServicesDelegationTokenAuthentication
.getMarshalledAppInfo(app);
URL url =
new URL("http://localhost:8088/ws/v1/cluster/apps/new-application");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
"application/xml", requestBody);
try {
conn.getInputStream();
fail("Anonymous users should not be allowed to get new application ids in secure mode.");
} catch (IOException ie) {
assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
}
url = new URL("http://localhost:8088/ws/v1/cluster/apps");
conn = (HttpURLConnection) url.openConnection();
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
"application/xml", requestBody);
try {
conn.getInputStream();
fail("Anonymous users should not be allowed to submit apps in secure mode.");
} catch (IOException ie) {
assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
}
AppState appState = new AppState();
appState.setState("KILLED");
requestBody = toJson(appState, AppState.class);
url = new URL("http://localhost:8088/ws/v1/cluster/apps/application_123_0/state");
conn = (HttpURLConnection) url.openConnection();
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "PUT",
"application/json", requestBody);
try {
conn.getInputStream();
fail("Anonymous users should not be allowed to kill apps in secure mode.");
} catch (IOException ie) {
assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
}
}
private void testAnonymousSimpleUser() throws Exception {
ApplicationSubmissionContextInfo app =
new ApplicationSubmissionContextInfo();
String appid = "application_123_0";
app.setApplicationId(appid);
String requestBody =
TestRMWebServicesDelegationTokenAuthentication
.getMarshalledAppInfo(app);
URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
"application/xml", requestBody);
conn.getInputStream();
assertEquals(Status.ACCEPTED.getStatusCode(), conn.getResponseCode());
boolean appExists =
rm.getRMContext().getRMApps()
.containsKey(ApplicationId.fromString(appid));
assertTrue(appExists);
RMApp actualApp =
rm.getRMContext().getRMApps()
.get(ApplicationId.fromString(appid));
String owner = actualApp.getUser();
assertEquals(
rm.getConfig().get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,
CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER), owner);
appid = "application_123_1";
app.setApplicationId(appid);
requestBody =
TestRMWebServicesDelegationTokenAuthentication
.getMarshalledAppInfo(app);
url = new URL("http://localhost:8088/ws/v1/cluster/apps?user.name=client");
conn = (HttpURLConnection) url.openConnection();
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
MediaType.APPLICATION_XML, requestBody);
conn.getInputStream();
appExists =
rm.getRMContext().getRMApps()
.containsKey(ApplicationId.fromString(appid));
assertTrue(appExists);
actualApp =
rm.getRMContext().getRMApps()
.get(ApplicationId.fromString(appid));
owner = actualApp.getUser();
assertEquals("client", owner);
}
}
|
TestRMWebappAuthentication
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java
|
{
"start": 44096,
"end": 44283
}
|
class ____ implements DialectFeatureCheck {
@Override
public boolean apply(Dialect dialect) {
return dialect.supportsSubqueryInSelect();
}
}
public static
|
SupportsSubqueryInSelect
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/util/EnvironmentInformationTest.java
|
{
"start": 1105,
"end": 6216
}
|
class ____ {
private final Logger log = LoggerFactory.getLogger(getClass());
@Test
public void testJavaMemory() {
try {
long fullHeap = EnvironmentInformation.getMaxJvmHeapMemory();
long freeWithGC = EnvironmentInformation.getSizeOfFreeHeapMemoryWithDefrag();
assertThat(fullHeap).isGreaterThan(0);
assertThat(freeWithGC).isGreaterThanOrEqualTo(0);
try {
long free = EnvironmentInformation.getSizeOfFreeHeapMemory();
assertThat(free).isGreaterThanOrEqualTo(0);
} catch (RuntimeException e) {
// this may only occur if the Xmx is not set
assertThat(EnvironmentInformation.getMaxJvmHeapMemory()).isEqualTo(Long.MAX_VALUE);
}
// we cannot make these assumptions, because the test JVM may grow / shrink during the
// GC
// assertTrue(free <= fullHeap);
// assertTrue(freeWithGC <= fullHeap);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
public void testEnvironmentMethods() {
try {
assertThat(EnvironmentInformation.getJvmStartupOptions()).isNotNull();
assertThat(EnvironmentInformation.getJvmStartupOptionsArray()).isNotNull();
assertThat(EnvironmentInformation.getJvmVersion()).isNotNull();
assertThat(EnvironmentInformation.getRevisionInformation()).isNotNull();
assertThat(EnvironmentInformation.getVersion()).isNotNull();
assertThat(EnvironmentInformation.getScalaVersion()).isNotNull();
assertThat(EnvironmentInformation.getBuildTime()).isNotNull();
assertThat(EnvironmentInformation.getBuildTimeString()).isNotNull();
assertThat(EnvironmentInformation.getGitCommitId()).isNotNull();
assertThat(EnvironmentInformation.getGitCommitIdAbbrev()).isNotNull();
assertThat(EnvironmentInformation.getGitCommitTime()).isNotNull();
assertThat(EnvironmentInformation.getGitCommitTimeString()).isNotNull();
assertThat(EnvironmentInformation.getHadoopVersionString()).isNotNull();
assertThat(EnvironmentInformation.getHadoopUser()).isNotNull();
assertThat(EnvironmentInformation.getOpenFileHandlesLimit()).isGreaterThanOrEqualTo(-1);
if (log.isInfoEnabled()) {
// Visual inspection of the available Environment variables
// To actually see it set "rootLogger.level = INFO" in "log4j2-test.properties"
log.info(
"JvmStartupOptions : {}",
EnvironmentInformation.getJvmStartupOptions());
log.info(
"JvmStartupOptionsArray : {}",
Arrays.asList(EnvironmentInformation.getJvmStartupOptionsArray()));
log.info("JvmVersion : {}", EnvironmentInformation.getJvmVersion());
log.info(
"RevisionInformation : {}",
EnvironmentInformation.getRevisionInformation());
log.info("Version : {}", EnvironmentInformation.getVersion());
log.info("ScalaVersion : {}", EnvironmentInformation.getScalaVersion());
log.info("BuildTime : {}", EnvironmentInformation.getBuildTime());
log.info(
"BuildTimeString : {}", EnvironmentInformation.getBuildTimeString());
log.info("GitCommitId : {}", EnvironmentInformation.getGitCommitId());
log.info(
"GitCommitIdAbbrev : {}",
EnvironmentInformation.getGitCommitIdAbbrev());
log.info("GitCommitTime : {}", EnvironmentInformation.getGitCommitTime());
log.info(
"GitCommitTimeString : {}",
EnvironmentInformation.getGitCommitTimeString());
log.info(
"HadoopVersionString : {}",
EnvironmentInformation.getHadoopVersionString());
log.info("HadoopUser : {}", EnvironmentInformation.getHadoopUser());
log.info(
"OpenFileHandlesLimit : {}",
EnvironmentInformation.getOpenFileHandlesLimit());
}
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
public void testLogEnvironmentInformation() {
try {
Logger mockLogger = Mockito.mock(Logger.class);
EnvironmentInformation.logEnvironmentInfo(mockLogger, "test", new String[0]);
EnvironmentInformation.logEnvironmentInfo(mockLogger, "test", null);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
}
|
EnvironmentInformationTest
|
java
|
resilience4j__resilience4j
|
resilience4j-spring-boot2/src/test/java/io/github/resilience4j/ratelimiter/RateLimiterAutoConfigurationTest.java
|
{
"start": 2519,
"end": 10020
}
|
class ____ {
@Rule
public WireMockRule wireMockRule = new WireMockRule(8090);
@Autowired
private RateLimiterRegistry rateLimiterRegistry;
@Autowired
private RateLimiterProperties rateLimiterProperties;
@Autowired
private RateLimiterAspect rateLimiterAspect;
@Autowired
private DummyService dummyService;
@Autowired
private TestRestTemplate restTemplate;
@Autowired
private RateLimiterDummyFeignClient rateLimiterDummyFeignClient;
@Autowired
private EventConsumerRegistry<RateLimiterEvent> eventConsumerRegistry;
/**
* This test verifies that the combination of @FeignClient and @RateLimiter annotation works as
* same as @Bulkhead alone works with any normal service class
*/
@Test
@Ignore
public void testFeignClient() {
WireMock.stubFor(WireMock
.get(WireMock.urlEqualTo("/limit/"))
.willReturn(WireMock.aResponse().withStatus(200).withBody("This is successful call"))
);
WireMock.stubFor(WireMock.get(WireMock.urlMatching("^.*\\/limit\\/error.*$"))
.willReturn(WireMock.aResponse().withStatus(400).withBody("This is error")));
assertThat(rateLimiterRegistry).isNotNull();
assertThat(rateLimiterProperties).isNotNull();
RateLimiter rateLimiter = rateLimiterRegistry.rateLimiter(RATE_LIMITER_FEIGN_CLIENT_NAME);
assertThat(rateLimiter).isNotNull();
rateLimiter.acquirePermission();
await()
.atMost(2, TimeUnit.SECONDS)
.until(() -> rateLimiter.getMetrics().getAvailablePermissions() == 10);
try {
rateLimiterDummyFeignClient.doSomething("error");
} catch (Exception ex) {
// Do nothing.
}
rateLimiterDummyFeignClient.doSomething(EMPTY);
assertThat(rateLimiter.getMetrics().getAvailablePermissions()).isEqualTo(8);
assertThat(rateLimiter.getMetrics().getNumberOfWaitingThreads()).isZero();
assertThat(rateLimiter.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(10);
assertThat(rateLimiter.getRateLimiterConfig().getLimitRefreshPeriod())
.isEqualTo(Duration.ofSeconds(1));
assertThat(rateLimiter.getRateLimiterConfig().getTimeoutDuration())
.isEqualTo(Duration.ofSeconds(0));
// Test Actuator endpoints
ResponseEntity<RateLimiterEndpointResponse> rateLimiterList = restTemplate
.getForEntity("/actuator/ratelimiters", RateLimiterEndpointResponse.class);
assertThat(rateLimiterList.getBody().getRateLimiters()).hasSize(4)
.containsExactly("backendA", "backendB", "backendCustomizer",
"rateLimiterDummyFeignClient");
try {
for (int i = 0; i < 11; i++) {
rateLimiterDummyFeignClient.doSomething(EMPTY);
}
} catch (RequestNotPermitted e) {
// Do nothing
}
ResponseEntity<RateLimiterEventsEndpointResponse> rateLimiterEventList = restTemplate
.getForEntity("/actuator/ratelimiterevents", RateLimiterEventsEndpointResponse.class);
List<RateLimiterEventDTO> eventsList = rateLimiterEventList.getBody()
.getRateLimiterEvents();
assertThat(eventsList).isNotEmpty();
RateLimiterEventDTO lastEvent = eventsList.get(eventsList.size() - 1);
assertThat(lastEvent.getType()).isEqualTo(RateLimiterEvent.Type.FAILED_ACQUIRE);
await()
.atMost(2, TimeUnit.SECONDS)
.until(() -> rateLimiter.getMetrics().getAvailablePermissions() == 10);
assertThat(rateLimiterAspect.getOrder()).isEqualTo(401);
}
/**
* The test verifies that a RateLimiter instance is created and configured properly when the
* DummyService is invoked and that the RateLimiter records successful and failed calls.
*/
@Test
public void testRateLimiterAutoConfiguration() throws IOException {
assertThat(rateLimiterRegistry).isNotNull();
assertThat(rateLimiterProperties).isNotNull();
RateLimiter rateLimiter = rateLimiterRegistry.rateLimiter(DummyService.BACKEND);
assertThat(rateLimiter).isNotNull();
rateLimiter.acquirePermission();
await()
.atMost(2, TimeUnit.SECONDS)
.until(() -> rateLimiter.getMetrics().getAvailablePermissions() == 10);
try {
dummyService.doSomething(true);
} catch (IOException ex) {
// Do nothing.
}
dummyService.doSomething(false);
assertThat(rateLimiter.getMetrics().getAvailablePermissions()).isEqualTo(8);
assertThat(rateLimiter.getMetrics().getNumberOfWaitingThreads()).isZero();
assertThat(rateLimiter.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(10);
assertThat(rateLimiter.getRateLimiterConfig().getLimitRefreshPeriod())
.isEqualTo(Duration.ofSeconds(1));
assertThat(rateLimiter.getRateLimiterConfig().getTimeoutDuration())
.isEqualTo(Duration.ofSeconds(0));
// Test Actuator endpoints
ResponseEntity<RateLimiterEndpointResponse> rateLimiterList = restTemplate
.getForEntity("/actuator/ratelimiters", RateLimiterEndpointResponse.class);
assertThat(rateLimiterList.getBody().getRateLimiters()).hasSize(4)
.containsExactly("backendA", "backendB", "backendCustomizer",
"rateLimiterDummyFeignClient");
try {
for (int i = 0; i < 11; i++) {
dummyService.doSomething(false);
}
} catch (RequestNotPermitted e) {
// Do nothing
}
ResponseEntity<RateLimiterEventsEndpointResponse> rateLimiterEventList = restTemplate
.getForEntity("/actuator/ratelimiterevents", RateLimiterEventsEndpointResponse.class);
List<RateLimiterEventDTO> eventsList = rateLimiterEventList.getBody()
.getRateLimiterEvents();
assertThat(eventsList).isNotEmpty();
RateLimiterEventDTO lastEvent = eventsList.get(eventsList.size() - 1);
assertThat(lastEvent.getType()).isEqualTo(RateLimiterEvent.Type.FAILED_ACQUIRE);
await()
.atMost(2, TimeUnit.SECONDS)
.until(() -> rateLimiter.getMetrics().getAvailablePermissions() == 10);
assertThat(rateLimiterAspect.getOrder()).isEqualTo(401);
// test the customizer impact for specific instance
RateLimiter backendCustomizer = rateLimiterRegistry.rateLimiter("backendCustomizer");
assertThat(backendCustomizer.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(200);
}
@Test
public void testPermitsInRateLimiterAnnotation() {
RateLimiter rateLimiter = rateLimiterRegistry.rateLimiter(DummyService.BACKEND);
await()
.atMost(2, TimeUnit.SECONDS)
.until(() -> rateLimiter.getMetrics().getAvailablePermissions() == 10);
dummyService.doSomethingExpensive();
assertThat(rateLimiter.getMetrics().getAvailablePermissions()).isEqualTo(0);
assertThat(eventConsumerRegistry.getEventConsumer(DummyService.BACKEND).getBufferedEvents()).last()
.satisfies(event -> {
assertThat(event.getEventType()).isEqualTo(RateLimiterEvent.Type.SUCCESSFUL_ACQUIRE);
assertThat(event.getNumberOfPermits()).isEqualTo(10);
});
}
}
|
RateLimiterAutoConfigurationTest
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java
|
{
"start": 62410,
"end": 64307
}
|
class ____ {
// the new rel
final RelNode r;
// the condition contains correlation variables
final RexNode c;
// map the oldRel's field indices to newRel's field indices
final com.google.common.collect.ImmutableSortedMap<Integer, Integer> oldToNewOutputs;
Frame(
RelNode oldRel,
RelNode newRel,
RexNode corCondition,
Map<Integer, Integer> oldToNewOutputs) {
this.r = Preconditions.checkNotNull(newRel);
this.c = corCondition;
this.oldToNewOutputs =
com.google.common.collect.ImmutableSortedMap.copyOf(oldToNewOutputs);
assert allLessThan(
this.oldToNewOutputs.keySet(),
oldRel.getRowType().getFieldCount(),
Litmus.THROW);
assert allLessThan(
this.oldToNewOutputs.values(), r.getRowType().getFieldCount(), Litmus.THROW);
}
List<Integer> getCorInputRefIndices() {
final List<Integer> inputRefIndices;
if (c != null) {
inputRefIndices = RelOptUtil.InputFinder.bits(c).toList();
} else {
inputRefIndices = new ArrayList<>();
}
return inputRefIndices;
}
private static boolean allLessThan(Collection<Integer> integers, int limit, Litmus ret) {
for (int value : integers) {
if (value >= limit) {
return ret.fail("out of range; value: {}, limit: {}", value, limit);
}
}
return ret.succeed();
}
}
/**
* Result describing the relational expression after decorrelation and where to find the
* equivalent non-correlated expressions and correlated conditions.
*/
public static
|
Frame
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/cid/TvProgram.java
|
{
"start": 1036,
"end": 1236
}
|
class ____ {
@EmbeddedId
public TvMagazinPk id;
@Temporal( TemporalType.TIME )
@Column(name="`time`")
Date time;
@Column( name = "TXT", table = "TV_PROGRAM_EXT" )
public String text;
}
|
TvProgram
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/refaster/testdata/output/TryMultiCatchTemplateExample.java
|
{
"start": 773,
"end": 1044
}
|
class ____ {
public void foo() {
String str = null;
try {
str = String.class.newInstance();
} catch (ReflectiveOperationException | SecurityException tolerated) {
tolerated.printStackTrace();
}
System.out.println(str);
}
}
|
TryMultiCatchTemplateExample
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/manytomany/defaults/ManyToManyImplicitNamingTest.java
|
{
"start": 1903,
"end": 8729
}
|
class ____
implements SettingProvider.Provider<ImplicitNamingStrategy> {
@Override
public ImplicitNamingStrategy getSetting() {
return ImplicitNamingStrategyLegacyJpaImpl.INSTANCE;
}
}
@Test
public void testBidirNoOverrides(SessionFactoryScope scope) {
// Employee.contactInfo.phoneNumbers: associated entity: PhoneNumber
// both have @Entity with no name configured and default primary table names;
// Primary table names default to unqualified entity classes.
// PK column for Employee.id: id (default)
// PK column for PhoneNumber.phNumber: phNumber (default)
// bidirectional association
checkDefaultJoinTablAndJoinColumnNames(
Employee.class,
"contactInfo.phoneNumbers",
"employees",
"Employee_PhoneNumber",
"employees_id",
"phoneNumbers_phNumber",
scope
);
}
@Test
public void testBidirOwnerPKOverride(SessionFactoryScope scope) {
// Store.customers; associated entity: KnownClient
// both have @Entity with no name configured and default primary table names
// Primary table names default to unqualified entity classes.
// PK column for Store.id: sId
// PK column for KnownClient.id: id (default)
// bidirectional association
checkDefaultJoinTablAndJoinColumnNames(
Store.class,
"customers",
"stores",
"Store_KnownClient",
"stores_sId",
"customers_id",
scope
);
}
@Test
public void testUnidirOwnerPKAssocEntityNamePKOverride(SessionFactoryScope scope) {
// Store.items; associated entity: Item
// Store has @Entity with no name configured and no @Table
// Item has @Entity(name="ITEM") and no @Table
// PK column for Store.id: sId
// PK column for Item: iId
// unidirectional
checkDefaultJoinTablAndJoinColumnNames(
Store.class,
"items",
null,
"Store_ITEM",
"Store_sId",
"items_iId",
scope
);
}
@Test
public void testUnidirOwnerPKAssocPrimaryTableNameOverride(SessionFactoryScope scope) {
// Store.implantedIn; associated entity: City
// Store has @Entity with no name configured and no @Table
// City has @Entity with no name configured and @Table(name = "tbl_city")
// PK column for Store.id: sId
// PK column for City.id: id (default)
// unidirectional
checkDefaultJoinTablAndJoinColumnNames(
Store.class,
"implantedIn",
null,
"Store_tbl_city",
"Store_sId",
"implantedIn_id",
scope
);
}
@Test
public void testUnidirOwnerPKAssocEntityNamePrimaryTableOverride(SessionFactoryScope scope) {
// Store.categories; associated entity: Category
// Store has @Entity with no name configured and no @Table
// Category has @Entity(name="CATEGORY") @Table(name="CATEGORY_TAB")
// PK column for Store.id: sId
// PK column for Category.id: id (default)
// unidirectional
checkDefaultJoinTablAndJoinColumnNames(
Store.class,
"categories",
null,
"Store_CATEGORY_TAB",
"Store_sId",
"categories_id",
scope
);
}
@Test
public void testUnidirOwnerEntityNamePKAssocPrimaryTableOverride(SessionFactoryScope scope) {
// Item.producedInCities: associated entity: City
// Item has @Entity(name="ITEM") and no @Table
// City has @Entity with no name configured and @Table(name = "tbl_city")
// PK column for Item: iId
// PK column for City.id: id (default)
// unidirectional
checkDefaultJoinTablAndJoinColumnNames(
Item.class,
"producedInCities",
null,
"ITEM_tbl_city",
"ITEM_iId",
"producedInCities_id",
scope
);
}
@Test
@JiraKey(value = "HHH-9390")
public void testUnidirOwnerEntityNamePrimaryTableOverride(SessionFactoryScope scope) {
// Category.clients: associated entity: KnownClient
// Category has @Entity(name="CATEGORY") @Table(name="CATEGORY_TAB")
// KnownClient has @Entity with no name configured and no @Table
// PK column for Category.id: id (default)
// PK column for KnownClient.id: id (default)
// unidirectional
// legacy behavior would use the table name in the generated join column.
checkDefaultJoinTablAndJoinColumnNames(
Category.class,
"clients",
null,
"CATEGORY_TAB_KnownClient",
"CATEGORY_TAB_id",
"clients_id",
scope
);
}
protected void checkDefaultJoinTablAndJoinColumnNames(
Class<?> ownerEntityClass,
String ownerCollectionPropertyName,
String inverseCollectionPropertyName,
String expectedCollectionTableName,
String ownerForeignKeyNameExpected,
String inverseForeignKeyNameExpected,
SessionFactoryScope scope) {
MetadataImplementor metadata = scope.getMetadataImplementor();
final org.hibernate.mapping.Collection collection = metadata.getCollectionBinding(
ownerEntityClass.getName() + '.' + ownerCollectionPropertyName );
final org.hibernate.mapping.Table table = collection.getCollectionTable();
assertThat( table.getName() ).isEqualTo( expectedCollectionTableName );
final org.hibernate.mapping.Collection ownerCollection = metadata.getCollectionBinding(
ownerEntityClass.getName() + '.' + ownerCollectionPropertyName
);
// The default owner and inverse join columns can only be computed if they have PK with 1 column.
assertThat( ownerCollection.getOwner().getKey().getColumnSpan() ).isEqualTo( 1 );
assertThat( ownerCollection.getKey().getColumns().get( 0 ).getText() ).isEqualTo( ownerForeignKeyNameExpected );
final EntityType associatedEntityType = (EntityType) ownerCollection.getElement().getType();
final PersistentClass associatedPersistentClass =
metadata.getEntityBinding( associatedEntityType.getAssociatedEntityName() );
assertThat( associatedPersistentClass.getKey().getColumnSpan() ).isEqualTo( 1 );
if ( inverseCollectionPropertyName != null ) {
final org.hibernate.mapping.Collection inverseCollection = metadata.getCollectionBinding(
associatedPersistentClass.getEntityName() + '.' + inverseCollectionPropertyName
);
assertThat( inverseCollection.getKey().getSelectables().get( 0 ).getText() )
.isEqualTo( inverseForeignKeyNameExpected );
}
boolean hasOwnerFK = false;
boolean hasInverseFK = false;
for ( final ForeignKey fk : ownerCollection.getCollectionTable().getForeignKeyCollection() ) {
assertThat( fk.getTable() ).isSameAs( ownerCollection.getCollectionTable() );
if ( fk.getColumnSpan() > 1 ) {
continue;
}
if ( fk.getColumn( 0 ).getText().equals( ownerForeignKeyNameExpected ) ) {
assertThat( fk.getReferencedTable() ).isSameAs( ownerCollection.getOwner().getTable() );
hasOwnerFK = true;
}
else if ( fk.getColumn( 0 ).getText().equals( inverseForeignKeyNameExpected ) ) {
assertThat( fk.getReferencedTable() ).isSameAs( associatedPersistentClass.getTable() );
hasInverseFK = true;
}
}
assertThat( hasOwnerFK ).isTrue();
assertThat( hasInverseFK ).isTrue();
}
}
|
ImplicitNamingStrategyProvider
|
java
|
apache__camel
|
components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/BindyAbstractFactory.java
|
{
"start": 1578,
"end": 4078
}
|
class ____ implements BindyFactory {
private static final Logger LOG = LoggerFactory.getLogger(BindyAbstractFactory.class);
protected final Map<String, List<Field>> annotatedLinkFields = new LinkedHashMap<>();
protected FormatFactory formatFactory;
protected Set<Class<?>> models;
protected Set<String> modelClassNames;
protected String crlf;
protected String eol;
private String locale;
private Class<?> type;
protected BindyAbstractFactory(Class<?> type) throws Exception {
this.type = type;
if (LOG.isDebugEnabled()) {
LOG.debug("Class name: {}", type.getName());
}
initModel();
}
/**
* method uses to initialize the model representing the classes who will bind the data. This process will scan for
* classes according to the package name provided, check the annotated classes and fields.
*
* @throws Exception
*/
@Override
public void initModel() throws Exception {
models = new HashSet<>();
modelClassNames = new HashSet<>();
loadModels(type);
}
/**
* Recursively load model.
*
* @param root
*/
@SuppressWarnings("rawtypes")
private void loadModels(Class<?> root) {
models.add(root);
modelClassNames.add(root.getName());
for (Field field : root.getDeclaredFields()) {
Link linkField = field.getAnnotation(Link.class);
if (linkField != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Class linked: {}, Field: {}", field.getType(), field);
}
models.add(field.getType());
modelClassNames.add(field.getType().getName());
loadModels(field.getType());
}
OneToMany oneToManyField = field.getAnnotation(OneToMany.class);
if (oneToManyField != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Class (OneToMany) linked: {}, Field: {}", field.getType(), field);
}
Type listType = field.getGenericType();
Type type = ((ParameterizedType) listType).getActualTypeArguments()[0];
Class clazz = (Class<?>) type;
models.add(clazz);
modelClassNames.add(clazz.getName());
loadModels(clazz);
}
}
}
/**
* Find fields annotated in each
|
BindyAbstractFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/LocalDateTimeAndEnumSchemaUpdateTest.java
|
{
"start": 3470,
"end": 4118
}
|
class ____ {
@Id
private Long id;
private String name;
private LocalDateTime birthday;
@Enumerated(EnumType.STRING)
private Gender gender;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public LocalDateTime getBirthday() {
return birthday;
}
public void setBirthday(LocalDateTime birthday) {
this.birthday = birthday;
}
public Gender getGender() {
return gender;
}
public void setGender(Gender gender) {
this.gender = gender;
}
}
public
|
User
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SelectionAndProjectionTests.java
|
{
"start": 14104,
"end": 14394
}
|
class ____ implements Iterable<Integer> {
private final List<Integer> list = new ArrayList<>();
Counter(int size) {
IntStream.rangeClosed(1, size).forEach(this.list::add);
}
@Override
public Iterator<Integer> iterator() {
return this.list.iterator();
}
}
static
|
Counter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene54/Lucene54DocValuesProducer.java
|
{
"start": 65574,
"end": 73802
}
|
class ____ extends BaseTermsEnum {
private long currentOrd = -1;
// offset to the start of the current block
private long currentBlockStart;
private final IndexInput input;
// delta from currentBlockStart to start of each term
private final int offsets[] = new int[Lucene54DocValuesFormat.INTERVAL_COUNT];
private final byte buffer[] = new byte[2 * Lucene54DocValuesFormat.INTERVAL_COUNT - 1];
private final BytesRef term = new BytesRef(maxTermLength);
private final BytesRef firstTerm = new BytesRef(maxTermLength);
private final BytesRef scratch = new BytesRef();
CompressedBinaryTermsEnum(IndexInput input) throws IOException {
this.input = input;
input.seek(0);
}
private void readHeader() throws IOException {
firstTerm.length = input.readVInt();
input.readBytes(firstTerm.bytes, 0, firstTerm.length);
input.readBytes(buffer, 0, Lucene54DocValuesFormat.INTERVAL_COUNT - 1);
if (buffer[0] == -1) {
readShortAddresses();
} else {
readByteAddresses();
}
currentBlockStart = input.getFilePointer();
}
// read single byte addresses: each is delta - 2
// (shared prefix byte and length > 0 are both implicit)
private void readByteAddresses() throws IOException {
int addr = 0;
for (int i = 1; i < offsets.length; i++) {
addr += 2 + (buffer[i - 1] & 0xFF);
offsets[i] = addr;
}
}
// read double byte addresses: each is delta - 2
// (shared prefix byte and length > 0 are both implicit)
private void readShortAddresses() throws IOException {
input.readBytes(buffer, Lucene54DocValuesFormat.INTERVAL_COUNT - 1, Lucene54DocValuesFormat.INTERVAL_COUNT);
int addr = 0;
for (int i = 1; i < offsets.length; i++) {
int x = i << 1;
addr += 2 + ((buffer[x - 1] << 8) | (buffer[x] & 0xFF));
offsets[i] = addr;
}
}
// set term to the first term
private void readFirstTerm() throws IOException {
term.length = firstTerm.length;
System.arraycopy(firstTerm.bytes, firstTerm.offset, term.bytes, 0, term.length);
}
// read term at offset, delta encoded from first term
private void readTerm(int offset) throws IOException {
int start = input.readByte() & 0xFF;
System.arraycopy(firstTerm.bytes, firstTerm.offset, term.bytes, 0, start);
int suffix = offsets[offset] - offsets[offset - 1] - 1;
input.readBytes(term.bytes, start, suffix);
term.length = start + suffix;
}
@Override
public BytesRef next() throws IOException {
currentOrd++;
if (currentOrd >= numValues) {
return null;
} else {
int offset = (int) (currentOrd & Lucene54DocValuesFormat.INTERVAL_MASK);
if (offset == 0) {
// switch to next block
readHeader();
readFirstTerm();
} else {
readTerm(offset);
}
return term;
}
}
// binary search reverse index to find smaller
// range of blocks to search
long binarySearchIndex(BytesRef text) throws IOException {
long low = 0;
long high = numReverseIndexValues - 1;
while (low <= high) {
long mid = (low + high) >>> 1;
reverseTerms.fill(scratch, reverseAddresses.get(mid));
int cmp = scratch.compareTo(text);
if (cmp < 0) {
low = mid + 1;
} else if (cmp > 0) {
high = mid - 1;
} else {
return mid;
}
}
return high;
}
// binary search against first term in block range
// to find term's block
long binarySearchBlock(BytesRef text, long low, long high) throws IOException {
while (low <= high) {
long mid = (low + high) >>> 1;
input.seek(addresses.get(mid));
term.length = input.readVInt();
input.readBytes(term.bytes, 0, term.length);
int cmp = term.compareTo(text);
if (cmp < 0) {
low = mid + 1;
} else if (cmp > 0) {
high = mid - 1;
} else {
return mid;
}
}
return high;
}
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
// locate block: narrow to block range with index, then search blocks
final long block;
long indexPos = binarySearchIndex(text);
if (indexPos < 0) {
block = 0;
} else {
long low = indexPos << Lucene54DocValuesFormat.BLOCK_INTERVAL_SHIFT;
long high = Math.min(numIndexValues - 1, low + Lucene54DocValuesFormat.BLOCK_INTERVAL_MASK);
block = Math.max(low, binarySearchBlock(text, low, high));
}
// position before block, then scan to term.
input.seek(addresses.get(block));
currentOrd = (block << Lucene54DocValuesFormat.INTERVAL_SHIFT) - 1;
while (next() != null) {
int cmp = term.compareTo(text);
if (cmp == 0) {
return SeekStatus.FOUND;
} else if (cmp > 0) {
return SeekStatus.NOT_FOUND;
}
}
return SeekStatus.END;
}
@Override
public void seekExact(long ord) throws IOException {
long block = ord >>> Lucene54DocValuesFormat.INTERVAL_SHIFT;
if (block != currentOrd >>> Lucene54DocValuesFormat.INTERVAL_SHIFT) {
// switch to different block
input.seek(addresses.get(block));
readHeader();
}
currentOrd = ord;
int offset = (int) (ord & Lucene54DocValuesFormat.INTERVAL_MASK);
if (offset == 0) {
readFirstTerm();
} else {
input.seek(currentBlockStart + offsets[offset - 1]);
readTerm(offset);
}
}
@Override
public BytesRef term() throws IOException {
return term;
}
@Override
public long ord() throws IOException {
return currentOrd;
}
@Override
public int docFreq() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long totalTermFreq() throws IOException {
return -1;
}
@Override
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public ImpactsEnum impacts(int flags) throws IOException {
throw new UnsupportedOperationException();
}
}
}
}
|
CompressedBinaryTermsEnum
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java
|
{
"start": 819,
"end": 1765
}
|
interface ____ extends NamedWriteable, ToXContentFragment, ClusterPrivilege {
/**
* The category under which this privilege should be rendered when output as XContent.
*/
Category getCategory();
/**
* A {@link ConfigurableClusterPrivilege} should generate a fragment of {@code XContent}, which consists of
* a single field name, followed by its value (which may be an object, an array, or a simple value).
*/
@Override
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException;
/**
* Categories exist for to segment privileges for the purposes of rendering to XContent.
* {@link ConfigurableClusterPrivileges#toXContent(XContentBuilder, Params, Collection)} builds one XContent
* object for a collection of {@link ConfigurableClusterPrivilege} instances, with the top level fields built
* from the categories.
*/
|
ConfigurableClusterPrivilege
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-orm-rest-data-panache/deployment/src/main/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/HibernateORMResourceMethodListenerImplementor.java
|
{
"start": 331,
"end": 1108
}
|
class ____ extends ResourceMethodListenerImplementor {
public HibernateORMResourceMethodListenerImplementor(ClassCreator cc, List<ClassInfo> resourceMethodListeners) {
super(cc, resourceMethodListeners);
}
public void onAfterAdd(BytecodeCreator methodCreator, ResultHandle entity) {
invokeMethodUsingEntity(ON_AFTER_ADD_METHOD_NAME, methodCreator, entity);
}
public void onAfterUpdate(BytecodeCreator methodCreator, ResultHandle entity) {
invokeMethodUsingEntity(ON_AFTER_UPDATE_METHOD_NAME, methodCreator, entity);
}
public void onAfterDelete(BytecodeCreator methodCreator, ResultHandle id) {
invokeMethodUsingId(ON_AFTER_DELETE_METHOD_NAME, methodCreator, id);
}
}
|
HibernateORMResourceMethodListenerImplementor
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/type/EnumOrdinalTypeHandlerTest.java
|
{
"start": 988,
"end": 2893
}
|
enum ____ {
ONE, TWO
}
private static final TypeHandler<MyEnum> TYPE_HANDLER = new EnumOrdinalTypeHandler<>(MyEnum.class);
@Override
@Test
public void shouldSetParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, MyEnum.ONE, null);
verify(ps).setInt(1, 0);
}
@Test
void shouldSetNullParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, null, JdbcType.VARCHAR);
verify(ps).setNull(1, JdbcType.VARCHAR.TYPE_CODE);
}
@Override
@Test
public void shouldGetResultFromResultSetByName() throws Exception {
when(rs.getInt("column")).thenReturn(0);
when(rs.wasNull()).thenReturn(false);
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(rs, "column"));
}
@Override
@Test
public void shouldGetResultNullFromResultSetByName() throws Exception {
when(rs.getInt("column")).thenReturn(0);
when(rs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(rs, "column"));
}
@Override
@Test
public void shouldGetResultFromResultSetByPosition() throws Exception {
when(rs.getInt(1)).thenReturn(0);
when(rs.wasNull()).thenReturn(false);
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(rs, 1));
}
@Override
@Test
public void shouldGetResultNullFromResultSetByPosition() throws Exception {
when(rs.getInt(1)).thenReturn(0);
when(rs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(rs, 1));
}
@Override
@Test
public void shouldGetResultFromCallableStatement() throws Exception {
when(cs.getInt(1)).thenReturn(0);
when(cs.wasNull()).thenReturn(false);
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(cs, 1));
}
@Override
@Test
public void shouldGetResultNullFromCallableStatement() throws Exception {
when(cs.getInt(1)).thenReturn(0);
when(cs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(cs, 1));
}
}
|
MyEnum
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.