language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__spark
|
sql/core/src/test/java/test/org/apache/spark/sql/connector/JavaOrderAndPartitionAwareDataSource.java
|
{
"start": 3601,
"end": 4106
}
|
class ____ implements SortOrder {
private final Expression expression;
MySortOrder(String columnName) {
this.expression = new MyIdentityTransform(new MyNamedReference(columnName));
}
@Override
public Expression expression() {
return expression;
}
@Override
public SortDirection direction() {
return SortDirection.ASCENDING;
}
@Override
public NullOrdering nullOrdering() {
return NullOrdering.NULLS_FIRST;
}
}
static
|
MySortOrder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StaticAssignmentInConstructorTest.java
|
{
"start": 1892,
"end": 2213
}
|
class ____ {
int foo;
public Test(int foo) {
this.foo = foo;
}
}
""")
.doTest();
}
@Test
public void assignedWithinLambda_noMatch() {
helper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
resilience4j__resilience4j
|
resilience4j-feign/src/test/java/io/github/resilience4j/feign/FeignDecoratorsTest.java
|
{
"start": 898,
"end": 2811
}
|
class ____ {
@Test
public void testWithNothing() throws Throwable {
final FeignDecorators testSubject = FeignDecorators.builder().build();
final Object result = testSubject.decorate(args -> args[0], null, null, null)
.apply(new Object[]{"test01"});
assertThat(result)
.describedAs("Returned result is correct")
.isEqualTo("test01");
}
@Test
public void testWithCircuitBreaker() throws Throwable {
final CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("test");
final CircuitBreaker.Metrics metrics = circuitBreaker.getMetrics();
final FeignDecorators testSubject = FeignDecorators.builder()
.withCircuitBreaker(circuitBreaker).build();
final Method method = FeignDecoratorsTest.class.getDeclaredMethods()[0];
final Object result = testSubject.decorate(args -> args[0], method, null, null)
.apply(new Object[]{"test01"});
assertThat(result)
.describedAs("Returned result is correct")
.isEqualTo("test01");
assertThat(metrics.getNumberOfSuccessfulCalls())
.describedAs("Successful Calls")
.isEqualTo(1);
}
@Test
public void testWithRateLimiter() throws Throwable {
final RateLimiter rateLimiter = spy(RateLimiter.ofDefaults("test"));
final FeignDecorators testSubject = FeignDecorators.builder().withRateLimiter(rateLimiter)
.build();
final Method method = FeignDecoratorsTest.class.getDeclaredMethods()[0];
final Object result = testSubject.decorate(args -> args[0], method, null, null)
.apply(new Object[]{"test01"});
assertThat(result)
.describedAs("Returned result is correct")
.isEqualTo("test01");
verify(rateLimiter, times(1)).acquirePermission(1);
}
}
|
FeignDecoratorsTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/InheritedEntityGraphTest.java
|
{
"start": 5338,
"end": 5563
}
|
class ____ {
@Id
@GeneratedValue
public long id;
@OneToOne(fetch = FetchType.LAZY)
public Bar bar;
@OneToMany
public Set<Bar> bars = new HashSet<Bar>();
}
@Entity(name = "Bar")
public static
|
MappedSupperclass
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/RouteLookupServiceClusterSpecifierPlugin.java
|
{
"start": 3003,
"end": 3370
}
|
class ____ implements PluginConfig {
abstract ImmutableMap<String, ?> config();
static RlsPluginConfig create(Map<String, ?> config) {
return new AutoValue_RouteLookupServiceClusterSpecifierPlugin_RlsPluginConfig(
ImmutableMap.copyOf(config));
}
@Override
public String typeUrl() {
return TYPE_URL;
}
}
}
|
RlsPluginConfig
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/recovery/RecoveryStats.java
|
{
"start": 1103,
"end": 3559
}
|
class ____ implements ToXContentFragment, Writeable {
private final AtomicInteger currentAsSource = new AtomicInteger();
private final AtomicInteger currentAsTarget = new AtomicInteger();
private final AtomicLong throttleTimeInNanos = new AtomicLong();
public RecoveryStats() {}
public RecoveryStats(StreamInput in) throws IOException {
currentAsSource.set(in.readVInt());
currentAsTarget.set(in.readVInt());
throttleTimeInNanos.set(in.readLong());
}
public void add(RecoveryStats recoveryStats) {
if (recoveryStats != null) {
this.currentAsSource.addAndGet(recoveryStats.currentAsSource());
this.currentAsTarget.addAndGet(recoveryStats.currentAsTarget());
}
addTotals(recoveryStats);
}
public void addTotals(RecoveryStats recoveryStats) {
if (recoveryStats != null) {
this.throttleTimeInNanos.addAndGet(recoveryStats.throttleTime().nanos());
}
}
/**
* Number of ongoing recoveries for which a shard serves as a source
*/
public int currentAsSource() {
return currentAsSource.get();
}
/**
* Number of ongoing recoveries for which a shard serves as a target
*/
public int currentAsTarget() {
return currentAsTarget.get();
}
/**
* Total time recoveries waited due to throttling
*/
public TimeValue throttleTime() {
return TimeValue.timeValueNanos(throttleTimeInNanos.get());
}
public void incCurrentAsTarget() {
currentAsTarget.incrementAndGet();
}
public void decCurrentAsTarget() {
currentAsTarget.decrementAndGet();
}
public void incCurrentAsSource() {
currentAsSource.incrementAndGet();
}
public void decCurrentAsSource() {
currentAsSource.decrementAndGet();
}
public void addThrottleTime(long nanos) {
throttleTimeInNanos.addAndGet(nanos);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.RECOVERY);
builder.field(Fields.CURRENT_AS_SOURCE, currentAsSource());
builder.field(Fields.CURRENT_AS_TARGET, currentAsTarget());
builder.humanReadableField(Fields.THROTTLE_TIME_IN_MILLIS, Fields.THROTTLE_TIME, throttleTime());
builder.endObject();
return builder;
}
static final
|
RecoveryStats
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/types/parser/ParserTestBase.java
|
{
"start": 1173,
"end": 15540
}
|
class ____<T> {
public abstract String[] getValidTestValues();
public abstract T[] getValidTestResults();
public abstract String[] getInvalidTestValues();
public abstract boolean allowsEmptyField();
public abstract FieldParser<T> getParser();
public abstract Class<T> getTypeClass();
@Test
void testTest() {
assertThat(getParser()).isNotNull();
assertThat(getTypeClass()).isNotNull();
assertThat(getValidTestValues()).isNotNull();
assertThat(getValidTestResults()).isNotNull();
assertThat(getInvalidTestValues()).isNotNull();
assertThat(getValidTestValues()).hasSameSizeAs(getValidTestResults());
}
@Test
void testGetValue() {
FieldParser<?> parser = getParser();
Object created = parser.createValue();
assertThat(created).isNotNull().isInstanceOf(getTypeClass());
}
@Test
void testValidStringInIsolation() {
String[] testValues = getValidTestValues();
T[] results = getValidTestResults();
for (int i = 0; i < testValues.length; i++) {
FieldParser<T> parser1 = getParser();
FieldParser<T> parser2 = getParser();
FieldParser<T> parser3 = getParser();
byte[] bytes1 = testValues[i].getBytes(ConfigConstants.DEFAULT_CHARSET);
byte[] bytes2 = testValues[i].getBytes(ConfigConstants.DEFAULT_CHARSET);
byte[] bytes3 = testValues[i].getBytes(ConfigConstants.DEFAULT_CHARSET);
int numRead1 =
parser1.parseField(
bytes1, 0, bytes1.length, new byte[] {'|'}, parser1.createValue());
int numRead2 =
parser2.parseField(
bytes2, 0, bytes2.length, new byte[] {'&', '&'}, parser2.createValue());
int numRead3 =
parser3.parseField(
bytes3,
0,
bytes3.length,
new byte[] {'9', '9', '9'},
parser3.createValue());
assertThat(numRead1)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(numRead2)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(numRead3)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(bytes1).hasSize(numRead1);
assertThat(bytes2).hasSize(numRead2);
assertThat(bytes3).hasSize(numRead3);
assertThat(parser1.getLastResult()).isEqualTo(results[i]);
assertThat(parser2.getLastResult()).isEqualTo(results[i]);
assertThat(parser3.getLastResult()).isEqualTo(results[i]);
}
}
@Test
void testValidStringInIsolationWithEndDelimiter() {
String[] testValues = getValidTestValues();
T[] results = getValidTestResults();
for (int i = 0; i < testValues.length; i++) {
FieldParser<T> parser1 = getParser();
FieldParser<T> parser2 = getParser();
String testVal1 = testValues[i] + "|";
String testVal2 = testValues[i] + "&&&&";
byte[] bytes1 = testVal1.getBytes(ConfigConstants.DEFAULT_CHARSET);
byte[] bytes2 = testVal2.getBytes(ConfigConstants.DEFAULT_CHARSET);
int numRead1 =
parser1.parseField(
bytes1, 0, bytes1.length, new byte[] {'|'}, parser1.createValue());
int numRead2 =
parser2.parseField(
bytes2,
0,
bytes2.length,
new byte[] {'&', '&', '&', '&'},
parser2.createValue());
assertThat(numRead1)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(numRead2)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(bytes1).hasSize(numRead1);
assertThat(bytes2).hasSize(numRead2);
assertThat(parser1.getLastResult()).isEqualTo(results[i]);
assertThat(parser2.getLastResult()).isEqualTo(results[i]);
}
}
@Test
void testConcatenated() {
String[] testValues = getValidTestValues();
T[] results = getValidTestResults();
byte[] allBytesWithDelimiter = concatenate(testValues, new char[] {'|'}, true);
byte[] allBytesNoDelimiterEnd = concatenate(testValues, new char[] {','}, false);
FieldParser<T> parser1 = getParser();
FieldParser<T> parser2 = getParser();
T val1 = parser1.createValue();
T val2 = parser2.createValue();
int pos1 = 0;
int pos2 = 0;
for (int i = 0; i < results.length; i++) {
pos1 =
parser1.parseField(
allBytesWithDelimiter,
pos1,
allBytesWithDelimiter.length,
new byte[] {'|'},
val1);
pos2 =
parser2.parseField(
allBytesNoDelimiterEnd,
pos2,
allBytesNoDelimiterEnd.length,
new byte[] {','},
val2);
assertThat(pos1)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(pos2)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
assertThat(parser1.getLastResult()).isEqualTo(results[i]);
assertThat(parser2.getLastResult()).isEqualTo(results[i]);
}
}
@Test
void testConcatenatedMultiCharDelimiter() {
String[] testValues = getValidTestValues();
T[] results = getValidTestResults();
byte[] allBytesWithDelimiter =
concatenate(testValues, new char[] {'&', '&', '&', '&'}, true);
byte[] allBytesNoDelimiterEnd = concatenate(testValues, new char[] {'9', '9', '9'}, false);
FieldParser<T> parser1 = getParser();
FieldParser<T> parser2 = getParser();
T val1 = parser1.createValue();
T val2 = parser2.createValue();
int pos1 = 0;
int pos2 = 0;
for (int i = 0; i < results.length; i++) {
pos1 =
parser1.parseField(
allBytesWithDelimiter,
pos1,
allBytesWithDelimiter.length,
new byte[] {'&', '&', '&', '&'},
val1);
assertThat(pos1)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
T result1 = parser1.getLastResult();
assertThat(result1).isEqualTo(results[i]);
pos2 =
parser2.parseField(
allBytesNoDelimiterEnd,
pos2,
allBytesNoDelimiterEnd.length,
new byte[] {'9', '9', '9'},
val2);
assertThat(pos2)
.describedAs(
"Parser declared the valid value " + testValues[i] + " as invalid.")
.isNotEqualTo(-1);
T result2 = parser2.getLastResult();
assertThat(result2).isEqualTo(results[i]);
}
}
@Test
void testInValidStringInIsolation() {
String[] testValues = getInvalidTestValues();
for (String testValue : testValues) {
FieldParser<T> parser = getParser();
byte[] bytes = testValue.getBytes(ConfigConstants.DEFAULT_CHARSET);
int numRead =
parser.parseField(
bytes, 0, bytes.length, new byte[] {'|'}, parser.createValue());
assertThat(numRead)
.describedAs("Parser accepted the invalid value " + testValue + ".")
.isEqualTo(-1);
}
}
@Test
void testInValidStringsMixedIn() {
String[] validValues = getValidTestValues();
T[] validResults = getValidTestResults();
String[] invalidTestValues = getInvalidTestValues();
FieldParser<T> parser = getParser();
T value = parser.createValue();
for (String invalid : invalidTestValues) {
// place an invalid string in the middle
String[] testLine = new String[validValues.length + 1];
int splitPoint = validValues.length / 2;
System.arraycopy(validValues, 0, testLine, 0, splitPoint);
testLine[splitPoint] = invalid;
System.arraycopy(
validValues,
splitPoint,
testLine,
splitPoint + 1,
validValues.length - splitPoint);
byte[] bytes = concatenate(testLine, new char[] {'%'}, true);
// read the valid parts
int pos = 0;
for (int i = 0; i < splitPoint; i++) {
pos = parser.parseField(bytes, pos, bytes.length, new byte[] {'%'}, value);
assertThat(pos)
.describedAs(
"Parser declared the valid value "
+ validValues[i]
+ " as invalid.")
.isNotEqualTo(-1);
T result = parser.getLastResult();
assertThat(result).describedAs("Parser parsed wrong.").isEqualTo(validResults[i]);
}
// fail on the invalid part
pos = parser.parseField(bytes, pos, bytes.length, new byte[] {'%'}, value);
assertThat(pos)
.describedAs("Parser accepted the invalid value " + invalid + ".")
.isEqualTo(-1);
}
}
@Test
@SuppressWarnings("unchecked")
public void testStaticParseMethod() throws IllegalAccessException, InvocationTargetException {
Method parseMethod;
try {
parseMethod =
getParser()
.getClass()
.getMethod(
"parseField", byte[].class, int.class, int.class, char.class);
} catch (NoSuchMethodException e) {
return;
}
String[] testValues = getValidTestValues();
T[] results = getValidTestResults();
for (int i = 0; i < testValues.length; i++) {
byte[] bytes = testValues[i].getBytes(ConfigConstants.DEFAULT_CHARSET);
assertThat((T) parseMethod.invoke(null, bytes, 0, bytes.length, '|'))
.isEqualTo(results[i]);
}
}
@Test
void testStaticParseMethodWithInvalidValues() {
Method parseMethod;
try {
parseMethod =
getParser()
.getClass()
.getMethod(
"parseField", byte[].class, int.class, int.class, char.class);
} catch (NoSuchMethodException e) {
return;
}
String[] testValues = getInvalidTestValues();
for (String testValue : testValues) {
byte[] bytes = testValue.getBytes(ConfigConstants.DEFAULT_CHARSET);
Method finalParseMethod = parseMethod;
assertThatThrownBy(() -> finalParseMethod.invoke(null, bytes, 0, bytes.length, '|'))
.isInstanceOf(InvocationTargetException.class);
}
}
private static byte[] concatenate(String[] values, char[] delimiter, boolean delimiterAtEnd) {
int len = 0;
for (String s : values) {
len += s.length() + delimiter.length;
}
if (!delimiterAtEnd) {
len -= delimiter.length;
}
int currPos = 0;
byte[] result = new byte[len];
for (int i = 0; i < values.length; i++) {
String s = values[i];
byte[] bytes = s.getBytes(ConfigConstants.DEFAULT_CHARSET);
int numBytes = bytes.length;
System.arraycopy(bytes, 0, result, currPos, numBytes);
currPos += numBytes;
if (delimiterAtEnd || i < values.length - 1) {
for (char c : delimiter) result[currPos++] = (byte) c;
}
}
return result;
}
@Test
void testTrailingEmptyField() {
FieldParser<T> parser = getParser();
byte[] bytes = "||".getBytes(ConfigConstants.DEFAULT_CHARSET);
for (int i = 0; i < 2; i++) {
// test empty field with trailing delimiter when i = 0,
// test empty field without trailing delimiter when i= 1.
int numRead =
parser.parseField(
bytes, i, bytes.length, new byte[] {'|'}, parser.createValue());
assertThat(parser.getErrorState()).isEqualTo(FieldParser.ParseErrorState.EMPTY_COLUMN);
if (this.allowsEmptyField()) {
assertThat(numRead).isNotEqualTo(-1);
assertThat(numRead).isEqualTo(i + 1);
} else {
assertThat(numRead).isEqualTo(-1);
}
parser.resetParserState();
}
}
}
|
ParserTestBase
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/ExtendedCamelContext.java
|
{
"start": 2153,
"end": 12113
}
|
interface ____ {
/**
* Sets the name (id) of this context.
* <p/>
* This operation is mostly only used by different Camel runtimes such as camel-spring, camel-cdi, camel-spring-boot
* etc. Important: Setting the name should only be set before CamelContext is started.
*
* @param name the name
*/
void setName(String name);
default String getName() {
return null;
}
/**
* Sets the description of this Camel application.
*/
void setDescription(String description);
default String getDescription() {
return null;
}
/**
* Sets the profile Camel should run as (dev,test,prod).
*/
void setProfile(String profile);
/**
* The profile Camel should run as (dev,test,prod). Returns null if no profile has been set.
*/
default String getProfile() {
return null;
}
/**
* Sets the registry Camel should use for looking up beans by name or type.
* <p/>
* This operation is mostly only used by different Camel runtimes such as camel-spring, camel-cdi, camel-spring-boot
* etc. Important: Setting the registry should only be set before CamelContext is started.
*
* @param registry the registry such as DefaultRegistry or
*/
void setRegistry(Registry registry);
/**
* Sets the assembler to assemble a {@link javax.management.modelmbean.RequiredModelMBean}
*
* @param managementMBeanAssembler the assembler to use
*/
void setManagementMBeanAssembler(ManagementMBeanAssembler managementMBeanAssembler);
default Registry getRegistry() {
return null;
}
/**
* Method to signal to {@link CamelContext} that the process to initialize setup routes is in progress.
*
* @param done <tt>false</tt> to start the process, call again with <tt>true</tt> to signal its done.
* @see #isSetupRoutes()
*/
void setupRoutes(boolean done);
/**
* Indicates whether current thread is setting up route(s) as part of starting Camel.
* <p/>
* This can be useful to know by {@link LifecycleStrategy} or the likes, in case they need to react differently.
* <p/>
* As the startup procedure of {@link CamelContext} is slightly different when using plain Java versus
* camel-spring-xml, then we need to know when spring is setting up the routes, which can happen after the
* {@link CamelContext} itself is in started state.
*
* @return <tt>true</tt> if current thread is setting up route(s), or <tt>false</tt> if not.
* @see #setupRoutes(boolean)
*/
boolean isSetupRoutes();
/**
* Method to signal to {@link CamelContext} that the process to create routes is in progress.
*
* @param routeId the current id of the route being created
* @see #getCreateRoute()
*/
void createRoute(String routeId);
/**
* Indicates whether current thread is creating a route as part of starting Camel.
* <p/>
* This can be useful to know by {@link LifecycleStrategy} or the likes, in case they need to react differently.
*
* @return the route id currently being created/started, or <tt>null</tt> if not.
* @see #createRoute(String)
*/
String getCreateRoute();
/**
* Method to signal to {@link CamelContext} that creation of a given processor is in progress.
*
* @param processorId the current id of the processor being created
* @see #getCreateProcessor()
*/
void createProcessor(String processorId);
/**
* Indicates whether current thread is creating a processor as part of starting Camel.
* <p/>
* This can be useful to know by {@link LifecycleStrategy} or the likes, in case they need to react differently.
*
* @return the current id of the processor being created
* @see #createProcessor(String)
*/
String getCreateProcessor();
/**
* Registers a {@link org.apache.camel.spi.EndpointStrategy callback} to allow you to do custom logic when an
* {@link Endpoint} is about to be registered to the {@link org.apache.camel.spi.EndpointRegistry}.
* <p/>
* When a callback is registered it will be executed on the already registered endpoints allowing you to catch-up
*
* @param strategy callback to be invoked
*/
void registerEndpointCallback(EndpointStrategy strategy);
/**
* Registers an {@link AutoMockInterceptStrategy callback} which is used for intercepting sending messages to
* endpoints, and sending a copy to mock endpoints. This is a feature available with camel-test.
*
* @param strategy callback to be invoked
*/
void registerAutoMockInterceptStrategy(AutoMockInterceptStrategy strategy);
/**
* Resolves the given name to an {@link Endpoint} of the specified type (scope is prototype). If the name has a
* singleton endpoint registered, then the singleton is returned. Otherwise, a new {@link Endpoint} is created.
*
* The endpoint is NOT registered in the {@link org.apache.camel.spi.EndpointRegistry} as its prototype scoped, and
* therefore expected to be short lived and discarded after use (you must stop and shutdown the endpoint when no
* longer in use).
*
* @param uri the URI of the endpoint
* @return the endpoint
*
* @see CamelContext#getEndpoint(String)
*/
Endpoint getPrototypeEndpoint(String uri);
/**
* Resolves the given name to an {@link Endpoint} of the specified type (scope is prototype). If the name has a
* singleton endpoint registered, then the singleton is returned. Otherwise, a new {@link Endpoint} is created.
*
* The endpoint is NOT registered in the {@link org.apache.camel.spi.EndpointRegistry} as its prototype scoped, and
* therefore expected to be short lived and discarded after use (you must stop and shutdown the endpoint when no
* longer in use).
*
* @param uri the URI of the endpoint
* @return the endpoint
*
* @see CamelContext#getEndpoint(String)
*/
Endpoint getPrototypeEndpoint(NormalizedEndpointUri uri);
/**
* Is the given endpoint already registered in the {@link org.apache.camel.spi.EndpointRegistry}
*
* @param uri the URI of the endpoint
* @return the registered endpoint or <tt>null</tt> if not registered
*/
Endpoint hasEndpoint(NormalizedEndpointUri uri);
/**
* Resolves the given name to an {@link Endpoint} of the specified type. If the name has a singleton endpoint
* registered, then the singleton is returned. Otherwise, a new {@link Endpoint} is created and registered in the
* {@link org.apache.camel.spi.EndpointRegistry}.
*
* @param uri the URI of the endpoint
* @return the endpoint
*
* @see #getPrototypeEndpoint(String)
*/
Endpoint getEndpoint(NormalizedEndpointUri uri);
/**
* Resolves the given name to an {@link Endpoint} of the specified type. If the name has a singleton endpoint
* registered, then the singleton is returned. Otherwise, a new {@link Endpoint} is created and registered in the
* {@link org.apache.camel.spi.EndpointRegistry}.
*
* @param uri the URI of the endpoint
* @param parameters the parameters to customize the endpoint
* @return the endpoint
*
* @see #getPrototypeEndpoint(String)
*/
Endpoint getEndpoint(NormalizedEndpointUri uri, Map<String, Object> parameters);
/**
* Normalizes the given uri.
*
* @param uri the uri
* @return a normalized uri
*/
NormalizedEndpointUri normalizeUri(String uri);
/**
* Returns the order in which the route inputs was started.
* <p/>
* The order may not be according to the startupOrder defined on the route. For example a route could be started
* manually later, or new routes added at runtime.
*
* @return a list in the order how routes was started
*/
List<RouteStartupOrder> getRouteStartupOrder();
/**
* Adds a {@link BootstrapCloseable} task.
*/
void addBootstrap(BootstrapCloseable bootstrap);
/**
* Returns an unmodifiable list of the services registered currently in this {@link CamelContext}.
*/
List<Service> getServices();
/**
* Gets the exchange factory to use.
*/
ExchangeFactory getExchangeFactory();
/**
* Sets a custom exchange factory to use.
*/
void setExchangeFactory(ExchangeFactory exchangeFactory);
/**
* Gets the exchange factory manager to use.
*/
ExchangeFactoryManager getExchangeFactoryManager();
/**
* Sets a custom exchange factory manager to use.
*/
void setExchangeFactoryManager(ExchangeFactoryManager exchangeFactoryManager);
/**
* Gets the processor exchange factory to use.
*/
ProcessorExchangeFactory getProcessorExchangeFactory();
/**
* Sets a custom processor exchange factory to use.
*/
void setProcessorExchangeFactory(ProcessorExchangeFactory processorExchangeFactory);
/**
* Returns the management mbean assembler
*
* @return the mbean assembler
*/
ManagementMBeanAssembler getManagementMBeanAssembler();
/**
* Gets the default error handler builder which is inherited by the routes
*
* @return the builder
*/
ErrorHandlerFactory getErrorHandlerFactory();
/**
* Sets the default error handler builder which is inherited by the routes
*
* @param errorHandlerFactory the builder
*/
void setErrorHandlerFactory(ErrorHandlerFactory errorHandlerFactory);
/**
* Gets the default FactoryFinder which will be used for the loading the factory
|
ExtendedCamelContext
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
|
{
"start": 2799,
"end": 2927
}
|
class ____ {
/**
* A Collector type for arrays.
*
* @param <E> The array type.
*/
public static
|
Streams
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/util/retryable/RetryPredicates.java
|
{
"start": 1827,
"end": 2202
}
|
class ____<T>
implements Predicate<Collection<T>>, Serializable {
private static final long serialVersionUID = 1L;
@Override
public boolean test(Collection<T> ts) {
if (null == ts || ts.isEmpty()) {
return true;
}
return false;
}
}
private static final
|
EmptyResultPredicate
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/headers/ResponseHeaderTest.java
|
{
"start": 7113,
"end": 12073
}
|
class ____ {
private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0];
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@ResponseHeader(name = "Keep-Alive", value = "timeout=5, max=997")
@GET
@Path(("/uni"))
public Uni<String> getTestUni() {
return Uni.createFrom().item("test");
}
@ResponseHeader(name = "foo", value = "bar")
@ResponseStatus(201)
@GET
@Path(("/uni2"))
public Uni<String> getTestUni2() {
return Uni.createFrom().item("test");
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@ResponseHeader(name = "Keep-Alive", value = "timeout=5, max=997")
@GET
@Path("/multi")
public Multi<String> getTestMulti() {
return Multi.createFrom().item("test");
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@ResponseHeader(name = "Keep-Alive", value = "timeout=5, max=997")
@GET
@Path("/completion")
public CompletionStage<String> getTestCompletion() {
return CompletableFuture.supplyAsync(() -> "test");
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@ResponseHeader(name = "Keep-Alive", value = "timeout=5, max=997")
@GET
@Path("/plain")
public String getTestPlain() {
return "test";
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@GET
@Path(("/exception_uni"))
public Uni<String> throwExceptionUni() {
return Uni.createFrom().failure(createException());
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@GET
@Path("/exception_multi")
public Multi<String> throwExceptionMulti() {
return Multi.createFrom().failure(createException());
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@Path("/exception_completion")
public CompletionStage<String> throwExceptionCompletion() {
return CompletableFuture.failedFuture(createException());
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@GET
@Path("/exception_plain")
public String throwExceptionPlain() {
throw createException();
}
@ResponseHeader(name = "Access-Control-Allow-Origin", value = "*")
@ResponseHeader(name = "Keep-Alive", value = "timeout=5, max=997")
@GET
@Path("/rest-multi")
public RestMulti<String> getTestRestMulti() {
return RestMulti.fromMultiData(Multi.createFrom().item("test")).header("Access-Control-Allow-Origin", "foo")
.header("Keep-Alive", "bar").build();
}
@GET
@Path("/rest-multi2")
public RestMulti<String> getTestRestMulti2(@DefaultValue("bar") @RestQuery String keepAlive) {
return RestMulti.fromMultiData(Multi.createFrom().item("test")).header("Access-Control-Allow-Origin", "foo")
.header("Keep-Alive", keepAlive).build();
}
@GET
@Path("/rest-multi3")
@Produces("application/octet-stream")
public RestMulti<byte[]> getTestRestMulti3(@DefaultValue("foo") @RestQuery("h1") String header1,
@DefaultValue("bar") @RestQuery("h2") String header2) {
return RestMulti.fromUniResponse(getWrapper(header1, header2), Wrapper::getData, Wrapper::getHeaders);
}
@GET
@Path("/rest-multi4")
public RestMulti<byte[]> getTestRestMulti4(@DefaultValue("bar") @RestQuery String keepAlive) {
return RestMulti.fromMultiData(Multi.createFrom().item("test".getBytes(StandardCharsets.UTF_8)))
.header("Access-Control-Allow-Origin", "foo")
.header("Keep-Alive", keepAlive).header("Content-Type", MediaType.TEXT_PLAIN).build();
}
@GET
@Path("/rest-multi5")
public RestMulti<byte[]> getTestRestMulti5(@DefaultValue("foo") @RestQuery("h1") String header1,
@DefaultValue("bar") @RestQuery("h2") String header2) {
return RestMulti.fromUniResponse(getWrapper(header1, header2), Wrapper::getData, Wrapper::getHeaders);
}
private IllegalArgumentException createException() {
IllegalArgumentException result = new IllegalArgumentException();
result.setStackTrace(EMPTY_STACK_TRACE);
return result;
}
private Uni<Wrapper> getWrapper(String header1, String header2) {
return Uni.createFrom().item(
() -> new Wrapper(Multi.createFrom().item("test".getBytes(StandardCharsets.UTF_8)), header1, header2));
}
private static final
|
TestResource
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java
|
{
"start": 7724,
"end": 8918
}
|
class ____ extends MapReduceBase
implements Mapper<LongWritable, Text, Text, Text> {
private Text keyText;
private Text valueText;
public MyMapper() {
keyText = new Text();
valueText = new Text();
}
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String record = value.toString();
int blankPos = record.indexOf(" ");
keyText.set(record.substring(0, blankPos));
valueText.set(record.substring(blankPos+1));
output.collect(keyText, valueText);
}
public void close() throws IOException {
}
}
/**
* Partitioner implementation to make sure that output is in total sorted
* order. We basically route key ranges to different reducers such that
* key values monotonically increase with the partition number. For example,
* in a test with 4 reducers, the keys are numbers from 1 to 1000 in the
* form "000000001" to "000001000" in each input file. The keys "000000001"
* to "000000250" are routed to partition 0, "000000251" to "000000500" are
* routed to partition 1.
*/
static
|
MyMapper
|
java
|
grpc__grpc-java
|
binder/src/main/java/io/grpc/binder/internal/Outbound.java
|
{
"start": 2288,
"end": 10337
}
|
enum ____ {
INITIAL,
PREFIX_SENT,
ALL_MESSAGES_SENT,
SUFFIX_SENT,
CLOSED,
}
/*
* Represents the state of data we've sent in binder transactions.
*/
@GuardedBy("this")
private State outboundState = State.INITIAL; // Represents what we've delivered.
// ----------------------------------
// For reporting to StatsTraceContext.
/** Indicates we're ready to send the prefix. */
private boolean prefixReady;
@Nullable private InputStream firstMessage;
@Nullable private Queue<InputStream> messageQueue;
/**
* Indicates we have everything ready to send the suffix. This implies we have all outgoing
* messages, and any additional data which needs to be send after the last message. (e.g.
* trailers).
*/
private boolean suffixReady;
/**
* The index of the next transaction we'll send, allowing the receiver to re-assemble out-of-order
* messages.
*/
@GuardedBy("this")
private int transactionIndex;
// ----------------------------------
// For reporting to StatsTraceContext.
private int numDeliveredMessages;
private int messageSize;
private Outbound(BinderTransport transport, int callId, StatsTraceContext statsTraceContext) {
this.transport = transport;
this.callId = callId;
this.statsTraceContext = statsTraceContext;
}
final StatsTraceContext getStatsTraceContext() {
return statsTraceContext;
}
/** Call to add a message to be delivered. Implies onPrefixReady(). */
@GuardedBy("this")
final void addMessage(InputStream message) throws StatusException {
onPrefixReady(); // This is implied.
if (messageQueue != null) {
messageQueue.add(message);
} else if (firstMessage == null) {
firstMessage = message;
} else {
messageQueue = new ConcurrentLinkedQueue<>();
messageQueue.add(message);
}
}
@GuardedBy("this")
protected final void onPrefixReady() {
this.prefixReady = true;
}
@GuardedBy("this")
protected final void onSuffixReady() {
this.suffixReady = true;
}
// =====================
// Updates to delivery.
@GuardedBy("this")
private void onOutboundState(State outboundState) {
checkTransition(this.outboundState, outboundState);
this.outboundState = outboundState;
}
// ===================
// Internals.
@GuardedBy("this")
protected final boolean messageAvailable() {
if (messageQueue != null) {
return !messageQueue.isEmpty();
} else if (firstMessage != null) {
return numDeliveredMessages == 0;
} else {
return false;
}
}
@Nullable
@GuardedBy("this")
private final InputStream peekNextMessage() {
if (numDeliveredMessages == 0) {
return firstMessage;
} else if (messageQueue != null) {
return messageQueue.peek();
}
return null;
}
@GuardedBy("this")
private final boolean canSend() {
switch (outboundState) {
case INITIAL:
if (!prefixReady) {
return false;
}
break;
case PREFIX_SENT:
// We can only send something if we have messages or the suffix.
// Note that if we have the suffix but no messages in this state, it means we've been closed
// early.
if (!messageAvailable() && !suffixReady) {
return false;
}
break;
case ALL_MESSAGES_SENT:
if (!suffixReady) {
return false;
}
break;
default:
return false;
}
return isReady();
}
final boolean isReady() {
return transport.isReady();
}
@GuardedBy("this")
final void onTransportReady() throws StatusException {
// The transport has become ready, attempt sending.
send();
}
@GuardedBy("this")
final void send() throws StatusException {
while (canSend()) {
try {
sendInternal();
} catch (StatusException se) {
// Ensure we don't send anything else and rethrow.
onOutboundState(State.CLOSED);
throw se;
}
}
}
@GuardedBy("this")
@SuppressWarnings("fallthrough")
protected final void sendInternal() throws StatusException {
try (ParcelHolder parcel = ParcelHolder.obtain()) {
int flags = 0;
parcel.get().writeInt(0); // Placeholder for flags. Will be filled in below.
parcel.get().writeInt(transactionIndex++);
switch (outboundState) {
case INITIAL:
flags |= TransactionUtils.FLAG_PREFIX;
flags |= writePrefix(parcel.get());
onOutboundState(State.PREFIX_SENT);
if (!messageAvailable() && !suffixReady) {
break;
}
// Fall-through.
case PREFIX_SENT:
InputStream messageStream = peekNextMessage();
if (messageStream != null) {
flags |= TransactionUtils.FLAG_MESSAGE_DATA;
flags |= writeMessageData(parcel.get(), messageStream);
} else {
checkState(suffixReady);
}
if (suffixReady && !messageAvailable()) {
onOutboundState(State.ALL_MESSAGES_SENT);
} else {
// There's still more message data to deliver, break out.
break;
}
// Fall-through.
case ALL_MESSAGES_SENT:
flags |= TransactionUtils.FLAG_SUFFIX;
flags |= writeSuffix(parcel.get());
onOutboundState(State.SUFFIX_SENT);
break;
default:
throw new AssertionError();
}
TransactionUtils.fillInFlags(parcel.get(), flags);
int dataSize = parcel.get().dataSize();
transport.sendTransaction(callId, parcel);
statsTraceContext.outboundWireSize(dataSize);
statsTraceContext.outboundUncompressedSize(dataSize);
} catch (IOException e) {
throw Status.INTERNAL.withCause(e).asException();
}
}
protected final void unregister() {
transport.unregisterCall(callId);
}
@Override
public synchronized String toString() {
return getClass().getSimpleName()
+ "[S="
+ outboundState
+ "/NDM="
+ numDeliveredMessages
+ "]";
}
/**
* Write prefix data to the given {@link Parcel}.
*
* @param parcel the transaction parcel to write to.
* @return any additional flags to be set on the transaction.
*/
@GuardedBy("this")
protected abstract int writePrefix(Parcel parcel) throws IOException, StatusException;
/**
* Write suffix data to the given {@link Parcel}.
*
* @param parcel the transaction parcel to write to.
* @return any additional flags to be set on the transaction.
*/
@GuardedBy("this")
protected abstract int writeSuffix(Parcel parcel) throws IOException, StatusException;
@GuardedBy("this")
private final int writeMessageData(Parcel parcel, InputStream stream) throws IOException {
int flags = 0;
boolean dataRemaining = false;
if (stream instanceof ParcelableInputStream) {
flags |= TransactionUtils.FLAG_MESSAGE_DATA_IS_PARCELABLE;
messageSize = ((ParcelableInputStream) stream).writeToParcel(parcel);
} else {
byte[] block = BlockPool.acquireBlock();
try {
int size = stream.read(block);
if (size <= 0) {
parcel.writeInt(0);
} else {
parcel.writeInt(size);
parcel.writeByteArray(block, 0, size);
messageSize += size;
if (size == block.length) {
flags |= TransactionUtils.FLAG_MESSAGE_DATA_IS_PARTIAL;
dataRemaining = true;
}
}
} finally {
BlockPool.releaseBlock(block);
}
}
if (!dataRemaining) {
stream.close();
int index = numDeliveredMessages++;
if (index > 0) {
checkNotNull(messageQueue).poll();
}
statsTraceContext.outboundMessage(index);
statsTraceContext.outboundMessageSent(index, messageSize, messageSize);
messageSize = 0;
}
return flags;
}
// ======================================
// Client-side outbound transactions.
static final
|
State
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/util/MockCreationValidator.java
|
{
"start": 791,
"end": 2475
}
|
class ____ {
public void validateType(Class<?> classToMock, String mockMaker) {
TypeMockability typeMockability = MockUtil.typeMockabilityOf(classToMock, mockMaker);
if (!typeMockability.mockable()) {
throw cannotMockClass(classToMock, typeMockability.nonMockableReason());
}
}
public void validateExtraInterfaces(
Class<?> classToMock, Collection<Class<?>> extraInterfaces) {
if (extraInterfaces == null) {
return;
}
for (Class<?> i : extraInterfaces) {
if (classToMock == i) {
throw extraInterfacesCannotContainMockedType(classToMock);
}
}
}
public void validateMockedType(Class<?> classToMock, Object spiedInstance) {
if (classToMock == null || spiedInstance == null) {
return;
}
if (!classToMock.equals(spiedInstance.getClass())) {
throw mockedTypeIsInconsistentWithSpiedInstanceType(classToMock, spiedInstance);
}
}
public void validateDelegatedInstance(Class<?> classToMock, Object delegatedInstance) {
if (classToMock == null || delegatedInstance == null) {
return;
}
if (delegatedInstance.getClass().isAssignableFrom(classToMock)) {
throw mockedTypeIsInconsistentWithDelegatedInstanceType(classToMock, delegatedInstance);
}
}
public void validateConstructorUse(boolean usingConstructor, SerializableMode mode) {
if (usingConstructor && mode == SerializableMode.ACROSS_CLASSLOADERS) {
throw usingConstructorWithFancySerializable(mode);
}
}
}
|
MockCreationValidator
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/codec/tsdb/es819/ES819TSDBDocValuesProducer.java
|
{
"start": 70343,
"end": 95784
}
|
class ____ {
final long maxOrd;
final DirectMonotonicReader startDocs;
private long currentIndex = -1;
private long rangeEndExclusive = -1;
SortedOrdinalReader(long maxOrd, DirectMonotonicReader startDocs) {
this.maxOrd = maxOrd;
this.startDocs = startDocs;
}
long readValueAndAdvance(int doc) {
if (doc < rangeEndExclusive) {
return currentIndex;
}
// move to the next range
if (doc == rangeEndExclusive) {
currentIndex++;
} else {
currentIndex = searchRange(doc);
}
rangeEndExclusive = startDocs.get(currentIndex + 1);
return currentIndex;
}
private long searchRange(int doc) {
long index = startDocs.binarySearch(currentIndex + 1, maxOrd, doc);
if (index < 0) {
index = -2 - index;
}
assert index < maxOrd : "invalid range " + index + " for doc " + doc + " in maxOrd " + maxOrd;
return index;
}
long lookAheadValue(int targetDoc) {
if (targetDoc < rangeEndExclusive) {
return currentIndex;
} else {
return searchRange(targetDoc);
}
}
}
private NumericDocValues getNumeric(NumericEntry entry, long maxOrd) throws IOException {
if (entry.docsWithFieldOffset == -2) {
// empty
return DocValues.emptyNumeric();
}
if (maxOrd == 1) {
// Special case for maxOrd 1, no need to read blocks and use ordinal 0 as only value
if (entry.docsWithFieldOffset == -1) {
// Special case when all docs have a value
return new BaseDenseNumericValues(maxDoc) {
@Override
public long longValue() {
// Only one ordinal!
return 0L;
}
@Override
public int docIDRunEnd() {
return maxDoc;
}
@Override
long lookAheadValueAt(int targetDoc) throws IOException {
return 0L; // Only one ordinal!
}
@Override
SortedOrdinalReader sortedOrdinalReader() {
return null;
}
};
} else {
final IndexedDISI disi = new IndexedDISI(
data,
entry.docsWithFieldOffset,
entry.docsWithFieldLength,
entry.jumpTableEntryCount,
entry.denseRankPower,
entry.numValues
);
return new BaseSparseNumericValues(disi) {
@Override
public long longValue() throws IOException {
return 0L; // Only one ordinal!
}
@Override
public int docIDRunEnd() throws IOException {
return disi.docIDRunEnd();
}
};
}
} else if (entry.sortedOrdinals != null) {
return getRangeEncodedNumericDocValues(entry, maxOrd);
}
// NOTE: we could make this a bit simpler by reusing #getValues but this
// makes things slower.
final RandomAccessInput indexSlice = data.randomAccessSlice(entry.indexOffset, entry.indexLength);
final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice, merging);
final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength);
final int bitsPerOrd = maxOrd >= 0 ? PackedInts.bitsRequired(maxOrd - 1) : -1;
if (entry.docsWithFieldOffset == -1) {
// dense
return new BaseDenseNumericValues(maxDoc) {
private final TSDBDocValuesEncoder decoder = new TSDBDocValuesEncoder(numericBlockSize);
private long currentBlockIndex = -1;
private final long[] currentBlock = new long[numericBlockSize];
// lookahead block
private long lookaheadBlockIndex = -1;
private long[] lookaheadBlock;
private IndexInput lookaheadData = null;
@Override
public int docIDRunEnd() {
return maxDoc;
}
@Override
public long longValue() throws IOException {
final int index = doc;
final int blockIndex = index >>> numericBlockShift;
final int blockInIndex = index & numericBlockMask;
if (blockIndex == currentBlockIndex) {
return currentBlock[blockInIndex];
}
if (blockIndex == lookaheadBlockIndex) {
return lookaheadBlock[blockInIndex];
}
assert blockIndex > currentBlockIndex : blockIndex + " < " + currentBlockIndex;
// no need to seek if the loading block is the next block
if (currentBlockIndex + 1 != blockIndex) {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
if (bitsPerOrd == -1) {
decoder.decode(valuesData, currentBlock);
} else {
decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
}
return currentBlock[blockInIndex];
}
@Override
public BlockLoader.Block tryRead(
BlockLoader.BlockFactory factory,
BlockLoader.Docs docs,
int offset,
boolean nullsFiltered,
BlockDocValuesReader.ToDouble toDouble,
boolean toInt
) throws IOException {
try (var singletonLongBuilder = singletonLongBuilder(factory, toDouble, docs.count() - offset, toInt)) {
return tryRead(singletonLongBuilder, docs, offset);
}
}
@Override
BlockLoader.Block tryRead(BlockLoader.SingletonLongBuilder builder, BlockLoader.Docs docs, int offset) throws IOException {
final int docsCount = docs.count();
doc = docs.get(docsCount - 1);
for (int i = offset; i < docsCount;) {
int index = docs.get(i);
final int blockIndex = index >>> numericBlockShift;
final int blockInIndex = index & numericBlockMask;
if (blockIndex != currentBlockIndex) {
assert blockIndex > currentBlockIndex : blockIndex + " < " + currentBlockIndex;
// no need to seek if the loading block is the next block
if (currentBlockIndex + 1 != blockIndex) {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
if (bitsPerOrd == -1) {
decoder.decode(valuesData, currentBlock);
} else {
decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
}
}
// Try to append more than just one value:
// Instead of iterating over docs and find the max length, take an optimistic approach to avoid as
// many comparisons as there are remaining docs and instead do at most 7 comparisons:
int length = 1;
int remainingBlockLength = Math.min(numericBlockSize - blockInIndex, docsCount - i);
for (int newLength = remainingBlockLength; newLength > 1; newLength = newLength >> 1) {
int lastIndex = i + newLength - 1;
if (isDense(index, docs.get(lastIndex), newLength)) {
length = newLength;
break;
}
}
builder.appendLongs(currentBlock, blockInIndex, length);
i += length;
}
return builder.build();
}
@Override
long lookAheadValueAt(int targetDoc) throws IOException {
final int blockIndex = targetDoc >>> numericBlockShift;
final int valueIndex = targetDoc & numericBlockMask;
if (blockIndex == currentBlockIndex) {
return currentBlock[valueIndex];
}
// load data to the lookahead block
if (lookaheadBlockIndex != blockIndex) {
if (lookaheadBlock == null) {
lookaheadBlock = new long[numericBlockSize];
lookaheadData = data.slice("look_ahead_values", entry.valuesOffset, entry.valuesLength);
}
if (lookaheadBlockIndex + 1 != blockIndex) {
lookaheadData.seek(indexReader.get(blockIndex));
}
if (bitsPerOrd == -1) {
decoder.decode(lookaheadData, lookaheadBlock);
} else {
decoder.decodeOrdinals(lookaheadData, lookaheadBlock, bitsPerOrd);
}
lookaheadBlockIndex = blockIndex;
}
return lookaheadBlock[valueIndex];
}
@Override
SortedOrdinalReader sortedOrdinalReader() {
return null;
}
};
} else {
final IndexedDISI disi = new IndexedDISI(
data,
entry.docsWithFieldOffset,
entry.docsWithFieldLength,
entry.jumpTableEntryCount,
entry.denseRankPower,
entry.numValues
);
return new BaseSparseNumericValues(disi) {
private final TSDBDocValuesEncoder decoder = new TSDBDocValuesEncoder(numericBlockSize);
private IndexedDISI lookAheadDISI;
private long currentBlockIndex = -1;
private final long[] currentBlock = new long[numericBlockSize];
@Override
public int docIDRunEnd() throws IOException {
return disi.docIDRunEnd();
}
@Override
public long longValue() throws IOException {
final int index = disi.index();
final int blockIndex = index >>> numericBlockShift;
final int blockInIndex = index & numericBlockMask;
if (blockIndex != currentBlockIndex) {
assert blockIndex > currentBlockIndex : blockIndex + "<=" + currentBlockIndex;
// no need to seek if the loading block is the next block
if (currentBlockIndex + 1 != blockIndex) {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
if (bitsPerOrd == -1) {
decoder.decode(valuesData, currentBlock);
} else {
decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
}
}
return currentBlock[blockInIndex];
}
@Override
public BlockLoader.Block tryRead(
BlockLoader.BlockFactory factory,
BlockLoader.Docs docs,
int offset,
boolean nullsFiltered,
BlockDocValuesReader.ToDouble toDouble,
boolean toInt
) throws IOException {
if (nullsFiltered == false) {
return null;
}
final int firstDoc = docs.get(offset);
if (disi.advanceExact(firstDoc) == false) {
assert false : "nullsFiltered is true, but doc [" + firstDoc + "] has no value";
throw new IllegalStateException("nullsFiltered is true, but doc [" + firstDoc + "] has no value");
}
if (lookAheadDISI == null) {
lookAheadDISI = new IndexedDISI(
data,
entry.docsWithFieldOffset,
entry.docsWithFieldLength,
entry.jumpTableEntryCount,
entry.denseRankPower,
entry.numValues
);
}
final int lastDoc = docs.get(docs.count() - 1);
if (lookAheadDISI.advanceExact(lastDoc) == false) {
assert false : "nullsFiltered is true, but doc [" + lastDoc + "] has no value";
throw new IllegalStateException("nullsFiltered is true, but doc [" + lastDoc + "] has no value");
}
// Assumes docIds are unique - if the number of value indices between the first
// and last doc equals the doc count, all values can be read and converted in bulk
// TODO: Pass docCount attr for enrich and lookup.
final int firstIndex = disi.index();
final int lastIndex = lookAheadDISI.index();
final int valueCount = lastIndex - firstIndex + 1;
if (valueCount != docs.count()) {
return null;
}
if (Assertions.ENABLED) {
for (int i = 0; i < docs.count(); i++) {
final int doc = docs.get(i + offset);
assert disi.advanceExact(doc) : "nullsFiltered is true, but doc [" + doc + "] has no value";
assert disi.index() == firstIndex + i : "unexpected disi index " + (firstIndex + i) + "!=" + disi.index();
}
}
try (var singletonLongBuilder = singletonLongBuilder(factory, toDouble, valueCount, toInt)) {
for (int i = 0; i < valueCount;) {
final int index = firstIndex + i;
final int blockIndex = index >>> numericBlockShift;
final int blockStartIndex = index & numericBlockMask;
if (blockIndex != currentBlockIndex) {
assert blockIndex > currentBlockIndex : blockIndex + "<=" + currentBlockIndex;
if (currentBlockIndex + 1 != blockIndex) {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
decoder.decode(valuesData, currentBlock);
}
final int count = Math.min(numericBlockSize - blockStartIndex, valueCount - i);
singletonLongBuilder.appendLongs(currentBlock, blockStartIndex, count);
i += count;
}
return singletonLongBuilder.build();
}
}
};
}
}
private static boolean isDense(int firstDocId, int lastDocId, int length) {
// This does not detect duplicate docids (e.g [1, 1, 2, 4] would be detected as dense),
// this can happen with enrich or lookup. However this codec isn't used for enrich / lookup.
// This codec is only used in the context of logsdb and tsdb, so this is fine here.
return lastDocId - firstDocId == length - 1;
}
private NumericDocValues getRangeEncodedNumericDocValues(NumericEntry entry, long maxOrd) throws IOException {
final var ordinalsReader = new SortedOrdinalReader(
maxOrd,
DirectMonotonicReader.getInstance(entry.sortedOrdinals, data.randomAccessSlice(entry.valuesOffset, entry.valuesLength), true)
);
if (entry.docsWithFieldOffset == -1) {
return new BaseDenseNumericValues(maxDoc) {
@Override
long lookAheadValueAt(int targetDoc) {
return ordinalsReader.lookAheadValue(targetDoc);
}
@Override
public long longValue() {
return ordinalsReader.readValueAndAdvance(doc);
}
@Override
public int docIDRunEnd() throws IOException {
return maxDoc;
}
@Override
SortedOrdinalReader sortedOrdinalReader() {
return ordinalsReader;
}
};
} else {
final var disi = new IndexedDISI(
data,
entry.docsWithFieldOffset,
entry.docsWithFieldLength,
entry.jumpTableEntryCount,
entry.denseRankPower,
entry.numValues
);
return new BaseSparseNumericValues(disi) {
@Override
public long longValue() {
return ordinalsReader.readValueAndAdvance(disi.docID());
}
@Override
public int docIDRunEnd() throws IOException {
return disi.docIDRunEnd();
}
};
}
}
private NumericValues getValues(NumericEntry entry, final long maxOrd) throws IOException {
assert entry.numValues > 0;
final RandomAccessInput indexSlice = data.randomAccessSlice(entry.indexOffset, entry.indexLength);
final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice, merging);
final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength);
final int bitsPerOrd = maxOrd >= 0 ? PackedInts.bitsRequired(maxOrd - 1) : -1;
final long[] currentBlockIndex = { -1 };
final long[] currentBlock = new long[numericBlockSize];
final TSDBDocValuesEncoder decoder = new TSDBDocValuesEncoder(numericBlockSize);
return index -> {
final long blockIndex = index >>> numericBlockShift;
final int blockInIndex = (int) (index & numericBlockMask);
if (blockIndex != currentBlockIndex[0]) {
// no need to seek if the loading block is the next block
if (currentBlockIndex[0] + 1 != blockIndex) {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex[0] = blockIndex;
if (bitsPerOrd == -1) {
decoder.decode(valuesData, currentBlock);
} else {
decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
}
}
return currentBlock[blockInIndex];
};
}
private SortedNumericDocValues getSortedNumeric(SortedNumericEntry entry, long maxOrd) throws IOException {
if (entry.numValues == entry.numDocsWithField) {
return DocValues.singleton(getNumeric(entry, maxOrd));
}
final RandomAccessInput addressesInput = data.randomAccessSlice(entry.addressesOffset, entry.addressesLength);
final LongValues addresses = DirectMonotonicReader.getInstance(entry.addressesMeta, addressesInput, merging);
assert entry.sortedOrdinals == null : "encoded ordinal range supports only one value per document";
if (entry.sortedOrdinals != null) {
// TODO: determine when this can be removed.
// This is for the clusters that ended up using ordinal range encoding for multi-values fields. Only first value can be
// returned.
NumericDocValues values = getRangeEncodedNumericDocValues(entry, maxOrd);
return DocValues.singleton(values);
}
final NumericValues values = getValues(entry, maxOrd);
if (entry.docsWithFieldOffset == -1) {
// dense
return new SortedNumericDocValues() {
int doc = -1;
long start, end;
int count;
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int docID() {
return doc;
}
@Override
public long cost() {
return maxDoc;
}
@Override
public int advance(int target) throws IOException {
if (target >= maxDoc) {
return doc = NO_MORE_DOCS;
}
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
return doc = target;
}
@Override
public boolean advanceExact(int target) throws IOException {
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
doc = target;
return true;
}
@Override
public long nextValue() throws IOException {
return values.advance(start++);
}
@Override
public int docValueCount() {
return count;
}
@Override
public int docIDRunEnd() {
return maxDoc;
}
};
} else {
// sparse
final IndexedDISI disi = new IndexedDISI(
data,
entry.docsWithFieldOffset,
entry.docsWithFieldLength,
entry.jumpTableEntryCount,
entry.denseRankPower,
entry.numDocsWithField
);
return new SortedNumericDocValues() {
boolean set;
long start, end;
int count;
@Override
public int nextDoc() throws IOException {
set = false;
return disi.nextDoc();
}
@Override
public int docID() {
return disi.docID();
}
@Override
public long cost() {
return disi.cost();
}
@Override
public int advance(int target) throws IOException {
set = false;
return disi.advance(target);
}
@Override
public boolean advanceExact(int target) throws IOException {
set = false;
return disi.advanceExact(target);
}
@Override
public long nextValue() throws IOException {
set();
return values.advance(start++);
}
@Override
public int docValueCount() {
set();
return count;
}
@Override
public int docIDRunEnd() throws IOException {
return disi.docIDRunEnd();
}
private void set() {
if (set == false) {
final int index = disi.index();
start = addresses.get(index);
end = addresses.get(index + 1L);
count = (int) (end - start);
set = true;
}
}
};
}
}
private record DocValuesSkipperEntry(long offset, long length, long minValue, long maxValue, int docCount, int maxDocId) {}
static
|
SortedOrdinalReader
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/JobKillCommitter.java
|
{
"start": 2211,
"end": 2322
}
|
class ____ used provides a dummy implementation for mapper method which
* does nothing.
*/
public static
|
is
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesNodesEndpointBuilderFactory.java
|
{
"start": 16051,
"end": 23112
}
|
interface ____
extends
EndpointConsumerBuilder {
default KubernetesNodesEndpointConsumerBuilder basic() {
return (KubernetesNodesEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder connectionTimeout(Integer connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedKubernetesNodesEndpointConsumerBuilder connectionTimeout(String connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
}
/**
* Builder for endpoint producers for the Kubernetes Nodes component.
*/
public
|
AdvancedKubernetesNodesEndpointConsumerBuilder
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/support/FetchableFluentQueryByPredicate.java
|
{
"start": 2716,
"end": 11199
}
|
class ____<S, R> extends FluentQuerySupport<S, R> implements FetchableFluentQuery<R> {
private final EntityPath<?> entityPath;
private final JpaEntityInformation<S, ?> entityInformation;
private final ScrollQueryFactory<AbstractJPAQuery<?, ?>> scrollQueryFactory;
private final Predicate predicate;
private final Function<Sort, AbstractJPAQuery<?, ?>> finder;
private final BiFunction<Sort, Pageable, AbstractJPAQuery<?, ?>> pagedFinder;
private final Function<Predicate, Long> countOperation;
private final Function<Predicate, Boolean> existsOperation;
private final EntityManager entityManager;
FetchableFluentQueryByPredicate(EntityPath<?> entityPath, Predicate predicate,
JpaEntityInformation<S, ?> entityInformation, Function<Sort, AbstractJPAQuery<?, ?>> finder,
ScrollQueryFactory<AbstractJPAQuery<?, ?>> scrollQueryFactory,
BiFunction<Sort, Pageable, AbstractJPAQuery<?, ?>> pagedFinder, Function<Predicate, Long> countOperation,
Function<Predicate, Boolean> existsOperation, EntityManager entityManager, ProjectionFactory projectionFactory) {
this(entityPath, predicate, entityInformation, (Class<R>) entityInformation.getJavaType(), Sort.unsorted(), 0,
Collections.emptySet(), finder, scrollQueryFactory, pagedFinder, countOperation, existsOperation, entityManager,
projectionFactory);
}
private FetchableFluentQueryByPredicate(EntityPath<?> entityPath, Predicate predicate,
JpaEntityInformation<S, ?> entityInformation, Class<R> resultType, Sort sort, int limit,
Collection<String> properties, Function<Sort, AbstractJPAQuery<?, ?>> finder,
ScrollQueryFactory<AbstractJPAQuery<?, ?>> scrollQueryFactory,
BiFunction<Sort, Pageable, AbstractJPAQuery<?, ?>> pagedFinder, Function<Predicate, Long> countOperation,
Function<Predicate, Boolean> existsOperation, EntityManager entityManager, ProjectionFactory projectionFactory) {
super(resultType, sort, limit, properties, entityInformation.getJavaType(), projectionFactory);
this.entityInformation = entityInformation;
this.entityPath = entityPath;
this.predicate = predicate;
this.finder = finder;
this.scrollQueryFactory = scrollQueryFactory;
this.pagedFinder = pagedFinder;
this.countOperation = countOperation;
this.existsOperation = existsOperation;
this.entityManager = entityManager;
}
@Override
public FetchableFluentQuery<R> sortBy(Sort sort) {
Assert.notNull(sort, "Sort must not be null");
return new FetchableFluentQueryByPredicate<>(entityPath, predicate, entityInformation, resultType,
this.sort.and(sort), limit, properties, finder, scrollQueryFactory, pagedFinder, countOperation,
existsOperation, entityManager, projectionFactory);
}
@Override
public FetchableFluentQuery<R> limit(int limit) {
Assert.isTrue(limit >= 0, "Limit must not be negative");
return new FetchableFluentQueryByPredicate<>(entityPath, predicate, entityInformation, resultType, sort, limit,
properties, finder, scrollQueryFactory, pagedFinder, countOperation, existsOperation, entityManager,
projectionFactory);
}
@Override
public <NR> FetchableFluentQuery<NR> as(Class<NR> resultType) {
Assert.notNull(resultType, "Projection target type must not be null");
return new FetchableFluentQueryByPredicate<>(entityPath, predicate, entityInformation, resultType, sort, limit,
properties, finder, scrollQueryFactory, pagedFinder, countOperation, existsOperation, entityManager,
projectionFactory);
}
@Override
public FetchableFluentQuery<R> project(Collection<String> properties) {
return new FetchableFluentQueryByPredicate<>(entityPath, predicate, entityInformation, resultType, sort, limit,
mergeProperties(properties), finder, scrollQueryFactory, pagedFinder, countOperation, existsOperation,
entityManager, projectionFactory);
}
@Override
public @Nullable R oneValue() {
List<?> results = createSortedAndProjectedQuery(this.sort) //
.limit(2) // Never need more than 2 values
.fetch();
if (results.size() > 1) {
throw new IncorrectResultSizeDataAccessException(1);
}
return results.isEmpty() ? null : getConversionFunction().apply(results.get(0));
}
@Override
public @Nullable R firstValue() {
List<?> results = createSortedAndProjectedQuery(this.sort) //
.limit(1) // Never need more than 1 value
.fetch();
return results.isEmpty() ? null : getConversionFunction().apply(results.get(0));
}
@Override
public List<R> all() {
return all(this.sort);
}
private List<R> all(Sort sort) {
return convert(createSortedAndProjectedQuery(sort).fetch());
}
@Override
public Window<R> scroll(ScrollPosition scrollPosition) {
Assert.notNull(scrollPosition, "ScrollPosition must not be null");
return new PredicateScrollDelegate<>(scrollQueryFactory, entityInformation)
.scroll(returnedType, sort, limit, scrollPosition).map(getConversionFunction());
}
@Override
public Page<R> page(Pageable pageable) {
return pageable.isUnpaged() ? new PageImpl<>(all(pageable.getSortOr(this.sort))) : readPage(pageable);
}
@Override
public Slice<R> slice(Pageable pageable) {
return pageable.isUnpaged() ? new SliceImpl<>(all(pageable.getSortOr(this.sort))) : readSlice(pageable);
}
@Override
public Stream<R> stream() {
return createSortedAndProjectedQuery(this.sort) //
.stream() //
.map(getConversionFunction());
}
@Override
public long count() {
return countOperation.apply(predicate);
}
@Override
public boolean exists() {
return existsOperation.apply(predicate);
}
private AbstractJPAQuery<?, ?> createSortedAndProjectedQuery(Sort sort) {
AbstractJPAQuery<?, ?> query = finder.apply(sort);
applyQuerySettings(this.returnedType, this.limit, query, null);
return query;
}
private void applyQuerySettings(ReturnedType returnedType, int limit, AbstractJPAQuery<?, ?> query,
@Nullable ScrollPosition scrollPosition) {
List<String> inputProperties = returnedType.getInputProperties();
if (returnedType.needsCustomConstruction()) {
Collection<String> requiredSelection;
if (scrollPosition instanceof KeysetScrollPosition && returnedType.isInterfaceProjection()) {
requiredSelection = KeysetScrollDelegate.getProjectionInputProperties(entityInformation, inputProperties, sort);
} else {
requiredSelection = inputProperties;
}
PathBuilder<?> builder = new PathBuilder<>(entityPath.getType(), entityPath.getMetadata());
Expression<?>[] projection = requiredSelection.stream().map(builder::get).toArray(Expression[]::new);
if (returnedType.isInterfaceProjection()) {
query.select(new JakartaTuple(projection));
} else {
query.select(new DtoProjection(returnedType.getReturnedType(), projection));
}
}
if (!properties.isEmpty()) {
query.setHint(EntityGraphFactory.HINT, EntityGraphFactory.create(entityManager, entityType, properties));
}
if (limit != 0) {
query.limit(limit);
}
}
private Page<R> readPage(Pageable pageable) {
Sort sort = pageable.getSortOr(this.sort);
AbstractJPAQuery<?, ?> query = createQuery(pageable, sort);
List<R> paginatedResults = convert(query.fetch());
return PageableExecutionUtils.getPage(paginatedResults, withSort(pageable, sort),
() -> countOperation.apply(predicate));
}
private Slice<R> readSlice(Pageable pageable) {
Sort sort = pageable.getSortOr(this.sort);
AbstractJPAQuery<?, ?> query = createQuery(pageable, sort);
query.limit(pageable.getPageSize() + 1);
List<?> resultList = query.fetch();
boolean hasNext = resultList.size() > pageable.getPageSize();
if (hasNext) {
resultList = resultList.subList(0, pageable.getPageSize());
}
List<R> slice = convert(resultList);
return new SliceImpl<>(slice, pageable, hasNext);
}
private AbstractJPAQuery<?, ?> createQuery(Pageable pageable, Sort sort) {
AbstractJPAQuery<?, ?> query = pagedFinder.apply(sort, pageable);
if (!properties.isEmpty()) {
query.setHint(EntityGraphFactory.HINT, EntityGraphFactory.create(entityManager, entityType, properties));
}
return query;
}
private List<R> convert(List<?> resultList) {
Function<Object, R> conversionFunction = getConversionFunction();
List<R> mapped = new ArrayList<>(resultList.size());
for (Object o : resultList) {
mapped.add(conversionFunction.apply(o));
}
return mapped;
}
private Function<Object, R> getConversionFunction() {
return getConversionFunction(entityType, resultType);
}
|
FetchableFluentQueryByPredicate
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointBoundKeyedStateHandle.java
|
{
"start": 921,
"end": 1369
}
|
interface ____ extends KeyedStateHandle {
/** Returns the ID of the checkpoint for which the handle was created or used. */
long getCheckpointId();
/**
* Returns a new {@link CheckpointBoundKeyedStateHandle} Rebounding checkpoint id to a specific
* checkpoint id.
*
* @param checkpointId rebounded checkpoint id.
*/
CheckpointBoundKeyedStateHandle rebound(long checkpointId);
}
|
CheckpointBoundKeyedStateHandle
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/ApplicationEventPublisherAware.java
|
{
"start": 992,
"end": 1521
}
|
interface ____ extends Aware {
/**
* Set the ApplicationEventPublisher that this object runs in.
* <p>Invoked after population of normal bean properties but before an init
* callback like InitializingBean's afterPropertiesSet or a custom init-method.
* Invoked before ApplicationContextAware's setApplicationContext.
* @param applicationEventPublisher event publisher to be used by this object
*/
void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher);
}
|
ApplicationEventPublisherAware
|
java
|
elastic__elasticsearch
|
x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java
|
{
"start": 9470,
"end": 13282
}
|
class ____ extends AbstractShapeGeometryFieldType<Geometry> implements GeoShapeQueryable {
private final GeoFormatterFactory<Geometry> geoFormatterFactory;
private final FieldValues<Geometry> scriptValues;
private final boolean isSyntheticSource;
public GeoShapeWithDocValuesFieldType(
String name,
boolean indexed,
boolean hasDocValues,
boolean isStored,
Orientation orientation,
GeoShapeParser parser,
FieldValues<Geometry> scriptValues,
GeoFormatterFactory<Geometry> geoFormatterFactory,
boolean isSyntheticSource,
Map<String, String> meta
) {
super(name, IndexType.points(indexed, hasDocValues), isStored, parser, orientation, meta);
this.scriptValues = scriptValues;
this.geoFormatterFactory = geoFormatterFactory;
this.isSyntheticSource = isSyntheticSource;
}
@Override
public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) {
failIfNoDocValues();
return (cache, breakerService) -> new LatLonShapeIndexFieldData(
name(),
GeoShapeValuesSourceType.instance(),
GeoShapeDocValuesField::new
);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public Query geoShapeQuery(SearchExecutionContext context, String fieldName, ShapeRelation relation, LatLonGeometry... geometries) {
failIfNotIndexedNorDocValuesFallback(context);
// CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0)
if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) {
throw new QueryShardException(
context,
ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."
);
}
Query query;
if (indexType.hasPoints()) {
query = LatLonShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), geometries);
if (hasDocValues()) {
final Query queryDocValues = new LatLonShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), geometries);
query = new IndexOrDocValuesQuery(query, queryDocValues);
}
} else {
query = new LatLonShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), geometries);
}
return query;
}
@Override
public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
if (isStored()) {
Function<List<Geometry>, List<Object>> formatter = getFormatter(format != null ? format : GeometryFormatterFactory.GEOJSON);
return new StoredValueFetcher(context.lookup(), name()) {
@Override
public List<Object> parseStoredValues(List<Object> storedValues) {
final List<Geometry> values = new ArrayList<>(storedValues.size());
for (Object storedValue : storedValues) {
if (storedValue instanceof BytesRef bytesRef) {
values.add(
WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, bytesRef.bytes, bytesRef.offset, bytesRef.length)
);
} else {
throw new IllegalArgumentException("Unexpected
|
GeoShapeWithDocValuesFieldType
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/io/service/SoftServiceLoader.java
|
{
"start": 3397,
"end": 3777
}
|
class ____
* @param <S> The service generic type
* @return A new service loader
*/
public static <S> SoftServiceLoader<S> load(Class<S> service,
ClassLoader loader) {
return new SoftServiceLoader<>(service, loader);
}
/**
* Creates a new {@link SoftServiceLoader} using the given type and
|
loader
|
java
|
bumptech__glide
|
library/test/src/test/java/com/bumptech/glide/request/transition/ViewPropertyAnimationTest.java
|
{
"start": 847,
"end": 1842
}
|
class ____ {
private ViewPropertyTransition.Animator animator;
private ViewPropertyTransition<Object> animation;
@Before
public void setUp() {
animator = mock(ViewPropertyTransition.Animator.class);
animation = new ViewPropertyTransition<>(animator);
}
@Test
public void testAlwaysReturnsFalse() {
assertFalse(animation.transition(new Object(), mock(ViewAdapter.class)));
}
@Test
public void testCallsAnimatorWithGivenView() {
ImageView view = new ImageView(ApplicationProvider.getApplicationContext());
ViewAdapter adapter = mock(ViewAdapter.class);
when(adapter.getView()).thenReturn(view);
animation.transition(new Object(), adapter);
verify(animator).animate(eq(view));
}
@Test
public void testDoesNotCallAnimatorIfGivenAdapterWithNullView() {
ViewAdapter adapter = mock(ViewAdapter.class);
animation.transition(new Object(), adapter);
verify(animator, never()).animate(any(View.class));
}
}
|
ViewPropertyAnimationTest
|
java
|
netty__netty
|
codec-redis/src/main/java/io/netty/handler/codec/redis/RedisMessage.java
|
{
"start": 743,
"end": 794
}
|
interface ____ codec-redis.
*/
@UnstableApi
public
|
for
|
java
|
grpc__grpc-java
|
stub/src/test/java/io/grpc/stub/ClientCallsTest.java
|
{
"start": 30126,
"end": 31658
}
|
class ____ implements ServerStreamingMethod<Integer, Integer> {
ServerCallStreamObserver<Integer> observer;
@Override public void invoke(Integer request, StreamObserver<Integer> responseObserver) {
observer = (ServerCallStreamObserver<Integer>) responseObserver;
}
}
NoopServerStreamingMethod methodImpl = new NoopServerStreamingMethod();
server = InProcessServerBuilder.forName("noop").directExecutor()
.addService(ServerServiceDefinition.builder("some")
.addMethod(SERVER_STREAMING_METHOD, ServerCalls.asyncServerStreamingCall(methodImpl))
.build())
.build().start();
InterruptInterceptor interceptor = new InterruptInterceptor();
channel = InProcessChannelBuilder.forName("noop")
.directExecutor()
.intercept(interceptor)
.build();
Iterator<Integer> iter = ClientCalls.blockingServerStreamingCall(
channel.newCall(SERVER_STREAMING_METHOD, CallOptions.DEFAULT), req);
try {
iter.next();
fail();
} catch (StatusRuntimeException ex) {
assertTrue(Thread.interrupted());
assertTrue("interrupted", ex.getCause() instanceof InterruptedException);
}
assertTrue("onCloseCalled", interceptor.onCloseCalled);
assertTrue("context not cancelled", methodImpl.observer.isCancelled());
}
@Test
public void blockingServerStreamingCall2_success() throws Exception {
Integer req = 2;
final Integer resp1 = 3;
final Integer resp2 = 4;
|
NoopServerStreamingMethod
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JGroupsEndpointBuilderFactory.java
|
{
"start": 3825,
"end": 9644
}
|
interface ____
extends
EndpointConsumerBuilder {
default JGroupsEndpointConsumerBuilder basic() {
return (JGroupsEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJGroupsEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the JGroups component.
*/
public
|
AdvancedJGroupsEndpointConsumerBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java
|
{
"start": 858,
"end": 4851
}
|
class ____ extends AbstractMultivalueFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MvMinDoubleEvaluator.class);
public MvMinDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) {
super(driverContext, field);
}
@Override
public String name() {
return "MvMin";
}
/**
* Evaluate blocks containing at least one multivalued field.
*/
@Override
public Block evalNullable(Block fieldVal) {
if (fieldVal.mvSortedAscending()) {
return evalAscendingNullable(fieldVal);
}
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
if (valueCount == 0) {
builder.appendNull();
continue;
}
int first = v.getFirstValueIndex(p);
int end = first + valueCount;
double value = v.getDouble(first);
for (int i = first + 1; i < end; i++) {
double next = v.getDouble(i);
value = MvMin.process(value, next);
}
double result = value;
builder.appendDouble(result);
}
return builder.build();
}
}
/**
* Evaluate blocks containing at least one multivalued field.
*/
@Override
public Block evalNotNullable(Block fieldVal) {
if (fieldVal.mvSortedAscending()) {
return evalAscendingNotNullable(fieldVal);
}
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
int first = v.getFirstValueIndex(p);
int end = first + valueCount;
double value = v.getDouble(first);
for (int i = first + 1; i < end; i++) {
double next = v.getDouble(i);
value = MvMin.process(value, next);
}
double result = value;
builder.appendDouble(result);
}
return builder.build().asBlock();
}
}
/**
* Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order.
*/
private Block evalAscendingNullable(Block fieldVal) {
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
if (valueCount == 0) {
builder.appendNull();
continue;
}
int first = v.getFirstValueIndex(p);
int idx = MvMin.ascendingIndex(valueCount);
double result = v.getDouble(first + idx);
builder.appendDouble(result);
}
return builder.build();
}
}
/**
* Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order.
*/
private Block evalAscendingNotNullable(Block fieldVal) {
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
int first = v.getFirstValueIndex(p);
int idx = MvMin.ascendingIndex(valueCount);
double result = v.getDouble(first + idx);
builder.appendDouble(result);
}
return builder.build().asBlock();
}
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED + field.baseRamBytesUsed();
}
public static
|
MvMinDoubleEvaluator
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/VoidUsedTest.java
|
{
"start": 1729,
"end": 1874
}
|
class ____ {
void test(Void v) {
v = null;
}
}
""")
.doTest();
}
}
|
Test
|
java
|
dropwizard__dropwizard
|
dropwizard-core/src/main/java/io/dropwizard/core/cli/Cli.java
|
{
"start": 978,
"end": 1484
}
|
class ____ {
private static final String COMMAND_NAME_ATTR = "command";
// assume -h if no arguments are given
private static final String[][] HELP = {{}, {"-h"}, {"--help"}};
private static final String[][] VERSION = {{"-v"}, {"--version"}};
private final PrintWriter stdOut;
private final PrintWriter stdErr;
private final SortedMap<String, Command> commands;
private final Bootstrap<?> bootstrap;
private final ArgumentParser parser;
/**
* Create a new CLI
|
Cli
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/monitor/collector/ConfigSubscriberMetricsCollector.java
|
{
"start": 1234,
"end": 1824
}
|
class ____ {
private static final long DELAY_SECONDS = 5;
@Autowired
public ConfigSubscriberMetricsCollector(LongPollingService longPollingService, ConfigChangeListenContext configChangeListenContext) {
ConfigExecutor.scheduleConfigTask(() -> {
MetricsMonitor.getConfigSubscriberMonitor("v1").set(longPollingService.getSubscriberCount());
MetricsMonitor.getConfigSubscriberMonitor("v2").set(configChangeListenContext.getConnectionCount());
}, DELAY_SECONDS, DELAY_SECONDS, TimeUnit.SECONDS);
}
}
|
ConfigSubscriberMetricsCollector
|
java
|
netty__netty
|
pkitesting/src/main/java/io/netty/pkitesting/CertificateBuilder.java
|
{
"start": 46921,
"end": 49737
}
|
enum ____ {
/**
* For verifying digital signatures, for entity authentication,
* for entity authentication, or for integrity verification.
*/
digitalSignature(0),
/**
* This key usage is deprecated by X.509, and commitment may instead be derived from the actual use of the keys.
* <p>
* For verifying digital signatures that imply the signer has "committed" to the
* content being signed. This does not imply any specific policy or review on part of the signer, however.
*/
contentCommitment(1),
/**
* For enciphering keys or other security information.
*/
keyEncipherment(2),
/**
* For enciphering user data, but not keys or security information.
*/
dataEncipherment(3),
/**
* For use in public key agreement.
*/
keyAgreement(4),
/**
* For verifying the Certificate Authority's signature on a public-key certificate.
* <p>
* This implies {@link #digitalSignature} and {@link #contentCommitment}, so they do not need to be specified
* separately.
*/
keyCertSign(5),
/**
* For verifying the Certificate Authority's signature on a Certificate Revocation List.
* <p>
* This implies {@link #digitalSignature} and {@link #contentCommitment}, so they do not need to be specified
* separately.
*/
cRLSign(6),
/**
* For use with {@link #keyAgreement} to limit the key to enciphering only.
* <p>
* The meaning of this without the {@link #keyAgreement} bit set is unspecified.
*/
encipherOnly(7),
/**
* For use with {@link #keyAgreement} to limit the key to deciphering only.
* <p>
* The meaning of this without the {@link #keyAgreement} bit set is unspecified.
*/
decipherOnly(8);
private final int bitId;
KeyUsage(int bitId) {
this.bitId = bitId;
}
}
/**
* The extended key usage field specify what the certificate and key is allowed to be used for.
* <p>
* A certificate can have many key usages. For instance, some certificates support both client and server usage
* for TLS connections.
* <p>
* The key usage must be checked by the opposing peer receiving the certificate, and reject certificates that do
* not permit the given usage.
* <p>
* For instance, if a TLS client connects to a server that presents a certificate without the
* {@linkplain #PKIX_KP_SERVER_AUTH server-authentication} usage, then the client must reject the server
* certificate as invalid.
*/
public
|
KeyUsage
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/ExistingPropertyTest.java
|
{
"start": 3720,
"end": 4049
}
|
class ____ extends Car
{
public int speakerCount;
private Accord() { super(null); }
protected Accord(String name, int b) {
super(name);
speakerCount = b;
}
public String getType() {
return "accord";
}
}
@JsonTypeName("camry")
static
|
Accord
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit4EmptyMethodsTest.java
|
{
"start": 5574,
"end": 5826
}
|
class ____ extends FooBase {
@Before
public void setUp() {
// don't delete this because it's an override!
}
}
""")
.expectUnchanged()
.doTest();
}
}
|
FooTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
|
{
"start": 4397,
"end": 23386
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(ClientServiceDelegate.class);
private static final String UNAVAILABLE = "N/A";
// Caches for per-user NotRunningJobs
private HashMap<JobState, HashMap<String, NotRunningJob>> notRunningJobs;
private final Configuration conf;
private final JobID jobId;
private final ApplicationId appId;
private final ResourceMgrDelegate rm;
private final MRClientProtocol historyServerProxy;
private MRClientProtocol realProxy = null;
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private static String UNKNOWN_USER = "Unknown User";
private String trackingUrl;
private AtomicBoolean usingAMProxy = new AtomicBoolean(false);
private int maxClientRetry;
private boolean amAclDisabledStatusLogged = false;
public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
JobID jobId, MRClientProtocol historyServerProxy) {
this.conf = new Configuration(conf); // Cloning for modifying.
// For faster redirects from AM to HS.
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES));
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS,
MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS));
this.rm = rm;
this.jobId = jobId;
this.historyServerProxy = historyServerProxy;
this.appId = TypeConverter.toYarn(jobId).getAppId();
notRunningJobs = new HashMap<JobState, HashMap<String, NotRunningJob>>();
}
// Get the instance of the NotRunningJob corresponding to the specified
// user and state
private NotRunningJob getNotRunningJob(ApplicationReport applicationReport,
JobState state) {
synchronized (notRunningJobs) {
HashMap<String, NotRunningJob> map = notRunningJobs.get(state);
if (map == null) {
map = new HashMap<String, NotRunningJob>();
notRunningJobs.put(state, map);
}
String user =
(applicationReport == null) ?
UNKNOWN_USER : applicationReport.getUser();
NotRunningJob notRunningJob = map.get(user);
if (notRunningJob == null) {
notRunningJob = new NotRunningJob(applicationReport, state);
map.put(user, notRunningJob);
}
return notRunningJob;
}
}
private MRClientProtocol getProxy() throws IOException {
if (realProxy != null) {
return realProxy;
}
// Possibly allow nulls through the PB tunnel, otherwise deal with an exception
// and redirect to the history server.
ApplicationReport application = null;
try {
application = rm.getApplicationReport(appId);
} catch (ApplicationNotFoundException e) {
application = null;
} catch (YarnException e2) {
throw new IOException(e2);
}
if (application != null) {
trackingUrl = application.getTrackingUrl();
}
InetSocketAddress serviceAddr = null;
while (application == null
|| YarnApplicationState.RUNNING == application
.getYarnApplicationState()) {
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
return checkAndGetHSProxy(null, JobState.NEW);
}
try {
if (application.getHost() == null || "".equals(application.getHost())) {
LOG.debug("AM not assigned to Job. Waiting to get the AM ...");
Thread.sleep(2000);
LOG.debug("Application state is " + application.getYarnApplicationState());
application = rm.getApplicationReport(appId);
continue;
} else if (UNAVAILABLE.equals(application.getHost())) {
if (!amAclDisabledStatusLogged) {
LOG.info("Job " + jobId + " is running, but the host is unknown."
+ " Verify user has VIEW_JOB access.");
amAclDisabledStatusLogged = true;
}
return getNotRunningJob(application, JobState.RUNNING);
}
if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
UserGroupInformation.getCurrentUser().getUserName());
serviceAddr = NetUtils.createSocketAddrForHost(
application.getHost(), application.getRpcPort());
if (UserGroupInformation.isSecurityEnabled()) {
org.apache.hadoop.yarn.api.records.Token clientToAMToken =
application.getClientToAMToken();
Token<ClientToAMTokenIdentifier> token =
ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
newUgi.addToken(token);
}
LOG.debug("Connecting to " + serviceAddr);
final InetSocketAddress finalServiceAddr = serviceAddr;
realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() throws IOException {
return instantiateAMProxy(finalServiceAddr);
}
});
} else {
if (!amAclDisabledStatusLogged) {
LOG.info("Network ACL closed to AM for job " + jobId
+ ". Not going to try to reach the AM.");
amAclDisabledStatusLogged = true;
}
return getNotRunningJob(null, JobState.RUNNING);
}
return realProxy;
} catch (IOException e) {
//possibly the AM has crashed
//there may be some time before AM is restarted
//keep retrying by getting the address from RM
LOG.info("Could not connect to " + serviceAddr +
". Waiting for getting the latest AM address...");
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
LOG.warn("getProxy() call interrupted", e1);
throw new YarnRuntimeException(e1);
}
try {
application = rm.getApplicationReport(appId);
} catch (YarnException e1) {
throw new IOException(e1);
}
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
return checkAndGetHSProxy(null, JobState.RUNNING);
}
} catch (InterruptedException e) {
LOG.warn("getProxy() call interrupted", e);
throw new YarnRuntimeException(e);
} catch (YarnException e) {
throw new IOException(e);
}
}
/** we just want to return if its allocating, so that we don't
* block on it. This is to be able to return job status
* on an allocating Application.
*/
String user = application.getUser();
if (user == null) {
throw new IOException("User is not set in the application report");
}
if (application.getYarnApplicationState() == YarnApplicationState.NEW
|| application.getYarnApplicationState() ==
YarnApplicationState.NEW_SAVING
|| application.getYarnApplicationState() == YarnApplicationState.SUBMITTED
|| application.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
realProxy = null;
return getNotRunningJob(application, JobState.NEW);
}
if (application.getYarnApplicationState() == YarnApplicationState.FAILED) {
realProxy = null;
return getNotRunningJob(application, JobState.FAILED);
}
if (application.getYarnApplicationState() == YarnApplicationState.KILLED) {
realProxy = null;
return getNotRunningJob(application, JobState.KILLED);
}
//History server can serve a job only if application
//succeeded.
if (application.getYarnApplicationState() == YarnApplicationState.FINISHED) {
LOG.info("Application state is completed. FinalApplicationStatus="
+ application.getFinalApplicationStatus().toString()
+ ". Redirecting to job history server");
realProxy = checkAndGetHSProxy(application, JobState.SUCCEEDED);
}
return realProxy;
}
private MRClientProtocol checkAndGetHSProxy(
ApplicationReport applicationReport, JobState state) {
if (null == historyServerProxy) {
LOG.warn("Job History Server is not configured.");
return getNotRunningJob(applicationReport, state);
}
return historyServerProxy;
}
MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
throws IOException {
LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol proxy =
(MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
serviceAddr, conf);
usingAMProxy.set(true);
LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
return proxy;
}
private synchronized Object invoke(String method, Class argClass,
Object args) throws IOException {
Method methodOb = null;
try {
methodOb = MRClientProtocol.class.getMethod(method, argClass);
} catch (SecurityException e) {
throw new YarnRuntimeException(e);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException("Method name mismatch", e);
}
maxClientRetry = this.conf.getInt(
MRJobConfig.MR_CLIENT_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES);
IOException lastException = null;
while (maxClientRetry > 0) {
MRClientProtocol MRClientProxy = null;
try {
MRClientProxy = getProxy();
return methodOb.invoke(MRClientProxy, args);
} catch (InvocationTargetException e) {
// Will not throw out YarnException anymore
LOG.debug("Failed to contact AM/History for job " + jobId +
" retrying..", e.getTargetException());
// Force reconnection by setting the proxy to null.
realProxy = null;
// HS/AMS shut down
if (e.getCause() instanceof AuthorizationException) {
throw new IOException(e.getTargetException());
}
// if its AM shut down, do not decrement maxClientRetry while we wait
// for its AM to be restarted.
if (!usingAMProxy.get()) {
maxClientRetry--;
}
usingAMProxy.set(false);
lastException = new IOException(e.getTargetException());
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
LOG.warn("ClientServiceDelegate invoke call interrupted", ie);
throw new YarnRuntimeException(ie);
}
} catch (Exception e) {
LOG.debug("Failed to contact AM/History for job " + jobId
+ " Will retry..", e);
// Force reconnection by setting the proxy to null.
realProxy = null;
// RM shutdown
maxClientRetry--;
lastException = new IOException(e.getMessage());
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
LOG.warn("ClientServiceDelegate invoke call interrupted", ie);
throw new YarnRuntimeException(ie);
}
}
}
throw lastException;
}
// Only for testing
@VisibleForTesting
public int getMaxClientRetry() {
return this.maxClientRetry;
}
public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOException,
InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
request.setJobId(jobID);
Counters cnt = ((GetCountersResponse)
invoke("getCounters", GetCountersRequest.class, request)).getCounters();
return TypeConverter.fromYarn(cnt);
}
public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1, int arg2)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter
.toYarn(arg0);
GetTaskAttemptCompletionEventsRequest request = recordFactory
.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
request.setJobId(jobID);
request.setFromEventId(arg1);
request.setMaxEvents(arg2);
List<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent> list =
((GetTaskAttemptCompletionEventsResponse) invoke(
"getTaskAttemptCompletionEvents", GetTaskAttemptCompletionEventsRequest.class, request)).
getCompletionEventList();
return TypeConverter
.fromYarn(list
.toArray(new org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[0]));
}
public String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter
.toYarn(arg0);
GetDiagnosticsRequest request = recordFactory
.newRecordInstance(GetDiagnosticsRequest.class);
request.setTaskAttemptId(attemptID);
List<String> list = ((GetDiagnosticsResponse) invoke("getDiagnostics",
GetDiagnosticsRequest.class, request)).getDiagnosticsList();
String[] result = new String[list.size()];
int i = 0;
for (String c : list) {
result[i++] = c.toString();
}
return result;
}
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report = ((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
JobStatus jobStatus = null;
if (report != null) {
if (StringUtils.isEmpty(report.getJobFile())) {
String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
report.setJobFile(jobFile);
}
String historyTrackingUrl = report.getTrackingUrl();
String url = StringUtils.isNotEmpty(historyTrackingUrl)
? historyTrackingUrl : trackingUrl;
jobStatus = TypeConverter.fromYarn(report, url);
}
return jobStatus;
}
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
throws IOException{
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetTaskReportsRequest request =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
request.setJobId(jobId);
request.setTaskType(TypeConverter.toYarn(taskType));
List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
request)).getTaskReportList();
return TypeConverter.fromYarn
(taskReports).toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
public boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID
= TypeConverter.toYarn(taskAttemptID);
if (fail) {
FailTaskAttemptRequest failRequest = recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
failRequest.setTaskAttemptId(attemptID);
invoke("failTaskAttempt", FailTaskAttemptRequest.class, failRequest);
} else {
KillTaskAttemptRequest killRequest = recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
killRequest.setTaskAttemptId(attemptID);
invoke("killTaskAttempt", KillTaskAttemptRequest.class, killRequest);
}
return true;
}
public boolean killJob(JobID oldJobID)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId
= TypeConverter.toYarn(oldJobID);
KillJobRequest killRequest = recordFactory.newRecordInstance(KillJobRequest.class);
killRequest.setJobId(jobId);
invoke("killJob", KillJobRequest.class, killRequest);
return true;
}
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report =
((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
JobState.ERROR).contains(report.getJobState())) {
if (oldTaskAttemptID != null) {
GetTaskAttemptReportRequest taRequest =
recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
TaskAttemptReport taReport =
((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
GetTaskAttemptReportRequest.class, taRequest))
.getTaskAttemptReport();
if (taReport.getContainerId() == null
|| taReport.getNodeManagerHost() == null) {
throw new IOException("Unable to get log information for task: "
+ oldTaskAttemptID);
}
return new LogParams(
taReport.getContainerId().toString(),
taReport.getContainerId().getApplicationAttemptId()
.getApplicationId().toString(),
NodeId.newInstance(taReport.getNodeManagerHost(),
taReport.getNodeManagerPort()).toString(), report.getUser());
} else {
if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
throw new IOException("Unable to get log information for job: "
+ oldJobID);
}
AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
return new LogParams(
amInfo.getContainerId().toString(),
amInfo.getAppAttemptId().getApplicationId().toString(),
NodeId.newInstance(amInfo.getNodeManagerHost(),
amInfo.getNodeManagerPort()).toString(), report.getUser());
}
} else {
throw new IOException("Cannot get log path for a in-progress job");
}
}
public void close() throws IOException {
if (rm != null) {
rm.close();
}
if (historyServerProxy != null) {
RPC.stopProxy(historyServerProxy);
}
if (realProxy != null) {
RPC.stopProxy(realProxy);
realProxy = null;
}
}
}
|
ClientServiceDelegate
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/AnnotatedElementUtilsTests.java
|
{
"start": 59870,
"end": 60044
}
|
class ____ {
}
@HalfConventionBasedAndHalfAliasedComposedContextConfig(locations = "explicitDeclaration")
static
|
HalfConventionBasedAndHalfAliasedComposedContextConfigClassV1
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/MockTest.java
|
{
"start": 482,
"end": 1129
}
|
class ____ {
public org.mockito.quality.Strictness strictness;
public StrictnessToMockStrictnessTest(org.mockito.quality.Strictness strictness) {
this.strictness = strictness;
}
@Test
public void should_have_matching_enum_in_mock_strictness_enum() {
Mock.Strictness.valueOf(strictness.name());
}
@Parameterized.Parameters(name = "{0}")
public static org.mockito.quality.Strictness[] data() {
return org.mockito.quality.Strictness.values();
}
}
@RunWith(value = Parameterized.class)
public static
|
StrictnessToMockStrictnessTest
|
java
|
apache__spark
|
sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/JavaTypeInferenceBeans.java
|
{
"start": 1221,
"end": 1406
}
|
class ____<T> extends JavaBeanWithGenericsAB<Long> {
public T getPropertyC() {
return null;
}
public void setPropertyC(T a) {
}
}
static
|
JavaBeanWithGenericsABC
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/Spr8954Tests.java
|
{
"start": 3652,
"end": 4040
}
|
class ____ implements SmartInstantiationAwareBeanPostProcessor {
@Override
public Class<?> predictBeanType(Class<?> beanClass, String beanName) {
return (FactoryBean.class.isAssignableFrom(beanClass) ? PredictedType.class : null);
}
@Override
public PropertyValues postProcessProperties(PropertyValues pvs, Object bean, String beanName) {
return pvs;
}
}
}
|
PredictingBPP
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/properties/CombinedBuildProfileAndBuildPropertiesTest.java
|
{
"start": 730,
"end": 1863
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Producer.class, AnotherProducer.class,
GreetingBean.class, Hello.class, PingBean.class, PongBean.class, FooBean.class, BarBean.class))
.overrideConfigKey("some.prop1", "v1")
.overrideConfigKey("some.prop2", "v2");
@Inject
Hello hello;
@Inject
Instance<BarBean> barBean;
@Test
public void testInjection() {
assertEquals("hello from matching prop. Foo is: foo from missing prop", hello.hello());
assertEquals("ping", hello.ping());
assertEquals("pong", hello.pong());
assertEquals("foo from missing prop", hello.foo());
assertTrue(barBean.isUnsatisfied());
}
@Test
public void testSelect() {
assertEquals("hello from matching prop. Foo is: foo from missing prop",
CDI.current().select(GreetingBean.class).get().greet());
}
@ApplicationScoped
static
|
CombinedBuildProfileAndBuildPropertiesTest
|
java
|
apache__maven
|
its/core-it-support/core-it-plugins/maven-it-plugin-artifact/src/main/java/org/apache/maven/plugin/coreit/ResolveTransitiveMojo.java
|
{
"start": 1881,
"end": 4311
}
|
class ____ extends AbstractMojo {
/**
* The local repository.
*/
@Parameter(defaultValue = "${localRepository}", readonly = true, required = true)
private ArtifactRepository localRepository;
/**
* The remote repositories of the current Maven project.
*/
@Parameter(defaultValue = "${project.remoteArtifactRepositories}", readonly = true, required = true)
private List remoteRepositories;
/**
* The artifact resolver.
*
*/
@Component
private ArtifactResolver resolver;
/**
* The artifact factory.
*
*/
@Component
private ArtifactFactory factory;
/**
* The metadata source.
*
*/
@Component
private ArtifactMetadataSource metadataSource;
/**
* The dependencies to resolve.
*
*/
@Parameter
private Dependency[] dependencies;
/**
* The path to a properties file to store the resolved artifact paths in.
*
*/
@Parameter
private File propertiesFile;
/**
* Runs this mojo.
*
* @throws MojoExecutionException If the artifacts couldn't be resolved.
*/
public void execute() throws MojoExecutionException {
getLog().info("[MAVEN-CORE-IT-LOG] Resolving artifacts");
ResolverThread thread = new ResolverThread();
thread.start();
while (thread.isAlive()) {
try {
thread.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
if (thread.error != null) {
throw new MojoExecutionException("Failed to resolve artifacts: " + thread.error.getMessage(), thread.error);
}
if (propertiesFile != null) {
getLog().info("[MAVEN-CORE-IT-LOG] Creating properties file " + propertiesFile);
try {
propertiesFile.getParentFile().mkdirs();
try (FileOutputStream fos = new FileOutputStream(propertiesFile)) {
thread.props.store(fos, "MAVEN-CORE-IT");
}
} catch (IOException e) {
throw new MojoExecutionException("Failed to create properties file: " + e.getMessage(), e);
}
}
}
private String getId(Artifact artifact) {
artifact.isSnapshot(); // decouple from MNG-2961
return artifact.getId();
}
|
ResolveTransitiveMojo
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/ExecutableMethodProcessorListener.java
|
{
"start": 1430,
"end": 3736
}
|
class ____ implements BeanCreatedEventListener<ExecutableMethodProcessor<?>> {
@Override
public ExecutableMethodProcessor<?> onCreated(BeanCreatedEvent<ExecutableMethodProcessor<?>> event) {
AnnotationValue<Deprecated> deprecatedAnnotation = event.getBeanDefinition().getAnnotation(Deprecated.class);
if (deprecatedAnnotation == null) {
return event.getBean();
}
deprecatedAnnotation.stringValue().ifPresent(message -> DefaultBeanContext.LOG.warn("{}: {}", event.getBeanDefinition().getBeanType().getName(), message));
ExecutableMethodProcessor<?> processor = event.getBean();
BeanDefinition<ExecutableMethodProcessor<?>> processorDefinition = event.getBeanDefinition();
BeanContext beanContext = event.getSource();
if (processor instanceof LifeCycle<?> cycle) {
try {
cycle.start();
} catch (Exception e) {
throw new BeanContextException("Error starting bean processing: " + e.getMessage(), e);
}
}
final List<Argument<?>> typeArguments = processorDefinition.getTypeArguments(ExecutableMethodProcessor.class);
if (typeArguments.size() == 1) {
final Argument<?> annotation = typeArguments.get(0);
Collection<BeanDefinition<Object>> beanDefinitions = beanContext.getBeanDefinitions(Qualifiers.byStereotype((Class) annotation.getType()));
for (BeanDefinition<Object> beanDefinition : beanDefinitions) {
for (ExecutableMethod<Object, ?> executableMethod : beanDefinition.getExecutableMethods()) {
try {
processor.process(beanDefinition, executableMethod);
} catch (Exception e) {
throw new BeanContextException("Error processing bean definition [" + beanDefinition + "]: " + e.getMessage(), e);
}
}
}
}
if (processor instanceof LifeCycle<?> cycle) {
try {
cycle.stop();
} catch (Exception e) {
throw new BeanContextException("Error finalizing bean processing: " + e.getMessage(), e);
}
}
return processor;
}
}
|
ExecutableMethodProcessorListener
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/Sum0AggFunction.java
|
{
"start": 4090,
"end": 4498
}
|
class ____ extends Sum0AggFunction {
@Override
public DataType getResultType() {
return DataTypes.INT();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {/* sum0= */ literal(0, getResultType().notNull())};
}
}
/** Built-in Byte Sum0 aggregate function. */
public static
|
IntSum0AggFunction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunctionTests.java
|
{
"start": 806,
"end": 1996
}
|
class ____ extends GroupingAggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
return new LongFloatTupleBlockSourceOperator(
blockFactory,
LongStream.range(0, end)
.mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloatBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true)))
);
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new MaxFloatAggregatorFunctionSupplier();
}
@Override
protected String expectedDescriptionOfAggregator() {
return "max of floats";
}
@Override
protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
Optional<Float> max = input.stream().flatMap(p -> allFloats(p, group)).max(floatComparator());
if (max.isEmpty()) {
assertThat(result.isNull(position), equalTo(true));
return;
}
assertThat(result.isNull(position), equalTo(false));
assertThat(((FloatBlock) result).getFloat(position), equalTo(max.get()));
}
}
|
MaxFloatGroupingAggregatorFunctionTests
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/enums/EnumSerializationTest.java
|
{
"start": 3104,
"end": 3455
}
|
class ____ extends StdSerializer<Enum>
{
public LowerCasingEnumSerializer() { super(Enum.class); }
@Override
public void serialize(Enum value, JsonGenerator g,
SerializationContext provider) {
g.writeString(value.name().toLowerCase());
}
}
protected static
|
LowerCasingEnumSerializer
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1648/Issue1648Mapper.java
|
{
"start": 422,
"end": 621
}
|
interface ____ {
Issue1648Mapper INSTANCE = Mappers.getMapper( Issue1648Mapper.class );
@Mapping(target = "targetValue", source = "sourceValue")
Target map(Source source);
}
|
Issue1648Mapper
|
java
|
spring-projects__spring-framework
|
spring-core-test/src/main/java/org/springframework/aot/test/agent/EnabledIfRuntimeHintsAgent.java
|
{
"start": 1090,
"end": 1392
}
|
class ____ test method
* is only enabled if the {@link RuntimeHintsAgent} is loaded on the current JVM.
* <p>This is meta-annotated with {@code @Tag("RuntimeHintsTests")} so that test suites
* can choose to target or ignore those tests.
*
* <pre class="code">
* @EnabledIfRuntimeHintsAgent
*
|
or
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/json/JacksonJsonParser.java
|
{
"start": 2088,
"end": 2183
}
|
class ____ extends TypeReference<Map<String, Object>> {
}
private static final
|
MapTypeReference
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/WatermarkAssignerRestoreTest.java
|
{
"start": 1132,
"end": 1642
}
|
class ____ extends RestoreTestBase {
public WatermarkAssignerRestoreTest() {
super(StreamExecWatermarkAssigner.class);
}
@Override
public List<TableTestProgram> programs() {
return Arrays.asList(
WatermarkAssignerTestPrograms.WATERMARK_ASSIGNER_BASIC_FILTER,
WatermarkAssignerTestPrograms.WATERMARK_ASSIGNER_PUSHDOWN_METADATA,
WatermarkAssignerTestPrograms.WATERMARK_ASSIGNER_PUSHDOWN_COMPUTED);
}
}
|
WatermarkAssignerRestoreTest
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/streams/impl/PipeImpl.java
|
{
"start": 739,
"end": 3682
}
|
class ____<T> implements Pipe<T> {
private final Promise<Void> result;
private final ReadStream<T> src;
private boolean endOnSuccess = true;
private boolean endOnFailure = true;
private WriteStream<T> dst;
public PipeImpl(ReadStream<T> src) {
this.src = src;
this.result = Promise.promise();
// Set handlers now
src.endHandler(result::tryComplete);
src.exceptionHandler(result::tryFail);
}
@Override
public synchronized Pipe<T> endOnFailure(boolean end) {
endOnFailure = end;
return this;
}
@Override
public synchronized Pipe<T> endOnSuccess(boolean end) {
endOnSuccess = end;
return this;
}
@Override
public synchronized Pipe<T> endOnComplete(boolean end) {
endOnSuccess = end;
endOnFailure = end;
return this;
}
private void handleWriteResult(AsyncResult<Void> ack) {
if (ack.failed()) {
result.tryFail(new WriteException(ack.cause()));
}
}
@Override
public Future<Void> to(WriteStream<T> ws) {
Promise<Void> promise = Promise.promise();
if (ws == null) {
throw new NullPointerException();
}
synchronized (PipeImpl.this) {
if (dst != null) {
throw new IllegalStateException();
}
dst = ws;
}
Handler<Void> drainHandler = v -> src.resume();
src.handler(item -> {
ws.write(item).onComplete(this::handleWriteResult);
if (ws.writeQueueFull()) {
src.pause();
ws.drainHandler(drainHandler);
}
});
src.resume();
result.future().onComplete(ar -> {
try {
src.handler(null);
} catch (Exception ignore) {
}
try {
src.exceptionHandler(null);
} catch (Exception ignore) {
}
try {
src.endHandler(null);
} catch (Exception ignore) {
}
if (ar.succeeded()) {
handleSuccess(promise);
} else {
Throwable err = ar.cause();
if (err instanceof WriteException) {
src.resume();
err = err.getCause();
}
handleFailure(err, promise);
}
});
return promise.future();
}
private void handleSuccess(Promise<Void> promise) {
if (endOnSuccess) {
dst.end().onComplete(promise);
} else {
promise.complete();
}
}
private void handleFailure(Throwable cause, Promise<Void> completionHandler) {
if (endOnFailure){
dst
.end()
.transform(ar -> Future.<Void>failedFuture(cause))
.onComplete(completionHandler);
} else {
completionHandler.fail(cause);
}
}
public void close() {
synchronized (this) {
src.exceptionHandler(null);
src.handler(null);
if (dst != null) {
dst.drainHandler(null);
dst.exceptionHandler(null);
}
}
VertxException err = new VertxException("Pipe closed", true);
if (result.tryFail(err)) {
src.resume();
}
}
private static
|
PipeImpl
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/strategies/CommonArgumentTypeStrategy.java
|
{
"start": 1671,
"end": 2909
}
|
class ____ implements ArgumentTypeStrategy {
private static final Argument COMMON_ARGUMENT = Argument.ofGroup("COMMON");
private final boolean preserveNullability;
public CommonArgumentTypeStrategy(boolean preserveNullability) {
this.preserveNullability = preserveNullability;
}
@Override
public Optional<DataType> inferArgumentType(
CallContext callContext, int argumentPos, boolean throwOnFailure) {
final List<LogicalType> actualTypes =
callContext.getArgumentDataTypes().stream()
.map(DataType::getLogicalType)
.collect(Collectors.toList());
return LogicalTypeMerging.findCommonType(actualTypes)
.map(
commonType ->
preserveNullability
? commonType.copy(actualTypes.get(argumentPos).isNullable())
: commonType)
.map(TypeConversions::fromLogicalToDataType);
}
@Override
public Argument getExpectedArgument(FunctionDefinition functionDefinition, int argumentPos) {
return COMMON_ARGUMENT;
}
}
|
CommonArgumentTypeStrategy
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/handler/HandlerMappingIntrospector.java
|
{
"start": 19285,
"end": 20725
}
|
class ____ {
private final Map<String, AtomicInteger> counters =
Map.of("MatchableHandlerMapping", new AtomicInteger(), "CorsConfiguration", new AtomicInteger());
public void logHandlerMappingCacheMiss(HttpServletRequest request) {
logCacheMiss("MatchableHandlerMapping", request);
}
public void logCorsConfigCacheMiss(HttpServletRequest request) {
logCacheMiss("CorsConfiguration", request);
}
private void logCacheMiss(String label, HttpServletRequest request) {
AtomicInteger counter = this.counters.get(label);
Assert.notNull(counter, "Expected '" + label + "' counter.");
String message = getLogMessage(label, request);
if (logger.isWarnEnabled() && counter.getAndIncrement() == 0) {
logger.warn(message + " This is logged once only at WARN level, and every time at TRACE.");
}
else if (logger.isTraceEnabled()) {
logger.trace("No CachedResult, performing " + label + " lookup instead.");
}
}
private static String getLogMessage(String label, HttpServletRequest request) {
return "Cache miss for " + request.getDispatcherType() + " dispatch to '" + request.getRequestURI() + "' " +
"(previous " + request.getAttribute(CACHED_RESULT_ATTRIBUTE) + "). " +
"Performing " + label + " lookup.";
}
}
/**
* Request wrapper that buffers request attributes in order protect the
* underlying request from attribute changes.
*/
private static
|
CacheResultLogHelper
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/TableFunction.java
|
{
"start": 4255,
"end": 4576
}
|
class ____ have a default constructor
* and must be instantiable during runtime. Anonymous functions in Table API can only be persisted
* if the function object is not stateful (i.e. containing only transient and static fields).
*
* <p>In the API, a table function can be used as follows:
*
* <pre>{@code
* public
|
must
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/engineconfigurations/section/CustomSectionFactory.java
|
{
"start": 1263,
"end": 1687
}
|
class ____ implements SectionHelper {
private final Expression foo;
public CustomSectionHelper(Expression foo) {
this.foo = foo;
}
@Override
public CompletionStage<ResultNode> resolve(SectionResolutionContext context) {
return context.evaluate(foo).thenApply(fooVal -> new SingleResultNode(fooVal.toString() + ":" + bar));
}
}
}
|
CustomSectionHelper
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-metrics/src/main/java/org/springframework/boot/micrometer/metrics/autoconfigure/MetricsProperties.java
|
{
"start": 3061,
"end": 3470
}
|
class ____ {
/**
* Maximum number of unique URI tag values allowed. After the max number of
* tag values is reached, metrics with additional tag values are denied by
* filter.
*/
private int maxUriTags = 100;
public int getMaxUriTags() {
return this.maxUriTags;
}
public void setMaxUriTags(int maxUriTags) {
this.maxUriTags = maxUriTags;
}
}
public static
|
Client
|
java
|
alibaba__nacos
|
common/src/main/java/com/alibaba/nacos/common/notify/Event.java
|
{
"start": 956,
"end": 1908
}
|
class ____ implements Serializable {
private static final long serialVersionUID = -3731383194964997493L;
private static final AtomicLong SEQUENCE = new AtomicLong(0);
private final long sequence = SEQUENCE.getAndIncrement();
/**
* Event sequence number, which can be used to handle the sequence of events.
*
* @return sequence num, It's best to make sure it's monotone.
*/
public long sequence() {
return sequence;
}
/**
* Event scope.
*
* @return event scope, return null if for all scope
*/
public String scope() {
return null;
}
/**
* Whether is plugin event. If so, the event can be dropped when no publish and subscriber without any hint. Default
* false
*
* @return {@code true} if is plugin event, otherwise {@code false}
*/
public boolean isPluginEvent() {
return false;
}
}
|
Event
|
java
|
apache__flink
|
flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/DataStreamStateTTLTestProgram.java
|
{
"start": 2819,
"end": 5512
}
|
class ____ {
public static void main(String[] args) throws Exception {
final ParameterTool pt = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
setupEnvironment(env, pt, false);
TtlTestConfig config = TtlTestConfig.fromArgs(pt);
StateTtlConfig ttlConfig =
StateTtlConfig.newBuilder(config.ttl).cleanupFullSnapshot().build();
env.addSource(
new TtlStateUpdateSource(
config.keySpace, config.sleepAfterElements, config.sleepTime))
.name("TtlStateUpdateSource")
.keyBy(TtlStateUpdate::getKey)
.flatMap(new TtlVerifyUpdateFunction(ttlConfig, config.reportStatAfterUpdatesNum))
.name("TtlVerifyUpdateFunction")
.addSink(new PrintSinkFunction<>())
.name("PrintFailedVerifications");
StreamGraph streamGraph = env.getStreamGraph();
setBackendWithCustomTTLTimeProvider(streamGraph, pt);
streamGraph.setJobName("State TTL test job");
streamGraph.createJobCheckpointingSettings();
env.execute(streamGraph);
}
/**
* Sets the state backend to a new {@link StubStateBackend} which has a {@link
* MonotonicTTLTimeProvider}.
*
* @param streamGraph The {@link StreamGraph} of the job.
*/
private static void setBackendWithCustomTTLTimeProvider(
StreamGraph streamGraph, final ParameterTool pt) {
final MonotonicTTLTimeProvider ttlTimeProvider = new MonotonicTTLTimeProvider();
final StateBackend configuredBackend = getConfiguredStateBackend(pt);
final StateBackend stubBackend = new StubStateBackend(configuredBackend, ttlTimeProvider);
streamGraph.setStateBackend(stubBackend);
}
private static StateBackend getConfiguredStateBackend(final ParameterTool pt) {
final String stateBackend = pt.get(STATE_BACKEND.key(), STATE_BACKEND.defaultValue());
if ("hashmap".equalsIgnoreCase(stateBackend)) {
return new HashMapStateBackend();
} else if ("rocks".equalsIgnoreCase(stateBackend)) {
boolean incrementalCheckpoints =
pt.getBoolean(
STATE_BACKEND_ROCKS_INCREMENTAL.key(),
STATE_BACKEND_ROCKS_INCREMENTAL.defaultValue());
return new EmbeddedRocksDBStateBackend(incrementalCheckpoints);
} else {
throw new IllegalArgumentException("Unknown backend requested: " + stateBackend);
}
}
}
|
DataStreamStateTTLTestProgram
|
java
|
quarkusio__quarkus
|
extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/ListSchedulerJobsTest.java
|
{
"start": 407,
"end": 964
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(ListSchedulerJobsTest.Jobs.class));
@Inject
Scheduler scheduler;
@Test
public void testSchedulerListScheduledJobsMethod() {
List<Trigger> triggers = scheduler.getScheduledJobs();
assertEquals(triggers.size(), 1);
Trigger trigger = triggers.get(0);
assertEquals("the_schedule", trigger.getId());
}
static
|
ListSchedulerJobsTest
|
java
|
google__error-prone
|
annotations/src/main/java/com/google/errorprone/annotations/RestrictedApi.java
|
{
"start": 1871,
"end": 1975
}
|
interface ____{
* public string reviewer();
* public string comments();
* }
*
* public
|
ReviewedFooBar
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/cache/spi/support/TimestampsRegionTemplate.java
|
{
"start": 273,
"end": 755
}
|
class ____ extends DirectAccessRegionTemplate implements TimestampsRegion {
/**
* Constructs a {@link TimestampsRegionTemplate}.
*
* @param name - the unqualified region name
* @param regionFactory - the region factory
* @param storageAccess - the cache storage access strategy
*/
public TimestampsRegionTemplate(
String name,
RegionFactory regionFactory,
StorageAccess storageAccess) {
super( name, regionFactory, storageAccess );
}
}
|
TimestampsRegionTemplate
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated-src/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InBytesRefEvaluator.java
|
{
"start": 1294,
"end": 7684
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(InBytesRefEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator lhs;
private final EvalOperator.ExpressionEvaluator[] rhs;
private final DriverContext driverContext;
private Warnings warnings;
public InBytesRefEvaluator(
Source source,
EvalOperator.ExpressionEvaluator lhs,
EvalOperator.ExpressionEvaluator[] rhs,
DriverContext driverContext
) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) {
BytesRefBlock[] rhsBlocks = new BytesRefBlock[rhs.length];
try (Releasable rhsRelease = Releasables.wrap(rhsBlocks)) {
for (int i = 0; i < rhsBlocks.length; i++) {
rhsBlocks[i] = (BytesRefBlock) rhs[i].eval(page);
}
BytesRefVector lhsVector = lhsBlock.asVector();
if (lhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlocks);
}
BytesRefVector[] rhsVectors = new BytesRefVector[rhs.length];
for (int i = 0; i < rhsBlocks.length; i++) {
rhsVectors[i] = rhsBlocks[i].asVector();
if (rhsVectors[i] == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlocks);
}
}
return eval(page.getPositionCount(), lhsVector, rhsVectors);
}
}
}
private BooleanBlock eval(int positionCount, BytesRefBlock lhsBlock, BytesRefBlock[] rhsBlocks) {
try (BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) {
BytesRef[] rhsValues = new BytesRef[rhs.length];
BytesRef lhsScratch = new BytesRef();
BytesRef[] rhsScratch = new BytesRef[rhs.length];
for (int i = 0; i < rhs.length; i++) {
rhsScratch[i] = new BytesRef();
}
BitSet nulls = new BitSet(rhs.length);
BitSet mvs = new BitSet(rhs.length);
boolean foundMatch;
for (int p = 0; p < positionCount; p++) {
if (lhsBlock.isNull(p)) {
result.appendNull();
continue;
}
if (lhsBlock.getValueCount(p) != 1) {
if (lhsBlock.getValueCount(p) > 1) {
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
}
result.appendNull();
continue;
}
// unpack rhsBlocks into rhsValues
nulls.clear();
mvs.clear();
for (int i = 0; i < rhsBlocks.length; i++) {
if (rhsBlocks[i].isNull(p)) {
nulls.set(i);
continue;
}
if (rhsBlocks[i].getValueCount(p) > 1) {
mvs.set(i);
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
continue;
}
int o = rhsBlocks[i].getFirstValueIndex(p);
rhsValues[i] = rhsBlocks[i].getBytesRef(o, rhsScratch[i]);
}
if (nulls.cardinality() == rhsBlocks.length || mvs.cardinality() == rhsBlocks.length) {
result.appendNull();
continue;
}
foundMatch = In.process(nulls, mvs, lhsBlock.getBytesRef(lhsBlock.getFirstValueIndex(p), lhsScratch), rhsValues);
if (foundMatch) {
result.appendBoolean(true);
} else {
if (nulls.cardinality() > 0) {
result.appendNull();
} else {
result.appendBoolean(false);
}
}
}
return result.build();
}
}
private BooleanBlock eval(int positionCount, BytesRefVector lhsVector, BytesRefVector[] rhsVectors) {
try (BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) {
BytesRef[] rhsValues = new BytesRef[rhs.length];
BytesRef lhsScratch = new BytesRef();
BytesRef[] rhsScratch = new BytesRef[rhs.length];
for (int i = 0; i < rhs.length; i++) {
rhsScratch[i] = new BytesRef();
}
for (int p = 0; p < positionCount; p++) {
// unpack rhsVectors into rhsValues
for (int i = 0; i < rhsVectors.length; i++) {
rhsValues[i] = rhsVectors[i].getBytesRef(p, rhsScratch[i]);
}
result.appendBoolean(In.process(null, null, lhsVector.getBytesRef(p, lhsScratch), rhsValues));
}
return result.build();
}
}
@Override
public String toString() {
return "InBytesRefEvaluator[" + "lhs=" + lhs + ", rhs=" + Arrays.toString(rhs) + "]";
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += lhs.baseRamBytesUsed();
for (EvalOperator.ExpressionEvaluator r : rhs) {
baseRamBytesUsed += r.baseRamBytesUsed();
}
return baseRamBytesUsed;
}
@Override
public void close() {
Releasables.closeExpectNoException(lhs, () -> Releasables.close(rhs));
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
InBytesRefEvaluator
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecPythonOverAggregate.java
|
{
"start": 2969,
"end": 14462
}
|
class ____ extends BatchExecOverAggregateBase {
private static final String ARROW_PYTHON_OVER_WINDOW_AGGREGATE_FUNCTION_OPERATOR_NAME =
"org.apache.flink.table.runtime.operators.python.aggregate.arrow.batch."
+ "BatchArrowPythonOverWindowAggregateFunctionOperator";
private final List<Long> lowerBoundary;
private final List<Long> upperBoundary;
private final List<AggregateCall> aggCalls;
private final List<Integer> aggWindowIndex;
public BatchExecPythonOverAggregate(
ReadableConfig tableConfig,
OverSpec overSpec,
InputProperty inputProperty,
RowType outputType,
String description) {
super(
ExecNodeContext.newNodeId(),
ExecNodeContext.newContext(BatchExecPythonOverAggregate.class),
ExecNodeContext.newPersistedConfig(BatchExecPythonOverAggregate.class, tableConfig),
overSpec,
inputProperty,
outputType,
description);
lowerBoundary = new ArrayList<>();
upperBoundary = new ArrayList<>();
aggCalls = new ArrayList<>();
aggWindowIndex = new ArrayList<>();
}
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputType = (RowType) inputEdge.getOutputType();
List<OverSpec.GroupSpec> groups = overSpec.getGroups();
boolean[] isRangeWindows = new boolean[groups.size()];
for (int i = 0; i < groups.size(); i++) {
OverSpec.GroupSpec group = groups.get(i);
List<AggregateCall> groupAggCalls = group.getAggCalls();
aggCalls.addAll(groupAggCalls);
for (int j = 0; j < groupAggCalls.size(); j++) {
aggWindowIndex.add(i);
}
OverWindowMode mode = inferGroupMode(group);
if (mode == OverWindowMode.ROW) {
isRangeWindows[i] = false;
if (isUnboundedWindow(group)) {
lowerBoundary.add(Long.MIN_VALUE);
upperBoundary.add(Long.MAX_VALUE);
} else if (isUnboundedPrecedingWindow(group)) {
lowerBoundary.add(Long.MIN_VALUE);
upperBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
} else if (isUnboundedFollowingWindow(group)) {
lowerBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
upperBoundary.add(Long.MAX_VALUE);
} else if (isSlidingWindow(group)) {
lowerBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
upperBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
} else {
throw new TableException("Unsupported row window group spec " + group);
}
} else {
isRangeWindows[i] = true;
if (isUnboundedWindow(group)) {
lowerBoundary.add(Long.MIN_VALUE);
upperBoundary.add(Long.MAX_VALUE);
} else if (isUnboundedPrecedingWindow(group)) {
lowerBoundary.add(Long.MIN_VALUE);
upperBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
} else if (isUnboundedFollowingWindow(group)) {
lowerBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
upperBoundary.add(Long.MAX_VALUE);
} else if (isSlidingWindow(group)) {
lowerBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
upperBoundary.add(
OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
} else {
throw new TableException("Unsupported range window group spec " + group);
}
}
}
Configuration pythonConfig =
CommonPythonUtil.extractPythonConfiguration(
planner.getTableConfig(), planner.getFlinkContext().getClassLoader());
OneInputTransformation<RowData, RowData> transform =
createPythonOneInputTransformation(
inputTransform,
inputType,
InternalTypeInfo.of(getOutputType()).toRowType(),
isRangeWindows,
pythonConfig,
config,
planner.getFlinkContext().getClassLoader());
if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(
pythonConfig, planner.getFlinkContext().getClassLoader())) {
transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
}
return transform;
}
private OneInputTransformation<RowData, RowData> createPythonOneInputTransformation(
Transformation<RowData> inputTransform,
RowType inputRowType,
RowType outputRowType,
boolean[] isRangeWindows,
Configuration pythonConfig,
ExecNodeConfig config,
ClassLoader classLoader) {
Tuple2<int[], PythonFunctionInfo[]> aggCallInfos =
CommonPythonUtil.extractPythonAggregateFunctionInfosFromAggregateCall(
aggCalls.toArray(new AggregateCall[0]));
int[] pythonUdafInputOffsets = aggCallInfos.f0;
PythonFunctionInfo[] pythonFunctionInfos = aggCallInfos.f1;
OneInputStreamOperator<RowData, RowData> pythonOperator =
getPythonOverWindowAggregateFunctionOperator(
config,
classLoader,
pythonConfig,
inputRowType,
outputRowType,
isRangeWindows,
pythonUdafInputOffsets,
pythonFunctionInfos);
return ExecNodeUtil.createOneInputTransformation(
inputTransform,
createTransformationName(pythonConfig),
createTransformationDescription(pythonConfig),
pythonOperator,
InternalTypeInfo.of(outputRowType),
inputTransform.getParallelism(),
false);
}
@SuppressWarnings("unchecked")
private OneInputStreamOperator<RowData, RowData> getPythonOverWindowAggregateFunctionOperator(
ExecNodeConfig config,
ClassLoader classLoader,
Configuration pythonConfig,
RowType inputRowType,
RowType outputRowType,
boolean[] isRangeWindows,
int[] udafInputOffsets,
PythonFunctionInfo[] pythonFunctionInfos) {
Class<?> clazz =
CommonPythonUtil.loadClass(
ARROW_PYTHON_OVER_WINDOW_AGGREGATE_FUNCTION_OPERATOR_NAME, classLoader);
RowType udfInputType = (RowType) Projection.of(udafInputOffsets).project(inputRowType);
RowType udfOutputType =
(RowType)
Projection.range(
inputRowType.getFieldCount(), outputRowType.getFieldCount())
.project(outputRowType);
PartitionSpec partitionSpec = overSpec.getPartition();
List<OverSpec.GroupSpec> groups = overSpec.getGroups();
SortSpec sortSpec = groups.get(groups.size() - 1).getSort();
try {
Constructor<?> ctor =
clazz.getConstructor(
Configuration.class,
PythonFunctionInfo[].class,
RowType.class,
RowType.class,
RowType.class,
long[].class,
long[].class,
boolean[].class,
int[].class,
int.class,
boolean.class,
GeneratedProjection.class,
GeneratedProjection.class,
GeneratedProjection.class);
return (OneInputStreamOperator<RowData, RowData>)
ctor.newInstance(
pythonConfig,
pythonFunctionInfos,
inputRowType,
udfInputType,
udfOutputType,
lowerBoundary.stream().mapToLong(i -> i).toArray(),
upperBoundary.stream().mapToLong(i -> i).toArray(),
isRangeWindows,
aggWindowIndex.stream().mapToInt(i -> i).toArray(),
sortSpec.getFieldIndices()[0],
sortSpec.getAscendingOrders()[0],
ProjectionCodeGenerator.generateProjection(
new CodeGeneratorContext(config, classLoader),
"UdafInputProjection",
inputRowType,
udfInputType,
udafInputOffsets),
ProjectionCodeGenerator.generateProjection(
new CodeGeneratorContext(config, classLoader),
"GroupKey",
inputRowType,
(RowType)
Projection.of(partitionSpec.getFieldIndices())
.project(inputRowType),
partitionSpec.getFieldIndices()),
ProjectionCodeGenerator.generateProjection(
new CodeGeneratorContext(config, classLoader),
"GroupSet",
inputRowType,
(RowType)
Projection.of(partitionSpec.getFieldIndices())
.project(inputRowType),
partitionSpec.getFieldIndices()));
} catch (NoSuchMethodException
| InstantiationException
| IllegalAccessException
| InvocationTargetException e) {
throw new TableException(
"Python BatchArrowPythonOverWindowAggregateFunctionOperator constructed failed.",
e);
}
}
}
|
BatchExecPythonOverAggregate
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessor.java
|
{
"start": 30328,
"end": 35867
}
|
class ____ implements BeanRegistrationAotContribution {
private static final String REGISTERED_BEAN_PARAMETER = "registeredBean";
private static final String INSTANCE_PARAMETER = "instance";
private final Class<?> target;
private final Collection<LookupElement> lookupElements;
private final @Nullable AutowireCandidateResolver candidateResolver;
AotContribution(Class<?> target, Collection<LookupElement> lookupElements,
@Nullable AutowireCandidateResolver candidateResolver) {
this.target = target;
this.lookupElements = lookupElements;
this.candidateResolver = candidateResolver;
}
@Override
public void applyTo(GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode) {
GeneratedClass generatedClass = generationContext.getGeneratedClasses()
.addForFeatureComponent("ResourceAutowiring", this.target, type -> {
type.addJavadoc("Resource autowiring for {@link $T}.", this.target);
type.addModifiers(javax.lang.model.element.Modifier.PUBLIC);
});
GeneratedMethod generateMethod = generatedClass.getMethods().add("apply", method -> {
method.addJavadoc("Apply resource autowiring.");
method.addModifiers(javax.lang.model.element.Modifier.PUBLIC,
javax.lang.model.element.Modifier.STATIC);
method.addParameter(RegisteredBean.class, REGISTERED_BEAN_PARAMETER);
method.addParameter(this.target, INSTANCE_PARAMETER);
method.returns(this.target);
method.addCode(generateMethodCode(generatedClass.getName(),
generationContext.getRuntimeHints()));
});
beanRegistrationCode.addInstancePostProcessor(generateMethod.toMethodReference());
registerHints(generationContext.getRuntimeHints());
}
private CodeBlock generateMethodCode(ClassName targetClassName, RuntimeHints hints) {
CodeBlock.Builder code = CodeBlock.builder();
for (LookupElement lookupElement : this.lookupElements) {
code.addStatement(generateMethodStatementForElement(
targetClassName, lookupElement, hints));
}
code.addStatement("return $L", INSTANCE_PARAMETER);
return code.build();
}
private CodeBlock generateMethodStatementForElement(ClassName targetClassName,
LookupElement lookupElement, RuntimeHints hints) {
Member member = lookupElement.getMember();
if (member instanceof Field field) {
return generateMethodStatementForField(
targetClassName, field, lookupElement, hints);
}
if (member instanceof Method method) {
return generateMethodStatementForMethod(
targetClassName, method, lookupElement, hints);
}
throw new IllegalStateException(
"Unsupported member type " + member.getClass().getName());
}
private CodeBlock generateMethodStatementForField(ClassName targetClassName,
Field field, LookupElement lookupElement, RuntimeHints hints) {
hints.reflection().registerField(field);
CodeBlock resolver = generateFieldResolverCode(field, lookupElement);
AccessControl accessControl = AccessControl.forMember(field);
if (!accessControl.isAccessibleFrom(targetClassName)) {
return CodeBlock.of("$L.resolveAndSet($L, $L)", resolver,
REGISTERED_BEAN_PARAMETER, INSTANCE_PARAMETER);
}
return CodeBlock.of("$L.$L = $L.resolve($L)", INSTANCE_PARAMETER,
field.getName(), resolver, REGISTERED_BEAN_PARAMETER);
}
private CodeBlock generateFieldResolverCode(Field field, LookupElement lookupElement) {
if (lookupElement.isDefaultName) {
return CodeBlock.of("$T.$L($S)", ResourceElementResolver.class,
"forField", field.getName());
}
else {
return CodeBlock.of("$T.$L($S, $S)", ResourceElementResolver.class,
"forField", field.getName(), lookupElement.getName());
}
}
private CodeBlock generateMethodStatementForMethod(ClassName targetClassName,
Method method, LookupElement lookupElement, RuntimeHints hints) {
CodeBlock resolver = generateMethodResolverCode(method, lookupElement);
AccessControl accessControl = AccessControl.forMember(method);
if (!accessControl.isAccessibleFrom(targetClassName)) {
hints.reflection().registerMethod(method, ExecutableMode.INVOKE);
return CodeBlock.of("$L.resolveAndSet($L, $L)", resolver,
REGISTERED_BEAN_PARAMETER, INSTANCE_PARAMETER);
}
hints.reflection().registerType(method.getDeclaringClass());
return CodeBlock.of("$L.$L($L.resolve($L))", INSTANCE_PARAMETER,
method.getName(), resolver, REGISTERED_BEAN_PARAMETER);
}
private CodeBlock generateMethodResolverCode(Method method, LookupElement lookupElement) {
if (lookupElement.isDefaultName) {
return CodeBlock.of("$T.$L($S, $T.class)", ResourceElementResolver.class,
"forMethod", method.getName(), lookupElement.getLookupType());
}
else {
return CodeBlock.of("$T.$L($S, $T.class, $S)", ResourceElementResolver.class,
"forMethod", method.getName(), lookupElement.getLookupType(), lookupElement.getName());
}
}
private void registerHints(RuntimeHints runtimeHints) {
this.lookupElements.forEach(lookupElement ->
registerProxyIfNecessary(runtimeHints, lookupElement.getDependencyDescriptor()));
}
private void registerProxyIfNecessary(RuntimeHints runtimeHints, DependencyDescriptor dependencyDescriptor) {
if (this.candidateResolver != null) {
Class<?> proxyClass =
this.candidateResolver.getLazyResolutionProxyClass(dependencyDescriptor, null);
if (proxyClass != null) {
ClassHintUtils.registerProxyIfNecessary(proxyClass, runtimeHints);
}
}
}
}
}
|
AotContribution
|
java
|
micronaut-projects__micronaut-core
|
json-core/src/main/java/io/micronaut/json/tree/JsonArray.java
|
{
"start": 948,
"end": 2122
}
|
class ____ extends JsonContainer {
private final List<JsonNode> values;
JsonArray(List<JsonNode> values) {
this.values = values;
}
@Override
public Object getValue() {
return values.stream().map(JsonNode::getValue).toList();
}
@Override
public int size() {
return values.size();
}
@Override
public boolean isArray() {
return true;
}
@Override
public JsonNode get(@NonNull String fieldName) {
return null;
}
@Override
public JsonNode get(int index) {
if (index < 0 || index >= size()) {
return null;
} else {
return values.get(index);
}
}
@Override
@NonNull
public Iterable<JsonNode> values() {
return values;
}
@Override
@NonNull
public Iterable<Map.Entry<String, JsonNode>> entries() {
throw new IllegalStateException("Not an object");
}
@Override
public boolean equals(Object o) {
return o instanceof JsonArray ja && ja.values.equals(values);
}
@Override
public int hashCode() {
return values.hashCode();
}
}
|
JsonArray
|
java
|
apache__camel
|
components/camel-google/camel-google-mail/src/generated/java/org/apache/camel/component/google/mail/internal/GmailUsersMessagesApiMethod.java
|
{
"start": 673,
"end": 5735
}
|
enum ____ implements ApiMethod {
ATTACHMENTS(
com.google.api.services.gmail.Gmail.Users.Messages.Attachments.class,
"attachments"),
BATCH_DELETE(
com.google.api.services.gmail.Gmail.Users.Messages.BatchDelete.class,
"batchDelete",
arg("userId", String.class),
arg("batchDeleteMessagesRequest", com.google.api.services.gmail.model.BatchDeleteMessagesRequest.class)),
BATCH_MODIFY(
com.google.api.services.gmail.Gmail.Users.Messages.BatchModify.class,
"batchModify",
arg("userId", String.class),
arg("batchModifyMessagesRequest", com.google.api.services.gmail.model.BatchModifyMessagesRequest.class)),
DELETE(
com.google.api.services.gmail.Gmail.Users.Messages.Delete.class,
"delete",
arg("userId", String.class),
arg("id", String.class)),
GET(
com.google.api.services.gmail.Gmail.Users.Messages.Get.class,
"get",
arg("userId", String.class),
arg("id", String.class),
setter("format", String.class),
setter("metadataHeaders", java.util.List.class)),
GMAIL_IMPORT(
com.google.api.services.gmail.Gmail.Users.Messages.GmailImport.class,
"gmailImport",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class),
setter("deleted", Boolean.class),
setter("internalDateSource", String.class),
setter("neverMarkSpam", Boolean.class),
setter("processForCalendar", Boolean.class)),
GMAIL_IMPORT_1(
com.google.api.services.gmail.Gmail.Users.Messages.GmailImport.class,
"gmailImport",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class),
arg("mediaContent", com.google.api.client.http.AbstractInputStreamContent.class),
setter("deleted", Boolean.class),
setter("internalDateSource", String.class),
setter("neverMarkSpam", Boolean.class),
setter("processForCalendar", Boolean.class)),
INSERT(
com.google.api.services.gmail.Gmail.Users.Messages.Insert.class,
"insert",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class),
setter("deleted", Boolean.class),
setter("internalDateSource", String.class)),
INSERT_1(
com.google.api.services.gmail.Gmail.Users.Messages.Insert.class,
"insert",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class),
arg("mediaContent", com.google.api.client.http.AbstractInputStreamContent.class),
setter("deleted", Boolean.class),
setter("internalDateSource", String.class)),
LIST(
com.google.api.services.gmail.Gmail.Users.Messages.List.class,
"list",
arg("userId", String.class),
setter("includeSpamTrash", Boolean.class),
setter("labelIds", java.util.List.class),
setter("maxResults", Long.class),
setter("pageToken", String.class),
setter("q", String.class)),
MODIFY(
com.google.api.services.gmail.Gmail.Users.Messages.Modify.class,
"modify",
arg("userId", String.class),
arg("id", String.class),
arg("modifyMessageRequest", com.google.api.services.gmail.model.ModifyMessageRequest.class)),
SEND(
com.google.api.services.gmail.Gmail.Users.Messages.Send.class,
"send",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class)),
SEND_1(
com.google.api.services.gmail.Gmail.Users.Messages.Send.class,
"send",
arg("userId", String.class),
arg("content", com.google.api.services.gmail.model.Message.class),
arg("mediaContent", com.google.api.client.http.AbstractInputStreamContent.class)),
TRASH(
com.google.api.services.gmail.Gmail.Users.Messages.Trash.class,
"trash",
arg("userId", String.class),
arg("id", String.class)),
UNTRASH(
com.google.api.services.gmail.Gmail.Users.Messages.Untrash.class,
"untrash",
arg("userId", String.class),
arg("id", String.class));
private final ApiMethod apiMethod;
GmailUsersMessagesApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(Messages.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
GmailUsersMessagesApiMethod
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryResponseTests.java
|
{
"start": 1667,
"end": 7207
}
|
class ____ extends AbstractXContentSerializingTestCase<SqlQueryResponse> {
static String randomStringCursor() {
return randomBoolean() ? "" : randomAlphaOfLength(10);
}
@Override
protected SqlQueryResponse createXContextTestInstance(XContentType xContentType) {
SqlTestUtils.assumeXContentJsonOrCbor(xContentType);
return super.createXContextTestInstance(xContentType);
}
@Override
protected SqlQueryResponse createTestInstance() {
return createRandomInstance(
randomStringCursor(),
randomFrom(Mode.values()),
randomBoolean(),
rarely() ? null : randomAlphaOfLength(100),
randomBoolean(),
randomBoolean()
);
}
@Override
protected SqlQueryResponse mutateInstance(SqlQueryResponse instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<SqlQueryResponse> instanceReader() {
return SqlQueryResponse::new;
}
public static SqlQueryResponse createRandomInstance(
String cursor,
Mode mode,
boolean columnar,
String asyncExecutionId,
boolean isPartial,
boolean isRunning
) {
int columnCount = between(1, 10);
List<ColumnInfo> columns = null;
if (randomBoolean()) {
columns = new ArrayList<>(columnCount);
for (int i = 0; i < columnCount; i++) {
columns.add(
new ColumnInfo(
randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomBoolean() ? null : randomInt(25)
)
);
}
}
List<List<Object>> rows;
if (randomBoolean()) {
rows = Collections.emptyList();
} else {
int rowCount = between(1, 10);
if (columnar && columns != null) {
int temp = rowCount;
rowCount = columnCount;
columnCount = temp;
}
rows = new ArrayList<>(rowCount);
for (int r = 0; r < rowCount; r++) {
List<Object> row = new ArrayList<>(rowCount);
for (int c = 0; c < columnCount; c++) {
Supplier<Object> value = randomFrom(
Arrays.asList(() -> randomAlphaOfLength(10), ESTestCase::randomLong, ESTestCase::randomDouble, () -> null)
);
row.add(value.get());
}
rows.add(row);
}
}
return new SqlQueryResponse(cursor, mode, INTRODUCING_DATE_NANOS, false, columns, rows, asyncExecutionId, isPartial, isRunning);
}
public void testToXContent() throws IOException {
SqlQueryResponse testInstance = createTestInstance();
XContentBuilder builder = testInstance.toXContent(XContentFactory.jsonBuilder(), EMPTY_PARAMS);
Map<String, Object> rootMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
logger.info(Strings.toString(builder));
if (testInstance.columns() != null) {
List<?> columns = (List<?>) rootMap.get("columns");
assertThat(columns, hasSize(testInstance.columns().size()));
for (int i = 0; i < columns.size(); i++) {
Map<?, ?> columnMap = (Map<?, ?>) columns.get(i);
ColumnInfo columnInfo = testInstance.columns().get(i);
assertEquals(columnInfo.name(), columnMap.get("name"));
assertEquals(columnInfo.esType(), columnMap.get("type"));
assertEquals(columnInfo.displaySize(), columnMap.get("display_size"));
}
} else {
assertNull(rootMap.get("columns"));
}
List<?> rows;
if (testInstance.columnar()) {
rows = ((List<?>) rootMap.get("values"));
} else {
rows = ((List<?>) rootMap.get("rows"));
}
assertNotNull(rows);
assertThat(rows, hasSize(testInstance.rows().size()));
for (int i = 0; i < rows.size(); i++) {
List<?> row = (List<?>) rows.get(i);
assertEquals(row, testInstance.rows().get(i));
}
if (testInstance.cursor().equals("") == false) {
assertEquals(rootMap.get(CURSOR.getPreferredName()), testInstance.cursor());
}
if (Strings.hasText(testInstance.id())) {
assertEquals(testInstance.id(), rootMap.get(ID_NAME));
assertEquals(testInstance.isPartial(), rootMap.get(IS_PARTIAL_NAME));
assertEquals(testInstance.isRunning(), rootMap.get(IS_RUNNING_NAME));
}
}
@Override
protected SqlQueryResponse doParseInstance(XContentParser parser) throws IOException {
org.elasticsearch.xpack.sql.proto.SqlQueryResponse protoResponse = SqlTestUtils.fromXContentParser(
parser,
Payloads::parseQueryResponse
);
return new SqlQueryResponse(
protoResponse.cursor(),
Mode.JDBC,
INTRODUCING_DATE_NANOS,
false,
protoResponse.columns(),
protoResponse.rows(),
protoResponse.id(),
protoResponse.isPartial(),
protoResponse.isRunning()
);
}
}
|
SqlQueryResponseTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/embedded/EmbeddableB.java
|
{
"start": 256,
"end": 449
}
|
class ____ {
private String embedAttrB;
public String getEmbedAttrB() {
return embedAttrB;
}
public void setEmbedAttrB(String embedAttrB) {
this.embedAttrB = embedAttrB;
}
}
|
EmbeddableB
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockSchedulerNodes.java
|
{
"start": 1622,
"end": 3481
}
|
class ____ {
private static final Logger LOG = LoggerFactory.getLogger(MockSchedulerNodes.class);
private String config;
private Map<NodeId, FiCaSchedulerNode> nodeIdToSchedulerNodes = new HashMap<>();
MockSchedulerNodes(String config) {
this.config = config;
init();
}
/**
* Format is:
* host1=partition[ res=resource];
* host2=partition[ res=resource];
*/
private void init() {
String[] nodesConfigStrArray = config.split(";");
for (String p : nodesConfigStrArray) {
String[] arr = p.split(" ");
NodeId nodeId = NodeId.newInstance(arr[0].substring(0, arr[0].indexOf("=")), 1);
String partition = arr[0].substring(arr[0].indexOf("=") + 1);
FiCaSchedulerNode sn = mock(FiCaSchedulerNode.class);
when(sn.getNodeID()).thenReturn(nodeId);
when(sn.getPartition()).thenReturn(partition);
Resource totalRes = Resources.createResource(0);
if (arr.length > 1) {
String res = arr[1];
if (res.contains("res=")) {
String resString = res.substring(
res.indexOf("res=") + "res=".length());
totalRes = parseResourceFromString(resString);
}
}
when(sn.getTotalResource()).thenReturn(totalRes);
when(sn.getUnallocatedResource()).thenReturn(Resources.clone(totalRes));
// TODO, add settings of killable resources when necessary
when(sn.getTotalKillableResources()).thenReturn(Resources.none());
List<RMContainer> liveContainers = new ArrayList<>();
when(sn.getCopiedListOfRunningContainers()).thenReturn(liveContainers);
nodeIdToSchedulerNodes.put(nodeId, sn);
LOG.debug("add scheduler node, id=" + nodeId + ", partition=" + partition);
}
}
Map<NodeId, FiCaSchedulerNode> getNodeIdToSchedulerNodes() {
return nodeIdToSchedulerNodes;
}
}
|
MockSchedulerNodes
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/compiler/support/JdkCompiler.java
|
{
"start": 6459,
"end": 9805
}
|
class ____ extends ForwardingJavaFileManager<JavaFileManager> {
private final ClassLoaderImpl classLoader;
private final Map<URI, JavaFileObject> fileObjects = new HashMap<>();
public JavaFileManagerImpl(JavaFileManager fileManager, ClassLoaderImpl classLoader) {
super(fileManager);
this.classLoader = classLoader;
}
@Override
public FileObject getFileForInput(Location location, String packageName, String relativeName)
throws IOException {
FileObject o = fileObjects.get(uri(location, packageName, relativeName));
if (o != null) {
return o;
}
return super.getFileForInput(location, packageName, relativeName);
}
public void putFileForInput(
StandardLocation location, String packageName, String relativeName, JavaFileObject file) {
fileObjects.put(uri(location, packageName, relativeName), file);
}
private URI uri(Location location, String packageName, String relativeName) {
return ClassUtils.toURI(location.getName() + '/' + packageName + '/' + relativeName);
}
@Override
public JavaFileObject getJavaFileForOutput(
Location location, String qualifiedName, Kind kind, FileObject outputFile) throws IOException {
JavaFileObject file = new JavaFileObjectImpl(qualifiedName, kind);
classLoader.add(qualifiedName, file);
return file;
}
@Override
public ClassLoader getClassLoader(JavaFileManager.Location location) {
return classLoader;
}
@Override
public String inferBinaryName(Location loc, JavaFileObject file) {
if (file instanceof JavaFileObjectImpl) {
return file.getName();
}
return super.inferBinaryName(loc, file);
}
@Override
public Iterable<JavaFileObject> list(Location location, String packageName, Set<Kind> kinds, boolean recurse)
throws IOException {
Iterable<JavaFileObject> result = super.list(location, packageName, kinds, recurse);
ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
ArrayList<JavaFileObject> files = new ArrayList<>();
if (location == StandardLocation.CLASS_PATH && kinds.contains(JavaFileObject.Kind.CLASS)) {
for (JavaFileObject file : fileObjects.values()) {
if (file.getKind() == Kind.CLASS && file.getName().startsWith(packageName)) {
files.add(file);
}
}
files.addAll(classLoader.files());
} else if (location == StandardLocation.SOURCE_PATH && kinds.contains(JavaFileObject.Kind.SOURCE)) {
for (JavaFileObject file : fileObjects.values()) {
if (file.getKind() == Kind.SOURCE && file.getName().startsWith(packageName)) {
files.add(file);
}
}
}
for (JavaFileObject file : result) {
files.add(file);
}
return files;
}
}
private static final
|
JavaFileManagerImpl
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java
|
{
"start": 900,
"end": 2166
}
|
class ____ {
private RandomShardRoutingMutator() {
}
public static ShardRouting randomChange(ShardRouting shardRouting, Set<String> nodes) {
switch (randomInt(2)) {
case 0:
if (shardRouting.unassigned() == false && shardRouting.primary() == false) {
shardRouting = shardRouting.moveToUnassigned(randomUnassignedInfo(randomAlphaOfLength(10), false));
} else if (shardRouting.unassignedInfo() != null) {
shardRouting = shardRouting.updateUnassigned(
randomUnassignedInfo(randomAlphaOfLength(10), false),
shardRouting.recoverySource()
);
}
break;
case 1:
if (shardRouting.unassigned() && nodes.isEmpty() == false) {
shardRouting = shardRouting.initialize(randomFrom(nodes), null, -1);
}
break;
case 2:
if (shardRouting.initializing()) {
shardRouting = shardRouting.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
break;
}
return shardRouting;
}
}
|
RandomShardRoutingMutator
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/impl/RouteSuspendResumeWarmUpTest.java
|
{
"start": 2178,
"end": 2642
}
|
class ____ extends ServiceSupport implements Processor {
private volatile String state;
@Override
public void process(Exchange exchange) {
// noop
}
@Override
protected void doStart() {
state = "start";
}
@Override
protected void doStop() {
state = "stop";
}
public String getState() {
return state;
}
}
}
|
MyService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/table/EmbeddedTableTests.java
|
{
"start": 9689,
"end": 9790
}
|
class ____ {
private String kind;
private Instant whenReached;
}
@Embeddable
public static
|
Bottom
|
java
|
apache__rocketmq
|
client/src/main/java/org/apache/rocketmq/client/hook/EndTransactionHook.java
|
{
"start": 851,
"end": 970
}
|
interface ____ {
String hookName();
void endTransaction(final EndTransactionContext context);
}
|
EndTransactionHook
|
java
|
apache__camel
|
components/camel-box/camel-box-component/src/generated/java/org/apache/camel/component/box/BoxFoldersManagerEndpointConfiguration.java
|
{
"start": 3338,
"end": 9951
}
|
class ____ extends BoxConfiguration {
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolderSharedLink", description="The access level of the shared link")})
private com.box.sdk.BoxSharedLink.Access access;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "copyFolder", description="The id of the destination folder"), @ApiMethod(methodName = "moveFolder", description="The id of the destination folder")})
private String destinationFolderId;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "getFolderInfo", description="The information fields to retrieve; if null all information fields are retrieved."), @ApiMethod(methodName = "getFolderItems", description="The item fields to retrieve for each child item; if null all item fields are retrieved.")})
private String[] fields;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "copyFolder", description="The id of folder to copy"), @ApiMethod(methodName = "createFolderSharedLink", description="The id of folder to create shared link on"), @ApiMethod(methodName = "deleteFolder", description="The id of folder to delete"), @ApiMethod(methodName = "getFolderInfo", description="The id of folder"), @ApiMethod(methodName = "getFolderItems", description="The id of folder"), @ApiMethod(methodName = "moveFolder", description="The id of folder to move"), @ApiMethod(methodName = "renameFolder", description="The id of folder to rename"), @ApiMethod(methodName = "updateFolderInfo", description="The id of folder to update")})
private String folderId;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolder", description="The name of created folder")})
private String folderName;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "updateFolderInfo", description="The updated information")})
private com.box.sdk.BoxFolder.Info info;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "getFolderItems", description="The maximum number of children to retrieve after the offset; if null all child items are retrieved.")})
private Long limit;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "renameFolder", description="The new name of folder")})
private String newFolderName;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "copyFolder", description="The new name for copied folder; if newName is null, the copied folder has same name as the original."), @ApiMethod(methodName = "moveFolder", description="The new name of moved folder; if newName is null, the moved folder has same name as the original.")})
private String newName;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "getFolderItems", description="The index of first child item to retrieve; if null all child items are retrieved.")})
private Long offset;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolder", description="The id of parent folder")})
private String parentFolderId;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolder", description="Sequence of Box folder names from parent folder to returned folder"), @ApiMethod(methodName = "getFolder", description="Sequence of Box folder names from root folder to returned folder")})
private String[] path;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolderSharedLink", description="The permissions of the created link; if permissions is null then the created shared link is create with default permissions.")})
private com.box.sdk.BoxSharedLink.Permissions permissions;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "createFolderSharedLink", description="The date and time at which time the created shared link will expire; if unsharedDate is null then a non-expiring link is created.")})
private java.util.Date unshareDate;
public com.box.sdk.BoxSharedLink.Access getAccess() {
return access;
}
public void setAccess(com.box.sdk.BoxSharedLink.Access access) {
this.access = access;
}
public String getDestinationFolderId() {
return destinationFolderId;
}
public void setDestinationFolderId(String destinationFolderId) {
this.destinationFolderId = destinationFolderId;
}
public String[] getFields() {
return fields;
}
public void setFields(String[] fields) {
this.fields = fields;
}
public String getFolderId() {
return folderId;
}
public void setFolderId(String folderId) {
this.folderId = folderId;
}
public String getFolderName() {
return folderName;
}
public void setFolderName(String folderName) {
this.folderName = folderName;
}
public com.box.sdk.BoxFolder.Info getInfo() {
return info;
}
public void setInfo(com.box.sdk.BoxFolder.Info info) {
this.info = info;
}
public Long getLimit() {
return limit;
}
public void setLimit(Long limit) {
this.limit = limit;
}
public String getNewFolderName() {
return newFolderName;
}
public void setNewFolderName(String newFolderName) {
this.newFolderName = newFolderName;
}
public String getNewName() {
return newName;
}
public void setNewName(String newName) {
this.newName = newName;
}
public Long getOffset() {
return offset;
}
public void setOffset(Long offset) {
this.offset = offset;
}
public String getParentFolderId() {
return parentFolderId;
}
public void setParentFolderId(String parentFolderId) {
this.parentFolderId = parentFolderId;
}
public String[] getPath() {
return path;
}
public void setPath(String[] path) {
this.path = path;
}
public com.box.sdk.BoxSharedLink.Permissions getPermissions() {
return permissions;
}
public void setPermissions(com.box.sdk.BoxSharedLink.Permissions permissions) {
this.permissions = permissions;
}
public java.util.Date getUnshareDate() {
return unshareDate;
}
public void setUnshareDate(java.util.Date unshareDate) {
this.unshareDate = unshareDate;
}
}
|
BoxFoldersManagerEndpointConfiguration
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/EnableAutoConfiguration.java
|
{
"start": 3997,
"end": 4067
}
|
class ____ such that they will never be
* applied.
* @return the
|
names
|
java
|
apache__dubbo
|
dubbo-metadata/dubbo-metadata-api/src/main/java/org/apache/dubbo/metadata/DubboMetadataServiceV2Triple.java
|
{
"start": 7287,
"end": 12692
}
|
class ____
implements MetadataServiceV2, ServerService<MetadataServiceV2> {
private <T, R> BiConsumer<T, StreamObserver<R>> syncToAsync(java.util.function.Function<T, R> syncFun) {
return new BiConsumer<T, StreamObserver<R>>() {
@Override
public void accept(T t, StreamObserver<R> observer) {
try {
R ret = syncFun.apply(t);
observer.onNext(ret);
observer.onCompleted();
} catch (Throwable e) {
observer.onError(e);
}
}
};
}
@Override
public CompletableFuture<MetadataInfoV2> getMetadataInfoAsync(MetadataRequest request) {
return CompletableFuture.completedFuture(getMetadataInfo(request));
}
@Override
public CompletableFuture<OpenAPIInfo> getOpenAPIInfoAsync(OpenAPIRequest request) {
return CompletableFuture.completedFuture(getOpenAPIInfo(request));
}
// This server stream type unary method is <b>only</b> used for generated stub to support async unary method.
// It will not be called if you are NOT using Dubbo3 generated triple stub and <b>DO NOT</b> implement this
// method.
public void getMetadataInfo(MetadataRequest request, StreamObserver<MetadataInfoV2> responseObserver) {
getMetadataInfoAsync(request).whenComplete((r, t) -> {
if (t != null) {
responseObserver.onError(t);
} else {
responseObserver.onNext(r);
responseObserver.onCompleted();
}
});
}
public void getOpenAPIInfo(OpenAPIRequest request, StreamObserver<OpenAPIInfo> responseObserver) {
getOpenAPIInfoAsync(request).whenComplete((r, t) -> {
if (t != null) {
responseObserver.onError(t);
} else {
responseObserver.onNext(r);
responseObserver.onCompleted();
}
});
}
@Override
public final Invoker<MetadataServiceV2> getInvoker(URL url) {
PathResolver pathResolver = url.getOrDefaultFrameworkModel()
.getExtensionLoader(PathResolver.class)
.getDefaultExtension();
Map<String, StubMethodHandler<?, ?>> handlers = new HashMap<>();
pathResolver.addNativeStub("/" + SERVICE_NAME + "/GetMetadataInfo");
pathResolver.addNativeStub("/" + SERVICE_NAME + "/GetMetadataInfoAsync");
// for compatibility
pathResolver.addNativeStub("/" + JAVA_SERVICE_NAME + "/GetMetadataInfo");
pathResolver.addNativeStub("/" + JAVA_SERVICE_NAME + "/GetMetadataInfoAsync");
pathResolver.addNativeStub("/" + SERVICE_NAME + "/GetOpenAPIInfo");
pathResolver.addNativeStub("/" + SERVICE_NAME + "/GetOpenAPIInfoAsync");
// for compatibility
pathResolver.addNativeStub("/" + JAVA_SERVICE_NAME + "/GetOpenAPIInfo");
pathResolver.addNativeStub("/" + JAVA_SERVICE_NAME + "/GetOpenAPIInfoAsync");
BiConsumer<MetadataRequest, StreamObserver<MetadataInfoV2>> getMetadataInfoFunc = this::getMetadataInfo;
handlers.put(getMetadataInfoMethod.getMethodName(), new UnaryStubMethodHandler<>(getMetadataInfoFunc));
BiConsumer<MetadataRequest, StreamObserver<MetadataInfoV2>> getMetadataInfoAsyncFunc =
syncToAsync(this::getMetadataInfo);
handlers.put(
getMetadataInfoProxyAsyncMethod.getMethodName(),
new UnaryStubMethodHandler<>(getMetadataInfoAsyncFunc));
BiConsumer<OpenAPIRequest, StreamObserver<OpenAPIInfo>> getOpenAPIInfoFunc = this::getOpenAPIInfo;
handlers.put(getOpenAPIInfoMethod.getMethodName(), new UnaryStubMethodHandler<>(getOpenAPIInfoFunc));
BiConsumer<OpenAPIRequest, StreamObserver<OpenAPIInfo>> getOpenAPIInfoAsyncFunc =
syncToAsync(this::getOpenAPIInfo);
handlers.put(
getOpenAPIInfoProxyAsyncMethod.getMethodName(),
new UnaryStubMethodHandler<>(getOpenAPIInfoAsyncFunc));
return new StubInvoker<>(this, url, MetadataServiceV2.class, handlers);
}
@Override
public MetadataInfoV2 getMetadataInfo(MetadataRequest request) {
throw unimplementedMethodException(getMetadataInfoMethod);
}
@Override
public OpenAPIInfo getOpenAPIInfo(OpenAPIRequest request) {
throw unimplementedMethodException(getOpenAPIInfoMethod);
}
@Override
public final ServiceDescriptor getServiceDescriptor() {
return serviceDescriptor;
}
private RpcException unimplementedMethodException(StubMethodDescriptor methodDescriptor) {
return TriRpcStatus.UNIMPLEMENTED
.withDescription(String.format(
"Method %s is unimplemented",
"/" + serviceDescriptor.getInterfaceName() + "/" + methodDescriptor.getMethodName()))
.asException();
}
}
}
|
MetadataServiceV2ImplBase
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java
|
{
"start": 1539,
"end": 8202
}
|
class ____ extends ESIntegTestCase {
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
public void testRollingRestartOfTwoNodeCluster() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1);
final List<String> nodes = internalCluster().startNodes(2);
createIndex(
"test",
indexSettings(2, 1) // causes rebalancing
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) // assign shards
.build()
);
ensureGreen("test");
final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.clear()
.setNodes(true)
.get()
.getState()
.nodes();
final Map<String, String> nodeIdsByName = Maps.newMapWithExpectedSize(discoveryNodes.getSize());
discoveryNodes.forEach(n -> nodeIdsByName.put(n.getName(), n.getId()));
RestClient restClient = getRestClient();
internalCluster().rollingRestart(new InternalTestCluster.RestartCallback() {
@Override
public void doAfterNodes(int n, Client client) throws IOException {
ensureGreen("test");
final Request request = new Request("POST", "/_cluster/voting_config_exclusions");
final String nodeName = internalCluster().getNodeNames()[n];
if (randomBoolean()) {
request.addParameter("node_names", nodeName);
} else {
final String nodeId = nodeIdsByName.get(nodeName);
assertNotNull(nodeName, nodeId);
request.addParameter("node_ids", nodeId);
}
final Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
@Override
public Settings onNodeStopped(String nodeName) throws IOException {
String viaNode = randomValueOtherThan(nodeName, () -> randomFrom(nodes));
List<Node> allNodes = restClient.getNodes();
try {
restClient.setNodes(
Collections.singletonList(
new Node(
HttpHost.create(
internalCluster().getInstance(HttpServerTransport.class, viaNode)
.boundAddress()
.publishAddress()
.toString()
)
)
)
);
Response deleteResponse = restClient.performRequest(new Request("DELETE", "/_cluster/voting_config_exclusions"));
assertThat(deleteResponse.getStatusLine().getStatusCode(), is(200));
ClusterHealthResponse clusterHealthResponse = client(viaNode).admin()
.cluster()
.prepareHealth(TEST_REQUEST_TIMEOUT)
.setWaitForEvents(Priority.LANGUID)
.setWaitForNodes(Integer.toString(1))
.setTimeout(TimeValue.timeValueSeconds(30L))
.setWaitForYellowStatus()
.get();
assertFalse(nodeName, clusterHealthResponse.isTimedOut());
return Settings.EMPTY;
} finally {
restClient.setNodes(allNodes);
}
}
});
ensureStableCluster(2);
ensureGreen("test");
assertThat(internalCluster().size(), is(2));
}
public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
final Request request = new Request("POST", "/_cluster/voting_config_exclusions");
request.addParameter("node_names", nodes.get(2));
final Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
assertThat(response.getEntity().getContentLength(), is(0L));
Response deleteResponse = restClient.performRequest(
new Request("DELETE", "/_cluster/voting_config_exclusions/?wait_for_removal=false")
);
assertThat(deleteResponse.getStatusLine().getStatusCode(), is(200));
assertThat(deleteResponse.getEntity().getContentLength(), is(0L));
}
public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
String nodeToWithdraw = nodes.get(randomIntBetween(0, 2));
final Request request = new Request("POST", "/_cluster/voting_config_exclusions");
request.addParameter("node_names", nodeToWithdraw);
final Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
assertThat(response.getEntity().getContentLength(), is(0L));
internalCluster().stopNode(nodeToWithdraw);
Response deleteResponse = restClient.performRequest(new Request("DELETE", "/_cluster/voting_config_exclusions"));
assertThat(deleteResponse.getStatusLine().getStatusCode(), is(200));
assertThat(deleteResponse.getEntity().getContentLength(), is(0L));
}
public void testRemoveTwoNodesAtOnce() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
final Request request = new Request("POST", "/_cluster/voting_config_exclusions");
request.addParameter("node_names", nodes.get(2) + "," + nodes.get(0));
final Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
assertThat(response.getEntity().getContentLength(), is(0L));
internalCluster().stopNode(nodes.get(0));
internalCluster().stopNode(nodes.get(2));
ensureStableCluster(1);
}
}
|
Zen2RestApiIT
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/bugs/injection/ParentTestMockInjectionTest.java
|
{
"start": 1415,
"end": 1651
}
|
class ____ {
private DaoA daoFromParent;
private DaoB daoFromSub;
public void businessMethod() {
daoFromParent.doQuery();
daoFromSub.doQuery();
}
}
public static
|
TestedSystem
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/http/server/reactive/RandomHandlerIntegrationTests.java
|
{
"start": 2501,
"end": 3497
}
|
class ____ implements HttpHandler {
static final int CHUNKS = 16;
@Override
public Mono<Void> handle(ServerHttpRequest request, ServerHttpResponse response) {
Mono<Integer> requestSizeMono = request.getBody().
reduce(0, (integer, dataBuffer) -> integer +
dataBuffer.readableByteCount()).
doOnNext(size -> assertThat(size).isEqualTo(REQUEST_SIZE)).
doOnError(throwable -> assertThat(throwable).isNull());
response.getHeaders().setContentLength(RESPONSE_SIZE);
return requestSizeMono.then(response.writeWith(multipleChunks()));
}
private Publisher<DataBuffer> multipleChunks() {
int chunkSize = RESPONSE_SIZE / CHUNKS;
return Flux.range(1, CHUNKS).map(integer -> randomBuffer(chunkSize));
}
private DataBuffer randomBuffer(int size) {
byte[] bytes = new byte[size];
rnd.nextBytes(bytes);
DataBuffer buffer = DefaultDataBufferFactory.sharedInstance.allocateBuffer(size);
buffer.write(bytes);
return buffer;
}
}
}
|
RandomHandler
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java
|
{
"start": 563,
"end": 1316
}
|
class ____ {
final byte[] bytes;
private final int hashCode;
public BytesKey(byte[] bytes) {
this.bytes = bytes;
this.hashCode = StringHelper.murmurhash3_x86_32(bytes, 0, bytes.length, StringHelper.GOOD_FAST_HASH_SEED);
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other instanceof BytesKey == false) {
return false;
}
BytesKey otherBytes = (BytesKey) other;
return Arrays.equals(otherBytes.bytes, bytes);
}
@Override
public String toString() {
return new BytesRef(bytes).toString();
}
}
|
BytesKey
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/task/ForegroundTask.java
|
{
"start": 1554,
"end": 7813
}
|
class ____ extends AbstractTaskBuilder<ForegroundTask> {
private String name;
private IterationBudget budget;
/**
* Sets the name of the task
*
* @param name the name
* @return an instance of this builder
*/
@Override
public ForegroundTaskBuilder withName(String name) {
this.name = name;
return this;
}
/**
* Sets an iteration budget for the task (i.e.: the task will not run more than the given number of iterations)
*
* @param budget the budget
* @return an instance of this builder
*/
public ForegroundTaskBuilder withBudget(IterationBudget budget) {
this.budget = budget;
return this;
}
@Override
public ForegroundTask build() {
return new ForegroundTask(budget, name != null ? name : getName());
}
}
private static final Logger LOG = LoggerFactory.getLogger(ForegroundTask.class);
private final IterationBudget budget;
private Duration elapsed = Duration.ZERO;
private final AtomicBoolean running = new AtomicBoolean();
ForegroundTask(IterationBudget budget, String name) {
super(name);
this.budget = budget;
}
@Override
public boolean run(CamelContext camelContext, BooleanSupplier supplier) {
running.set(true);
boolean completed = false;
TaskManagerRegistry registry = null;
if (camelContext != null) {
registry = PluginHelper.getTaskManagerRegistry(camelContext.getCamelContextExtension());
}
if (registry != null) {
registry.addTask(this);
}
try {
if (budget.initialDelay() > 0) {
Thread.sleep(budget.initialDelay());
}
while (budget.next()) {
lastAttemptTime = System.currentTimeMillis();
if (firstAttemptTime < 0) {
firstAttemptTime = lastAttemptTime;
}
nextAttemptTime = lastAttemptTime + budget.interval();
if (doRun(supplier)) {
LOG.debug("Task {} is complete after {} iterations and it is ready to continue",
getName(), budget.iteration());
status = Status.Completed;
completed = true;
break;
}
if (budget.canContinue()) {
Thread.sleep(budget.interval());
} else {
status = Status.Exhausted;
}
}
} catch (InterruptedException e) {
LOG.warn("Interrupted {} while waiting for the repeatable task to finish", getName());
Thread.currentThread().interrupt();
} catch (Exception e) {
status = Status.Failed;
cause = e;
throw e;
} finally {
elapsed = budget.elapsed();
running.set(false);
if (registry != null) {
registry.removeTask(this);
}
}
return completed;
}
protected boolean doRun(BooleanSupplier supplier) {
try {
cause = null;
return supplier.getAsBoolean();
} catch (TaskRunFailureException e) {
LOG.debug("Task {} failed at {} iterations and will attempt again on next interval: {}",
getName(), budget.iteration(), e.getMessage());
cause = e;
return false;
}
}
/**
* Run a task until it produces a result
*
* @param camelContext the camel context
* @param supplier the supplier of the result
* @param predicate a predicate to test if the result is acceptable
* @param <T> the type for the result
* @return An optional with the result
*/
public <T> Optional<T> run(CamelContext camelContext, Supplier<T> supplier, Predicate<T> predicate) {
running.set(true);
TaskManagerRegistry registry = null;
if (camelContext != null) {
registry = PluginHelper.getTaskManagerRegistry(camelContext.getCamelContextExtension());
}
if (registry != null) {
registry.addTask(this);
}
try {
if (budget.initialDelay() > 0) {
Thread.sleep(budget.initialDelay());
}
while (budget.next()) {
lastAttemptTime = System.currentTimeMillis();
if (firstAttemptTime < 0) {
firstAttemptTime = lastAttemptTime;
}
T ret = supplier.get();
if (predicate.test(ret)) {
LOG.debug("Task {} is complete after {} iterations and it is ready to continue",
getName(), budget.iteration());
status = Status.Completed;
return Optional.ofNullable(ret);
}
nextAttemptTime = lastAttemptTime + budget.interval();
if (budget.canContinue()) {
Thread.sleep(budget.interval());
} else {
status = Status.Exhausted;
}
}
} catch (InterruptedException e) {
LOG.warn("Interrupted {} while waiting for the repeatable task to finish", getName());
Thread.currentThread().interrupt();
} catch (Exception e) {
status = Status.Failed;
cause = e;
} finally {
elapsed = budget.elapsed();
running.set(false);
if (registry != null) {
registry.removeTask(this);
}
}
return Optional.empty();
}
@Override
public boolean isRunning() {
return running.get();
}
@Override
public Duration elapsed() {
return elapsed;
}
@Override
public int iteration() {
return budget.iteration();
}
@Override
public long getCurrentDelay() {
return budget.interval();
}
}
|
ForegroundTaskBuilder
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/connection/SmartConnectionFactory.java
|
{
"start": 930,
"end": 1238
}
|
interface ____ extends ConnectionFactory {
/**
* Should we stop the Connection, obtained from this ConnectionFactory?
* @param con the Connection to check
* @return whether a stop call is necessary
* @see jakarta.jms.Connection#stop()
*/
boolean shouldStop(Connection con);
}
|
SmartConnectionFactory
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/proxy/jdbc/DataSourceProxyImpl.java
|
{
"start": 1341,
"end": 14943
}
|
class ____ implements DataSourceProxy, DataSourceProxyImplMBean {
private final Driver rawDriver;
private final DataSourceProxyConfig config;
private long id;
private final long createdTimeMillis = System.currentTimeMillis();
private Properties properties;
private String dbType;
private final AtomicLong connectionIdSeed = new AtomicLong(10000);
private final AtomicLong statementIdSeed = new AtomicLong(20000);
private final AtomicLong resultSetIdSeed = new AtomicLong(50000);
private final AtomicLong metaDataIdSeed = new AtomicLong(100000);
private final AtomicLong transactionIdSeed = new AtomicLong(0);
private final JdbcDataSourceStat dataSourceStat;
public DataSourceProxyImpl(Driver rawDriver, DataSourceProxyConfig config) {
super();
this.rawDriver = rawDriver;
this.config = config;
this.dbType = JdbcUtils.getDbType(config.getRawUrl(), config.getRawDriverClassName());
this.dataSourceStat = new JdbcDataSourceStat(config.getName(), config.getUrl(), dbType);
}
public String getDbType() {
return dbType;
}
public Driver getRawDriver() {
return this.rawDriver;
}
public String getRawUrl() {
return config.getRawUrl();
}
public ConnectionProxy connect(Properties info) throws SQLException {
this.properties = info;
PasswordCallback passwordCallback = this.config.getPasswordCallback();
if (passwordCallback != null) {
char[] chars = passwordCallback.getPassword();
String password = new String(chars);
info.put("password", password);
}
NameCallback userCallback = this.config.getUserCallback();
if (userCallback != null) {
String user = userCallback.getName();
info.put("user", user);
}
FilterChain chain = new FilterChainImpl(this);
return chain.connection_connect(info);
}
public DataSourceProxyConfig getConfig() {
return config;
}
public long getDataSourceId() {
return getId();
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
@Override
public String getName() {
return this.config.getName();
}
@Override
public String getUrl() {
return config.getUrl();
}
public List<Filter> getProxyFilters() {
return config.getFilters();
}
@Override
public String[] getFilterClasses() {
List<Filter> filterConfigList = config.getFilters();
List<String> classes = new ArrayList<String>();
for (Filter filter : filterConfigList) {
classes.add(filter.getClass().getName());
}
return classes.toArray(new String[classes.size()]);
}
@Override
public String getRawDriverClassName() {
return config.getRawDriverClassName();
}
@Override
public Date getCreatedTime() {
return new Date(createdTimeMillis);
}
@Override
public int getRawDriverMajorVersion() {
return rawDriver.getMajorVersion();
}
@Override
public int getRawDriverMinorVersion() {
return rawDriver.getMinorVersion();
}
public String getDataSourceMBeanDomain() {
String name = this.config.getName();
if (name != null && name.length() != 0) {
return name;
}
return "java.sql.dataSource_" + System.identityHashCode(this);
}
public String getProperties() {
if (this.properties == null) {
return null;
}
Properties properties = new Properties(this.properties);
if (properties.contains("password")) {
properties.put("password", "******");
}
return properties.toString();
}
public Properties getConnectProperties() {
return properties;
}
public CompositeDataSupport getCompositeData() throws JMException {
JdbcDataSourceStat stat = this.getDataSourceStat();
Map<String, Object> map = new HashMap<String, Object>();
map.put("ID", id);
map.put("URL", this.getUrl());
map.put("Name", this.getName());
map.put("FilterClasses", getFilterClasses());
map.put("CreatedTime", getCreatedTime());
map.put("RawDriverClassName", getRawDriverClassName());
map.put("RawUrl", getRawUrl());
map.put("RawDriverMajorVersion", getRawDriverMajorVersion());
map.put("RawDriverMinorVersion", getRawDriverMinorVersion());
map.put("Properties", getProperties());
if (stat != null) {
map.put("ConnectionActiveCount", stat.getConnectionActiveCount());
map.put("ConnectionActiveCountMax", stat.getConnectionStat().getActiveMax());
map.put("ConnectionCloseCount", stat.getConnectionStat().getCloseCount());
map.put("ConnectionCommitCount", stat.getConnectionStat().getCommitCount());
map.put("ConnectionRollbackCount", stat.getConnectionStat().getRollbackCount());
map.put("ConnectionConnectLastTime", stat.getConnectionStat().getConnectLastTime());
map.put("ConnectionConnectErrorCount", stat.getConnectionStat().getConnectErrorCount());
Throwable lastConnectionConnectError = stat.getConnectionStat().getConnectErrorLast();
if (lastConnectionConnectError != null) {
map.put("ConnectionConnectErrorLastTime", stat.getConnectionStat().getErrorLastTime());
map.put("ConnectionConnectErrorLastMessage", lastConnectionConnectError.getMessage());
map.put("ConnectionConnectErrorLastStackTrace", Utils.getStackTrace(lastConnectionConnectError));
} else {
map.put("ConnectionConnectErrorLastTime", null);
map.put("ConnectionConnectErrorLastMessage", null);
map.put("ConnectionConnectErrorLastStackTrace", null);
}
map.put("StatementCreateCount", stat.getStatementStat().getCreateCount());
map.put("StatementPrepareCount", stat.getStatementStat().getPrepareCount());
map.put("StatementPreCallCount", stat.getStatementStat().getPrepareCallCount());
map.put("StatementExecuteCount", stat.getStatementStat().getExecuteCount());
map.put("StatementRunningCount", stat.getStatementStat().getRunningCount());
map.put("StatementConcurrentMax", stat.getStatementStat().getConcurrentMax());
map.put("StatementCloseCount", stat.getStatementStat().getCloseCount());
map.put("StatementErrorCount", stat.getStatementStat().getErrorCount());
Throwable lastStatementError = stat.getStatementStat().getLastException();
if (lastStatementError != null) {
map.put("StatementLastErrorTime", stat.getStatementStat().getLastErrorTime());
map.put("StatementLastErrorMessage", lastStatementError.getMessage());
map.put("StatementLastErrorStackTrace", Utils.getStackTrace(lastStatementError));
} else {
map.put("StatementLastErrorTime", null);
map.put("StatementLastErrorMessage", null);
map.put("StatementLastErrorStackTrace", null);
}
map.put("StatementExecuteMillisTotal", stat.getStatementStat().getMillisTotal());
map.put("StatementExecuteLastTime", stat.getStatementStat().getExecuteLastTime());
map.put("ConnectionConnectingCount", stat.getConnectionStat().getConnectingCount());
map.put("ResultSetCloseCount", stat.getResultSetStat().getCloseCount());
map.put("ResultSetOpenCount", stat.getResultSetStat().getOpenCount());
map.put("ResultSetOpenningCount", stat.getResultSetStat().getOpeningCount());
map.put("ResultSetOpenningMax", stat.getResultSetStat().getOpeningMax());
map.put("ResultSetFetchRowCount", stat.getResultSetStat().getFetchRowCount());
map.put("ResultSetLastOpenTime", stat.getResultSetStat().getLastOpenTime());
map.put("ResultSetErrorCount", stat.getResultSetStat().getErrorCount());
map.put("ResultSetOpenningMillisTotal", stat.getResultSetStat().getAliveMillisTotal());
map.put("ResultSetLastErrorTime", stat.getResultSetStat().getLastErrorTime());
Throwable lastResultSetError = stat.getResultSetStat().getLastError();
if (lastResultSetError != null) {
map.put("ResultSetLastErrorMessage", lastResultSetError.getMessage());
map.put("ResultSetLastErrorStackTrace", Utils.getStackTrace(lastResultSetError));
} else {
map.put("ResultSetLastErrorMessage", null);
map.put("ResultSetLastErrorStackTrace", null);
}
map.put("ConnectionConnectCount", stat.getConnectionStat().getConnectCount());
Throwable lastConnectionError = stat.getConnectionStat().getErrorLast();
if (lastConnectionError != null) {
map.put("ConnectionErrorLastMessage", lastConnectionError.getMessage());
map.put("ConnectionErrorLastStackTrace", Utils.getStackTrace(lastConnectionError));
} else {
map.put("ConnectionErrorLastMessage", null);
map.put("ConnectionErrorLastStackTrace", null);
}
map.put("ConnectionConnectMillisTotal", stat.getConnectionStat().getConnectMillis());
map.put("ConnectionConnectingCountMax", stat.getConnectionStat().getConnectingMax());
map.put("ConnectionConnectMillisMax", stat.getConnectionStat().getConnectMillisMax());
map.put("ConnectionErrorLastTime", stat.getConnectionStat().getErrorLastTime());
map.put("ConnectionAliveMillisMax", stat.getConnectionConnectAliveMillisMax());
map.put("ConnectionAliveMillisMin", stat.getConnectionConnectAliveMillisMin());
map.put("ConnectionHistogram", stat.getConnectionHistogramValues());
map.put("StatementHistogram", stat.getStatementStat().getHistogramValues());
} else {
map.put("ConnectionActiveCount", null);
map.put("ConnectionActiveCountMax", null);
map.put("ConnectionCloseCount", null);
map.put("ConnectionCommitCount", null);
map.put("ConnectionRollbackCount", null);
map.put("ConnectionConnectLastTime", null);
map.put("ConnectionConnectErrorCount", null);
map.put("ConnectionConnectErrorLastTime", null);
map.put("ConnectionConnectErrorLastMessage", null);
map.put("ConnectionConnectErrorLastStackTrace", null);
map.put("StatementCreateCount", null);
map.put("StatementPrepareCount", null);
map.put("StatementPreCallCount", null);
map.put("StatementExecuteCount", null);
map.put("StatementRunningCount", null);
map.put("StatementConcurrentMax", null);
map.put("StatementCloseCount", null);
map.put("StatementErrorCount", null);
map.put("StatementLastErrorTime", null);
map.put("StatementLastErrorMessage", null);
map.put("StatementLastErrorStackTrace", null);
map.put("StatementExecuteMillisTotal", null);
map.put("ConnectionConnectingCount", null);
map.put("StatementExecuteLastTime", null);
map.put("ResultSetCloseCount", null);
map.put("ResultSetOpenCount", null);
map.put("ResultSetOpenningCount", null);
map.put("ResultSetOpenningMax", null);
map.put("ResultSetFetchRowCount", null);
map.put("ResultSetLastOpenTime", null);
map.put("ResultSetErrorCount", null);
map.put("ResultSetOpenningMillisTotal", null);
map.put("ResultSetLastErrorTime", null);
map.put("ResultSetLastErrorMessage", null);
map.put("ResultSetLastErrorStackTrace", null);
map.put("ConnectionConnectCount", null);
map.put("ConnectionErrorLastMessage", null);
map.put("ConnectionErrorLastStackTrace", null);
map.put("ConnectionConnectMillisTotal", null);
map.put("ConnectionConnectingCountMax", null);
map.put("ConnectionConnectMillisMax", null);
map.put("ConnectionErrorLastTime", null);
map.put("ConnectionAliveMillisMax", null);
map.put("ConnectionAliveMillisMin", null);
map.put("ConnectionHistogram", new long[0]);
map.put("StatementHistogram", new long[0]);
}
return new CompositeDataSupport(JdbcStatManager.getDataSourceCompositeType(), map);
}
@Override
public String getRawJdbcUrl() {
return config.getRawUrl();
}
public long createConnectionId() {
return connectionIdSeed.incrementAndGet();
}
public long createStatementId() {
return statementIdSeed.getAndIncrement();
}
public long createResultSetId() {
return resultSetIdSeed.getAndIncrement();
}
public long createMetaDataId() {
return metaDataIdSeed.getAndIncrement();
}
@Override
public long createTransactionId() {
return transactionIdSeed.getAndIncrement();
}
public JdbcDataSourceStat getDataSourceStat() {
return dataSourceStat;
}
}
|
DataSourceProxyImpl
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/connection/ConnectionCloseTest.java
|
{
"start": 783,
"end": 1898
}
|
class ____ {
@RegisterExtension
public static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot(root -> {
root.addClasses(Closing.class, ClosingBlocking.class, WSClient.class);
});
@Inject
Vertx vertx;
@TestHTTPResource("closing")
URI closingUri;
@TestHTTPResource("closing-blocking")
URI closingBlockingUri;
@Test
public void testClosed() throws InterruptedException {
assertClosed(closingUri);
assertTrue(Closing.CLOSED.await(5, TimeUnit.SECONDS));
}
@Test
public void testClosedBlocking() throws InterruptedException {
assertClosed(closingBlockingUri);
assertTrue(ClosingBlocking.CLOSED.await(5, TimeUnit.SECONDS));
}
private void assertClosed(URI testUri) throws InterruptedException {
WSClient client = WSClient.create(vertx).connect(testUri);
client.sendAndAwait("foo");
Awaitility.await().atMost(5, TimeUnit.SECONDS).until(() -> client.isClosed());
}
@WebSocket(path = "/closing")
public static
|
ConnectionCloseTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java
|
{
"start": 1196,
"end": 9337
}
|
class ____ extends AbstractXContentSerializingTestCase<Request> {
@Override
protected Request doParseInstance(XContentParser parser) throws IOException {
return Request.parseRequest(null, null, parser);
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return Request::new;
}
@Override
protected Request createTestInstance() {
return createRandom();
}
@Override
protected Request mutateInstance(Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
public static Request createRandom() {
boolean deploymemtIdSameAsModelId = randomBoolean();
String modelId = randomAlphaOfLength(10);
Request request = new Request(modelId, deploymemtIdSameAsModelId ? modelId : randomAlphaOfLength(10));
if (randomBoolean()) {
request.setTimeout(randomPositiveTimeValue());
}
if (randomBoolean()) {
request.setWaitForState(randomFrom(AllocationStatus.State.values()));
}
if (randomBoolean()) {
request.setThreadsPerAllocation(randomFrom(1, 2, 4, 8, 16, 32));
}
if (randomBoolean()) {
request.setNumberOfAllocations(randomIntBetween(1, 8));
}
if (randomBoolean()) {
request.setQueueCapacity(randomIntBetween(1, 100_000));
}
if (randomBoolean()) {
request.setPriority(randomFrom(Priority.values()).toString());
if ((request.getNumberOfAllocations() != null && request.getNumberOfAllocations() > 1)
|| request.getThreadsPerAllocation() > 1) {
request.setPriority(Priority.NORMAL.toString());
}
}
return request;
}
public void testValidate_GivenThreadsPerAllocationIsZero() {
Request request = createRandom();
request.setThreadsPerAllocation(0);
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[threads_per_allocation] must be a positive integer"));
}
public void testValidate_GivenThreadsPerAllocationIsNegative() {
Request request = createRandom();
request.setThreadsPerAllocation(randomIntBetween(-100, -1));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[threads_per_allocation] must be a positive integer"));
}
public void testValidate_GivenThreadsPerAllocationIsNotPowerOf2() {
Set<Integer> powersOf2 = IntStream.range(0, 10).map(n -> (int) Math.pow(2, n)).boxed().collect(Collectors.toSet());
List<Integer> input = IntStream.range(1, 33).filter(n -> powersOf2.contains(n) == false).boxed().toList();
for (int n : input) {
Request request = createRandom();
request.setThreadsPerAllocation(n);
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[threads_per_allocation] must be a power of 2 less than or equal to 32"));
}
}
public void testValidate_GivenThreadsPerAllocationIsValid() {
for (int n : List.of(1, 2, 4, 8, 16, 32)) {
Request request = createRandom();
request.setPriority(Priority.NORMAL.toString());
request.setThreadsPerAllocation(n);
ActionRequestValidationException e = request.validate();
assertThat(e, is(nullValue()));
}
}
public void testValidate_GivenNumberOfAllocationsIsZero() {
Request request = createRandom();
request.setNumberOfAllocations(0);
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[number_of_allocations] must be a positive integer"));
}
public void testValidate_GivenNumberOfAllocationsIsNegative() {
Request request = createRandom();
request.setNumberOfAllocations(randomIntBetween(-100, -1));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[number_of_allocations] must be a positive integer"));
}
public void testValidate_GivenQueueCapacityIsZero() {
Request request = createRandom();
request.setQueueCapacity(0);
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[queue_capacity] must be a positive integer"));
}
public void testValidate_GivenQueueCapacityIsNegative() {
Request request = createRandom();
request.setQueueCapacity(randomIntBetween(Integer.MIN_VALUE, -1));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[queue_capacity] must be a positive integer"));
}
public void testValidate_GivenQueueCapacityIsAtLimit() {
Request request = createRandom();
request.setQueueCapacity(100_000);
ActionRequestValidationException e = request.validate();
assertThat(e, is(nullValue()));
}
public void testValidate_GivenQueueCapacityIsOverLimit() {
Request request = createRandom();
request.setQueueCapacity(100_001);
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 100000"));
}
public void testValidate_GivenTimeoutIsNegative() {
Request request = createRandom();
request.setTimeout(TimeValue.parseTimeValue("-1s", "timeout"));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[timeout] must be positive"));
}
public void testValidate_GivenTimeoutIsZero() {
Request request = createRandom();
request.setTimeout(TimeValue.parseTimeValue("0s", "timeout"));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[timeout] must be positive"));
}
public void testValidate_GivenLowPriorityAndMultipleThreadsPerAllocation() {
Request request = createRandom();
request.setPriority(Priority.LOW.toString());
request.setThreadsPerAllocation(randomFrom(2, 4, 8, 16, 32));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[threads_per_allocation] must be 1 when [priority] is low"));
}
public void testValidate_GivenLowPriorityAndMultipleAllocations() {
Request request = createRandom();
request.setPriority(Priority.LOW.toString());
request.setNumberOfAllocations(randomIntBetween(2, 32));
ActionRequestValidationException e = request.validate();
assertThat(e, is(not(nullValue())));
assertThat(e.getMessage(), containsString("[number_of_allocations] must be 1 when [priority] is low"));
}
public void testDefaults() {
Request request = new Request(randomAlphaOfLength(10), randomAlphaOfLength(10));
assertThat(request.getTimeout(), equalTo(TimeValue.timeValueSeconds(30)));
assertThat(request.getWaitForState(), equalTo(AllocationStatus.State.STARTED));
assertThat(request.getNumberOfAllocations(), nullValue());
assertThat(request.computeNumberOfAllocations(), equalTo(1));
assertThat(request.getThreadsPerAllocation(), equalTo(1));
assertThat(request.getQueueCapacity(), equalTo(10_000));
}
}
|
StartTrainedModelDeploymentRequestTests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
|
{
"start": 3103,
"end": 4333
}
|
class ____ implements Comparable<Op> {
private final String opType;
private final List<User> users;
private final long totalCount;
private final int limit;
public Op(String opType, UserCounts users, int limit) {
this.opType = opType;
this.users = new ArrayList<>(users);
this.users.sort(Collections.reverseOrder());
this.totalCount = users.getTotal();
this.limit = limit;
}
public String getOpType() {
return opType;
}
public List<User> getAllUsers() {
return users;
}
public List<User> getTopUsers() {
return (users.size() > limit) ? users.subList(0, limit) : users;
}
public long getTotalCount() {
return totalCount;
}
@Override
public int compareTo(Op other) {
return Long.signum(totalCount - other.totalCount);
}
@Override
public boolean equals(Object o) {
return (o instanceof Op) && totalCount == ((Op)o).totalCount;
}
@Override
public int hashCode() {
return opType.hashCode();
}
}
/**
* Represents a user who called an Op within a TopWindow. Specifies the
* user and the number of times the user called the operation.
*/
public static
|
Op
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RBlockingQueueRx.java
|
{
"start": 9554,
"end": 10135
}
|
class ____ the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
* @return void
*/
Completable put(V e);
/**
* Retrieves and removes continues stream of elements from the head of this queue.
* Waits for next element become available.
*
* @return stream of elements
*/
Flowable<V> takeElements();
}
|
of
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/a/ClassHavingNestedClass.java
|
{
"start": 746,
"end": 865
}
|
class ____ that the package name is a single
* character (i.e., length of 1).
*
* @author Johnny Lim
*/
public
|
requires
|
java
|
alibaba__nacos
|
common/src/main/java/com/alibaba/nacos/common/packagescan/resource/AbstractResource.java
|
{
"start": 4667,
"end": 4950
}
|
class ____.
*/
@Override
public ReadableByteChannel readableChannel() throws IOException {
return Channels.newChannel(getInputStream());
}
/**
* This method reads the entire InputStream to determine the content length.
* For a custom sub-
|
hierarchy
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/JpaqlStrictComplianceAliasTest.java
|
{
"start": 1499,
"end": 1982
}
|
class ____ {
@Id
private Long id;
private String name;
private String stockNumber;
public Part() {
}
public Long getId() {
return id;
}
private void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getStockNumber() {
return stockNumber;
}
public void setStockNumber(String stockNumber) {
this.stockNumber = stockNumber;
}
}
}
|
Part
|
java
|
square__retrofit
|
retrofit-mock/src/test/java/retrofit2/mock/MockRetrofitTest.java
|
{
"start": 267,
"end": 2449
}
|
class ____ {
private final Retrofit retrofit = new Retrofit.Builder().baseUrl("http://example.com").build();
private final NetworkBehavior behavior = NetworkBehavior.create();
private final ExecutorService executor = Executors.newSingleThreadExecutor();
@Test
public void retrofitNullThrows() {
try {
new MockRetrofit.Builder(null);
fail();
} catch (NullPointerException e) {
assertThat(e).hasMessageThat().isEqualTo("retrofit == null");
}
}
@Test
public void retrofitPropagated() {
MockRetrofit mockRetrofit = new MockRetrofit.Builder(retrofit).build();
assertThat(mockRetrofit.retrofit()).isSameInstanceAs(retrofit);
}
@Test
public void networkBehaviorNullThrows() {
MockRetrofit.Builder builder = new MockRetrofit.Builder(retrofit);
try {
builder.networkBehavior(null);
fail();
} catch (NullPointerException e) {
assertThat(e).hasMessageThat().isEqualTo("behavior == null");
}
}
@Test
public void networkBehaviorDefault() {
MockRetrofit mockRetrofit = new MockRetrofit.Builder(retrofit).build();
assertThat(mockRetrofit.networkBehavior()).isNotNull();
}
@Test
public void networkBehaviorPropagated() {
MockRetrofit mockRetrofit =
new MockRetrofit.Builder(retrofit).networkBehavior(behavior).build();
assertThat(mockRetrofit.networkBehavior()).isSameInstanceAs(behavior);
}
@Test
public void backgroundExecutorNullThrows() {
MockRetrofit.Builder builder = new MockRetrofit.Builder(retrofit);
try {
builder.backgroundExecutor(null);
fail();
} catch (NullPointerException e) {
assertThat(e).hasMessageThat().isEqualTo("executor == null");
}
}
@Test
public void backgroundExecutorDefault() {
MockRetrofit mockRetrofit = new MockRetrofit.Builder(retrofit).build();
assertThat(mockRetrofit.backgroundExecutor()).isNotNull();
}
@Test
public void backgroundExecutorPropagated() {
MockRetrofit mockRetrofit =
new MockRetrofit.Builder(retrofit).backgroundExecutor(executor).build();
assertThat(mockRetrofit.backgroundExecutor()).isSameInstanceAs(executor);
}
}
|
MockRetrofitTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskStateManager.java
|
{
"start": 2602,
"end": 5798
}
|
interface ____ extends CheckpointListener, AutoCloseable {
void reportInitializationMetrics(SubTaskInitializationMetrics subTaskInitializationMetrics);
/**
* Report the state snapshots for the operator instances running in the owning task.
*
* @param checkpointMetaData meta data from the checkpoint request.
* @param checkpointMetrics task level metrics for the checkpoint.
* @param acknowledgedState the reported states to acknowledge to the job manager.
* @param localState the reported states for local recovery.
*/
void reportTaskStateSnapshots(
@Nonnull CheckpointMetaData checkpointMetaData,
@Nonnull CheckpointMetrics checkpointMetrics,
@Nullable TaskStateSnapshot acknowledgedState,
@Nullable TaskStateSnapshot localState);
InflightDataRescalingDescriptor getInputRescalingDescriptor();
InflightDataRescalingDescriptor getOutputRescalingDescriptor();
/**
* Report the stats for state snapshots for an aborted checkpoint.
*
* @param checkpointMetaData meta data from the checkpoint request.
* @param checkpointMetrics task level metrics for the checkpoint.
*/
void reportIncompleteTaskStateSnapshots(
CheckpointMetaData checkpointMetaData, CheckpointMetrics checkpointMetrics);
/** Whether all the operators of the task are finished on restore. */
boolean isTaskDeployedAsFinished();
/** Acquires the checkpoint id to restore from. */
Optional<Long> getRestoreCheckpointId();
/**
* Returns means to restore previously reported state of an operator running in the owning task.
*
* @param operatorID the id of the operator for which we request state.
* @return Previous state for the operator. The previous state can be empty if the operator had
* no previous state.
*/
@Nonnull
PrioritizedOperatorSubtaskState prioritizedOperatorState(OperatorID operatorID);
/**
* Get the restored state from jobManager which belongs to an operator running in the owning
* task.
*
* @param operatorID the id of the operator for which we request state.
* @return the subtask restored state from jobManager.
*/
Optional<OperatorSubtaskState> getSubtaskJobManagerRestoredState(OperatorID operatorID);
/**
* Returns the configuration for local recovery, i.e. the base directories for all file-based
* local state of the owning subtask and the general mode for local recovery.
*/
@Nonnull
LocalRecoveryConfig createLocalRecoveryConfig();
SequentialChannelStateReader getSequentialChannelStateReader();
/** Returns the configured state changelog storage for this task. */
@Nullable
StateChangelogStorage<?> getStateChangelogStorage();
/**
* Returns the state changelog storage view of given {@link ChangelogStateHandle} for this task.
*/
@Nullable
StateChangelogStorageView<?> getStateChangelogStorageView(
Configuration configuration, ChangelogStateHandle changelogStateHandle);
@Nullable
FileMergingSnapshotManager getFileMergingSnapshotManager();
}
|
TaskStateManager
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java
|
{
"start": 1532,
"end": 16230
}
|
class ____ extends AbstractStepTestCase<WaitForActiveShardsStep> {
@Override
public WaitForActiveShardsStep createRandomInstance() {
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
return new WaitForActiveShardsStep(stepKey, nextStepKey);
}
@Override
public WaitForActiveShardsStep mutateInstance(WaitForActiveShardsStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
switch (between(0, 1)) {
case 0 -> key = new StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new WaitForActiveShardsStep(key, nextKey);
}
@Override
public WaitForActiveShardsStep copyInstance(WaitForActiveShardsStep instance) {
return new WaitForActiveShardsStep(instance.getKey(), instance.getNextStepKey());
}
public void testIsConditionMetThrowsExceptionWhenRolloverAliasIsNotSet() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomUniqueProjectId()).put(indexMetadata, false));
try {
createRandomInstance().isConditionMet(indexMetadata.getIndex(), state);
fail("expected the invocation to fail");
} catch (IllegalStateException e) {
assertThat(
e.getMessage(),
is(
"setting ["
+ RolloverAction.LIFECYCLE_ROLLOVER_ALIAS
+ "] is not set on index ["
+ indexMetadata.getIndex().getName()
+ "]"
)
);
}
}
public void testResultEvaluatedOnWriteIndexAliasWhenExists() {
String alias = randomAlphaOfLength(5);
IndexMetadata originalIndex = IndexMetadata.builder("index-000000")
.putAlias(AliasMetadata.builder(alias).writeIndex(false))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(1)
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata rolledIndex = IndexMetadata.builder("index-000001")
.putAlias(AliasMetadata.builder(alias).writeIndex(true))
.settings(
settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put("index.write.wait_for_active_shards", "all")
)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
IndexRoutingTable.Builder routingTable = new IndexRoutingTable.Builder(
ShardRoutingRoleStrategy.NO_SHARD_CREATION,
rolledIndex.getIndex()
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node", null, true, ShardRoutingState.STARTED)
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node2", null, false, ShardRoutingState.STARTED)
);
var project = ProjectMetadata.builder(randomProjectIdOrDefault()).put(originalIndex, false).put(rolledIndex, false).build();
ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(project)
.putRoutingTable(project.id(), RoutingTable.builder().add(routingTable.build()).build())
.build()
.projectState(project.id());
assertThat(
"the rolled index has both the primary and the replica shards started so the condition should be met",
createRandomInstance().isConditionMet(originalIndex.getIndex(), state).complete(),
is(true)
);
}
public void testResultEvaluatedOnOnlyIndexTheAliasPointsToIfWriteIndexIsNull() {
String alias = randomAlphaOfLength(5);
IndexMetadata originalIndex = IndexMetadata.builder("index-000000")
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(1)
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata rolledIndex = IndexMetadata.builder("index-000001")
.putAlias(AliasMetadata.builder(alias).writeIndex(false))
.settings(
settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put("index.write.wait_for_active_shards", "all")
)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
IndexRoutingTable.Builder routingTable = new IndexRoutingTable.Builder(
ShardRoutingRoleStrategy.NO_SHARD_CREATION,
rolledIndex.getIndex()
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node", null, true, ShardRoutingState.STARTED)
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node2", null, false, ShardRoutingState.STARTED)
);
var project = ProjectMetadata.builder(randomProjectIdOrDefault()).put(originalIndex, false).put(rolledIndex, false).build();
ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(project)
.putRoutingTable(project.id(), RoutingTable.builder().add(routingTable.build()).build())
.build()
.projectState(project.id());
assertThat(
"the index the alias is pointing to has both the primary and the replica shards started so the condition should be" + " met",
createRandomInstance().isConditionMet(originalIndex.getIndex(), state).complete(),
is(true)
);
}
public void testResultEvaluatedOnDataStream() throws IOException {
String dataStreamName = "test-datastream";
long ts = System.currentTimeMillis();
IndexMetadata originalIndexMeta = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata failureOriginalIndexMeta = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata rolledIndexMeta = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()).put("index.write.wait_for_active_shards", "3"))
.numberOfShards(1)
.numberOfReplicas(3)
.build();
IndexMetadata failureRolledIndexMeta = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()).put("index.write.wait_for_active_shards", "3"))
.numberOfShards(1)
.numberOfReplicas(3)
.build();
IndexRoutingTable.Builder routingTable = new IndexRoutingTable.Builder(
ShardRoutingRoleStrategy.NO_SHARD_CREATION,
rolledIndexMeta.getIndex()
);
IndexRoutingTable.Builder failureRoutingTable = new IndexRoutingTable.Builder(
ShardRoutingRoleStrategy.NO_SHARD_CREATION,
failureRolledIndexMeta.getIndex()
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndexMeta.getIndex().getName(), 0, "node", null, true, ShardRoutingState.STARTED)
);
routingTable.addShard(
TestShardRouting.newShardRouting(rolledIndexMeta.getIndex().getName(), 0, "node2", null, false, ShardRoutingState.STARTED)
);
failureRoutingTable.addShard(
TestShardRouting.newShardRouting(failureRolledIndexMeta.getIndex().getName(), 0, "node", null, true, ShardRoutingState.STARTED)
);
failureRoutingTable.addShard(
TestShardRouting.newShardRouting(
failureRolledIndexMeta.getIndex().getName(),
0,
"node2",
null,
false,
ShardRoutingState.STARTED
)
);
final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
.put(
DataStreamTestHelper.newInstance(
dataStreamName,
List.of(originalIndexMeta.getIndex(), rolledIndexMeta.getIndex()),
List.of(failureOriginalIndexMeta.getIndex(), failureRolledIndexMeta.getIndex())
)
)
.put(originalIndexMeta, true)
.put(rolledIndexMeta, true)
.put(failureOriginalIndexMeta, true)
.put(failureRolledIndexMeta, true)
.build();
ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(project)
.putRoutingTable(project.id(), RoutingTable.builder().add(routingTable.build()).add(failureRoutingTable.build()).build())
.build()
.projectState(project.id());
WaitForActiveShardsStep waitForActiveShardsStep = createRandomInstance();
boolean useFailureStore = randomBoolean();
IndexMetadata indexToOperateOn = useFailureStore ? failureOriginalIndexMeta : originalIndexMeta;
ClusterStateWaitStep.Result result = waitForActiveShardsStep.isConditionMet(indexToOperateOn.getIndex(), state);
assertThat(result.complete(), is(false));
XContentBuilder expected = new WaitForActiveShardsStep.ActiveShardsInfo(2, "3", false).toXContent(
JsonXContent.contentBuilder(),
ToXContent.EMPTY_PARAMS
);
String actualResultAsString = Strings.toString(result.informationContext());
assertThat(actualResultAsString, is(Strings.toString(expected)));
assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active"));
}
/**
 * When the rolled-over write index requires 3 active shards but only 2 are
 * started, the step must report incomplete with a message naming both counts.
 */
public void testResultReportsMeaningfulMessage() throws IOException {
    String alias = randomAlphaOfLength(5);
    IndexMetadata originalIndex = IndexMetadata.builder("index-000000")
        .putAlias(AliasMetadata.builder(alias).writeIndex(false))
        .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
        .numberOfShards(1)
        .numberOfReplicas(randomIntBetween(0, 5))
        .build();
    // Write index demands wait_for_active_shards=3 (1 primary + 2 replicas).
    IndexMetadata rolledIndex = IndexMetadata.builder("index-000001")
        .putAlias(AliasMetadata.builder(alias).writeIndex(true))
        .settings(
            settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
                .put("index.write.wait_for_active_shards", "3")
        )
        .numberOfShards(1)
        .numberOfReplicas(2)
        .build();
    IndexRoutingTable.Builder routingTable = new IndexRoutingTable.Builder(
        ShardRoutingRoleStrategy.NO_SHARD_CREATION,
        rolledIndex.getIndex()
    );
    // Only primary + one replica are STARTED, i.e. 2 of the required 3.
    routingTable.addShard(
        TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node", null, true, ShardRoutingState.STARTED)
    );
    routingTable.addShard(
        TestShardRouting.newShardRouting(rolledIndex.getIndex().getName(), 0, "node2", null, false, ShardRoutingState.STARTED)
    );
    var project = ProjectMetadata.builder(randomProjectIdOrDefault()).put(originalIndex, false).put(rolledIndex, false).build();
    ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .putRoutingTable(project.id(), RoutingTable.builder().add(routingTable.build()).build())
        .build()
        .projectState(project.id());
    ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(originalIndex.getIndex(), state);
    assertThat(result.complete(), is(false));
    // The informational context must match ActiveShardsInfo(2 active, "3" required).
    XContentBuilder expected = new WaitForActiveShardsStep.ActiveShardsInfo(2, "3", false).toXContent(
        JsonXContent.contentBuilder(),
        ToXContent.EMPTY_PARAMS
    );
    String actualResultAsString = Strings.toString(result.informationContext());
    assertThat(actualResultAsString, is(Strings.toString(expected)));
    assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active"));
}
/**
 * If the index the step is executed for no longer exists in the project,
 * the result is incomplete and the message explains the index disappeared.
 */
public void testResultReportsErrorMessage() {
    String alias = randomAlphaOfLength(5);
    IndexMetadata rolledIndex = IndexMetadata.builder("index-000001")
        .putAlias(AliasMetadata.builder(alias).writeIndex(true))
        .settings(
            settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
                .put("index.write.wait_for_active_shards", "3")
        )
        .numberOfShards(1)
        .numberOfReplicas(2)
        .build();
    ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(rolledIndex, false));
    WaitForActiveShardsStep step = createRandomInstance();
    // "index-000000" is deliberately absent from the project metadata.
    ClusterStateWaitStep.Result result = step.isConditionMet(new Index("index-000000", UUID.randomUUID().toString()), state);
    assertThat(result.complete(), is(false));
    String actualResultAsString = Strings.toString(result.informationContext());
    assertThat(
        actualResultAsString,
        containsString(
            "[" + step.getKey().action() + "] lifecycle action for index [index-000000] executed but " + "index no longer exists"
        )
    );
}
}
|
WaitForActiveShardsTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/TestClientRMProxy.java
|
{
"start": 7057,
"end": 7657
}
|
// Test double for HadoopYarnProtoRPC that records the UserGroupInformation
// current at proxy-creation time, so the enclosing test can assert which
// user a ClientRMProxy connection was established under.
// NOTE(review): the class name is masked as "____" in this dataset row;
// the original identifier is declared elsewhere.
class ____
    extends HadoopYarnProtoRPC {
    // Last UGI observed by getProxy(); read by the surrounding test code.
    static UserGroupInformation lastCurrentUser = null;

    @Override
    public Object getProxy(Class protocol, InetSocketAddress addr,
        Configuration conf) {
        // Capture the caller's UGI, then delegate to the real implementation.
        UserGroupInformation currentUser = null;
        try {
            currentUser = UserGroupInformation.getCurrentUser();
        } catch (IOException ioe) {
            fail("Unable to get current user\n"
                + StringUtils.stringifyException(ioe));
        }
        lastCurrentUser = currentUser;
        return super.getProxy(protocol, addr, conf);
    }
}
}
|
UGICapturingHadoopYarnProtoRPC
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/RedissonStreamTest.java
|
{
"start": 541,
"end": 54476
}
|
class ____ extends RedisDockerTest {
/**
 * A {@link StreamAddListener} registered on a stream fires when an entry is
 * added. Requires keyspace event notifications enabled with flags "Et".
 */
@Test
public void testAddListener() {
    testWithParams(redisson -> {
        RStream<String, String> ss = redisson.getStream("test");
        ss.createGroup(StreamCreateGroupArgs.name("test-group").makeStream());
        CountDownLatch latch = new CountDownLatch(1);
        // The callback is driven by the keyspace-event subscription.
        ss.addListener(new StreamAddListener() {
            @Override
            public void onAdd(String name) {
                latch.countDown();
            }
        });
        ss.add(StreamAddArgs.entry("test1", "test2"));
        try {
            // Allow up to 1s for the asynchronous notification to arrive.
            assertThat(latch.await(1, TimeUnit.SECONDS)).isTrue();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }, NOTIFY_KEYSPACE_EVENTS, "Et");
}
/** A key backed by a Redis stream must report {@link RType#STREAM}. */
@Test
public void testType() {
    RStream<Object, Object> s = redisson.getStream("stream");
    s.createGroup(StreamCreateGroupArgs.name("group").makeStream());
    s.add(StreamAddArgs.entry("key", "value"));
    RType reported = redisson.getKeys().getType("stream");
    assertThat(reported).isEqualTo(RType.STREAM);
}
/**
 * Trimming by a MINID beyond the only entry must not break group reads:
 * both the never-delivered read and the subsequent re-read of this
 * consumer's pending entries still return one message.
 */
@Test
public void testEmptyMap() {
    RStream<Object, Object> stream = redisson.getStream("stream");
    stream.createGroup(StreamCreateGroupArgs.name("group").makeStream());
    stream.add(StreamAddArgs.entry("key", "value"));
    Map<StreamMessageId, Map<Object, Object>> result2 = stream.readGroup("group", "consumer",
        StreamReadGroupArgs.greaterThan(StreamMessageId.NEVER_DELIVERED).timeout(Duration.ofSeconds(1)).count(1));
    assertThat(result2).hasSize(1);
    // MINID far in the future — would trim everything not still referenced.
    stream.trim(StreamTrimArgs.minId(new StreamMessageId(2634125048379L,0)).noLimit());
    // greaterThan(ALL) re-reads this consumer's already-delivered entries.
    Map<StreamMessageId, Map<Object, Object>> result = stream.readGroup("group", "consumer",
        StreamReadGroupArgs.greaterThan(StreamMessageId.ALL).timeout(Duration.ofSeconds(1)).count(1));
    assertThat(result).hasSize(1);
}
/**
 * XAUTOCLAIM: after a minimal idle time, consumer1 claims the two entries
 * pending for consumer2 starting from id3, receiving both payloads.
 */
@Test
public void testAutoClaim() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Let the entries exceed the 1 ms min-idle-time before claiming.
    Thread.sleep(5);
    AutoClaimResult<String, String> res = stream.autoClaim("testGroup", "consumer1", 1, TimeUnit.MILLISECONDS, id3, 2);
    assertThat(res.getMessages().size()).isEqualTo(2);
    for (Map.Entry<StreamMessageId, Map<String, String>> entry : res.getMessages().entrySet()) {
        assertThat(entry.getValue().keySet()).containsAnyOf("3", "4");
        assertThat(entry.getValue().values()).containsAnyOf("33", "44");
    }
}
/**
 * XAUTOCLAIM reports ids that were deleted from the stream (but still sat in
 * the PEL) via {@code getDeletedIds()} instead of returning their messages.
 */
@Test
public void testAutoClaimDeletedIds() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(StreamMessageId.ALL));
    stream.createConsumer("testGroup", "consumer1");
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1",
        StreamReadGroupArgs.neverDelivered());
    // Exceed the 1 ms min-idle-time before the first auto-claim.
    Thread.sleep(5);
    AutoClaimResult<String, String> res = stream.autoClaim("testGroup", "consumer1",
        1, TimeUnit.MILLISECONDS, StreamMessageId.MIN, 2);
    assertThat(res.getMessages().size()).isEqualTo(2);
    // Delete the claimed entries from the stream itself; their PEL references remain.
    stream.remove(res.getMessages().keySet().toArray(new StreamMessageId[0]));
    AutoClaimResult<String, String> res1 = stream.autoClaim("testGroup", "consumer1",
        1, TimeUnit.MILLISECONDS, StreamMessageId.MIN, 2);
    assertThat(res1.getDeletedIds()).containsExactlyInAnyOrder(res.getMessages().keySet().toArray(new StreamMessageId[0]));
}
/**
 * XPENDING with an IDLE filter: after a short wait, all four delivered
 * entries qualify group-wide, and filtering by consumer1 narrows to its two.
 */
@Test
public void testPendingIdle() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "3"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "4"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Ensure every entry has been idle for at least the 1 ms filter.
    Thread.sleep(5);
    List<PendingEntry> list = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list.size()).isEqualTo(4);
    for (PendingEntry pendingEntry : list) {
        assertThat(pendingEntry.getId()).isIn(id1, id2, id3, id4);
        assertThat(pendingEntry.getConsumerName()).isIn("consumer1", "consumer2");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
    // Same query scoped to a single consumer.
    List<PendingEntry> list2 = stream.listPending("testGroup", "consumer1", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS,10);
    assertThat(list2.size()).isEqualTo(2);
    for (PendingEntry pendingEntry : list2) {
        assertThat(pendingEntry.getId()).isIn(id1, id2);
        assertThat(pendingEntry.getConsumerName()).isEqualTo("consumer1");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
}
/**
 * Same scenario as {@code testPendingIdle} but exercised through the
 * {@link StreamPendingRangeArgs} builder API instead of positional arguments.
 */
@Test
public void testPendingIdle2() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "3"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "4"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Ensure all entries exceed the 1 ms idle-time filter below.
    Thread.sleep(5);
    List<PendingEntry> list = stream.listPending(StreamPendingRangeArgs.groupName("testGroup")
        .startId(StreamMessageId.MIN)
        .endId(StreamMessageId.MAX)
        .count(10)
        .idleTime(Duration.ofMillis(1)));
    assertThat(list.size()).isEqualTo(4);
    for (PendingEntry pendingEntry : list) {
        assertThat(pendingEntry.getId()).isIn(id1, id2, id3, id4);
        assertThat(pendingEntry.getConsumerName()).isIn("consumer1", "consumer2");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
    // Narrow the same query to consumer1 only.
    List<PendingEntry> list2 = stream.listPending(StreamPendingRangeArgs.groupName("testGroup")
        .startId(StreamMessageId.MIN)
        .endId(StreamMessageId.MAX)
        .count(10)
        .consumerName("consumer1")
        .idleTime(Duration.ofMillis(1)));
    assertThat(list2.size()).isEqualTo(2);
    for (PendingEntry pendingEntry : list2) {
        assertThat(pendingEntry.getId()).isIn(id1, id2);
        assertThat(pendingEntry.getConsumerName()).isEqualTo("consumer1");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
}
/**
 * XTRIM MAXLEN: trimming a 3-entry stream to 2 removes one entry;
 * trimming a non-existent stream removes nothing.
 */
@Test
public void testTrim() {
    RStream<String, String> populated = redisson.getStream("test");
    for (int i = 0; i <= 2; i++) {
        String v = String.valueOf(i);
        populated.add(StreamAddArgs.entry(v, v));
    }
    long removed = populated.trim(StreamTrimArgs.maxLen(2).noLimit());
    assertThat(removed).isEqualTo(1);
    RStream<String, String> missing = redisson.getStream("myStream");
    assertThat(missing.trim(StreamTrimArgs.maxLen(0).noLimit())).isZero();
}
/**
 * Trimming with and without reference removal (KEEPREF/DELREF): plain trims
 * keep PEL references, {@code removeReferences()} drops them, and the same
 * options apply when trimming as part of XADD.
 */
@Test
public void testTrim2() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").makeStream());
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(4);
    // Default trim keeps the trimmed entry's PEL reference: still 4 pending.
    assertThat(stream.trim(StreamTrimArgs.maxLen(3).noLimit())).isEqualTo(1);
    List<PendingEntry> list = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list.size()).isEqualTo(4);
    // removeReferences() also evicts the trimmed entry from the PEL.
    assertThat(stream.trim(StreamTrimArgs.maxLen(2).removeReferences().noLimit())).isEqualTo(1);
    List<PendingEntry> list2 = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list2.size()).isEqualTo(3);
    // Trim-on-add with reference removal behaves the same way.
    stream.add(StreamAddArgs.entry("4", "4").trim().maxLen(2).removeReferences().noLimit());
    List<PendingEntry> list3 = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list3.size()).isEqualTo(2);
    // keepReferences() preserves the pending count.
    stream.add(StreamAddArgs.entry("5", "5").trim().maxLen(2).keepReferences().noLimit());
    List<PendingEntry> list4 = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list4.size()).isEqualTo(2);
}
/** XPENDING on a group with no deliveries reports zero totals, null bounds and no consumers. */
@Test
public void testPendingEmpty() {
    RStream<Object, Object> s = redisson.getStream("test");
    s.createGroup(StreamCreateGroupArgs.name("testGroup").makeStream());
    PendingResult info = s.getPendingInfo("testGroup");
    assertThat(info.getTotal()).isZero();
    assertThat(info.getHighestId()).isNull();
    assertThat(info.getLowestId()).isNull();
    assertThat(info.getConsumerNames()).isEmpty();
}
/**
 * XGROUP SETID: after rewinding the group's last-delivered id to the first
 * entry, a second consumer re-reads the two entries that consumer1 had
 * already received.
 */
@Test
public void testUpdateGroupMessageId() {
    RStream<String, String> stream = redisson.getStream("test");
    StreamMessageId id = stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    // Rewind the group's cursor so both entries count as never-delivered again.
    stream.updateGroupMessageId("testGroup", id);
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
}
/**
 * XGROUP DELCONSUMER returns the removed consumer's pending-entry count,
 * and zero for a consumer that never existed.
 */
@Test
public void testRemoveConsumer() {
    RStream<String, String> s = redisson.getStream("test");
    s.add(StreamAddArgs.entry("0", "0"));
    s.createGroup(StreamCreateGroupArgs.name("testGroup").makeStream());
    s.add(StreamAddArgs.entry("1", "1"));
    s.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> delivered =
        s.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(delivered).hasSize(2);
    assertThat(s.removeConsumer("testGroup", "consumer1")).isEqualTo(2);
    assertThat(s.removeConsumer("testGroup", "consumer2")).isZero();
}
/** Reading through a group that was just deleted must raise a RedisException. */
@Test
public void testRemoveGroup() {
    Assertions.assertThrows(RedisException.class, () -> {
        RStream<String, String> s = redisson.getStream("test");
        s.add(StreamAddArgs.entry("0", "0"));
        s.createGroup(StreamCreateGroupArgs.name("testGroup"));
        s.add(StreamAddArgs.entry("1", "1"));
        s.add(StreamAddArgs.entry("2", "2"));
        s.removeGroup("testGroup");
        // Group is gone — this XREADGROUP call is expected to fail.
        s.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    });
}
/** XDEL of both entries empties the stream and reports 2 removals. */
@Test
public void testRemoveMessages() {
    RStream<String, String> s = redisson.getStream("test");
    StreamMessageId first = s.add(StreamAddArgs.entry("0", "0"));
    StreamMessageId second = s.add(StreamAddArgs.entry("1", "1"));
    assertThat(s.size()).isEqualTo(2);
    long removed = s.remove(first, second);
    assertThat(removed).isEqualTo(2);
    assertThat(s.size()).isZero();
}
/**
 * XDELEX-style removal with per-id statuses: plain and removeReferences()
 * deletions succeed, while removeAcknowledgedOnly() refuses an entry that
 * still has pending (unacknowledged) references.
 */
@Test
public void testRemoveMessages2() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").makeStream());
    StreamMessageId id0 = stream.add(StreamAddArgs.entry("0", "0"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "3"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(4);
    // Plain removal succeeds but keeps the two ids in the PEL: still 4 pending.
    Map<StreamMessageId, StreamEntryStatus> map=stream.remove(StreamRemoveArgs.ids(id0, id1));
    assertThat(map.get(id0)).isEqualTo(StreamEntryStatus.SUCCESS);
    assertThat(map.get(id1)).isEqualTo(StreamEntryStatus.SUCCESS);
    List<PendingEntry> list = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list.size()).isEqualTo(4);
    // removeReferences() also evicts the entry from the PEL.
    assertThat(stream.remove(StreamRemoveArgs.ids(id2).removeReferences()).get(id2)).isEqualTo(StreamEntryStatus.SUCCESS);
    List<PendingEntry> list2 = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 1, TimeUnit.MILLISECONDS, 10);
    assertThat(list2.size()).isEqualTo(3);
    // id3 was delivered but never acked, so acknowledged-only removal declines.
    assertThat(stream.remove(StreamRemoveArgs.ids(id3).removeAcknowledgedOnly()).get(id3)).isEqualTo(StreamEntryStatus.HAS_PENDING_REFERENCES);
}
/**
 * XCLAIM silently skips ids that were deleted from the stream: after id3 is
 * removed, claiming {id3, id4} yields only id4.
 */
@Test
public void testClaimRemove() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Delete id3 from the stream before claiming it.
    stream.remove(id3);
    // Exceed the 1 ms min-idle-time used by claim() below.
    Thread.sleep(2);
    Map<StreamMessageId, Map<String, String>> res = stream.claim("testGroup", "consumer1", 1, TimeUnit.MILLISECONDS, id3, id4);
    assertThat(res.size()).isEqualTo(1);
    assertThat(res.keySet()).containsExactly(id4);
}
/**
 * XCLAIM: consumer1 claims the two entries pending for consumer2 once they
 * exceed the min-idle-time, receiving both ids and payloads.
 */
@Test
public void testClaim() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Exceed the 1 ms min-idle-time before claiming.
    Thread.sleep(5);
    Map<StreamMessageId, Map<String, String>> res = stream.claim("testGroup", "consumer1", 1, TimeUnit.MILLISECONDS, id3, id4);
    assertThat(res.size()).isEqualTo(2);
    assertThat(res.keySet()).containsExactly(id3, id4);
    for (Map<String, String> map : res.values()) {
        assertThat(map.keySet()).containsAnyOf("3", "4");
        assertThat(map.values()).containsAnyOf("33", "44");
    }
}
/**
 * XAUTOCLAIM JUSTID ({@code fastAutoClaim}): returns only the claimed ids
 * plus the next cursor (0-0 when the scan is exhausted), no payloads.
 */
@Test
public void testAutoClaimIds() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test3");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup3"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup3", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup3", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Exceed the 1 ms min-idle-time before claiming.
    Thread.sleep(5);
    FastAutoClaimResult res = stream.fastAutoClaim("testGroup3", "consumer1", 1, TimeUnit.MILLISECONDS, id3, 10);
    assertThat(res.getNextId()).isEqualTo(new StreamMessageId(0, 0));
    assertThat(res.getIds()).containsExactly(id3, id4);
}
/**
 * XCLAIM JUSTID ({@code fastClaim}): returns the claimed ids only,
 * without message payloads.
 */
@Test
public void testClaimIds() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test3");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup3"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup3", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup3", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Exceed the 1 ms min-idle-time before claiming.
    Thread.sleep(5);
    List<StreamMessageId> res = stream.fastClaim("testGroup3", "consumer1", 1, TimeUnit.MILLISECONDS, id3, id4);
    assertThat(res.size()).isEqualTo(2);
    assertThat(res).containsExactly(id3, id4);
}
/**
 * XPENDING summary and range forms: the summary reports bounds, total and
 * per-consumer counts; the range listing returns all four entries, and the
 * consumer-scoped form only consumer1's two.
 */
@Test
public void testPending() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "3"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "4"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // Summary form: lowest/highest pending id, total and consumer names.
    PendingResult pi = stream.getPendingInfo("testGroup");
    assertThat(pi.getLowestId()).isEqualTo(id1);
    assertThat(pi.getHighestId()).isEqualTo(id4);
    assertThat(pi.getTotal()).isEqualTo(4);
    assertThat(pi.getConsumerNames().keySet()).containsExactly("consumer1", "consumer2");
    List<PendingEntry> list = stream.listPending("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 10);
    assertThat(list.size()).isEqualTo(4);
    for (PendingEntry pendingEntry : list) {
        assertThat(pendingEntry.getId()).isIn(id1, id2, id3, id4);
        assertThat(pendingEntry.getConsumerName()).isIn("consumer1", "consumer2");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
    // Consumer-scoped range listing.
    List<PendingEntry> list2 = stream.listPending("testGroup", "consumer1", StreamMessageId.MIN, StreamMessageId.MAX, 10);
    assertThat(list2.size()).isEqualTo(2);
    for (PendingEntry pendingEntry : list2) {
        assertThat(pendingEntry.getId()).isIn(id1, id2);
        assertThat(pendingEntry.getConsumerName()).isEqualTo("consumer1");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
}
/**
 * Same scenario as {@code testPending} but using the
 * {@link StreamPendingRangeArgs} builder API for the range queries.
 */
@Test
public void testPending2() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "3"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "4"));
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    PendingResult pi = stream.getPendingInfo("testGroup");
    assertThat(pi.getLowestId()).isEqualTo(id1);
    assertThat(pi.getHighestId()).isEqualTo(id4);
    assertThat(pi.getTotal()).isEqualTo(4);
    assertThat(pi.getConsumerNames().keySet()).containsExactly("consumer1", "consumer2");
    // Builder-based range query over the whole id range.
    List<PendingEntry> list = stream.listPending(StreamPendingRangeArgs.groupName("testGroup")
        .startId(StreamMessageId.MIN)
        .endId(StreamMessageId.MAX)
        .count(10));
    assertThat(list.size()).isEqualTo(4);
    for (PendingEntry pendingEntry : list) {
        assertThat(pendingEntry.getId()).isIn(id1, id2, id3, id4);
        assertThat(pendingEntry.getConsumerName()).isIn("consumer1", "consumer2");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
    // Same query scoped to consumer1.
    List<PendingEntry> list2 = stream.listPending(StreamPendingRangeArgs.groupName("testGroup")
        .startId(StreamMessageId.MIN)
        .endId(StreamMessageId.MAX)
        .count(10)
        .consumerName("consumer1"));
    assertThat(list2.size()).isEqualTo(2);
    for (PendingEntry pendingEntry : list2) {
        assertThat(pendingEntry.getId()).isIn(id1, id2);
        assertThat(pendingEntry.getConsumerName()).isEqualTo("consumer1");
        assertThat(pendingEntry.getLastTimeDelivered()).isOne();
    }
}
/**
 * {@code pendingRange} returns the pending entries with their payloads —
 * group-wide, per-consumer, and empty for a consumer with no deliveries.
 */
@Test
public void testPendingRange() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("11", "12"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("21", "22"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    // Group-wide pending range includes both entries with field/value data.
    Map<StreamMessageId, Map<String, String>> pres = stream.pendingRange("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 10);
    assertThat(pres.keySet()).containsExactly(id1, id2);
    assertThat(pres.get(id1)).isEqualTo(Collections.singletonMap("11", "12"));
    assertThat(pres.get(id2)).isEqualTo(Collections.singletonMap("21", "22"));
    // Scoped to the consumer that actually received them.
    Map<StreamMessageId, Map<String, String>> pres2 = stream.pendingRange("testGroup", "consumer1", StreamMessageId.MIN, StreamMessageId.MAX, 10);
    assertThat(pres2.keySet()).containsExactly(id1, id2);
    assertThat(pres2.get(id1)).isEqualTo(Collections.singletonMap("11", "12"));
    assertThat(pres2.get(id2)).isEqualTo(Collections.singletonMap("21", "22"));
    // A consumer with no deliveries has an empty pending range.
    Map<StreamMessageId, Map<String, String>> pres3 = stream.pendingRange("testGroup", "consumer2", StreamMessageId.MIN, StreamMessageId.MAX, 10);
    assertThat(pres3).isEmpty();
}
/** XACK acknowledges both delivered entries and reports a count of 2. */
@Test
public void testAck() {
    RStream<String, String> s = redisson.getStream("test");
    s.add(StreamAddArgs.entry("0", "0"));
    s.createGroup(StreamCreateGroupArgs.name("testGroup"));
    StreamMessageId first = s.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId second = s.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> delivered =
        s.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(delivered).hasSize(2);
    long acked = s.ack("testGroup", first, second);
    assertThat(acked).isEqualTo(2);
}
/**
 * XACKDEL-style acknowledgement with {@code removeAcknowledgedOnly()}: an
 * entry still pending in another group reports HAS_PENDING_REFERENCES, one
 * with no remaining references reports SUCCESS.
 */
@Test
public void testAck2() {
    RStream<String, String> stream = redisson.getStream("test");
    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup2"));
    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup2", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);
    // id1 is still pending in testGroup2, so removal is refused here.
    assertThat(stream.ack(StreamAckArgs.group("testGroup").ids(id1).removeAcknowledgedOnly()).get(id1)).isEqualTo(StreamEntryStatus.HAS_PENDING_REFERENCES);
    // After acking in testGroup2: id1 has no references left (SUCCESS),
    // id2 remains pending in testGroup (HAS_PENDING_REFERENCES).
    Map<StreamMessageId, StreamEntryStatus> map=stream.ack(StreamAckArgs.group("testGroup2").ids(id1, id2).removeAcknowledgedOnly());
    assertThat(map.get(id1)).isEqualTo(StreamEntryStatus.SUCCESS);
    assertThat(map.get(id2)).isEqualTo(StreamEntryStatus.HAS_PENDING_REFERENCES);
}
/**
 * Multi-stream group read: reading two streams with explicit "greater than"
 * ids set to the last entries yields nothing new.
 */
@Test
public void testReadGroupMulti() {
    RStream<String, String> stream1 = redisson.getStream("test1");
    RStream<String, String> stream2 = redisson.getStream("test2");
    StreamMessageId id01 = stream1.add(StreamAddArgs.entry("0", "0"));
    StreamMessageId id02 = stream2.add(StreamAddArgs.entry("0", "0"));
    // Each group starts after its stream's first entry.
    stream1.createGroup(StreamCreateGroupArgs.name("testGroup").id(id01));
    stream2.createGroup(StreamCreateGroupArgs.name("testGroup").id(id02));
    StreamMessageId id11 = stream1.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id12 = stream1.add(StreamAddArgs.entry("2", "2"));
    StreamMessageId id13 = stream1.add(StreamAddArgs.entry("3", "3"));
    StreamMessageId id21 = stream2.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id22 = stream2.add(StreamAddArgs.entry("2", "2"));
    StreamMessageId id23 = stream2.add(StreamAddArgs.entry("3", "3"));
    // Reading "greater than" explicit ids returns only this consumer's PEL
    // entries after those ids — nothing was delivered yet, so it is empty.
    Map<String, Map<StreamMessageId, Map<String, String>>> s2 = stream1.readGroup("testGroup", "consumer1", StreamMultiReadGroupArgs.greaterThan(id11, Collections.singletonMap("test2", id21)));
    assertThat(s2).isEmpty();
}
/**
 * A blocking group read (timeout 5s) returns immediately when enough
 * never-delivered entries are already available.
 */
@Test
public void testReadGroupBlocking() {
    RStream<String, String> stream = redisson.getStream("test");
    StreamMessageId id0 = stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id0).makeStream());
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    // Three entries are pending, so the blocking read completes at once.
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered().count(3).timeout(Duration.ofSeconds(5)));
    assertThat(s.values().iterator().next().keySet()).containsAnyOf("1", "2", "3");
    assertThat(s.size()).isEqualTo(3);
    // Recreate the group and set up a second stream; the multi-stream
    // blocking-read assertions were removed here as dead (commented-out) code.
    stream.removeGroup("testGroup");
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id0).makeStream());
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    RStream<String, String> stream2 = redisson.getStream("test2");
    StreamMessageId id1 = stream2.add(StreamAddArgs.entry("0", "0"));
    stream2.createGroup(StreamCreateGroupArgs.name("testGroup").id(id1).makeStream());
}
/** A group created with MKSTREAM on a missing key delivers entries added afterwards. */
@Test
public void testCreateEmpty() {
    RStream<String, String> s = redisson.getStream("test");
    s.createGroup(StreamCreateGroupArgs.name("testGroup").id(StreamMessageId.ALL).makeStream());
    s.add(StreamAddArgs.entry("1", "2"));
    Map<StreamMessageId, Map<String, String>> delivered =
        s.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(delivered).hasSize(1);
}
/**
 * XREADGROUP basics: never-delivered reads return new entries, count()
 * limits the batch, and reading "greater than" the newest delivered id
 * from this consumer's PEL yields nothing.
 */
@Test
public void testReadGroup() {
    RStream<String, String> stream = redisson.getStream("test");
    StreamMessageId id0 = stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id0));
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.values().iterator().next().keySet()).containsAnyOf("1", "2", "3");
    assertThat(s.size()).isEqualTo(3);
    stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    // count(1) caps the delivery at a single entry.
    Map<StreamMessageId, Map<String, String>> s1 = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered().count(1));
    assertThat(s1.size()).isEqualTo(1);
    StreamMessageId id = stream.add(StreamAddArgs.entry("1", "1"));
    stream.add(StreamAddArgs.entry("2", "2"));
    stream.add(StreamAddArgs.entry("3", "3"));
    // greaterThan(id) reads this consumer's PEL after id — nothing there.
    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.greaterThan(id));
    assertThat(s2).isEmpty();
}
@Test
public void testAutogenerateStreamSequenceId() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
StreamMessageId id = new StreamMessageId(1).autogenerateSequenceId();
Map<String, String> entry1 = new HashMap<>();
entry1.put("test", "value1");
Map<String, String> entry2 = new HashMap<>();
entry2.put("test", "value2");
stream.add(id,StreamAddArgs.entries(entry1));
stream.add(id,StreamAddArgs.entries(entry2));
Map<StreamMessageId, Map<String, String>> r = stream.range(10, StreamMessageId.MIN, StreamMessageId.MAX);
assertThat(r).size().isEqualTo(2);
assertThat(r.keySet()).containsExactly(
new StreamMessageId(1,0),new StreamMessageId(1,1)
);
assertThat(r.get(new StreamMessageId(1,0))).isEqualTo(entry1);
assertThat(r.get(new StreamMessageId(1,1))).isEqualTo(entry2);
}
@Test
public void testRangeReversed() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries1 = new HashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<String, String> entries2 = new HashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> r2 = stream.rangeReversed(10, StreamMessageId.MAX, StreamMessageId.MIN);
assertThat(r2.keySet()).containsExactly(new StreamMessageId(2), new StreamMessageId(1));
assertThat(r2.get(new StreamMessageId(1))).isEqualTo(entries1);
assertThat(r2.get(new StreamMessageId(2))).isEqualTo(entries2);
}
@Test
public void testRange() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries1 = new HashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<String, String> entries2 = new HashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> r = stream.range(10, new StreamMessageId(0), new StreamMessageId(1));
assertThat(r).hasSize(1);
assertThat(r.get(new StreamMessageId(1))).isEqualTo(entries1);
Map<StreamMessageId, Map<String, String>> r2 = stream.range(10, StreamMessageId.MIN, StreamMessageId.MAX);
assertThat(r2.keySet()).containsExactly(new StreamMessageId(1), new StreamMessageId(2));
assertThat(r2.get(new StreamMessageId(1))).isEqualTo(entries1);
assertThat(r2.get(new StreamMessageId(2))).isEqualTo(entries2);
}
@Test
public void testRangeReversed2() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries1 = new HashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<String, String> entries2 = new HashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> r2 = stream.rangeReversed(StreamRangeArgs.startId(StreamMessageId.MAX)
.endId(StreamMessageId.MIN)
.count(10));
assertThat(r2.keySet()).containsExactly(new StreamMessageId(2), new StreamMessageId(1));
assertThat(r2.get(new StreamMessageId(1))).isEqualTo(entries1);
assertThat(r2.get(new StreamMessageId(2))).isEqualTo(entries2);
}
@Test
public void testRange2() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries1 = new HashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<String, String> entries2 = new HashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> r = stream.range(StreamRangeArgs
.startId(new StreamMessageId(0)).endId(new StreamMessageId(1)).count(10));
assertThat(r).hasSize(1);
assertThat(r.get(new StreamMessageId(1))).isEqualTo(entries1);
Map<StreamMessageId, Map<String, String>> r2 = stream.range(StreamRangeArgs
.startId(StreamMessageId.MIN).endId(StreamMessageId.MAX).count(10));
assertThat(r2.keySet()).containsExactly(new StreamMessageId(1), new StreamMessageId(2));
assertThat(r2.get(new StreamMessageId(1))).isEqualTo(entries1);
assertThat(r2.get(new StreamMessageId(2))).isEqualTo(entries2);
Map<StreamMessageId, Map<String, String>> r3 = stream.range(StreamRangeArgs
.startIdExclusive(new StreamMessageId(1)).endId(new StreamMessageId(2)));
assertThat(r3).hasSize(1);
assertThat(r3.get(new StreamMessageId(2))).isEqualTo(entries2);
}
@Test
public void testPollMultiKeys() {
RStream<String, String> stream = redisson.getStream("test");
Map<String, String> entries1 = new LinkedHashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
Thread t = new Thread() {
@Override
public void run() {
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
e.printStackTrace();
}
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1));
}
};
t.start();
Awaitility.await().between(Duration.ofMillis(1900), Duration.ofMillis(2700)).untilAsserted(() -> {
Map<String, Map<StreamMessageId, Map<String, String>>> s = stream.read(StreamMultiReadArgs.greaterThan(new StreamMessageId(0), "test1", StreamMessageId.NEWEST)
.timeout(Duration.ofSeconds(5))
.count(2));
assertThat(s).hasSize(1);
assertThat(s.get("test").get(new StreamMessageId(1))).isEqualTo(entries1);
});
}
@Test
public void testPoll() {
RStream<String, String> stream = redisson.getStream("test");
Map<String, String> entries1 = new LinkedHashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
Thread t = new Thread() {
@Override
public void run() {
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
e.printStackTrace();
}
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1));
}
};
t.start();
Awaitility.await().between(Duration.ofMillis(1900), Duration.ofMillis(2200)).untilAsserted(() -> {
Map<StreamMessageId, Map<String, String>> s = stream.read(StreamReadArgs.greaterThan(new StreamMessageId(0)).count(2).timeout(Duration.ofSeconds(4)));
assertThat(s).hasSize(1);
assertThat(s.get(new StreamMessageId(1))).isEqualTo(entries1);
});
StreamMessageId id0 = stream.add(StreamAddArgs.entry("11", "11"));
stream.add(StreamAddArgs.entry("22", "22"));
RStream<String, String> stream2 = redisson.getStream("test2");
StreamMessageId id1 = stream2.add(StreamAddArgs.entry("33", "33"));
stream2.add(StreamAddArgs.entry("44", "44"));
Map<String, Map<StreamMessageId, Map<String, String>>> s2 = stream.read(StreamMultiReadArgs.greaterThan(id0, "test2", id1)
.timeout(Duration.ofSeconds(5)));
assertThat(s2.values().iterator().next().values().iterator().next().keySet()).containsAnyOf("11", "22", "33", "44");
assertThat(s2.keySet()).containsExactlyInAnyOrder("test", "test2");
}
@Test
public void testSize() {
RStream<String, String> stream = redisson.getStream("test");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries1 = new HashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<String, String> entries2 = new HashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(2);
}
@Test
public void testReadMultiKeysEmpty() {
RStream<String, String> stream = redisson.getStream("test2");
Map<String, Map<StreamMessageId, Map<String, String>>> s = stream.read(StreamMultiReadArgs.greaterThan(new StreamMessageId(0), "test1", new StreamMessageId(0))
.count(10));
assertThat(s).isEmpty();
}
@Test
public void testReadMultiKeys() {
RStream<String, String> stream1 = redisson.getStream("test1");
Map<String, String> entries1 = new LinkedHashMap<>();
entries1.put("1", "11");
entries1.put("2", "22");
entries1.put("3", "33");
stream1.add(StreamAddArgs.entries(entries1));
RStream<String, String> stream2 = redisson.getStream("test2");
Map<String, String> entries2 = new LinkedHashMap<>();
entries2.put("4", "44");
entries2.put("5", "55");
entries2.put("6", "66");
stream2.add(StreamAddArgs.entries(entries2));
Map<String, Map<StreamMessageId, Map<String, String>>> s = stream2.read(StreamMultiReadArgs.greaterThan(new StreamMessageId(0), "test1", new StreamMessageId(0))
.count(10));
assertThat(s).hasSize(2);
assertThat(s.get("test1").values().iterator().next()).isEqualTo(entries1);
assertThat(s.get("test2").values().iterator().next()).isEqualTo(entries2);
}
@Test
public void testReadMulti() {
RStream<String, String> stream = redisson.getStream("test");
Map<String, String> entries1 = new LinkedHashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
Map<String, String> entries2 = new LinkedHashMap<>();
entries2.put("5", "55");
entries2.put("7", "77");
stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
Map<String, String> entries3 = new LinkedHashMap<>();
entries3.put("15", "05");
entries3.put("17", "07");
stream.add(new StreamMessageId(3), StreamAddArgs.entries(entries3).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> result = stream.read(StreamReadArgs.greaterThan(new StreamMessageId(0, 0)).count(10));
assertThat(result).hasSize(3);
assertThat(result.get(new StreamMessageId(4))).isNull();
assertThat(result.get(new StreamMessageId(1))).isEqualTo(entries1);
assertThat(result.get(new StreamMessageId(2))).isEqualTo(entries2);
assertThat(result.get(new StreamMessageId(3))).isEqualTo(entries3);
}
@Test
public void testReadSingle() {
RStream<String, String> stream = redisson.getStream("test");
Map<String, String> entries1 = new LinkedHashMap<>();
entries1.put("1", "11");
entries1.put("3", "31");
stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
Map<StreamMessageId, Map<String, String>> result = stream.read(StreamReadArgs.greaterThan(new StreamMessageId(0, 0)).count(10));
assertThat(result).hasSize(1);
assertThat(result.get(new StreamMessageId(4))).isNull();
assertThat(result.get(new StreamMessageId(1))).isEqualTo(entries1);
}
@Test
public void testReadEmpty() {
RStream<String, String> stream2 = redisson.getStream("test");
Map<StreamMessageId, Map<String, String>> result2 = stream2.read(StreamReadArgs.greaterThan(new StreamMessageId(0, 0)).count(10));
assertThat(result2).isEmpty();
}
@Test
public void testAdd() {
RStream<String, String> stream = redisson.getStream("test1");
StreamMessageId s = stream.add(StreamAddArgs.entry("12", "33"));
assertThat(s.getId0()).isNotNegative();
assertThat(s.getId1()).isNotNegative();
assertThat(stream.size()).isEqualTo(1);
}
@Test
public void testAddAll() {
RStream<String, String> stream = redisson.getStream("test1");
assertThat(stream.size()).isEqualTo(0);
Map<String, String> entries = new HashMap<>();
entries.put("6", "61");
entries.put("4", "41");
StreamMessageId id = new StreamMessageId(12, 42);
stream.add(id, StreamAddArgs.entries(entries).trimNonStrict().maxLen(10).noLimit());
assertThat(stream.size()).isEqualTo(1);
Map<StreamMessageId, Map<String, String>> res = stream.read(StreamReadArgs.greaterThan(new StreamMessageId(10, 42)));
assertThat(res.get(id).size()).isEqualTo(2);
entries.clear();
entries.put("1", "11");
entries.put("3", "31");
stream.add(new StreamMessageId(Long.MAX_VALUE), StreamAddArgs.entries(entries).trimNonStrict().maxLen(1).noLimit());
assertThat(stream.size()).isEqualTo(2);
}
@Test
public void testStreamConsumers() {
RStream<String, String> stream = redisson.getStream("test1");
StreamMessageId id1 = new StreamMessageId(12, 44);
stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id1).makeStream());
stream.add(StreamAddArgs.entry("1", "1"));
stream.add(StreamAddArgs.entry("2", "2"));
stream.add(StreamAddArgs.entry("3", "3"));
StreamMessageId id2 = new StreamMessageId(12, 44);
stream.createGroup(StreamCreateGroupArgs.name("testGroup2").id(id2).makeStream());
stream.add(StreamAddArgs.entry("1", "1"));
stream.add(StreamAddArgs.entry("2", "2"));
stream.add(StreamAddArgs.entry("3", "3"));
Map<StreamMessageId, Map<String, String>> map = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
assertThat(map.size()).isEqualTo(6);
List<StreamConsumer> s1 = stream.listConsumers("testGroup");
assertThat(s1).hasSize(1);
assertThat(s1.get(0).getName()).isEqualTo("consumer1");
assertThat(s1.get(0).getPending()).isEqualTo(6);
assertThat(s1.get(0).getIdleTime()).isLessThan(100L);
Map<StreamMessageId, Map<String, String>> map2 = stream.readGroup("testGroup2", "consumer2", StreamReadGroupArgs.neverDelivered());
assertThat(map2.size()).isEqualTo(6);
List<StreamConsumer> s2 = stream.listConsumers("testGroup2");
assertThat(s2).hasSize(1);
assertThat(s2.get(0).getName()).isEqualTo("consumer2");
assertThat(s2.get(0).getPending()).isEqualTo(6);
assertThat(s2.get(0).getIdleTime()).isLessThan(100L);
}
@Test
public void testStreamGroupsInfo() {
RStream<String, String> stream = redisson.getStream("test1");
Map<String, String> entries = new HashMap<>();
entries.put("6", "61");
entries.put("4", "41");
StreamMessageId id = new StreamMessageId(12, 42);
stream.add(id, StreamAddArgs.entries(entries).trimNonStrict().maxLen(10).noLimit());
List<StreamGroup> s = stream.listGroups();
assertThat(s).isEmpty();
StreamMessageId id1 = new StreamMessageId(12, 44);
stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id1));
stream.add(StreamAddArgs.entry("1", "1"));
stream.add(StreamAddArgs.entry("2", "2"));
stream.add(StreamAddArgs.entry("3", "3"));
StreamMessageId id2 = new StreamMessageId(12, 44);
stream.createGroup(StreamCreateGroupArgs.name("testGroup2").id(id2));
stream.add(StreamAddArgs.entry("1", "1"));
stream.add(StreamAddArgs.entry("2", "2"));
stream.add(StreamAddArgs.entry("3", "3"));
List<StreamGroup> s2 = stream.listGroups();
assertThat(s2).hasSize(2);
assertThat(s2.get(0).getName()).isEqualTo("testGroup");
assertThat(s2.get(0).getConsumers()).isEqualTo(0);
assertThat(s2.get(0).getPending()).isEqualTo(0);
assertThat(s2.get(0).getLastDeliveredId()).isEqualTo(id1);
assertThat(s2.get(1).getName()).isEqualTo("testGroup2");
assertThat(s2.get(1).getConsumers()).isEqualTo(0);
assertThat(s2.get(1).getPending()).isEqualTo(0);
assertThat(s2.get(1).getLastDeliveredId()).isEqualTo(id2);
}
@Test
public void testStreamInfoEmpty() {
RStream<String, String> stream = redisson.getStream("test1");
StreamMessageId id1 = new StreamMessageId(12, 44);
stream.createGroup(StreamCreateGroupArgs.name("testGroup").id(id1).makeStream());
StreamInfo<String, String> s = stream.getInfo();
}
@Test
public void testStreamInfo() {
RStream<String, String> stream = redisson.getStream("test1");
Map<String, String> entries = new HashMap<>();
entries.put("6", "61");
entries.put("4", "41");
StreamMessageId id = new StreamMessageId(12, 42);
stream.add(id, StreamAddArgs.entries(entries).trimNonStrict().maxLen(10).noLimit());
Map<String, String> lastEntries = new HashMap<>();
lastEntries.put("10", "52");
lastEntries.put("44", "89");
StreamMessageId lastId = new StreamMessageId(12, 43);
stream.add(lastId, StreamAddArgs.entries(lastEntries).trimNonStrict().maxLen(10).noLimit());
StreamInfo<String, String> info = stream.getInfo();
assertThat(info.getLength()).isEqualTo(2);
assertThat(info.getRadixTreeKeys()).isEqualTo(1);
assertThat(info.getRadixTreeNodes()).isEqualTo(2);
assertThat(info.getLastGeneratedId()).isEqualTo(lastId);
assertThat(info.getFirstEntry().getId()).isEqualTo(id);
assertThat(info.getFirstEntry().getData()).isEqualTo(entries);
assertThat(info.getLastEntry().getId()).isEqualTo(lastId);
assertThat(info.getLastEntry().getData()).isEqualTo(lastEntries);
}
}
|
RedissonStreamTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java
|
{
"start": 119792,
"end": 128523
}
|
interface ____ {
/**
* <code>rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto);</code>
*/
public abstract void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done);
/**
* <code>rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto);</code>
*/
public abstract void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done);
}
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new NewerProtobufRpcProto() {
@java.lang.Override
public void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done) {
impl.ping(controller, request, done);
}
@java.lang.Override
public void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done) {
impl.echo(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request);
case 1:
return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
/**
* <code>rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto);</code>
*/
public abstract void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done);
/**
* <code>rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto);</code>
*/
public abstract void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(4);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto>specializeCallback(
done));
return;
case 1:
this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto>specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final
|
Interface
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/EmptyIfStatement.java
|
{
"start": 1719,
"end": 3214
}
|
class ____ extends BugChecker implements EmptyStatementTreeMatcher {
/**
* Match empty statement if: - Parent statement is an if - The then part of the parent if is an
* empty statement, and - The else part of the parent if does not exist
*/
@Override
public Description matchEmptyStatement(EmptyStatementTree tree, VisitorState state) {
TreePath parentPath = state.getPath().getParentPath();
Tree parent = parentPath.getLeaf();
if (!(parent instanceof IfTree ifTree)) {
return NO_MATCH;
}
if (!(ifTree.getThenStatement() instanceof EmptyStatementTree)
|| ifTree.getElseStatement() != null) {
return NO_MATCH;
}
/*
* We suggest different fixes depending on what follows the parent if statement.
* If there is no statement following the if, then suggest deleting the whole
* if statement. If the next statement is a block, then suggest deleting the
* empty then part of the if. If the next statement is not a block, then also
* suggest deleting the empty then part of the if.
*/
if (isLastStatementInBlock().matches(ifTree, state.withPath(parentPath))) {
// No following statements. Delete whole if.
return describeMatch(parent, SuggestedFix.delete(parent));
} else {
// There are more statements. Delete the empty then part of the if.
return describeMatch(
ifTree.getThenStatement(), SuggestedFix.delete(ifTree.getThenStatement()));
}
}
}
|
EmptyIfStatement
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/JUnit4TearDownNotRun.java
|
{
"start": 2013,
"end": 2649
}
|
class ____ extends AbstractJUnit4InitMethodNotRun {
@Override
protected Matcher<MethodTree> methodMatcher() {
return allOf(
anyOf(looksLikeJUnit3TearDown, looksLikeJUnit4After), not(hasJUnit4AfterAnnotations));
}
@Override
protected String correctAnnotation() {
return JUNIT_AFTER_ANNOTATION;
}
@Override
protected List<AnnotationReplacements> annotationReplacements() {
return Arrays.asList(
new AnnotationReplacements(JUNIT_BEFORE_ANNOTATION, JUNIT_AFTER_ANNOTATION),
new AnnotationReplacements(JUNIT_BEFORE_CLASS_ANNOTATION, JUNIT_AFTER_CLASS_ANNOTATION));
}
}
|
JUnit4TearDownNotRun
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/records/tofix/JsonIncludeNonDefaultOnRecord5312Test.java
|
{
"start": 718,
"end": 2269
}
|
class ____
{
record StringValue(String value) {
@Override
@JsonValue
public String value() {
return value;
}
}
record Pojo1(StringValue value) { }
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
record Pojo2(StringValue value) { }
record Pojo3(@JsonInclude(JsonInclude.Include.NON_DEFAULT) StringValue value) { }
private final ObjectMapper MAPPER = JsonMapper.builder()
//might be relevant for analysis, but does not affect test outcome
.changeDefaultPropertyInclusion(incl -> JsonInclude.Value.construct(NON_DEFAULT, NON_DEFAULT))
.withConfigOverride(String.class,
o -> o.setInclude(JsonInclude.Value.construct(NON_NULL, NON_NULL)))
.build();
@JacksonTestFailureExpected
@Test
void testSerialization1() throws Exception {
//FAIL on jackson 2.18.2 / 2.20.0
Assertions.assertEquals("{\"value\":\"\"}",
MAPPER.writeValueAsString(new Pojo1(new StringValue(""))));
}
//PASS
@Test
void testSerialization2() throws Exception {
Assertions.assertEquals("{\"value\":\"\"}",
MAPPER.writeValueAsString(new Pojo2(new StringValue(""))));
}
@JacksonTestFailureExpected
@Test
void testSerialization3() throws Exception {
//FAIL on jackson 2.18.2 / 2.20.0
Assertions.assertEquals("{\"value\":\"\"}", MAPPER.writeValueAsString(new Pojo3(new StringValue(""))));
}
}
|
JsonIncludeNonDefaultOnRecord5312Test
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/uri/UriTemplateMatcher.java
|
{
"start": 17903,
"end": 18559
}
|
class ____ implements UriTemplateParser.PartVisitor {
int variableCount = 0;
int rawLength = 0;
@Override
public void visitLiteral(String literal) {
rawLength += literal.length();
}
@Override
public void visitExpression(UriTemplateParser.ExpressionType type, List<UriTemplateParser.Variable> variables) {
if (!type.isQueryPart()) {
variableCount += variables.size();
}
}
}
private record Segment(SegmentType type, String value,
Pattern pattern, String[] regexpVariables) {
}
private
|
PathEvaluator
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/TypeAdapterTest.java
|
{
"start": 896,
"end": 4289
}
|
class ____ {
@Test
public void testNullSafe() throws IOException {
TypeAdapter<String> adapter = assertionErrorAdapter.nullSafe();
assertThat(adapter.toJson(null)).isEqualTo("null");
assertThat(adapter.fromJson("null")).isNull();
}
@Test
public void testNullSafe_ReturningSameInstanceOnceNullSafe() {
TypeAdapter<?> nullSafeAdapter = assertionErrorAdapter.nullSafe();
assertThat(nullSafeAdapter.nullSafe()).isSameInstanceAs(nullSafeAdapter);
assertThat(nullSafeAdapter.nullSafe().nullSafe()).isSameInstanceAs(nullSafeAdapter);
assertThat(nullSafeAdapter.nullSafe().nullSafe().nullSafe()).isSameInstanceAs(nullSafeAdapter);
}
@Test
public void testNullSafe_ToString() {
TypeAdapter<?> adapter = assertionErrorAdapter;
assertThat(adapter.toString()).isEqualTo("assertionErrorAdapter");
assertThat(adapter.nullSafe().toString())
.isEqualTo("NullSafeTypeAdapter[assertionErrorAdapter]");
assertThat(adapter.nullSafe().nullSafe().toString())
.isEqualTo("NullSafeTypeAdapter[assertionErrorAdapter]");
}
private static final TypeAdapter<String> assertionErrorAdapter =
new TypeAdapter<>() {
@Override
public void write(JsonWriter out, String value) {
throw new AssertionError("unexpected call");
}
@Override
public String read(JsonReader in) {
throw new AssertionError("unexpected call");
}
@Override
public String toString() {
return "assertionErrorAdapter";
}
};
/**
* Tests behavior when {@link TypeAdapter#write(JsonWriter, Object)} manually throws {@link
* IOException} which is not caused by writer usage.
*/
@Test
public void testToJson_ThrowingIOException() {
IOException exception = new IOException("test");
TypeAdapter<Integer> adapter =
new TypeAdapter<>() {
@Override
public void write(JsonWriter out, Integer value) throws IOException {
throw exception;
}
@Override
public Integer read(JsonReader in) {
throw new AssertionError("not needed by this test");
}
};
JsonIOException e = assertThrows(JsonIOException.class, () -> adapter.toJson(1));
assertThat(e).hasCauseThat().isEqualTo(exception);
e = assertThrows(JsonIOException.class, () -> adapter.toJsonTree(1));
assertThat(e).hasCauseThat().isEqualTo(exception);
}
private static final TypeAdapter<String> adapter =
new TypeAdapter<>() {
@Override
public void write(JsonWriter out, String value) throws IOException {
out.value(value);
}
@Override
public String read(JsonReader in) throws IOException {
return in.nextString();
}
};
// Note: This test just verifies the current behavior; it is a bit questionable
// whether that behavior is actually desired
@Test
public void testFromJson_Reader_TrailingData() throws IOException {
assertThat(adapter.fromJson(new StringReader("\"a\"1"))).isEqualTo("a");
}
// Note: This test just verifies the current behavior; it is a bit questionable
// whether that behavior is actually desired
@Test
public void testFromJson_String_TrailingData() throws IOException {
assertThat(adapter.fromJson("\"a\"1")).isEqualTo("a");
}
}
|
TypeAdapterTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.