language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 74582,
"end": 74731
} | enum ____ {
FOO, BAZ, BAR
}
@EnableConfigurationProperties
@ConfigurationProperties(prefix = "test", ignoreUnknownFields = false)
static | FooEnum |
java | quarkusio__quarkus | extensions/micrometer/deployment/src/main/java/io/quarkus/micrometer/deployment/binder/NettyBinderProcessor.java | {
"start": 2500,
"end": 2988
} | class ____ implements BooleanSupplier {
abstract MicrometerConfig getMicrometerConfig();
Class<?> metricsClass() {
return NETTY_ALLOCATOR_METRICS_CLASS;
}
abstract Class<?> getCheckClass();
public boolean getAsBoolean() {
return metricsClass() != null && getCheckClass() != null
&& getMicrometerConfig().isEnabled(getMicrometerConfig().binder().netty());
}
}
static | AbstractSupportEnabled |
java | apache__flink | flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/BinaryWriter.java | {
"start": 2316,
"end": 2846
} | class ____ extends BinaryWriter<RowData> {
private BinaryWriterForRow(FixedSizeBinaryVector fixedSizeBinaryVector) {
super(fixedSizeBinaryVector);
}
@Override
boolean isNullAt(RowData in, int ordinal) {
return in.isNullAt(ordinal);
}
@Override
byte[] readBinary(RowData in, int ordinal) {
return in.getBinary(ordinal);
}
}
/** {@link BinaryWriter} for {@link ArrayData} input. */
public static final | BinaryWriterForRow |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/SettableBeanProperty.java | {
"start": 24809,
"end": 25143
} | class ____ is designed to both make it easier to sub-class
* delegating subtypes and to reduce likelihood of breakage when
* new methods are added.
*<p>
* Class was specifically added to help with {@code Afterburner}
* module, but its use is not limited to only support it.
*/
public static abstract | that |
java | spring-projects__spring-boot | module/spring-boot-websocket/src/main/java/org/springframework/boot/websocket/autoconfigure/servlet/WebSocketMessagingAutoConfiguration.java | {
"start": 3266,
"end": 3552
} | class ____ {
@Configuration(proxyBeanMethods = false)
@ConditionalOnBean({ DelegatingWebSocketMessageBrokerConfiguration.class, JsonMapper.class })
@ConditionalOnClass({ JsonMapper.class, AbstractMessageBrokerConfiguration.class })
@Order(0)
static | WebSocketMessagingAutoConfiguration |
java | google__dagger | javatests/dagger/internal/codegen/MembersInjectionTest.java | {
"start": 5003,
"end": 5247
} | class ____ extends Parent {",
" @Inject Child() {}",
"}");
Source parentFile =
CompilerTests.javaSource(
"test.Parent",
"package test;",
"",
"public abstract | Child |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/strategy/VertexwiseSchedulingStrategy.java | {
"start": 10130,
"end": 10804
} | class ____ implements SchedulingStrategyFactory {
private final InputConsumableDecider.Factory inputConsumableDeciderFactory;
public Factory(InputConsumableDecider.Factory inputConsumableDeciderFactory) {
this.inputConsumableDeciderFactory = inputConsumableDeciderFactory;
}
@Override
public SchedulingStrategy createInstance(
final SchedulerOperations schedulerOperations,
final SchedulingTopology schedulingTopology) {
return new VertexwiseSchedulingStrategy(
schedulerOperations, schedulingTopology, inputConsumableDeciderFactory);
}
}
}
| Factory |
java | alibaba__nacos | client/src/main/java/com/alibaba/nacos/client/utils/ValidatorUtils.java | {
"start": 1009,
"end": 1729
} | class ____ {
private static final Pattern CONTEXT_PATH_MATCH = Pattern.compile("(\\/)\\1+");
public static void checkInitParam(NacosClientProperties properties) throws NacosException {
checkContextPath(properties.getProperty(PropertyKeyConst.CONTEXT_PATH));
}
/**
* Check context path.
*
* @param contextPath context path
*/
public static void checkContextPath(String contextPath) {
if (contextPath == null) {
return;
}
Matcher matcher = CONTEXT_PATH_MATCH.matcher(contextPath);
if (matcher.find()) {
throw new IllegalArgumentException("Illegal url path expression");
}
}
}
| ValidatorUtils |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AnnotationPositionTest.java | {
"start": 12510,
"end": 12693
} | class ____ implements Test {}
}
""")
.addOutputLines(
"Test.java",
"""
/** Javadoc! */
sealed @Deprecated | A |
java | google__truth | core/src/test/java/com/google/common/truth/SubjectTest.java | {
"start": 28411,
"end": 28696
} | class ____ {
@SuppressWarnings("EqualsHashCode")
@Override
public boolean equals(@Nullable Object obj) {
throw new UnsupportedOperationException();
// buggy implementation but one that we're working around, at least for now
}
}
static final | ThrowsOnEquals |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/postgresql/ast/expr/PGMacAddrExpr.java | {
"start": 943,
"end": 2598
} | class ____ extends PGExprImpl implements SQLReplaceable {
private SQLExpr value;
public PGMacAddrExpr clone() {
PGMacAddrExpr x = new PGMacAddrExpr();
if (value != null) {
x.setValue(value.clone());
}
return x;
}
public SQLExpr getValue() {
return value;
}
public void setValue(SQLExpr value) {
this.value = value;
}
@Override
public void accept0(PGASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, value);
}
visitor.endVisit(this);
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (this.value == expr) {
setValue(target);
return true;
}
return false;
}
public List<SQLObject> getChildren() {
return Collections.<SQLObject>singletonList(value);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
PGMacAddrExpr other = (PGMacAddrExpr) obj;
if (value == null) {
if (other.value != null) {
return false;
}
} else if (!value.equals(other.value)) {
return false;
}
return true;
}
}
| PGMacAddrExpr |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/naturalid/nullable/D.java | {
"start": 354,
"end": 641
} | class ____ {
@Id
public int oid;
@NaturalId(mutable=true)
public String name;
@NaturalId(mutable=true)
@ManyToOne
public C associatedC;
public D() {
}
public D(int oid, String name, C associatedC) {
this.oid = oid;
this.name = name;
this.associatedC = associatedC;
}
}
| D |
java | google__guava | guava/src/com/google/common/collect/ForwardingNavigableMap.java | {
"start": 10219,
"end": 12027
} | class ____ extends Maps.DescendingMap<K, V> {
/** Constructor for use by subclasses. */
public StandardDescendingMap() {}
@Override
NavigableMap<K, V> forward() {
return ForwardingNavigableMap.this;
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
forward().replaceAll(function);
}
@Override
protected Iterator<Entry<K, V>> entryIterator() {
return new Iterator<Entry<K, V>>() {
private @Nullable Entry<K, V> toRemove = null;
private @Nullable Entry<K, V> nextOrNull = forward().lastEntry();
@Override
public boolean hasNext() {
return nextOrNull != null;
}
@Override
public Entry<K, V> next() {
if (nextOrNull == null) {
throw new NoSuchElementException();
}
try {
return nextOrNull;
} finally {
toRemove = nextOrNull;
nextOrNull = forward().lowerEntry(nextOrNull.getKey());
}
}
@Override
public void remove() {
if (toRemove == null) {
throw new IllegalStateException("no calls to next() since the last call to remove()");
}
forward().remove(toRemove.getKey());
toRemove = null;
}
};
}
}
@Override
public NavigableSet<K> navigableKeySet() {
return delegate().navigableKeySet();
}
/**
* A sensible implementation of {@link NavigableMap#navigableKeySet} in terms of the methods of
* this {@code NavigableMap}. In many cases, you may wish to override {@link
* ForwardingNavigableMap#navigableKeySet} to forward to this implementation or a subclass
* thereof.
*
* @since 12.0
*/
protected | StandardDescendingMap |
java | spring-projects__spring-boot | module/spring-boot-tomcat/src/main/java/org/springframework/boot/tomcat/autoconfigure/WebSocketTomcatWebServerFactoryCustomizer.java | {
"start": 1021,
"end": 1339
} | class ____
implements WebServerFactoryCustomizer<ConfigurableTomcatWebServerFactory> {
@Override
public void customize(ConfigurableTomcatWebServerFactory factory) {
factory.addContextCustomizers((context) -> context.addServletContainerInitializer(new WsSci(), null));
}
}
| WebSocketTomcatWebServerFactoryCustomizer |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java | {
"start": 1122,
"end": 1364
} | interface ____ {
/**
* Creates a new DataEncryptionKey.
*
* @return DataEncryptionKey newly created
* @throws IOException for any error
*/
DataEncryptionKey newDataEncryptionKey() throws IOException;
}
| DataEncryptionKeyFactory |
java | google__dagger | javatests/dagger/producers/internal/AbstractProducerTest.java | {
"start": 1787,
"end": 2076
} | class ____<T> extends AbstractProducer<T> {
private final ListenableFuture<T> delegate;
DelegateProducer(ListenableFuture<T> delegate) {
this.delegate = delegate;
}
@Override
public ListenableFuture<T> compute() {
return delegate;
}
}
}
| DelegateProducer |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ContextResolvedTableSerdeTest.java | {
"start": 22415,
"end": 24410
} | class ____ {
private final SerdeContext ctx =
serdeContext(
TableConfigOptions.CatalogPlanCompilation.SCHEMA,
TableConfigOptions.CatalogPlanRestore.ALL);
@Test
void withPermanentTable() throws Exception {
final Tuple2<JsonNode, ContextResolvedTable> result =
serDe(ctx, PERMANENT_PLAN_CONTEXT_RESOLVED_TABLE);
assertThatJsonContains(result.f0, FIELD_NAME_IDENTIFIER);
assertThatJsonContains(result.f0, FIELD_NAME_CATALOG_TABLE);
assertThatJsonDoesNotContain(
result.f0,
FIELD_NAME_CATALOG_TABLE,
ResolvedCatalogTableJsonSerializer.OPTIONS);
assertThatJsonDoesNotContain(
result.f0,
FIELD_NAME_CATALOG_TABLE,
ResolvedCatalogTableJsonSerializer.COMMENT);
assertThat(result.f1.isPermanent()).isTrue();
assertThat(result.f1.getIdentifier()).isEqualTo(PERMANENT_TABLE_IDENTIFIER);
assertThat(result.f1.getResolvedSchema()).isEqualTo(CATALOG_TABLE_RESOLVED_SCHEMA);
assertThat(
result.f1
.<ResolvedCatalogTable>getResolvedTable()
.getDistribution()
.get())
.isEqualTo(DISTRIBUTION);
assertThat(result.f1.<ResolvedCatalogTable>getResolvedTable().getPartitionKeys())
.isEqualTo(PARTITION_KEYS);
assertThat(result.f1.getResolvedTable().getOptions())
.isEqualTo(RESOLVED_CATALOG_TABLE.getOptions());
}
}
@Nested
@DisplayName("and CatalogPlanRestore == ALL_ENFORCED")
| TestRestoreAll |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/sasl/TInvalidSaslFrameException.java | {
"start": 931,
"end": 1107
} | class ____ extends TSaslNegotiationException {
public TInvalidSaslFrameException(String message) {
super(ErrorType.PROTOCOL_ERROR, message);
}
}
| TInvalidSaslFrameException |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java | {
"start": 590,
"end": 1448
} | class ____ extends AbstractKnnVectorQueryBuilderTestCase {
@Override
DenseVectorFieldMapper.ElementType elementType() {
return DenseVectorFieldMapper.ElementType.FLOAT;
}
@Override
KnnVectorQueryBuilder createKnnVectorQueryBuilder(
String fieldName,
int k,
int numCands,
Float visitPercentage,
RescoreVectorBuilder rescoreVectorBuilder,
Float similarity
) {
float[] vector = new float[vectorDimensions];
for (int i = 0; i < vector.length; i++) {
vector[i] = randomFloat();
}
return new KnnVectorQueryBuilder(fieldName, vector, k, numCands, visitPercentage, rescoreVectorBuilder, similarity);
}
@Override
protected String randomIndexType() {
return randomFrom(ALL_INDEX_TYPES);
}
}
| KnnFloatVectorQueryBuilderTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java | {
"start": 2150,
"end": 4927
} | class ____'t
* exist in the classloader.
*/
@Test
public void testClassDoesntExist() throws Exception {
assertThrows(IllegalArgumentException.class, () -> {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
"org.apache.hadoop.nonexistent");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"dummy://test");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
});
}
/**
* Test that a implementation of JournalManager without a
* (Configuration,URI) constructor throws an exception
*/
@Test
public void testBadConstructor() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
BadConstructorJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"dummy://test");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
fail("Should have failed before this point");
} catch (IllegalArgumentException iae) {
if (!iae.getMessage().contains("Unable to construct journal")) {
fail("Should have failed with unable to construct exception");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that a dummy implementation of JournalManager can
* be initialized on startup
*/
@Test
public void testDummyJournalManager() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
DummyJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, DUMMY_URI);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
assertTrue(DummyJournalManager.shouldPromptCalled);
assertTrue(DummyJournalManager.formatCalled);
assertNotNull(DummyJournalManager.conf);
assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
assertNotNull(DummyJournalManager.nsInfo);
assertEquals(DummyJournalManager.nsInfo.getClusterID(),
cluster.getNameNode().getNamesystem().getClusterId());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
public static | doesn |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ClassInitializationDeadlockTest.java | {
"start": 4967,
"end": 5357
} | class ____ extends A {
private B() {}
public static B create() {
return new B();
}
}
}
""")
.doTest();
}
@Test
public void positivePrivateConstructorFactoryMethodNonStatic() {
testHelper
.addSourceLines(
"A.java",
"""
public | B |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest47.java | {
"start": 982,
"end": 2368
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"select * from abc where model=?";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(2, visitor.getColumns().size());
String text = TestUtils.outputOracle(stmt);
assertEquals("SELECT *"
+ "\nFROM abc"
+ "\nWHERE model = ?", text);
// assertTrue(visitor.getColumns().contains(new TableStat.Column("acduser.vw_acd_info", "xzqh")));
// assertTrue(visitor.getOrderByColumns().contains(new TableStat.Column("employees", "last_name")));
}
}
| OracleSelectTest47 |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/handler/SimpleUrlHandlerMapping.java | {
"start": 997,
"end": 2100
} | interface ____ maps from URLs to request handler beans. Supports both mapping to bean
* instances and mapping to bean names; the latter is required for non-singleton handlers.
*
* <p>The "urlMap" property is suitable for populating the handler map with
* bean references, for example, via the map element in XML bean definitions.
*
* <p>Mappings to bean names can be set via the "mappings" property, in a form
* accepted by the {@code java.util.Properties} class, as follows:
*
* <pre class="code">
* /welcome.html=ticketController
* /show.html=ticketController</pre>
*
* <p>The syntax is {@code PATH=HANDLER_BEAN_NAME}. If the path doesn't begin
* with a slash, one is prepended.
*
* <p>Supports direct matches (given "/test" -> registered "/test") and "*"
* matches (given "/test" -> registered "/t*"). For details on the pattern
* options, see the {@link org.springframework.web.util.pattern.PathPattern}
* javadoc.
* @author Rod Johnson
* @author Juergen Hoeller
* @author Sam Brannen
* @see #setMappings
* @see #setUrlMap
* @see BeanNameUrlHandlerMapping
*/
public | that |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/Slide.java | {
"start": 964,
"end": 1717
} | class ____ creating a sliding window. Sliding windows have a fixed size and slide by a
* specified slide interval. If the slide interval is smaller than the window size, sliding windows
* are overlapping. Thus, an element can be assigned to multiple windows.
*
* <p>For example, a sliding window of size 15 minutes with 5 minutes sliding interval groups
* elements of 15 minutes and evaluates every five minutes. Each element is contained in three
* consecutive window evaluations.
*
* <p>Java Example:
*
* <pre>{@code
* Slide.over("10.minutes").every("5.minutes").on("rowtime").as("w")
* }</pre>
*
* <p>Scala Example:
*
* <pre>{@code
* Slide over 10.minutes every 5.minutes on 'rowtime as 'w
* }</pre>
*/
@PublicEvolving
public final | for |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/cache/AbstractRequestCache.java | {
"start": 1671,
"end": 7550
} | class ____ implements RequestCache {
/**
* Executes and optionally caches a single request.
* <p>
* The caching behavior is determined by the specific implementation of {@link #doCache(Request, Function)}.
* If caching is enabled, the result is retrieved from the cache or computed using the supplier function.
* </p>
*
* @param <REQ> The request type
* @param <REP> The response type
* @param req The request object used as the cache key
* @param supplier The function that provides the response if not cached
* @return The cached or computed response
*/
@Override
@SuppressWarnings("all")
public <REQ extends Request<?>, REP extends Result<REQ>> REP request(REQ req, Function<REQ, REP> supplier) {
CachingSupplier<REQ, REP> cs = doCache(req, supplier);
return cs.apply(req);
}
/**
* Executes and optionally caches a batch of requests.
* <p>
* This method processes a list of requests, utilizing caching where applicable and executing
* only the non-cached requests using the provided supplier function.
* </p>
* <p>
* If any request in the batch fails, a {@link BatchRequestException} is thrown, containing
* details of all failed requests.
* </p>
*
* @param <REQ> The request type
* @param <REP> The response type
* @param reqs List of requests to process
* @param supplier Function to execute the batch of requests
* @return List of results corresponding to the input requests
* @throws BatchRequestException if any request in the batch fails
*/
@Override
@SuppressWarnings("unchecked")
public <REQ extends Request<?>, REP extends Result<REQ>> List<REP> requests(
List<REQ> reqs, Function<List<REQ>, List<REP>> supplier) {
final Map<REQ, Object> nonCachedResults = new HashMap<>();
List<RequestResult<REQ, REP>> allResults = new ArrayList<>(reqs.size());
Function<REQ, REP> individualSupplier = req -> {
synchronized (nonCachedResults) {
while (!nonCachedResults.containsKey(req)) {
try {
nonCachedResults.wait();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
Object val = nonCachedResults.get(req);
if (val instanceof CachingSupplier.AltRes altRes) {
uncheckedThrow(altRes.throwable);
}
return (REP) val;
}
};
List<CachingSupplier<REQ, REP>> suppliers = new ArrayList<>(reqs.size());
List<REQ> nonCached = new ArrayList<>();
for (REQ req : reqs) {
CachingSupplier<REQ, REP> cs = doCache(req, individualSupplier);
suppliers.add(cs);
if (cs.getValue() == null) {
nonCached.add(req);
}
}
if (!nonCached.isEmpty()) {
synchronized (nonCachedResults) {
try {
List<REP> reps = supplier.apply(nonCached);
for (int i = 0; i < reps.size(); i++) {
nonCachedResults.put(nonCached.get(i), reps.get(i));
}
} catch (MavenExecutionException e) {
// If batch request fails, mark all non-cached requests as failed
for (REQ req : nonCached) {
nonCachedResults.put(
req, new CachingSupplier.AltRes(e.getCause())); // Mark as processed but failed
}
} finally {
nonCachedResults.notifyAll();
}
}
}
// Collect results in original order
boolean hasFailures = false;
for (int i = 0; i < reqs.size(); i++) {
REQ req = reqs.get(i);
CachingSupplier<REQ, REP> cs = suppliers.get(i);
try {
REP value = cs.apply(req);
allResults.add(new RequestResult<>(req, value, null));
} catch (Throwable t) {
hasFailures = true;
allResults.add(new RequestResult<>(req, null, t));
}
}
if (hasFailures) {
BatchRequestException exception = new BatchRequestException("One or more requests failed", allResults);
// Add all individual exceptions as suppressed exceptions to preserve stack traces
for (RequestResult<REQ, REP> result : allResults) {
if (result.error() != null) {
exception.addSuppressed(result.error());
}
}
throw exception;
}
return allResults.stream().map(RequestResult::result).toList();
}
/**
* Abstract method to be implemented by subclasses to handle caching logic.
* <p>
* This method is responsible for determining whether a request result should be cached,
* retrieving it from cache if available, or executing the supplier function if necessary.
* </p>
*
* @param <REQ> The request type
* @param <REP> The response type
* @param req The request object
* @param supplier The function that provides the response
* @return A caching supplier that handles caching logic for the request
*/
protected abstract <REQ extends Request<?>, REP extends Result<REQ>> CachingSupplier<REQ, REP> doCache(
REQ req, Function<REQ, REP> supplier);
@SuppressWarnings("unchecked")
protected static <T extends Throwable> void uncheckedThrow(Throwable t) throws T {
throw (T) t; // rely on vacuous cast
}
}
| AbstractRequestCache |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/sql/spi/DdlTypeRegistry.java | {
"start": 6781,
"end": 8927
} | enum ____ array types,
* use {@link #getTypeName(int, Size, Type)} instead
*/
@Deprecated(since = "6.3")
public String getTypeName(int typeCode, Size size) {
return getTypeName( typeCode, size.getLength(), size.getPrecision(), size.getScale() );
}
/**
* Get the SQL type name for the specified {@link java.sql.Types JDBC type code}
* and size, filling in the placemarkers {@code $l}, {@code $p}, and {@code $s}
* with the length, precision, and scale determined by the given {@linkplain Size
* size object}. The returned type name should be of a SQL type large enough to
* accommodate values of the specified size.
*
* @param typeCode the JDBC type code
* @param columnSize an object which determines the length, precision, and scale
* @param type the {@link Type} mapped to the column
*
* @return the associated type name with the smallest capacity that accommodates
* the given size, if available, and the default type name otherwise
*
* @since 6.3
*/
public String getTypeName(int typeCode, Size columnSize, Type type) {
final DdlType descriptor = getDescriptor( typeCode );
if ( descriptor == null ) {
throw new HibernateException(
String.format(
"No type mapping for org.hibernate.type.SqlTypes code: %s (%s)",
typeCode,
JdbcTypeNameMapper.getTypeName( typeCode )
)
);
}
return descriptor.getTypeName( columnSize, type, this );
}
/**
* Get the SQL type name for the specified {@link java.sql.Types JDBC type code}
* and size, filling in the placemarkers {@code $l}, {@code $p}, and {@code $s}
* with the given length, precision, and scale. The returned type name should be
* of a SQL type large enough to accommodate values of the specified size.
*
* @param typeCode the JDBC type code
* @param size the SQL length, if any
* @param precision the SQL precision, if any
* @param scale the SQL scale, if any
*
* @return the associated type name with the smallest capacity that accommodates
* the given size, if available, and the default type name otherwise
*
* @deprecated not appropriate for named | or |
java | google__auto | value/src/test/java/com/google/auto/value/processor/TypeVariablesTest.java | {
"start": 2543,
"end": 3653
} | class ____<T> {
abstract void setFoo(List<T> list);
}
@Test
public void simpleTypeParameter() {
TypeElement source2 = elementUtils.getTypeElement(Source2.class.getCanonicalName());
TypeElement target2 = elementUtils.getTypeElement(Target2.class.getCanonicalName());
List<ExecutableElement> sourceMethods = ElementFilter.methodsIn(source2.getEnclosedElements());
ImmutableMap<ExecutableElement, AnnotatedTypeMirror> types =
TypeVariables.rewriteReturnTypes(typeUtils, sourceMethods, source2, target2);
List<ExecutableElement> targetMethods = ElementFilter.methodsIn(target2.getEnclosedElements());
TypeMirror setFooParameter = targetMethods.get(0).getParameters().get(0).asType();
ExecutableElement getFoo = sourceMethods.get(0);
TypeMirror originalGetFooReturn = getFoo.getReturnType();
TypeMirror rewrittenGetFooReturn = types.get(getFoo).getType();
assertThat(typeUtils.isAssignable(setFooParameter, originalGetFooReturn)).isFalse();
assertThat(typeUtils.isAssignable(setFooParameter, rewrittenGetFooReturn)).isTrue();
}
abstract static | Target2 |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldContainOnlyNulls_create_Test.java | {
"start": 1243,
"end": 2419
} | class ____ {
@Test
void should_create_error_message_with_unexpected_element() {
// GIVEN
ErrorMessageFactory factory = shouldContainOnlyNulls(list("person", null), list("person"));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting actual:%n"
+ " [\"person\", null]%n"
+ "to contain only null elements but some elements were not:%n"
+ " [\"person\"]"));
}
@Test
void should_create_error_message_with_no_any_element() {
// GIVEN
ErrorMessageFactory factory = shouldContainOnlyNulls(list());
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting actual:%n"
+ " []%n"
+ "to contain only null elements but it was empty"));
}
}
| ShouldContainOnlyNulls_create_Test |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SubstituteSurrogateExpressions.java | {
"start": 643,
"end": 1295
} | class ____ extends OptimizerRules.OptimizerExpressionRule<Expression> {
public SubstituteSurrogateExpressions() {
super(OptimizerRules.TransformDirection.UP);
}
@Override
protected Expression rule(Expression e, LogicalOptimizerContext ctx) {
return rule(e);
}
/**
* Perform the actual substitution.
*/
public static Expression rule(Expression e) {
if (e instanceof SurrogateExpression s) {
Expression surrogate = s.surrogate();
if (surrogate != null) {
return surrogate;
}
}
return e;
}
}
| SubstituteSurrogateExpressions |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java | {
"start": 66952,
"end": 76937
} | class ____ extends CoordinatorResponseHandler<OffsetCommitResponse, Void> {
private final Map<TopicPartition, OffsetAndMetadata> offsets;
private OffsetCommitResponseHandler(Map<TopicPartition, OffsetAndMetadata> offsets, Generation generation) {
super(generation);
this.offsets = offsets;
}
@Override
public void handle(OffsetCommitResponse commitResponse, RequestFuture<Void> future) {
coordinatorMetrics.commitSensor.record(response.requestLatencyMs());
Set<String> unauthorizedTopics = new HashSet<>();
for (OffsetCommitResponseData.OffsetCommitResponseTopic topic : commitResponse.data().topics()) {
for (OffsetCommitResponseData.OffsetCommitResponsePartition partition : topic.partitions()) {
TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
OffsetAndMetadata offsetAndMetadata = this.offsets.get(tp);
long offset = offsetAndMetadata.offset();
Errors error = Errors.forCode(partition.errorCode());
if (error == Errors.NONE) {
log.debug("Committed offset {} for partition {}", offset, tp);
} else {
if (error.exception() instanceof RetriableException) {
log.warn("Offset commit failed on partition {} at offset {}: {}", tp, offset, error.message());
} else {
log.error("Offset commit failed on partition {} at offset {}: {}", tp, offset, error.message());
}
if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId));
return;
} else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
unauthorizedTopics.add(tp.topic());
} else if (error == Errors.OFFSET_METADATA_TOO_LARGE
|| error == Errors.INVALID_COMMIT_OFFSET_SIZE) {
// raise the error to the user
future.raise(error);
return;
} else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS
|| error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
// just retry
future.raise(error);
return;
} else if (error == Errors.COORDINATOR_NOT_AVAILABLE
|| error == Errors.NOT_COORDINATOR
|| error == Errors.REQUEST_TIMED_OUT) {
markCoordinatorUnknown(error);
future.raise(error);
return;
} else if (error == Errors.FENCED_INSTANCE_ID) {
log.info("OffsetCommit failed with {} due to group instance id {} fenced", sentGeneration, rebalanceConfig.groupInstanceId);
// if the generation has changed or we are not in rebalancing, do not raise the fatal error but rebalance-in-progress
if (generationUnchanged()) {
future.raise(error);
} else {
KafkaException exception;
synchronized (ConsumerCoordinator.this) {
if (ConsumerCoordinator.this.state == MemberState.PREPARING_REBALANCE) {
exception = new RebalanceInProgressException("Offset commit cannot be completed since the " +
"consumer member's old generation is fenced by its group instance id, it is possible that " +
"this consumer has already participated another rebalance and got a new generation");
} else {
exception = new CommitFailedException();
}
}
future.raise(exception);
}
return;
} else if (error == Errors.REBALANCE_IN_PROGRESS) {
/* Consumer should not try to commit offset in between join-group and sync-group,
* and hence on broker-side it is not expected to see a commit offset request
* during CompletingRebalance phase; if it ever happens then broker would return
* this error to indicate that we are still in the middle of a rebalance.
* In this case we would throw a RebalanceInProgressException,
* request re-join but do not reset generations. If the callers decide to retry they
* can go ahead and call poll to finish up the rebalance first, and then try commit again.
*/
requestRejoin("offset commit failed since group is already rebalancing");
future.raise(new RebalanceInProgressException("Offset commit cannot be completed since the " +
"consumer group is executing a rebalance at the moment. You can try completing the rebalance " +
"by calling poll() and then retry commit again"));
return;
} else if (error == Errors.UNKNOWN_MEMBER_ID
|| error == Errors.ILLEGAL_GENERATION) {
log.info("OffsetCommit failed with {}: {}", sentGeneration, error.message());
// only need to reset generation and re-join group if generation has not changed or we are not in rebalancing;
// otherwise only raise rebalance-in-progress error
KafkaException exception;
synchronized (ConsumerCoordinator.this) {
if (!generationUnchanged() && ConsumerCoordinator.this.state == MemberState.PREPARING_REBALANCE) {
exception = new RebalanceInProgressException("Offset commit cannot be completed since the " +
"consumer member's generation is already stale, meaning it has already participated another rebalance and " +
"got a new generation. You can try completing the rebalance by calling poll() and then retry commit again");
} else {
// don't reset generation member ID when ILLEGAL_GENERATION, since the member might be still valid
resetStateOnResponseError(ApiKeys.OFFSET_COMMIT, error, error != Errors.ILLEGAL_GENERATION);
exception = new CommitFailedException();
}
}
future.raise(exception);
return;
} else {
future.raise(new KafkaException("Unexpected error in commit: " + error.message()));
return;
}
}
}
}
if (!unauthorizedTopics.isEmpty()) {
log.error("Not authorized to commit to topics {}", unauthorizedTopics);
future.raise(new TopicAuthorizationException(unauthorizedTopics));
} else {
future.complete(null);
}
}
}
/**
* Fetch the committed offsets for a set of partitions. This is a non-blocking call. The
* returned future can be polled to get the actual offsets returned from the broker.
*
* @param partitions The set of partitions to get offsets for.
* @return A request future containing the committed offsets.
*/
private RequestFuture<Map<TopicPartition, OffsetAndMetadata>> sendOffsetFetchRequest(Set<TopicPartition> partitions) {
Node coordinator = checkAndGetCoordinator();
if (coordinator == null)
return RequestFuture.coordinatorNotAvailable();
log.debug("Fetching committed offsets for partitions: {}", partitions);
// construct the request
List<OffsetFetchRequestData.OffsetFetchRequestTopics> topics = partitions.stream()
.collect(Collectors.groupingBy(TopicPartition::topic))
.entrySet()
.stream()
.map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics()
.setName(entry.getKey())
.setPartitionIndexes(entry.getValue().stream()
.map(TopicPartition::partition)
.collect(Collectors.toList())))
.collect(Collectors.toList());
OffsetFetchRequest.Builder requestBuilder = OffsetFetchRequest.Builder.forTopicNames(
new OffsetFetchRequestData()
.setRequireStable(true)
.setGroups(List.of(
new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId(this.rebalanceConfig.groupId)
.setTopics(topics))),
throwOnFetchStableOffsetsUnsupported);
// send the request with a callback
return client.send(coordinator, requestBuilder)
.compose(new OffsetFetchResponseHandler());
}
private | OffsetCommitResponseHandler |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java | {
"start": 3865,
"end": 3959
} | interface ____ {
String evaluate(BytesReference query);
}
}
| DlsQueryEvaluationContext |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/annotation/REntity.java | {
"start": 2313,
"end": 2561
} | class ____ extends BaseCodec {
@Override
public Decoder<Object> getValueDecoder() {
return null;
}
@Override
public Encoder getValueEncoder() {
return null;
}
}
}
| DEFAULT |
java | apache__camel | components/camel-kafka/src/test/java/org/apache/camel/component/kafka/MockProducerInterceptor.java | {
"start": 1081,
"end": 1726
} | class ____ implements ProducerInterceptor<String, String> {
public static final ArrayList<ProducerRecord<String, String>> recordsCaptured = new ArrayList<>();
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> producerRecord) {
recordsCaptured.add(producerRecord);
return producerRecord;
}
@Override
public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {
// noop
}
@Override
public void close() {
// noop
}
@Override
public void configure(Map<String, ?> map) {
// noop
}
}
| MockProducerInterceptor |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTest.java | {
"start": 53496,
"end": 53771
} | class ____<IN> implements KeySelector<IN, IN> {
private static final long serialVersionUID = -3555913664416688425L;
@Override
public IN getKey(IN value) throws Exception {
return value;
}
}
private static | IdentityKeySelector |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateUpdateTask.java | {
"start": 1071,
"end": 3016
} | class ____ extends ReservedStateUpdateTask<ReservedClusterStateHandler<?>> {
public ReservedClusterStateUpdateTask(
String namespace,
ReservedStateChunk stateChunk,
ReservedStateVersionCheck versionCheck,
Map<String, ReservedClusterStateHandler<?>> handlers,
SequencedCollection<String> updateSequence,
Consumer<ErrorState> errorReporter,
ActionListener<ActionResponse.Empty> listener
) {
super(namespace, stateChunk, versionCheck, handlers, updateSequence, errorReporter, listener);
}
@Override
protected Optional<ProjectId> projectId() {
return Optional.empty();
}
@Override
protected TransformState transform(ReservedClusterStateHandler<?> handler, Object state, TransformState transformState)
throws Exception {
return ReservedClusterStateService.transform(handler, state, transformState);
}
@Override
protected ClusterState remove(ReservedClusterStateHandler<?> handler, TransformState prevState) throws Exception {
return ReservedClusterStateService.remove(handler, prevState);
}
@Override
ClusterState execute(ClusterState currentState) {
if (currentState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
// If cluster state has become blocked, this task was submitted while the node was master but is now not master.
// The new master will re-read file settings, so whatever update was to be written here will be handled
// by the new master.
return currentState;
}
var result = execute(currentState, currentState.getMetadata().reservedStateMetadata());
if (result == null) {
return currentState;
}
return ClusterState.builder(result.v1()).metadata(Metadata.builder(result.v1().metadata()).put(result.v2())).build();
}
}
| ReservedClusterStateUpdateTask |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/sqldefault/DefaultTest.java | {
"start": 1953,
"end": 2290
} | class ____ {
@Id
private BigDecimal unitPrice;
@Id @ColumnDefault(value = "1")
private int quantity;
@Generated
@ColumnDefault(value = "'new'")
private String status;
public OrderLine() {}
public OrderLine(BigDecimal unitPrice, int quantity) {
this.unitPrice = unitPrice;
this.quantity = quantity;
}
}
}
| OrderLine |
java | apache__camel | components/camel-braintree/src/generated/java/org/apache/camel/component/braintree/DocumentUploadGatewayEndpointConfigurationConfigurer.java | {
"start": 750,
"end": 7500
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("AccessToken", java.lang.String.class);
map.put("ApiName", org.apache.camel.component.braintree.internal.BraintreeApiName.class);
map.put("Environment", java.lang.String.class);
map.put("HttpLogLevel", java.lang.String.class);
map.put("HttpLogName", java.lang.String.class);
map.put("HttpReadTimeout", java.lang.Integer.class);
map.put("LogHandlerEnabled", boolean.class);
map.put("MerchantId", java.lang.String.class);
map.put("MethodName", java.lang.String.class);
map.put("PrivateKey", java.lang.String.class);
map.put("ProxyHost", java.lang.String.class);
map.put("ProxyPort", java.lang.Integer.class);
map.put("PublicKey", java.lang.String.class);
map.put("Request", com.braintreegateway.DocumentUploadRequest.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.braintree.DocumentUploadGatewayEndpointConfiguration target = (org.apache.camel.component.braintree.DocumentUploadGatewayEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": target.setAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.braintree.internal.BraintreeApiName.class, value)); return true;
case "environment": target.setEnvironment(property(camelContext, java.lang.String.class, value)); return true;
case "httploglevel":
case "httpLogLevel": target.setHttpLogLevel(property(camelContext, java.lang.String.class, value)); return true;
case "httplogname":
case "httpLogName": target.setHttpLogName(property(camelContext, java.lang.String.class, value)); return true;
case "httpreadtimeout":
case "httpReadTimeout": target.setHttpReadTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "loghandlerenabled":
case "logHandlerEnabled": target.setLogHandlerEnabled(property(camelContext, boolean.class, value)); return true;
case "merchantid":
case "merchantId": target.setMerchantId(property(camelContext, java.lang.String.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "privatekey":
case "privateKey": target.setPrivateKey(property(camelContext, java.lang.String.class, value)); return true;
case "proxyhost":
case "proxyHost": target.setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": target.setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "publickey":
case "publicKey": target.setPublicKey(property(camelContext, java.lang.String.class, value)); return true;
case "request": target.setRequest(property(camelContext, com.braintreegateway.DocumentUploadRequest.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return java.lang.String.class;
case "apiname":
case "apiName": return org.apache.camel.component.braintree.internal.BraintreeApiName.class;
case "environment": return java.lang.String.class;
case "httploglevel":
case "httpLogLevel": return java.lang.String.class;
case "httplogname":
case "httpLogName": return java.lang.String.class;
case "httpreadtimeout":
case "httpReadTimeout": return java.lang.Integer.class;
case "loghandlerenabled":
case "logHandlerEnabled": return boolean.class;
case "merchantid":
case "merchantId": return java.lang.String.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "privatekey":
case "privateKey": return java.lang.String.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "publickey":
case "publicKey": return java.lang.String.class;
case "request": return com.braintreegateway.DocumentUploadRequest.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.braintree.DocumentUploadGatewayEndpointConfiguration target = (org.apache.camel.component.braintree.DocumentUploadGatewayEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return target.getAccessToken();
case "apiname":
case "apiName": return target.getApiName();
case "environment": return target.getEnvironment();
case "httploglevel":
case "httpLogLevel": return target.getHttpLogLevel();
case "httplogname":
case "httpLogName": return target.getHttpLogName();
case "httpreadtimeout":
case "httpReadTimeout": return target.getHttpReadTimeout();
case "loghandlerenabled":
case "logHandlerEnabled": return target.isLogHandlerEnabled();
case "merchantid":
case "merchantId": return target.getMerchantId();
case "methodname":
case "methodName": return target.getMethodName();
case "privatekey":
case "privateKey": return target.getPrivateKey();
case "proxyhost":
case "proxyHost": return target.getProxyHost();
case "proxyport":
case "proxyPort": return target.getProxyPort();
case "publickey":
case "publicKey": return target.getPublicKey();
case "request": return target.getRequest();
default: return null;
}
}
}
| DocumentUploadGatewayEndpointConfigurationConfigurer |
java | apache__dubbo | dubbo-plugin/dubbo-native/src/test/java/org/apache/dubbo/aot/generate/ResourcePatternDescriberTest.java | {
"start": 985,
"end": 1432
} | class ____ {
@Test
public void testToRegex() {
ResourcePatternDescriber describer = new ResourcePatternDescriber(
"META-INF/dubbo/internal/org.apache.dubbo.common.extension.ExtensionInjector", null);
Assertions.assertEquals(
"\\QMETA-INF/dubbo/internal/org.apache.dubbo.common.extension.ExtensionInjector\\E",
describer.toRegex().toString());
}
}
| ResourcePatternDescriberTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/resulttype/ErroneousFruitMapper.java | {
"start": 389,
"end": 589
} | interface ____ {
ErroneousFruitMapper INSTANCE = Mappers.getMapper( ErroneousFruitMapper.class );
@Mapping(target = "type", ignore = true)
Fruit map(FruitDto source);
}
| ErroneousFruitMapper |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/codecs/Codec.java | {
"start": 454,
"end": 1302
} | interface ____ {
/**
* Checks if the current codec can handle the serialization and deserialization of object from the given type.
*
* @param clazz the type, cannot be {@code null}
* @return {@code true} if the codec can handle the type, {@code false} otherwise
*/
boolean canHandle(Type clazz);
/**
* Encodes the given object.
* The type of the given object matches the type used to call the {@link #canHandle(Type)} method.
*
* @param item the item
* @return the encoded content
*/
byte[] encode(Object item);
/**
* Decodes the given bytes to an object.
* The codec must return an instance of the type used to call the {@link #canHandle(Type)} method.
*
* @param item the bytes
* @return the object
*/
Object decode(byte[] item);
}
| Codec |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/dev/devservices/ImageName.java | {
"start": 3243,
"end": 3363
} | class ____ extends Version {
public Any() {
super("latest");
}
}
}
}
| Any |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/filters/accesslog/DefaultAccessLogReceiver.java | {
"start": 1784,
"end": 12059
} | class ____ implements AccessLogReceiver, Runnable, Closeable {
private static final Logger log = Logger.getLogger(DefaultAccessLogReceiver.class);
private static final String DEFAULT_LOG_SUFFIX = "log";
private static final String DOT = ".";
private final Executor logWriteExecutor;
private final Deque<String> pendingMessages;
//0 = not running
//1 = queued
//2 = running
@SuppressWarnings("unused")
private volatile int state = 0;
private static final AtomicIntegerFieldUpdater<DefaultAccessLogReceiver> stateUpdater = AtomicIntegerFieldUpdater
.newUpdater(DefaultAccessLogReceiver.class, "state");
private long changeOverPoint;
private String currentDateString;
private boolean forceLogRotation;
private final Path outputDirectory;
private final Path defaultLogFile;
private final String logBaseName;
private final String logNameSuffix; // always starts with a '.' character
private Writer writer = null;
private volatile boolean closed = false;
private boolean initialRun = true;
private final boolean rotate;
private final LogFileHeaderGenerator fileHeaderGenerator;
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final File outputDirectory, final String logBaseName) {
this(logWriteExecutor, outputDirectory.toPath(), logBaseName, null);
}
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final File outputDirectory, final String logBaseName,
final String logNameSuffix) {
this(logWriteExecutor, outputDirectory.toPath(), logBaseName, logNameSuffix, true);
}
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final File outputDirectory, final String logBaseName,
final String logNameSuffix, boolean rotate) {
this(logWriteExecutor, outputDirectory.toPath(), logBaseName, logNameSuffix, rotate);
}
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final Path outputDirectory, final String logBaseName) {
this(logWriteExecutor, outputDirectory, logBaseName, null);
}
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final Path outputDirectory, final String logBaseName,
final String logNameSuffix) {
this(logWriteExecutor, outputDirectory, logBaseName, logNameSuffix, true);
}
public DefaultAccessLogReceiver(final Executor logWriteExecutor, final Path outputDirectory, final String logBaseName,
final String logNameSuffix, boolean rotate) {
this(logWriteExecutor, outputDirectory, logBaseName, logNameSuffix, rotate, null);
}
private DefaultAccessLogReceiver(final Executor logWriteExecutor, final Path outputDirectory, final String logBaseName,
final String logNameSuffix, boolean rotate, LogFileHeaderGenerator fileHeader) {
this.logWriteExecutor = logWriteExecutor;
this.outputDirectory = outputDirectory;
this.logBaseName = effectiveLogBaseName(logBaseName);
this.rotate = rotate;
this.fileHeaderGenerator = fileHeader;
this.logNameSuffix = effectiveLogNameSuffix(logNameSuffix);
this.pendingMessages = new ConcurrentLinkedDeque<>();
this.defaultLogFile = outputDirectory.resolve(this.logBaseName + this.logNameSuffix);
calculateChangeOverPoint();
}
private String effectiveLogBaseName(String logBaseName) {
if (logBaseName == null) {
return "";
}
if (!logBaseName.endsWith(DOT)) {
return logBaseName;
}
return logBaseName.substring(0, logBaseName.length() - 1);
}
private static String effectiveLogNameSuffix(String logNameSuffix) {
var result = (logNameSuffix != null) ? logNameSuffix : DEFAULT_LOG_SUFFIX;
if (result.charAt(0) != '.') {
return '.' + result;
}
return result;
}
private void calculateChangeOverPoint() {
Calendar calendar = Calendar.getInstance();
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.add(Calendar.DATE, 1);
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd", Locale.US);
currentDateString = df.format(new Date());
// if there is an existing default log file, use the date last modified instead of the current date
if (Files.exists(defaultLogFile)) {
try {
currentDateString = df.format(new Date(Files.getLastModifiedTime(defaultLogFile).toMillis()));
} catch (IOException e) {
// ignore. use the current date if exception happens.
}
}
changeOverPoint = calendar.getTimeInMillis();
}
@Override
public void logMessage(final String message) {
this.pendingMessages.add(message);
int state = stateUpdater.get(this);
if (state == 0) {
if (stateUpdater.compareAndSet(this, 0, 1)) {
logWriteExecutor.execute(this);
}
}
}
/**
* processes all queued log messages
*/
@Override
public void run() {
if (!stateUpdater.compareAndSet(this, 1, 2)) {
return;
}
if (forceLogRotation) {
doRotate();
} else if (initialRun && Files.exists(defaultLogFile)) {
//if there is an existing log file check if it should be rotated
long lm = 0;
try {
lm = Files.getLastModifiedTime(defaultLogFile).toMillis();
} catch (IOException e) {
log.error("Error rotating access log", e);
}
Calendar c = Calendar.getInstance();
c.setTimeInMillis(changeOverPoint);
c.add(Calendar.DATE, -1);
if (lm <= c.getTimeInMillis()) {
doRotate();
}
}
initialRun = false;
List<String> messages = new ArrayList<>();
String msg;
//only grab at most 1000 messages at a time
for (int i = 0; i < 1000; ++i) {
msg = pendingMessages.poll();
if (msg == null) {
break;
}
messages.add(msg);
}
try {
if (!messages.isEmpty()) {
writeMessage(messages);
}
} finally {
stateUpdater.set(this, 0);
//check to see if there is still more messages
//if so then run this again
if (!pendingMessages.isEmpty() || forceLogRotation) {
if (stateUpdater.compareAndSet(this, 0, 1)) {
logWriteExecutor.execute(this);
}
} else if (closed) {
try {
if (writer != null) {
writer.flush();
writer.close();
writer = null;
}
} catch (IOException e) {
log.error("Error writing access log", e);
}
}
}
}
/**
* For tests only. Blocks the current thread until all messages are written
* Just does a busy wait.
* <p/>
* DO NOT USE THIS OUTSIDE OF A TEST
*/
void awaitWrittenForTest() throws InterruptedException {
while (!pendingMessages.isEmpty() || forceLogRotation) {
Thread.sleep(10);
}
while (state != 0) {
Thread.sleep(10);
}
}
private void writeMessage(final List<String> messages) {
if (System.currentTimeMillis() > changeOverPoint) {
doRotate();
}
try {
if (writer == null) {
boolean created = !Files.exists(defaultLogFile);
writer = Files.newBufferedWriter(defaultLogFile, StandardCharsets.UTF_8, StandardOpenOption.APPEND,
StandardOpenOption.CREATE);
if (Files.size(defaultLogFile) == 0 && fileHeaderGenerator != null) {
String header = fileHeaderGenerator.generateHeader();
if (header != null) {
writer.write(header);
writer.write("\n");
writer.flush();
}
}
}
for (String message : messages) {
writer.write(message);
writer.write('\n');
}
writer.flush();
} catch (IOException e) {
log.error("Error writing access log", e);
}
}
private void doRotate() {
forceLogRotation = false;
if (!rotate) {
return;
}
try {
if (writer != null) {
writer.flush();
writer.close();
writer = null;
}
if (!Files.exists(defaultLogFile)) {
return;
}
Path newFile = outputDirectory.resolve(logBaseName + DOT + currentDateString + logNameSuffix);
int count = 0;
while (Files.exists(newFile)) {
++count;
newFile = outputDirectory.resolve(logBaseName + DOT + currentDateString + "-" + count + logNameSuffix);
}
Files.move(defaultLogFile, newFile);
} catch (IOException e) {
log.error("Error rotating access log", e);
} finally {
calculateChangeOverPoint();
}
}
/**
* forces a log rotation. This rotation is performed in an async manner, you cannot rely on the rotation
* being performed immediately after this method returns.
*/
public void rotate() {
forceLogRotation = true;
if (stateUpdater.compareAndSet(this, 0, 1)) {
logWriteExecutor.execute(this);
}
}
@Override
public void close() throws IOException {
closed = true;
if (stateUpdater.compareAndSet(this, 0, 1)) {
logWriteExecutor.execute(this);
}
}
public static Builder builder() {
return new Builder();
}
public static | DefaultAccessLogReceiver |
java | netty__netty | example/src/main/java/io/netty/example/redis/RedisClient.java | {
"start": 1531,
"end": 4202
} | class ____ {
private static final String HOST = System.getProperty("host", "127.0.0.1");
private static final int PORT = Integer.parseInt(System.getProperty("port", "6379"));
public static void main(String[] args) throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
Bootstrap b = new Bootstrap();
b.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(new RedisDecoder());
p.addLast(new RedisBulkStringAggregator());
p.addLast(new RedisArrayAggregator());
p.addLast(new RedisEncoder());
p.addLast(new RedisClientHandler());
}
});
// Start the connection attempt.
Channel ch = b.connect(HOST, PORT).sync().channel();
// Read commands from the stdin.
System.out.println("Enter Redis commands (quit to end)");
ChannelFuture lastWriteFuture = null;
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
for (;;) {
final String input = in.readLine();
final String line = input != null ? input.trim() : null;
if (line == null || "quit".equalsIgnoreCase(line)) { // EOF or "quit"
ch.close().sync();
break;
} else if (line.isEmpty()) { // skip `enter` or `enter` with spaces.
continue;
}
// Sends the received line to the server.
lastWriteFuture = ch.writeAndFlush(line);
lastWriteFuture.addListener(new GenericFutureListener<ChannelFuture>() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
System.err.print("write failed: ");
future.cause().printStackTrace(System.err);
}
}
});
}
// Wait until all messages are flushed before closing the channel.
if (lastWriteFuture != null) {
lastWriteFuture.sync();
}
} finally {
group.shutdownGracefully();
}
}
}
| RedisClient |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/RedisAuthenticationHandler.java | {
"start": 13675,
"end": 14825
} | class ____<K, V> extends RedisAuthenticationHandler<K, V> {
public DisabledAuthenticationHandler(StatefulRedisConnectionImpl<K, V> connection,
RedisCredentialsProvider credentialsProvider, Boolean isPubSubConnection) {
super(null, null, null);
}
public DisabledAuthenticationHandler() {
super(null, null, null);
}
@Override
protected void postProcess(RedisCommand<K, V, ?> toSend) {
// No-op
}
@Override
protected void postProcess(Collection<? extends RedisCommand<K, V, ?>> dispatched) {
// No-op
}
@Override
public void startTransaction() {
// No-op
}
@Override
public void endTransaction() {
// No-op
}
@Override
public void setCredentials(RedisCredentials credentials) {
// No-op
}
@Override
public void unsubscribe() {
// No-op
}
@Override
public void subscribe() {
// No-op
}
}
}
| DisabledAuthenticationHandler |
java | alibaba__nacos | config/src/test/java/com/alibaba/nacos/config/server/utils/SimpleIpFlowDataTest.java | {
"start": 763,
"end": 1846
} | class ____ {
@Test
void testIncrementAndGet() {
SimpleIpFlowData simpleIpFlowData = new SimpleIpFlowData(5, 10000);
assertEquals(1, simpleIpFlowData.incrementAndGet("127.0.0.1"));
assertEquals(2, simpleIpFlowData.incrementAndGet("127.0.0.1"));
assertEquals(3, simpleIpFlowData.incrementAndGet("127.0.0.1"));
assertEquals(1, simpleIpFlowData.incrementAndGet("127.0.0.2"));
assertEquals(2, simpleIpFlowData.incrementAndGet("127.0.0.2"));
}
@Test
void testGetCurrentCount() {
SimpleIpFlowData simpleIpFlowData = new SimpleIpFlowData(3, 10000);
simpleIpFlowData.incrementAndGet("127.0.0.1");
simpleIpFlowData.incrementAndGet("127.0.0.1");
simpleIpFlowData.incrementAndGet("127.0.0.1");
assertEquals(3, simpleIpFlowData.getCurrentCount("127.0.0.1"));
simpleIpFlowData.rotateSlot();
assertEquals(0, simpleIpFlowData.getCurrentCount("127.0.0.1"));
assertEquals(1, simpleIpFlowData.getAverageCount());
}
}
| SimpleIpFlowDataTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/ConstructorErrorTest.java | {
"start": 189,
"end": 512
} | class ____ extends TestCase {
public void test_error() throws Exception {
Exception error = null;
try {
JSON.parseObject("{}", Model.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public static | ConstructorErrorTest |
java | apache__camel | components/camel-as2/camel-as2-component/src/test/java/org/apache/camel/component/as2/AS2ClientManagerIT.java | {
"start": 4903,
"end": 4990
} | class ____ {@link org.apache.camel.component.as2.api.AS2ClientManager} APIs.
*/
public | for |
java | apache__camel | components/camel-ai/camel-langchain4j-agent-api/src/main/java/org/apache/camel/component/langchain4j/agent/api/AiAgentWithoutMemoryService.java | {
"start": 1011,
"end": 1061
} | interface ____ LangChain4j integration.
*/
public | for |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestPostProcessorsOAuth2ClientTests.java | {
"start": 8073,
"end": 8912
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((authz) -> authz
.anyRequest().permitAll()
)
.oauth2Client(withDefaults());
return http.build();
// @formatter:on
}
@Bean
OAuth2AuthorizedClientManager authorizedClientManager(ClientRegistrationRepository clients,
OAuth2AuthorizedClientRepository authorizedClients) {
return new DefaultOAuth2AuthorizedClientManager(clients, authorizedClients);
}
@Bean
ClientRegistrationRepository clientRegistrationRepository() {
return mock(ClientRegistrationRepository.class);
}
@Bean
OAuth2AuthorizedClientRepository authorizedClientRepository() {
return mock(OAuth2AuthorizedClientRepository.class);
}
@RestController
static | OAuth2ClientConfig |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/LongTopNSetTests.java | {
"start": 428,
"end": 1321
} | class ____ extends TopNSetTestCase<LongTopNSet, Long> {
@Override
protected LongTopNSet build(BigArrays bigArrays, SortOrder sortOrder, int limit) {
return new LongTopNSet(bigArrays, sortOrder, limit);
}
@Override
protected Long randomValue() {
return randomLong();
}
@Override
protected List<Long> threeSortedValues() {
return List.of(Long.MIN_VALUE, randomLong(), Long.MAX_VALUE);
}
@Override
protected void collect(LongTopNSet sort, Long value) {
sort.collect(value);
}
@Override
protected void reduceLimitByOne(LongTopNSet sort) {
sort.reduceLimitByOne();
}
@Override
protected Long getWorstValue(LongTopNSet sort) {
return sort.getWorstValue();
}
@Override
protected int getCount(LongTopNSet sort) {
return sort.getCount();
}
}
| LongTopNSetTests |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/dev/testing/JunitTestRunner.java | {
"start": 39797,
"end": 41222
} | class ____ an empty array, for some reason (plus it saves rediscovery effort)
String classPath = moduleInfo.getMain()
.getClassesPath() + File.pathSeparator + moduleInfo.getTest().get().getClassesPath();
classLoaderForLoadingTests = (ClassLoader) constructor.newInstance(Thread.currentThread()
.getContextClassLoader(), true, testApplication, profiles, quarkusTestClassesForFacadeClassLoader,
classPath);
// We only want to close classloaders if they're facade loaders we made, so squirrel away an instance to close on this path
classLoaderToClose = (Closeable) classLoaderForLoadingTests;
Thread.currentThread()
.setContextClassLoader(classLoaderForLoadingTests);
} catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException
| InvocationTargetException e) {
// If the first deployment classloader cannot load a facade classloader, don't keep using it, let the next module provide one
firstDeploymentClassLoader = null;
// This is fine, and usually just means that test-framework/junit5 isn't one of the project dependencies
// In that case, fallback to loading classes as we normally would, using a TCCL
log.debug(
"Could not load | returns |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/text/StrLookup.java | {
"start": 1860,
"end": 2021
} | class ____<V> {
/**
* Lookup implementation that uses a Map.
*
* @param <V> the type of mapped values.
*/
private static final | StrLookup |
java | netty__netty | transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollChannelConfigTest.java | {
"start": 1159,
"end": 3557
} | class ____ {
@Test
public void testOptionGetThrowsChannelException() throws Exception {
Epoll.ensureAvailability();
EpollSocketChannel channel = new EpollSocketChannel();
channel.config().getSoLinger();
channel.fd().close();
try {
channel.config().getSoLinger();
fail();
} catch (ChannelException e) {
// expected
}
}
@Test
public void testOptionSetThrowsChannelException() throws Exception {
Epoll.ensureAvailability();
EpollSocketChannel channel = new EpollSocketChannel();
channel.config().setKeepAlive(true);
channel.fd().close();
try {
channel.config().setKeepAlive(true);
fail();
} catch (ChannelException e) {
// expected
}
}
@Test
public void testIntegerOption() throws Exception {
Epoll.ensureAvailability();
EpollSocketChannel channel = new EpollSocketChannel();
IntegerUnixChannelOption opt = new IntegerUnixChannelOption("INT_OPT", 1, 2);
Integer zero = 0;
assertEquals(zero, channel.config().getOption(opt));
channel.config().setOption(opt, 1);
assertNotEquals(zero, channel.config().getOption(opt));
channel.fd().close();
}
@Test
public void testRawOption() throws Exception {
Epoll.ensureAvailability();
EpollSocketChannel channel = new EpollSocketChannel();
// Value for SOL_SOCKET and SO_REUSEADDR
// See https://github.com/torvalds/linux/blob/v5.17/include/uapi/asm-generic/socket.h
RawUnixChannelOption opt = new RawUnixChannelOption("RAW_OPT", 1, 2, 4);
CleanableDirectBuffer disabledCleanable = Buffer.allocateDirectBufferWithNativeOrder(4);
ByteBuffer disabled = disabledCleanable.buffer();
disabled.putInt(0).flip();
assertEquals(disabled, channel.config().getOption(opt));
CleanableDirectBuffer enabledCleanable = Buffer.allocateDirectBufferWithNativeOrder(4);
ByteBuffer enabled = enabledCleanable.buffer();
enabled.putInt(1).flip();
channel.config().setOption(opt, enabled);
assertNotEquals(disabled, channel.config().getOption(opt));
channel.fd().close();
disabledCleanable.clean();
enabledCleanable.clean();
}
}
| EpollChannelConfigTest |
java | google__guava | android/guava/src/com/google/common/util/concurrent/Monitor.java | {
"start": 6535,
"end": 7453
} | class ____<V> {
* private V value;
* private final Monitor monitor = new Monitor();
* private final Monitor.Guard valuePresent = monitor.newGuard(() -> value != null);
* private final Monitor.Guard valueAbsent = monitor.newGuard(() -> value == null);
*
* public V get() throws InterruptedException {
* monitor.enterWhen(valuePresent);
* try {
* V result = value;
* value = null;
* return result;
* } finally {
* monitor.leave();
* }
* }
*
* public void set(V newValue) throws InterruptedException {
* monitor.enterWhen(valueAbsent);
* try {
* value = newValue;
* } finally {
* monitor.leave();
* }
* }
* }
* }
*
* @author Justin T. Sampson
* @author Martin Buchholz
* @since 10.0
*/
@J2ktIncompatible
@GwtIncompatible
@SuppressWarnings("GuardedBy") // TODO(b/35466881): Fix or suppress.
public final | SafeBox |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/consumer/listener/ConsumeReturnType.java | {
"start": 865,
"end": 1198
} | enum ____ {
/**
* consume return success
*/
SUCCESS,
/**
* consume timeout ,even if success
*/
TIME_OUT,
/**
* consume throw exception
*/
EXCEPTION,
/**
* consume return null
*/
RETURNNULL,
/**
* consume return failed
*/
FAILED
}
| ConsumeReturnType |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithOneQuery.java | {
"start": 6181,
"end": 6622
} | class ____ extends LdapGroupsMapping {
private boolean secondaryQueryCalled = false;
public boolean isSecondaryQueryCalled() {
return secondaryQueryCalled;
}
Set<String> lookupGroup(SearchResult result, DirContext c,
int goUpHierarchy) throws NamingException {
secondaryQueryCalled = true;
return super.lookupGroup(result, c, goUpHierarchy);
}
}
}
| TestLdapGroupsMapping |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/client/HttpClientErrorException.java | {
"start": 11624,
"end": 12392
} | class ____ extends HttpClientErrorException {
private UnprocessableContent(String statusText, HttpHeaders headers, byte @Nullable [] body, @Nullable Charset charset) {
super(HttpStatus.UNPROCESSABLE_CONTENT, statusText, headers, body, charset);
}
private UnprocessableContent(String message, String statusText,
HttpHeaders headers, byte @Nullable [] body, @Nullable Charset charset) {
super(message, HttpStatus.UNPROCESSABLE_CONTENT, statusText, headers, body, charset);
}
}
/**
* {@link HttpClientErrorException} for status HTTP 422 Unprocessable Entity.
* @since 5.1
* @deprecated since 7.0 in favor of {@link UnprocessableContent}
*/
@Deprecated(since = "7.0")
@SuppressWarnings("serial")
public static final | UnprocessableContent |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java | {
"start": 2249,
"end": 9966
} | class ____ extends RMHATestBase {
private TestCapacitySchedulerAsyncScheduling.NMHeartbeatThread
nmHeartbeatThread = null;
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
confForRM1
.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class, ResourceCalculator.class);
confForRM1.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
confForRM1.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
confForRM2
.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class, ResourceCalculator.class);
confForRM2.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
confForRM2.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
}
private void keepNMHeartbeat(List<MockNM> mockNMs, int interval) {
if (nmHeartbeatThread != null) {
nmHeartbeatThread.setShouldStop();
nmHeartbeatThread = null;
}
nmHeartbeatThread =
new TestCapacitySchedulerAsyncScheduling.NMHeartbeatThread(mockNMs,
interval);
nmHeartbeatThread.start();
}
private void pauseNMHeartbeat() {
if (nmHeartbeatThread != null) {
nmHeartbeatThread.setShouldStop();
nmHeartbeatThread = null;
}
}
@Test
@Timeout(value = 60)
public void testAsyncScheduleThreadStateAfterRMHATransit() throws Exception {
// start two RMs, and transit rm1 to active, rm2 to standby
startRMs();
// register NM
MockNM nm = rm1.registerNode("192.1.1.1:1234", 8192, 8);
// submit app1 and check
RMApp app1 = submitAppAndCheckLaunched(rm1);
keepNMHeartbeat(Arrays.asList(nm), 1000);
// failover RM1 to RM2
explicitFailover();
checkAsyncSchedulerThreads(Thread.currentThread());
pauseNMHeartbeat();
// register NM, kill app1
nm = rm2.registerNode("192.1.1.1:1234", 8192, 8);
keepNMHeartbeat(Arrays.asList(nm), 1000);
rm2.waitForState(app1.getCurrentAppAttempt().getAppAttemptId(),
RMAppAttemptState.LAUNCHED);
rm2.killApp(app1.getApplicationId());
// submit app3 and check
RMApp app2 = submitAppAndCheckLaunched(rm2);
pauseNMHeartbeat();
// failover RM2 to RM1
HAServiceProtocol.StateChangeRequestInfo requestInfo =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
rm2.adminService.transitionToStandby(requestInfo);
rm1.adminService.transitionToActive(requestInfo);
assertTrue(rm2.getRMContext().getHAServiceState()
== HAServiceProtocol.HAServiceState.STANDBY);
assertTrue(rm1.getRMContext().getHAServiceState()
== HAServiceProtocol.HAServiceState.ACTIVE);
// check async schedule threads
checkAsyncSchedulerThreads(Thread.currentThread());
// register NM, kill app2
nm = rm1.registerNode("192.1.1.1:1234", 8192, 8);
keepNMHeartbeat(Arrays.asList(nm), 1000);
rm1.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),
RMAppAttemptState.LAUNCHED);
rm1.killApp(app2.getApplicationId());
// submit app3 and check
submitAppAndCheckLaunched(rm1);
pauseNMHeartbeat();
rm1.stop();
rm2.stop();
}
@Test
@Timeout(value = 30)
public void testAsyncScheduleThreadExit() throws Exception {
// start two RMs, and transit rm1 to active, rm2 to standby
startRMs();
// register NM
rm1.registerNode("192.1.1.1:1234", 8192, 8);
rm1.drainEvents();
// make sure async-scheduling thread is correct at beginning
checkAsyncSchedulerThreads(Thread.currentThread());
// test async-scheduling thread exit
try{
// set resource calculator to be null to simulate
// NPE in async-scheduling thread
CapacityScheduler cs =
(CapacityScheduler) rm1.getRMContext().getScheduler();
cs.setResourceCalculator(null);
// wait for rm1 to be transitioned to standby
GenericTestUtils.waitFor(() -> rm1.getRMContext().getHAServiceState()
== HAServiceProtocol.HAServiceState.STANDBY, 100, 5000);
// failover rm2 to rm1
HAServiceProtocol.StateChangeRequestInfo requestInfo =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
rm2.adminService.transitionToStandby(requestInfo);
GenericTestUtils.waitFor(() -> {
try {
// this call may fail when rm1 is still initializing
// in StandByTransitionRunnable thread
rm1.adminService.transitionToActive(requestInfo);
return true;
} catch (Exception e) {
return false;
}
}, 100, 3000);
// wait for rm1 to be transitioned to active again
GenericTestUtils.waitFor(() -> rm1.getRMContext().getHAServiceState()
== HAServiceProtocol.HAServiceState.ACTIVE, 100, 5000);
// make sure async-scheduling thread is correct after failover
checkAsyncSchedulerThreads(Thread.currentThread());
} finally {
rm1.stop();
rm2.stop();
}
}
private RMApp submitAppAndCheckLaunched(MockRM rm) throws Exception {
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm)
.withAppName("")
.withUser(UserGroupInformation.getCurrentUser().getShortUserName())
.withAcls(null)
.withUnmanagedAM(false)
.withQueue("default")
.withMaxAppAttempts(
configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS))
.withCredentials(null)
.withAppType(null)
.withWaitForAppAcceptedState(false)
.withKeepContainers(false)
.build();
RMApp app = MockRMAppSubmitter.submit(rm, data);
rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
RMAppAttempt attempt = app.getCurrentAppAttempt();
rm.sendAMLaunched(attempt.getAppAttemptId());
rm.waitForState(app.getCurrentAppAttempt().getAppAttemptId(),
RMAppAttemptState.LAUNCHED);
return app;
}
/**
* Make sure the state of async-scheduler threads is correct
* @param currentThread
*/
private void checkAsyncSchedulerThreads(Thread currentThread){
// Make sure AsyncScheduleThread is interrupted
ThreadGroup threadGroup = currentThread.getThreadGroup();
while (threadGroup.getParent() != null) {
threadGroup = threadGroup.getParent();
}
Thread[] threads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(threads);
int numAsyncScheduleThread = 0;
int numResourceCommitterService = 0;
Thread asyncScheduleThread = null;
Thread resourceCommitterService = null;
for (Thread thread : threads) {
StackTraceElement[] stackTrace = thread.getStackTrace();
if (stackTrace.length > 0) {
for (StackTraceElement elem : stackTrace) {
String line = elem.toString();
if (line.contains("AsyncScheduleThread.work")) {
numAsyncScheduleThread++;
asyncScheduleThread = thread;
} else if (line.contains("ResourceCommitterService.work")) {
numResourceCommitterService++;
resourceCommitterService = thread;
}
}
}
}
assertEquals(1, numResourceCommitterService);
assertEquals(1, numAsyncScheduleThread);
assertNotNull(asyncScheduleThread);
assertNotNull(resourceCommitterService);
}
} | TestRMHAForAsyncScheduler |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java | {
"start": 22725,
"end": 26813
} | class ____
implements EventHandler<NodeAttributesStoreEvent> {
@Override
public void handle(NodeAttributesStoreEvent event) {
handleStoreEvent(event);
}
}
// Dispatcher related code
protected void handleStoreEvent(NodeAttributesStoreEvent event) {
List<NodeToAttributes> mappingList = new ArrayList<>();
Map<String, Map<NodeAttribute, AttributeValue>> nodeToAttr =
event.getNodeAttributeMappingList();
nodeToAttr.forEach((k, v) -> mappingList
.add(NodeToAttributes.newInstance(k, new ArrayList<>(v.keySet()))));
try {
switch (event.getOperation()) {
case REPLACE:
store.replaceNodeAttributes(mappingList);
break;
case ADD:
store.addNodeAttributes(mappingList);
break;
case REMOVE:
store.removeNodeAttributes(mappingList);
break;
default:
LOG.warn("Unsupported operation");
}
} catch (IOException e) {
LOG.error("Failed to store attribute modification to storage");
throw new YarnRuntimeException(e);
}
}
@Override
public void replaceNodeAttributes(String prefix,
Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
processMapping(nodeAttributeMapping,
AttributeMappingOperationType.REPLACE, prefix);
}
@Override
public void addNodeAttributes(
Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
processMapping(nodeAttributeMapping, AttributeMappingOperationType.ADD);
}
@Override
public void removeNodeAttributes(
Map<String, Set<NodeAttribute>> nodeAttributeMapping) throws IOException {
processMapping(nodeAttributeMapping, AttributeMappingOperationType.REMOVE);
}
private void processMapping(
Map<String, Set<NodeAttribute>> nodeAttributeMapping,
AttributeMappingOperationType mappingType) throws IOException {
processMapping(nodeAttributeMapping, mappingType,
NodeAttribute.PREFIX_CENTRALIZED);
}
private void processMapping(
Map<String, Set<NodeAttribute>> nodeAttributeMapping,
AttributeMappingOperationType mappingType, String attributePrefix)
throws IOException {
Map<NodeAttributeKey, RMNodeAttribute> newAttributesToBeAdded =
new HashMap<>();
Map<String, Map<NodeAttribute, AttributeValue>> validMapping =
validate(nodeAttributeMapping, newAttributesToBeAdded, false);
if (validMapping.size() > 0) {
internalUpdateAttributesOnNodes(validMapping, mappingType,
newAttributesToBeAdded, attributePrefix);
}
}
protected void stopDispatcher() {
AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
if (null != asyncDispatcher) {
asyncDispatcher.stop();
}
}
@Override
protected void serviceStop() throws Exception {
// finalize store
stopDispatcher();
// only close store when we enabled store persistent
if (null != store) {
store.close();
}
}
public void setRMContext(RMContext context) {
this.rmContext = context;
}
/**
* Refresh node attributes on a given node during RM recovery.
* @param nodeId Node Id
*/
public void refreshNodeAttributesToScheduler(NodeId nodeId) {
String hostName = nodeId.getHost();
Map<String, Set<NodeAttribute>> newNodeToAttributesMap =
new HashMap<>();
Host host = nodeCollections.get(hostName);
if (host == null || host.attributes == null) {
return;
}
// Use read lock and create defensive copy since
// other threads might access host.attributes
readLock.lock();
try {
newNodeToAttributesMap.put(hostName, new HashSet<>(host.attributes.keySet()));
} finally {
readLock.unlock();
}
// Notify RM
if (rmContext != null && rmContext.getDispatcher() != null) {
LOG.info("Updated NodeAttribute event to RM:" + newNodeToAttributesMap);
rmContext.getDispatcher().getEventHandler().handle(
new NodeAttributesUpdateSchedulerEvent(newNodeToAttributesMap));
}
}
}
| ForwardingEventHandler |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/ListAggWsWithRetractAggFunctionTest.java | {
"start": 1499,
"end": 7219
} | class ____
extends AggFunctionTestBase<StringData, StringData, ListAggWsWithRetractAccumulator> {
@Override
protected List<List<StringData>> getInputValueSets() {
return Arrays.asList(
Arrays.asList(
StringData.fromString("a"),
StringData.fromString("\n"),
StringData.fromString("b"),
StringData.fromString("\n"),
null,
StringData.fromString("\n"),
StringData.fromString("c"),
StringData.fromString("\n"),
null,
StringData.fromString("\n"),
StringData.fromString("d"),
StringData.fromString("\n"),
StringData.fromString("e"),
StringData.fromString("\n"),
null,
StringData.fromString("\n"),
StringData.fromString("f"),
StringData.fromString("\n")),
Arrays.asList(null, null, null, null, null, null),
Arrays.asList(
null,
StringData.fromString("\n"),
null,
StringData.fromString("\n"),
null,
StringData.fromString("\n")),
Arrays.asList(
null,
StringData.fromString("\n"),
StringData.fromString("a"),
StringData.fromString("\n"),
StringData.fromString("b"),
StringData.fromString("\n")),
Arrays.asList(
StringData.fromString("a"),
StringData.fromString(","),
StringData.fromString("b"),
StringData.fromString(","),
null,
StringData.fromString("\n"),
StringData.fromString("c"),
StringData.fromString(",")),
Arrays.asList(
StringData.fromString("a"),
StringData.fromString(","),
StringData.fromString("b"),
StringData.fromString(","),
null,
StringData.fromString("\n"),
StringData.fromString("c"),
StringData.fromString("\n")));
}
@Override
protected List<StringData> getExpectedResults() {
return Arrays.asList(
StringData.fromString("a\nb\nc\nd\ne\nf"),
null,
null,
StringData.fromString("a\nb"),
StringData.fromString("a,b,c"),
StringData.fromString("a\nb\nc"));
}
@Override
protected AggregateFunction<StringData, ListAggWsWithRetractAccumulator> getAggregator() {
return new ListAggWsWithRetractAggFunction();
}
@Override
protected Method getAccumulateFunc() throws NoSuchMethodException {
return getAggregator()
.getClass()
.getMethod("accumulate", getAccClass(), StringData.class, StringData.class);
}
@Override
protected Method getRetractFunc() throws NoSuchMethodException {
return getAggregator()
.getClass()
.getMethod("retract", getAccClass(), StringData.class, StringData.class);
}
@Override
protected Class<?> getAccClass() {
return ListAggWsWithRetractAccumulator.class;
}
@Override
protected void accumulateValues(
AggregateFunction<StringData, ListAggWsWithRetractAccumulator> aggregator,
ListAggWsWithRetractAccumulator accumulator,
List<StringData> values)
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
Method accumulateFunc = getAccumulateFunc();
Preconditions.checkArgument(
values.size() % 2 == 0, "number of values must be an integer multiple of 2.");
for (int i = 0; i < values.size(); i += 2) {
StringData value = values.get(i + 1);
StringData delimiter = values.get(i);
accumulateFunc.invoke(aggregator, accumulator, delimiter, value);
}
}
@Override
protected void retractValues(
ListAggWsWithRetractAccumulator accumulator, List<StringData> values)
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
AggregateFunction<StringData, ListAggWsWithRetractAccumulator> aggregator = getAggregator();
Method retractFunc = getRetractFunc();
Preconditions.checkArgument(
values.size() % 2 == 0, "number of values must be an integer multiple of 2.");
for (int i = 0; i < values.size(); i += 2) {
StringData value = values.get(i + 1);
StringData delimiter = values.get(i);
retractFunc.invoke(aggregator, accumulator, delimiter, value);
}
}
@Override
protected Tuple2<List<StringData>, List<StringData>> splitValues(List<StringData> values) {
Preconditions.checkArgument(
values.size() % 2 == 0, "number of values must be an integer multiple of 2.");
int index = values.size() / 2;
if (index % 2 != 0) {
index -= 1;
}
return super.splitValues(values, index);
}
}
| ListAggWsWithRetractAggFunctionTest |
java | apache__camel | components/camel-oaipmh/src/generated/java/org/apache/camel/oaipmh/component/OAIPMHComponentConfigurer.java | {
"start": 733,
"end": 3504
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
OAIPMHComponent target = (OAIPMHComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": target.setHealthCheckConsumerEnabled(property(camelContext, boolean.class, value)); return true;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": target.setHealthCheckProducerEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return boolean.class;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
OAIPMHComponent target = (OAIPMHComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return target.isHealthCheckConsumerEnabled();
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return target.isHealthCheckProducerEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| OAIPMHComponentConfigurer |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/Pool40.java | {
"start": 1660,
"end": 1845
} | class ____ the sizing of a connection pool to conform to the configuration in
* {@link io.micronaut.http.client.HttpClientConfiguration.ConnectionPoolConfiguration}.
* <p>
* This | handles |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/sortpartition/KeyedSortPartitionOperator.java | {
"start": 3525,
"end": 16554
} | class ____<INPUT, KEY> extends AbstractStreamOperator<INPUT>
implements OneInputStreamOperator<INPUT, INPUT>, BoundedOneInput {
/** The type information of input records. */
protected final TypeInformation<INPUT> inputType;
/** The selector to create the sort key for records, which will be null if it's not used. */
protected final KeySelector<INPUT, ?> sortFieldSelector;
/** The order to sort records. */
private final Order sortOrder;
/**
* The string field to indicate the sort key for records with tuple or pojo type, which will be
* null if it's not used.
*/
private final String stringSortField;
/**
* The int field to indicate the sort key for records with tuple type, which will be -1 if it's
* not used.
*/
private final int positionSortField;
/**
* The sorter to sort both key and record if the record is not sorted by {@link KeySelector}.
*/
private PushSorter<Tuple2<byte[], INPUT>> recordSorter = null;
/** The sorter to sort both key and record if the record is sorted by {@link KeySelector}. */
private PushSorter<Tuple2<byte[], Tuple2<?, INPUT>>> recordSorterForSelector = null;
private TypeSerializer<KEY> recordKeySerializer;
/** A buffer to save the serialized record key. */
private DataOutputSerializer dataOutputSerializer;
public KeyedSortPartitionOperator(
TypeInformation<INPUT> inputType, int positionSortField, Order sortOrder) {
this.inputType = inputType;
ensureFieldSortable(positionSortField);
this.positionSortField = positionSortField;
this.stringSortField = null;
this.sortFieldSelector = null;
this.sortOrder = sortOrder;
}
public KeyedSortPartitionOperator(
TypeInformation<INPUT> inputType, String stringSortField, Order sortOrder) {
this.inputType = inputType;
ensureFieldSortable(stringSortField);
this.positionSortField = -1;
this.stringSortField = stringSortField;
this.sortFieldSelector = null;
this.sortOrder = sortOrder;
}
public <K> KeyedSortPartitionOperator(
TypeInformation<INPUT> inputType,
KeySelector<INPUT, K> sortFieldSelector,
Order sortOrder) {
this.inputType = inputType;
ensureFieldSortable(sortFieldSelector);
this.positionSortField = -1;
this.stringSortField = null;
this.sortFieldSelector = sortFieldSelector;
this.sortOrder = sortOrder;
}
@Override
protected void setup(
StreamTask<?, ?> containingTask,
StreamConfig config,
Output<StreamRecord<INPUT>> output) {
super.setup(containingTask, config, output);
ClassLoader userCodeClassLoader = containingTask.getUserCodeClassLoader();
ExecutionConfig executionConfig = containingTask.getEnvironment().getExecutionConfig();
recordKeySerializer = config.getStateKeySerializer(userCodeClassLoader);
int keyLength = recordKeySerializer.getLength();
createDataOutputSerializer(keyLength);
if (sortFieldSelector != null) {
TypeInformation<Tuple2<?, INPUT>> valueType =
Types.TUPLE(
TypeExtractor.getKeySelectorTypes(sortFieldSelector, inputType),
inputType);
KeyAndValueSerializer<Tuple2<?, INPUT>> valueSerializer =
new KeyAndValueSerializer<>(
valueType.createSerializer(getExecutionConfig().getSerializerConfig()),
keyLength);
TypeComparator<Tuple2<byte[], Tuple2<?, INPUT>>> sortTypeComparator;
if (keyLength > 0) {
sortTypeComparator =
new FixedLengthByteKeyAndValueComparator<>(
keyLength,
((CompositeType<Tuple2<?, INPUT>>) valueType)
.createComparator(
getSortFieldIndex(),
getSortOrderIndicator(),
0,
executionConfig));
} else {
sortTypeComparator =
new VariableLengthByteKeyAndValueComparator<>(
((CompositeType<Tuple2<?, INPUT>>) valueType)
.createComparator(
getSortFieldIndex(),
getSortOrderIndicator(),
0,
executionConfig));
}
recordSorterForSelector =
getSorter(valueSerializer, sortTypeComparator, containingTask);
} else {
KeyAndValueSerializer<INPUT> valueSerializer =
new KeyAndValueSerializer<>(
inputType.createSerializer(getExecutionConfig().getSerializerConfig()),
keyLength);
TypeComparator<Tuple2<byte[], INPUT>> sortTypeComparator;
if (keyLength > 0) {
sortTypeComparator =
new FixedLengthByteKeyAndValueComparator<>(
keyLength,
((CompositeType<INPUT>) inputType)
.createComparator(
getSortFieldIndex(),
getSortOrderIndicator(),
0,
executionConfig));
} else {
sortTypeComparator =
new VariableLengthByteKeyAndValueComparator<>(
((CompositeType<INPUT>) inputType)
.createComparator(
getSortFieldIndex(),
getSortOrderIndicator(),
0,
executionConfig));
}
recordSorter = getSorter(valueSerializer, sortTypeComparator, containingTask);
}
}
@Override
public void processElement(StreamRecord<INPUT> element) throws Exception {
KEY currentKey = (KEY) getCurrentKey();
recordKeySerializer.serialize(currentKey, dataOutputSerializer);
byte[] serializedKey = dataOutputSerializer.getCopyOfBuffer();
dataOutputSerializer.clear();
if (sortFieldSelector != null) {
recordSorterForSelector.writeRecord(
Tuple2.of(
serializedKey,
Tuple2.of(
sortFieldSelector.getKey(element.getValue()),
element.getValue())));
} else {
recordSorter.writeRecord(Tuple2.of(serializedKey, element.getValue()));
}
}
@Override
public void endInput() throws Exception {
TimestampedCollector<INPUT> outputCollector = new TimestampedCollector<>(output);
if (sortFieldSelector != null) {
recordSorterForSelector.finishReading();
MutableObjectIterator<Tuple2<byte[], Tuple2<?, INPUT>>> iterator =
recordSorterForSelector.getIterator();
Tuple2<byte[], Tuple2<?, INPUT>> record = iterator.next();
while (record != null) {
outputCollector.collect(record.f1.f1);
record = iterator.next();
}
recordSorterForSelector.close();
} else {
recordSorter.finishReading();
MutableObjectIterator<Tuple2<byte[], INPUT>> iterator = recordSorter.getIterator();
Tuple2<byte[], INPUT> record = iterator.next();
while (record != null) {
outputCollector.collect(record.f1);
record = iterator.next();
}
recordSorter.close();
}
}
@Override
public OperatorAttributes getOperatorAttributes() {
return new OperatorAttributesBuilder()
.setOutputOnlyAfterEndOfStream(true)
.setInternalSorterSupported(true)
.build();
}
/**
* Get the sort field index for the sorted data.
*
* @return the sort field index.
*/
private int[] getSortFieldIndex() {
int[] sortFieldIndex = new int[1];
if (positionSortField != -1) {
sortFieldIndex[0] =
new Keys.ExpressionKeys<>(positionSortField, inputType)
.computeLogicalKeyPositions()[0];
} else if (stringSortField != null) {
sortFieldIndex[0] =
new Keys.ExpressionKeys<>(stringSortField, inputType)
.computeLogicalKeyPositions()[0];
}
return sortFieldIndex;
}
/**
* Get the indicator for the sort order.
*
* @return sort order indicator.
*/
private boolean[] getSortOrderIndicator() {
boolean[] sortOrderIndicator = new boolean[1];
sortOrderIndicator[0] = this.sortOrder == Order.ASCENDING;
return sortOrderIndicator;
}
private void ensureFieldSortable(int field) throws InvalidProgramException {
if (!Keys.ExpressionKeys.isSortKey(field, inputType)) {
throw new InvalidProgramException(
"The field " + field + " of input type " + inputType + " is not sortable.");
}
}
private void ensureFieldSortable(String field) throws InvalidProgramException {
if (!Keys.ExpressionKeys.isSortKey(field, inputType)) {
throw new InvalidProgramException(
"The field " + field + " of input type " + inputType + " is not sortable.");
}
}
private <K> void ensureFieldSortable(KeySelector<INPUT, K> keySelector) {
TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, inputType);
Keys.SelectorFunctionKeys<INPUT, K> sortKey =
new Keys.SelectorFunctionKeys<>(keySelector, inputType, keyType);
if (!sortKey.getKeyType().isSortKeyType()) {
throw new InvalidProgramException("The key type " + keyType + " is not sortable.");
}
}
/**
* Create the dataOutputSerializer to save the serialized record key as a buffer.
*
* @param keyLength the length of record key. The key length will be variable if the value is
* -1.
*/
private void createDataOutputSerializer(int keyLength) {
if (keyLength > 0) {
dataOutputSerializer = new DataOutputSerializer(keyLength);
} else {
// The initial buffer size is set to 64. The buffer will expand size if it's needed.
dataOutputSerializer = new DataOutputSerializer(64);
}
}
private <TYPE> PushSorter<TYPE> getSorter(
TypeSerializer<TYPE> typeSerializer,
TypeComparator<TYPE> typeComparator,
StreamTask<?, ?> streamTask) {
ClassLoader userCodeClassLoader = streamTask.getUserCodeClassLoader();
Configuration jobConfiguration = streamTask.getEnvironment().getJobConfiguration();
double managedMemoryFraction =
config.getManagedMemoryFractionOperatorUseCaseOfSlot(
ManagedMemoryUseCase.OPERATOR,
streamTask.getEnvironment().getJobConfiguration(),
streamTask.getEnvironment().getTaskConfiguration(),
userCodeClassLoader);
try {
return ExternalSorter.newBuilder(
streamTask.getEnvironment().getMemoryManager(),
streamTask,
typeSerializer,
typeComparator,
streamTask.getExecutionConfig())
.memoryFraction(managedMemoryFraction)
.enableSpilling(
streamTask.getEnvironment().getIOManager(),
jobConfiguration.get(AlgorithmOptions.SORT_SPILLING_THRESHOLD))
.maxNumFileHandles(jobConfiguration.get(AlgorithmOptions.SPILLING_MAX_FAN))
.objectReuse(streamTask.getExecutionConfig().isObjectReuseEnabled())
.largeRecords(jobConfiguration.get(AlgorithmOptions.USE_LARGE_RECORDS_HANDLER))
.build();
} catch (MemoryAllocationException e) {
throw new RuntimeException(e);
}
}
}
| KeyedSortPartitionOperator |
java | google__guice | core/test/com/google/inject/spi/SpiBindingsTest.java | {
"start": 1694,
"end": 16337
} | class ____ extends TestCase {
public void testBindConstant() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bindConstant().annotatedWith(Names.named("one")).to(1);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof InstanceBinding);
assertEquals(Key.get(Integer.class, Names.named("one")), binding.getKey());
return null;
}
});
}
public void testToInstanceBinding() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toInstance("A");
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof InstanceBinding);
checkBindingSource(binding);
assertEquals(Key.get(String.class), binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(InstanceBinding<? extends T> binding) {
assertEquals("A", binding.getInstance());
return null;
}
});
binding.acceptScopingVisitor(
new FailingBindingScopingVisitor() {
@Override
public Void visitEagerSingleton() {
return null;
}
});
return null;
}
});
}
public void testToProviderBinding() {
final Provider<String> stringProvider = new StringProvider();
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toProvider(stringProvider);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof ProviderInstanceBinding);
checkBindingSource(binding);
assertEquals(Key.get(String.class), binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(ProviderInstanceBinding<? extends T> binding) {
assertSame(stringProvider, binding.getUserSuppliedProvider());
return null;
}
});
return null;
}
});
}
public void testToProviderKeyBinding() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toProvider(StringProvider.class);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof ProviderKeyBinding);
checkBindingSource(binding);
assertEquals(Key.get(String.class), binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(ProviderKeyBinding<? extends T> binding) {
assertEquals(Key.get(StringProvider.class), binding.getProviderKey());
return null;
}
});
return null;
}
});
}
public void testToKeyBinding() {
final Key<String> aKey = Key.get(String.class, Names.named("a"));
final Key<String> bKey = Key.get(String.class, Names.named("b"));
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(aKey).to(bKey);
bind(bKey).toInstance("B");
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof LinkedKeyBinding);
checkBindingSource(binding);
assertEquals(aKey, binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(LinkedKeyBinding<? extends T> binding) {
assertEquals(bKey, binding.getLinkedKey());
return null;
}
});
return null;
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertEquals(bKey, binding.getKey());
return null;
}
});
}
public void testToConstructorBinding() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(D.class);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof ConstructorBinding);
checkBindingSource(binding);
assertEquals(Key.get(D.class), binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(ConstructorBinding<? extends T> binding) {
Constructor<?> expected = D.class.getDeclaredConstructors()[0];
assertEquals(expected, binding.getConstructor().getMember());
assertEquals(ImmutableSet.<InjectionPoint>of(), binding.getInjectableMembers());
return null;
}
});
return null;
}
});
}
public void testConstantBinding() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bindConstant().annotatedWith(Names.named("one")).to(1);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> binding) {
assertTrue(binding instanceof InstanceBinding);
checkBindingSource(binding);
assertEquals(Key.get(Integer.class, Names.named("one")), binding.getKey());
binding.acceptTargetVisitor(
new FailingTargetVisitor<T>() {
@Override
public Void visit(InstanceBinding<? extends T> binding) {
assertEquals(1, binding.getInstance());
return null;
}
});
return null;
}
});
}
public void testConvertedConstantBinding() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindConstant().annotatedWith(Names.named("one")).to("1");
}
});
Binding<Integer> binding = injector.getBinding(Key.get(Integer.class, Names.named("one")));
assertEquals(Key.get(Integer.class, Names.named("one")), binding.getKey());
checkBindingSource(binding);
assertTrue(binding instanceof ConvertedConstantBinding);
binding.acceptTargetVisitor(
new FailingTargetVisitor<Integer>() {
@Override
public Void visit(ConvertedConstantBinding<? extends Integer> binding) {
assertEquals((Integer) 1, binding.getValue());
assertEquals(Key.get(String.class, Names.named("one")), binding.getSourceKey());
return null;
}
});
}
public void testProviderBinding() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toInstance("A");
}
});
Key<Provider<String>> providerOfStringKey = new Key<Provider<String>>() {};
Binding<Provider<String>> binding = injector.getBinding(providerOfStringKey);
assertEquals(providerOfStringKey, binding.getKey());
checkBindingSource(binding);
assertTrue(binding instanceof ProviderBinding);
binding.acceptTargetVisitor(
new FailingTargetVisitor<Provider<String>>() {
@Override
public Void visit(ProviderBinding<? extends Provider<String>> binding) {
assertEquals(Key.get(String.class), binding.getProvidedKey());
return null;
}
});
}
public void testScopes() {
checkInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class)
.annotatedWith(Names.named("a"))
.toProvider(StringProvider.class)
.in(Singleton.class);
bind(String.class)
.annotatedWith(Names.named("b"))
.toProvider(StringProvider.class)
.in(Scopes.SINGLETON);
bind(String.class)
.annotatedWith(Names.named("c"))
.toProvider(StringProvider.class)
.asEagerSingleton();
bind(String.class).annotatedWith(Names.named("d")).toProvider(StringProvider.class);
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> command) {
assertEquals(Key.get(String.class, Names.named("a")), command.getKey());
command.acceptScopingVisitor(
new FailingBindingScopingVisitor() {
@Override
public Void visitScope(Scope scope) {
// even though we bound with an annotation, the injector always uses instances
assertSame(Scopes.SINGLETON, scope);
return null;
}
});
return null;
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> command) {
assertEquals(Key.get(String.class, Names.named("b")), command.getKey());
command.acceptScopingVisitor(
new FailingBindingScopingVisitor() {
@Override
public Void visitScope(Scope scope) {
assertSame(Scopes.SINGLETON, scope);
return null;
}
});
return null;
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> command) {
assertEquals(Key.get(String.class, Names.named("c")), command.getKey());
command.acceptScopingVisitor(
new FailingBindingScopingVisitor() {
@Override
public Void visitEagerSingleton() {
return null;
}
});
return null;
}
},
new FailingElementVisitor() {
@Override
public <T> Void visit(Binding<T> command) {
assertEquals(Key.get(String.class, Names.named("d")), command.getKey());
command.acceptScopingVisitor(
new FailingBindingScopingVisitor() {
@Override
public Void visitNoScoping() {
return null;
}
});
return null;
}
});
}
public void testExtensionSpi() {
final AtomicBoolean visiting = new AtomicBoolean(false);
final Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class)
.toProvider(
new ProviderWithExtensionVisitor<String>() {
@SuppressWarnings("unchecked") // Safe because V is fixed to String
@Override
public <B, V> V acceptExtensionVisitor(
BindingTargetVisitor<B, V> visitor,
ProviderInstanceBinding<? extends B> binding) {
assertSame(this, binding.getUserSuppliedProvider());
// We can't always check for FailingSpiTargetVisitor,
// because constructing the injector visits here, and we need
// to process the binding as normal
if (visiting.get()) {
assertTrue(
"visitor: " + visitor,
visitor instanceof FailingSpiTargetVisitor);
return (V) "visited";
} else {
return visitor.visit(binding);
}
}
@Override
public String get() {
return "FooBar";
}
});
}
});
visiting.set(true);
// Check for Provider<String> binding -- that is still a ProviderBinding.
Key<Provider<String>> providerOfStringKey = new Key<Provider<String>>() {};
Binding<Provider<String>> providerBinding = injector.getBinding(providerOfStringKey);
assertEquals(providerOfStringKey, providerBinding.getKey());
checkBindingSource(providerBinding);
assertTrue("binding: " + providerBinding, providerBinding instanceof ProviderBinding);
providerBinding.acceptTargetVisitor(
new FailingTargetVisitor<Provider<String>>() {
@Override
public Void visit(ProviderBinding<? extends Provider<String>> binding) {
assertEquals(Key.get(String.class), binding.getProvidedKey());
return null;
}
});
// Check for String binding -- that one is ProviderInstanceBinding, and gets hooked
Binding<String> binding = injector.getBinding(String.class);
assertEquals(Key.get(String.class), binding.getKey());
checkBindingSource(binding);
assertTrue(binding instanceof ProviderInstanceBinding);
assertEquals("visited", binding.acceptTargetVisitor(new FailingSpiTargetVisitor<String>()));
}
private static | SpiBindingsTest |
java | google__dagger | javatests/dagger/functional/producers/cancellation/CancellationComponent.java | {
"start": 1162,
"end": 1458
} | interface ____ {
@Named("ep1")
ListenableFuture<String> entryPoint1();
@Named("ep2")
Producer<String> entryPoint2();
@Named("ep3")
ListenableFuture<String> entryPoint3();
CancellationSubcomponent.Builder subcomponentBuilder();
@ProductionComponent.Builder
| CancellationComponent |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/reactive/AbstractReactiveTransactionManager.java | {
"start": 1863,
"end": 2017
} | class ____ implements Spring's standard reactive transaction workflow,
* serving as basis for concrete platform transaction managers.
*
* <p>This base | that |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/WrappingExecutorServiceTest.java | {
"start": 6625,
"end": 10110
} | class ____ implements ExecutorService {
private String lastMethodCalled = "";
private long lastTimeoutInMillis = -1;
private final ExecutorService inline = newDirectExecutorService();
void assertLastMethodCalled(String method) {
assertEquals(method, lastMethodCalled);
}
void assertMethodWithTimeout(String method, long timeout, TimeUnit unit) {
assertLastMethodCalled(method + "Timeout");
assertEquals(unit.toMillis(timeout), lastTimeoutInMillis);
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) {
lastMethodCalled = "awaitTermination";
return false;
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
throws InterruptedException {
lastMethodCalled = "invokeAll";
assertTaskWrapped(tasks);
return inline.invokeAll(tasks);
}
@Override
public <T> List<Future<T>> invokeAll(
Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
assertTaskWrapped(tasks);
lastMethodCalled = "invokeAllTimeout";
lastTimeoutInMillis = unit.toMillis(timeout);
return inline.invokeAll(tasks, timeout, unit);
}
// Define the invokeAny methods to invoke the first task
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
throws ExecutionException, InterruptedException {
assertTaskWrapped(tasks);
lastMethodCalled = "invokeAny";
return inline.submit(Iterables.get(tasks, 0)).get();
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws ExecutionException, InterruptedException, TimeoutException {
assertTaskWrapped(tasks);
lastMethodCalled = "invokeAnyTimeout";
lastTimeoutInMillis = unit.toMillis(timeout);
return inline.submit(Iterables.get(tasks, 0)).get(timeout, unit);
}
@Override
public boolean isShutdown() {
lastMethodCalled = "isShutdown";
return false;
}
@Override
public boolean isTerminated() {
lastMethodCalled = "isTerminated";
return false;
}
@Override
public void shutdown() {
lastMethodCalled = "shutdown";
}
@Override
public List<Runnable> shutdownNow() {
lastMethodCalled = "shutdownNow";
return ImmutableList.of();
}
@Override
public <T> Future<T> submit(Callable<T> task) {
lastMethodCalled = "submit";
assertThat(task).isInstanceOf(WrappedCallable.class);
return inline.submit(task);
}
@Override
public Future<?> submit(Runnable task) {
lastMethodCalled = "submit";
assertThat(task).isInstanceOf(WrappedRunnable.class);
return inline.submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
lastMethodCalled = "submit";
assertThat(task).isInstanceOf(WrappedRunnable.class);
return inline.submit(task, result);
}
@Override
public void execute(Runnable command) {
lastMethodCalled = "execute";
assertThat(command).isInstanceOf(WrappedRunnable.class);
inline.execute(command);
}
private static <T> void assertTaskWrapped(Collection<? extends Callable<T>> tasks) {
Predicate<Object> p = Predicates.instanceOf(WrappedCallable.class);
assertTrue(Iterables.all(tasks, p));
}
}
}
| MockExecutor |
java | dropwizard__dropwizard | dropwizard-json-logging/src/main/java/io/dropwizard/logging/json/layout/JsonFormatter.java | {
"start": 443,
"end": 1979
} | class ____ {
private static final int DEFAULT_BUFFER_SIZE = 512;
private final ObjectMapper objectMapper;
private final boolean doesAppendLineSeparator;
private final int bufferSize;
public JsonFormatter(ObjectMapper objectMapper, boolean prettyPrint, boolean doesAppendLineSeparator,
int bufferSize) {
this.objectMapper = prettyPrint ? objectMapper.enable(SerializationFeature.INDENT_OUTPUT) : objectMapper;
this.doesAppendLineSeparator = doesAppendLineSeparator;
this.bufferSize = bufferSize;
}
public JsonFormatter(ObjectMapper objectMapper, boolean prettyPrint, boolean doesAppendLineSeparator) {
this(objectMapper, prettyPrint, doesAppendLineSeparator, DEFAULT_BUFFER_SIZE);
}
/**
* Converts the provided map as a JSON object according to the configured JSON mapper.
*
* @param map the provided map
* @return the JSON as a string
*/
@Nullable
public String toJson(@Nullable Map<String, Object> map) {
if (map == null || map.isEmpty()) {
return null;
}
try (StringWriter writer = new StringWriter(bufferSize)) {
objectMapper.writeValue(writer, map);
if (doesAppendLineSeparator) {
writer.append(CoreConstants.LINE_SEPARATOR);
}
return writer.toString();
} catch (IOException e) {
throw new IllegalArgumentException("Unable to format map as a JSON", e);
}
}
}
| JsonFormatter |
java | quarkusio__quarkus | extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/GeneratedBeanBuildItem.java | {
"start": 298,
"end": 1305
} | class ____ extends MultiBuildItem {
private final boolean applicationClass;
private final String name;
private final byte[] data;
private final String source;
public GeneratedBeanBuildItem(String name, byte[] data) {
this(name, data, null);
}
public GeneratedBeanBuildItem(String name, byte[] data, String source) {
this(name, data, source, true);
}
public GeneratedBeanBuildItem(String name, byte[] data, String source, boolean applicationClass) {
this.name = name;
this.data = data;
this.source = source;
this.applicationClass = applicationClass;
}
public String getName() {
return name;
}
public byte[] getData() {
return data;
}
/**
*
* @return the textual representation of generated code
*/
public String getSource() {
return source;
}
public boolean isApplicationClass() {
return applicationClass;
}
}
| GeneratedBeanBuildItem |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/client/RestTestClientTests.java | {
"start": 5609,
"end": 6127
} | class ____ {
@Test
void testCookie() {
RestTestClientTests.this.client.get().uri("/test")
.cookie("foo", "bar")
.exchange()
.expectStatus().isOk()
.expectBody().jsonPath("$.headers.Cookie").isEqualTo("foo=bar");
}
@Test
void testCookies() {
RestTestClientTests.this.client.get().uri("/test")
.cookies(cookies -> cookies.add("foo", "bar"))
.exchange()
.expectStatus().isOk()
.expectBody().jsonPath("$.headers.Cookie").isEqualTo("foo=bar");
}
}
@Nested
| Cookies |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/SourceStreamTaskTest.java | {
"start": 37890,
"end": 39098
} | class ____ extends RichSourceFunction<String> {
private static final long serialVersionUID = 1L;
public static boolean openCalled = false;
public static boolean closeCalled = false;
OpenCloseTestSource() {
openCalled = false;
closeCalled = false;
}
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
if (closeCalled) {
fail("Close called before open.");
}
openCalled = true;
}
@Override
public void close() throws Exception {
super.close();
if (!openCalled) {
fail("Open was not called before close.");
}
closeCalled = true;
}
@Override
public void run(SourceContext<String> ctx) throws Exception {
if (!openCalled) {
fail("Open was not called before run.");
}
for (int i = 0; i < 10; i++) {
ctx.collect("Hello" + i);
}
}
@Override
public void cancel() {}
}
private static | OpenCloseTestSource |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4235HttpAuthDeploymentChecksumsTest.java | {
"start": 2385,
"end": 7039
} | class ____ extends AbstractMavenIntegrationTestCase {
private File testDir;
private Server server;
private int port;
private final RepoHandler repoHandler = new RepoHandler();
@BeforeEach
protected void setUp() throws Exception {
testDir = extractResources("/mng-4235");
repoHandler.setResourceBase(testDir.getAbsolutePath());
Constraint constraint = new Constraint();
constraint.setName(Constraint.__BASIC_AUTH);
constraint.setRoles(new String[] {"deployer"});
constraint.setAuthenticate(true);
ConstraintMapping constraintMapping = new ConstraintMapping();
constraintMapping.setConstraint(constraint);
constraintMapping.setPathSpec("/*");
HashLoginService userRealm = new HashLoginService("TestRealm");
UserStore userStore = new UserStore();
userStore.addUser("testuser", new Password("testpass"), new String[] {"deployer"});
userRealm.setUserStore(userStore);
ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler();
securityHandler.setLoginService(userRealm);
securityHandler.setAuthMethod(__BASIC_AUTH);
securityHandler.setConstraintMappings(new ConstraintMapping[] {constraintMapping});
HandlerList handlerList = new HandlerList();
handlerList.addHandler(securityHandler);
handlerList.addHandler(repoHandler);
handlerList.addHandler(new DefaultHandler());
server = new Server(0);
server.setHandler(handlerList);
server.start();
if (server.isFailed()) {
fail("Couldn't bind the server socket to a free port!");
}
port = ((NetworkConnector) server.getConnectors()[0]).getLocalPort();
System.out.println("Bound server socket to the port " + port);
}
@AfterEach
protected void tearDown() throws Exception {
if (server != null) {
server.stop();
server.join();
}
}
/**
* Test the creation of proper checksums during deployment to a secured HTTP repo. The pitfall with HTTP auth is
* that it might require double submission of the data, first during an initial PUT without credentials and second
* during a retried PUT with credentials in response to the auth challenge by the server. The checksum must
* nevertheless only be calculated on the non-doubled data stream.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
Map<String, String> filterProps = new HashMap<>();
filterProps.put("@port@", Integer.toString(port));
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.filterFile("pom-template.xml", "pom.xml", filterProps);
verifier.setAutoclean(false);
verifier.deleteArtifacts("org.apache.maven.its.mng4235");
verifier.deleteDirectory("repo");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/0.1/test-0.1.jar", ".sha1", "SHA-1");
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/0.1/test-0.1.jar", ".md5", "MD5");
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/0.1/test-0.1.pom", ".sha1", "SHA-1");
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/0.1/test-0.1.pom", ".md5", "MD5");
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/maven-metadata.xml", ".sha1", "SHA-1");
assertHash(verifier, "repo/org/apache/maven/its/mng4235/test/maven-metadata.xml", ".md5", "MD5");
for (DeployedResource deployedResource : repoHandler.deployedResources) {
if (StringUtils.equalsIgnoreCase("chunked", deployedResource.transferEncoding)) {
fail("deployedResource " + deployedResource
+ " use chunked transfert encoding some http server doesn't support that");
}
}
}
private void assertHash(Verifier verifier, String dataFile, String hashExt, String algo) throws Exception {
String actualHash = ItUtils.calcHash(new File(verifier.getBasedir(), dataFile), algo);
String expectedHash = verifier.loadLines(dataFile + hashExt).get(0).trim();
assertTrue(expectedHash.equalsIgnoreCase(actualHash), "expected=" + expectedHash + ", actual=" + actualHash);
}
private static | MavenITmng4235HttpAuthDeploymentChecksumsTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GrpcEndpointBuilderFactory.java | {
"start": 61243,
"end": 64813
} | interface ____ extends EndpointProducerBuilder {
default GrpcEndpointProducerBuilder basic() {
return (GrpcEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGrpcEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGrpcEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedGrpcEndpointProducerBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedGrpcEndpointProducerBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
}
/**
* Builder for endpoint for the gRPC component.
*/
public | AdvancedGrpcEndpointProducerBuilder |
java | apache__spark | core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java | {
"start": 4474,
"end": 4869
} | class ____ extends RadixSortSupport {
@Override public boolean sortDescending() { return false; }
@Override public boolean sortSigned() { return false; }
@Override public boolean nullsFirst() { return true; }
@Override
public int compare(long aPrefix, long bPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final | UnsignedPrefixComparator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/BatchedMultiTableDynamicStatementTests.java | {
"start": 3631,
"end": 4305
} | class ____ {
@Id
private Integer id;
@Column( name = "amt")
private double amount;
@Column( name = "the_comment")
private String comment;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public double getAmount() {
return amount;
}
public void setAmount(double amount) {
this.amount = amount;
}
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
}
@Entity( name = "CheckPayment")
@Table( name = "check_payments" )
@PrimaryKeyJoinColumn( name = "payment_fk" )
@DynamicInsert
@DynamicUpdate
public static | Payment |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/support/ReflectionSupport.java | {
"start": 19900,
"end": 22003
} | class ____ filter; never {@code null}
* @return a stream of all such classes found; never {@code null}
* but potentially empty
* @since 1.10
* @see #streamAllClassesInClasspathRoot(URI, Predicate, Predicate)
* @see #streamAllClassesInPackage(String, Predicate, Predicate)
*/
@API(status = MAINTAINED, since = "1.10")
public static Stream<Class<?>> streamAllClassesInModule(String moduleName, Predicate<Class<?>> classFilter,
Predicate<String> classNameFilter) {
return ReflectionUtils.streamAllClassesInModule(moduleName, classFilter, classNameFilter);
}
/**
* Find all {@linkplain Resource resources} in the supplied {@code moduleName}
* that match the specified {@code resourceFilter} predicate.
*
* <p>The module-path scanning algorithm searches recursively in all
* packages contained in the module.
*
* @param moduleName the name of the module to scan; never {@code null} or
* <em>empty</em>
* @param resourceFilter the resource type filter; never {@code null}
* @return a stream of all such resources found; never {@code null}
* but potentially empty
* @since 1.11
* @see #streamAllResourcesInClasspathRoot(URI, Predicate)
* @see #streamAllResourcesInPackage(String, Predicate)
* @deprecated Please use {@link ResourceSupport#streamAllResourcesInModule(String, ResourceFilter)} instead
*/
@API(status = DEPRECATED, since = "1.14")
@Deprecated(since = "1.14", forRemoval = true)
@SuppressWarnings("removal")
public static Stream<Resource> streamAllResourcesInModule(String moduleName, Predicate<Resource> resourceFilter) {
return toSupportResourcesStream(
ResourceSupport.streamAllResourcesInModule(moduleName, toResourceFilter(resourceFilter)));
}
/**
* Create a new instance of the specified {@link Class} by invoking
* the constructor whose argument list matches the types of the supplied
* arguments.
*
* <p>The constructor will be made accessible if necessary, and any checked
* exception will be {@linkplain ExceptionUtils#throwAsUncheckedException masked}
* as an unchecked exception.
*
* @param clazz the | name |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/consumer/group/DescribeConsumerGroupTest.java | {
"start": 4500,
"end": 81605
} | class ____ {
private static final String TOPIC_PREFIX = "test.topic.";
private static final String GROUP_PREFIX = "test.group.";
private static final List<List<String>> DESCRIBE_TYPE_OFFSETS = List.of(List.of(""), List.of("--offsets"), List.of("--offsets", "--verbose"));
private static final List<List<String>> DESCRIBE_TYPE_MEMBERS = List.of(List.of("--members"), List.of("--members", "--verbose"));
private static final List<List<String>> DESCRIBE_TYPE_STATE = List.of(List.of("--state"), List.of("--state", "--verbose"));
private static final List<List<String>> DESCRIBE_TYPES = Stream.of(DESCRIBE_TYPE_OFFSETS, DESCRIBE_TYPE_MEMBERS, DESCRIBE_TYPE_STATE).flatMap(Collection::stream).toList();
private ClusterInstance clusterInstance;
@ClusterTest
public void testDescribeNonExistingGroup(ClusterInstance clusterInstance) {
String missingGroup = "missing.group";
for (List<String> describeType : DESCRIBE_TYPES) {
// note the group to be queried is a different (non-existing) group
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup));
cgcArgs.addAll(describeType);
try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))) {
service.describeGroups();
fail("Expected error was not detected for describe option '" + String.join(" ", describeType) + "'");
} catch (ExecutionException ee) {
assertInstanceOf(GroupIdNotFoundException.class, ee.getCause());
assertEquals("Group " + missingGroup + " not found.", ee.getCause().getMessage());
} catch (Exception e) {
fail("Expected error was not detected for describe option '" + String.join(" ", describeType) + "'");
}
}
}
@ClusterTest
public void testDescribeOffsetsOfNonExistingGroup(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
String missingGroup = "missing.group";
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// run one consumer in the group consuming from a single-partition topic
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
// note the group to be queried is a different (non-existing) group
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
) {
service.collectGroupOffsets(missingGroup);
fail("Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
} catch (ExecutionException ee) {
assertInstanceOf(GroupIdNotFoundException.class, ee.getCause(),
"Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
}
}
}
@ClusterTest
public void testDescribeMembersOfNonExistingGroup(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
String missingGroup = "missing.group";
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
// note the group to be queried is a different (non-existing) group
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
) {
service.collectGroupMembers(missingGroup);
fail("Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
} catch (ExecutionException ee) {
assertInstanceOf(GroupIdNotFoundException.class, ee.getCause(),
"Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
}
}
}
@ClusterTest
public void testDescribeStateOfNonExistingGroup(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
String missingGroup = "missing.group";
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
// note the group to be queried is a different (non-existing) group
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
) {
service.collectGroupState(missingGroup);
fail("Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
} catch (ExecutionException ee) {
assertInstanceOf(GroupIdNotFoundException.class, ee.getCause(),
"Expected the group '" + missingGroup + "' to throw GroupIdNotFoundException");
}
}
}
@ClusterTest
public void testDescribeGroupOffsets(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
clusterInstance.createTopic(topic, 1, (short) 1);
sendRecords(topic, 0, 1);
for (List<String> describeType : DESCRIBE_TYPE_OFFSETS) {
String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
cgcArgs.addAll(describeType);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]));
Admin admin = clusterInstance.admin()
) {
TestUtils.waitForCondition(() -> {
Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
String[] lines = res.getKey().trim().split("\n");
if (lines.length != 2 && !res.getValue().isEmpty()) {
return false;
}
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
MemberDescription memberDescription = consumerGroupDescription.members().iterator().next();
List<String> expectedValues;
if (describeType.contains("--verbose")) {
expectedValues = List.of(group, topic, "0", "-", "1", "1", "0", memberDescription.consumerId(),
memberDescription.host(), memberDescription.clientId());
} else {
expectedValues = List.of(group, topic, "0", "1", "1", "0", memberDescription.consumerId(),
memberDescription.host(), memberDescription.clientId());
}
return checkArgsHeaderOutput(cgcArgs, lines[0]) &&
Arrays.stream(lines[1].trim().split("\\s+")).toList().equals(expectedValues);
}, "Expected a data row and no error in describe results with describe type " + String.join(" ", describeType) + ".");
}
}
}
}
@ClusterTest
public void testDescribeGroupMembers(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
boolean isConsumer = groupProtocol.equals(GroupProtocol.CONSUMER);
String topic1 = TOPIC_PREFIX + groupProtocol.name() + "1";
String topic2 = TOPIC_PREFIX + groupProtocol.name() + "2";
clusterInstance.createTopic(topic1, 2, (short) 1);
clusterInstance.createTopic(topic2, 1, (short) 1);
for (List<String> describeType : DESCRIBE_TYPE_MEMBERS) {
String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
cgcArgs.addAll(describeType);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, Set.of(topic1, topic2), Map.of(), 1);
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]));
Admin admin = clusterInstance.admin()
) {
TestUtils.waitForCondition(() -> {
Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
String[] lines = res.getKey().trim().split("\n");
if (lines.length != 2 && !res.getValue().isEmpty()) {
return false;
}
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
MemberDescription memberDescription = consumerGroupDescription.members().iterator().next();
String topicAssignment = topic1 + ":0,1;" + topic2 + ":0";
List<String> expectedValues;
if (describeType.contains("--verbose")) {
expectedValues = List.of(group, memberDescription.consumerId(), memberDescription.host(),
memberDescription.clientId(), "3", isConsumer ? memberDescription.memberEpoch().get().toString() : "-",
topicAssignment, isConsumer ? consumerGroupDescription.targetAssignmentEpoch().get().toString() : "-",
isConsumer ? topicAssignment : "-");
} else {
expectedValues = List.of(group, memberDescription.consumerId(), memberDescription.host(),
memberDescription.clientId(), "3");
}
return checkArgsHeaderOutput(cgcArgs, lines[0]) &&
Arrays.stream(lines[1].trim().split("\\s+")).toList().equals(expectedValues);
}, "Expected a data row and no error in describe results with describe type " + String.join(" ", describeType) + ".");
}
}
}
}
@ClusterTest
public void testDescribeGroupMemberWithMigration(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
String topic = TOPIC_PREFIX + "migration";
String group = GROUP_PREFIX + "migration";
String classicClientId = "classic";
String consumerClientId = "consumer";
clusterInstance.createTopic(topic, 2, (short) 1);
List<String> cgcArgs = List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--members", "--verbose");
try (AutoCloseable classicConsumer = consumerGroupClosable(GroupProtocol.CLASSIC, group, topic, Map.of(ConsumerConfig.CLIENT_ID_CONFIG, classicClientId));
Admin admin = clusterInstance.admin()
) {
// Make sure the classic consumer is stable before starting another consumer.
TestUtils.waitForCondition(
() -> {
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
MemberDescription memberDescription = consumerGroupDescription.members().iterator().next();
return !memberDescription.assignment().topicPartitions().isEmpty();
},
"Expected the classic consumer to join the group."
);
try (AutoCloseable consumerConsumer = consumerGroupClosable(GroupProtocol.CONSUMER, group, topic, Map.of(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId));
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
) {
TestUtils.waitForCondition(() -> {
Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
String[] lines = res.getKey().trim().split("\n");
if (lines.length != 3 && !res.getValue().isEmpty()) {
return false;
}
// We can't guarantee the order of the lines and final assignment on both members,
// so only checking UPGRADED field to make sure the tool can reflect member migration.
String[] header = lines[0].trim().split("\\s+");
assertEquals("UPGRADED", header[header.length - 1]);
List<String> line1 = Arrays.stream(lines[1].trim().split("\\s+")).toList();
List<String> line2 = Arrays.stream(lines[2].trim().split("\\s+")).toList();
if (line1.contains(classicClientId)) {
assertEquals("false", line1.get(line1.size() - 1));
assertEquals("true", line2.get(line2.size() - 1));
} else {
assertEquals("false", line2.get(line2.size() - 1));
assertEquals("true", line1.get(line1.size() - 1));
}
return true;
}, "Expected a data row and no error in describe results with describe type \"--members --verbose\"");
}
}
}
@ClusterTest
public void testDescribeGroupState(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
boolean isConsumer = groupProtocol.equals(GroupProtocol.CONSUMER);
String topic = TOPIC_PREFIX + groupProtocol.name();
clusterInstance.createTopic(topic, 1, (short) 1);
for (List<String> describeType : DESCRIBE_TYPE_STATE) {
String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
cgcArgs.addAll(describeType);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]));
Admin admin = clusterInstance.admin()
) {
TestUtils.waitForCondition(() -> {
Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
String[] lines = res.getKey().trim().split("\n");
if (lines.length != 2 && !res.getValue().isEmpty()) {
return false;
}
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
List<String> expectedValues;
String coordinatorAddress = consumerGroupDescription.coordinator().host() + ":" + consumerGroupDescription.coordinator().port();
String coordinatorId = "(" + consumerGroupDescription.coordinator().idString() + ")";
if (describeType.contains("--verbose")) {
expectedValues = List.of(group, coordinatorAddress, coordinatorId, consumerGroupDescription.partitionAssignor(), GroupState.STABLE.toString(),
isConsumer ? consumerGroupDescription.groupEpoch().get().toString() : "-",
isConsumer ? consumerGroupDescription.targetAssignmentEpoch().get().toString() : "-", "1");
} else {
expectedValues = List.of(group, coordinatorAddress, coordinatorId, consumerGroupDescription.partitionAssignor(), GroupState.STABLE.toString(), "1");
}
return checkArgsHeaderOutput(cgcArgs, lines[0]) &&
Arrays.stream(lines[1].trim().split("\\s+")).toList().equals(expectedValues);
}, "Expected two data rows and no error in describe results with describe type " + String.join(" ", describeType) + ".");
}
}
}
}
    /**
     * Describes several existing groups in one invocation by repeating {@code --group} for each,
     * and checks every describe flavor prints the expected number of non-empty output lines
     * (presumably one header plus one data row per group — TODO confirm header repetition)
     * with nothing on stderr.
     */
    @ClusterTest
    public void testDescribeExistingGroups(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic);
            // Create N single-threaded consumer groups from a single-partition topic
            List<AutoCloseable> protocolConsumerGroupExecutors = new ArrayList<>();
            try {
                // Accumulates "--group <name>" argument pairs, one pair per created group.
                List<String> groups = new ArrayList<>();
                for (List<String> describeType : DESCRIBE_TYPES) {
                    String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
                    groups.addAll(List.of("--group", group));
                    protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Map.of()));
                }
                // Two non-empty lines expected per group created above.
                int expectedNumLines = DESCRIBE_TYPES.size() * 2;
                for (List<String> describeType : DESCRIBE_TYPES) {
                    List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe"));
                    cgcArgs.addAll(groups);
                    cgcArgs.addAll(describeType);
                    try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))) {
                        TestUtils.waitForCondition(() -> {
                            Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                            long numLines = Arrays.stream(res.getKey().trim().split("\n")).filter(line -> !line.isEmpty()).count();
                            return (numLines == expectedNumLines) &&
                                res.getValue().isEmpty() &&
                                checkArgsHeaderOutput(cgcArgs, res.getKey().trim().split("\n")[0]);
                        }, "Expected a data row and no error in describe results with describe type " + String.join(" ", describeType) + ".");
                    }
                }
            } finally {
                // NOTE(review): if one close() throws, the remaining executors are not closed.
                for (AutoCloseable protocolConsumerGroupExecutor : protocolConsumerGroupExecutors) {
                    protocolConsumerGroupExecutor.close();
                }
            }
        }
    }
    /**
     * Same as the multi-{@code --group} describe test, but uses {@code --all-groups} instead of
     * enumerating the groups explicitly; afterwards deletes the groups and the topic so the next
     * protocol iteration starts from a clean cluster.
     */
    @ClusterTest
    public void testDescribeAllExistingGroups(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic);
            // Create N single-threaded consumer groups from a single-partition topic
            List<AutoCloseable> protocolConsumerGroupExecutors = new ArrayList<>();
            List<String> groups = new ArrayList<>();
            try {
                for (List<String> describeType : DESCRIBE_TYPES) {
                    String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
                    groups.add(group);
                    protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Map.of()));
                }
                // Two non-empty lines expected per group created above.
                int expectedNumLines = DESCRIBE_TYPES.size() * 2;
                for (List<String> describeType : DESCRIBE_TYPES) {
                    List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--all-groups"));
                    cgcArgs.addAll(describeType);
                    try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))) {
                        TestUtils.waitForCondition(() -> {
                            Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                            long numLines = Arrays.stream(res.getKey().trim().split("\n")).filter(line -> !line.isEmpty()).count();
                            return (numLines == expectedNumLines) &&
                                res.getValue().isEmpty() &&
                                checkArgsHeaderOutput(cgcArgs, res.getKey().trim().split("\n")[0]);
                        }, "Expected a data row and no error in describe results with describe type " + String.join(" ", describeType) + ".");
                    }
                }
            } finally {
                // NOTE(review): if one close() throws, the remaining executors (and the
                // group/topic cleanup below) are skipped.
                for (AutoCloseable protocolConsumerGroupExecutor : protocolConsumerGroupExecutors) {
                    protocolConsumerGroupExecutor.close();
                }
                // remove previous consumer groups, so we can have a clean cluster for next consumer group protocol test.
                deleteConsumerGroups(groups);
                deleteTopic(topic);
            }
        }
    }
    /**
     * Verifies collectGroupOffsets for a stable group with one consumer on a one-partition topic:
     * exactly one assignment row for the group whose consumer id, client id and host columns are
     * all populated (i.e. not the missing-column placeholder).
     */
    @ClusterTest
    public void testDescribeOffsetsOfExistingGroup(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> groupOffsets = service.collectGroupOffsets(group);
                    Optional<GroupState> state = groupOffsets.getKey();
                    Optional<Collection<PartitionAssignmentState>> assignments = groupOffsets.getValue();
                    Predicate<PartitionAssignmentState> isGrp = s -> Objects.equals(s.group(), group);
                    // First gate: the group is STABLE and has exactly one assignment row.
                    boolean res = state.map(s -> s.equals(GroupState.STABLE)).orElse(false) &&
                        assignments.isPresent() &&
                        assignments.get().stream().filter(isGrp).count() == 1;
                    if (!res)
                        return false;
                    Optional<PartitionAssignmentState> maybePartitionState = assignments.get().stream().filter(isGrp).findFirst();
                    if (maybePartitionState.isEmpty())
                        return false;
                    PartitionAssignmentState partitionState = maybePartitionState.get();
                    // Second gate: the identity columns hold real values, not placeholders.
                    return !partitionState.consumerId().map(s0 -> s0.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) &&
                        !partitionState.clientId().map(s0 -> s0.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) &&
                        !partitionState.host().map(h -> h.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false);
                }, "Expected a 'Stable' group status, rows and valid values for consumer id / client id / host columns in describe results for group " + group + ".");
            }
        }
    }
@ClusterTest
public void testDescribeMembersOfExistingGroup(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// run one consumer in the group consuming from a single-partition topic
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group});
Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))
) {
TestUtils.waitForCondition(() -> {
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
return consumerGroupDescription.members().size() == 1 && consumerGroupDescription.members().iterator().next().assignment().topicPartitions().size() == 1;
}, "Expected a 'Stable' group status, rows and valid member information for group " + group + ".");
Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
assertTrue(res.getValue().isPresent());
assertTrue(res.getValue().get().size() == 1 && res.getValue().get().iterator().next().assignment().size() == 1,
"Expected a topic partition assigned to the single group member for group " + group);
}
}
}
    /**
     * Verifies collectGroupState for a stable one-member group: state STABLE, one member, and a
     * coordinator whose id is one of the cluster's brokers.
     */
    @ClusterTest
    public void testDescribeStateOfExistingGroup(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run one consumer in the group consuming from a single-partition topic
            // NOTE(review): for the CLASSIC protocol this sets GROUP_REMOTE_ASSIGNOR_CONFIG to an
            // empty string instead of omitting it — presumably ignored by classic consumers, but
            // worth confirming.
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, groupProtocol == GroupProtocol.CONSUMER ? "range" : ""));
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    GroupInformation state = service.collectGroupState(group);
                    return Objects.equals(state.groupState(), GroupState.STABLE) &&
                        state.numMembers() == 1 &&
                        state.coordinator() != null &&
                        clusterInstance.brokerIds().contains(state.coordinator().id());
                }, "Expected a 'Stable' group status, with one member for group " + group + ".");
            }
        }
    }
@ClusterTest
public void testDescribeStateOfExistingGroupWithNonDefaultAssignor(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// run one consumer in the group consuming from a single-partition topic
AutoCloseable protocolConsumerGroupExecutor = null;
try {
String expectedName;
if (groupProtocol.equals(GroupProtocol.CONSUMER)) {
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CONSUMER, group, topic, Map.of(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "range"));
expectedName = RangeAssignor.RANGE_ASSIGNOR_NAME;
} else {
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, topic, Map.of(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName()));
expectedName = RoundRobinAssignor.ROUNDROBIN_ASSIGNOR_NAME;
}
try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})) {
TestUtils.waitForCondition(() -> {
GroupInformation state = service.collectGroupState(group);
return Objects.equals(state.groupState(), GroupState.STABLE) &&
state.numMembers() == 1 &&
Objects.equals(state.assignmentStrategy(), expectedName) &&
state.coordinator() != null &&
clusterInstance.brokerIds().contains(state.coordinator().id());
}, "Expected a 'Stable' group status, with one member and " + expectedName + " assignment strategy for group " + group + ".");
}
} finally {
if (protocolConsumerGroupExecutor != null) {
protocolConsumerGroupExecutor.close();
}
}
}
}
    /**
     * For every describe flavor: a stable one-member group prints header + one data row; after
     * the consumer is stopped, describing the now-memberless group prints a "no active members"
     * warning on stderr.
     */
    @ClusterTest
    public void testDescribeExistingGroupWithNoMembers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic);
            for (List<String> describeType : DESCRIBE_TYPES) {
                String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
                List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
                cgcArgs.addAll(describeType);
                // run one consumer in the group consuming from a single-partition topic
                try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                     ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
                ) {
                    TestUtils.waitForCondition(() -> {
                        Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                        return res.getKey().trim().split("\n").length == 2 &&
                            res.getValue().isEmpty() &&
                            checkArgsHeaderOutput(cgcArgs, res.getKey().trim().split("\n")[0]);
                    }, "Expected describe group results with one data row for describe type '" + String.join(" ", describeType) + "'");
                    // Stop the consumer so the group loses its only member.
                    // (Closed again by try-with-resources on scope exit — assumed idempotent.)
                    protocolConsumerGroupExecutor.close();
                    TestUtils.waitForCondition(
                        () -> ToolsTestUtils.grabConsoleError(describeGroups(service)).contains("Consumer group '" + group + "' has no active members."),
                        "Expected no active member in describe group results with describe type " + String.join(" ", describeType));
                }
            }
        }
    }
@ClusterTest
public void testDescribeOffsetsOfExistingGroupWithNoMembers(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// run one consumer in the group consuming from a single-partition topic
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
) {
TestUtils.waitForCondition(() -> {
Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> res = service.collectGroupOffsets(group);
return res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false)
&& res.getValue().map(c -> c.stream().anyMatch(assignment -> Objects.equals(assignment.group(), group) && assignment.offset().isPresent())).orElse(false);
}, "Expected the group to initially become stable, and to find group in assignments after initial offset commit.");
// stop the consumer so the group has no active member anymore
protocolConsumerGroupExecutor.close();
TestUtils.waitForCondition(() -> {
Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> offsets = service.collectGroupOffsets(group);
Optional<GroupState> state = offsets.getKey();
Optional<Collection<PartitionAssignmentState>> assignments = offsets.getValue();
List<PartitionAssignmentState> testGroupAssignments = assignments.get().stream().filter(a -> Objects.equals(a.group(), group)).toList();
PartitionAssignmentState assignment = testGroupAssignments.get(0);
return state.map(s -> s.equals(GroupState.EMPTY)).orElse(false) &&
testGroupAssignments.size() == 1 &&
assignment.consumerId().map(c -> c.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) && // the member should be gone
assignment.clientId().map(c -> c.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) &&
assignment.host().map(c -> c.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false);
}, "failed to collect group offsets");
}
}
}
    /**
     * Verifies member collection after the only consumer leaves: the group is first STABLE with a
     * member row for this group, then — once the consumer is closed — EMPTY with a present but
     * empty member collection.
     */
    @ClusterTest
    public void testDescribeMembersOfExistingGroupWithNoMembers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                    return res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false)
                        && res.getValue().map(c -> c.stream().anyMatch(m -> Objects.equals(m.group(), group))).orElse(false);
                }, "Expected the group to initially become stable, and to find group in assignments after initial offset commit.");
                // stop the consumer so the group has no active member anymore
                // (closed again by try-with-resources on scope exit — assumed idempotent)
                protocolConsumerGroupExecutor.close();
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                    return res.getKey().map(s -> s.equals(GroupState.EMPTY)).orElse(false) && res.getValue().isPresent() && res.getValue().get().isEmpty();
                }, "Expected no member in describe group members results for group '" + group + "'");
            }
        }
    }
    /**
     * Verifies state collection across the member-leave transition: STABLE with one member while
     * the consumer runs, then EMPTY with zero members after it is closed.
     */
    @ClusterTest
    public void testDescribeStateOfExistingGroupWithNoMembers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    GroupInformation state = service.collectGroupState(group);
                    return Objects.equals(state.groupState(), GroupState.STABLE) &&
                        state.numMembers() == 1 &&
                        state.coordinator() != null &&
                        clusterInstance.brokerIds().contains(state.coordinator().id());
                }, "Expected the group to initially become stable, and have a single member.");
                // stop the consumer so the group has no active member anymore
                // (closed again by try-with-resources on scope exit — assumed idempotent)
                protocolConsumerGroupExecutor.close();
                TestUtils.waitForCondition(() -> {
                    GroupInformation state = service.collectGroupState(group);
                    return Objects.equals(state.groupState(), GroupState.EMPTY) && state.numMembers() == 0;
                }, "Expected the group to become empty after the only member leaving.");
            }
        }
    }
    /**
     * Two consumers on a one-partition topic: one member necessarily has no partitions. The
     * members view must list both members (header + 2 rows) while the offsets/state views show a
     * single data row (header + 1 row).
     */
    @ClusterTest
    public void testDescribeWithConsumersWithoutAssignedPartitions(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic);
            for (List<String> describeType : DESCRIBE_TYPES) {
                String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
                List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
                cgcArgs.addAll(describeType);
                // run two consumers in the group consuming from a single-partition topic
                try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                     ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
                ) {
                    TestUtils.waitForCondition(() -> {
                        Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                        // The members view prints a row per member (2), the other views one row.
                        int expectedNumRows = DESCRIBE_TYPE_MEMBERS.contains(describeType) ? 3 : 2;
                        return res.getValue().isEmpty() &&
                            res.getKey().trim().split("\n").length == expectedNumRows &&
                            checkArgsHeaderOutput(cgcArgs, res.getKey().trim().split("\n")[0]);
                    }, "Expected a single data row in describe group result with describe type '" + String.join(" ", describeType) + "'");
                }
            }
        }
    }
@ClusterTest
public void testDescribeOffsetsWithConsumersWithoutAssignedPartitions(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// run two consumers in the group consuming from a single-partition topic
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
) {
TestUtils.waitForCondition(() -> {
Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> res = service.collectGroupOffsets(group);
return res.getKey().map(s -> s.equals(GroupState.STABLE)).isPresent() &&
res.getValue().isPresent() &&
res.getValue().get().stream().filter(s -> Objects.equals(s.group(), group)).count() == 1 &&
res.getValue().get().stream().filter(x -> Objects.equals(x.group(), group) && x.partition().isPresent()).count() == 1;
}, "Expected rows for consumers with no assigned partitions in describe group results");
}
}
}
    /**
     * With two consumers on a one-partition topic, the members view must report both members:
     * exactly one with one partition and one with none, and at least one non-empty assignment.
     */
    @ClusterTest
    public void testDescribeMembersWithConsumersWithoutAssignedPartitions(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run two consumers in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                    return res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false) &&
                        res.getValue().isPresent() &&
                        res.getValue().get().stream().filter(s -> Objects.equals(s.group(), group)).count() == 2 &&
                        res.getValue().get().stream().filter(x -> Objects.equals(x.group(), group) && x.numPartitions() == 1).count() == 1 &&
                        res.getValue().get().stream().filter(x -> Objects.equals(x.group(), group) && x.numPartitions() == 0).count() == 1 &&
                        res.getValue().get().stream().anyMatch(s -> !s.assignment().isEmpty());
                }, "Expected rows for consumers with no assigned partitions in describe group results");
                // NOTE(review): this re-collects and re-asserts a subset of the condition above;
                // the message mentions "verbose" but no --verbose flag is used here — possibly a
                // leftover from an earlier version of this test.
                Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                assertTrue(res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false)
                        && res.getValue().map(c -> c.stream().anyMatch(s -> !s.assignment().isEmpty())).orElse(false),
                    "Expected additional columns in verbose version of describe members");
            }
        }
    }
    /**
     * Verifies that {@code --describe --state} reports a STABLE group with two members when two
     * consumers share a single-partition topic (so one member necessarily owns no partitions).
     */
    @ClusterTest
    public void testDescribeStateWithConsumersWithoutAssignedPartitions(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run two consumers in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    GroupInformation state = service.collectGroupState(group);
                    return Objects.equals(state.groupState(), GroupState.STABLE) && state.numMembers() == 2;
                }, "Expected two consumers in describe group results");
            }
        }
    }
    /**
     * Runs {@code --describe} with every describe sub-option over a two-partition topic consumed
     * by two consumers, checking the console output's row count and header line.
     */
    @ClusterTest
    public void testDescribeWithMultiPartitionTopicAndMultipleConsumers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic, 2);
            for (List<String> describeType : DESCRIBE_TYPES) {
                String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
                List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
                cgcArgs.addAll(describeType);
                // run two consumers in the group consuming from a two-partition topic
                try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                     ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
                ) {
                    TestUtils.waitForCondition(() -> {
                        Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                        // --state prints one data row; --offsets/--members print one row per consumer
                        int expectedNumRows = DESCRIBE_TYPE_STATE.contains(describeType) ? 2 : 3;
                        return res.getValue().isEmpty() &&
                            res.getKey().trim().split("\n").length == expectedNumRows &&
                            checkArgsHeaderOutput(cgcArgs, res.getKey().trim().split("\n")[0]);
                    }, "Expected a single data row in describe group result with describe type '" + String.join(" ", describeType) + "'");
                }
            }
        }
    }
    /**
     * Verifies that collecting group offsets over a two-partition topic consumed by two consumers
     * yields one row per consumer, each with a concrete partition (no orphan rows).
     */
    @ClusterTest
    public void testDescribeOffsetsWithMultiPartitionTopicAndMultipleConsumers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic, 2);
            // run two consumers in the group consuming from a two-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> res = service.collectGroupOffsets(group);
                    return res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false) &&
                        res.getValue().isPresent() &&
                        res.getValue().get().stream().filter(s -> Objects.equals(s.group(), group)).count() == 2 &&
                        res.getValue().get().stream().filter(x -> Objects.equals(x.group(), group) && x.partition().isPresent()).count() == 2 &&
                        res.getValue().get().stream().noneMatch(x -> Objects.equals(x.group(), group) && x.partition().isEmpty());
                }, "Expected two rows (one row per consumer) in describe group results.");
            }
        }
    }
    /**
     * Verifies that collecting group members over a two-partition topic consumed by two consumers
     * yields one member row per consumer, each owning exactly one partition.
     */
    @ClusterTest
    public void testDescribeMembersWithMultiPartitionTopicAndMultipleConsumers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic, 2);
            // run two consumers in the group consuming from a two-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                    return res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false) &&
                        res.getValue().isPresent() &&
                        res.getValue().get().stream().filter(s -> Objects.equals(s.group(), group)).count() == 2 &&
                        res.getValue().get().stream().filter(x -> Objects.equals(x.group(), group) && x.numPartitions() == 1).count() == 2 &&
                        res.getValue().get().stream().noneMatch(x -> Objects.equals(x.group(), group) && x.numPartitions() == 0);
                }, "Expected two rows (one row per consumer) in describe group members results.");
                // every member's assignment list must be populated in the detailed view
                Entry<Optional<GroupState>, Optional<Collection<MemberAssignmentState>>> res = service.collectGroupMembers(group);
                assertTrue(res.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false) && res.getValue().map(s -> s.stream().filter(x -> x.assignment().isEmpty()).count()).orElse(0L) == 0,
                    "Expected additional columns in verbose version of describe members");
            }
        }
    }
    /**
     * Verifies that {@code --describe --state} reports a STABLE group with two members over a
     * two-partition topic consumed by two consumers.
     */
    @ClusterTest
    public void testDescribeStateWithMultiPartitionTopicAndMultipleConsumers(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic, 2);
            // run two consumers in the group consuming from a two-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    GroupInformation state = service.collectGroupState(group);
                    return Objects.equals(state.groupState(), GroupState.STABLE) && Objects.equals(state.group(), group) && state.numMembers() == 2;
                }, "Expected a stable group with two members in describe group state result.");
            }
        }
    }
@ClusterTest
public void testDescribeSimpleConsumerGroup(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
// Ensure that the offsets of consumers which don't use group management are still displayed
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic, 2);
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1)), Map.of());
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
) {
TestUtils.waitForCondition(() -> {
Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> res = service.collectGroupOffsets(group);
return res.getKey().map(s -> s.equals(GroupState.EMPTY)).orElse(false)
&& res.getValue().isPresent() && res.getValue().get().stream().filter(s -> Objects.equals(s.group(), group)).count() == 2;
}, "Expected a stable group with two members in describe group state result.");
}
}
}
    /**
     * Verifies that {@code --describe} fails with a wrapped {@link TimeoutException} when the
     * group initialization timeout ({@code --timeout 1}) is too short for the group to stabilize.
     */
    @ClusterTest
    public void testDescribeGroupWithShortInitializationTimeout(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            createTopic(topic);
            // Let creation of the offsets topic happen during group initialization to ensure that initialization doesn't
            // complete before the timeout expires
            List<String> describeType = DESCRIBE_TYPES.get(RANDOM.nextInt(DESCRIBE_TYPES.size()));
            String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
            // set the group initialization timeout too low for the group to stabilize
            List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--timeout", "1", "--group", group));
            cgcArgs.addAll(describeType);
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
            ) {
                ExecutionException e = assertThrows(ExecutionException.class, service::describeGroups);
                assertInstanceOf(TimeoutException.class, e.getCause());
            }
        }
    }
    /**
     * Verifies that collecting group offsets fails with a wrapped {@link TimeoutException} when the
     * group initialization timeout ({@code --timeout 1}) is too short for the group to stabilize.
     */
    @ClusterTest
    public void testDescribeGroupOffsetsWithShortInitializationTimeout(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // Let creation of the offsets topic happen during group initialization to ensure that initialization doesn't
            // complete before the timeout expires
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 // set the group initialization timeout too low for the group to stabilize
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
            ) {
                Throwable e = assertThrows(ExecutionException.class, () -> service.collectGroupOffsets(group));
                assertEquals(TimeoutException.class, e.getCause().getClass());
            }
        }
    }
@ClusterTest
public void testDescribeGroupMembersWithShortInitializationTimeout(ClusterInstance clusterInstance) throws Exception {
this.clusterInstance = clusterInstance;
for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
String topic = TOPIC_PREFIX + groupProtocol.name();
String group = GROUP_PREFIX + groupProtocol.name();
createTopic(topic);
// Let creation of the offsets topic happen during group initialization to ensure that initialization doesn't
// complete before the timeout expires
// run one consumer in the group consuming from a single-partition topic
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
// set the group initialization timeout too low for the group to stabilize
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
) {
Throwable e = assertThrows(ExecutionException.class, () -> service.collectGroupMembers(group));
assertEquals(TimeoutException.class, e.getCause().getClass());
e = assertThrows(ExecutionException.class, () -> service.collectGroupMembers(group));
assertEquals(TimeoutException.class, e.getCause().getClass());
}
}
}
    /**
     * Verifies that collecting group state fails with a wrapped {@link TimeoutException} when the
     * group initialization timeout ({@code --timeout 1}) is too short for the group to stabilize.
     */
    @ClusterTest
    public void testDescribeGroupStateWithShortInitializationTimeout(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // Let creation of the offsets topic happen during group initialization to ensure that initialization doesn't
            // complete before the timeout expires
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
                 // set the group initialization timeout too low for the group to stabilize
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
            ) {
                Throwable e = assertThrows(ExecutionException.class, () -> service.collectGroupState(group));
                assertEquals(TimeoutException.class, e.getCause().getClass());
            }
        }
    }
    /**
     * Verifies that a group whose consumer never commits offsets (auto-commit disabled) is still
     * described with a STABLE state and real consumer-id / client-id / host column values.
     */
    @ClusterTest
    public void testDescribeNonOffsetCommitGroup(ClusterInstance clusterInstance) throws Exception {
        this.clusterInstance = clusterInstance;
        for (GroupProtocol groupProtocol: clusterInstance.supportedGroupProtocols()) {
            String topic = TOPIC_PREFIX + groupProtocol.name();
            String group = GROUP_PREFIX + groupProtocol.name();
            createTopic(topic);
            // run one consumer in the group consuming from a single-partition topic
            try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"));
                 ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
            ) {
                TestUtils.waitForCondition(() -> {
                    Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> groupOffsets = service.collectGroupOffsets(group);
                    Predicate<PartitionAssignmentState> isGrp = s -> Objects.equals(s.group(), group);
                    boolean res = groupOffsets.getKey().map(s -> s.equals(GroupState.STABLE)).orElse(false) &&
                        groupOffsets.getValue().isPresent() &&
                        groupOffsets.getValue().get().stream().filter(isGrp).count() == 1;
                    if (!res)
                        return false;
                    Optional<PartitionAssignmentState> maybeAssignmentState = groupOffsets.getValue().get().stream().filter(isGrp).findFirst();
                    if (maybeAssignmentState.isEmpty())
                        return false;
                    PartitionAssignmentState assignmentState = maybeAssignmentState.get();
                    // the consumer columns must not be the "-" placeholder even without commits
                    return assignmentState.consumerId().map(c -> !c.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) &&
                        assignmentState.clientId().map(c -> !c.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false) &&
                        assignmentState.host().map(h -> !h.trim().equals(ConsumerGroupCommand.MISSING_COLUMN_VALUE)).orElse(false);
                }, "Expected a 'Stable' group status, rows and valid values for consumer id / client id / host columns in describe results for non-offset-committing group " + group + ".");
            }
        }
    }
    /**
     * Verifies describe output when one partition has no leader: after shutting down the broker
     * leading partition 2, its offset columns must show the "-" placeholder while the other
     * partitions still show real offsets.
     *
     * The config `OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG` needs to be set to a value greater than 1 to ensure the
     * normal invocation of APIs such as `FIND_COORDINATOR` when a broker has shutdown
     */
    @Timeout(60)
    @ClusterTest(brokers = 3, serverProperties = {@ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")})
    public void testDescribeConsumerGroupWithoutLeaders(ClusterInstance clusterInstance) throws Exception {
        int brokerNum = 3;
        this.clusterInstance = clusterInstance;
        // define topic and group, then send 5 records to each partition
        String topic = TOPIC_PREFIX + UUID.randomUUID();
        String group = GROUP_PREFIX + UUID.randomUUID();
        clusterInstance.createTopic(topic, brokerNum, (short) 1);
        for (int i = 0; i < brokerNum; i++) {
            sendRecords(topic, i, 5);
        }
        // append the command
        List<String> cgcArgs = List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--all-topics");
        try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, topic, Map.of());
             ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]));
             Admin admin = clusterInstance.admin()
        ) {
            // shutdown the target broker
            int noneLeaderPartition = 2;
            int shutdownBrokerId = clusterInstance.getLeaderBrokerId(new TopicPartition(topic, noneLeaderPartition));
            clusterInstance.shutdownBroker(shutdownBrokerId);
            TestUtils.waitForCondition(() -> {
                Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
                String[] lines = res.getKey().trim().split("\n");
                // expect exactly the header plus one row per partition and no stderr output
                if (lines.length != 4 || !res.getValue().isEmpty()) {
                    return false;
                }
                // get the client data, such as `consumerId,host,clientId`, to append the expected output
                ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
                MemberDescription memberDescription = consumerGroupDescription.members().iterator().next();
                String consumerId = memberDescription.consumerId();
                String host = memberDescription.host();
                String clientId = memberDescription.clientId();
                // the expected output; partition 2's leader is down, so its offsets are "-"
                List<String> partition0content = List.of(group, topic, "0", "5", "5", "0", consumerId, host, clientId);
                List<String> partition1content = List.of(group, topic, "1", "5", "5", "0", consumerId, host, clientId);
                List<String> partition2content = List.of(group, topic, "2", "-", "-", "-", consumerId, host, clientId);
                return checkArgsHeaderOutput(cgcArgs, lines[0])
                    && Arrays.stream(lines[1].trim().split("\\s+")).toList().equals(partition0content)
                    && Arrays.stream(lines[2].trim().split("\\s+")).toList().equals(partition1content)
                    && Arrays.stream(lines[3].trim().split("\\s+")).toList().equals(partition2content);
            }, "Expected 3 data rows excluding the header and no error in describe groups when a broker shutdown.");
        }
    }
@Test
public void testDescribeWithUnrecognizedNewConsumerOption() {
String group = GROUP_PREFIX + "unrecognized";
String[] cgcArgs = new String[]{"--new-consumer", "--bootstrap-server", "localhost:9092", "--describe", "--group", group};
assertThrows(joptsimple.OptionException.class, () -> ConsumerGroupCommandOptions.fromArgs(cgcArgs));
}
@Test
public void testDescribeWithMultipleSubActions() {
String group = GROUP_PREFIX + "multiple.sub.actions";
AtomicInteger exitStatus = new AtomicInteger(0);
AtomicReference<String> exitMessage = new AtomicReference<>("");
Exit.setExitProcedure((status, err) -> {
exitStatus.set(status);
exitMessage.set(err);
throw new RuntimeException();
});
String[] cgcArgs = new String[]{"--bootstrap-server", "localhost:9092", "--describe", "--group", group, "--members", "--state"};
try {
assertThrows(RuntimeException.class, () -> ConsumerGroupCommand.main(cgcArgs));
} finally {
Exit.resetExitProcedure();
}
assertEquals(1, exitStatus.get());
assertTrue(exitMessage.get().contains("Option [describe] takes at most one of these options"));
}
@Test
public void testDescribeWithStateValue() {
AtomicInteger exitStatus = new AtomicInteger(0);
AtomicReference<String> exitMessage = new AtomicReference<>("");
Exit.setExitProcedure((status, err) -> {
exitStatus.set(status);
exitMessage.set(err);
throw new RuntimeException();
});
String[] cgcArgs = new String[]{"--bootstrap-server", "localhost:9092", "--describe", "--all-groups", "--state", "Stable"};
try {
assertThrows(RuntimeException.class, () -> ConsumerGroupCommand.main(cgcArgs));
} finally {
Exit.resetExitProcedure();
}
assertEquals(1, exitStatus.get());
assertTrue(exitMessage.get().contains("Option [describe] does not take a value for [state]"));
}
@Test
public void testPrintVersion() {
ToolsTestUtils.MockExitProcedure exitProcedure = new ToolsTestUtils.MockExitProcedure();
Exit.setExitProcedure(exitProcedure);
try {
String out = ToolsTestUtils.captureStandardOut(() -> ConsumerGroupCommandOptions.fromArgs(new String[]{"--version"}));
assertEquals(0, exitProcedure.statusCode());
assertEquals(AppInfoParser.getVersion(), out);
} finally {
Exit.resetExitProcedure();
}
}
private static ConsumerGroupCommand.ConsumerGroupService consumerGroupService(String[] args) {
return new ConsumerGroupCommand.ConsumerGroupService(
ConsumerGroupCommandOptions.fromArgs(args),
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
);
}
private void createTopic(String topic) {
createTopic(topic, 1);
}
private void createTopic(String topic, int numPartitions) {
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
Assertions.assertDoesNotThrow(() -> admin.createTopics(List.of(new NewTopic(topic, numPartitions, (short) 1))).topicId(topic).get());
}
}
private void deleteConsumerGroups(Collection<String> groupIds) {
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
Assertions.assertDoesNotThrow(() -> admin.deleteConsumerGroups(groupIds).all().get());
}
}
private void deleteTopic(String topic) {
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(List.of(topic)).topicNameValues().get(topic).get());
}
}
private AutoCloseable consumerGroupClosable(GroupProtocol protocol, String groupId, Set<TopicPartition> topicPartitions, Map<String, Object> customConfigs) {
Map<String, Object> configs = composeConfigs(
groupId,
protocol.name,
customConfigs
);
return ConsumerGroupCommandTestUtils.buildConsumers(
1,
topicPartitions,
() -> new KafkaConsumer<String, String>(configs)
);
}
private AutoCloseable consumerGroupClosable(GroupProtocol protocol, String groupId, String topicName, Map<String, Object> customConfigs) {
return consumerGroupClosable(protocol, groupId, topicName, customConfigs, 1);
}
private AutoCloseable consumerGroupClosable(GroupProtocol protocol, String groupId, String topicName, Map<String, Object> customConfigs, int numConsumers) {
return consumerGroupClosable(protocol, groupId, Set.of(topicName), customConfigs, numConsumers);
}
private AutoCloseable consumerGroupClosable(
GroupProtocol protocol,
String groupId,
Set<String> topicNames,
Map<String, Object> customConfigs,
int numConsumers
) {
Map<String, Object> configs = composeConfigs(
groupId,
protocol.name,
customConfigs
);
return ConsumerGroupCommandTestUtils.buildConsumers(
numConsumers,
true,
() -> new KafkaConsumer<String, String>(configs),
consumer -> consumer.subscribe(topicNames)
);
}
private Map<String, Object> composeConfigs(String groupId, String groupProtocol, Map<String, Object> customConfigs) {
Map<String, Object> configs = new HashMap<>();
configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers());
configs.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
configs.putAll(customConfigs);
return configs;
}
private Runnable describeGroups(ConsumerGroupCommand.ConsumerGroupService service) {
return () -> Assertions.assertDoesNotThrow(service::describeGroups);
}
private boolean checkArgsHeaderOutput(List<String> args, String output) {
if (!output.contains("GROUP")) {
return false;
}
if (args.contains("--members")) {
return checkMembersArgsHeaderOutput(output, args.contains("--verbose"));
}
if (args.contains("--state")) {
return checkStateArgsHeaderOutput(output, args.contains("--verbose"));
}
// --offsets or no arguments
return checkOffsetsArgsHeaderOutput(output, args.contains("--verbose"));
}
private boolean checkOffsetsArgsHeaderOutput(String output, boolean verbose) {
List<String> expectedKeys = verbose ?
List.of("GROUP", "TOPIC", "PARTITION", "LEADER-EPOCH", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID") :
List.of("GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID");
return Arrays.stream(output.trim().split("\\s+")).toList().equals(expectedKeys);
}
private boolean checkStateArgsOutput(String output) {
return output.contains("COORDINATOR (ID)") && output.contains("ASSIGNMENT-STRATEGY") && output.contains("STATE") && output.contains("#MEMBERS");
}
private boolean checkMembersArgsHeaderOutput(String output, boolean verbose) {
List<String> expectedKeys = verbose ?
List.of("GROUP", "CONSUMER-ID", "HOST", "CLIENT-ID", "#PARTITIONS", "CURRENT-EPOCH", "CURRENT-ASSIGNMENT", "TARGET-EPOCH", "TARGET-ASSIGNMENT") :
List.of("GROUP", "CONSUMER-ID", "HOST", "CLIENT-ID", "#PARTITIONS");
return Arrays.stream(output.trim().split("\\s+")).toList().equals(expectedKeys);
}
private boolean checkStateArgsHeaderOutput(String output, boolean verbose) {
List<String> expectedKeys = verbose ?
List.of("GROUP", "COORDINATOR", "(ID)", "ASSIGNMENT-STRATEGY", "STATE", "GROUP-EPOCH", "TARGET-ASSIGNMENT-EPOCH", "#MEMBERS") :
List.of("GROUP", "COORDINATOR", "(ID)", "ASSIGNMENT-STRATEGY", "STATE", "#MEMBERS");
return Arrays.stream(output.trim().split("\\s+")).toList().equals(expectedKeys);
}
private void sendRecords(String topic, int partition, int recordsCount) {
try (KafkaProducer<String, String> producer = new KafkaProducer<>(Map.of(
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(),
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(),
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()
))) {
IntStream.range(0, recordsCount).forEach(i ->
producer.send(new org.apache.kafka.clients.producer.ProducerRecord<>(topic, partition, Integer.toString(i), Integer.toString(i))));
producer.flush();
}
}
}
| DescribeConsumerGroupTest |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/Bean.java | {
"start": 5642,
"end": 6523
} | class ____ {
*
* @Bean
* public FooService fooService() {
* return new FooService(fooRepository());
* }
*
* @Bean
* public FooRepository fooRepository() {
* return new JdbcFooRepository(dataSource());
* }
*
* // ...
* }</pre>
*
* <h3>{@code @Bean} <em>Lite</em> Mode</h3>
*
* <p>{@code @Bean} methods may also be declared within classes that are <em>not</em>
* annotated with {@code @Configuration}. If a bean method is declared on a bean
* that is <em>not</em> annotated with {@code @Configuration} it is processed in a
* so-called <em>'lite'</em> mode.
*
* <p>Bean methods in <em>lite</em> mode will be treated as plain <em>factory
* methods</em> by the container (similar to {@code factory-method} declarations
* in XML), with scoping and lifecycle callbacks properly applied. The containing
* | AppConfig |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java | {
"start": 3039,
"end": 21995
} | class ____ {
private final Logger log;
private final ConsumerMetadata metadata;
private final SubscriptionState subscriptions;
private final ConsumerNetworkClient client;
private final Time time;
private final int requestTimeoutMs;
private final IsolationLevel isolationLevel;
private final OffsetsForLeaderEpochClient offsetsForLeaderEpochClient;
private final ApiVersions apiVersions;
private final OffsetFetcherUtils offsetFetcherUtils;
    /**
     * @param logContext context used to create this class' logger
     * @param client network client used to send list-offsets / offsets-for-leader-epoch requests
     * @param metadata cluster metadata, also used to register transient topics during lookups
     * @param subscriptions subscription state whose positions are reset/validated
     * @param time clock used for retry/timeout bookkeeping
     * @param retryBackoffMs backoff between retried lookups
     * @param requestTimeoutMs per-request timeout, also bounds position reset/validation retries
     * @param isolationLevel isolation level applied when interpreting fetched offsets
     * @param apiVersions broker API version information, used to gate epoch validation
     */
    public OffsetFetcher(LogContext logContext,
                         ConsumerNetworkClient client,
                         ConsumerMetadata metadata,
                         SubscriptionState subscriptions,
                         Time time,
                         long retryBackoffMs,
                         int requestTimeoutMs,
                         IsolationLevel isolationLevel,
                         ApiVersions apiVersions) {
        this.log = logContext.logger(getClass());
        this.time = time;
        this.client = client;
        this.metadata = metadata;
        this.subscriptions = subscriptions;
        this.requestTimeoutMs = requestTimeoutMs;
        this.isolationLevel = isolationLevel;
        this.apiVersions = apiVersions;
        this.offsetsForLeaderEpochClient = new OffsetsForLeaderEpochClient(client, logContext);
        this.offsetFetcherUtils = new OffsetFetcherUtils(logContext, metadata, subscriptions,
                time, retryBackoffMs, apiVersions);
    }
/**
* Reset offsets for all assigned partitions that require it.
*
* @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined
* and one or more partitions aren't awaiting a seekToBeginning() or seekToEnd().
*/
public void resetPositionsIfNeeded() {
Map<TopicPartition, AutoOffsetResetStrategy> partitionAutoOffsetResetStrategyMap =
offsetFetcherUtils.getOffsetResetStrategyForPartitions();
if (partitionAutoOffsetResetStrategyMap.isEmpty())
return;
resetPositionsAsync(partitionAutoOffsetResetStrategyMap);
}
/**
* Validate offsets for all assigned partitions for which a leader change has been detected.
*/
public void validatePositionsIfNeeded() {
Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate =
offsetFetcherUtils.refreshAndGetPartitionsToValidate();
validatePositionsAsync(partitionsToValidate);
}
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch,
Timer timer) {
metadata.addTransientTopics(topicsForPartitions(timestampsToSearch.keySet()));
try {
Map<TopicPartition, ListOffsetData> fetchedOffsets = fetchOffsetsByTimes(timestampsToSearch,
timer, true).fetchedOffsets;
return buildOffsetsForTimesResult(timestampsToSearch, fetchedOffsets);
} finally {
metadata.clearTransientTopics();
}
}
    /**
     * Fetch offsets for the given target timestamps, retrying (with metadata refreshes) until all
     * partitions resolve or the timer expires.
     *
     * @param timestampsToSearch target timestamp per partition
     * @param timer bounds how long this call may block; a zero timeout returns immediately with
     *              whatever (possibly empty) results were gathered
     * @param requireTimestamps whether brokers must return timestamps with the offsets
     * @throws TimeoutException if the timer expires before all partitions resolve
     */
    private ListOffsetResult fetchOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch,
                                                 Timer timer,
                                                 boolean requireTimestamps) {
        ListOffsetResult result = new ListOffsetResult();
        if (timestampsToSearch.isEmpty())
            return result;
        Map<TopicPartition, Long> remainingToSearch = new HashMap<>(timestampsToSearch);
        do {
            RequestFuture<ListOffsetResult> future = sendListOffsetsRequests(remainingToSearch, requireTimestamps);
            future.addListener(new RequestFutureListener<>() {
                @Override
                public void onSuccess(ListOffsetResult value) {
                    // guard the shared result/remainingToSearch state — presumably the listener
                    // can complete while the polling thread reads them; TODO confirm threading
                    synchronized (future) {
                        result.fetchedOffsets.putAll(value.fetchedOffsets);
                        // keep only partitions the response flagged for retry
                        remainingToSearch.keySet().retainAll(value.partitionsToRetry);
                        offsetFetcherUtils.updateSubscriptionState(value.fetchedOffsets, isolationLevel);
                    }
                }
                @Override
                public void onFailure(RuntimeException e) {
                    // retriable failures are handled by the surrounding loop; anything else aborts
                    if (!(e instanceof RetriableException)) {
                        throw future.exception();
                    }
                }
            });
            // if timeout is set to zero, do not try to poll the network client at all
            // and return empty immediately; otherwise try to get the results synchronously
            // and throw timeout exception if it cannot complete in time
            if (timer.timeoutMs() == 0L)
                return result;
            client.poll(future, timer);
            if (!future.isDone()) {
                break;
            } else if (remainingToSearch.isEmpty()) {
                return result;
            } else {
                // some partitions must be retried; refresh metadata before the next round
                client.awaitMetadataUpdate(timer);
            }
        } while (timer.notExpired());
        throw new TimeoutException("Failed to get offsets by times in " + timer.elapsedMs() + "ms");
    }
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Timer timer) {
return beginningOrEndOffset(partitions, ListOffsetsRequest.EARLIEST_TIMESTAMP, timer);
}
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Timer timer) {
return beginningOrEndOffset(partitions, ListOffsetsRequest.LATEST_TIMESTAMP, timer);
}
private Map<TopicPartition, Long> beginningOrEndOffset(Collection<TopicPartition> partitions,
long timestamp,
Timer timer) {
metadata.addTransientTopics(topicsForPartitions(partitions));
try {
Map<TopicPartition, Long> timestampsToSearch = partitions.stream()
.distinct()
.collect(Collectors.toMap(Function.identity(), tp -> timestamp));
ListOffsetResult result = fetchOffsetsByTimes(timestampsToSearch, timer, false);
return result.fetchedOffsets.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().offset));
} finally {
metadata.clearTransientTopics();
}
}
private void resetPositionsAsync(Map<TopicPartition, AutoOffsetResetStrategy> partitionAutoOffsetResetStrategyMap) {
Map<TopicPartition, Long> partitionResetTimestamps = partitionAutoOffsetResetStrategyMap.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().timestamp().get()));
Map<Node, Map<TopicPartition, ListOffsetsPartition>> timestampsToSearchByNode =
groupListOffsetRequests(partitionResetTimestamps, new HashSet<>());
for (Map.Entry<Node, Map<TopicPartition, ListOffsetsPartition>> entry : timestampsToSearchByNode.entrySet()) {
Node node = entry.getKey();
final Map<TopicPartition, ListOffsetsPartition> resetTimestamps = entry.getValue();
subscriptions.setNextAllowedRetry(resetTimestamps.keySet(), time.milliseconds() + requestTimeoutMs);
RequestFuture<ListOffsetResult> future = sendListOffsetRequest(node, resetTimestamps, false);
future.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(ListOffsetResult result) {
offsetFetcherUtils.onSuccessfulResponseForResettingPositions(result, partitionAutoOffsetResetStrategyMap);
}
@Override
public void onFailure(RuntimeException e) {
offsetFetcherUtils.onFailedResponseForResettingPositions(resetTimestamps, e);
}
});
}
}
/**
* For each partition which needs validation, make an asynchronous request to get the end-offsets for the partition
* with the epoch less than or equal to the epoch the partition last saw.
*
* <p/>
*
* Requests are grouped by Node for efficiency.
*/
private void validatePositionsAsync(Map<TopicPartition, FetchPosition> partitionsToValidate) {
final Map<Node, Map<TopicPartition, FetchPosition>> regrouped = regroupFetchPositionsByLeader(partitionsToValidate);
long nextResetTimeMs = time.milliseconds() + requestTimeoutMs;
regrouped.forEach((node, fetchPositions) -> {
if (node.isEmpty()) {
metadata.requestUpdate(true);
return;
}
NodeApiVersions nodeApiVersions = apiVersions.get(node.idString());
if (nodeApiVersions == null) {
client.tryConnect(node);
return;
}
if (!hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) {
log.debug("Skipping validation of fetch offsets for partitions {} since the broker does not " +
"support the required protocol version (introduced in Kafka 2.3)",
fetchPositions.keySet());
for (TopicPartition partition : fetchPositions.keySet()) {
subscriptions.completeValidation(partition);
}
return;
}
subscriptions.setNextAllowedRetry(fetchPositions.keySet(), nextResetTimeMs);
RequestFuture<OffsetForEpochResult> future =
offsetsForLeaderEpochClient.sendAsyncRequest(node, fetchPositions);
future.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(OffsetForEpochResult offsetsResult) {
offsetFetcherUtils.onSuccessfulResponseForValidatingPositions(fetchPositions,
offsetsResult);
}
@Override
public void onFailure(RuntimeException e) {
offsetFetcherUtils.onFailedResponseForValidatingPositions(fetchPositions, e);
}
});
});
}
/**
* Search the offsets by target times for the specified partitions.
*
* @param timestampsToSearch the mapping between partitions and target time
* @param requireTimestamps true if we should fail with an UnsupportedVersionException if the broker does
* not support fetching precise timestamps for offsets
* @return A response which can be polled to obtain the corresponding timestamps and offsets.
*/
private RequestFuture<ListOffsetResult> sendListOffsetsRequests(final Map<TopicPartition, Long> timestampsToSearch,
final boolean requireTimestamps) {
final Set<TopicPartition> partitionsToRetry = new HashSet<>();
Map<Node, Map<TopicPartition, ListOffsetsPartition>> timestampsToSearchByNode =
groupListOffsetRequests(timestampsToSearch, partitionsToRetry);
if (timestampsToSearchByNode.isEmpty())
return RequestFuture.failure(new StaleMetadataException());
final RequestFuture<ListOffsetResult> listOffsetRequestsFuture = new RequestFuture<>();
final Map<TopicPartition, ListOffsetData> fetchedTimestampOffsets = new HashMap<>();
final AtomicInteger remainingResponses = new AtomicInteger(timestampsToSearchByNode.size());
for (Map.Entry<Node, Map<TopicPartition, ListOffsetsPartition>> entry : timestampsToSearchByNode.entrySet()) {
RequestFuture<ListOffsetResult> future = sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps);
future.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(ListOffsetResult partialResult) {
synchronized (listOffsetRequestsFuture) {
fetchedTimestampOffsets.putAll(partialResult.fetchedOffsets);
partitionsToRetry.addAll(partialResult.partitionsToRetry);
if (remainingResponses.decrementAndGet() == 0 && !listOffsetRequestsFuture.isDone()) {
ListOffsetResult result = new ListOffsetResult(fetchedTimestampOffsets, partitionsToRetry);
listOffsetRequestsFuture.complete(result);
}
}
}
@Override
public void onFailure(RuntimeException e) {
synchronized (listOffsetRequestsFuture) {
if (!listOffsetRequestsFuture.isDone())
listOffsetRequestsFuture.raise(e);
}
}
});
}
return listOffsetRequestsFuture;
}
/**
* Groups timestamps to search by node for topic partitions in `timestampsToSearch` that have
* leaders available. Topic partitions from `timestampsToSearch` that do not have their leader
* available are added to `partitionsToRetry`
*
* @param timestampsToSearch The mapping from partitions to the target timestamps
* @param partitionsToRetry A set of topic partitions that will be extended with partitions
* that need metadata update or re-connect to the leader.
*/
private Map<Node, Map<TopicPartition, ListOffsetsPartition>> groupListOffsetRequests(
Map<TopicPartition, Long> timestampsToSearch,
Set<TopicPartition> partitionsToRetry) {
final Map<TopicPartition, ListOffsetsPartition> partitionDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
TopicPartition tp = entry.getKey();
Long offset = entry.getValue();
Metadata.LeaderAndEpoch leaderAndEpoch = metadata.currentLeader(tp);
if (leaderAndEpoch.leader.isEmpty()) {
log.debug("Leader for partition {} is unknown for fetching offset {}", tp, offset);
metadata.requestUpdate(true);
partitionsToRetry.add(tp);
} else {
Node leader = leaderAndEpoch.leader.get();
if (client.isUnavailable(leader)) {
client.maybeThrowAuthFailure(leader);
// The connection has failed and we need to await the backoff period before we can
// try again. No need to request a metadata update since the disconnect will have
// done so already.
log.debug("Leader {} for partition {} is unavailable for fetching offset until reconnect backoff expires",
leader, tp);
partitionsToRetry.add(tp);
} else {
int currentLeaderEpoch = leaderAndEpoch.epoch.orElse(ListOffsetsResponse.UNKNOWN_EPOCH);
partitionDataMap.put(tp, new ListOffsetsPartition()
.setPartitionIndex(tp.partition())
.setTimestamp(offset)
.setCurrentLeaderEpoch(currentLeaderEpoch));
}
}
}
return offsetFetcherUtils.regroupPartitionMapByNode(partitionDataMap);
}
/**
* Send the ListOffsetRequest to a specific broker for the partitions and target timestamps.
*
* @param node The node to send the ListOffsetRequest to.
* @param timestampsToSearch The mapping from partitions to the target timestamps.
* @param requireTimestamp True if we require a timestamp in the response.
* @return A response which can be polled to obtain the corresponding timestamps and offsets.
*/
private RequestFuture<ListOffsetResult> sendListOffsetRequest(final Node node,
final Map<TopicPartition, ListOffsetsPartition> timestampsToSearch,
boolean requireTimestamp) {
ListOffsetsRequest.Builder builder = ListOffsetsRequest.Builder
.forConsumer(requireTimestamp, isolationLevel)
.setTargetTimes(ListOffsetsRequest.toListOffsetsTopics(timestampsToSearch))
.setTimeoutMs(requestTimeoutMs);
log.debug("Sending ListOffsetRequest {} to broker {}", builder, node);
return client.send(node, builder)
.compose(new RequestFutureAdapter<>() {
@Override
public void onSuccess(ClientResponse response, RequestFuture<ListOffsetResult> future) {
ListOffsetsResponse lor = (ListOffsetsResponse) response.responseBody();
log.trace("Received ListOffsetResponse {} from broker {}", lor, node);
handleListOffsetResponse(lor, future);
}
});
}
/**
* Callback for the response of the list offset call above.
*
* @param listOffsetsResponse The response from the server.
* @param future The future to be completed when the response returns. Note that any partition-level errors will
* generally fail the entire future result. The one exception is UNSUPPORTED_FOR_MESSAGE_FORMAT,
* which indicates that the broker does not support the v1 message format. Partitions with this
* particular error are simply left out of the future map. Note that the corresponding timestamp
* value of each partition may be null only for v0. In v1 and later the ListOffset API would not
* return a null timestamp (-1 is returned instead when necessary).
*/
private void handleListOffsetResponse(ListOffsetsResponse listOffsetsResponse,
RequestFuture<ListOffsetResult> future) {
try {
ListOffsetResult result = offsetFetcherUtils.handleListOffsetResponse(listOffsetsResponse);
future.complete(result);
} catch (RuntimeException e) {
future.raise(e);
}
}
/**
* If we have seen new metadata (as tracked by {@link org.apache.kafka.clients.Metadata#updateVersion()}), then
* we should check that all the assignments have a valid position.
*/
public void validatePositionsOnMetadataChange() {
offsetFetcherUtils.validatePositionsOnMetadataChange();
}
} | OffsetFetcher |
java | apache__camel | components/camel-ai/camel-langchain4j-tools/src/main/java/org/apache/camel/component/langchain4j/tools/LangChain4jToolsEndpoint.java | {
"start": 2340,
"end": 8771
} | class ____ extends DefaultEndpoint {
@Metadata(required = true)
@UriPath(description = "The tool id")
private final String toolId;
@Metadata(required = true)
@UriParam(description = "The tags for the tools")
private String tags;
@UriParam
private LangChain4jToolsConfiguration configuration;
@Metadata(label = "consumer")
@UriParam(description = "Tool description")
private String description;
@Metadata(label = "consumer")
@UriParam(description = "Tool name")
private String name;
@Metadata(label = "consumer")
@UriParam(description = "List of Tool parameters in the form of parameter.<name>=<type>", prefix = "parameter.",
multiValue = true)
private Map<String, String> parameters;
@Metadata(label = "consumer,advanced")
@UriParam(description = "Tool's Camel Parameters, programmatically define Tool description and parameters")
private CamelSimpleToolParameter camelToolParameter;
public LangChain4jToolsEndpoint(String uri, LangChain4jToolsComponent component, String toolId, String tags,
LangChain4jToolsConfiguration configuration) {
super(uri, component);
this.toolId = toolId;
this.tags = tags;
this.configuration = configuration;
}
@Override
public Producer createProducer() throws Exception {
return new LangChain4jToolsProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
ToolSpecification.Builder toolSpecificationBuilder = ToolSpecification.builder();
if (camelToolParameter != null) {
toolSpecificationBuilder.description(camelToolParameter.getDescription());
JsonObjectSchema.Builder parametersBuilder = JsonObjectSchema.builder();
List<NamedJsonSchemaProperty> properties = camelToolParameter.getProperties();
for (NamedJsonSchemaProperty namedProperty : properties) {
parametersBuilder.addProperty(
namedProperty.getName(),
namedProperty.getProperties());
}
toolSpecificationBuilder.parameters(parametersBuilder.build());
} else if (description != null) {
toolSpecificationBuilder.description(description);
if (parameters != null) {
JsonObjectSchema.Builder parametersBuilder = JsonObjectSchema.builder();
parameters.forEach((name, type) -> parametersBuilder.addProperty(name, createJsonSchema(type)));
toolSpecificationBuilder.parameters(parametersBuilder.build());
}
} else {
// Consumer without toolParameter or description
throw new IllegalArgumentException(
"In order to use the langchain4j component as a consumer, you need to specify at least description, or a camelToolParameter");
}
final String toolName;
if (name != null) {
toolName = name;
} else if (description != null) {
toolName = StringHelper.dashToCamelCase(description.replace(" ", "-"));
} else {
toolName = null;
}
ToolSpecification toolSpecification = toolSpecificationBuilder
.name(toolName)
.build();
final LangChain4jToolsConsumer langChain4jToolsConsumer = new LangChain4jToolsConsumer(this, processor);
configureConsumer(langChain4jToolsConsumer);
CamelToolSpecification camelToolSpecification
= new CamelToolSpecification(toolSpecification, langChain4jToolsConsumer);
final CamelToolExecutorCache executorCache = CamelToolExecutorCache.getInstance();
String[] splitTags = TagsHelper.splitTags(tags);
for (String tag : splitTags) {
executorCache.put(tag, camelToolSpecification);
}
return camelToolSpecification.getConsumer();
}
/**
* A freely named tool ID (prefer to use something unique)
*
* @return
*/
public String getToolId() {
return toolId;
}
/**
* The tool configuration
*
* @return
*/
public LangChain4jToolsConfiguration getConfiguration() {
return configuration;
}
/**
* A description of the tool. This is passed to the LLM, so it should be descriptive of the tool capabilities
*
* @return
*/
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
/**
* The tool name. This is passed to the LLM, so it should conform to any LLM restrictions.
*
* @return
*/
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
/**
* The input parameters for the tool
*
* @return
*/
public Map<String, String> getParameters() {
return parameters;
}
public void setParameters(Map<String, String> parameters) {
this.parameters = parameters;
}
public CamelSimpleToolParameter getCamelToolParameter() {
return camelToolParameter;
}
public void setCamelToolParameter(CamelSimpleToolParameter camelToolParameter) {
this.camelToolParameter = camelToolParameter;
}
public void setTags(String tags) {
this.tags = tags;
}
/**
* The tags associated with the tool
*
* @return
*/
public String getTags() {
return tags;
}
@Override
protected void doStop() throws Exception {
super.doStop();
CamelToolExecutorCache.getInstance().getTools().clear();
}
/**
* Creates a JsonScheùaElement based on a String type
*
* @param type
* @return
*/
private JsonSchemaElement createJsonSchema(String type) {
return switch (type.toLowerCase()) {
case "string" -> JsonStringSchema.builder().build();
case "integer" -> JsonIntegerSchema.builder().build();
case "number" -> JsonNumberSchema.builder().build();
case "boolean" -> JsonBooleanSchema.builder().build();
default -> JsonStringSchema.builder().build(); // fallback for unkown types
};
}
}
| LangChain4jToolsEndpoint |
java | quarkusio__quarkus | extensions/jaxb/deployment/src/test/java/io/quarkus/jaxb/deployment/AbstractJaxbContextTest.java | {
"start": 1797,
"end": 1840
} | class ____ known to this context.");
}
}
| is |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/BeanFactoryPostProcessor.java | {
"start": 2832,
"end": 3401
} | interface ____ {
/**
* Modify the application context's internal bean factory after its standard
* initialization. All bean definitions will have been loaded, but no beans
* will have been instantiated yet. This allows for overriding or adding
* properties even to eager-initializing beans.
* @param beanFactory the bean factory used by the application context
* @throws org.springframework.beans.BeansException in case of errors
*/
void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException;
}
| BeanFactoryPostProcessor |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/interop/UntypedObjectWithDupsTest.java | {
"start": 674,
"end": 3517
} | class ____ extends LinkedHashMap<String,String> { };
private final String DOC_WITH_DUPS = a2q(
"{'hello': 'world',\n"
+ "'lists' : 1,\n"
+ "'lists' : 2,\n"
+ "'lists' : {\n"
+ " 'inner' : 'internal',\n"
+ " 'time' : 123\n"
+ "},\n"
+ "'lists' : 3,\n"
+ "'single' : 'one'\n"
+ "}");
// Testing the baseline non-merging behavior
@Test
public void testDocWithDupsNoMerging() throws Exception
{
_verifyDupsNoMerging(Object.class);
_verifyDupsNoMerging(Map.class);
}
// For [dataformat-xml#???]
@Test
public void testDocWithDupsAsUntyped() throws Exception
{
_verifyDupsAreMerged(Object.class);
}
// For [dataformat-xml#498] / [databind#3484]
@Test
public void testDocWithDupsAsMap() throws Exception
{
_verifyDupsAreMerged(Map.class);
}
// And also verify that Maps with values other than `Object` will
// NOT try merging no matter what
@Test
public void testDocWithDupsAsNonUntypedMap() throws Exception
{
final String DOC = a2q("{'key':'a','key':'b'}");
assertEquals(a2q("{'key':'b'}"),
_readWriteDupDoc(DOC, StringStringMap.class));
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods
///////////////////////////////////////////////////////////////////////
*/
/* Method that will verify default JSON behavior of overwriting value
* (no merging).
*/
private <T> void _verifyDupsNoMerging(Class<T> cls) throws Exception
{
// This is where need some trickery
T value;
try (JsonParser p = JSON_MAPPER.createParser(DOC_WITH_DUPS)) {
value = JSON_MAPPER.readValue(p, cls);
}
String json = JSON_MAPPER.writeValueAsString(value);
assertEquals(a2q(
"{'hello':'world','lists':3,'single':'one'}"),
json);
}
/* Method that will verify alternate behavior (used by XML module f.ex)
* in which duplicate "properties" are merged into `List`s as necessary
*/
private void _verifyDupsAreMerged(Class<?> cls) throws Exception
{
assertEquals(a2q(
"{'hello':'world','lists':[1,2,"
+"{'inner':'internal','time':123},3],'single':'one'}"),
_readWriteDupDoc(DOC_WITH_DUPS, cls));
}
private String _readWriteDupDoc(String doc, Class<?> cls) throws Exception
{
// This is where need some trickery
Object value;
try (JsonParser p = new WithDupsParser(JSON_MAPPER.createParser(doc))) {
value = JSON_MAPPER.readValue(p, cls);
}
return JSON_MAPPER.writeValueAsString(value);
}
/**
* Helper | StringStringMap |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/inject/InjectOnConstructorOfAbstractClass.java | {
"start": 2317,
"end": 3275
} | class ____ extends BugChecker implements MethodTreeMatcher {
private static final MultiMatcher<MethodTree, AnnotationTree> INJECT_FINDER =
annotations(
AT_LEAST_ONE,
anyOf(isType(InjectMatchers.JAVAX_INJECT_ANNOTATION), isType(GUICE_INJECT_ANNOTATION)));
private static final Matcher<MethodTree> TO_MATCH =
allOf(methodIsConstructor(), enclosingClass(hasModifier(ABSTRACT)));
@Override
public Description matchMethod(MethodTree methodTree, VisitorState state) {
if (TO_MATCH.matches(methodTree, state)) {
MultiMatchResult<AnnotationTree> injectAnnotations =
INJECT_FINDER.multiMatchResult(methodTree, state);
if (injectAnnotations.matches()) {
AnnotationTree injectAnnotation = injectAnnotations.matchingNodes().getFirst();
return describeMatch(injectAnnotation, delete(injectAnnotation));
}
}
return Description.NO_MATCH;
}
}
| InjectOnConstructorOfAbstractClass |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableSwitchMap.java | {
"start": 1275,
"end": 2159
} | class ____<T, R> extends AbstractObservableWithUpstream<T, R> {
final Function<? super T, ? extends ObservableSource<? extends R>> mapper;
final int bufferSize;
final boolean delayErrors;
public ObservableSwitchMap(ObservableSource<T> source,
Function<? super T, ? extends ObservableSource<? extends R>> mapper, int bufferSize,
boolean delayErrors) {
super(source);
this.mapper = mapper;
this.bufferSize = bufferSize;
this.delayErrors = delayErrors;
}
@Override
public void subscribeActual(Observer<? super R> t) {
if (ObservableScalarXMap.tryScalarXMapSubscribe(source, t, mapper)) {
return;
}
source.subscribe(new SwitchMapObserver<>(t, mapper, bufferSize, delayErrors));
}
static final | ObservableSwitchMap |
java | google__guice | core/test/com/google/inject/spi/ElementsTest.java | {
"start": 51491,
"end": 51558
} | class ____<T> {
@Inject Stage stage;
B(T t) {}
}
static | B |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java | {
"start": 2318,
"end": 3772
} | class ____ {
static final String UNSAFE_COMPARER_NAME =
LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
/**
* Returns the Unsafe-using Comparer, or falls back to the pure-Java
* implementation if unable to do so.
*/
static Comparer<byte[]> getBestComparer() {
if (System.getProperty("os.arch").toLowerCase().startsWith("sparc")) {
if (LOG.isTraceEnabled()) {
LOG.trace("Lexicographical comparer selected for "
+ "byte aligned system architecture");
}
return lexicographicalComparerJavaImpl();
}
try {
Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Comparer<byte[]> comparer =
(Comparer<byte[]>) theClass.getEnumConstants()[0];
if (LOG.isTraceEnabled()) {
LOG.trace("Unsafe comparer selected for "
+ "byte unaligned system architecture");
}
return comparer;
} catch (Throwable t) { // ensure we really catch *everything*
if (LOG.isTraceEnabled()) {
LOG.trace(t.getMessage());
LOG.trace("Lexicographical comparer selected");
}
return lexicographicalComparerJavaImpl();
}
}
private | LexicographicalComparerHolder |
java | apache__dubbo | dubbo-plugin/dubbo-security/src/main/java/org/apache/dubbo/security/cert/CertDeployerListener.java | {
"start": 1045,
"end": 2579
} | class ____ implements ApplicationDeployListener {
private final DubboCertManager dubboCertManager;
public CertDeployerListener(FrameworkModel frameworkModel) {
dubboCertManager = frameworkModel.getBeanFactory().getBean(DubboCertManager.class);
}
@Override
public void onInitialize(ApplicationModel scopeModel) {}
@Override
public void onStarting(ApplicationModel scopeModel) {
scopeModel.getApplicationConfigManager().getSsl().ifPresent(sslConfig -> {
if (Objects.nonNull(sslConfig.getCaAddress()) && dubboCertManager != null) {
CertConfig certConfig = new CertConfig(
sslConfig.getCaAddress(),
sslConfig.getEnvType(),
sslConfig.getCaCertPath(),
sslConfig.getOidcTokenPath());
dubboCertManager.connect(certConfig);
}
});
}
@Override
public void onStarted(ApplicationModel scopeModel) {}
@Override
public void onCompletion(ApplicationModel scopeModel) {}
@Override
public void onStopping(ApplicationModel scopeModel) {
if (dubboCertManager != null) {
dubboCertManager.disConnect();
}
}
@Override
public void onStopped(ApplicationModel scopeModel) {}
@Override
public void onFailure(ApplicationModel scopeModel, Throwable cause) {
if (dubboCertManager != null) {
dubboCertManager.disConnect();
}
}
}
| CertDeployerListener |
java | grpc__grpc-java | netty/src/main/java/io/grpc/netty/NettyServer.java | {
"start": 2727,
"end": 12416
} | class ____ implements InternalServer, InternalWithLogId {
private static final Logger log = Logger.getLogger(InternalServer.class.getName());
private final InternalLogId logId;
private final List<? extends SocketAddress> addresses;
private final ChannelFactory<? extends ServerChannel> channelFactory;
private final Map<ChannelOption<?>, ?> channelOptions;
private final Map<ChannelOption<?>, ?> childChannelOptions;
private final ProtocolNegotiator protocolNegotiator;
private final int maxStreamsPerConnection;
private final ObjectPool<? extends EventLoopGroup> bossGroupPool;
private final ObjectPool<? extends EventLoopGroup> workerGroupPool;
private final boolean forceHeapBuffer;
private EventLoopGroup bossGroup;
private EventLoopGroup workerGroup;
private ServerListener listener;
private final ChannelGroup channelGroup;
private final boolean autoFlowControl;
private final int flowControlWindow;
private final int maxMessageSize;
private final int maxHeaderListSize;
private final int softLimitHeaderListSize;
private final long keepAliveTimeInNanos;
private final long keepAliveTimeoutInNanos;
private final long maxConnectionIdleInNanos;
private final long maxConnectionAgeInNanos;
private final long maxConnectionAgeGraceInNanos;
private final boolean permitKeepAliveWithoutCalls;
private final long permitKeepAliveTimeInNanos;
private final int maxRstCount;
private final long maxRstPeriodNanos;
private final Attributes eagAttributes;
private final ReferenceCounted sharedResourceReferenceCounter =
new SharedResourceReferenceCounter();
private final List<? extends ServerStreamTracer.Factory> streamTracerFactories;
private final TransportTracer.Factory transportTracerFactory;
private final InternalChannelz channelz;
private volatile List<InternalInstrumented<SocketStats>> listenSocketStatsList =
Collections.emptyList();
private volatile boolean terminated;
private final EventLoop bossExecutor;
NettyServer(
List<? extends SocketAddress> addresses,
ChannelFactory<? extends ServerChannel> channelFactory,
Map<ChannelOption<?>, ?> channelOptions,
Map<ChannelOption<?>, ?> childChannelOptions,
ObjectPool<? extends EventLoopGroup> bossGroupPool,
ObjectPool<? extends EventLoopGroup> workerGroupPool,
boolean forceHeapBuffer,
ProtocolNegotiator protocolNegotiator,
List<? extends ServerStreamTracer.Factory> streamTracerFactories,
TransportTracer.Factory transportTracerFactory,
int maxStreamsPerConnection,
boolean autoFlowControl,
int flowControlWindow,
int maxMessageSize,
int maxHeaderListSize,
int softLimitHeaderListSize,
long keepAliveTimeInNanos,
long keepAliveTimeoutInNanos,
long maxConnectionIdleInNanos,
long maxConnectionAgeInNanos, long maxConnectionAgeGraceInNanos,
boolean permitKeepAliveWithoutCalls, long permitKeepAliveTimeInNanos,
int maxRstCount, long maxRstPeriodNanos,
Attributes eagAttributes, InternalChannelz channelz) {
this.addresses = checkNotNull(addresses, "addresses");
this.channelFactory = checkNotNull(channelFactory, "channelFactory");
checkNotNull(channelOptions, "channelOptions");
this.channelOptions = new HashMap<ChannelOption<?>, Object>(channelOptions);
checkNotNull(childChannelOptions, "childChannelOptions");
this.childChannelOptions = new HashMap<ChannelOption<?>, Object>(childChannelOptions);
this.bossGroupPool = checkNotNull(bossGroupPool, "bossGroupPool");
this.workerGroupPool = checkNotNull(workerGroupPool, "workerGroupPool");
this.forceHeapBuffer = forceHeapBuffer;
this.bossGroup = bossGroupPool.getObject();
this.bossExecutor = bossGroup.next();
this.channelGroup = new DefaultChannelGroup(this.bossExecutor);
this.workerGroup = workerGroupPool.getObject();
this.protocolNegotiator = checkNotNull(protocolNegotiator, "protocolNegotiator");
this.streamTracerFactories = checkNotNull(streamTracerFactories, "streamTracerFactories");
this.transportTracerFactory = transportTracerFactory;
this.maxStreamsPerConnection = maxStreamsPerConnection;
this.autoFlowControl = autoFlowControl;
this.flowControlWindow = flowControlWindow;
this.maxMessageSize = maxMessageSize;
this.maxHeaderListSize = maxHeaderListSize;
this.softLimitHeaderListSize = softLimitHeaderListSize;
this.keepAliveTimeInNanos = keepAliveTimeInNanos;
this.keepAliveTimeoutInNanos = keepAliveTimeoutInNanos;
this.maxConnectionIdleInNanos = maxConnectionIdleInNanos;
this.maxConnectionAgeInNanos = maxConnectionAgeInNanos;
this.maxConnectionAgeGraceInNanos = maxConnectionAgeGraceInNanos;
this.permitKeepAliveWithoutCalls = permitKeepAliveWithoutCalls;
this.permitKeepAliveTimeInNanos = permitKeepAliveTimeInNanos;
this.maxRstCount = maxRstCount;
this.maxRstPeriodNanos = maxRstPeriodNanos;
this.eagAttributes = checkNotNull(eagAttributes, "eagAttributes");
this.channelz = Preconditions.checkNotNull(channelz);
this.logId = InternalLogId.allocate(getClass(), addresses.isEmpty() ? "No address" :
String.valueOf(addresses));
}
@Override
public SocketAddress getListenSocketAddress() {
Iterator<Channel> it = channelGroup.iterator();
if (it.hasNext()) {
return it.next().localAddress();
} else {
// server is not listening/bound yet, just return the original port.
return addresses.isEmpty() ? null : addresses.get(0);
}
}
@Override
public List<SocketAddress> getListenSocketAddresses() {
List<SocketAddress> listenSocketAddresses = new ArrayList<>();
for (Channel c: channelGroup) {
listenSocketAddresses.add(c.localAddress());
}
// server is not listening/bound yet, just return the original ports.
if (listenSocketAddresses.isEmpty()) {
listenSocketAddresses.addAll(addresses);
}
return listenSocketAddresses;
}
@Override
public InternalInstrumented<SocketStats> getListenSocketStats() {
List<InternalInstrumented<SocketStats>> savedListenSocketStatsList = listenSocketStatsList;
return savedListenSocketStatsList.isEmpty() ? null : savedListenSocketStatsList.get(0);
}
@Override
public List<InternalInstrumented<SocketStats>> getListenSocketStatsList() {
return listenSocketStatsList;
}
@Override
public void start(ServerListener serverListener) throws IOException {
listener = checkNotNull(serverListener, "serverListener");
final ServerBootstrap b = new ServerBootstrap();
b.option(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer));
b.childOption(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer));
b.group(bossExecutor, workerGroup);
b.channelFactory(channelFactory);
// For non-socket based channel, the option will be ignored.
b.childOption(SO_KEEPALIVE, true);
if (channelOptions != null) {
for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
@SuppressWarnings("unchecked")
ChannelOption<Object> key = (ChannelOption<Object>) entry.getKey();
b.option(key, entry.getValue());
}
}
if (childChannelOptions != null) {
for (Map.Entry<ChannelOption<?>, ?> entry : childChannelOptions.entrySet()) {
@SuppressWarnings("unchecked")
ChannelOption<Object> key = (ChannelOption<Object>) entry.getKey();
b.childOption(key, entry.getValue());
}
}
b.childHandler(new ChannelInitializer<Channel>() {
@Override
public void initChannel(Channel ch) {
ChannelPromise channelDone = ch.newPromise();
long maxConnectionAgeInNanos = NettyServer.this.maxConnectionAgeInNanos;
if (maxConnectionAgeInNanos != MAX_CONNECTION_AGE_NANOS_DISABLED) {
// apply a random jitter of +/-10% to max connection age
maxConnectionAgeInNanos =
(long) ((.9D + Math.random() * .2D) * maxConnectionAgeInNanos);
}
NettyServerTransport transport =
new NettyServerTransport(
ch,
channelDone,
protocolNegotiator,
streamTracerFactories,
transportTracerFactory.create(),
maxStreamsPerConnection,
autoFlowControl,
flowControlWindow,
maxMessageSize,
maxHeaderListSize,
softLimitHeaderListSize,
keepAliveTimeInNanos,
keepAliveTimeoutInNanos,
maxConnectionIdleInNanos,
maxConnectionAgeInNanos,
maxConnectionAgeGraceInNanos,
permitKeepAliveWithoutCalls,
permitKeepAliveTimeInNanos,
maxRstCount,
maxRstPeriodNanos,
eagAttributes);
ServerTransportListener transportListener;
// This is to order callbacks on the listener, not to guard access to channel.
synchronized (NettyServer.this) {
if (terminated) {
// Server already terminated.
ch.close();
return;
}
// `channel` shutdown can race with `ch` initialization, so this is only safe to increment
// inside the lock.
sharedResourceReferenceCounter.retain();
transportListener = listener.transportCreated(transport);
}
/* Releases the event loop if the channel is "done", possibly due to the channel closing. */
final | NettyServer |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java | {
"start": 1896,
"end": 10306
} | class ____ implements Closeable {
/**
* State added to {@link SQLException}s when the server encounters an
* error.
*/
public static final String SQL_STATE_BAD_SERVER = "bad_server";
private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "Incorrect HTTP method for uri ["
+ SQL_QUERY_REST_ENDPOINT
+ "?error_trace] and method [POST], allowed:";
public static <R> R http(String path, String query, ConnectionConfiguration cfg, Function<JreHttpUrlConnection, R> handler) {
final URI uriPath = appendSegmentToPath(cfg.baseUri(), path); // update path if needed
final String uriQuery = query == null ? uriPath.getQuery() : query; // update query if needed
final URL url;
try {
url = new URI(
uriPath.getScheme(),
null,
uriPath.getHost(),
uriPath.getPort(),
uriPath.getPath(),
uriQuery,
uriPath.getFragment()
).toURL();
} catch (URISyntaxException | MalformedURLException ex) {
throw new ClientException("Cannot build url using base: [" + uriPath + "] query: [" + query + "] path: [" + path + "]", ex);
}
try (JreHttpUrlConnection con = new JreHttpUrlConnection(url, cfg)) {
return handler.apply(con);
}
}
private boolean closed = false;
final HttpURLConnection con;
private final URL url;
private static final String GZIP = "gzip";
public JreHttpUrlConnection(URL url, ConnectionConfiguration cfg) throws ClientException {
this.url = url;
try {
// due to the way the URL API is designed, the proxy needs to be passed in first
Proxy p = cfg.proxyConfig().proxy();
con = (HttpURLConnection) (p != null ? url.openConnection(p) : url.openConnection());
} catch (IOException ex) {
throw new ClientException("Cannot setup connection to " + url + " (" + ex.getMessage() + ")", ex);
}
// the rest of the connection setup
setupConnection(cfg);
}
private void setupConnection(ConnectionConfiguration cfg) {
// setup basic stuff first
// timeouts
con.setConnectTimeout((int) cfg.connectTimeout());
con.setReadTimeout((int) cfg.networkTimeout());
// disable content caching
con.setAllowUserInteraction(false);
con.setUseCaches(false);
// HTTP params
// HttpURL adds this header by default, HttpS does not
// adding it here to be consistent
con.setRequestProperty("Accept-Charset", "UTF-8");
// con.setRequestProperty("Accept-Encoding", GZIP);
setupSSL(cfg);
setupBasicAuth(cfg);
}
private void setupSSL(ConnectionConfiguration cfg) {
if (cfg.sslConfig().isEnabled()) {
HttpsURLConnection https = (HttpsURLConnection) con;
SSLSocketFactory factory = cfg.sslConfig().sslSocketFactory();
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
https.setSSLSocketFactory(factory);
return null;
});
}
}
private void setupBasicAuth(ConnectionConfiguration cfg) {
if (StringUtils.hasText(cfg.authUser())) {
String basicValue = cfg.authUser() + ":" + cfg.authPass();
String encoded = StringUtils.asUTFString(Base64.getEncoder().encode(StringUtils.toUTF(basicValue)));
con.setRequestProperty("Authorization", "Basic " + encoded);
}
}
public boolean head() throws ClientException {
try {
con.setRequestMethod("HEAD");
int responseCode = con.getResponseCode();
return responseCode == HttpURLConnection.HTTP_OK;
} catch (IOException ex) {
throw new ClientException("Cannot HEAD address " + url + " (" + ex.getMessage() + ")", ex);
}
}
public <R> ResponseOrException<R> request(
CheckedConsumer<OutputStream, IOException> doc,
CheckedBiFunction<InputStream, Function<String, List<String>>, R, IOException> parser,
String requestMethod
) throws ClientException {
return request(doc, parser, requestMethod, "application/json");
}
public <R> ResponseOrException<R> request(
CheckedConsumer<OutputStream, IOException> doc,
CheckedBiFunction<InputStream, Function<String, List<String>>, R, IOException> parser,
String requestMethod,
String contentTypeHeader
) throws ClientException {
try {
con.setRequestMethod(requestMethod);
con.setDoOutput(true);
con.setRequestProperty("Content-Type", contentTypeHeader);
con.setRequestProperty("Accept", "application/json");
if (doc != null) {
try (OutputStream out = con.getOutputStream()) {
doc.accept(out);
}
}
if (shouldParseBody(con.getResponseCode())) {
try (InputStream stream = getStream(con, con.getInputStream())) {
return new ResponseOrException<>(parser.apply(new BufferedInputStream(stream), getHeaderFields(con)));
}
}
return parserError();
} catch (IOException ex) {
throw new ClientException("Cannot POST address " + url + " (" + ex.getMessage() + ")", ex);
}
}
private static Function<String, List<String>> getHeaderFields(URLConnection con) {
return header -> {
List<String> values = new LinkedList<>();
for (Map.Entry<String, List<String>> entry : con.getHeaderFields().entrySet()) {
if (header.equalsIgnoreCase(entry.getKey())) {
values.addAll(entry.getValue());
}
}
return values;
};
}
private static boolean shouldParseBody(int responseCode) {
return responseCode == 200 || responseCode == 201 || responseCode == 202;
}
private <R> ResponseOrException<R> parserError() throws IOException {
RemoteFailure failure;
try (InputStream stream = getStream(con, con.getErrorStream())) {
failure = RemoteFailure.parseFromResponse(stream);
}
if (con.getResponseCode() >= 500) {
return new ResponseOrException<>(
new SQLException(
"Server encountered an error [" + failure.reason() + "]. [" + failure.remoteTrace() + "]",
SQL_STATE_BAD_SERVER
)
);
}
SqlExceptionType type = SqlExceptionType.fromRemoteFailureType(failure.type());
if (type == null) {
// check if x-pack or sql are not available (x-pack not installed or sql not enabled)
// by checking the error message the server is sending back
if (con.getResponseCode() >= HttpURLConnection.HTTP_BAD_REQUEST && failure.reason().contains(SQL_NOT_AVAILABLE_ERROR_MESSAGE)) {
return new ResponseOrException<>(
new SQLException(
"X-Pack/SQL does not seem to be available"
+ " on the Elasticsearch node using the access path '"
+ con.getURL().getHost()
+ (con.getURL().getPort() > 0 ? ":" + con.getURL().getPort() : "")
+ "'."
+ " Please verify X-Pack is installed and SQL enabled. Alternatively, check if any proxy is interfering"
+ " the communication to Elasticsearch",
SQL_STATE_BAD_SERVER
)
);
}
return new ResponseOrException<>(
new SQLException(
"Server sent bad type ["
+ failure.type()
+ "]. Original type was ["
+ failure.reason()
+ "]. ["
+ failure.remoteTrace()
+ "]",
SQL_STATE_BAD_SERVER
)
);
}
return new ResponseOrException<>(type.asException(failure.reason()));
}
public static | JreHttpUrlConnection |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/function/FailableTest.java | {
"start": 4008,
"end": 5276
} | class ____ {
private static int invocations;
static void reset() {
invocations = 0;
}
static boolean testDouble(final double value) throws SomeException {
throwOnOdd();
return true;
}
static boolean testGetBool() throws SomeException {
throwOnOdd();
return true;
}
static int testInc(final int value) throws SomeException {
throwOnOdd();
return value + 1;
}
static boolean testInt(final int value) throws SomeException {
throwOnOdd();
return true;
}
static boolean testLong(final long value) throws SomeException {
throwOnOdd();
return true;
}
private static void throwOnOdd() throws SomeException {
final int i = ++invocations;
if (i % 2 == 1) {
throw new SomeException("Odd Invocation: " + i, i);
}
}
FailureOnOddInvocations() throws SomeException {
throwOnOdd();
}
boolean getAsBoolean() throws SomeException {
throwOnOdd();
return true;
}
}
public static | FailureOnOddInvocations |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/hashcode/WikiImage.java | {
"start": 417,
"end": 1236
} | class ____ {
@Id
@GeneratedValue
private Long id;
@Basic
private String name;
public WikiImage() {
}
public WikiImage(String name) {
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof WikiImage) ) {
return false;
}
WikiImage wikiImage = (WikiImage) o;
if ( name != null ? !name.equals( wikiImage.name ) : wikiImage.name != null ) {
return false;
}
return true;
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public String toString() {
return "WikiImage{" +
"name='" + name + '\'' +
'}';
}
}
| WikiImage |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/StructNestedComponentAssociationErrorTest.java | {
"start": 3272,
"end": 3907
} | class ____ {
private String name;
@OneToMany(mappedBy = "mainBook")
private List<Book2> bookCollection;
}
@Test
public void testOneToMany() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
new MetadataSources( ssr )
.addAnnotatedClass( Book3.class )
.getMetadataBuilder()
.build();
Assertions.fail( "Expected a failure" );
}
catch (MappingException ex) {
assertThat( ex.getMessage(), containsString( "authorInfos.person.bookCollection" ) );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Entity(name = "Book")
public static | Person2 |
java | quarkusio__quarkus | independent-projects/tools/codestarts/src/main/java/io/quarkus/devtools/codestarts/core/reader/QuteCodestartFileReader.java | {
"start": 5276,
"end": 6357
} | class ____ implements ResultMapper {
public boolean appliesTo(TemplateNode.Origin origin, Object result) {
return Results.isNotFound(result);
}
public String map(Object result, Expression expression) {
if (expression.toOriginalString().equals("merged-content")) {
return "{merged-content}";
}
throw new TemplateException("Missing required data: {" + expression.toOriginalString() + "}");
}
}
/**
* private static CompletionStage<Object> replaceResolveAsync(EvalContext context) {
* String text = (String) context.getBase();
* switch (context.getName()) {
* case "replace":
* if (context.getParams().size() == 2) {
* return context.evaluate(context.getParams().get(0)).thenCombine(context.evaluate(context.getParams().get(1)),
* (r1, r2) -> CompletableFuture.completedFuture(text.replace(r1.toString(), r2.toString())));
* }
* default:
* return Results.NOT_FOUND;
* }
* }
**/
private static | MissingValueMapper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/sqldefault/OverriddenDefaultTest.java | {
"start": 912,
"end": 2280
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
BigDecimal unitPrice = new BigDecimal("12.99");
scope.inTransaction( session -> {
OrderLine entity = new OrderLine( unitPrice, 5 );
session.persist(entity);
session.flush();
assertEquals( getDefault(scope), entity.status );
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
} );
scope.inTransaction( session -> {
OrderLine entity = session.createQuery("from WithDefault", OrderLine.class ).getSingleResult();
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
assertEquals( getDefault(scope), entity.status );
entity.status = "old"; //should be ignored when fetch=true
} );
scope.inTransaction( session -> {
OrderLine entity = session.createQuery("from WithDefault", OrderLine.class ).getSingleResult();
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
assertEquals( "old", entity.status );
} );
}
String getDefault(SessionFactoryScope scope) {
return scope.getMetadataImplementor().getDatabase().getDialect() instanceof H2Dialect ? "NEW" : "new";
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name="WithDefault")
public static | OverriddenDefaultTest |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/scan/InvertingPackageScanFilter.java | {
"start": 1095,
"end": 1512
} | class ____ implements PackageScanFilter {
private final PackageScanFilter filter;
public InvertingPackageScanFilter(PackageScanFilter filter) {
this.filter = filter;
}
@Override
public boolean matches(Class<?> type) {
return !filter.matches(type);
}
@Override
public String toString() {
return "![" + filter.toString() + "]";
}
}
| InvertingPackageScanFilter |
java | apache__camel | components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/rest/RestNettyProducerVerbUpperCaseTest.java | {
"start": 1163,
"end": 2569
} | class ____ extends BaseNettyTest {
@Test
public void testVerbUpperCase() {
String out = fluentTemplate.withHeader("id", "123").to("direct:start").request(String.class);
assertNotNull(out);
assertEquals("123;Donald Duck", out);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use netty on localhost with the given port
restConfiguration().component("netty-http").host("localhost").port(getPort());
from("direct:start")
.to("rest:get:users/{id}/basic");
// use the rest DSL to define the rest services
rest("/users/")
.get("{id}/basic").to("direct:basic");
from("direct:basic")
.to("mock:input")
.process(exchange -> {
String method = exchange.getIn().getHeader(Exchange.HTTP_METHOD, String.class);
assertEquals("GET", method);
String id = exchange.getIn().getHeader("id", String.class);
exchange.getMessage().setBody(id + ";Donald Duck");
});
}
};
}
}
| RestNettyProducerVerbUpperCaseTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java | {
"start": 700,
"end": 1024
} | class ____ extends SearchException {
public DfsPhaseExecutionException(SearchShardTarget shardTarget, String msg, Throwable t) {
super(shardTarget, "Dfs Failed [" + msg + "]", t);
}
public DfsPhaseExecutionException(StreamInput in) throws IOException {
super(in);
}
}
| DfsPhaseExecutionException |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java | {
"start": 6518,
"end": 6802
} | class ____ extends
Mapper<LongWritable, Text, LongWritable, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
}
}
// this reduce consumes all the input and output nothing
public static | ConsumeMap |
java | apache__rocketmq | namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java | {
"start": 50566,
"end": 52116
} | class ____ {
private String clusterName;
private String brokerAddr;
private int hash;
public BrokerAddrInfo(String clusterName, String brokerAddr) {
this.clusterName = clusterName;
this.brokerAddr = brokerAddr;
}
public String getClusterName() {
return clusterName;
}
public String getBrokerAddr() {
return brokerAddr;
}
public boolean isEmpty() {
return clusterName.isEmpty() && brokerAddr.isEmpty();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof BrokerAddrInfo) {
BrokerAddrInfo addr = (BrokerAddrInfo) obj;
return clusterName.equals(addr.clusterName) && brokerAddr.equals(addr.brokerAddr);
}
return false;
}
@Override
public int hashCode() {
int h = hash;
if (h == 0 && clusterName.length() + brokerAddr.length() > 0) {
for (int i = 0; i < clusterName.length(); i++) {
h = 31 * h + clusterName.charAt(i);
}
h = 31 * h + '_';
for (int i = 0; i < brokerAddr.length(); i++) {
h = 31 * h + brokerAddr.charAt(i);
}
hash = h;
}
return h;
}
@Override
public String toString() {
return "BrokerIdentityInfo [clusterName=" + clusterName + ", brokerAddr=" + brokerAddr + "]";
}
}
| BrokerAddrInfo |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.