language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 68699,
"end": 68967
} | class ____ {}
""")
.addSourceLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
import com.google.errorprone.annotations.Immutable;
@Immutable
| MyImmutableType |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/intercepted/InterceptedBeanInjectionTest.java | {
"start": 1905,
"end": 2052
} | class ____ {
@Simple
String pong() {
return InterceptedDependent.class.getName();
}
}
}
| InterceptedDependent |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/readonly/ReadOnlyVersionedNodesTest.java | {
"start": 748,
"end": 18704
} | class ____ extends AbstractReadOnlyTest {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testSetReadOnlyTrueAndFalse(SessionFactoryScope scope) {
VersionedNode n = createVersionNode( scope );
clearCounts( scope );
scope.inSession(
session -> {
try {
session.beginTransaction();
VersionedNode node = session.get( VersionedNode.class, n.getId() );
session.setReadOnly( node, true );
node.setName( "node-name" );
session.getTransaction().commit();
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
// the changed name is still in node
assertEquals( "node-name", node.getName() );
session.beginTransaction();
node = session.get( VersionedNode.class, node.getId() );
// the changed name is still in the session
assertEquals( "node-name", node.getName() );
session.refresh( node );
// after refresh, the name reverts to the original value
assertEquals( "node", node.getName() );
node = session.get( VersionedNode.class, node.getId() );
assertEquals( "node", node.getName() );
session.getTransaction().commit();
}
finally {
if ( session.getTransaction().isActive() ) {
session.getTransaction().rollback();
}
}
}
);
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "node", node.getName() );
session.setReadOnly( node, true );
node.setName( "diff-node-name" );
session.flush();
assertEquals( "diff-node-name", node.getName() );
session.refresh( node );
assertEquals( "node", node.getName() );
session.setReadOnly( node, false );
node.setName( "diff-node-name" );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 0, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "diff-node-name", node.getName() );
assertEquals( 1, node.getVersion() );
session.setReadOnly( node, true );
session.remove( node );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 1, scope );
}
@Test
public void testUpdateSetReadOnlyTwice(SessionFactoryScope scope) {
VersionedNode n = createVersionNode( scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
node.setName( "node-name" );
session.setReadOnly( node, true );
session.setReadOnly( node, true );
}
);
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "node", node.getName() );
assertEquals( 0, node.getVersion() );
session.setReadOnly( node, true );
session.remove( node );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 1, scope );
}
@Test
public void testUpdateSetModifiable(SessionFactoryScope scope) {
VersionedNode n = createVersionNode( scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
node.setName( "node-name" );
session.setReadOnly( node, false );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 0, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "node-name", node.getName() );
assertEquals( 1, node.getVersion() );
session.setReadOnly( node, true );
session.remove( node );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 1, scope );
}
private VersionedNode createVersionNode(SessionFactoryScope scope) {
return scope.fromTransaction(
session -> {
VersionedNode nd = new VersionedNode( "node", "node" );
session.persist( nd );
return nd;
}
);
}
private VersionedNode createVersionNode(String id, String name, SessionFactoryScope scope) {
return scope.fromTransaction(
session -> {
VersionedNode nd = new VersionedNode( id, name );
session.persist( nd );
return nd;
}
);
}
@Test
@FailureExpected(jiraKey = "unknown")
public void testUpdateSetReadOnlySetModifiable(SessionFactoryScope scope) {
VersionedNode n = createVersionNode( scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
node.setName( "node-name" );
session.setReadOnly( node, true );
session.setReadOnly( node, false );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 0, scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "node-name", node.getName() );
assertEquals( 1, node.getVersion() );
session.remove( node );
}
);
}
@Test
@FailureExpected(jiraKey = "unknown")
public void testSetReadOnlyUpdateSetModifiable(SessionFactoryScope scope) {
VersionedNode n = createVersionNode( scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
session.setReadOnly( node, true );
node.setName( "node-name" );
session.setReadOnly( node, false );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 0, scope );
scope.inTransaction(
session -> {
VersionedNode node = session.get( VersionedNode.class, n.getId() );
assertEquals( "node-name", node.getName() );
assertEquals( 1, node.getVersion() );
session.remove( node );
}
);
}
@Test
public void testAddNewChildToReadOnlyParent(SessionFactoryScope scope) {
VersionedNode p = createVersionNode( "parent", "parent", scope );
clearCounts( scope );
VersionedNode c = scope.fromTransaction(
session -> {
VersionedNode parentManaged = session.get( VersionedNode.class, p.getId() );
session.setReadOnly( parentManaged, true );
parentManaged.setName( "new parent name" );
VersionedNode child = new VersionedNode( "child", "child" );
parentManaged.addChild( child );
return child;
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
assertEquals( "parent", parent.getName() );
assertEquals( 1, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertNotNull( child );
session.remove( parent );
}
);
}
@Test
public void testUpdateParentWithNewChildCommitWithReadOnlyParent(SessionFactoryScope scope) {
VersionedNode p = createVersionNode( "parent", "parent", scope );
clearCounts( scope );
p.setName( "new parent name" );
VersionedNode c = new VersionedNode( "child", "child" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode v = session.merge( p );
session.setReadOnly( v, true );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( parent.getName(), "parent" );
assertEquals( 1, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() );
assertSame( parent, child.getParent() );
assertSame( child, parent.getChildren().iterator().next() );
assertEquals( 0, child.getVersion() );
session.setReadOnly( parent, true );
session.setReadOnly( child, true );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testMergeDetachedParentWithNewChildCommitWithReadOnlyParent(SessionFactoryScope scope) {
VersionedNode p = createVersionNode( "parent", "parent", scope );
clearCounts( scope );
p.setName( "new parent name" );
VersionedNode c = new VersionedNode( "child", "child" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode parent = (VersionedNode) session.merge( p );
session.setReadOnly( parent, true );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( parent.getName(), "parent" );
assertEquals( 1, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() );
assertSame( parent, child.getParent() );
assertSame( child, parent.getChildren().iterator().next() );
assertEquals( 0, child.getVersion() );
session.setReadOnly( parent, true );
session.setReadOnly( child, true );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testGetParentMakeReadOnlyThenMergeDetachedParentWithNewChildC(SessionFactoryScope scope) {
VersionedNode p = createVersionNode( "parent", "parent", scope );
clearCounts( scope );
p.setName( "new parent name" );
VersionedNode c = new VersionedNode( "child", "child" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode parentManaged = session.get( VersionedNode.class, p.getId() );
session.setReadOnly( parentManaged, true );
VersionedNode parentMerged = (VersionedNode) session.merge( p );
assertSame( parentManaged, parentMerged );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( parent.getName(), "parent" );
assertEquals( 1, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() );
assertSame( parent, child.getParent() );
assertSame( child, parent.getChildren().iterator().next() );
assertEquals( 0, child.getVersion() );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testMergeUnchangedDetachedParentChildren(SessionFactoryScope scope) {
VersionedNode p = new VersionedNode( "parent", "parent" );
VersionedNode c = new VersionedNode( "child", "child" );
scope.inTransaction(
session -> {
p.addChild( c );
session.persist( p );
}
);
clearCounts( scope );
VersionedNode parent = scope.fromTransaction(
session ->
(VersionedNode) session.merge( p )
);
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parentGet = session.get( p.getClass(), p.getId() );
session.merge( parent );
}
);
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parentLoad = session.getReference( parent.getClass(), parent.getId() );
session.merge( parent );
}
);
assertUpdateCount( 0, scope );
assertInsertCount( 0, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent_ = session.get( VersionedNode.class, parent.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( parent_.getName(), "parent" );
assertEquals( 1, parent_.getChildren().size() );
assertEquals( 0, parent_.getVersion() );
assertSame( parent_, child.getParent() );
assertSame( child, parent_.getChildren().iterator().next() );
assertEquals( 0, child.getVersion() );
session.remove( parent_ );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testAddNewParentToReadOnlyChild(SessionFactoryScope scope) {
VersionedNode c = createVersionNode( "child", "child", scope );
clearCounts( scope );
VersionedNode p = new VersionedNode( "parent", "parent" );
scope.inTransaction(
session -> {
VersionedNode childManaged = session.get( VersionedNode.class, c.getId() );
session.setReadOnly( childManaged, true );
childManaged.setName( "new child name" );
p.addChild( childManaged );
}
);
assertUpdateCount( 0, scope );
assertInsertCount( 1, scope );
scope.inTransaction(
session -> {
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( "child", child.getName() );
assertNull( child.getParent() );
assertEquals( 0, child.getVersion() );
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
assertNotNull( parent );
session.setReadOnly( child, true );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 1, scope );
}
@Test
public void testUpdateChildWithNewParentCommitWithReadOnlyChild(SessionFactoryScope scope) {
VersionedNode c = createVersionNode( "child", "child", scope );
clearCounts( scope );
c.setName( "new child name" );
VersionedNode p = new VersionedNode( "parent", "parent" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode merged = session.merge( c );
session.setReadOnly( merged, true );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( child.getName(), "child" );
assertNull( child.getParent() );
assertEquals( 0, child.getVersion() );
assertNotNull( parent );
assertEquals( 0, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() );
session.setReadOnly( parent, true );
session.setReadOnly( child, true );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testMergeDetachedChildWithNewParentCommitWithReadOnlyChild(SessionFactoryScope scope) {
VersionedNode c = createVersionNode( "child", "child", scope );
clearCounts( scope );
c.setName( "new child name" );
VersionedNode p = new VersionedNode( "parent", "parent" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode child = (VersionedNode) session.merge( c );
session.setReadOnly( child, true );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( child.getName(), "child" );
assertNull( child.getParent() );
assertEquals( 0, child.getVersion() );
assertNotNull( parent );
assertEquals( 0, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() ); // hmmm, why was version updated?
session.setReadOnly( parent, true );
session.setReadOnly( child, true );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
@Test
public void testGetChildMakeReadOnlyThenMergeDetachedChildWithNewParent(SessionFactoryScope scope) {
VersionedNode c = createVersionNode( "child", "child", scope );
clearCounts( scope );
c.setName( "new child name" );
VersionedNode p = new VersionedNode( "parent", "parent" );
p.addChild( c );
scope.inTransaction(
session -> {
VersionedNode childManaged = session.get( VersionedNode.class, c.getId() );
session.setReadOnly( childManaged, true );
VersionedNode childMerged = (VersionedNode) session.merge( c );
assertSame( childManaged, childMerged );
}
);
assertUpdateCount( 1, scope );
assertInsertCount( 1, scope );
clearCounts( scope );
scope.inTransaction(
session -> {
VersionedNode parent = session.get( VersionedNode.class, p.getId() );
VersionedNode child = session.get( VersionedNode.class, c.getId() );
assertEquals( child.getName(), "child" );
assertNull( child.getParent() );
assertEquals( 0, child.getVersion() );
assertNotNull( parent );
assertEquals( 0, parent.getChildren().size() );
assertEquals( 1, parent.getVersion() ); // / hmmm, why was version updated?
session.setReadOnly( parent, true );
session.setReadOnly( child, true );
session.remove( parent );
session.remove( child );
}
);
assertUpdateCount( 0, scope );
assertDeleteCount( 2, scope );
}
protected void cleanupTest(SessionFactoryScope scope) throws Exception {
cleanup( scope );
}
private void cleanup(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.createQuery( "delete from VersionedNode where parent is not null" ).executeUpdate();
session.createQuery( "delete from VersionedNode" ).executeUpdate();
}
);
}
}
| ReadOnlyVersionedNodesTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/TimeUtcAsOffsetTimeJdbcType.java | {
"start": 874,
"end": 3465
} | class ____ implements JdbcType {
public static final TimeUtcAsOffsetTimeJdbcType INSTANCE = new TimeUtcAsOffsetTimeJdbcType();
public TimeUtcAsOffsetTimeJdbcType() {
}
@Override
public int getJdbcTypeCode() {
return Types.TIME_WITH_TIMEZONE;
}
@Override
public int getDefaultSqlTypeCode() {
return SqlTypes.TIME_UTC;
}
@Override
public String getFriendlyName() {
return "TIME_UTC";
}
@Override
public String toString() {
return "TimeUtcDescriptor";
}
@Override
public <T> JavaType<T> getJdbcRecommendedJavaTypeMapping(
Integer length,
Integer scale,
TypeConfiguration typeConfiguration) {
return typeConfiguration.getJavaTypeRegistry().getDescriptor( OffsetTime.class );
}
@Override
public Class<?> getPreferredJavaTypeClass(WrapperOptions options) {
return OffsetTime.class;
}
@Override
public <T> JdbcLiteralFormatter<T> getJdbcLiteralFormatter(JavaType<T> javaType) {
return new JdbcLiteralFormatterTemporal<>( javaType, TemporalType.TIME );
}
@Override
public <X> ValueBinder<X> getBinder(final JavaType<X> javaType) {
return new BasicBinder<>( javaType, this ) {
@Override
protected void doBind(PreparedStatement st, X value, int index, WrapperOptions options) throws SQLException {
final OffsetTime offsetTime = javaType.unwrap( value, OffsetTime.class, options );
st.setObject( index, offsetTime.withOffsetSameInstant( ZoneOffset.UTC ), Types.TIME_WITH_TIMEZONE );
}
@Override
protected void doBind(CallableStatement st, X value, String name, WrapperOptions options)
throws SQLException {
final OffsetTime offsetTime = javaType.unwrap( value, OffsetTime.class, options );
st.setObject( name, offsetTime.withOffsetSameInstant( ZoneOffset.UTC ), Types.TIME_WITH_TIMEZONE );
}
};
}
@Override
public <X> ValueExtractor<X> getExtractor(final JavaType<X> javaType) {
return new BasicExtractor<>( javaType, this ) {
@Override
protected X doExtract(ResultSet rs, int paramIndex, WrapperOptions options) throws SQLException {
return javaType.wrap( rs.getObject( paramIndex, OffsetTime.class ), options );
}
@Override
protected X doExtract(CallableStatement statement, int index, WrapperOptions options) throws SQLException {
return javaType.wrap( statement.getObject( index, OffsetTime.class ), options );
}
@Override
protected X doExtract(CallableStatement statement, String name, WrapperOptions options) throws SQLException {
return javaType.wrap( statement.getObject( name, OffsetTime.class ), options );
}
};
}
}
| TimeUtcAsOffsetTimeJdbcType |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/WireTapDefinition.java | {
"start": 1534,
"end": 13454
} | class ____<Type extends ProcessorDefinition<Type>> extends ToDynamicDefinition
implements ExecutorServiceAwareDefinition<WireTapDefinition<Type>> {
@XmlTransient
private ExecutorService executorServiceBean;
@XmlTransient
private Processor onPrepareProcessor;
@XmlAttribute
@Metadata(label = "advanced", defaultValue = "true", javaType = "java.lang.Boolean")
private String copy;
@XmlAttribute
@Metadata(label = "advanced", defaultValue = "true", javaType = "java.lang.Boolean")
private String dynamicUri;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.Processor")
private String onPrepare;
@XmlAttribute
@Metadata(label = "advanced", javaType = "java.util.concurrent.ExecutorService")
private String executorService;
public WireTapDefinition() {
}
public WireTapDefinition(WireTapDefinition<?> source) {
super(source);
this.executorServiceBean = source.executorServiceBean;
this.onPrepareProcessor = source.onPrepareProcessor;
this.copy = source.copy;
this.dynamicUri = source.dynamicUri;
this.onPrepare = source.onPrepare;
this.executorService = source.executorService;
}
@Override
public String getPattern() {
return ExchangePattern.InOnly.name();
}
@Override
public String toString() {
return "WireTap[" + getUri() + "]";
}
@Override
public String getShortName() {
return "wireTap";
}
@Override
public String getLabel() {
return "wireTap[" + getUri() + "]";
}
@Override
@SuppressWarnings("unchecked")
public Type end() {
// allow end() to return to previous type, so you can continue in the DSL
return (Type) super.end();
}
@Override
public void addOutput(ProcessorDefinition<?> output) {
// add outputs on parent as this wiretap does not support outputs
getParent().addOutput(output);
}
// Fluent API
// -------------------------------------------------------------------------
/**
* Uses a custom thread pool
*
* @param executorService a custom {@link ExecutorService} to use as thread pool for sending tapped exchanges
* @return the builder
*/
@Override
public WireTapDefinition<Type> executorService(ExecutorService executorService) {
this.executorServiceBean = executorService;
return this;
}
/**
* Uses a custom thread pool
*
* @param executorService reference to lookup a custom {@link ExecutorService} to use as thread pool for sending
* tapped exchanges
* @return the builder
*/
@Override
public WireTapDefinition<Type> executorService(String executorService) {
setExecutorService(executorService);
return this;
}
/**
* Uses a copy of the original exchange
*
* @return the builder
*/
public WireTapDefinition<Type> copy() {
return copy(true);
}
/**
* Uses a copy of the original exchange
*
* @param copy if it is true camel will copy the original exchange, if it is false camel will not copy the original
* exchange
* @return the builder
*/
public WireTapDefinition<Type> copy(boolean copy) {
return copy(Boolean.toString(copy));
}
/**
* Uses a copy of the original exchange
*
* @param copy if it is true camel will copy the original exchange, if it is false camel will not copy the original
* exchange
* @return the builder
*/
public WireTapDefinition<Type> copy(String copy) {
setCopy(copy);
return this;
}
/**
* Whether the uri is dynamic or static. If the uri is dynamic then the simple language is used to evaluate a
* dynamic uri to use as the wire-tap destination, for each incoming message. This works similar to how the
* <tt>toD</tt> EIP pattern works. If static then the uri is used as-is as the wire-tap destination.
*
* @param dynamicUri whether to use dynamic or static uris
* @return the builder
*/
public WireTapDefinition<Type> dynamicUri(boolean dynamicUri) {
return dynamicUri(Boolean.toString(dynamicUri));
}
/**
* Whether the uri is dynamic or static. If the uri is dynamic then the simple language is used to evaluate a
* dynamic uri to use as the wire-tap destination, for each incoming message. This works similar to how the
* <tt>toD</tt> EIP pattern works. If static then the uri is used as-is as the wire-tap destination.
*
* @param dynamicUri whether to use dynamic or static uris
* @return the builder
*/
public WireTapDefinition<Type> dynamicUri(String dynamicUri) {
setDynamicUri(dynamicUri);
return this;
}
/**
* Uses the {@link Processor} when preparing the {@link org.apache.camel.Exchange} to be sent. This can be used to
* deep-clone messages that should be sent, or any custom logic needed before the exchange is sent.
*
* @param onPrepare the processor
* @return the builder
*/
public WireTapDefinition<Type> onPrepare(Processor onPrepare) {
this.onPrepareProcessor = onPrepare;
return this;
}
/**
* Uses the {@link Processor} when preparing the {@link org.apache.camel.Exchange} to be sent. This can be used to
* deep-clone messages that should be sent, or any custom logic needed before the exchange is sent.
*
* @param onPrepare reference to the processor to lookup in the {@link org.apache.camel.spi.Registry}
* @return the builder
*/
public WireTapDefinition<Type> onPrepare(String onPrepare) {
setOnPrepare(onPrepare);
return this;
}
/**
* Sets the maximum size used by the {@link org.apache.camel.spi.ProducerCache} which is used to cache and reuse
* producers, when uris are reused.
*
* Beware that when using dynamic endpoints then it affects how well the cache can be utilized. If each dynamic
* endpoint is unique then it's best to turn off caching by setting this to -1, which allows Camel to not cache both
* the producers and endpoints; they are regarded as prototype scoped and will be stopped and discarded after use.
* This reduces memory usage as otherwise producers/endpoints are stored in memory in the caches.
*
* However if there are a high degree of dynamic endpoints that have been used before, then it can benefit to use
* the cache to reuse both producers and endpoints and therefore the cache size can be set accordingly or rely on
* the default size (1000).
*
* If there is a mix of unique and used before dynamic endpoints, then setting a reasonable cache size can help
* reduce memory usage to avoid storing too many non-frequent used producers.
*
* @param cacheSize the cache size, use <tt>0</tt> for default cache size, or <tt>-1</tt> to turn cache off.
* @return the builder
*/
@Override
public WireTapDefinition<Type> cacheSize(int cacheSize) {
return cacheSize(Integer.toString(cacheSize));
}
/**
* Sets the maximum size used by the {@link org.apache.camel.spi.ProducerCache} which is used to cache and reuse
* producers, when uris are reused.
*
* Beware that when using dynamic endpoints then it affects how well the cache can be utilized. If each dynamic
* endpoint is unique then it's best to turn off caching by setting this to -1, which allows Camel to not cache both
* the producers and endpoints; they are regarded as prototype scoped and will be stopped and discarded after use.
* This reduces memory usage as otherwise producers/endpoints are stored in memory in the caches.
*
* However if there are a high degree of dynamic endpoints that have been used before, then it can benefit to use
* the cache to reuse both producers and endpoints and therefore the cache size can be set accordingly or rely on
* the default size (1000).
*
* If there is a mix of unique and used before dynamic endpoints, then setting a reasonable cache size can help
* reduce memory usage to avoid storing too many non-frequent used producers.
*
* @param cacheSize the cache size, use <tt>0</tt> for default cache size, or <tt>-1</tt> to turn cache off.
* @return the builder
*/
@Override
public WireTapDefinition<Type> cacheSize(String cacheSize) {
setCacheSize(cacheSize);
return this;
}
/**
* Ignore the invalid endpoint exception when try to create a producer with that endpoint
*
* @return the builder
*/
public WireTapDefinition<Type> ignoreInvalidEndpoint() {
setIgnoreInvalidEndpoint(Boolean.toString(true));
return this;
}
/**
* To use a variable as the source for the message body to send. This makes it handy to use variables for user data
* and to easily control what data to use for sending and receiving.
*
* Important: When using send variable then the message body is taken from this variable instead of the current
* {@link Message}, however the headers from the {@link Message} will still be used as well. In other words, the
* variable is used instead of the message body, but everything else is as usual.
*/
public WireTapDefinition<Type> variableReceive(String variableReceive) {
throw new IllegalArgumentException("WireTap does not support variableReceive");
}
/**
* To use a variable as the source for the message body to send. This makes it handy to use variables for user data
* and to easily control what data to use for sending and receiving.
*
* Important: When using send variable then the message body is taken from this variable instead of the current
* message, however the headers from the message will still be used as well. In other words, the variable is used
* instead of the message body, but everything else is as usual.
*/
public WireTapDefinition<Type> variableSend(String variableSend) {
setVariableSend(variableSend);
return this;
}
// Properties
// -------------------------------------------------------------------------
public Processor getOnPrepareProcessor() {
return onPrepareProcessor;
}
@Override
public ExecutorService getExecutorServiceBean() {
return executorServiceBean;
}
@Override
public String getExecutorServiceRef() {
return executorService;
}
@Override
public String getUri() {
return super.getUri();
}
/**
* The uri of the endpoint to wiretap to. The uri can be dynamic computed using the simple language.
*/
@Override
public void setUri(String uri) {
super.setUri(uri);
}
public String getCopy() {
return copy;
}
public void setCopy(String copy) {
this.copy = copy;
}
public String getDynamicUri() {
return dynamicUri;
}
public void setDynamicUri(String dynamicUri) {
this.dynamicUri = dynamicUri;
}
public String getOnPrepare() {
return onPrepare;
}
public void setOnPrepare(String onPrepare) {
this.onPrepare = onPrepare;
}
public String getExecutorService() {
return executorService;
}
public void setExecutorService(String executorService) {
this.executorService = executorService;
}
@Override
public WireTapDefinition copyDefinition() {
return new WireTapDefinition(this);
}
}
| WireTapDefinition |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBuilder.java | {
"start": 1361,
"end": 1517
} | class ____ to be used as a builder for {@link ReplicaInfo} objects.
* The state of the replica is used to determine which object is instantiated.
*/
public | is |
java | google__guava | guava/src/com/google/common/collect/AbstractBiMap.java | {
"start": 1922,
"end": 7187
} | class ____<K extends @Nullable Object, V extends @Nullable Object>
extends ForwardingMap<K, V> implements BiMap<K, V>, Serializable {
private transient Map<K, V> delegate;
@RetainedWith private transient AbstractBiMap<V, K> inverse;
/** Package-private constructor for creating a map-backed bimap. */
AbstractBiMap(Map<K, V> forward, Map<V, K> backward) {
inverse = checkMapsAndMakeInverse(forward, backward);
delegate = forward;
}
/** Private constructor for inverse bimap. */
private AbstractBiMap(Map<K, V> backward, AbstractBiMap<V, K> forward) {
delegate = backward;
inverse = forward;
}
@Override
protected Map<K, V> delegate() {
return delegate;
}
/** Returns its input, or throws an exception if this is not a valid key. */
@CanIgnoreReturnValue
@ParametricNullness
K checkKey(@ParametricNullness K key) {
return key;
}
/** Returns its input, or throws an exception if this is not a valid value. */
@CanIgnoreReturnValue
@ParametricNullness
V checkValue(@ParametricNullness V value) {
return value;
}
/**
* Specifies the delegate maps going in each direction. Called by subclasses during
* deserialization.
*/
void setDelegates(Map<K, V> forward, Map<V, K> backward) {
inverse = checkMapsAndMakeInverse(forward, backward);
delegate = forward;
}
private AbstractBiMap<V, K> checkMapsAndMakeInverse(Map<K, V> forward, Map<V, K> backward) {
checkArgument(forward.isEmpty());
checkArgument(backward.isEmpty());
checkArgument(forward != backward);
return makeInverse(backward);
}
AbstractBiMap<V, K> makeInverse(Map<V, K> backward) {
return new Inverse<>(backward, this);
}
void setInverse(AbstractBiMap<V, K> inverse) {
this.inverse = inverse;
}
// Query Operations (optimizations)
@Override
public boolean containsValue(@Nullable Object value) {
return inverse.containsKey(value);
}
// Modification Operations
@CanIgnoreReturnValue
@Override
public @Nullable V put(@ParametricNullness K key, @ParametricNullness V value) {
return putInBothMaps(key, value, false);
}
@CanIgnoreReturnValue
@Override
public @Nullable V forcePut(@ParametricNullness K key, @ParametricNullness V value) {
return putInBothMaps(key, value, true);
}
private @Nullable V putInBothMaps(
@ParametricNullness K key, @ParametricNullness V value, boolean force) {
checkKey(key);
checkValue(value);
boolean containedKey = containsKey(key);
if (containedKey && Objects.equals(value, get(key))) {
return value;
}
if (force) {
inverse().remove(value);
} else {
checkArgument(!containsValue(value), "value already present: %s", value);
}
V oldValue = delegate.put(key, value);
updateInverseMap(key, containedKey, oldValue, value);
return oldValue;
}
private void updateInverseMap(
@ParametricNullness K key,
boolean containedKey,
@Nullable V oldValue,
@ParametricNullness V newValue) {
if (containedKey) {
// The cast is safe because of the containedKey check.
removeFromInverseMap(uncheckedCastNullableTToT(oldValue));
}
inverse.delegate.put(newValue, key);
}
@CanIgnoreReturnValue
@Override
public @Nullable V remove(@Nullable Object key) {
return containsKey(key) ? removeFromBothMaps(key) : null;
}
@CanIgnoreReturnValue
@ParametricNullness
private V removeFromBothMaps(@Nullable Object key) {
// The cast is safe because the callers of this method first check that the key is present.
V oldValue = uncheckedCastNullableTToT(delegate.remove(key));
removeFromInverseMap(oldValue);
return oldValue;
}
private void removeFromInverseMap(@ParametricNullness V oldValue) {
inverse.delegate.remove(oldValue);
}
// Bulk Operations
@Override
public void putAll(Map<? extends K, ? extends V> map) {
for (Entry<? extends K, ? extends V> entry : map.entrySet()) {
put(entry.getKey(), entry.getValue());
}
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
this.delegate.replaceAll(function);
inverse.delegate.clear();
Entry<K, V> broken = null;
Iterator<Entry<K, V>> itr = this.delegate.entrySet().iterator();
while (itr.hasNext()) {
Entry<K, V> entry = itr.next();
K k = entry.getKey();
V v = entry.getValue();
K conflict = inverse.delegate.putIfAbsent(v, k);
if (conflict != null) {
broken = entry;
// We're definitely going to throw, but we'll try to keep the BiMap in an internally
// consistent state by removing the bad entry.
itr.remove();
}
}
if (broken != null) {
throw new IllegalArgumentException("value already present: " + broken.getValue());
}
}
@Override
public void clear() {
delegate.clear();
inverse.delegate.clear();
}
// Views
@Override
public BiMap<V, K> inverse() {
return inverse;
}
@LazyInit private transient @Nullable Set<K> keySet;
@Override
public Set<K> keySet() {
Set<K> result = keySet;
return (result == null) ? keySet = new KeySet() : result;
}
@WeakOuter
private final | AbstractBiMap |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/client/runtime/src/main/java/org/jboss/resteasy/reactive/client/spi/ClientMessageBodyReader.java | {
"start": 354,
"end": 739
} | interface ____<T> extends MessageBodyReader<T> {
T readFrom(Class<T> type, Type genericType,
Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, String> httpHeaders,
InputStream entityStream,
RestClientRequestContext context) throws java.io.IOException, jakarta.ws.rs.WebApplicationException;
}
| ClientMessageBodyReader |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/JmsComponentBuilderFactory.java | {
"start": 15125,
"end": 16065
} | class ____ is
* good enough as subscription name). Only makes sense when listening to
* a topic (pub-sub domain), therefore this method switches the
* pubSubDomain flag as well.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param subscriptionDurable the value to set
* @return the dsl builder
*/
default JmsComponentBuilder subscriptionDurable(boolean subscriptionDurable) {
doSetProperty("subscriptionDurable", subscriptionDurable);
return this;
}
/**
* Set the name of a subscription to create. To be applied in case of a
* topic (pub-sub domain) with a shared or durable subscription. The
* subscription name needs to be unique within this client's JMS client
* id. Default is the | name |
java | square__moshi | moshi/src/test/java/com/squareup/moshi/TypesTest.java | {
"start": 1407,
"end": 1477
} | interface ____ {}
@Retention(RUNTIME)
@JsonQualifier
@ | TestQualifier |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java | {
"start": 5544,
"end": 8028
} | interface ____
* Java 8.
*/
L lock;
/**
* The guarded object.
*/
O object;
/**
* Supplies the read lock, usually from the lock object.
*/
private Supplier<Lock> readLockSupplier;
/**
* Supplies the write lock, usually from the lock object.
*/
private Supplier<Lock> writeLockSupplier;
/**
* Constructs a new instance.
*/
public LVBuilder() {
// empty
}
@Override
public LockVisitor<O, L> get() {
return new LockVisitor<>(this);
}
Supplier<Lock> getReadLockSupplier() {
return readLockSupplier;
}
Supplier<Lock> getWriteLockSupplier() {
return writeLockSupplier;
}
/**
* Set the lock used from accept methods.
*
* @param lock the lock.
* @return {@code this} instance.
*/
public B setLock(final L lock) {
this.lock = lock;
return asThis();
}
/**
* Set the resource.
*
* @param object the resource.
* @return {@code this} instance.
*/
public B setObject(final O object) {
this.object = object;
return asThis();
}
/**
* Supplies the read lock.
*
* @param readLockSupplier Supplies the read lock.
* @return {@code this} instance.
*/
public B setReadLockSupplier(final Supplier<Lock> readLockSupplier) {
this.readLockSupplier = readLockSupplier;
return asThis();
}
/**
* Supplies the write lock.
*
* @param writeLockSupplier Supplies the write lock.
* @return {@code this} instance.
*/
public B setWriteLockSupplier(final Supplier<Lock> writeLockSupplier) {
this.writeLockSupplier = writeLockSupplier;
return asThis();
}
}
/**
* The lock object, untyped, since, for example {@link StampedLock} does not implement a locking | in |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java | {
"start": 5971,
"end": 8630
} | class ____ {
private List<Tombstone> tombstones;
private int numPurged = -1;
private final long currentTime = System.currentTimeMillis();
private Builder() {
tombstones = new ArrayList<>();
}
private Builder(IndexGraveyard that) {
tombstones = new ArrayList<>(that.getTombstones());
}
/**
* A copy of the current tombstones in the builder.
*/
public List<Tombstone> tombstones() {
return Collections.unmodifiableList(tombstones);
}
/**
* Add a deleted index to the list of tombstones in the cluster state.
*/
public Builder addTombstone(final Index index) {
tombstones.add(new Tombstone(index, currentTime));
return this;
}
/**
* Add a set of deleted indexes to the list of tombstones in the cluster state.
*/
public Builder addTombstones(final Collection<Index> indices) {
for (Index index : indices) {
addTombstone(index);
}
return this;
}
/**
* Add a list of tombstones to the graveyard.
*/
Builder addBuiltTombstones(final List<Tombstone> tombstones) {
this.tombstones.addAll(tombstones);
return this;
}
/**
* Get the number of tombstones that were purged. This should *only* be called
* after build() has been called.
*/
public int getNumPurged() {
assert numPurged != -1;
return numPurged;
}
/**
* Purge tombstone entries. Returns the number of entries that were purged.
*
* Tombstones are purged if the number of tombstones in the list
* is greater than the input parameter of maximum allowed tombstones.
* Tombstones are purged until the list is equal to the maximum allowed.
*/
private int purge(final int maxTombstones) {
int count = tombstones().size() - maxTombstones;
if (count <= 0) {
return 0;
}
tombstones = tombstones.subList(count, tombstones.size());
return count;
}
public IndexGraveyard build() {
return build(Settings.EMPTY);
}
public IndexGraveyard build(final Settings settings) {
// first, purge the necessary amount of entries
numPurged = purge(SETTING_MAX_TOMBSTONES.get(settings));
return new IndexGraveyard(tombstones);
}
}
/**
* A | Builder |
java | quarkusio__quarkus | extensions/smallrye-graphql-client/deployment/src/test/java/io/quarkus/smallrye/graphql/client/deployment/DynamicGraphQLClientWebSocketAuthenticationClientInitTest.java | {
"start": 1862,
"end": 7380
} | class ____ {
static String url = "http://" + System.getProperty("quarkus.http.host", "localhost") + ":" +
System.getProperty("quarkus.http.test-port", "8081") + "/graphql";
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(SecuredApi.class, Foo.class)
.addAsResource("application-secured.properties", "application.properties")
.addAsResource("users.properties")
.addAsResource("roles.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"))
.overrideConfigKey("quarkus.smallrye-graphql.authorization-client-init-payload-name", "Authorization");
private static Stream<Arguments> websocketArguments() {
return Stream.of(WebsocketSubprotocol.values())
.map(Enum::name)
.map(Arguments::of);
}
@ParameterizedTest
@MethodSource("websocketArguments")
public void testAuthenticatedUserForQueryWebSocketOverInitParams(String subprotocolName) throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.initPayload(Map.of("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz"))
.executeSingleOperationsOverWebsocket(true)
.subprotocols(WebsocketSubprotocol.valueOf(subprotocolName));
try (DynamicGraphQLClient client = clientBuilder.build()) {
// Test that repeated queries yields the same result
for (int i = 0; i < 3; i++) {
Response response = client.executeSync("{ foo { message} }");
assertTrue(response.hasData());
assertEquals("foo", response.getData().getJsonObject("foo").getString("message"));
}
// Unauthorized query
Response response = client.executeSync("{ bar { message} }");
assertTrue(response.hasData());
assertEquals(JsonValue.ValueType.NULL, response.getData().get("bar").getValueType());
}
}
@ParameterizedTest
@MethodSource("websocketArguments")
public void testUnauthenticatedUserForQueryWebSocketOverInitParams(String subprotocolName) throws Exception {
// Validate that our unit test code actually has a correctly secured endpoint
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.executeSingleOperationsOverWebsocket(true)
.subprotocols(WebsocketSubprotocol.valueOf(subprotocolName));
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ foo { message} }");
assertTrue(response.hasData());
assertEquals(JsonValue.ValueType.NULL, response.getData().get("foo").getValueType());
}
}
@ParameterizedTest
@MethodSource("websocketArguments")
@Disabled("Reliant on next version of SmallRye GraphQL to prevent it hanging")
public void testIncorrectCredentialsForQueryWebSocketOverInitParams(String subprotocolName) {
UnexpectedCloseException exception = assertThrows(UnexpectedCloseException.class, () -> {
// Validate that our unit test code actually has a correctly secured endpoint
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.executeSingleOperationsOverWebsocket(true)
.initPayload(Map.of("Authorization", "Basic ZnJlZDpXUk9OR19QQVNTV09SRA=="))
.subprotocols(WebsocketSubprotocol.valueOf(subprotocolName));
try (DynamicGraphQLClient client = clientBuilder.build()) {
client.executeSync("{ foo { message} }");
}
});
assertEquals((short) 4403, exception.getCloseStatusCode());
assertTrue(exception.getMessage().contains("Forbidden"));
}
@ParameterizedTest
@MethodSource("websocketArguments")
@Disabled("Reliant on next version of SmallRye GraphQL to prevent it hanging")
public void testAuthenticatedUserForQueryWebSocketOverHeadersAndInitParams(String subprotocolName) {
UnexpectedCloseException exception = assertThrows(UnexpectedCloseException.class, () -> {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
// Header takes precedence over init payload
.header("Authorization", "Basic ZnJlZDpmb28=")
// This should be ignored as the header is set
.initPayload(Map.of("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz"))
.executeSingleOperationsOverWebsocket(true)
.subprotocols(WebsocketSubprotocol.valueOf(subprotocolName));
try (DynamicGraphQLClient client = clientBuilder.build()) {
// Executing the query should fail because the server will error as we've defined two methods of auth
client.executeSync("{ foo { message} }");
}
});
assertEquals((short) 4400, exception.getCloseStatusCode());
assertTrue(exception.getMessage().contains("Authorization specified in multiple locations"));
}
public static | DynamicGraphQLClientWebSocketAuthenticationClientInitTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StringFormatWithLiteralTest.java | {
"start": 4423,
"end": 4862
} | class ____ {
String test() {
Integer data = 3;
return String.format("Formatting this int: %d;Formatting this string: %s", data, "string");
}
}
""")
.doTest();
}
@Test
public void negativeStringFormatWithOneStringVariableStaticImport() {
compilationHelper
.addSourceLines(
"ExampleClass.java",
"""
import static java.lang.String.format;
public | ExampleClass |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/sql/SqlDateSerializationTest.java | {
"start": 944,
"end": 4894
} | class ____ {
@JsonFormat(shape = JsonFormat.Shape.STRING, pattern = "yyyy.MM.dd")
public java.sql.Date dateOfBirth;
}
/*
/**********************************************************************
/* Test methods
/**********************************************************************
*/
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testSqlDate() throws Exception
{
ObjectWriter writer = MAPPER.writer()
.with(DateTimeFeature.WRITE_DATES_AS_TIMESTAMPS);
// use date 1999-04-01 (note: months are 0-based, use constant)
final java.sql.Date date99 = javaSqlDate(1999, Calendar.APRIL, 1);
final java.sql.Date date0 = new java.sql.Date(0);
// 11-Oct-2016, tatu: As per [databind#219] we really should use global
// defaults in 2.9, even if this changes behavior.
assertEquals(String.valueOf(date99.getTime()),
writer.writeValueAsString(date99));
assertEquals(a2q("{'date':0}"),
writer.writeValueAsString(new SqlDateAsDefaultBean(0L)));
// but may explicitly force timestamp too
assertEquals(a2q("{'date':0}"),
writer.writeValueAsString(new SqlDateAsNumberBean(0L)));
// And also should be able to use String output as need be:
ObjectWriter w = MAPPER.writer().without(DateTimeFeature.WRITE_DATES_AS_TIMESTAMPS);
// 03-Feb-2021, tatu: As per [databind#2405], changed to include time part by
// default
assertEquals(q("1999-04-01T00:00:00.000Z"), w.writeValueAsString(date99));
assertEquals(q("1970-01-01T00:00:00.000Z"), w.writeValueAsString(date0));
assertEquals(a2q("{'date':'1970-01-01T00:00:00.000Z'}"),
w.writeValueAsString(new SqlDateAsDefaultBean(0L)));
}
@Test
public void testSqlTime() throws Exception
{
java.sql.Time time = new java.sql.Time(0L);
// not 100% sure what we should expect wrt timezone, but what serializes
// does use is quite simple:
assertEquals(q(time.toString()), MAPPER.writeValueAsString(time));
}
@Test
public void testSqlTimestamp() throws Exception
{
java.sql.Timestamp input = new java.sql.Timestamp(0L);
// just should produce same output as standard `java.util.Date`:
Date altTnput = new Date(0L);
assertEquals(MAPPER.writeValueAsString(altTnput),
MAPPER.writeValueAsString(input));
}
@Test
public void testPatternWithSqlDate() throws Exception
{
// `java.sql.Date` applies system default zone (and not UTC)
ObjectMapper mapper = jsonMapperBuilder()
.defaultTimeZone(TimeZone.getDefault())
.build();
Person i = new Person();
i.dateOfBirth = java.sql.Date.valueOf("1980-04-14");
assertEquals(a2q("{'dateOfBirth':'1980.04.14'}"),
mapper.writeValueAsString(i));
}
// [databind#2064]
@Test
public void testSqlDateConfigOverride() throws Exception
{
// `java.sql.Date` applies system default zone (and not UTC)
final ObjectMapper mapper = jsonMapperBuilder()
.defaultTimeZone(TimeZone.getDefault())
.withConfigOverride(java.sql.Date.class,
o -> o.setFormat(JsonFormat.Value.forPattern("yyyy+MM+dd")))
.build();
assertEquals("\"1980+04+14\"",
mapper.writeValueAsString(java.sql.Date.valueOf("1980-04-14")));
}
private static java.sql.Date javaSqlDate(int year, int monthConstant, int day)
{
Calendar cal = Calendar.getInstance();
cal.set(year, monthConstant, day, 0, 0, 0);
cal.set(Calendar.MILLISECOND, 0);
cal.setTimeZone(TimeZone.getTimeZone("UTC"));
return new java.sql.Date(cal.getTime().getTime());
}
}
| Person |
java | google__gson | gson/src/main/java/com/google/gson/GsonBuilder.java | {
"start": 3815,
"end": 8065
} | class ____ {
private Excluder excluder = Excluder.DEFAULT;
private LongSerializationPolicy longSerializationPolicy = LongSerializationPolicy.DEFAULT;
private FieldNamingStrategy fieldNamingPolicy = FieldNamingPolicy.IDENTITY;
private final Map<Type, InstanceCreator<?>> instanceCreators = new HashMap<>();
private final List<TypeAdapterFactory> factories = new ArrayList<>();
/** tree-style hierarchy factories. These come after factories for backwards compatibility. */
private final List<TypeAdapterFactory> hierarchyFactories = new ArrayList<>();
private boolean serializeNulls = DEFAULT_SERIALIZE_NULLS;
private String datePattern = DEFAULT_DATE_PATTERN;
private int dateStyle = DateFormat.DEFAULT;
private int timeStyle = DateFormat.DEFAULT;
private boolean complexMapKeySerialization = DEFAULT_COMPLEX_MAP_KEYS;
private boolean serializeSpecialFloatingPointValues = DEFAULT_SPECIALIZE_FLOAT_VALUES;
private boolean escapeHtmlChars = DEFAULT_ESCAPE_HTML;
private FormattingStyle formattingStyle = DEFAULT_FORMATTING_STYLE;
private boolean generateNonExecutableJson = DEFAULT_JSON_NON_EXECUTABLE;
private Strictness strictness = DEFAULT_STRICTNESS;
private boolean useJdkUnsafe = DEFAULT_USE_JDK_UNSAFE;
private ToNumberStrategy objectToNumberStrategy = DEFAULT_OBJECT_TO_NUMBER_STRATEGY;
private ToNumberStrategy numberToNumberStrategy = DEFAULT_NUMBER_TO_NUMBER_STRATEGY;
private final ArrayDeque<ReflectionAccessFilter> reflectionFilters = new ArrayDeque<>();
/**
* Creates a GsonBuilder instance that can be used to build Gson with various configuration
* settings. GsonBuilder follows the builder pattern, and it is typically used by first invoking
* various configuration methods to set desired options, and finally calling {@link #create()}.
*/
public GsonBuilder() {}
/**
* Constructs a GsonBuilder instance from a Gson instance. The newly constructed GsonBuilder has
* the same configuration as the previously built Gson instance.
*
* @param gson the gson instance whose configuration should be applied to a new GsonBuilder.
*/
GsonBuilder(Gson gson) {
this.excluder = gson.excluder;
this.fieldNamingPolicy = gson.fieldNamingStrategy;
this.instanceCreators.putAll(gson.instanceCreators);
this.serializeNulls = gson.serializeNulls;
this.complexMapKeySerialization = gson.complexMapKeySerialization;
this.generateNonExecutableJson = gson.generateNonExecutableJson;
this.escapeHtmlChars = gson.htmlSafe;
this.formattingStyle = gson.formattingStyle;
this.strictness = gson.strictness;
this.serializeSpecialFloatingPointValues = gson.serializeSpecialFloatingPointValues;
this.longSerializationPolicy = gson.longSerializationPolicy;
this.datePattern = gson.datePattern;
this.dateStyle = gson.dateStyle;
this.timeStyle = gson.timeStyle;
this.factories.addAll(gson.builderFactories);
this.hierarchyFactories.addAll(gson.builderHierarchyFactories);
this.useJdkUnsafe = gson.useJdkUnsafe;
this.objectToNumberStrategy = gson.objectToNumberStrategy;
this.numberToNumberStrategy = gson.numberToNumberStrategy;
this.reflectionFilters.addAll(gson.reflectionFilters);
}
/**
* Configures Gson to enable versioning support. Versioning support works based on the annotation
* types {@link Since} and {@link Until}. It allows including or excluding fields and classes
* based on the specified version. See the documentation of these annotation types for more
* information.
*
* <p>By default versioning support is disabled and usage of {@code @Since} and {@code @Until} has
* no effect.
*
* @param version the version number to use.
* @return a reference to this {@code GsonBuilder} object to fulfill the "Builder" pattern
* @throws IllegalArgumentException if the version number is NaN or negative
* @see Since
* @see Until
*/
@CanIgnoreReturnValue
public GsonBuilder setVersion(double version) {
if (Double.isNaN(version) || version < 0.0) {
throw new IllegalArgumentException("Invalid version: " + version);
}
excluder = excluder.withVersion(version);
return this;
}
/**
* Configures Gson to excludes all | GsonBuilder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java | {
"start": 3072,
"end": 4253
} | class ____<Response> implements ActionListener<Response>, Runnable {
private final AtomicBoolean isDone = new AtomicBoolean(false);
private final ActionListener<Response> delegate;
private final Consumer<ActionListener<Response>> onTimeout;
private volatile Scheduler.ScheduledCancellable cancellable;
private TimeoutableListener(ActionListener<Response> delegate, Consumer<ActionListener<Response>> onTimeout) {
this.delegate = delegate;
this.onTimeout = onTimeout;
}
@Override
public void onResponse(Response response) {
if (isDone.compareAndSet(false, true)) {
cancellable.cancel();
delegate.onResponse(response);
}
}
@Override
public void onFailure(Exception e) {
if (isDone.compareAndSet(false, true)) {
cancellable.cancel();
delegate.onFailure(e);
}
}
@Override
public void run() {
if (isDone.compareAndSet(false, true)) {
onTimeout.accept(this);
}
}
}
}
| TimeoutableListener |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/support/DefaultQueryHints.java | {
"start": 1191,
"end": 3736
} | class ____ implements QueryHints {
private final JpaEntityInformation<?, ?> information;
private final CrudMethodMetadata metadata;
private final @Nullable EntityManager entityManager;
private final boolean forCounts;
/**
* Creates a new {@link DefaultQueryHints} instance for the given {@link JpaEntityInformation},
* {@link CrudMethodMetadata}, {@link EntityManager} and whether to include fetch graphs.
*
* @param information must not be {@literal null}.
* @param metadata can be {@literal null}.
* @param entityManager must not be {@literal null}.
* @param forCounts
*/
private DefaultQueryHints(JpaEntityInformation<?, ?> information, CrudMethodMetadata metadata,
@Nullable EntityManager entityManager, boolean forCounts) {
this.information = information;
this.metadata = metadata;
this.entityManager = entityManager;
this.forCounts = forCounts;
}
/**
* Creates a new {@link QueryHints} instance for the given {@link JpaEntityInformation}, {@link CrudMethodMetadata}
* and {@link EntityManager}.
*
* @param information must not be {@literal null}.
* @param metadata must not be {@literal null}.
* @return
*/
public static QueryHints of(JpaEntityInformation<?, ?> information, CrudMethodMetadata metadata) {
Assert.notNull(information, "JpaEntityInformation must not be null");
Assert.notNull(metadata, "CrudMethodMetadata must not be null");
return new DefaultQueryHints(information, metadata, null, false);
}
@Override
public QueryHints withFetchGraphs(EntityManager em) {
return new DefaultQueryHints(this.information, this.metadata, em, this.forCounts);
}
@Override
public QueryHints forCounts() {
return new DefaultQueryHints(this.information, this.metadata, this.entityManager, true);
}
@Override
public void forEach(BiConsumer<String, Object> action) {
combineHints().forEach(action);
}
private QueryHints combineHints() {
return QueryHints.from(forCounts ? metadata.getQueryHintsForCount() : metadata.getQueryHints(), getFetchGraphs());
}
private QueryHints getFetchGraphs() {
if(entityManager != null && metadata.getEntityGraph() != null) {
return Jpa21Utils.getFetchGraphHint(entityManager, getEntityGraph(metadata.getEntityGraph()), information.getJavaType());
}
return new MutableQueryHints();
}
private JpaEntityGraph getEntityGraph(EntityGraph entityGraph) {
String fallbackName = information.getEntityName() + "." + metadata.getMethod().getName();
return new JpaEntityGraph(entityGraph, fallbackName);
}
}
| DefaultQueryHints |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/crypto/TransportCipher.java | {
"start": 954,
"end": 1124
} | interface ____ {
String getKeyId() throws GeneralSecurityException;
void addToChannel(Channel channel) throws IOException, GeneralSecurityException;
}
| TransportCipher |
java | spring-projects__spring-boot | module/spring-boot-webflux-test/src/test/java/org/springframework/boot/webflux/test/autoconfigure/WebFluxTypeExcludeFilterTests.java | {
"start": 6746,
"end": 6889
} | class ____ {
}
@WebFluxTest(excludeFilters = @Filter(type = FilterType.ASSIGNABLE_TYPE, classes = Controller1.class))
static | WithIncludeFilter |
java | reactor__reactor-core | reactor-core/src/withMicrometerTest/java/reactor/core/publisher/AutomaticContextPropagationTest.java | {
"start": 20096,
"end": 58656
} | interface ____ we have no means to intercept the calls to
// restore ThreadLocals.
assertThat(subscriberWithContext.valueInOnNext.get()).isEqualTo("ref_init");
assertThat(subscriberWithContext.valueInOnComplete.get()).isEqualTo("ref_init");
}
// Flux tests
@Test
void fluxCreate() {
Supplier<Flux<?>> fluxSupplier =
() -> Flux.create(sink -> executorService.submit(() -> {
sink.next("Hello");
sink.complete();
}));
assertThreadLocalsPresentInFlux(fluxSupplier);
}
@Test
void fluxMap() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().map(String::toUpperCase));
}
@Test
void fluxIgnoreThenSwitchThread() {
assertThreadLocalsPresentInMono(() -> Flux.just("Bye").then(threadSwitchingMono()));
}
@Test
void fluxSwitchThreadThenIgnore() {
assertThreadLocalsPresentInMono(() -> threadSwitchingFlux().then(Mono.just("Hi")));
}
@Test
void fluxDeferContextual() {
assertThreadLocalsPresentInFlux(() ->
Flux.deferContextual(ctx -> threadSwitchingFlux()));
}
@Test
void fluxFirstWithSignalArray() {
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithSignal(threadSwitchingFlux()));
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithSignal(threadSwitchingFlux()).or(threadSwitchingFlux()));
}
@Test
void fluxFirstWithSignalIterable() {
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithSignal(Collections.singletonList(threadSwitchingFlux())));
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithSignal(Stream.of(threadSwitchingFlux(), threadSwitchingFlux()).collect(Collectors.toList())));
}
@Test
void fluxRetryWhen() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux().retryWhen(Retry.max(1)));
}
@Test
void fluxRetryWhenSwitchingThread() {
assertThreadLocalsPresentInFlux(() ->
Flux.error(new ExpectedException("Oops"))
.retryWhen(Retry.from(f -> threadSwitchingFlux())));
}
@Test
void fluxRepeatWhen() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.repeatWhen(s -> Flux.just(1)));
}
@Test
void fluxRepeatWhenSwitchingThread() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello")
.repeatWhen(s -> threadSwitchingFlux()));
}
@Test
void fluxWindowUntil() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux().windowUntil(s -> true)
.flatMap(Function.identity()));
}
@Test
void switchOnFirst() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.switchOnFirst((s, f) -> f.map(String::toUpperCase)));
}
@Test
void switchOnFirstFuseable() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.filter("Hello"::equals)
.switchOnFirst((s, f) -> f.map(String::toUpperCase)));
}
@Test
void switchOnFirstSwitchThread() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.switchOnFirst((s, f) -> threadSwitchingFlux()));
}
@Test
void switchOnFirstFuseableSwitchThread() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.filter("Hello"::equals)
.switchOnFirst((s, f) -> threadSwitchingFlux()));
}
@Test
void fluxWindowTimeout() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.windowTimeout(1, Duration.ofDays(1), true));
}
@Test
void fluxMergeComparing() {
assertThreadLocalsPresentInFlux(() ->
Flux.mergeComparing(Flux.empty(), threadSwitchingFlux()));
}
@Test
void fluxFirstWithValueArray() {
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithValue(Flux.empty(), threadSwitchingFlux()));
}
@Test
void fluxFirstWithValueIterable() {
assertThreadLocalsPresentInFlux(() ->
Flux.firstWithValue(
Stream.of(Flux.<String>empty(), threadSwitchingFlux())
.collect(Collectors.toList())));
}
@Test
void fluxConcatArray() {
assertThreadLocalsPresentInFlux(() ->
Flux.concat(Mono.empty(), threadSwitchingFlux()));
}
@Test
void fluxConcatIterable() {
assertThreadLocalsPresent(
Flux.concat(
Stream.of(Flux.<String>empty(), threadSwitchingFlux()).collect(Collectors.toList())));
// Direct subscription
}
@Test
void fluxCombineLatest() {
assertThreadLocalsPresentInFlux(() ->
Flux.combineLatest(
Flux.just(""), threadSwitchingFlux(), (s1, s2) -> s2));
}
@Test
void fluxUsing() {
assertThreadLocalsPresentInFlux(() ->
Flux.using(() -> 0, i -> threadSwitchingFlux(), i -> {}));
}
@Test
void fluxZip() {
assertThreadLocalsPresentInFlux(() ->
Flux.zip(Flux.just(""), threadSwitchingFlux()));
}
@Test
void fluxZipIterable() {
assertThreadLocalsPresentInFlux(() ->
Flux.zip(Stream.of(Flux.just(""), threadSwitchingFlux()).collect(Collectors.toList()),
obj -> Tuples.of((String) obj[0], (String) obj[1])));
}
@Test
void fluxBufferBoundary() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").delayElements(Duration.ofMillis(20))
.buffer(threadSwitchingFlux()));
}
@Test
void fluxBufferWhen() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello").delayElements(Duration.ofMillis(20))
.bufferWhen(threadSwitchingFlux(), x -> Flux.empty()));
}
@Test
void fluxConcatMap() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.concatMap(s -> threadSwitchingFlux(), 1));
}
@Test
void fluxConcatMapNoPrefetch() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello").hide()
.concatMap(s -> threadSwitchingFlux()));
}
@Test
void fluxDelaySubscription() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello")
.delaySubscription(threadSwitchingFlux()));
}
@Test
void fluxExpand() {
AtomicBoolean done = new AtomicBoolean(false);
// We don't validate direct subscription via CoreSubscriber with Context in
// this case as it can happen that the drain loop is in the main thread
// and won't restore TLs from the Context when contextWrite operator is
// missing along the way in the chain.
assertThreadLocalsPresent(
Flux.just("hello").expand(s -> {
if (done.get()) {
return Flux.empty();
} else {
done.set(true);
return threadSwitchingFlux();
}
}));
}
@Test
void fluxFilterWhen() {
// We don't validate direct subscription via CoreSubscriber with Context in
// this case as it can happen that the drain loop is in the main thread
// and won't restore TLs from the Context when contextWrite operator is
// missing along the way in the chain.
assertThreadLocalsPresent(
Flux.just("hello")
.filterWhen(s -> new ThreadSwitchingFlux<>(Boolean.TRUE, executorService)));
}
@Test
void fluxGroupJoinFlattened() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello").groupJoin(threadSwitchingFlux(),
l -> Flux.never(), r -> Flux.never(),
(s, f) -> f.map(i -> s)).flatMap(Function.identity()));
}
@Test
void fluxGroupJoin() {
assertThreadLocalsPresent(
Flux.just("hello").groupJoin(threadSwitchingFlux(),
l -> Flux.never(), r -> Flux.never(),
(s, f) -> f.map(i -> s)));
// works only with contextWrite because the group is delivered using the
// signal from the left hand side
}
@Test
void fluxGroupJoinSubscribed() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello").groupJoin(threadSwitchingFlux(),
l -> Flux.never(), r -> Flux.never(),
(s, f) -> f.map(i -> s))
.flatMap(Function.identity()));
}
@Disabled("Only contextWrite/contextCapture usages are supported")
@Test
void fluxJustRawSubscribe() {
assertThatNoException().isThrownBy(() ->
assertThatThreadLocalsPresentDirectRawSubscribe(Flux.just("hello"))
);
}
@Test
void fluxJoin() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("hello").join(threadSwitchingFlux(), l -> Flux.never(),
r -> Flux.never(), (s1, s2) -> s1 + s2));
}
	@Test
	void fluxLift() {
		assertThreadLocalsPresentInFlux(() -> {
			Flux<String> flux = Flux.just("Hello").hide();
			// Lift with a subscriber that re-delivers every signal on another thread.
			Publisher<String> lifted =
					Operators.<String, String>liftPublisher((pub, sub) -> new CoreSubscriber<String>() {
						@Override
						public void onSubscribe(Subscription s) {
							executorService.submit(() -> sub.onSubscribe(s));
						}

						@Override
						public void onNext(String s) {
							executorService.submit(() -> sub.onNext(s));
						}

						@Override
						public void onError(Throwable t) {
							executorService.submit(() -> sub.onError(t));
						}

						@Override
						public void onComplete() {
							executorService.submit(sub::onComplete);
						}

						// Propagate the downstream Context so TLs can be restored upstream.
						@Override
						public Context currentContext() {
							return sub.currentContext();
						}
					})
					.apply(flux);
			return (Flux<String>) lifted;
		});
	}

	@Test
	void fluxLiftFuseable() {
		assertThreadLocalsPresentInFlux(() -> {
			// No .hide() -> the source stays fuseable, exercising the fuseable lift path.
			Flux<String> flux = Flux.just("Hello");
			Publisher<String> lifted =
					Operators.<String, String>liftPublisher((pub, sub) -> new CoreSubscriber<String>() {
						// NOTE(review): unlike fluxLift above, currentContext() is not
						// overridden here, so the CoreSubscriber default is used —
						// presumably an empty Context; confirm this is intentional.
						@Override
						public void onSubscribe(Subscription s) {
							executorService.submit(() -> sub.onSubscribe(s));
						}

						@Override
						public void onNext(String s) {
							executorService.submit(() -> sub.onNext(s));
						}

						@Override
						public void onError(Throwable t) {
							executorService.submit(() -> sub.onError(t));
						}

						@Override
						public void onComplete() {
							executorService.submit(sub::onComplete);
						}
					})
					.apply(flux);
			return (Flux<String>) lifted;
		});
	}
// see https://github.com/reactor/reactor-core/issues/3762
@Test
void fluxLiftOnEveryOperator() {
Function<? super Publisher<Object>, ? extends Publisher<Object>>
everyOperatorLift = Operators.lift((a, b) -> b);
Hooks.onEachOperator("testEveryOperatorLift", everyOperatorLift);
assertThreadLocalsPresentInFlux(() -> Flux.just("Hello").hide()
.publish().refCount().map(s -> s));
Hooks.resetOnEachOperator();
}
@Test
void fluxFlatMapSequential() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.flatMapSequential(s -> threadSwitchingFlux()));
}
@Test
void fluxOnErrorResume() {
assertThreadLocalsPresentInFlux(() ->
Flux.error(new ExpectedException("Oops"))
.onErrorResume(t -> threadSwitchingFlux()));
}
@Test
void fluxPublishMulticast() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello")
.publish(s -> threadSwitchingFlux()));
}
@Test
void fluxSkipUntilOther() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.skipUntilOther(threadSwitchingFlux()));
}
@Test
void fluxSample() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").concatWith(Flux.never())
.sample(threadSwitchingFlux()));
}
@Test
void fluxSampleFirst() {
// We don't validate direct subscription via CoreSubscriber with Context in
// this case as it can happen that the drain loop is in the main thread
// and won't restore TLs from the Context when contextWrite operator is
// missing along the way in the chain.
assertThreadLocalsPresent(
Flux.just("Hello").concatWith(Flux.never())
.sampleFirst(s -> new ThreadSwitchingFlux<>(new ExpectedException("oops"), executorService)));
}
@Test
void fluxSampleTimeout() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux().concatWith(Mono.delay(Duration.ofMillis(10)).map(l -> "").concatWith(Mono.empty()))
.sampleTimeout(s -> threadSwitchingFlux()));
}
@Test
void fluxSwitchIfEmpty() {
assertThreadLocalsPresentInFlux(() ->
Flux.empty()
.switchIfEmpty(threadSwitchingFlux()));
}
@Test
void fluxSwitchMapNoPrefetch() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.switchMap(s -> threadSwitchingFlux()));
}
@SuppressWarnings("deprecation")
@Test
void fluxSwitchMap() {
assertThreadLocalsPresentInFlux(() ->
threadSwitchingFlux()
.switchMap(s -> threadSwitchingFlux(), 1));
}
@Test
void fluxTakeUntilOther() {
// We don't validate direct subscription via CoreSubscriber with Context in
// this case as it can happen that the drain loop is in the main thread
// and won't restore TLs from the Context when contextWrite operator is
// missing along the way in the chain.
assertThreadLocalsPresent(
Flux.concat(Flux.just("Hello"), Flux.never())
.takeUntilOther(threadSwitchingFlux()));
}
@Test
void fluxTimeoutFirst() {
assertThreadLocalsPresentInFlux(() ->
Flux.never()
.timeout(threadSwitchingFlux()));
}
@Test
void fluxTimeoutOther() {
assertThreadLocalsPresentInFlux(() ->
Flux.never()
.timeout(threadSwitchingFlux(), i -> Flux.never(), threadSwitchingFlux()));
}
@Test
void fluxWindowBoundary() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").delayElements(Duration.ofMillis(20))
.window(threadSwitchingFlux()));
}
@Test
void fluxWindowBoundaryFlattened() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").delayElements(Duration.ofMillis(20))
.window(threadSwitchingFlux())
.flatMap(Function.identity()));
}
@Test
@Disabled("Publisher delivering the window has no notion of Context so nothing " +
"can be restored in onNext")
void fluxWindowWhen() {
assertThreadLocalsPresent(
threadSwitchingFlux()
.windowWhen(threadSwitchingFlux(), s -> threadSwitchingFlux()));
}
@Test
@Disabled("Publisher delivering the window has no notion of Context so nothing " +
"can be restored in onNext")
void fluxDelayedWindowWhen() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").delayElements(Duration.ofMillis(100))
.windowWhen(threadSwitchingFlux(), s -> threadSwitchingFlux()));
}
@Test
@Disabled("Publisher completing the window has no notion of Context so nothing " +
"can be restored in onComplete")
void fluxWindowWhenFlatMapped() {
assertThreadLocalsPresentInFlux(() ->
Flux.just("Hello").delayElements(Duration.ofMillis(100))
.windowWhen(threadSwitchingFlux(), s -> threadSwitchingFlux())
.flatMap(Function.identity()));
}
@Test
void fluxWithLatestFrom() {
// We don't validate direct subscription via CoreSubscriber with Context in
// this case as it can happen that the drain loop is in the main thread
// and won't restore TLs from the Context when contextWrite operator is
// missing along the way in the chain.
assertThreadLocalsPresent(
Flux.just("Hello")
.withLatestFrom(threadSwitchingFlux(), (s1, s2) -> s1));
}
@Test
void continuationBrokenByThreadSwitch() {
assertThreadLocalsPresentInFlux(() ->
Flux.concat(Mono.empty(), threadSwitchingMono().retry()));
}
	// Mono tests

	@Test
	void monoCreate() {
		// The MonoSink is completed from another thread.
		assertThreadLocalsPresentInMono(() ->
				Mono.create(sink -> {
					executorService.submit(() -> {
						sink.success("Hello");
					});
				}));
	}

	@Test
	void monoSwitchThreadIgnoreThen() {
		// The thread switch happens in the source that `then` discards.
		assertThreadLocalsPresentInMono(() ->
				threadSwitchingMono().then(Mono.just("Bye")));
	}

	@Test
	void monoIgnoreThenSwitchThread() {
		// The thread switch happens in the continuation after `then`.
		assertThreadLocalsPresentInMono(() ->
				Mono.just("Bye").then(threadSwitchingMono()));
	}

	@Test
	void monoSwitchThreadDelayUntil() {
		// Thread-switching source delayed by a timer trigger.
		assertThreadLocalsPresentInMono(() ->
				threadSwitchingMono().delayUntil(s -> Mono.delay(Duration.ofMillis(1))));
	}

	@Test
	void monoDelayUntilSwitchingThread() {
		// The delayUntil trigger itself switches threads.
		assertThreadLocalsPresentInMono(() ->
				Mono.just("Hello").delayUntil(s -> threadSwitchingMono()));
	}

	@Test
	void monoIgnoreSwitchingThread() {
		assertThreadLocalsPresentInMono(() ->
				Mono.ignoreElements(threadSwitchingMono()));
	}

	@Test
	void monoDeferContextual() {
		// The Context must be visible when the deferred, thread-switching Mono is built.
		assertThreadLocalsPresentInMono(() ->
				Mono.deferContextual(ctx -> threadSwitchingMono()));
	}

	@Test
	void monoDefer() {
		assertThreadLocalsPresentInMono(() ->
				Mono.defer(this::threadSwitchingMono));
	}
@Test
void monoFirstWithSignalArray() {
assertThreadLocalsPresentInMono(() ->
Mono.firstWithSignal(threadSwitchingMono()));
assertThreadLocalsPresentInMono(() ->
Mono.firstWithSignal(threadSwitchingMono())
.or(threadSwitchingMono()));
}
@Test
void monoFirstWithSignalIterable() {
assertThreadLocalsPresentInMono(() ->
Mono.firstWithSignal(Collections.singletonList(threadSwitchingMono())));
assertThreadLocalsPresentInMono(() ->
Mono.firstWithSignal(
Stream.of(threadSwitchingMono(), threadSwitchingMono())
.collect(Collectors.toList())));
}
@Test
void monoFromFluxSingle() {
assertThreadLocalsPresentInMono(() ->
threadSwitchingFlux().single());
}
@Test
void monoRetryWhen() {
assertThreadLocalsPresentInMono(() ->
threadSwitchingMono().retryWhen(Retry.max(1)));
}
@Test
void monoRetryWhenSwitchingThread() {
assertThreadLocalsPresentInMono(() ->
Mono.error(new ExpectedException("Oops"))
.retryWhen(Retry.from(f -> threadSwitchingMono())));
}
@Test
void monoUsing() {
assertThreadLocalsPresentInMono(() ->
Mono.using(() -> "Hello",
seed -> threadSwitchingMono(),
seed -> {},
false));
}
@Test
void monoFirstWithValueArray() {
assertThreadLocalsPresentInMono(() ->
Mono.firstWithValue(Mono.empty(), threadSwitchingMono()));
}
@Test
void monoFirstWithValueIterable() {
assertThreadLocalsPresentInMono(() ->
Mono.firstWithValue(
Stream.of(Mono.<String>empty(), threadSwitchingMono())
.collect(Collectors.toList())));
}
@Test
void monoZip() {
assertThreadLocalsPresentInMono(() ->
Mono.zip(Mono.just(""), threadSwitchingMono()));
}
@Test
void monoZipIterable() {
assertThreadLocalsPresentInMono(() ->
Mono.zip(
Stream.of(Mono.just(""), threadSwitchingMono())
.collect(Collectors.toList()),
obj -> Tuples.of((String) obj[0], (String) obj[1])));
}
@Test
void monoSequenceEqual() {
assertThreadLocalsPresentInMono(() ->
Mono.sequenceEqual(Mono.just("Hello"), threadSwitchingMono()));
}
@Test
void monoWhen() {
assertThreadLocalsPresentInMono(() ->
Mono.when(Mono.empty(), threadSwitchingMono()));
}
@Test
void monoUsingWhen() {
assertThreadLocalsPresentInMono(() ->
Mono.usingWhen(Mono.just("Hello"), s -> threadSwitchingMono(),
s -> Mono.empty()));
}
@Test
void monoFlatMapMany() {
assertThreadLocalsPresentInFlux(() ->
Mono.just("hello")
.hide()
.flatMapMany(item -> threadSwitchingFlux()));
}
@Test
void monoFlatMapManyFuseable() {
assertThreadLocalsPresentInFlux(() ->
Mono.just("hello")
.flatMapMany(item -> threadSwitchingFlux()));
}
@Test
void monoDelaySubscription() {
assertThreadLocalsPresentInMono(() ->
Mono.just("Hello").delaySubscription(threadSwitchingMono()));
}
@Test
void monoFilterWhen() {
assertThreadLocalsPresentInMono(() ->
Mono.just("Hello").hide()
.filterWhen(s -> new ThreadSwitchingMono<>(Boolean.TRUE, executorService)));
}
@Test
void monoLift() {
assertThreadLocalsPresentInMono(() -> {
Mono<String> mono = Mono.just("Hello").hide();
Publisher<String> lifted =
Operators.<String, String>liftPublisher((pub, sub) -> new CoreSubscriber<String>() {
@Override
public void onSubscribe(Subscription s) {
executorService.submit(() -> sub.onSubscribe(s));
}
@Override
public void onNext(String s) {
executorService.submit(() -> sub.onNext(s));
}
@Override
public void onError(Throwable t) {
executorService.submit(() -> sub.onError(t));
}
@Override
public void onComplete() {
executorService.submit(sub::onComplete);
}
})
.apply(mono);
return (Mono<String>) lifted;
});
}
@Test
void monoLiftFuseable() {
assertThreadLocalsPresentInMono(() -> {
Mono<String> mono = Mono.just("Hello");
Publisher<String> lifted =
Operators.<String, String>liftPublisher((pub, sub) -> new CoreSubscriber<String>() {
@Override
public void onSubscribe(Subscription s) {
executorService.submit(() -> sub.onSubscribe(s));
}
@Override
public void onNext(String s) {
executorService.submit(() -> sub.onNext(s));
}
@Override
public void onError(Throwable t) {
executorService.submit(() -> sub.onError(t));
}
@Override
public void onComplete() {
executorService.submit(sub::onComplete);
}
})
.apply(mono);
return (Mono<String>) lifted;
});
}
@Test
void monoOnErrorResume() {
assertThreadLocalsPresentInMono(() ->
Mono.error(new ExpectedException("oops"))
.onErrorResume(e -> threadSwitchingMono()));
}
@Test
void monoPublishMulticast() {
assertThreadLocalsPresentInMono(() ->
Mono.just("Hello")
.publish(s -> threadSwitchingMono()));
}
@Test
void monoSwitchIfEmpty() {
assertThreadLocalsPresentInMono(() ->
Mono.empty()
.switchIfEmpty(threadSwitchingMono()));
}
@Test
void monoTakeUntilOther() {
assertThreadLocalsPresentInMono(() ->
Mono.delay(Duration.ofDays(1)).then(Mono.just("Hello"))
.takeUntilOther(threadSwitchingMono()));
}
@Test
void monoTimeoutFirst() {
assertThreadLocalsPresentInMono(() ->
Mono.never().timeout(threadSwitchingMono()));
}
@Test
void monoTimeoutFallback() {
assertThreadLocalsPresentInMono(() ->
Mono.never().timeout(threadSwitchingMono(), threadSwitchingMono()));
}
// ParallelFlux tests
@Test
void parallelFluxFromMonoToMono() {
assertThreadLocalsPresentInMono(() ->
Mono.from(ParallelFlux.from(threadSwitchingMono())));
}
@Test
void parallelFluxFromMonoToFlux() {
assertThreadLocalsPresentInFlux(() ->
Flux.from(ParallelFlux.from(threadSwitchingMono())));
}
@Test
void parallelFluxFromFluxToMono() {
assertThreadLocalsPresentInMono(() ->
Mono.from(ParallelFlux.from(threadSwitchingFlux())));
}
@Test
void parallelFluxFromFluxToFlux() {
assertThreadLocalsPresentInFlux(() ->
Flux.from(ParallelFlux.from(threadSwitchingFlux())));
}
@Test
void parallelFluxLift() {
assertThreadLocalsPresentInFlux(() -> {
ParallelFlux<String> parallelFlux = ParallelFlux.from(Flux.just("Hello"));
Publisher<String> lifted =
Operators.<String, String>liftPublisher((pub, sub) -> new CoreSubscriber<String>() {
@Override
public void onSubscribe(Subscription s) {
executorService.submit(() -> sub.onSubscribe(s));
}
@Override
public void onNext(String s) {
executorService.submit(() -> sub.onNext(s));
}
@Override
public void onError(Throwable t) {
executorService.submit(() -> sub.onError(t));
}
@Override
public void onComplete() {
executorService.submit(sub::onComplete);
}
})
.apply(parallelFlux);
return ((ParallelFlux<?>) lifted).sequential();
});
}
@Test
void parallelFluxLiftFuseable() {
assertThreadLocalsPresentInFlux(() -> {
ParallelFlux<ArrayList<String>> parallelFlux =
ParallelFlux.from(Flux.just("Hello"))
.collect(ArrayList<String>::new, ArrayList::add);
Publisher<ArrayList<String>> lifted =
Operators.<ArrayList<String>, ArrayList<String>>liftPublisher((pub, sub) -> new CoreSubscriber<ArrayList<String>>() {
@Override
public void onSubscribe(Subscription s) {
executorService.submit(() -> sub.onSubscribe(s));
}
@Override
public void onNext(ArrayList<String> s) {
executorService.submit(() -> sub.onNext(s));
}
@Override
public void onError(Throwable t) {
executorService.submit(() -> sub.onError(t));
}
@Override
public void onComplete() {
executorService.submit(sub::onComplete);
}
})
.apply(parallelFlux);
return ((ParallelFlux<?>) lifted).sequential();
});
}
@Test
void parallelFluxFromThreadSwitchingMono() {
assertThreadLocalsPresentInFlux(() ->
ParallelFlux.from(threadSwitchingMono()).sequential());
}
@Test
void parallelFluxFromThreadSwitchingFlux() {
assertThreadLocalsPresentInFlux(() ->
ParallelFlux.from(threadSwitchingFlux()).sequential());
}
@Test
void threadSwitchingParallelFluxSequential() {
AtomicReference<String> value = new AtomicReference<>();
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.sequential()
.doOnNext(i -> value.set(REF.get()))
.contextWrite(Context.of(KEY, "present"))
.blockLast();
assertThat(value.get()).isEqualTo("present");
}
@Test
void threadSwitchingParallelFluxThen() {
assertThreadLocalsPresentInMono(() ->
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.then());
}
@Test
void threadSwitchingParallelFluxOrdered() {
assertThreadLocalsPresentInFlux(() ->
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.ordered(Comparator.naturalOrder()));
}
@Test
void threadSwitchingParallelFluxReduce() {
AtomicReference<String> value = new AtomicReference<>();
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.reduce((s1, s2) -> s2)
.doOnNext(i -> value.set(REF.get()))
.contextWrite(Context.of(KEY, "present"))
.block();
assertThat(value.get()).isEqualTo("present");
}
@Test
void threadSwitchingParallelFluxReduceSeed() {
AtomicReference<String> value = new AtomicReference<>();
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.reduce(ArrayList::new, (l, s) -> {
value.set(REF.get());
l.add(s);
return l;
})
.sequential()
.contextWrite(Context.of(KEY, "present"))
.blockLast();
assertThat(value.get()).isEqualTo("present");
}
@Test
void threadSwitchingParallelFluxGroup() {
AtomicReference<String> value = new AtomicReference<>();
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.groups()
.doOnNext(i -> value.set(REF.get()))
.flatMap(Flux::last)
.contextWrite(Context.of(KEY, "present"))
.blockLast();
assertThat(value.get()).isEqualTo("present");
}
@Test
void threadSwitchingParallelFluxSort() {
assertThreadLocalsPresentInFlux(() ->
new ThreadSwitchingParallelFlux<>("Hello", executorService)
.sorted(Comparator.naturalOrder()));
}
// ConnectableFlux tests
@Test
void threadSwitchingAutoConnect() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingConnectableFlux().autoConnect());
}
@Test
void threadSwitchingRefCount() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingConnectableFlux().refCount());
}
@Test
void threadSwitchingRefCountGrace() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingConnectableFlux().refCount(1, Duration.ofMillis(100)));
}
@Test
void threadSwitchingPublishAutoConnect() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().publish().autoConnect());
}
@Test
void threadSwitchingPublishRefCount() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().publish().refCount());
}
@Test
void threadSwitchingPublishRefCountGrace() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().publish().refCount(1, Duration.ofMillis(100)));
}
@Test
void threadSwitchingMonoPublish() {
assertThreadLocalsPresentInMono(() -> threadSwitchingMono().publish(Function.identity()));
}
@Test
void threadSwitchingMonoPublishSwitchingThread() {
assertThreadLocalsPresentInMono(() -> threadSwitchingMono().publish(m -> threadSwitchingMono()));
}
@Test
void threadSwitchingReplayAutoConnect() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().replay(1).autoConnect());
}
@Test
void threadSwitchingReplayRefCount() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().replay(1).refCount());
}
@Test
void threadSwitchingReplayRefCountGrace() {
assertThreadLocalsPresentInFlux(() -> threadSwitchingFlux().replay(1).refCount(1, Duration.ofMillis(100)));
}
// Sinks tests
@Test
void sink() throws InterruptedException, TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.One<Integer> sink = Sinks.one();
sink.asMono()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> sink.tryEmitValue(1));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
	@Test
	void sinkDirect() throws InterruptedException, TimeoutException, ExecutionException {
		// Subscribe to the sink's Mono directly (no contextWrite in the chain), once
		// via a CoreSubscriber carrying a Context and once via a raw RS Subscriber.
		Sinks.One<String> sink1 = Sinks.one();
		assertThatThreadLocalsPresentDirectCoreSubscribe(sink1.asMono(),
				() -> sink1.tryEmitValue("Hello"));

		Sinks.One<String> sink2 = Sinks.one();
		assertThatThreadLocalsPresentDirectRawSubscribe(sink2.asMono(),
				() -> sink2.tryEmitValue("Hello"));
	}

	@Test
	void sinksEmpty() throws InterruptedException, TimeoutException {
		AtomicReference<String> value = new AtomicReference<>();
		CountDownLatch latch = new CountDownLatch(1);

		Sinks.Empty<Void> spec = Sinks.empty();
		spec.asMono()
		    .doOnSuccess(ignored -> {
			    // Captured inside the reactive chain: must see the Context-restored TL.
			    value.set(REF.get());
			    latch.countDown();
		    })
		    .contextWrite(Context.of(KEY, "present"))
		    .subscribe();

		// Complete the empty sink from another thread.
		executorService.submit(spec::tryEmitEmpty);

		if (!latch.await(100, TimeUnit.MILLISECONDS)) {
			throw new TimeoutException("timed out");
		}

		assertThat(value.get()).isEqualTo("present");
	}

	@Test
	void sinksEmptyDirect() throws InterruptedException, TimeoutException {
		// Same as sinkDirect but for Sinks.Empty (completion-only signal).
		Sinks.Empty<Object> empty1 = Sinks.empty();
		assertThatThreadLocalsPresentDirectCoreSubscribe(empty1.asMono(), empty1::tryEmitEmpty);

		Sinks.Empty<Object> empty2 = Sinks.empty();
		assertThatThreadLocalsPresentDirectRawSubscribe(empty2.asMono(), empty2::tryEmitEmpty);
	}
@Test
void sinkManyUnicast() throws InterruptedException, TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.ManySpec spec = Sinks.many();
Sinks.Many<String> many = spec.unicast()
.onBackpressureBuffer();
many.asFlux()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> many.tryEmitNext("Hello"));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
@Test
void sinkManyUnicastDirect() throws InterruptedException, TimeoutException {
Sinks.Many<String> many1 = Sinks.many().unicast()
.onBackpressureBuffer();
assertThatThreadLocalsPresentDirectCoreSubscribe(many1.asFlux(), () -> {
many1.tryEmitNext("Hello");
many1.tryEmitComplete();
});
Sinks.Many<String> many2 = Sinks.many().unicast()
.onBackpressureBuffer();
assertThatThreadLocalsPresentDirectRawSubscribe(many2.asFlux(), () -> {
many2.tryEmitNext("Hello");
many2.tryEmitComplete();
});
}
@Test
void sinkManyUnicastNoBackpressure() throws InterruptedException,
TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.ManySpec spec = Sinks.many();
Sinks.Many<String> many = spec.unicast().onBackpressureError();
many.asFlux()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> many.tryEmitNext("Hello"));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
@Test
void sinkManyMulticastAllOrNothing() throws InterruptedException,
TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.ManySpec spec = Sinks.many();
Sinks.Many<String> many = spec.multicast().directAllOrNothing();
many.asFlux()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> many.tryEmitNext("Hello"));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
@Test
void sinkManyMulticastBuffer() throws InterruptedException, TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.ManySpec spec = Sinks.many();
Sinks.Many<String> many = spec.multicast().onBackpressureBuffer();
many.asFlux()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> many.tryEmitNext("Hello"));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
@Test
void sinkManyMulticastBestEffort() throws InterruptedException, TimeoutException {
AtomicReference<String> value = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Sinks.ManySpec spec = Sinks.many();
Sinks.Many<String> many = spec.multicast().directBestEffort();
many.asFlux()
.doOnNext(i -> {
value.set(REF.get());
latch.countDown();
})
.contextWrite(Context.of(KEY, "present"))
.subscribe();
executorService.submit(() -> many.tryEmitNext("Hello"));
if (!latch.await(100, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("timed out");
}
assertThat(value.get()).isEqualTo("present");
}
// Other
List<Class<?>> getAllClassesInClasspathRecursively(File directory) throws Exception {
List<Class<?>> classes = new ArrayList<>();
for (File file : directory.listFiles()) {
if (file.isDirectory()) {
classes.addAll(getAllClassesInClasspathRecursively(file));
} else if (file.getName().endsWith(".class") ) {
String path = file.getPath();
path = path.replace("./build/classes/java/main/reactor/", "");
String pkg = path.substring(0, path.lastIndexOf("/") + 1).replace("/",
".");
String name = path.substring(path.lastIndexOf("/") + 1).replace(".class", "");
try {
classes.add(Class.forName("reactor." + pkg + name));
}
catch (ClassNotFoundException ex) {
System.out.println("Ignoring " + pkg + name);
} catch (NoClassDefFoundError err) {
System.out.println("Ignoring " + pkg + name);
}
}
}
return classes;
}
	@Test
	@Disabled("Used to find Publishers that can switch threads")
	void printInterestingClasses() throws Exception {
		// Developer utility, not a real test: scans the compiled reactor classes and
		// prints Publisher implementations that are neither plain operators nor
		// recognized source/variant types — i.e. candidates worth covering above.
		List<Class<?>> allClasses =
				getAllClassesInClasspathRecursively(new File("./build/classes/java/main/reactor/"));

		System.out.println("Classes that are Publisher, but not SourceProducer, " +
				"ConnectableFlux, ParallelFlux, GroupedFlux, MonoFromFluxOperator, " +
				"FluxFromMonoOperator:");
		for (Class<?> c : allClasses) {
			if (Publisher.class.isAssignableFrom(c) && !SourceProducer.class.isAssignableFrom(c)
					&& !ConnectableFlux.class.isAssignableFrom(c)
					&& !ParallelFlux.class.isAssignableFrom(c)
					&& !GroupedFlux.class.isAssignableFrom(c)
					&& !MonoFromFluxOperator.class.isAssignableFrom(c)
					&& !FluxFromMonoOperator.class.isAssignableFrom(c)) {
				// A Flux/Mono that is not a *Operator has no upstream, so it may
				// originate signals itself (possibly on another thread).
				if (Flux.class.isAssignableFrom(c) && !FluxOperator.class.isAssignableFrom(c)) {
					System.out.println(c.getName());
				}
				if (Mono.class.isAssignableFrom(c) && !MonoOperator.class.isAssignableFrom(c)) {
					System.out.println(c.getName());
				}
			}
		}

		System.out.println("Classes that are Fuseable and Publisher but not Mono or Flux, ?");
		for (Class<?> c : allClasses) {
			if (Fuseable.class.isAssignableFrom(c) && Publisher.class.isAssignableFrom(c)
					&& !Mono.class.isAssignableFrom(c)
					&& !Flux.class.isAssignableFrom(c)) {
				System.out.println(c.getName());
			}
		}
	}
private | and |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFileFactory.java | {
"start": 1271,
"end": 4248
} | class ____ {
/**
* Create a new {@link UrlJarFile} or {@link UrlNestedJarFile} instance.
* @param jarFileUrl the jar file URL
* @param closeAction the action to call when the file is closed
* @return a new {@link JarFile} instance
* @throws IOException on I/O error
*/
JarFile createJarFile(URL jarFileUrl, Consumer<JarFile> closeAction) throws IOException {
Runtime.Version version = getVersion(jarFileUrl);
if (isLocalFileUrl(jarFileUrl)) {
return createJarFileForLocalFile(jarFileUrl, version, closeAction);
}
if (isNestedUrl(jarFileUrl)) {
return createJarFileForNested(jarFileUrl, version, closeAction);
}
return createJarFileForStream(jarFileUrl, version, closeAction);
}
private Runtime.Version getVersion(URL url) {
// The standard JDK handler uses #runtime to indicate that the runtime version
// should be used. This unfortunately doesn't work for us as
// jdk.internal.loader.URLClassPath only adds the runtime fragment when the URL
// is using the internal JDK handler. We need to flip the default to use
// the runtime version. See gh-38050
return "base".equals(url.getRef()) ? JarFile.baseVersion() : JarFile.runtimeVersion();
}
private boolean isLocalFileUrl(URL url) {
return url.getProtocol().equalsIgnoreCase("file") && isLocal(url.getHost());
}
private boolean isLocal(String host) {
return host == null || host.isEmpty() || host.equals("~") || host.equalsIgnoreCase("localhost");
}
private JarFile createJarFileForLocalFile(URL url, Runtime.Version version, Consumer<JarFile> closeAction)
throws IOException {
String path = UrlDecoder.decode(url.getPath());
return new UrlJarFile(new File(path), version, closeAction);
}
private JarFile createJarFileForNested(URL url, Runtime.Version version, Consumer<JarFile> closeAction)
throws IOException {
NestedLocation location = NestedLocation.fromUrl(url);
return new UrlNestedJarFile(location.path().toFile(), location.nestedEntryName(), version, closeAction);
}
private JarFile createJarFileForStream(URL url, Version version, Consumer<JarFile> closeAction) throws IOException {
try (InputStream in = url.openStream()) {
return createJarFileForStream(in, version, closeAction);
}
}
private JarFile createJarFileForStream(InputStream in, Version version, Consumer<JarFile> closeAction)
throws IOException {
Path local = Files.createTempFile("jar_cache", null);
try {
Files.copy(in, local, StandardCopyOption.REPLACE_EXISTING);
JarFile jarFile = new UrlJarFile(local.toFile(), version, closeAction);
local.toFile().deleteOnExit();
return jarFile;
}
catch (Throwable ex) {
deleteIfPossible(local, ex);
throw ex;
}
}
private void deleteIfPossible(Path local, Throwable cause) {
try {
Files.delete(local);
}
catch (IOException ex) {
cause.addSuppressed(ex);
}
}
static boolean isNestedUrl(URL url) {
return url.getProtocol().equalsIgnoreCase("nested");
}
}
| UrlJarFileFactory |
java | google__guava | android/guava/src/com/google/common/collect/AbstractTable.java | {
"start": 1329,
"end": 3763
} | class ____<
R extends @Nullable Object, C extends @Nullable Object, V extends @Nullable Object>
implements Table<R, C, V> {
@Override
public boolean containsRow(@Nullable Object rowKey) {
return Maps.safeContainsKey(rowMap(), rowKey);
}
@Override
public boolean containsColumn(@Nullable Object columnKey) {
return Maps.safeContainsKey(columnMap(), columnKey);
}
@Override
public Set<R> rowKeySet() {
return rowMap().keySet();
}
@Override
public Set<C> columnKeySet() {
return columnMap().keySet();
}
@Override
public boolean containsValue(@Nullable Object value) {
for (Map<C, V> row : rowMap().values()) {
if (row.containsValue(value)) {
return true;
}
}
return false;
}
@Override
public boolean contains(@Nullable Object rowKey, @Nullable Object columnKey) {
Map<C, V> row = safeGet(rowMap(), rowKey);
return row != null && Maps.safeContainsKey(row, columnKey);
}
@Override
public @Nullable V get(@Nullable Object rowKey, @Nullable Object columnKey) {
Map<C, V> row = safeGet(rowMap(), rowKey);
return (row == null) ? null : safeGet(row, columnKey);
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public void clear() {
Iterators.clear(cellSet().iterator());
}
@CanIgnoreReturnValue
@Override
public @Nullable V remove(@Nullable Object rowKey, @Nullable Object columnKey) {
Map<C, V> row = safeGet(rowMap(), rowKey);
return (row == null) ? null : Maps.safeRemove(row, columnKey);
}
@CanIgnoreReturnValue
@Override
public @Nullable V put(
@ParametricNullness R rowKey, @ParametricNullness C columnKey, @ParametricNullness V value) {
return row(rowKey).put(columnKey, value);
}
@Override
public void putAll(Table<? extends R, ? extends C, ? extends V> table) {
for (Table.Cell<? extends R, ? extends C, ? extends V> cell : table.cellSet()) {
put(cell.getRowKey(), cell.getColumnKey(), cell.getValue());
}
}
@LazyInit private transient @Nullable Set<Cell<R, C, V>> cellSet;
@Override
public Set<Cell<R, C, V>> cellSet() {
Set<Cell<R, C, V>> result = cellSet;
return (result == null) ? cellSet = createCellSet() : result;
}
Set<Cell<R, C, V>> createCellSet() {
return new CellSet();
}
abstract Iterator<Table.Cell<R, C, V>> cellIterator();
@WeakOuter
private final | AbstractTable |
java | grpc__grpc-java | gcp-observability/src/test/java/io/grpc/gcp/observability/LoggingTest.java | {
"start": 2339,
"end": 4153
} | class ____ {
@ClassRule
public static final GrpcCleanupRule cleanupRule = new GrpcCleanupRule();
private static final String PROJECT_ID = "PROJECT";
private static final ImmutableMap<String, String> CUSTOM_TAGS = ImmutableMap.of(
"KEY1", "Value1",
"KEY2", "VALUE2");
private final StaticTestingClassLoader classLoader =
new StaticTestingClassLoader(getClass().getClassLoader(), Pattern.compile("io\\.grpc\\..*"));
/**
* Cloud logging test using global interceptors.
*
* <p>Ignoring test, because it calls external Cloud Logging APIs.
* To test cloud logging setup locally,
* 1. Set up Cloud auth credentials
* 2. Assign permissions to service account to write logs to project specified by
* variable PROJECT_ID
* 3. Comment @Ignore annotation
* 4. This test is expected to pass when ran with above setup. This has been verified manually.
* </p>
*/
@Ignore
@Test
public void clientServer_interceptorCalled_logAlways() throws Exception {
Class<?> runnable =
classLoader.loadClass(LoggingTest.StaticTestingClassEndtoEndLogging.class.getName());
((Runnable) runnable.getDeclaredConstructor().newInstance()).run();
}
@Test
public void clientServer_interceptorCalled_logNever() throws Exception {
Class<?> runnable =
classLoader.loadClass(LoggingTest.StaticTestingClassLogNever.class.getName());
((Runnable) runnable.getDeclaredConstructor().newInstance()).run();
}
@Test
public void clientServer_interceptorCalled_logEvents_usingMockSink() throws Exception {
Class<?> runnable =
classLoader.loadClass(StaticTestingClassLogEventsUsingMockSink.class.getName());
((Runnable) runnable.getDeclaredConstructor().newInstance()).run();
}
// UsedReflectively
public static final | LoggingTest |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/aspectj/ShadowMatchUtils.java | {
"start": 964,
"end": 2049
} | class ____ {
private static final Map<Object, ShadowMatch> shadowMatchCache = new ConcurrentHashMap<>(256);
/**
* Find a {@link ShadowMatch} for the specified key.
* @param key the key to use
* @return the {@code ShadowMatch} to use for the specified key,
* or {@code null} if none found
*/
static @Nullable ShadowMatch getShadowMatch(Object key) {
return shadowMatchCache.get(key);
}
/**
* Associate the {@link ShadowMatch} with the specified key.
* If an entry already exists, the given {@code shadowMatch} is ignored.
* @param key the key to use
* @param shadowMatch the shadow match to use for this key
* if none already exists
* @return the shadow match to use for the specified key
*/
static ShadowMatch setShadowMatch(Object key, ShadowMatch shadowMatch) {
ShadowMatch existing = shadowMatchCache.putIfAbsent(key, shadowMatch);
return (existing != null ? existing : shadowMatch);
}
/**
* Clear the cache of computed {@link ShadowMatch} instances.
*/
public static void clearCache() {
shadowMatchCache.clear();
}
}
| ShadowMatchUtils |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java | {
"start": 24444,
"end": 24840
} | interface ____ {
String PREFIX = Read.PREFIX + "striped.";
String THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
/**
* With default RS-6-3-1024k erasure coding policy, each normal read could
* span 6 DNs, so this default value accommodates 3 read streams
*/
int THREADPOOL_SIZE_DEFAULT = 18;
}
/** dfs.http.client configuration properties */
| StripedRead |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/legacy/B.java | {
"start": 164,
"end": 563
} | class ____ extends A {
private int count;
private Map map;
private String bName = "B Name";
public int getCount() {
return count;
}
public void setCount(int count) {
this.count = count;
}
public Map getMap() {
return map;
}
public void setMap(Map map) {
this.map = map;
}
public String getBName() {
return bName;
}
public void setBName(String name) {
bName = name;
}
}
| B |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_849/Issue849Mapper.java | {
"start": 339,
"end": 548
} | interface ____ {
Issue849Mapper INSTANCE = Mappers.getMapper( Issue849Mapper.class );
@Mapping(target = "targetList", source = "sourceList")
Target mapSourceToTarget(Source source);
}
| Issue849Mapper |
java | elastic__elasticsearch | modules/percolator/src/yamlRestTest/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java | {
"start": 870,
"end": 1472
} | class ____ extends ESClientYamlSuiteTestCase {
public PercolatorClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("percolator").build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| PercolatorClientYamlTestSuiteIT |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/MessageSupportTest.java | {
"start": 1262,
"end": 4212
} | class ____ extends ContextTestSupport {
@Test
public void testSetBodyType() {
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
in.setBody("123", Integer.class);
assertIsInstanceOf(Integer.class, in.getBody());
}
@Test
public void testGetMandatoryBody() throws Exception {
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
assertThrows(InvalidPayloadException.class, in::getMandatoryBody,
"Should have thrown an exception");
in.setBody("Hello World");
assertEquals("Hello World", in.getMandatoryBody());
}
@Test
public void testGetMessageIdWithGenerator() {
context.setUuidGenerator(new SimpleUuidGenerator());
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
// they should use the same id
assertEquals("1", in.getMessageId());
assertEquals("1", in.getExchange().getExchangeId());
}
@Test
public void testGetMessageId() {
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
// they should use the same id
assertSame(in.getExchange().getExchangeId(), in.getMessageId());
}
@Test
public void testGetMessageIdWithoutAnExchange() {
Message in = new DefaultMessage(context);
// there are no exchange so its null
assertNull(in.getMessageId());
}
@Test
public void testCopyFromSameHeadersInstance() {
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
Map<String, Object> headers = in.getHeaders();
headers.put("foo", 123);
Message out = new DefaultMessage(context);
out.setBody("Bye World");
out.setHeaders(headers);
out.copyFrom(in);
assertEquals(123, headers.get("foo"));
assertEquals(123, in.getHeader("foo"));
assertEquals(123, out.getHeader("foo"));
}
@Test
public void testCopyOverExchange() {
Exchange exchange = new DefaultExchange(context);
Message in = exchange.getIn();
in.setBody("Bye World");
Message two = in.copy();
assertSame(exchange, two.getExchange());
Message three = new DefaultMessage(context);
three.copyFrom(two);
assertSame(exchange, three.getExchange());
}
@Test
public void testNoMessageTimestamp() {
Exchange exchange = new DefaultExchange(context);
assertEquals(0L, exchange.getMessage().getMessageTimestamp());
}
@Test
public void testMessageTimestamp() {
Exchange exchange = new DefaultExchange(context);
exchange.getMessage().setHeader(Exchange.MESSAGE_TIMESTAMP, 1234L);
assertEquals(1234L, exchange.getMessage().getMessageTimestamp());
}
}
| MessageSupportTest |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/ControllerRegistrationTest.java | {
"start": 1587,
"end": 5990
} | class ____ {
static <K, V> Map<K, V> doubleMap(K k1, V v1, K k2, V v2) {
LinkedHashMap<K, V> map = new LinkedHashMap<>();
map.put(k1, v1);
map.put(k2, v2);
return map;
}
private static final List<ControllerRegistration> REGISTRATIONS = List.of(
new ControllerRegistration.Builder().
setId(0).
setIncarnationId(Uuid.fromString("ycRmGrOFQru7HXf6fOybZQ")).
setZkMigrationReady(true).
setListeners(doubleMap(
"PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9107),
"SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9207))).
setSupportedFeatures(Map.of(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))).
build(),
new ControllerRegistration.Builder().
setId(1).
setIncarnationId(Uuid.fromString("ubT_wuD6R3uopZ_lV76dQg")).
setZkMigrationReady(true).
setListeners(doubleMap(
"PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9108),
"SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9208))).
setSupportedFeatures(Map.of(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))).
build(),
new ControllerRegistration.Builder().
setId(2).
setIncarnationId(Uuid.fromString("muQS341gRIeNh9Ps7reDSw")).
setZkMigrationReady(false).
setListeners(doubleMap(
"PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9109),
"SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9209))).
setSupportedFeatures(Map.of(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))).
build()
);
@Test
public void testValues() {
assertEquals(0, REGISTRATIONS.get(0).id());
assertEquals(1, REGISTRATIONS.get(1).id());
assertEquals(2, REGISTRATIONS.get(2).id());
}
@Test
public void testEquals() {
assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(1));
assertNotEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(0));
assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(2));
assertNotEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(0));
assertEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(0));
assertEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(1));
assertEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(2));
}
@Test
public void testToString() {
assertEquals("ControllerRegistration(id=1, " +
"incarnationId=ubT_wuD6R3uopZ_lV76dQg, " +
"zkMigrationReady=true, " +
"listeners=[" +
"Endpoint(listenerName='PLAINTEXT', securityProtocol=PLAINTEXT, host='localhost', port=9108), " +
"Endpoint(listenerName='SSL', securityProtocol=SSL, host='localhost', port=9208)]" +
", supportedFeatures={metadata.version: 1-10})",
REGISTRATIONS.get(1).toString());
}
@Test
public void testFromRecordAndToRecord() {
testRoundTrip(REGISTRATIONS.get(0));
testRoundTrip(REGISTRATIONS.get(1));
testRoundTrip(REGISTRATIONS.get(2));
}
private void testRoundTrip(ControllerRegistration registration) {
ApiMessageAndVersion messageAndVersion = registration.
toRecord(new ImageWriterOptions.Builder(MetadataVersion.latestProduction()).build());
ControllerRegistration registration2 = new ControllerRegistration.Builder(
(RegisterControllerRecord) messageAndVersion.message()).build();
assertEquals(registration, registration2);
ApiMessageAndVersion messageAndVersion2 = registration2.
toRecord(new ImageWriterOptions.Builder(MetadataVersion.latestProduction()).build());
assertEquals(messageAndVersion, messageAndVersion2);
}
@Test
public void testToNode() {
assertEquals(Optional.empty(), REGISTRATIONS.get(0).node("NONEXISTENT"));
assertEquals(Optional.of(new Node(0, "localhost", 9107, null)),
REGISTRATIONS.get(0).node("PLAINTEXT"));
assertEquals(Optional.of(new Node(0, "localhost", 9207, null)),
REGISTRATIONS.get(0).node("SSL"));
}
}
| ControllerRegistrationTest |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/util/ConcreteSubClassValidator.java | {
"start": 940,
"end": 1490
} | class ____ implements ConfigDef.Validator {
private final Class<?> expectedSuperClass;
private ConcreteSubClassValidator(Class<?> expectedSuperClass) {
this.expectedSuperClass = expectedSuperClass;
}
public static ConcreteSubClassValidator forSuperClass(Class<?> expectedSuperClass) {
return new ConcreteSubClassValidator(expectedSuperClass);
}
@Override
public void ensureValid(String name, Object value) {
if (value == null) {
// The value will be null if the | ConcreteSubClassValidator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/AnnotationConverterAndEmbeddableTest.java | {
"start": 1026,
"end": 2228
} | class ____ {
private static final String EXPECTED_ERROR_MESSAGE = "Exception was thrown by IntegerToStringConverter";
@Test
public void testConverterIsCorrectlyApplied(EntityManagerFactoryScope scope) {
SQLStatementInspector sqlStatementInspector = (SQLStatementInspector) scope.getStatementInspector();
sqlStatementInspector.clear();
scope.inTransaction(
entityManager -> {
Person b = new Person(
1,
"and n.",
new Address( "Localita S. Egidio n. 5", "Gradoli" )
);
entityManager.persist( b );
}
);
List<String> sqlQueries = sqlStatementInspector.getSqlQueries();
assertThat( sqlQueries.size() ).isEqualTo( 1 );
sqlStatementInspector.assertIsInsert( 0 );
String query = sqlQueries.get( 0 );
assertThat( query.contains( "Localita S. Egidio # 5" ) );
assertThat( query.contains( "and #" ) );
}
@Entity(name = "Person")
@Converts(
value = {
@Convert(attributeName = "address.street", converter = AnnotationConverterAndEmbeddableTest.StreetConverter.class),
@Convert(attributeName = "name", converter = AnnotationConverterAndEmbeddableTest.StreetConverter.class),
}
)
public static | AnnotationConverterAndEmbeddableTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java | {
"start": 1491,
"end": 4542
} | class ____ extends TransportResponse {
private SearchShardTarget searchShardTarget;
private int shardIndex = -1;
protected ShardSearchContextId contextId;
private ShardSearchRequest shardSearchRequest;
private RescoreDocIds rescoreDocIds = RescoreDocIds.EMPTY;
protected SearchPhaseResult() {}
/**
* Specifies whether the specific search phase results are associated with an opened SearchContext on the shards that
* executed the request.
*/
public boolean hasSearchContext() {
return false;
}
/**
* Returns the search context ID that is used to reference the search context on the executing node
* or <code>null</code> if no context was created.
*/
@Nullable
public ShardSearchContextId getContextId() {
return contextId;
}
/**
* Null out the context id and request tracked in this instance. This is used to mark shards for which merging results on the data node
* made it clear that their search context won't be used in the fetch phase.
*/
public void clearContextId() {
this.shardSearchRequest = null;
this.contextId = null;
}
/**
* Returns the shard index in the context of the currently executing search request that is
* used for accounting on the coordinating node
*/
public int getShardIndex() {
assert shardIndex != -1 : "shardIndex is not set";
return shardIndex;
}
public SearchShardTarget getSearchShardTarget() {
return searchShardTarget;
}
public void setSearchShardTarget(SearchShardTarget shardTarget) {
this.searchShardTarget = shardTarget;
}
public void setShardIndex(int shardIndex) {
assert shardIndex >= 0 : "shardIndex must be >= 0 but was: " + shardIndex;
this.shardIndex = shardIndex;
}
/**
* Returns the query result iff it's included in this response otherwise <code>null</code>
*/
public QuerySearchResult queryResult() {
return null;
}
/**
* Returns the rank feature result iff it's included in this response otherwise <code>null</code>
*/
public RankFeatureResult rankFeatureResult() {
return null;
}
/**
* Returns the fetch result iff it's included in this response otherwise <code>null</code>
*/
public FetchSearchResult fetchResult() {
return null;
}
@Nullable
public ShardSearchRequest getShardSearchRequest() {
return shardSearchRequest;
}
public void setShardSearchRequest(ShardSearchRequest shardSearchRequest) {
this.shardSearchRequest = shardSearchRequest;
}
public RescoreDocIds getRescoreDocIds() {
return rescoreDocIds;
}
public void setRescoreDocIds(RescoreDocIds rescoreDocIds) {
this.rescoreDocIds = rescoreDocIds;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// TODO: this seems wrong, SearchPhaseResult should have a writeTo?
}
}
| SearchPhaseResult |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configuration/WebSecurityConfigurationTests.java | {
"start": 27616,
"end": 28311
} | class ____ {
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE)
public SecurityFilterChain path1(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatchers((requests) -> requests.requestMatchers(pathPattern("/path1/**")))
.authorizeHttpRequests((requests) -> requests.anyRequest().authenticated());
// @formatter:on
return http.build();
}
@Bean
@Order(Ordered.LOWEST_PRECEDENCE)
public SecurityFilterChain permitAll(HttpSecurity http) throws Exception {
http.authorizeHttpRequests((requests) -> requests.anyRequest().permitAll());
return http.build();
}
}
@Configuration
@EnableWebSecurity(debug = true)
static | TwoSecurityFilterChainConfig |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_wuyexiong.java | {
"start": 964,
"end": 1683
} | class ____ {
private Track[] track;
public void setTrack(Track[] track) {
this.track = track;
}
public Track[] getTrack() {
return track;
}
}
public void test_for_wuyexiong() throws Exception {
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream("wuyexiong.json");
String text = org.apache.commons.io.IOUtils.toString(is);
org.apache.commons.io.IOUtils.closeQuietly(is);
Tracks tracks = JSON.parseObject(text, Tracks.class);
Assert.assertEquals("Learn about developing mobile handset and tablet apps for Android.", tracks.getTrack()[0].getAbstract());
}
}
| Tracks |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java | {
"start": 803,
"end": 4335
} | class ____ extends IndexInput implements RandomAccessInput {
private final byte[] bytes;
private int pos;
private final int offset;
private final int length;
public ByteArrayIndexInput(String resourceDesc, byte[] bytes) {
this(resourceDesc, bytes, 0, bytes.length);
}
public ByteArrayIndexInput(String resourceDesc, byte[] bytes, int offset, int length) {
super(resourceDesc);
this.bytes = bytes;
this.offset = offset;
this.pos = offset;
this.length = length;
}
@Override
public void close() throws IOException {}
@Override
public long getFilePointer() {
return pos - offset;
}
@Override
public void seek(long l) throws IOException {
pos = position(l);
}
private int position(long p) throws EOFException {
if (p < 0) {
throw new IllegalArgumentException("Seeking to negative position: " + p);
} else if (p > length) {
throw new EOFException("seek past EOF");
}
return (int) p + offset;
}
@Override
public long length() {
return length;
}
@Override
public byte readByte(long pos) throws IOException {
return bytes[position(pos)];
}
@Override
public short readShort(long pos) throws IOException {
return (short) BitUtil.VH_LE_SHORT.get(bytes, position(pos));
}
@Override
public int readInt(long pos) throws IOException {
return (int) BitUtil.VH_LE_INT.get(bytes, position(pos));
}
@Override
public long readLong(long pos) throws IOException {
return (long) BitUtil.VH_LE_LONG.get(bytes, position(pos));
}
@Override
public IndexInput slice(String sliceDescription, long offset, long length) throws IOException {
if (offset >= 0L && length >= 0L && offset + length <= this.length) {
return new ByteArrayIndexInput(sliceDescription, bytes, this.offset + (int) offset, (int) length);
} else {
throw new IllegalArgumentException(
Strings.format(
"slice() %s out of bounds: offset=%d,length=%d,fileLength=%d: %s",
sliceDescription,
offset,
length,
this.length,
this
)
);
}
}
@Override
public byte readByte() throws IOException {
if (pos >= offset + length) {
throw new EOFException("seek past EOF");
}
return bytes[pos++];
}
@Override
public void readBytes(final byte[] b, final int offset, int len) throws IOException {
if (pos + len > this.offset + length) {
throw new EOFException("seek past EOF");
}
System.arraycopy(bytes, pos, b, offset, len);
pos += len;
}
@Override
public short readShort() throws IOException {
try {
return (short) BitUtil.VH_LE_SHORT.get(bytes, pos);
} finally {
pos += Short.BYTES;
}
}
@Override
public int readInt() throws IOException {
try {
return (int) BitUtil.VH_LE_INT.get(bytes, pos);
} finally {
pos += Integer.BYTES;
}
}
@Override
public long readLong() throws IOException {
try {
return (long) BitUtil.VH_LE_LONG.get(bytes, pos);
} finally {
pos += Long.BYTES;
}
}
}
| ByteArrayIndexInput |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit/jupiter/SpringExtension.java | {
"start": 9617,
"end": 9882
} | class ____ not annotated with {@link Autowired @Autowired}.
* @since 5.3.2
*/
private void validateAutowiredConfig(ExtensionContext context) {
// We save the result in the ExtensionContext.Store so that we don't
// re-validate all methods for the same test | are |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 90231,
"end": 90420
} | class ____ {
}
@EnableConfigurationProperties(ConstructorBindingWithOuterClassConstructorBoundAndNestedAutowired.class)
static | ConstructorBindingWithOuterClassConstructorBoundConfiguration |
java | elastic__elasticsearch | x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java | {
"start": 34916,
"end": 36628
} | class ____ extends SnapshotHistoryStore {
private final Consumer<SnapshotHistoryItem> verifier;
public VerifyingHistoryStore(Client client, ClusterService clusterService, Consumer<SnapshotHistoryItem> verifier) {
super(client, clusterService);
this.verifier = verifier;
}
@Override
public void putAsync(SnapshotHistoryItem item) {
verifier.accept(item);
}
}
private static SnapshotInfo randomSnapshotInfoSuccess(ProjectId projectId) {
long startTime = randomNonNegativeLong();
long endTime = randomLongBetween(startTime, Long.MAX_VALUE);
return new SnapshotInfo(
new Snapshot(projectId, "repo", randSnapshotId()),
List.of("index1", "index2"),
List.of(),
List.of(),
null,
endTime,
2,
List.of(),
randomBoolean(),
Map.of(),
startTime,
Map.of()
);
}
private static SnapshotInfo randomSnapshotInfoFailure(ProjectId projectId) {
long startTime = randomNonNegativeLong();
long endTime = randomLongBetween(startTime, Long.MAX_VALUE);
return new SnapshotInfo(
new Snapshot(projectId, "repo", randSnapshotId()),
List.of("index1", "index2"),
List.of(),
List.of(),
"failed snapshot",
endTime,
2,
List.of(new SnapshotShardFailure("nodeId", new ShardId("index", "uuid", 0), "forced failure")),
randomBoolean(),
Map.of(),
startTime,
Map.of()
);
}
}
| VerifyingHistoryStore |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/resource/UnavailableResources.java | {
"start": 361,
"end": 1216
} | enum ____ implements EventLoopResources {
INSTANCE;
@Override
public boolean matches(Class<? extends EventExecutorGroup> type) {
return false;
}
@Override
public Class<? extends EventLoopGroup> eventLoopGroupClass() {
return null;
}
@Override
public EventLoopGroup newEventLoopGroup(int nThreads, ThreadFactory threadFactory) {
return null;
}
@Override
public Class<? extends Channel> socketChannelClass() {
return null;
}
@Override
public Class<? extends Channel> domainSocketChannelClass() {
return null;
}
@Override
public Class<? extends DatagramChannel> datagramChannelClass() {
return null;
}
@Override
public SocketAddress newSocketAddress(String socketPath) {
return null;
}
}
| UnavailableResources |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/InnerInnerTest.java | {
"start": 516,
"end": 693
} | class ____{
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
}
public | InnerInner |
java | apache__camel | components/camel-kamelet/src/test/java/org/apache/camel/component/kamelet/KameletEipAggregateTest.java | {
"start": 1138,
"end": 2599
} | class ____ extends CamelTestSupport {
@Test
public void testAggregate() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("A,B,C,D,E");
template.sendBody("direct:start", "A");
template.sendBody("direct:start", "B");
template.sendBody("direct:start", "C");
template.sendBody("direct:start", "D");
template.sendBody("direct:start", "E");
MockEndpoint.assertIsSatisfied(context);
}
// **********************************************
//
// test set-up
//
// **********************************************
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
routeTemplate("my-aggregate")
.templateParameter("count")
.from("kamelet:source")
.aggregate(constant(true))
.completionSize("{{count}}")
.aggregationStrategy(AggregationStrategies.string(","))
.to("log:aggregate")
.to("kamelet:sink")
.end();
from("direct:start")
.kamelet("my-aggregate?count=5")
.to("log:info")
.to("mock:result");
}
};
}
}
| KameletEipAggregateTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterRWPostingsFormat.java | {
"start": 1518,
"end": 2406
} | class ____ extends ES85BloomFilterPostingsFormat {
private final Function<String, PostingsFormat> postingsFormats;
private final BigArrays bigArrays;
ES85BloomFilterRWPostingsFormat(BigArrays bigArrays, Function<String, PostingsFormat> postingsFormats) {
super();
this.bigArrays = Objects.requireNonNull(bigArrays);
this.postingsFormats = Objects.requireNonNull(postingsFormats);
}
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
if (postingsFormats == null || bigArrays == null) {
assert false : BLOOM_CODEC_NAME + " was initialized with a wrong constructor";
throw new UnsupportedOperationException(BLOOM_CODEC_NAME + " was initialized with a wrong constructor");
}
return new FieldsWriter(state);
}
final | ES85BloomFilterRWPostingsFormat |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/AbstractIoUringStreamChannel.java | {
"start": 1431,
"end": 8038
} | class ____ extends AbstractIoUringChannel implements DuplexChannel {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractIoUringStreamChannel.class);
private static final ChannelMetadata METADATA = new ChannelMetadata(false, 16);
// Store the opCode so we know if we used WRITE or WRITEV.
byte writeOpCode;
// Keep track of the ids used for write and read so we can cancel these when needed.
long writeId;
byte readOpCode;
long readId;
// The configured buffer ring if any
private IoUringBufferRing bufferRing;
AbstractIoUringStreamChannel(Channel parent, LinuxSocket socket, boolean active) {
super(parent, socket, active);
}
AbstractIoUringStreamChannel(Channel parent, LinuxSocket socket, SocketAddress remote) {
super(parent, socket, remote);
}
@Override
public ChannelMetadata metadata() {
return METADATA;
}
@Override
protected AbstractUringUnsafe newUnsafe() {
return new IoUringStreamUnsafe();
}
@Override
public final ChannelFuture shutdown() {
return shutdown(newPromise());
}
@Override
public final ChannelFuture shutdown(final ChannelPromise promise) {
ChannelFuture shutdownOutputFuture = shutdownOutput();
if (shutdownOutputFuture.isDone()) {
shutdownOutputDone(shutdownOutputFuture, promise);
} else {
shutdownOutputFuture.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(final ChannelFuture shutdownOutputFuture) throws Exception {
shutdownOutputDone(shutdownOutputFuture, promise);
}
});
}
return promise;
}
@Override
protected final void doShutdownOutput() throws Exception {
socket.shutdown(false, true);
}
private void shutdownInput0(final ChannelPromise promise) {
try {
socket.shutdown(true, false);
promise.setSuccess();
} catch (Throwable cause) {
promise.setFailure(cause);
}
}
@Override
public final boolean isOutputShutdown() {
return socket.isOutputShutdown();
}
@Override
public final boolean isInputShutdown() {
return socket.isInputShutdown();
}
@Override
public final boolean isShutdown() {
return socket.isShutdown();
}
@Override
public final ChannelFuture shutdownOutput() {
return shutdownOutput(newPromise());
}
@Override
public final ChannelFuture shutdownOutput(final ChannelPromise promise) {
EventLoop loop = eventLoop();
if (loop.inEventLoop()) {
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
}
});
}
return promise;
}
@Override
public final ChannelFuture shutdownInput() {
return shutdownInput(newPromise());
}
@Override
public final ChannelFuture shutdownInput(final ChannelPromise promise) {
EventLoop loop = eventLoop();
if (loop.inEventLoop()) {
shutdownInput0(promise);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);
}
});
}
return promise;
}
private void shutdownOutputDone(final ChannelFuture shutdownOutputFuture, final ChannelPromise promise) {
ChannelFuture shutdownInputFuture = shutdownInput();
if (shutdownInputFuture.isDone()) {
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
} else {
shutdownInputFuture.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture shutdownInputFuture) throws Exception {
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
}
});
}
}
private static void shutdownDone(ChannelFuture shutdownOutputFuture,
ChannelFuture shutdownInputFuture,
ChannelPromise promise) {
Throwable shutdownOutputCause = shutdownOutputFuture.cause();
Throwable shutdownInputCause = shutdownInputFuture.cause();
if (shutdownOutputCause != null) {
if (shutdownInputCause != null) {
logger.info("Exception suppressed because a previous exception occurred.",
shutdownInputCause);
}
promise.setFailure(shutdownOutputCause);
} else if (shutdownInputCause != null) {
promise.setFailure(shutdownInputCause);
} else {
promise.setSuccess();
}
}
@Override
protected final void doRegister(ChannelPromise promise) {
ChannelPromise registerPromise = this.newPromise();
// Ensure that the buffer group is properly set before channel::read
registerPromise.addListener(f -> {
if (f.isSuccess()) {
try {
short bgid = ((IoUringStreamChannelConfig) config()).getBufferGroupId();
if (bgid >= 0) {
final IoUringIoHandler ioUringIoHandler = registration().attachment();
bufferRing = ioUringIoHandler.findBufferRing(bgid);
}
if (active) {
// Register for POLLRDHUP if this channel is already considered active.
schedulePollRdHup();
}
} finally {
promise.setSuccess();
}
} else {
promise.setFailure(f.cause());
}
});
super.doRegister(registerPromise);
}
@Override
protected Object filterOutboundMessage(Object msg) {
// Since we cannot use synchronous sendfile,
// the channel can only support DefaultFileRegion instead of FileRegion.
if (IoUring.isSpliceSupported() && msg instanceof DefaultFileRegion) {
return new IoUringFileRegion((DefaultFileRegion) msg);
}
return super.filterOutboundMessage(msg);
}
protected | AbstractIoUringStreamChannel |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/manytomany/ManyToManyBidirectionalTest.java | {
"start": 3657,
"end": 4105
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String street;
@Column(name = "`number`")
private String number;
private String postalCode;
@ManyToMany(mappedBy = "addresses")
private List<Person> owners = new ArrayList<>();
public Address() {
}
public Address(String street, String number, String postalCode) {
this.street = street;
this.number = number;
this.postalCode = postalCode;
}
}
}
| Address |
java | google__guice | core/src/com/google/inject/spi/DefaultBindingScopingVisitor.java | {
"start": 743,
"end": 1042
} | interface ____ simply delegate to {@link #visitOther()},
* returning its result.
*
* @param <V> any type to be returned by the visit method. Use {@link Void} with {@code return null}
* if no return type is needed.
* @author jessewilson@google.com (Jesse Wilson)
* @since 2.0
*/
public | methods |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java | {
"start": 2466,
"end": 4765
} | class ____ extends FilterInitializer {
public Initializer() {}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addGlobalFilter("recording", RecordingFilter.class.getName(), null);
}
}
}
/** access a url, ignoring some IOException such as the page does not exist */
static void access(String urlstring) throws IOException {
LOG.warn("access " + urlstring);
URL url = new URL(urlstring);
URLConnection connection = url.openConnection();
connection.connect();
try {
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
try {
for(; in.readLine() != null; );
} finally {
in.close();
}
} catch(IOException ioe) {
LOG.warn("urlstring=" + urlstring, ioe);
}
}
@Test
public void testServletFilter() throws Exception {
Configuration conf = new Configuration();
//start a http server with CountingFilter
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
RecordingFilter.Initializer.class.getName());
HttpServer2 http = createTestServer(conf);
http.start();
final String fsckURL = "/fsck";
final String stacksURL = "/stacks";
final String ajspURL = "/a.jsp";
final String listPathsURL = "/listPaths";
final String dataURL = "/data";
final String streamFile = "/streamFile";
final String rootURL = "/";
final String allURL = "/*";
final String outURL = "/static/a.out";
final String logURL = "/logs/a.log";
final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
dataURL, streamFile, rootURL, allURL, outURL, logURL};
//access the urls
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < urls.length; i++) {
access(prefix + urls[i]);
}
} finally {
http.stop();
}
LOG.info("RECORDS = " + RECORDS);
//verify records
for(int i = 0; i < urls.length; i++) {
assertTrue(RECORDS.remove(urls[i]));
}
assertTrue(RECORDS.size()==1);
// Accesing "/" will redirect to /index.html
assertTrue(RECORDS.contains("/index.html"));
}
}
| Initializer |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java | {
"start": 7413,
"end": 9535
} | class ____ extends Column {
private final ResolvedExpression expression;
private ComputedColumn(String name, DataType dataType, ResolvedExpression expression) {
this(name, dataType, expression, null);
}
private ComputedColumn(
String name, DataType dataType, ResolvedExpression expression, String comment) {
super(name, dataType, comment);
this.expression = expression;
}
@Override
public ComputedColumn withComment(String comment) {
if (comment == null) {
return this;
}
return new ComputedColumn(name, dataType, expression, comment);
}
@Override
public boolean isPhysical() {
return false;
}
@Override
public boolean isPersisted() {
return false;
}
public ResolvedExpression getExpression() {
return expression;
}
@Override
public Optional<String> explainExtras() {
return Optional.of("AS " + expression.asSummaryString());
}
@Override
public Column copy(DataType newDataType) {
return new ComputedColumn(name, newDataType, expression, comment);
}
@Override
public Column rename(String newName) {
return new ComputedColumn(newName, dataType, expression, comment);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
ComputedColumn that = (ComputedColumn) o;
return expression.equals(that.expression);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), expression);
}
}
/** Representation of a metadata column. */
@PublicEvolving
public static final | ComputedColumn |
java | elastic__elasticsearch | x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java | {
"start": 4651,
"end": 10362
} | class ____ extends Plugin implements ActionPlugin, PersistentTaskPlugin {
private final SetOnce<SystemIndices> systemIndices = new SetOnce<>();
@Override
public Collection<?> createComponents(PluginServices services) {
systemIndices.set(services.systemIndices());
var registry = new MigrateTemplateRegistry(
services.environment().settings(),
services.clusterService(),
services.threadPool(),
services.client(),
services.xContentRegistry()
);
registry.initialize();
return List.of(registry);
}
@Override
public List<RestHandler> getRestHandlers(
Settings unused,
NamedWriteableRegistry namedWriteableRegistry,
RestController restController,
ClusterSettings clusterSettings,
IndexScopedSettings indexScopedSettings,
SettingsFilter settingsFilter,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster,
Predicate<NodeFeature> clusterSupportsFeature
) {
List<RestHandler> handlers = new ArrayList<>();
handlers.add(new RestMigrationReindexAction());
handlers.add(new RestGetMigrationReindexStatusAction());
handlers.add(new RestCancelReindexDataStreamAction());
handlers.add(new RestCreateIndexFromSourceAction());
handlers.add(new RestGetFeatureUpgradeStatusAction());
handlers.add(new RestPostFeatureUpgradeAction());
return handlers;
}
@Override
public List<ActionHandler> getActions() {
List<ActionHandler> actions = new ArrayList<>();
actions.add(new ActionHandler(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class));
actions.add(new ActionHandler(GetMigrationReindexStatusAction.INSTANCE, GetMigrationReindexStatusTransportAction.class));
actions.add(new ActionHandler(CancelReindexDataStreamAction.INSTANCE, CancelReindexDataStreamTransportAction.class));
actions.add(new ActionHandler(ReindexDataStreamIndexAction.INSTANCE, ReindexDataStreamIndexTransportAction.class));
actions.add(new ActionHandler(CreateIndexFromSourceAction.INSTANCE, CreateIndexFromSourceTransportAction.class));
actions.add(new ActionHandler(CopyLifecycleIndexMetadataAction.INSTANCE, CopyLifecycleIndexMetadataTransportAction.class));
actions.add(new ActionHandler(GetFeatureUpgradeStatusAction.INSTANCE, TransportGetFeatureUpgradeStatusAction.class));
actions.add(new ActionHandler(PostFeatureUpgradeAction.INSTANCE, TransportPostFeatureUpgradeAction.class));
return actions;
}
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return Stream.concat(
SystemIndexMigrationExecutor.getNamedXContentParsers().stream(),
Stream.of(
new NamedXContentRegistry.Entry(
PersistentTaskState.class,
new ParseField(ReindexDataStreamPersistentTaskState.NAME),
ReindexDataStreamPersistentTaskState::fromXContent
),
new NamedXContentRegistry.Entry(
PersistentTaskParams.class,
new ParseField(ReindexDataStreamTaskParams.NAME),
ReindexDataStreamTaskParams::fromXContent
)
)
).toList();
}
@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Stream.concat(
SystemIndexMigrationExecutor.getNamedWriteables().stream(),
Stream.of(
new NamedWriteableRegistry.Entry(Metadata.ProjectCustom.class, FeatureMigrationResults.TYPE, FeatureMigrationResults::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, FeatureMigrationResults.TYPE, FeatureMigrationResults::readDiffFrom),
new NamedWriteableRegistry.Entry(
PersistentTaskState.class,
ReindexDataStreamPersistentTaskState.NAME,
ReindexDataStreamPersistentTaskState::new
),
new NamedWriteableRegistry.Entry(
PersistentTaskParams.class,
ReindexDataStreamTaskParams.NAME,
ReindexDataStreamTaskParams::new
),
new NamedWriteableRegistry.Entry(Task.Status.class, ReindexDataStreamStatus.NAME, ReindexDataStreamStatus::new)
)
).toList();
}
@Override
public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(
ClusterService clusterService,
ThreadPool threadPool,
Client client,
SettingsModule settingsModule,
IndexNameExpressionResolver expressionResolver
) {
return List.of(
new SystemIndexMigrationExecutor(
client,
clusterService,
systemIndices.get(),
settingsModule.getIndexScopedSettings(),
threadPool
),
new ReindexDataStreamPersistentTaskExecutor(
new OriginSettingClient(client, REINDEX_DATA_STREAM_ORIGIN),
clusterService,
ReindexDataStreamTask.TASK_NAME,
threadPool
)
);
}
@Override
public List<Setting<?>> getSettings() {
List<Setting<?>> pluginSettings = new ArrayList<>();
pluginSettings.add(MAX_CONCURRENT_INDICES_REINDEXED_PER_DATA_STREAM_SETTING);
pluginSettings.add(REINDEX_MAX_REQUESTS_PER_SECOND_SETTING);
return pluginSettings;
}
}
| MigratePlugin |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/repository/legacy/metadata/ArtifactMetadataSource.java | {
"start": 1212,
"end": 3630
} | interface ____ {
ResolutionGroup retrieve(MetadataResolutionRequest request) throws ArtifactMetadataRetrievalException;
ResolutionGroup retrieve(
Artifact artifact, ArtifactRepository localRepository, List<ArtifactRepository> remoteRepositories)
throws ArtifactMetadataRetrievalException;
/**
* Get a list of available versions for an artifact in the remote repository
*
* @param artifact artifact we are interested in. Only <code>groupid</code> and <code>artifactId</code>
* are needed, for instance the following code will work
* <code>artifactFactory.createProjectArtifact( "org.apache.maven", "maven", "" )</code>
* @param localRepository local repository
* @param remoteRepositories remote repositories, {@link List} $lt; {@link ArtifactRepository} >
* @return {@link List} $lt; {@link ArtifactVersion} >
* @throws ArtifactMetadataRetrievalException
* in case of error while retrieving repository metadata from the repository.
*/
List<ArtifactVersion> retrieveAvailableVersions(
Artifact artifact, ArtifactRepository localRepository, List<ArtifactRepository> remoteRepositories)
throws ArtifactMetadataRetrievalException;
/**
* Get a list of available versions for an artifact in the remote deployment repository. This ignores any update
* policy checks and mirrors and always retrieves the latest information from the given repository.
*
* @param artifact artifact we are interested in. Only <code>groupid</code> and <code>artifactId</code> are
* needed, for instance the following code will work
* <code>artifactFactory.createProjectArtifact( "org.apache.maven", "maven", "" )</code>
* @param localRepository local repository
* @param remoteRepository remote repository
* @return {@link List} $lt; {@link ArtifactVersion} >
* @throws ArtifactMetadataRetrievalException
* in case of error while retrieving repository metadata from the repository.
*/
List<ArtifactVersion> retrieveAvailableVersionsFromDeploymentRepository(
Artifact artifact, ArtifactRepository localRepository, ArtifactRepository remoteRepository)
throws ArtifactMetadataRetrievalException;
}
| ArtifactMetadataSource |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java | {
"start": 10822,
"end": 11186
} | class ____ annotation are {@code null}.
* @since 3.4
*/
public static Field[] getFieldsWithAnnotation(final Class<?> cls, final Class<? extends Annotation> annotationCls) {
return getFieldsListWithAnnotation(cls, annotationCls).toArray(ArrayUtils.EMPTY_FIELD_ARRAY);
}
/**
* Reads the named {@code public} {@link Field}. Only the | or |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClaimValidationUtilsTest.java | {
"start": 1422,
"end": 6631
} | class ____ extends OAuthBearerTest {
@Test
public void testValidateScopes() {
Set<String> scopes = ClaimValidationUtils.validateScopes("scope", Arrays.asList(" a ", " b "));
assertEquals(2, scopes.size());
assertTrue(scopes.contains("a"));
assertTrue(scopes.contains("b"));
}
@Test
public void testValidateScopesDisallowsDuplicates() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", "a")));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", " a ")));
}
@Test
public void testValidateScopesDisallowsEmptyNullAndWhitespace() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "")));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", null)));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", " ")));
}
@Test
public void testValidateScopesResultIsImmutable() {
SortedSet<String> callerSet = new TreeSet<>(Arrays.asList("a", "b", "c"));
Set<String> scopes = ClaimValidationUtils.validateScopes("scope", callerSet);
assertEquals(3, scopes.size());
callerSet.add("d");
assertEquals(4, callerSet.size());
assertTrue(callerSet.contains("d"));
assertEquals(3, scopes.size());
assertFalse(scopes.contains("d"));
callerSet.remove("c");
assertEquals(3, callerSet.size());
assertFalse(callerSet.contains("c"));
assertEquals(3, scopes.size());
assertTrue(scopes.contains("c"));
callerSet.clear();
assertEquals(0, callerSet.size());
assertEquals(3, scopes.size());
}
@Test
public void testValidateScopesResultThrowsExceptionOnMutation() {
SortedSet<String> callerSet = new TreeSet<>(Arrays.asList("a", "b", "c"));
Set<String> scopes = ClaimValidationUtils.validateScopes("scope", callerSet);
assertThrows(UnsupportedOperationException.class, scopes::clear);
}
@Test
public void testValidateExpiration() {
Long expected = 1L;
Long actual = ClaimValidationUtils.validateExpiration("exp", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateExpirationAllowsZero() {
Long expected = 0L;
Long actual = ClaimValidationUtils.validateExpiration("exp", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateExpirationDisallowsNull() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateExpiration("exp", null));
}
@Test
public void testValidateExpirationDisallowsNegatives() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateExpiration("exp", -1L));
}
@Test
public void testValidateSubject() {
String expected = "jdoe";
String actual = ClaimValidationUtils.validateSubject("sub", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateSubjectDisallowsEmptyNullAndWhitespace() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", ""));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", null));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", " "));
}
@Test
public void testValidateClaimNameOverride() {
String expected = "email";
String actual = ClaimValidationUtils.validateClaimNameOverride("sub", String.format(" %s ", expected));
assertEquals(expected, actual);
}
@Test
public void testValidateClaimNameOverrideDisallowsEmptyNullAndWhitespace() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", ""));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", null));
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", " "));
}
@Test
public void testValidateIssuedAt() {
Long expected = 1L;
Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateIssuedAtAllowsZero() {
Long expected = 0L;
Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateIssuedAtAllowsNull() {
Long expected = null;
Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected);
assertEquals(expected, actual);
}
@Test
public void testValidateIssuedAtDisallowsNegatives() {
assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateIssuedAt("iat", -1L));
}
}
| ClaimValidationUtilsTest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 35502,
"end": 35700
} | class ____ check, not null.
* @param methodName the name of the method.
* @param parameterTypes the list of parameters.
* @return the method.
* @throws NullPointerException if the | to |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/validation/JPATestValidationOfNonEntitiesResource.java | {
"start": 394,
"end": 1335
} | class ____ {
@Inject
Validator validator;
@Path("/bean")
@GET
public String bean() {
Set<ConstraintViolation<MyNonEntity>> constraintViolations = validator.validate(new MyNonEntity());
if (constraintViolations.size() != 1) {
return "ko";
}
if (!constraintViolations.iterator().next().getPropertyPath().toString().equals("name")) {
return "ko";
}
return "ok";
}
@Path("/value")
@GET
public String value() {
Set<ConstraintViolation<MyNonEntity>> constraintViolations = validator.validateValue(MyNonEntity.class, "name", null);
if (constraintViolations.size() != 1) {
return "ko";
}
if (!constraintViolations.iterator().next().getPropertyPath().toString().equals("name")) {
return "ko";
}
return "ok";
}
public static | JPATestValidationOfNonEntitiesResource |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/ParserError.java | {
"start": 33,
"end": 1618
} | enum ____ implements ErrorCode {
GENERAL_ERROR,
/**
* <code>{fo\to}</code>
*/
INVALID_IDENTIFIER,
/**
* <code>{data: }</code>
*/
EMPTY_EXPRESSION,
/**
* <code>{#include /}</code>
*/
MANDATORY_SECTION_PARAMS_MISSING,
/**
* <code>{#if 'foo is null}{/}</code>
*/
UNTERMINATED_STRING_LITERAL,
/**
* <code>{# foo=1 /}</code>
*/
NO_SECTION_NAME,
/**
* <code>{#foo test /}</code> and no helper registered for {@code foo}
*/
NO_SECTION_HELPER_FOUND,
/**
* <code>{#if test}Hello {name}!{/for}</code>
*/
SECTION_END_DOES_NOT_MATCH_START,
/**
* <code>{#if test}Hello{#else}Hi{/elsa}{/if}</code>
*/
SECTION_BLOCK_END_DOES_NOT_MATCH_START,
/**
* <code>{#if true}Bye...{/if} Hello {/if}</code>
*/
SECTION_START_NOT_FOUND,
/**
* <code>{@com.foo.Foo }</code>
*/
INVALID_PARAM_DECLARATION,
/**
* <code>{#if test}Hello {name}</code>
*/
UNTERMINATED_SECTION,
/**
* <code>{name</code>
*/
UNTERMINATED_EXPRESSION,
/**
* <code>{#if (foo || bar}{/}</code>
*/
UNTERMINATED_STRING_LITERAL_OR_COMPOSITE_PARAMETER,
/**
* <code>{foo.baz()(}</code>
*/
INVALID_VIRTUAL_METHOD,
/**
* <code>{foo.baz[}</code>
*/
INVALID_BRACKET_EXPRESSION,
/**
* <code>{foo[bar]}</code>
*/
INVALID_VALUE_BRACKET_NOTATION,
;
@Override
public String getName() {
return "PARSER_" + name();
}
}
| ParserError |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java | {
"start": 1233,
"end": 8759
} | class ____ extends AbstractUnfollowIndexStepTestCase<PauseFollowerIndexStep> {
@Override
protected PauseFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey nextKey) {
return new PauseFollowerIndexStep(key, nextKey, client);
}
public void testPauseFollowingIndex() throws Exception {
IndexMetadata indexMetadata = IndexMetadata.builder("follower-index")
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true"))
.putCustom(CCR_METADATA_KEY, Map.of())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
ProjectState state = setupClusterStateWithFollowingIndex(indexMetadata);
Mockito.doAnswer(invocation -> {
PauseFollowAction.Request request = (PauseFollowAction.Request) invocation.getArguments()[1];
assertThat(request.getFollowIndex(), equalTo("follower-index"));
@SuppressWarnings("unchecked")
ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[2];
listener.onResponse(AcknowledgedResponse.TRUE);
return null;
}).when(projectClient).execute(Mockito.same(PauseFollowAction.INSTANCE), Mockito.any(), Mockito.any());
PauseFollowerIndexStep step = new PauseFollowerIndexStep(randomStepKey(), randomStepKey(), client);
performActionAndWait(step, indexMetadata, state, null);
}
public void testRequestNotAcknowledged() {
IndexMetadata indexMetadata = IndexMetadata.builder("follower-index")
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true"))
.putCustom(CCR_METADATA_KEY, Map.of())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
ProjectState state = setupClusterStateWithFollowingIndex(indexMetadata);
Mockito.doAnswer(invocation -> {
@SuppressWarnings("unchecked")
ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[2];
listener.onResponse(AcknowledgedResponse.FALSE);
return null;
}).when(projectClient).execute(Mockito.same(PauseFollowAction.INSTANCE), Mockito.any(), Mockito.any());
PauseFollowerIndexStep step = new PauseFollowerIndexStep(randomStepKey(), randomStepKey(), client);
Exception e = expectThrows(Exception.class, () -> performActionAndWait(step, indexMetadata, state, null));
assertThat(e.getMessage(), is("pause follow request failed to be acknowledged"));
}
public void testPauseFollowingIndexFailed() {
IndexMetadata indexMetadata = IndexMetadata.builder("follower-index")
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true"))
.putCustom(CCR_METADATA_KEY, Map.of())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
ProjectState state = setupClusterStateWithFollowingIndex(indexMetadata);
// Mock pause follow api call:
Exception error = new RuntimeException();
Mockito.doAnswer(invocation -> {
PauseFollowAction.Request request = (PauseFollowAction.Request) invocation.getArguments()[1];
assertThat(request.getFollowIndex(), equalTo("follower-index"));
ActionListener<?> listener = (ActionListener<?>) invocation.getArguments()[2];
listener.onFailure(error);
return null;
}).when(projectClient).execute(Mockito.same(PauseFollowAction.INSTANCE), Mockito.any(), Mockito.any());
PauseFollowerIndexStep step = new PauseFollowerIndexStep(randomStepKey(), randomStepKey(), client);
assertSame(error, expectThrows(Exception.class, () -> performActionAndWait(step, indexMetadata, state, null)));
Mockito.verify(projectClient).execute(Mockito.same(PauseFollowAction.INSTANCE), Mockito.any(), Mockito.any());
Mockito.verify(client).projectClient(state.projectId());
Mockito.verifyNoMoreInteractions(client);
}
public final void testNoShardFollowPersistentTasks() throws Exception {
IndexMetadata indexMetadata = IndexMetadata.builder("managed-index")
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true"))
.putCustom(CCR_METADATA_KEY, Map.of())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
PersistentTasksCustomMetadata.Builder emptyPersistentTasks = PersistentTasksCustomMetadata.builder();
ProjectState state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.putCustom(PersistentTasksCustomMetadata.TYPE, emptyPersistentTasks.build())
.put(indexMetadata, false)
);
PauseFollowerIndexStep step = newInstance(randomStepKey(), randomStepKey());
performActionAndWait(step, indexMetadata, state, null);
Mockito.verifyNoMoreInteractions(client);
}
public final void testNoShardFollowTasksForManagedIndex() throws Exception {
IndexMetadata managedIndex = IndexMetadata.builder("managed-index")
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true"))
.numberOfShards(1)
.numberOfReplicas(0)
.build();
IndexMetadata followerIndex = IndexMetadata.builder("follower-index")
.settings(settings(IndexVersion.current()))
.putCustom(CCR_METADATA_KEY, Map.of())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
final var initialState = setupClusterStateWithFollowingIndex(followerIndex);
final ProjectState state = initialState.updatedState(builder -> builder.put(managedIndex, false))
.projectState(initialState.projectId());
PauseFollowerIndexStep step = newInstance(randomStepKey(), randomStepKey());
performActionAndWait(step, managedIndex, state, null);
Mockito.verifyNoMoreInteractions(client);
}
private static ProjectState setupClusterStateWithFollowingIndex(IndexMetadata followerIndex) {
PersistentTasksCustomMetadata.Builder persistentTasks = PersistentTasksCustomMetadata.builder()
.addTask(
"1",
ShardFollowTask.NAME,
new ShardFollowTask(
null,
new ShardId(followerIndex.getIndex(), 0),
new ShardId("leader_index", "", 0),
1024,
1024,
1,
1,
ByteSizeValue.of(32, ByteSizeUnit.MB),
ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES),
10240,
ByteSizeValue.of(512, ByteSizeUnit.MB),
TimeValue.timeValueMillis(10),
TimeValue.timeValueMillis(10),
Map.of()
),
null
);
return projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.putCustom(PersistentTasksCustomMetadata.TYPE, persistentTasks.build())
.put(followerIndex, false)
);
}
}
| PauseFollowerIndexStepTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ResourceView.java | {
"start": 851,
"end": 1277
} | interface ____ {
/**
* Get virtual memory allocated to the containers.
* @return Virtual memory in bytes.
*/
long getVmemAllocatedForContainers();
boolean isVmemCheckEnabled();
/**
* Get physical memory allocated to the containers.
* @return Physical memory in bytes.
*/
long getPmemAllocatedForContainers();
boolean isPmemCheckEnabled();
long getVCoresAllocatedForContainers();
}
| ResourceView |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/DataSourceBuilder.java | {
"start": 21245,
"end": 21869
} | class ____ extends MappedDataSourceProperties<HikariDataSource> {
HikariDataSourceProperties() {
add(DataSourceProperty.URL, HikariDataSource::getJdbcUrl, HikariDataSource::setJdbcUrl);
add(DataSourceProperty.DRIVER_CLASS_NAME, HikariDataSource::getDriverClassName,
HikariDataSource::setDriverClassName);
add(DataSourceProperty.USERNAME, HikariDataSource::getUsername, HikariDataSource::setUsername);
add(DataSourceProperty.PASSWORD, HikariDataSource::getPassword, HikariDataSource::setPassword);
}
}
/**
* {@link DataSourceProperties} for Tomcat Pool.
*/
private static | HikariDataSourceProperties |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java | {
"start": 9598,
"end": 10894
} | class ____
extends ZlibDecompressor implements DirectDecompressor {
public ZlibDirectDecompressor() {
super(CompressionHeader.DEFAULT_HEADER, 0);
}
public ZlibDirectDecompressor(CompressionHeader header, int directBufferSize) {
super(header, directBufferSize);
}
@Override
public boolean finished() {
return (endOfInput && super.finished());
}
@Override
public void reset() {
super.reset();
endOfInput = true;
}
private boolean endOfInput;
@Override
public void decompress(ByteBuffer src, ByteBuffer dst)
throws IOException {
assert dst.isDirect() : "dst.isDirect()";
assert src.isDirect() : "src.isDirect()";
assert dst.remaining() > 0 : "dst.remaining() > 0";
this.inflateDirect(src, dst);
endOfInput = !src.hasRemaining();
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
@Override
public int decompress(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
}
}
| ZlibDirectDecompressor |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/internal/security/SslContextProvider.java | {
"start": 2026,
"end": 5158
} | class ____ {
private final Executor executor;
protected Callback(Executor executor) {
this.executor = executor;
}
@VisibleForTesting public Executor getExecutor() {
return executor;
}
/** Informs callee of new/updated SslContext. */
@VisibleForTesting public abstract void updateSslContextAndExtendedX509TrustManager(
AbstractMap.SimpleImmutableEntry<SslContext, X509TrustManager> sslContext);
/** Informs callee of an exception that was generated. */
@VisibleForTesting protected abstract void onException(Throwable throwable);
}
protected SslContextProvider(BaseTlsContext tlsContext) {
this.tlsContext = checkNotNull(tlsContext, "tlsContext");
}
protected CommonTlsContext getCommonTlsContext() {
return tlsContext.getCommonTlsContext();
}
protected void setClientAuthValues(
SslContextBuilder sslContextBuilder, XdsTrustManagerFactory xdsTrustManagerFactory)
throws CertificateException, IOException, CertStoreException {
DownstreamTlsContext downstreamTlsContext = getDownstreamTlsContext();
if (xdsTrustManagerFactory != null) {
sslContextBuilder.trustManager(xdsTrustManagerFactory);
sslContextBuilder.clientAuth(
downstreamTlsContext.isRequireClientCertificate()
? ClientAuth.REQUIRE
: ClientAuth.OPTIONAL);
} else {
sslContextBuilder.clientAuth(ClientAuth.NONE);
}
}
/** Returns the DownstreamTlsContext in this SslContextProvider if this is server side. **/
public DownstreamTlsContext getDownstreamTlsContext() {
checkState(tlsContext instanceof DownstreamTlsContext,
"expected DownstreamTlsContext");
return ((DownstreamTlsContext)tlsContext);
}
/** Returns the UpstreamTlsContext in this SslContextProvider if this is client side. **/
public UpstreamTlsContext getUpstreamTlsContext() {
checkState(tlsContext instanceof UpstreamTlsContext,
"expected UpstreamTlsContext");
return ((UpstreamTlsContext)tlsContext);
}
/** Closes this provider and releases any resources. */
@Override
public abstract void close();
/**
* Registers a callback on the given executor. The callback will run when SslContext becomes
* available or immediately if the result is already available.
*/
public abstract void addCallback(Callback callback);
protected final void performCallback(
final SslContextGetter sslContextGetter, final Callback callback) {
checkNotNull(sslContextGetter, "sslContextGetter");
checkNotNull(callback, "callback");
callback.executor.execute(
new Runnable() {
@Override
public void run() {
try {
AbstractMap.SimpleImmutableEntry<SslContext, X509TrustManager> sslContextAndTm =
sslContextGetter.get();
callback.updateSslContextAndExtendedX509TrustManager(sslContextAndTm);
} catch (Throwable e) {
callback.onException(e);
}
}
});
}
/** Allows implementations to compute or get SslContext. */
protected | Callback |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java | {
"start": 1582,
"end": 4220
} | class ____ extends IOException {
private static final long serialVersionUID = 1L;
public RequestStopException(String msg) {
super(msg);
}
}
public MetaRecoveryContext(int force) {
this.force = force;
}
/**
* Display a prompt to the user and get his or her choice.
*
* @param prompt The prompt to display
* @param firstChoice First choice (will be taken if autoChooseDefault is
* true)
* @param choices Other choies
*
* @return The choice that was taken
* @throws IOException
*/
public String ask(String prompt, String firstChoice, String... choices)
throws IOException {
while (true) {
System.err.print(prompt);
if (force > FORCE_NONE) {
System.out.println("automatically choosing " + firstChoice);
return firstChoice;
}
StringBuilder responseBuilder = new StringBuilder();
while (true) {
int c = System.in.read();
if (c == -1 || c == '\r' || c == '\n') {
break;
}
responseBuilder.append((char)c);
}
String response = responseBuilder.toString();
if (response.equalsIgnoreCase(firstChoice))
return firstChoice;
for (String c : choices) {
if (response.equalsIgnoreCase(c)) {
return c;
}
}
System.err.print("I'm sorry, I cannot understand your response.\n");
}
}
public static void editLogLoaderPrompt(String prompt,
MetaRecoveryContext recovery, String contStr)
throws IOException, RequestStopException
{
if (recovery == null) {
throw new IOException(prompt);
}
LOG.error(prompt);
String answer = recovery.ask("\nEnter 'c' to continue, " + contStr + "\n" +
"Enter 's' to stop reading the edit log here, abandoning any later " +
"edits\n" +
"Enter 'q' to quit without saving\n" +
"Enter 'a' to always select the first choice in the future " +
"without prompting. " +
"(c/s/q/a)\n", "c", "s", "q", "a");
if (answer.equals("c")) {
LOG.info("Continuing");
return;
} else if (answer.equals("s")) {
throw new RequestStopException("user requested stop");
} else if (answer.equals("q")) {
recovery.quit();
} else {
recovery.setForce(FORCE_FIRST_CHOICE);
return;
}
}
/** Log a message and quit */
public void quit() {
LOG.error("Exiting on user request.");
System.exit(0);
}
public int getForce() {
return this.force;
}
public void setForce(int force) {
this.force = force;
}
}
| RequestStopException |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/tck/ObserveOnTckTest.java | {
"start": 823,
"end": 1124
} | class ____ extends BaseTck<Integer> {
public ObserveOnTckTest() {
super(100L);
}
@Override
public Publisher<Integer> createPublisher(long elements) {
return
Flowable.range(0, (int)elements).observeOn(Schedulers.single())
;
}
}
| ObserveOnTckTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java | {
"start": 1524,
"end": 1933
} | class ____ extends ActionType<StopTransformAction.Response> {
public static final StopTransformAction INSTANCE = new StopTransformAction();
public static final String NAME = "cluster:admin/transform/stop";
public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
private StopTransformAction() {
super(NAME);
}
public static final | StopTransformAction |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/WebApplicationObjectSupport.java | {
"start": 2093,
"end": 5762
} | class ____ to enforce running in an ApplicationContext.
* All accessors will throw IllegalStateException if not running in a context.
* @see #getApplicationContext()
* @see #getMessageSourceAccessor()
* @see #getWebApplicationContext()
* @see #getServletContext()
* @see #getTempDir()
*/
@Override
protected boolean isContextRequired() {
return true;
}
/**
* Calls {@link #initServletContext(jakarta.servlet.ServletContext)} if the
* given ApplicationContext is a {@link WebApplicationContext}.
*/
@Override
protected void initApplicationContext(ApplicationContext context) {
super.initApplicationContext(context);
if (this.servletContext == null && context instanceof WebApplicationContext wac) {
this.servletContext = wac.getServletContext();
if (this.servletContext != null) {
initServletContext(this.servletContext);
}
}
}
/**
* Subclasses may override this for custom initialization based
* on the ServletContext that this application object runs in.
* <p>The default implementation is empty. Called by
* {@link #initApplicationContext(org.springframework.context.ApplicationContext)}
* as well as {@link #setServletContext(jakarta.servlet.ServletContext)}.
* @param servletContext the ServletContext that this application object runs in
* (never {@code null})
*/
protected void initServletContext(ServletContext servletContext) {
}
/**
* Return the current application context as WebApplicationContext.
* <p><b>NOTE:</b> Only use this if you actually need to access
* WebApplicationContext-specific functionality. Preferably use
* {@code getApplicationContext()} or {@code getServletContext()}
* else, to be able to run in non-WebApplicationContext environments as well.
* @throws IllegalStateException if not running in a WebApplicationContext
* @see #getApplicationContext()
*/
protected final @Nullable WebApplicationContext getWebApplicationContext() throws IllegalStateException {
ApplicationContext ctx = getApplicationContext();
if (ctx instanceof WebApplicationContext wac) {
return wac;
}
else if (isContextRequired()) {
throw new IllegalStateException("WebApplicationObjectSupport instance [" + this +
"] does not run in a WebApplicationContext but in: " + ctx);
}
else {
return null;
}
}
/**
* Return the current ServletContext.
* @throws IllegalStateException if not running within a required ServletContext
* @see #isContextRequired()
*/
protected final @Nullable ServletContext getServletContext() throws IllegalStateException {
if (this.servletContext != null) {
return this.servletContext;
}
ServletContext servletContext = null;
WebApplicationContext wac = getWebApplicationContext();
if (wac != null) {
servletContext = wac.getServletContext();
}
if (servletContext == null && isContextRequired()) {
throw new IllegalStateException("WebApplicationObjectSupport instance [" + this +
"] does not run within a ServletContext. Make sure the object is fully configured!");
}
return servletContext;
}
/**
* Return the temporary directory for the current web application,
* as provided by the servlet container.
* @return the File representing the temporary directory
* @throws IllegalStateException if not running within a ServletContext
* @see org.springframework.web.util.WebUtils#getTempDir(jakarta.servlet.ServletContext)
*/
protected final File getTempDir() throws IllegalStateException {
ServletContext servletContext = getServletContext();
Assert.state(servletContext != null, "ServletContext is required");
return WebUtils.getTempDir(servletContext);
}
}
| behavior |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/convert/support/GenericConversionService.java | {
"start": 3655,
"end": 4723
} | class ____ those types?");
}
addConverter(new ConverterAdapter(converter, typeInfo[0], typeInfo[1]));
}
@Override
public <S, T> void addConverter(Class<S> sourceType, Class<T> targetType, Converter<? super S, ? extends T> converter) {
addConverter(new ConverterAdapter(
converter, ResolvableType.forClass(sourceType), ResolvableType.forClass(targetType)));
}
@Override
public void addConverter(GenericConverter converter) {
this.converters.add(converter);
invalidateCache();
}
@Override
public void addConverterFactory(ConverterFactory<?, ?> factory) {
ResolvableType[] typeInfo = getRequiredTypeInfo(factory.getClass(), ConverterFactory.class);
if (typeInfo == null && factory instanceof DecoratingProxy decoratingProxy) {
typeInfo = getRequiredTypeInfo(decoratingProxy.getDecoratedClass(), ConverterFactory.class);
}
if (typeInfo == null) {
throw new IllegalArgumentException("Unable to determine source type <S> and target type <T> for your " +
"ConverterFactory [" + factory.getClass().getName() + "]; does the | parameterize |
java | google__guice | extensions/throwingproviders/src/com/google/inject/throwingproviders/CheckedProvides.java | {
"start": 1549,
"end": 1930
} | interface ____ provides this value, a subinterface of {@link CheckedProvider}. */
@SuppressWarnings("rawtypes") // Class literal uses raw type.
Class<? extends CheckedProvider> value();
/**
* Whether exceptions should be put into the Guice scope. Default behavior is that exceptions are
* scoped.
*
* @since 4.0
*/
boolean scopeExceptions() default true;
}
| that |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/jakartaData/java/org/hibernate/processor/test/data/innerclass/Dummy.java | {
"start": 454,
"end": 802
} | class ____ extends Persona {
@Id
Integer id;
String name;
public Integer getId() {
return id;
}
@Override
public void setId(Integer id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
}
@Embeddable
public static | Inner |
java | apache__hadoop | hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java | {
"start": 5562,
"end": 9907
} | class ____ implements JobStory {
protected final JobStory job;
public FilterJobStory(JobStory job) {
this.job = job;
}
public JobConf getJobConf() { return job.getJobConf(); }
public String getName() { return job.getName(); }
public JobID getJobID() { return job.getJobID(); }
public String getUser() { return job.getUser(); }
public long getSubmissionTime() { return job.getSubmissionTime(); }
public InputSplit[] getInputSplits() { return job.getInputSplits(); }
public int getNumberMaps() { return job.getNumberMaps(); }
public int getNumberReduces() { return job.getNumberReduces(); }
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
return job.getTaskInfo(taskType, taskNumber);
}
public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType, int taskNumber,
int taskAttemptNumber) {
return job.getTaskAttemptInfo(taskType, taskNumber, taskAttemptNumber);
}
public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(
int taskNumber, int taskAttemptNumber, int locality) {
return job.getMapTaskAttemptInfoAdjusted(
taskNumber, taskAttemptNumber, locality);
}
public Values getOutcome() {
return job.getOutcome();
}
public String getQueueName() {
return job.getQueueName();
}
}
protected abstract Thread createReaderThread() ;
// gets the next job from the trace and does some bookkeeping for the same
private JobStory getNextJobFromTrace() throws IOException {
JobStory story = jobProducer.getNextJob();
if (story != null) {
++numJobsInTrace;
}
return story;
}
protected JobStory getNextJobFiltered() throws IOException {
JobStory job = getNextJobFromTrace();
// filter out the following jobs
// - unsuccessful jobs
// - jobs with missing submit-time
// - reduce only jobs
// These jobs are not yet supported in Gridmix
while (job != null &&
(job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
job.getSubmissionTime() < 0 || job.getNumberMaps() == 0)) {
if (LOG.isDebugEnabled()) {
List<String> reason = new ArrayList<String>();
if (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS) {
reason.add("STATE (" + job.getOutcome().name() + ")");
}
if (job.getSubmissionTime() < 0) {
reason.add("SUBMISSION-TIME (" + job.getSubmissionTime() + ")");
}
if (job.getNumberMaps() == 0) {
reason.add("ZERO-MAPS-JOB");
}
// TODO This should never happen. Probably we missed something!
if (reason.size() == 0) {
reason.add("N/A");
}
LOG.debug("Ignoring job " + job.getJobID() + " from the input trace."
+ " Reason: " + StringUtils.join(reason, ","));
}
job = getNextJobFromTrace();
}
return null == job ? null : new FilterJobStory(job) {
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
TaskInfo info = this.job.getTaskInfo(taskType, taskNumber);
if (info != null) {
info = new MinTaskInfo(info);
} else {
info = new MinTaskInfo(new TaskInfo(0, 0, 0, 0, 0));
}
return info;
}
};
}
/**
* Obtain the error that caused the thread to exit unexpectedly.
*/
public IOException error() {
return error;
}
/**
* Add is disabled.
* @throws UnsupportedOperationException
*/
public void add(Void ignored) {
throw new UnsupportedOperationException(getClass().getName() +
" is at the start of the pipeline and accepts no events");
}
/**
* Start the reader thread, wait for latch if necessary.
*/
public void start() {
rThread.start();
}
/**
* Wait for the reader thread to exhaust the job trace.
*/
public void join(long millis) throws InterruptedException {
rThread.join(millis);
}
/**
* Interrupt the reader thread.
*/
public void shutdown() {
rThread.interrupt();
}
/**
* Interrupt the reader thread. This requires no special consideration, as
* the thread has no pending work queue.
*/
public void abort() {
// Currently no special work
rThread.interrupt();
}
}
| FilterJobStory |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/MulticastProcessor.java | {
"start": 21008,
"end": 26081
} | class ____ extends MulticastTask {
public MulticastReactiveTask(Exchange original, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback,
int size) {
super(original, pairs, callback, size, false);
}
@Override
public void run() {
super.run();
try {
if (done.get()) {
return;
}
// Get next processor exchange pair to sent, skipping null ones
ProcessorExchangePair pair = getNextProcessorExchangePair();
if (pair == null) {
doDone(result.get(), true);
return;
}
boolean hasNext = iterator.hasNext();
Exchange exchange = pair.getExchange();
int index = nbExchangeSent.getAndIncrement();
updateNewExchange(exchange, index, pairs, hasNext);
if (!hasNext) {
allSent.set(true);
}
completion.submit(exchangeResult -> {
// compute time taken if sending to another endpoint
StopWatch watch = beforeSend(pair);
AsyncCallback taskCallback = (doneSync) -> {
afterSend(pair, watch);
// Decide whether to continue with the multicast or not; similar logic to the Pipeline
// remember to test for stop on exception and aggregate before copying back results
String msg = null;
if (LOG.isDebugEnabled()) {
msg = "Multicast processing failed for number " + index;
}
boolean continueProcessing = PipelineHelper.continueProcessing(exchange, msg, LOG);
if (stopOnException && !continueProcessing) {
if (exchange.getException() != null) {
// wrap in exception to explain where it failed
exchange.setException(new CamelExchangeException(
"Multicast processing failed for number " + index, exchange, exchange.getException()));
} else {
// we want to stop on exception, and the exception was handled by the error handler
// this is similar to what the pipeline does, so we should do the same to not surprise end users
// so we should set the failed exchange as the result and be done
result.set(exchange);
}
// and do the done work
doDone(exchange, true);
return;
}
exchangeResult.accept(exchange);
// aggregate exchanges if any
aggregate();
// next step
if (hasNext && !isParallelProcessing()) {
schedule(this);
}
};
AsyncProcessor async = AsyncProcessorConverterHelper.convert(pair.getProcessor());
if (synchronous) {
// force synchronous processing using await manager
// to restrict total number of threads to be bound by the thread-pool of this EIP,
// as otherwise in case of async processing then other thread pools can cause
// unbounded thread use that cannot be controlled by Camel
awaitManager.process(async, exchange);
taskCallback.done(true);
} else {
// async processing in reactive-mode which can use as many threads as possible
// if the downstream processors are async and use different threads
async.process(exchange, taskCallback);
}
});
// after submitting this pair then move on to the next pair (if in parallel mode)
if (hasNext && isParallelProcessing()) {
schedule(this);
}
} catch (Exception e) {
original.setException(e);
doDone(null, false);
}
}
private ProcessorExchangePair getNextProcessorExchangePair() {
ProcessorExchangePair tpair = null;
while (tpair == null && iterator.hasNext()) {
tpair = iterator.next();
}
return tpair;
}
}
/**
* Transacted sub task processed synchronously using {@link Processor#process(Exchange)} with the same thread in a
* while loop control flow.
*/
protected | MulticastReactiveTask |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java | {
"start": 2008,
"end": 12197
} | class ____<T extends MonitoringDoc> extends ESTestCase {
protected String cluster;
protected long timestamp;
protected long interval;
protected MonitoringDoc.Node node;
protected MonitoredSystem system;
protected String type;
protected String id;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
cluster = UUIDs.randomBase64UUID();
timestamp = frequently() ? randomLongBetween(1, DateUtils.MAX_MILLIS_BEFORE_9999) : 0L;
interval = randomNonNegativeLong();
node = frequently() ? MonitoringTestUtils.randomMonitoringNode(random()) : null;
system = randomFrom(MonitoredSystem.values());
type = randomAlphaOfLength(5);
id = randomBoolean() ? randomAlphaOfLength(10) : null;
}
/**
* Creates the {@link MonitoringDoc} to test. Returned value must be deterministic,
* ie multiple calls with the same parameters within the same test must return
* identical objects.
*/
protected abstract T createMonitoringDoc(
String cluster,
long timestamp,
long interval,
@Nullable MonitoringDoc.Node node,
MonitoredSystem system,
String type,
@Nullable String id
);
/**
* Assert that two {@link MonitoringDoc} are equal. By default, it
* uses {@link MonitoringDoc#equals(Object)} and {@link MonitoringDoc#hashCode()} methods
* and also checks XContent equality.
*/
private void assertMonitoringDocEquals(T expected, T actual) throws IOException {
assertEquals(expected, actual);
assertEquals(expected.hashCode(), actual.hashCode());
final boolean human = randomBoolean();
final XContentType xContentType = randomFrom(XContentType.values());
assertToXContentEquivalent(toXContent(expected, xContentType, human), toXContent(actual, xContentType, human), xContentType);
}
public final void testCreateMonitoringDoc() throws IOException {
final int nbIterations = randomIntBetween(3, 20);
for (int i = 0; i < nbIterations; i++) {
final T document1 = createMonitoringDoc(cluster, timestamp, interval, node, system, type, id);
final T document2 = createMonitoringDoc(cluster, timestamp, interval, node, system, type, id);
assertNotSame(document1, document2);
assertMonitoringDocEquals(document1, document2);
}
}
public final void testConstructorClusterMustNotBeNull() {
expectThrows(NullPointerException.class, () -> createMonitoringDoc(null, timestamp, interval, node, system, type, id));
}
public final void testConstructor() {
final T document = createMonitoringDoc(cluster, timestamp, interval, node, system, type, id);
assertThat(document.getCluster(), equalTo(cluster));
assertThat(document.getTimestamp(), equalTo(timestamp));
assertThat(document.getIntervalMillis(), equalTo(interval));
assertThat(document.getNode(), equalTo(node));
assertMonitoringDoc(document);
}
/**
* Asserts that the specific fields of a {@link MonitoringDoc} have
* the expected values.
*/
protected abstract void assertMonitoringDoc(T document);
public abstract void testToXContent() throws IOException;
/**
* Test that {@link MonitoringDoc} rendered using {@link ToXContent#toXContent(XContentBuilder, ToXContent.Params)}
* contain a common set of fields.
*/
public final void testToXContentContainsCommonFields() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
final T document = createMonitoringDoc(cluster, timestamp, interval, node, system, type, id);
final BytesReference bytes = XContentHelper.toXContent(document, xContentType, false);
try (
XContentParser parser = xContentType.xContent()
.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, bytes.streamInput())
) {
final Map<String, ?> map = parser.map();
assertThat(map.get("cluster_uuid"), equalTo(cluster));
assertThat(map.get("timestamp"), equalTo(MonitoringDoc.toUTC(timestamp)));
assertThat(map.get("interval_ms"), equalTo(document.getIntervalMillis()));
assertThat(map.get("type"), equalTo(document.getType()));
if (document.getType().equals(ShardMonitoringDoc.TYPE)) {
assertThat(map.get("shard"), notNullValue());
} else {
assertThat(map.get(document.getType()), notNullValue());
}
@SuppressWarnings("unchecked")
final Map<String, ?> sourceNode = (Map<String, ?>) map.get("source_node");
if (node == null) {
assertThat(sourceNode, nullValue());
} else {
assertThat(sourceNode.get("uuid"), equalTo(node.getUUID()));
assertThat(sourceNode.get("transport_address"), equalTo(node.getTransportAddress()));
assertThat(sourceNode.get("ip"), equalTo(node.getIp()));
assertThat(sourceNode.get("host"), equalTo(node.getHost()));
assertThat(sourceNode.get("name"), equalTo(node.getName()));
assertThat(sourceNode.get("timestamp"), equalTo(MonitoringDoc.toUTC(node.getTimestamp())));
}
}
}
public void testMonitoringNodeConstructor() {
id = randomAlphaOfLength(5);
final String name = randomAlphaOfLengthBetween(3, 10);
final TransportAddress fakeTransportAddress = buildNewFakeTransportAddress();
final String host = fakeTransportAddress.address().getHostString();
final String transportAddress = fakeTransportAddress.toString();
final String ip = fakeTransportAddress.getAddress();
timestamp = randomNonNegativeLong();
node = new MonitoringDoc.Node(id, host, transportAddress, ip, name, timestamp);
assertThat(node.getUUID(), equalTo(id));
assertThat(node.getHost(), equalTo(host));
assertThat(node.getTransportAddress(), equalTo(transportAddress));
assertThat(node.getIp(), equalTo(ip));
assertThat(node.getName(), equalTo(name));
assertThat(node.getTimestamp(), equalTo(timestamp));
}
public void testMonitoringNodeToXContent() throws IOException {
node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", 1504169190855L);
final BytesReference xContent = XContentHelper.toXContent(node, XContentType.JSON, randomBoolean());
assertEquals(XContentHelper.stripWhitespace("""
{
"uuid": "_uuid",
"host": "_host",
"transport_address": "_addr",
"ip": "_ip",
"name": "_name",
"timestamp": "2017-08-31T08:46:30.855Z"
}"""), xContent.utf8ToString());
}
public void testMonitoringNodeEqualsAndHashcode() {
final EqualsHashCodeTestUtils.CopyFunction<MonitoringDoc.Node> copy = _node -> new MonitoringDoc.Node(
_node.getUUID(),
_node.getHost(),
_node.getTransportAddress(),
_node.getIp(),
_node.getName(),
_node.getTimestamp()
);
final List<EqualsHashCodeTestUtils.MutateFunction<MonitoringDoc.Node>> mutations = new ArrayList<>();
mutations.add(n -> {
String randomId;
do {
randomId = UUIDs.randomBase64UUID();
} while (randomId.equals(n.getUUID()));
return new MonitoringDoc.Node(randomId, n.getHost(), n.getTransportAddress(), n.getIp(), n.getName(), n.getTimestamp());
});
mutations.add(n -> {
String host;
do {
host = randomAlphaOfLength(10);
} while (host.equals(n.getHost()));
return new MonitoringDoc.Node(n.getUUID(), host, n.getTransportAddress(), n.getIp(), n.getName(), n.getTimestamp());
});
mutations.add(n -> {
String transportAddress;
do {
transportAddress = randomAlphaOfLength(10);
} while (transportAddress.equals(n.getTransportAddress()));
return new MonitoringDoc.Node(n.getUUID(), n.getHost(), transportAddress, n.getIp(), n.getName(), n.getTimestamp());
});
mutations.add(n -> {
String ip;
do {
ip = randomAlphaOfLength(10);
} while (ip.equals(n.getIp()));
return new MonitoringDoc.Node(n.getUUID(), n.getHost(), n.getTransportAddress(), ip, n.getName(), n.getTimestamp());
});
mutations.add(n -> {
String name;
do {
name = randomAlphaOfLengthBetween(3, 10);
} while (name.equals(n.getName()));
return new MonitoringDoc.Node(n.getUUID(), n.getHost(), n.getTransportAddress(), n.getIp(), name, n.getTimestamp());
});
mutations.add(n -> {
long randomTimestamp;
do {
randomTimestamp = randomBoolean() ? randomNonNegativeLong() : 0L;
} while (randomTimestamp == n.getTimestamp());
return new MonitoringDoc.Node(n.getUUID(), n.getHost(), n.getTransportAddress(), n.getIp(), n.getName(), randomTimestamp);
});
final MonitoringDoc.Node sourceNode = MonitoringTestUtils.randomMonitoringNode(random());
checkEqualsAndHashCode(sourceNode, copy, randomFrom(mutations));
}
public void testMonitoringNodeSerialization() throws IOException {
final NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList());
final MonitoringDoc.Node original = MonitoringTestUtils.randomMonitoringNode(random());
final MonitoringDoc.Node deserialized = copyWriteable(original, registry, MonitoringDoc.Node::new);
assertEquals(deserialized, original);
assertEquals(deserialized.hashCode(), original.hashCode());
assertNotSame(deserialized, original);
}
}
| BaseMonitoringDocTestCase |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/StressSubscription.java | {
"start": 922,
"end": 1625
} | class ____<T> implements Subscription {
final CoreSubscriber<? super T> actual;
volatile long requested;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<StressSubscription> REQUESTED =
AtomicLongFieldUpdater.newUpdater(StressSubscription.class, "requested");
public final AtomicInteger requestsCount = new AtomicInteger();
public final AtomicBoolean cancelled = new AtomicBoolean();
public StressSubscription(CoreSubscriber<? super T> actual) {
this.actual = actual;
}
@Override
public void request(long n) {
requestsCount.incrementAndGet();
Operators.addCap(REQUESTED, this, n);
}
@Override
public void cancel() {
cancelled.set(true);
}
}
| StressSubscription |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/tofix/PolymorphicDeserWithJsonUnwrapped4792Test.java | {
"start": 1160,
"end": 1227
} | class ____ {
public String name;
}
public static | Model |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/datasource/lookup/BeanFactoryDataSourceLookup.java | {
"start": 2052,
"end": 2896
} | class ____ the context of a Spring IoC container.
* @param beanFactory the bean factory to be used to lookup {@link DataSource DataSources}
*/
public BeanFactoryDataSourceLookup(BeanFactory beanFactory) {
Assert.notNull(beanFactory, "BeanFactory is required");
this.beanFactory = beanFactory;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
public DataSource getDataSource(String dataSourceName) throws DataSourceLookupFailureException {
Assert.state(this.beanFactory != null, "BeanFactory is required");
try {
return this.beanFactory.getBean(dataSourceName, DataSource.class);
}
catch (BeansException ex) {
throw new DataSourceLookupFailureException(
"Failed to look up DataSource bean with name '" + dataSourceName + "'", ex);
}
}
}
| outside |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ParameterNameTest.java | {
"start": 14751,
"end": 15126
} | class ____ {",
" {",
" // BUG: Diagnostic contains:",
" new Foo(/* bar= */ 1, /* foo= */ 2) {",
" };",
" }",
"}")
.doTest();
}
@Test
public void internalAnnotatedParameterNegative() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java | {
"start": 5463,
"end": 6540
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final Function<DriverContext, BytesRef> out;
private final Function<DriverContext, UnicodeUtil.UTF8CodePoint> cp;
private final EvalOperator.ExpressionEvaluator.Factory str;
private final EvalOperator.ExpressionEvaluator.Factory length;
public Factory(Source source, Function<DriverContext, BytesRef> out,
Function<DriverContext, UnicodeUtil.UTF8CodePoint> cp,
EvalOperator.ExpressionEvaluator.Factory str,
EvalOperator.ExpressionEvaluator.Factory length) {
this.source = source;
this.out = out;
this.cp = cp;
this.str = str;
this.length = length;
}
@Override
public LeftEvaluator get(DriverContext context) {
return new LeftEvaluator(source, out.apply(context), cp.apply(context), str.get(context), length.get(context), context);
}
@Override
public String toString() {
return "LeftEvaluator[" + "str=" + str + ", length=" + length + "]";
}
}
}
| Factory |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesResult.java | {
"start": 1079,
"end": 1749
} | class ____ {
private final Map<String, KafkaFuture<Void>> futures;
/**
* @param futures a map from feature name to future, which can be used to check the status of
* individual feature updates.
*/
UpdateFeaturesResult(final Map<String, KafkaFuture<Void>> futures) {
this.futures = futures;
}
public Map<String, KafkaFuture<Void>> values() {
return futures;
}
/**
* Return a future which succeeds if all the feature updates succeed.
*/
public KafkaFuture<Void> all() {
return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0]));
}
}
| UpdateFeaturesResult |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java | {
"start": 20672,
"end": 21766
} | interface ____ permits NestedSyntheticVectorPath, NestedOffsetSyntheticVectorPath, LeafSyntheticVectorPath {}
/**
* A patch representing a nested path with further child patches.
*
* @param fullPath the full dot-separated path
* @param children the list of child patches
*/
record NestedSyntheticVectorPath(String fullPath, List<SyntheticVectorPatch> children) implements SyntheticVectorPatch {}
/**
* A patch representing an indexed child within a nested structure.
*
* @param offset the index of the nested element
* @param children the list of child patches to apply at this offset
*/
record NestedOffsetSyntheticVectorPath(int offset, List<SyntheticVectorPatch> children) implements SyntheticVectorPatch {}
/**
* A patch representing a leaf field with a value to be applied.
*
* @param fullPath the fully-qualified field name
* @param value the value to assign
*/
record LeafSyntheticVectorPath(String fullPath, Object value) implements SyntheticVectorPatch {}
| SyntheticVectorPatch |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/AliasCollisionException.java | {
"start": 392,
"end": 623
} | class ____ extends SemanticException {
public AliasCollisionException(String message) {
super( message );
}
public AliasCollisionException(String message, Exception cause) {
super( message, cause );
}
}
| AliasCollisionException |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/internal/transformation/TransformationFailedException.java | {
"start": 985,
"end": 1139
} | class ____ extends RuntimeException {
public TransformationFailedException(Throwable cause) {
super(cause);
}
}
| TransformationFailedException |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/test/fakeresolver/FakeEndpointResolver.java | {
"start": 650,
"end": 801
} | class ____<B> implements AddressResolver<FakeAddress>, EndpointResolver<FakeAddress, FakeEndpoint, FakeState<B>, B> {
public static | FakeEndpointResolver |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java | {
"start": 4321,
"end": 9717
} | class ____ extends AbstractConstraint {
private String scope;
private int minCardinality;
private int maxCardinality;
private Set<TargetExpression> targetExpressions;
private NodeAttributeOpCode attributeOpCode;
public SingleConstraint(String scope, int minCardinality,
int maxCardinality, NodeAttributeOpCode opCode,
Set<TargetExpression> targetExpressions) {
this.scope = scope;
this.minCardinality = minCardinality;
this.maxCardinality = maxCardinality;
this.targetExpressions = targetExpressions;
this.attributeOpCode = opCode;
}
public SingleConstraint(String scope, int minCardinality,
int maxCardinality, Set<TargetExpression> targetExpressions) {
this(scope, minCardinality, maxCardinality, NodeAttributeOpCode.NO_OP,
targetExpressions);
}
public SingleConstraint(String scope, int minC, int maxC,
TargetExpression... targetExpressions) {
this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
}
public SingleConstraint(String scope, int minC, int maxC,
NodeAttributeOpCode opCode,
TargetExpression... targetExpressions) {
this(scope, minC, maxC, opCode,
new HashSet<>(Arrays.asList(targetExpressions)));
}
/**
* Get the scope of the constraint.
*
* @return the scope of the constraint
*/
public String getScope() {
return scope;
}
/**
* Get the minimum cardinality of the constraint.
*
* @return the minimum cardinality of the constraint
*/
public int getMinCardinality() {
return minCardinality;
}
/**
* Get the maximum cardinality of the constraint.
*
* @return the maximum cardinality of the constraint
*/
public int getMaxCardinality() {
return maxCardinality;
}
/**
* Get the target expressions of the constraint.
*
* @return the set of target expressions
*/
public Set<TargetExpression> getTargetExpressions() {
return targetExpressions;
}
/**
* Get the NodeAttributeOpCode of the constraint.
*
* @return nodeAttribute Op Code
*/
public NodeAttributeOpCode getNodeAttributeOpCode() {
return attributeOpCode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof SingleConstraint)) {
return false;
}
SingleConstraint that = (SingleConstraint) o;
if (getMinCardinality() != that.getMinCardinality()) {
return false;
}
if (getMaxCardinality() != that.getMaxCardinality()) {
return false;
}
if (!getScope().equals(that.getScope())) {
return false;
}
if (getNodeAttributeOpCode() != null && !getNodeAttributeOpCode()
.equals(that.getNodeAttributeOpCode())) {
return false;
}
return getTargetExpressions().equals(that.getTargetExpressions());
}
@Override
public int hashCode() {
int result = getScope().hashCode();
result = 31 * result + getMinCardinality();
result = 31 * result + getMaxCardinality();
result = 31 * result + getNodeAttributeOpCode().hashCode();
result = 31 * result + getTargetExpressions().hashCode();
return result;
}
@Override
public String toString() {
int max = getMaxCardinality();
int min = getMinCardinality();
List<String> targetExprList = getTargetExpressions().stream()
.map(TargetExpression::toString).collect(Collectors.toList());
List<String> targetConstraints = new ArrayList<>();
for (String targetExpr : targetExprList) {
if (min == 0 && max == 0) {
// anti-affinity
targetConstraints.add(new StringBuilder()
.append("notin").append(",")
.append(getScope()).append(",")
.append(targetExpr)
.toString());
} else if (min == 1 && max == Integer.MAX_VALUE) {
// affinity
targetConstraints.add(new StringBuilder()
.append("in").append(",")
.append(getScope()).append(",")
.append(targetExpr)
.toString());
} else if (min == -1 && max == -1) {
// node attribute
targetConstraints.add(new StringBuilder()
.append(getScope()).append(",")
.append(getNodeAttributeOpCode()).append(",")
.append(targetExpr)
.toString());
} else {
// cardinality
targetConstraints.add(new StringBuilder()
.append("cardinality").append(",")
.append(getScope()).append(",")
.append(targetExpr).append(",")
.append(min).append(",")
.append(max)
.toString());
}
}
return String.join(":", targetConstraints);
}
@Override
public <T> T accept(Visitor<T> visitor) {
return visitor.visit(this);
}
}
/**
* Class representing the target expressions that are used in placement
* constraints. They might refer to expressions on node attributes, allocation
* tags, or be self-targets (referring to the allocation to which the
* constraint is attached).
*/
public static | SingleConstraint |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/EmbeddedKafkaCluster.java | {
"start": 4439,
"end": 24067
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(EmbeddedKafkaCluster.class);
private final KafkaClusterTestKit cluster;
private final Properties brokerConfig;
public final MockTime time;
public EmbeddedKafkaCluster(final int numBrokers) {
this(numBrokers, new Properties());
}
public EmbeddedKafkaCluster(final int numBrokers, final Properties brokerConfig) {
this(numBrokers, brokerConfig, Collections.emptyMap());
}
public EmbeddedKafkaCluster(final int numBrokers,
final Properties brokerConfig,
final long mockTimeMillisStart) {
this(numBrokers, brokerConfig, Collections.emptyMap(), mockTimeMillisStart, System.nanoTime());
}
public EmbeddedKafkaCluster(final int numBrokers,
final Properties brokerConfig,
final Map<Integer, Map<String, String>> brokerConfigOverrides) {
this(numBrokers, brokerConfig, brokerConfigOverrides, System.currentTimeMillis(), System.nanoTime());
}
public EmbeddedKafkaCluster(final int numBrokers,
final Properties brokerConfig,
final Map<Integer, Map<String, String>> brokerConfigOverrides,
final long mockTimeMillisStart,
final long mockTimeNanoStart) {
addDefaultBrokerPropsIfAbsent(brokerConfig);
if (!brokerConfigOverrides.isEmpty() && brokerConfigOverrides.size() != numBrokers) {
throw new IllegalArgumentException("Size of brokerConfigOverrides " + brokerConfigOverrides.size()
+ " must match broker number " + numBrokers);
}
try {
final KafkaClusterTestKit.Builder clusterBuilder = new KafkaClusterTestKit.Builder(
new TestKitNodes.Builder()
.setCombined(true)
.setNumBrokerNodes(numBrokers)
.setPerServerProperties(brokerConfigOverrides)
// Reduce number of controllers for faster startup
// We may make this configurable in the future if there's a use case for it
.setNumControllerNodes(1)
.build()
);
brokerConfig.forEach((k, v) -> clusterBuilder.setConfigProp((String) k, v));
cluster = clusterBuilder.build();
cluster.nonFatalFaultHandler().setIgnore(true);
} catch (final Exception e) {
throw new KafkaException("Failed to create test Kafka cluster", e);
}
this.brokerConfig = brokerConfig;
this.time = new MockTime(mockTimeMillisStart, mockTimeNanoStart);
}
public void start() {
try {
cluster.format();
cluster.startup();
cluster.waitForReadyBrokers();
} catch (final Exception e) {
throw new KafkaException("Failed to start test Kafka cluster", e);
}
verifyClusterReadiness();
}
/**
* Perform an extended check to ensure that the primary APIs of the cluster are available, including:
* <ul>
* <li>Ability to create a topic</li>
* <li>Ability to produce to a topic</li>
* <li>Ability to form a consumer group</li>
* <li>Ability to consume from a topic</li>
* </ul>
* If this method completes successfully, all resources created to verify the cluster health
* (such as topics and consumer groups) will be cleaned up before it returns.
* <p>
* This provides extra guarantees compared to other cluster readiness checks such as
* {@link KafkaClusterTestKit#waitForReadyBrokers()}, which verify that brokers have
* completed startup and joined the cluster, but do not verify that the internal consumer
* offsets topic has been created or that it's actually possible for users to create and
* interact with topics.
*/
public void verifyClusterReadiness() {
final UUID uuid = UUID.randomUUID();
final String consumerGroupId = "group-warmup-" + uuid;
final Map<String, Object> consumerConfig = Map.of(
GROUP_ID_CONFIG, consumerGroupId,
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name()
);
final String topic = "topic-warmup-" + uuid;
createTopic(topic);
final Map<String, Object> producerProps = new HashMap<>(clientDefaultConfig());
producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "warmup-producer");
produce(producerProps, topic, null, "warmup message key", "warmup message value");
try (Consumer<?, ?> consumer = createConsumerAndSubscribeTo(consumerConfig, topic)) {
final ConsumerRecords<?, ?> records = consumer.poll(Duration.ofMillis(TimeUnit.MINUTES.toMillis(2)));
if (records.isEmpty()) {
throw new AssertionError("Failed to verify availability of group coordinator and produce/consume APIs on Kafka cluster in time");
}
}
try (Admin admin = createAdminClient()) {
admin.deleteConsumerGroups(Collections.singleton(consumerGroupId)).all().get(30, TimeUnit.SECONDS);
admin.deleteTopics(Collections.singleton(topic)).all().get(30, TimeUnit.SECONDS);
} catch (final InterruptedException | ExecutionException | TimeoutException e) {
throw new AssertionError("Failed to clean up cluster health check resource(s)", e);
}
}
/**
* Stop the Kafka cluster.
*/
public void stop() {
final AtomicReference<Throwable> shutdownFailure = new AtomicReference<>();
Utils.closeQuietly(cluster, "embedded Kafka cluster", shutdownFailure);
if (shutdownFailure.get() != null) {
throw new KafkaException("Failed to shut down producer / embedded Kafka cluster", shutdownFailure.get());
}
}
public String bootstrapServers() {
return cluster.bootstrapServers();
}
public boolean sslEnabled() {
final String listenerSecurityProtocolMap = brokerConfig.getProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG);
if (listenerSecurityProtocolMap == null)
return false;
return listenerSecurityProtocolMap.contains(":SSL") || listenerSecurityProtocolMap.contains(":SASL_SSL");
}
/**
* Create multiple Kafka topics each with 1 partition and a replication factor of 1.
*
* @param topics The name of the topics.
*/
public void createTopics(final String... topics) throws InterruptedException {
for (final String topic : topics) {
createTopic(topic, 1, 1, Collections.emptyMap());
}
}
/**
* Create a Kafka topic with 1 partition and a replication factor of 1.
*
* @param topic The name of the topic.
*/
public void createTopic(final String topic) {
createTopic(topic, 1);
}
/**
* Create a Kafka topic with given partition and a replication factor of 1.
*
* @param topic The name of the topic.
* @param partitions The number of partitions for this topic.
*/
public void createTopic(final String topic, final int partitions) {
createTopic(topic, partitions, 1, Collections.emptyMap());
}
/**
* Create a Kafka topic with the given parameters.
*
* @param topic The name of the topic.
* @param partitions The number of partitions for this topic.
* @param replication The replication factor for (the partitions of) this topic.
*/
public void createTopic(final String topic, final int partitions, final int replication) throws InterruptedException {
createTopic(topic, partitions, replication, Collections.emptyMap());
}
/**
* Create a Kafka topic with given partition, replication factor, and topic config.
*
* @param topic The name of the topic.
* @param partitions The number of partitions for this topic.
* @param replication The replication factor for (partitions of) this topic.
* @param topicConfig Additional topic-level configuration settings.
*/
public void createTopic(final String topic, final int partitions, final int replication, final Map<String, String> topicConfig) {
if (replication > cluster.brokers().size()) {
throw new InvalidReplicationFactorException("Insufficient brokers ("
+ cluster.brokers().size() + ") for desired replication (" + replication + ")");
}
log.info("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
topic, partitions, replication, topicConfig);
final NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
newTopic.configs(topicConfig);
try (final Admin adminClient = createAdminClient()) {
adminClient.createTopics(Collections.singletonList(newTopic)).all().get();
TestUtils.waitForCondition(() -> adminClient.listTopics().names().get().contains(topic),
"Wait for topic " + topic + " to get created.");
} catch (final TopicExistsException ignored) {
} catch (final InterruptedException | ExecutionException e) {
if (!(e.getCause() instanceof TopicExistsException)) {
throw new RuntimeException(e);
}
}
}
public void deleteTopics(final String... topics) {
for (final String topic : topics) {
deleteTopic(topic);
}
}
/**
* Delete a Kafka topic.
*
* @param topic the topic to delete; may not be null
*/
public void deleteTopic(final String topic) {
try (final Admin adminClient = createAdminClient()) {
adminClient.deleteTopics(Collections.singleton(topic)).all().get();
} catch (final InterruptedException | ExecutionException e) {
if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
throw new RuntimeException(e);
}
}
}
/**
* Delete all topics except internal topics.
*/
public void deleteAllTopics() {
try (final Admin adminClient = createAdminClient()) {
final Set<String> topics = adminClient.listTopics().names().get();
adminClient.deleteTopics(topics).all().get();
} catch (final UnknownTopicOrPartitionException ignored) {
} catch (final ExecutionException | InterruptedException e) {
if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
throw new RuntimeException(e);
}
}
}
/**
* Produce given key and value to topic partition.
* @param topic the topic to produce to; may not be null.
* @param partition the topic partition to produce to.
* @param key the record key.
* @param value the record value.
*/
public void produce(final Map<String, Object> producerProps, final String topic, final Integer partition, final String key, final String value) {
try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer())) {
final ProducerRecord<byte[], byte[]> msg = new ProducerRecord<>(topic, partition, key == null ? null : key.getBytes(), value == null ? null : value.getBytes());
try {
producer.send(msg).get(TimeUnit.SECONDS.toMillis(120), TimeUnit.MILLISECONDS);
producer.flush();
} catch (final Exception e) {
throw new KafkaException("Could not produce message: " + msg, e);
}
}
}
public Admin createAdminClient() {
return Admin.create(mkProperties(clientDefaultConfig()));
}
public Map<String, String> clientDefaultConfig() {
final Map<String, String> props = new HashMap<>();
props.putIfAbsent(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
if (sslEnabled()) {
props.putIfAbsent(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, brokerConfig.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG).toString());
props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, ((Password) brokerConfig.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)).value());
props.putIfAbsent(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
}
return props;
}
public KafkaConsumer<byte[], byte[]> createConsumer(final Map<String, Object> consumerProps) {
final Map<String, Object> props = new HashMap<>(clientDefaultConfig());
props.putAll(consumerProps);
props.putIfAbsent(GROUP_ID_CONFIG, UUID.randomUUID().toString());
props.putIfAbsent(ENABLE_AUTO_COMMIT_CONFIG, "false");
props.putIfAbsent(AUTO_OFFSET_RESET_CONFIG, "earliest");
props.putIfAbsent(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
props.putIfAbsent(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
final KafkaConsumer<byte[], byte[]> consumer;
try {
consumer = new KafkaConsumer<>(props);
} catch (final Throwable t) {
throw new KafkaException("Failed to create consumer", t);
}
return consumer;
}
public KafkaConsumer<byte[], byte[]> createConsumerAndSubscribeTo(final Map<String, Object> consumerProps, final String... topics) {
return createConsumerAndSubscribeTo(consumerProps, null, topics);
}
public KafkaConsumer<byte[], byte[]> createConsumerAndSubscribeTo(final Map<String, Object> consumerProps, final ConsumerRebalanceListener rebalanceListener, final String... topics) {
final KafkaConsumer<byte[], byte[]> consumer = createConsumer(consumerProps);
if (rebalanceListener != null) {
consumer.subscribe(Arrays.asList(topics), rebalanceListener);
} else {
consumer.subscribe(Arrays.asList(topics));
}
return consumer;
}
private void addDefaultBrokerPropsIfAbsent(final Properties brokerConfig) {
brokerConfig.putIfAbsent(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, 2 * 1024 * 1024L);
brokerConfig.putIfAbsent(GroupCoordinatorConfig.STREAMS_GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, "100");
brokerConfig.putIfAbsent(GroupCoordinatorConfig.STREAMS_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, "100");
brokerConfig.putIfAbsent(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, "0");
brokerConfig.putIfAbsent(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0");
brokerConfig.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "5");
brokerConfig.putIfAbsent(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
brokerConfig.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, "5");
brokerConfig.putIfAbsent(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
brokerConfig.putIfAbsent(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, true);
brokerConfig.putIfAbsent(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, true);
}
public void waitForRemainingTopics(final long timeoutMs, final String... topics) throws InterruptedException {
TestUtils.waitForCondition(new TopicsRemainingCondition(topics), timeoutMs, "Topics are not expected after " + timeoutMs + " milliseconds.");
}
public Set<String> getAllTopicsInCluster() {
try (final Admin adminClient = createAdminClient()) {
return adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names().get();
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
public Properties getLogConfig(final String topic) {
try (final Admin adminClient = createAdminClient()) {
final ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
final Config config = adminClient.describeConfigs(Collections.singleton(configResource)).values().get(configResource).get();
final Properties properties = new Properties();
for (final ConfigEntry configEntry : config.entries()) {
if (configEntry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) {
properties.put(configEntry.name(), configEntry.value());
}
}
return properties;
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
public void setGroupSessionTimeout(final String groupId, final int sessionTimeoutMs) {
try (final Admin adminClient = createAdminClient()) {
adminClient.incrementalAlterConfigs(
Map.of(
new ConfigResource(ConfigResource.Type.GROUP, groupId),
List.of(new AlterConfigOp(new ConfigEntry(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG, String.valueOf(sessionTimeoutMs)), AlterConfigOp.OpType.SET))
)
).all().get();
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
public void setGroupHeartbeatTimeout(final String groupId, final int heartbeatTimeoutMs) {
try (final Admin adminClient = createAdminClient()) {
adminClient.incrementalAlterConfigs(
Map.of(
new ConfigResource(ConfigResource.Type.GROUP, groupId),
List.of(new AlterConfigOp(new ConfigEntry(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(heartbeatTimeoutMs)), AlterConfigOp.OpType.SET))
)
).all().get();
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
public void setGroupStandbyReplicas(final String groupId, final int numStandbyReplicas) {
try (final Admin adminClient = createAdminClient()) {
adminClient.incrementalAlterConfigs(
Map.of(
new ConfigResource(ConfigResource.Type.GROUP, groupId),
List.of(new AlterConfigOp(new ConfigEntry(GroupConfig.STREAMS_NUM_STANDBY_REPLICAS_CONFIG, String.valueOf(numStandbyReplicas)), AlterConfigOp.OpType.SET))
)
).all().get();
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
public void setGroupStreamsInitialRebalanceDelay(final String groupId, final int initialRebalanceDelayMs) {
try (final Admin adminClient = createAdminClient()) {
adminClient.incrementalAlterConfigs(
Map.of(
new ConfigResource(ConfigResource.Type.GROUP, groupId),
List.of(new AlterConfigOp(new ConfigEntry(GroupConfig.STREAMS_INITIAL_REBALANCE_DELAY_MS_CONFIG, String.valueOf(initialRebalanceDelayMs)), AlterConfigOp.OpType.SET))
)
).all().get();
} catch (final InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
private final | EmbeddedKafkaCluster |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java | {
"start": 1478,
"end": 4904
} | class ____ extends AbstractS3ATestBase {
private static final int UPLOAD_LEN = 1024;
private static final String PART_FILENAME_BASE = "pending-part";
private static final int LIST_BATCH_SIZE = 2;
private static final int NUM_KEYS = 5;
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
S3ATestUtils.disableFilesystemCaching(conf);
// Forces listings to come back in multiple batches to test that part of
// the iterators.
conf.setInt(Constants.MAX_PAGING_KEYS, LIST_BATCH_SIZE);
return conf;
}
@Override
@BeforeEach
public void setup() throws Exception {
super.setup();
assumeMultipartUploads(getFileSystem().getConf());
}
/**
* Main test case for upload part listing and iterator paging.
* @throws Exception on failure.
*/
@Test
public void testListMultipartUploads() throws Exception {
S3AFileSystem fs = getFileSystem();
Set<MultipartTestUtils.IdKey> keySet = new HashSet<>();
try (AuditSpan span = span()) {
// 1. Create NUM_KEYS pending upload parts
for (int i = 0; i < NUM_KEYS; i++) {
Path filePath = getPartFilename(i);
String key = fs.pathToKey(filePath);
describe("creating upload part with key %s", key);
// create a multipart upload
MultipartTestUtils.IdKey idKey = MultipartTestUtils
.createPartUpload(fs, key, UPLOAD_LEN,
1);
keySet.add(idKey);
}
// 2. Verify all uploads are found listing by prefix
describe("Verifying upload list by prefix");
RemoteIterator<MultipartUpload> uploads = fs.listUploads(getPartPrefix(fs));
assertUploadsPresent(uploads, keySet);
// 3. Verify all uploads are found listing without prefix
describe("Verifying list all uploads");
uploads = fs.listUploads(null);
assertUploadsPresent(uploads, keySet);
} finally {
// 4. Delete all uploads we created
MultipartTestUtils.cleanupParts(fs, keySet);
}
}
/**
* Assert that all provided multipart uploads are contained in the upload
* iterator's results.
* @param list upload iterator
* @param ourUploads set up uploads that should be present
* @throws IOException on I/O error
*/
private void assertUploadsPresent(RemoteIterator<MultipartUpload> list,
Set<MultipartTestUtils.IdKey> ourUploads) throws IOException {
// Don't modify passed-in set, use copy.
Set<MultipartTestUtils.IdKey> uploads = new HashSet<>(ourUploads);
foreach(list, (upload) -> {
MultipartTestUtils.IdKey listing = toIdKey(upload);
if (uploads.remove(listing)) {
LOG.debug("Matched: {},{}", listing.getKey(), listing.getUploadId());
} else {
LOG.debug("Not our upload {},{}", listing.getKey(),
listing.getUploadId());
}
});
Assertions.assertThat(uploads)
.describedAs("Uploads which we expected to be listed.")
.isEmpty();
}
private MultipartTestUtils.IdKey toIdKey(MultipartUpload mu) {
return new MultipartTestUtils.IdKey(mu.key(), mu.uploadId());
}
private Path getPartFilename(int index) throws IOException {
return path(String.format("%s-%d", PART_FILENAME_BASE, index));
}
private String getPartPrefix(S3AFileSystem fs) throws IOException {
return fs.pathToKey(path("blah").getParent());
}
} | ITestS3AMultipartUtils |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/expressions/UnresolvedException.java | {
"start": 994,
"end": 1120
} | class ____ extends RuntimeException {
public UnresolvedException(String msg) {
super(msg);
}
}
| UnresolvedException |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/util/ReusingKeyGroupedIteratorTest.java | {
"start": 1762,
"end": 39013
} | class ____ {
private MutableObjectIterator<Record> sourceIter; // the iterator that provides the input
private ReusingKeyGroupedIterator<Record>
psi; // the grouping iterator, progressing in key steps
@BeforeEach
void setup() {
final ArrayList<IntStringPair> source = new ArrayList<IntStringPair>();
// add elements to the source
source.add(new IntStringPair(new IntValue(1), new StringValue("A")));
source.add(new IntStringPair(new IntValue(2), new StringValue("B")));
source.add(new IntStringPair(new IntValue(3), new StringValue("C")));
source.add(new IntStringPair(new IntValue(3), new StringValue("D")));
source.add(new IntStringPair(new IntValue(4), new StringValue("E")));
source.add(new IntStringPair(new IntValue(4), new StringValue("F")));
source.add(new IntStringPair(new IntValue(4), new StringValue("G")));
source.add(new IntStringPair(new IntValue(5), new StringValue("H")));
source.add(new IntStringPair(new IntValue(5), new StringValue("I")));
source.add(new IntStringPair(new IntValue(5), new StringValue("J")));
source.add(new IntStringPair(new IntValue(5), new StringValue("K")));
source.add(new IntStringPair(new IntValue(5), new StringValue("L")));
this.sourceIter =
new MutableObjectIterator<Record>() {
final Iterator<IntStringPair> it = source.iterator();
@Override
public Record next(Record reuse) throws IOException {
if (it.hasNext()) {
IntStringPair pair = it.next();
reuse.setField(0, pair.getInteger());
reuse.setField(1, pair.getString());
return reuse;
} else {
return null;
}
}
@Override
public Record next() throws IOException {
if (it.hasNext()) {
IntStringPair pair = it.next();
Record result = new Record(2);
result.setField(0, pair.getInteger());
result.setField(1, pair.getString());
return result;
} else {
return null;
}
}
};
final RecordSerializer serializer = RecordSerializer.get();
@SuppressWarnings("unchecked")
final RecordComparator comparator =
new RecordComparator(new int[] {0}, new Class[] {IntValue.class});
this.psi = new ReusingKeyGroupedIterator<Record>(this.sourceIter, serializer, comparator);
}
@Test
void testNextKeyOnly() throws Exception {
try {
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(1))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isOne();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(2))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(2);
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(3))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
assertThat((Iterable<? extends Record>) this.psi.getValues())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isNull();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
} catch (Exception e) {
e.printStackTrace();
fail("The test encountered an unexpected exception.");
}
}
@Test
void testFullIterationThroughAllValues() throws IOException {
try {
// Key 1, Value A
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(1))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isOne();
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("A");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isFalse();
// Key 2, Value B
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(2))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(2);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("B");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isFalse();
// Key 3, Values C, D
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(3))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("C");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(3))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("D");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(3))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThatThrownBy(() -> this.psi.getValues().next())
.withFailMessage(
"A new KeyGroupedIterator must not have any value available and hence throw an exception on next().")
.isInstanceOf(NoSuchElementException.class);
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isFalse();
assertThatThrownBy(() -> this.psi.getValues().next())
.withFailMessage(
"A new KeyGroupedIterator must not have any value available and hence throw an exception on next().")
.isInstanceOf(NoSuchElementException.class);
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(3))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
// Key 4, Values E, F, G
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("E");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("F");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("G");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isFalse();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(4))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(4);
// Key 5, Values H, I, J, K, L
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("H");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("I");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("J");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("K");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("L");
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThatThrownBy(() -> this.psi.getValues().next())
.withFailMessage(
"A new KeyGroupedIterator must not have any value available and hence throw an exception on next().")
.isInstanceOf(NoSuchElementException.class);
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must not have another value.")
.isFalse();
assertThat(
this.psi
.getComparatorWithCurrentReference()
.equalToReference(new Record(new IntValue(5))))
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThatThrownBy(() -> this.psi.getValues().next())
.withFailMessage(
"A new KeyGroupedIterator must not have any value available and hence throw an exception on next().")
.isInstanceOf(NoSuchElementException.class);
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
assertThat((Iterable<? extends Record>) this.psi.getValues()).isNull();
} catch (Exception e) {
e.printStackTrace();
fail("The test encountered an unexpected exception.");
}
}
@Test
void testMixedProgress() throws Exception {
try {
// Progression only via nextKey() and hasNext() - Key 1, Value A
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
// Progression only through nextKey() - Key 2, Value B
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
// Progression first though haNext() and next(), then through hasNext() - Key 3, Values
// C, D
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("C");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
// Progression first via next() only, then hasNext() only Key 4, Values E, F, G
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("E");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
// Key 5, Values H, I, J, K, L
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("H");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getCurrent().getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(5);
assertThat(this.psi.getValues().next().getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("I");
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
assertThat(hasIterator(this.psi.getValues())).isTrue();
assertThat(hasIterator(this.psi.getValues())).isFalse();
assertThat(this.psi.getValues().hasNext())
.withFailMessage("KeyGroupedIterator must have another value.")
.isTrue();
// end
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must not have another key.")
.isFalse();
} catch (Exception e) {
e.printStackTrace();
fail("The test encountered an unexpected exception.");
}
}
@Test
void testHasNextDoesNotOverwriteCurrentRecord() throws Exception {
try {
Iterator<Record> valsIter = null;
Record rec = null;
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
valsIter = this.psi.getValues();
assertThat(valsIter).withFailMessage("Returned Iterator must not be null").isNotNull();
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator's value iterator must have another value.")
.hasNext();
rec = valsIter.next();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isOne();
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("A");
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator must not have another value.")
.isExhausted();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isOne();
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("A");
assertThat(valsIter)
.withFailMessage(
"KeyGroupedIterator's value iterator must not have another value.")
.isExhausted();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
valsIter = this.psi.getValues();
assertThat(valsIter).withFailMessage("Returned Iterator must not be null").isNotNull();
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator's value iterator must have another value.")
.hasNext();
rec = valsIter.next();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(2);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("B");
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator must not have another value.")
.isExhausted();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(2);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("B");
assertThat(valsIter)
.withFailMessage(
"KeyGroupedIterator's value iterator must not have another value.")
.isExhausted();
assertThat(this.psi.nextKey())
.withFailMessage("KeyGroupedIterator must have another key.")
.isTrue();
valsIter = this.psi.getValues();
assertThat(valsIter).withFailMessage("Returned Iterator must not be null").isNotNull();
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator's value iterator must have another value.")
.hasNext();
rec = valsIter.next();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("C");
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator's value iterator must have another value.")
.hasNext();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("C");
rec = valsIter.next();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("D");
assertThat(valsIter)
.withFailMessage("KeyGroupedIterator must not have another value.")
.isExhausted();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("D");
assertThat(valsIter)
.withFailMessage(
"KeyGroupedIterator's value iterator must not have another value.")
.isExhausted();
assertThat(rec.getField(0, IntValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong key.")
.isEqualTo(3);
assertThat(rec.getField(1, StringValue.class).getValue())
.withFailMessage("KeyGroupedIterator returned a wrong value.")
.isEqualTo("D");
} catch (Exception e) {
e.printStackTrace();
fail("The test encountered an unexpected exception.");
}
}
private static final | ReusingKeyGroupedIteratorTest |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReportTests.java | {
"start": 12423,
"end": 12574
} | class ____ {
@Bean
String negativeOuterPositiveInnerBean() {
return "negativeOuterPositiveInnerBean";
}
}
}
static | PositiveInnerConfig |
java | spring-projects__spring-boot | module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/RestTemplateAutoConfigurationTests.java | {
"start": 11197,
"end": 11516
} | class ____ {
@Bean
RestTemplateBuilder restTemplateBuilder(RestTemplateBuilderConfigurer configurer) {
return configurer.configure(new RestTemplateBuilder()).messageConverters(new CustomHttpMessageConverter());
}
}
@Configuration(proxyBeanMethods = false)
static | CustomRestTemplateBuilderWithConfigurerConfig |
java | elastic__elasticsearch | x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaSystemRoleIntegTests.java | {
"start": 931,
"end": 2766
} | class ____ extends SecurityIntegTestCase {
protected static final SecureString USERS_PASSWD = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING;
@Override
public String configUsers() {
final String usersPasswdHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD));
return super.configUsers() + "my_kibana_system:" + usersPasswdHashed;
}
@Override
public String configUsersRoles() {
return super.configUsersRoles() + "kibana_system:my_kibana_system";
}
public void testCreateIndexDeleteInKibanaIndex() throws Exception {
final String index = randomBoolean() ? ".kibana" : ".kibana-" + randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ENGLISH);
if (randomBoolean()) {
CreateIndexResponse createIndexResponse = client().filterWithHeader(
singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("my_kibana_system", USERS_PASSWD))
).admin().indices().prepareCreate(index).get();
assertThat(createIndexResponse.isAcknowledged(), is(true));
}
DocWriteResponse response = client().filterWithHeader(
singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("my_kibana_system", USERS_PASSWD))
).prepareIndex().setIndex(index).setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get();
assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
DeleteResponse deleteResponse = client().filterWithHeader(
singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("my_kibana_system", USERS_PASSWD))
).prepareDelete(index, response.getId()).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
}
}
| KibanaSystemRoleIntegTests |
java | google__dagger | javatests/dagger/functional/generictypes/GenericTypesComponentTest.java | {
"start": 1037,
"end": 1224
} | class ____ {
private static final String STRING_VALUE = "someString";
private static final int INT_VALUE = 3;
@Component(modules = GenericTypesModule.class)
| GenericTypesComponentTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceIntegrationTest.java | {
"start": 2295,
"end": 6099
} | class ____ {
private static final String CONNECTOR_NAME = "monitorable-source";
private static final String TASK_ID = CONNECTOR_NAME + "-0";
private static final int NUM_TASKS = 1;
private static final long SOURCE_TASK_PRODUCE_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(30);
private static final int MINIMUM_MESSAGES = 100;
private static final String MESSAGES_PER_POLL = Integer.toString(MINIMUM_MESSAGES);
private static final String MESSAGES_PER_SECOND = Long.toString(MINIMUM_MESSAGES / 2);
private EmbeddedConnectStandalone connect;
private ConnectorHandle connectorHandle;
@BeforeEach
public void setup() throws InterruptedException {
// setup Connect cluster with defaults
connect = new EmbeddedConnectStandalone.Builder().build();
// start Connect cluster
connect.start();
// get connector handles before starting test.
connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
}
@AfterEach
public void close() {
connect.stop();
}
@Test
public void testMonitorableSourceConnectorAndTask() throws Exception {
connect.kafka().createTopic("test-topic");
Map<String, String> props = Map.of(
CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName(),
TASKS_MAX_CONFIG, "1",
TOPIC_CONFIG, "test-topic",
KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName(),
VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName(),
MESSAGES_PER_POLL_CONFIG, MESSAGES_PER_POLL,
MAX_MESSAGES_PER_SECOND_CONFIG, MESSAGES_PER_SECOND);
// set expected records to successfully reach the task
// expect all records to be consumed and committed by the task
connectorHandle.taskHandle(TASK_ID).expectedRecords(MINIMUM_MESSAGES);
connectorHandle.taskHandle(TASK_ID).expectedCommits(MINIMUM_MESSAGES);
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS,
"Connector tasks did not start in time.");
// wait for the connector tasks to produce enough records
connectorHandle.taskHandle(TASK_ID).awaitRecords(SOURCE_TASK_PRODUCE_TIMEOUT_MS);
connectorHandle.taskHandle(TASK_ID).awaitCommits(TimeUnit.MINUTES.toMillis(1));
// check connector metric
Map<MetricName, KafkaMetric> metrics = connect.connectMetrics().metrics().metrics();
MetricName connectorMetric = MonitorableSourceConnector.metricsName;
assertTrue(metrics.containsKey(connectorMetric));
assertEquals(CONNECTOR_NAME, connectorMetric.tags().get("connector"));
assertEquals(MonitorableSourceConnector.VALUE, metrics.get(connectorMetric).metricValue());
// check task metric
metrics = connect.connectMetrics().metrics().metrics();
MetricName taskMetric = MonitorableSourceConnector.MonitorableSourceTask.metricsName;
assertTrue(metrics.containsKey(taskMetric));
assertEquals(CONNECTOR_NAME, taskMetric.tags().get("connector"));
assertEquals("0", taskMetric.tags().get("task"));
assertTrue(MINIMUM_MESSAGES <= (double) metrics.get(taskMetric).metricValue());
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME,
"Connector wasn't deleted in time.");
// verify the connector and task metrics have been deleted
metrics = connect.connectMetrics().metrics().metrics();
assertFalse(metrics.containsKey(connectorMetric));
assertFalse(metrics.containsKey(taskMetric));
}
}
| MonitorableSourceIntegrationTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/StartupConditionRegistryTest.java | {
"start": 1175,
"end": 2165
} | class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testVetoCamelContextStart() {
try {
context.start();
fail("Should throw exception");
} catch (Exception e) {
assertEquals("Startup condition: MyOtherCondition cannot continue due to: forced error from unit test",
e.getCause().getMessage());
}
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
StartupConditionStrategy scs = context.getCamelContextExtension().getContextPlugin(StartupConditionStrategy.class);
scs.setEnabled(true);
scs.setTimeout(250);
scs.setOnTimeout("fail");
context.getRegistry().bind("myCondition", new MyOtherCondition());
return context;
}
private static | StartupConditionRegistryTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetTests.java | {
"start": 2134,
"end": 7659
} | class ____ extends ESTestCase {
private ThreadPool threadPool;
private ClusterService clusterService;
private static ClusterState addNode(ClusterState state, String name) {
var nodes = DiscoveryNodes.builder(state.nodes()).add(DiscoveryNodeUtils.create(name));
return ClusterState.builder(state).nodes(nodes).build();
}
private static ClusterState removeNode(ClusterState state, String name) {
var nodes = DiscoveryNodes.builder();
state.nodes().stream().filter((node) -> node.getId() != name).forEach(nodes::add);
return ClusterState.builder(state).nodes(nodes).build();
}
private static ClusterState addShardWithFailures(ClusterState state) {
var index = "index-1";
var shard = 0;
var indexMeta = new IndexMetadata.Builder(index).settings(
Settings.builder().put(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build())
).numberOfShards(1).numberOfReplicas(0).build();
var meta = Metadata.builder(state.metadata()).put(indexMeta, false).build();
var shardId = new ShardId(indexMeta.getIndex(), shard);
var nonZeroFailures = 5;
var unassignedInfo = new UnassignedInfo(
UnassignedInfo.Reason.ALLOCATION_FAILED,
null,
null,
nonZeroFailures,
0,
0,
false,
UnassignedInfo.AllocationStatus.NO_ATTEMPT,
Set.of(),
null
);
var shardRouting = ShardRouting.newUnassigned(
shardId,
true,
new RecoverySource.EmptyStoreRecoverySource(),
unassignedInfo,
ShardRouting.Role.DEFAULT
);
var routingTable = new RoutingTable.Builder().add(
new IndexRoutingTable.Builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, indexMeta.getIndex()).initializeAsNew(
meta.getProject().index(index)
).addIndexShard(IndexShardRoutingTable.builder(shardId).addShard(shardRouting)).build()
).build();
return ClusterState.builder(state).metadata(meta).routingTable(routingTable).build();
}
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool("reset-alloc-failures");
clusterService = ClusterServiceUtils.createClusterService(threadPool);
var allocationService = new AllocationService(
new AllocationDeciders(List.of(new MaxRetryAllocationDecider())),
new TestGatewayAllocator(),
new BalancedShardsAllocator(Settings.EMPTY),
EmptyClusterInfoService.INSTANCE,
EmptySnapshotsInfoService.INSTANCE,
TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY
);
allocationService.addAllocFailuresResetListenerTo(clusterService);
}
@Override
public void tearDown() throws Exception {
super.tearDown();
clusterService.stop();
threadPool.shutdownNow();
}
/**
* Create state with two nodes and allocation failures, and does <b>not</b> reset counter after node removal
*/
public void testRemoveNodeDoesNotResetCounter() throws Exception {
var initState = clusterService.state();
var stateWithNewNode = addNode(initState, "node-2");
clusterService.getClusterApplierService().onNewClusterState("add node", () -> stateWithNewNode, ActionListener.noop());
var stateWithFailures = addShardWithFailures(stateWithNewNode);
clusterService.getClusterApplierService().onNewClusterState("add failures", () -> stateWithFailures, ActionListener.noop());
assertBusy(() -> {
var resultState = clusterService.state();
assertEquals(2, resultState.nodes().size());
assertEquals(1, resultState.getRoutingTable().allShards().count());
assertTrue(resultState.getRoutingNodes().hasAllocationFailures());
});
var stateWithRemovedNode = removeNode(stateWithFailures, "node-2");
clusterService.getClusterApplierService().onNewClusterState("remove node", () -> stateWithRemovedNode, ActionListener.noop());
assertBusy(() -> {
var resultState = clusterService.state();
assertEquals(1, resultState.nodes().size());
assertEquals(1, resultState.getRoutingTable().allShards().count());
assertTrue(resultState.getRoutingNodes().hasAllocationFailures());
});
}
/**
* Create state with one node and allocation failures, and reset counter after node addition
*/
public void testAddNodeResetsCounter() throws Exception {
var initState = clusterService.state();
var stateWithFailures = addShardWithFailures(initState);
clusterService.getClusterApplierService().onNewClusterState("add failures", () -> stateWithFailures, ActionListener.noop());
var stateWithNewNode = addNode(stateWithFailures, "node-2");
clusterService.getClusterApplierService().onNewClusterState("add node", () -> stateWithNewNode, ActionListener.noop());
assertBusy(() -> {
var resultState = clusterService.state();
assertEquals(2, resultState.nodes().size());
assertEquals(1, resultState.getRoutingTable().allShards().count());
assertFalse(resultState.getRoutingNodes().hasAllocationFailures());
});
}
}
| AllocationFailuresResetTests |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/validator/CustomValidatorDefinition.java | {
"start": 2065,
"end": 2203
} | class ____ of the {@link Validator}
*/
public void setClassName(String className) {
this.className = className;
}
}
| name |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/models/partitions/Partitions.java | {
"start": 2608,
"end": 14838
} | class ____ implements Collection<RedisClusterNode> {
private static final RedisClusterNode[] EMPTY = new RedisClusterNode[SlotHash.SLOT_COUNT];
private final Lock lock = new ReentrantLock();
private final List<RedisClusterNode> partitions = new ArrayList<>();
private volatile RedisClusterNode[] slotCache = EMPTY;
private volatile RedisClusterNode[] masterCache = EMPTY;
private volatile Collection<RedisClusterNode> nodeReadView = Collections.emptyList();
/**
* Create a deep copy of this {@link Partitions} object.
*
* @return a deep copy of this {@link Partitions} object.
*/
@Override
public Partitions clone() {
Collection<RedisClusterNode> readView = new ArrayList<>(nodeReadView);
Partitions copy = new Partitions();
for (RedisClusterNode node : readView) {
copy.addPartition(node.clone());
}
copy.updateCache();
return copy;
}
/**
* Retrieve a {@link RedisClusterNode} by its slot number. This method does not distinguish between masters and slaves.
*
* @param slot the slot hash.
* @return the {@link RedisClusterNode} or {@code null} if not found.
*/
public RedisClusterNode getPartitionBySlot(int slot) {
return slotCache[slot];
}
/**
* Retrieve a {@link RedisClusterNode master node} by its slot number
*
* @param slot the slot hash.
* @return the {@link RedisClusterNode} or {@code null} if not found.
* @since 6.2
*/
public RedisClusterNode getMasterBySlot(int slot) {
return masterCache[slot];
}
/**
* Retrieve a {@link RedisClusterNode} by its node id.
*
* @param nodeId the nodeId.
* @return the {@link RedisClusterNode} or {@code null} if not found.
*/
public RedisClusterNode getPartitionByNodeId(String nodeId) {
for (RedisClusterNode partition : nodeReadView) {
if (partition.getNodeId().equals(nodeId)) {
return partition;
}
}
return null;
}
/**
* Retrieve a {@link RedisClusterNode} by its hostname/port considering node aliases.
*
* @param host hostname.
* @param port port number.
* @return the {@link RedisClusterNode} or {@code null} if not found.
*/
public RedisClusterNode getPartition(String host, int port) {
for (RedisClusterNode partition : nodeReadView) {
RedisURI uri = partition.getUri();
if (matches(uri, host, port)) {
return partition;
}
for (RedisURI redisURI : partition.getAliases()) {
if (matches(redisURI, host, port)) {
return partition;
}
}
}
return null;
}
private static boolean matches(RedisURI uri, String host, int port) {
return uri.getPort() == port && host.equals(uri.getHost());
}
/**
* Update the partition cache. Updates are necessary after the partition details have changed.
*/
public void updateCache() {
lock.lock();
try {
if (partitions.isEmpty()) {
invalidateCache();
return;
}
RedisClusterNode[] slotCache = new RedisClusterNode[SlotHash.SLOT_COUNT];
RedisClusterNode[] masterCache = new RedisClusterNode[SlotHash.SLOT_COUNT];
List<RedisClusterNode> readView = new ArrayList<>(partitions.size());
for (RedisClusterNode partition : partitions) {
readView.add(partition);
if (partition.is(RedisClusterNode.NodeFlag.UPSTREAM)) {
partition.forEachSlot(i -> masterCache[i] = partition);
}
partition.forEachSlot(i -> slotCache[i] = partition);
}
this.slotCache = slotCache;
this.masterCache = masterCache;
this.nodeReadView = Collections.unmodifiableCollection(readView);
} finally {
lock.unlock();
}
}
// Reset all derived lookup structures to their empty defaults. All visible call sites
// (updateCache, addPartition) invoke this while holding the lock -- NOTE(review): confirm
// that holding the lock is an intended invariant for any future callers.
private void invalidateCache() {
this.slotCache = EMPTY;
this.masterCache = EMPTY;
this.nodeReadView = Collections.emptyList();
}
/**
 * Returns an iterator over the {@link RedisClusterNode nodes} in this {@link Partitions} from the read-view. The
 * {@link Iterator} remains consistent during partition updates with the nodes that have been part of the {@link Partitions}
 * . {@link RedisClusterNode Nodes} added/removed during iteration/after obtaining the {@link Iterator} don't become visible
 * during iteration but upon the next call to {@link #iterator()}.
 *
 * @return an iterator over the {@link RedisClusterNode nodes} in this {@link Partitions} from the read-view.
 */
@Override
public Iterator<RedisClusterNode> iterator() {
// nodeReadView is replaced wholesale on cache updates, so this iterator walks a stable snapshot.
return nodeReadView.iterator();
}
/**
 * Returns the internal {@link List} of {@link RedisClusterNode} that holds the partition source. This {@link List} is used
 * to populate partition caches and should not be used directly and subject to change by refresh processes. Access
 * (read/write) requires synchronization on {@link #getPartitions()}.
 *
 * @return the internal partition source.
 */
public List<RedisClusterNode> getPartitions() {
// Intentionally exposes the live internal list (not a copy); callers must synchronize
// per the contract documented above.
return partitions;
}
/**
 * Adds a partition <b>without</b> updating the read view/cache.
 *
 * @param partition the partition
 */
public void addPartition(RedisClusterNode partition) {
LettuceAssert.notNull(partition, "Partition must not be null");
lock.lock();
try {
// Invalidates rather than rebuilds the caches: the read-view is empty until a later
// updateCache() call publishes the new state. This makes batched additions cheap.
invalidateCache();
partitions.add(partition);
} finally {
lock.unlock();
}
}
/**
 * @return the number of elements using the read-view.
 */
@Override
public int size() {
// Counts the read-view snapshot; may lag the source list (e.g. after addPartition)
// until updateCache() publishes the pending changes.
return nodeReadView.size();
}
/**
 * Returns the {@link RedisClusterNode} at {@code index}.
 *
 * <p>Note: unlike {@link #size()}, this reads the internal source list directly rather than
 * the read-view, so the valid index range may disagree with {@code size()} while a cache
 * update is pending.
 *
 * @param index the index
 * @return the requested element from the internal partition source.
 */
public RedisClusterNode getPartition(int index) {
return partitions.get(index);
}
/**
 * Update partitions and rebuild slot cache.
 *
 * @param partitions list of new partitions, must not contain {@code null} elements
 */
public void reload(List<RedisClusterNode> partitions) {
LettuceAssert.noNullElements(partitions, "Partitions must not contain null elements");
lock.lock();
try {
// Swap the source list under the lock, then publish fresh caches/read-view in one step
// so concurrent readers move directly from the old snapshot to the new one.
this.partitions.clear();
this.partitions.addAll(partitions);
updateCache();
} finally {
lock.unlock();
}
}
/**
 * Returns {@code true} if this {@link Partitions} contains no elements using the read-view.
 *
 * @return {@code true} if this {@link Partitions} contains no elements using the read-view.
 */
@Override
public boolean isEmpty() {
// Checks the read-view snapshot; reports empty after addPartition() until updateCache() runs.
return nodeReadView.isEmpty();
}
/**
 * Returns {@code true} if this {@link Partitions} contains the specified element.
 *
 * @param o the element to check for
 * @return {@code true} if this {@link Partitions} contains the specified element
 */
@Override
public boolean contains(Object o) {
// Checked against the read-view snapshot, not the live source list.
return nodeReadView.contains(o);
}
/**
 * Add all {@link RedisClusterNode nodes} from the given collection, then refresh the
 * read-view/caches so the additions become visible immediately.
 *
 * @param c must not be {@code null} and must not contain {@code null} elements
 * @return {@code true} if this {@link Partitions} changed as a result of the call
 */
@Override
public boolean addAll(Collection<? extends RedisClusterNode> c) {
    LettuceAssert.noNullElements(c, "Partitions must not contain null elements");
    lock.lock();
    try {
        boolean changed = getPartitions().addAll(c);
        updateCache();
        return changed;
    } finally {
        lock.unlock();
    }
}
/**
 * Remove every element of the given collection from this {@link Partitions}, then refresh
 * the read-view/caches.
 *
 * @param c must not be {@code null}
 * @return {@code true} if this {@link Partitions} changed as a result of the call
 */
@Override
public boolean removeAll(Collection<?> c) {
    lock.lock();
    try {
        boolean changed = getPartitions().removeAll(c);
        updateCache();
        return changed;
    } finally {
        lock.unlock();
    }
}
/**
 * Retains only the elements in this {@link Partitions} that are contained in the specified
 * collection, i.e. removes every node not present in {@code c}, then refreshes the
 * read-view/caches.
 *
 * @param c must not be {@code null}
 * @return {@code true} if this {@link Partitions} changed as a result of the call
 */
@Override
public boolean retainAll(Collection<?> c) {
    lock.lock();
    try {
        boolean changed = getPartitions().retainAll(c);
        updateCache();
        return changed;
    } finally {
        lock.unlock();
    }
}
/**
 * Removes all {@link RedisClusterNode nodes} and refreshes the read-view/caches
 * (which collapse to their empty defaults).
 */
@Override
public void clear() {
    lock.lock();
    try {
        getPartitions().clear();
        // updateCache() detects the empty source list and publishes the empty caches.
        updateCache();
    } finally {
        lock.unlock();
    }
}
/**
 * Returns an array containing all the elements in this {@link Partitions} using the read-view.
 *
 * @return an array containing all the elements in this {@link Partitions} using the read-view.
 */
@Override
public Object[] toArray() {
// Delegates to the immutable read-view, so the produced array is a consistent snapshot.
return nodeReadView.toArray();
}
/**
 * Returns an array containing all the elements in this {@link Partitions} using the read-view.
 *
 * @param a the array into which the elements of this collection are to be stored, if it is big enough; otherwise, a new
 *        array of the same runtime type is allocated for this purpose.
 * @param <T> type of the array to contain the collection
 * @return an array containing all the elements in this {@link Partitions} using the read-view.
 */
@Override
public <T> T[] toArray(T[] a) {
// Delegates to the immutable read-view; follows the standard Collection.toArray(T[]) contract.
return nodeReadView.toArray(a);
}
/**
 * Adds the {@link RedisClusterNode} to this {@link Partitions} and refreshes the
 * read-view/caches so the addition becomes visible immediately.
 *
 * @param redisClusterNode must not be {@code null}
 * @return {@code true} if this {@link Partitions} changed as a result of the call
 */
@Override
public boolean add(RedisClusterNode redisClusterNode) {
    // Validate before acquiring the lock: fail fast on null input without touching the
    // lock, consistent with addPartition(...) which validates first as well.
    LettuceAssert.notNull(redisClusterNode, "RedisClusterNode must not be null");
    lock.lock();
    try {
        boolean added = getPartitions().add(redisClusterNode);
        updateCache();
        return added;
    } finally {
        lock.unlock();
    }
}
/**
 * Remove the element from this {@link Partitions} and refresh the read-view/caches.
 *
 * @param o the element to remove
 * @return {@code true} if this {@link Partitions} changed as a result of the call
 */
@Override
public boolean remove(Object o) {
    lock.lock();
    try {
        boolean removed = getPartitions().remove(o);
        updateCache();
        return removed;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns {@code true} if this collection contains all of the elements in the specified collection.
 *
 * @param c collection to be checked for containment in this collection, must not be {@code null}
 * @return {@code true} if the read-view contains every element of {@code c}, {@code false} otherwise
 */
@Override
public boolean containsAll(Collection<?> c) {
return nodeReadView.containsAll(c);
}
/**
 * Renders this {@link Partitions} as {@code "<SimpleClassName> <partitions>"}.
 *
 * @return a diagnostic string describing this {@link Partitions}.
 */
@Override
public String toString() {
    // Equivalent to the StringBuilder form: simple class name, a space, then the source list.
    return getClass().getSimpleName() + " " + partitions;
}
}
| Partitions |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/spi/tracing/SpanKind.java | {
"start": 533,
"end": 573
} | enum ____ {
RPC,
MESSAGING
}
| SpanKind |
java | apache__spark | common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java | {
"start": 1462,
"end": 5325
} | class ____ {
@Test
public void testSingleWrite() throws Exception {
testFileRegionBody(8, 8);
}
@Test
public void testShortWrite() throws Exception {
testFileRegionBody(8, 1);
}
@Test
public void testByteBufBody() throws Exception {
testByteBufBody(Unpooled.copyLong(42));
}
@Test
public void testCompositeByteBufBodySingleBuffer() throws Exception {
ByteBuf header = Unpooled.copyLong(42);
CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer();
compositeByteBuf.addComponent(true, header);
assertEquals(1, compositeByteBuf.nioBufferCount());
testByteBufBody(compositeByteBuf);
}
@Test
public void testCompositeByteBufBodyMultipleBuffers() throws Exception {
ByteBuf header = Unpooled.copyLong(42);
CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer();
compositeByteBuf.addComponent(true, header.retainedSlice(0, 4));
compositeByteBuf.addComponent(true, header.slice(4, 4));
assertEquals(2, compositeByteBuf.nioBufferCount());
testByteBufBody(compositeByteBuf);
}
/**
* Test writing a {@link MessageWithHeader} using the given {@link ByteBuf} as header.
*
* @param header the header to use.
* @throws Exception thrown on error.
*/
private void testByteBufBody(ByteBuf header) throws Exception {
long expectedHeaderValue = header.getLong(header.readerIndex());
ByteBuf bodyPassedToNettyManagedBuffer = Unpooled.copyLong(84);
assertEquals(1, header.refCnt());
assertEquals(1, bodyPassedToNettyManagedBuffer.refCnt());
ManagedBuffer managedBuf = new NettyManagedBuffer(bodyPassedToNettyManagedBuffer);
Object body = managedBuf.convertToNetty();
assertEquals(2, bodyPassedToNettyManagedBuffer.refCnt());
assertEquals(1, header.refCnt());
MessageWithHeader msg = new MessageWithHeader(managedBuf, header, body, managedBuf.size());
ByteBuf result = doWrite(msg, 1);
assertEquals(msg.count(), result.readableBytes());
assertEquals(expectedHeaderValue, result.readLong());
assertEquals(84, result.readLong());
assertTrue(msg.release());
assertEquals(0, bodyPassedToNettyManagedBuffer.refCnt());
assertEquals(0, header.refCnt());
}
@Test
public void testDeallocateReleasesManagedBuffer() throws Exception {
ByteBuf header = Unpooled.copyLong(42);
ManagedBuffer managedBuf = Mockito.spy(new TestManagedBuffer(84));
ByteBuf body = (ByteBuf) managedBuf.convertToNetty();
assertEquals(2, body.refCnt());
MessageWithHeader msg = new MessageWithHeader(managedBuf, header, body, body.readableBytes());
assertTrue(msg.release());
Mockito.verify(managedBuf, Mockito.times(1)).release();
assertEquals(0, body.refCnt());
}
private void testFileRegionBody(int totalWrites, int writesPerCall) throws Exception {
ByteBuf header = Unpooled.copyLong(42);
int headerLength = header.readableBytes();
TestFileRegion region = new TestFileRegion(totalWrites, writesPerCall);
MessageWithHeader msg = new MessageWithHeader(null, header, region, region.count());
ByteBuf result = doWrite(msg, totalWrites / writesPerCall);
assertEquals(headerLength + region.count(), result.readableBytes());
assertEquals(42, result.readLong());
for (long i = 0; i < 8; i++) {
assertEquals(i, result.readLong());
}
assertTrue(msg.release());
}
private ByteBuf doWrite(MessageWithHeader msg, int minExpectedWrites) throws Exception {
int writes = 0;
ByteArrayWritableChannel channel = new ByteArrayWritableChannel((int) msg.count());
while (msg.transferred() < msg.count()) {
msg.transferTo(channel, msg.transferred());
writes++;
}
assertTrue(minExpectedWrites <= writes, "Not enough writes!");
return Unpooled.wrappedBuffer(channel.getData());
}
private static | MessageWithHeaderSuite |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/feature/WriteNullStringAsEmptyTest.java | {
"start": 944,
"end": 1072
} | class ____ {
@JSONField(serialzeFeatures = SerializerFeature.WriteNullStringAsEmpty)
public String id;
}
}
| Model |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java | {
"start": 73184,
"end": 78901
} | class ____ extends TxnRequestHandler {
private final AddPartitionsToTxnRequest.Builder builder;
private long retryBackoffMs;
private AddPartitionsToTxnHandler(AddPartitionsToTxnRequest.Builder builder) {
super("AddPartitionsToTxn");
this.builder = builder;
this.retryBackoffMs = TransactionManager.this.retryBackoffMs;
}
@Override
AddPartitionsToTxnRequest.Builder requestBuilder() {
return builder;
}
@Override
Priority priority() {
return Priority.ADD_PARTITIONS_OR_OFFSETS;
}
@Override
public void handleResponse(AbstractResponse response) {
AddPartitionsToTxnResponse addPartitionsToTxnResponse = (AddPartitionsToTxnResponse) response;
Map<TopicPartition, Errors> errors = addPartitionsToTxnResponse.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID);
boolean hasPartitionErrors = false;
Set<String> unauthorizedTopics = new HashSet<>();
retryBackoffMs = TransactionManager.this.retryBackoffMs;
for (Map.Entry<TopicPartition, Errors> topicPartitionErrorEntry : errors.entrySet()) {
TopicPartition topicPartition = topicPartitionErrorEntry.getKey();
Errors error = topicPartitionErrorEntry.getValue();
if (error == Errors.NONE) {
continue;
} else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) {
lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
reenqueue();
return;
} else if (error == Errors.CONCURRENT_TRANSACTIONS) {
maybeOverrideRetryBackoffMs();
reenqueue();
return;
} else if (error.exception() instanceof RetriableException) {
reenqueue();
return;
} else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) {
// We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator,
// just treat it the same as PRODUCE_FENCED.
fatalError(Errors.PRODUCER_FENCED.exception());
return;
} else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED ||
error == Errors.INVALID_TXN_STATE || error == Errors.INVALID_PRODUCER_ID_MAPPING) {
fatalError(error.exception());
return;
} else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
unauthorizedTopics.add(topicPartition.topic());
} else if (error == Errors.OPERATION_NOT_ATTEMPTED) {
log.debug("Did not attempt to add partition {} to transaction because other partitions in the " +
"batch had errors.", topicPartition);
hasPartitionErrors = true;
} else if (error == Errors.UNKNOWN_PRODUCER_ID) {
abortableErrorIfPossible(error.exception());
return;
} else if (error == Errors.TRANSACTION_ABORTABLE) {
abortableError(error.exception());
return;
} else {
log.error("Could not add partition {} due to unexpected error {}", topicPartition, error);
hasPartitionErrors = true;
}
}
Set<TopicPartition> partitions = errors.keySet();
// Remove the partitions from the pending set regardless of the result. We use the presence
// of partitions in the pending set to know when it is not safe to send batches. However, if
// the partitions failed to be added and we enter an error state, we expect the batches to be
// aborted anyway. In this case, we must be able to continue sending the batches which are in
// retry for partitions that were successfully added.
pendingPartitionsInTransaction.removeAll(partitions);
if (!unauthorizedTopics.isEmpty()) {
abortableError(new TopicAuthorizationException(unauthorizedTopics));
} else if (hasPartitionErrors) {
abortableError(new KafkaException("Could not add partitions to transaction due to errors: " + errors));
} else {
log.debug("Successfully added partitions {} to transaction", partitions);
partitionsInTransaction.addAll(partitions);
transactionStarted = true;
result.done();
}
}
@Override
public long retryBackoffMs() {
return Math.min(TransactionManager.this.retryBackoffMs, this.retryBackoffMs);
}
private void maybeOverrideRetryBackoffMs() {
// We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
// CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
// we don't want to wait too long before trying to start the new one.
//
// This is only a temporary fix, the long term solution is being tracked in
// https://issues.apache.org/jira/browse/KAFKA-5482
if (partitionsInTransaction.isEmpty())
this.retryBackoffMs = ADD_PARTITIONS_RETRY_BACKOFF_MS;
}
}
private | AddPartitionsToTxnHandler |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/Schema.java | {
"start": 10931,
"end": 11131
} | enum ____ fixed, returns its namespace-qualified name,
* otherwise returns the name of the primitive type.
*/
public String getFullName() {
return getName();
}
/** If this is a record, | or |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.