language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterUnusedInFormalsTest.java | {
"start": 6272,
"end": 6702
} | class ____<T> {
abstract T get(String s);
}
""")
.doTest();
}
// regression test for b/35385704
@Test
public void typeAnnotation() {
compilationHelper
.addSourceLines(
"A.java",
"""
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
@Target(ElementType.TYPE_USE)
@ | Test |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/support/converter/MessageType.java | {
"start": 1002,
"end": 1255
} | enum ____ {
/**
* A {@link jakarta.jms.TextMessage}.
*/
TEXT,
/**
* A {@link jakarta.jms.BytesMessage}.
*/
BYTES,
/**
* A {@link jakarta.jms.MapMessage}.
*/
MAP,
/**
* A {@link jakarta.jms.ObjectMessage}.
*/
OBJECT
}
| MessageType |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/plugin/PluginLoaderException.java | {
"start": 1511,
"end": 3695
} | class ____ extends Exception {
private String pluginKey;
public PluginLoaderException(Plugin plugin, String message, ArtifactResolutionException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, ArtifactNotFoundException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, PluginNotFoundException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, PluginVersionResolutionException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, InvalidVersionSpecificationException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, InvalidPluginException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, PluginManagerException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message, PluginVersionNotFoundException cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(Plugin plugin, String message) {
super(message);
pluginKey = plugin.getKey();
}
public PluginLoaderException(String message) {
super(message);
}
public PluginLoaderException(String message, Throwable cause) {
super(message, cause);
}
public PluginLoaderException(ReportPlugin plugin, String message, Throwable cause) {
super(message, cause);
pluginKey = plugin.getKey();
}
public PluginLoaderException(ReportPlugin plugin, String message) {
super(message);
pluginKey = plugin.getKey();
}
public String getPluginKey() {
return pluginKey;
}
}
| PluginLoaderException |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/auth/EnvironmentCredentialsProvider.java | {
"start": 918,
"end": 1372
} | class ____ extends AbstractCredentialsProvider {
public static final String NAME = EnvironmentCredentialsProvider.class.getName();
@Override
protected ExpireableCredential createCredential() {
return new ExpireableCredential(
System.getenv(TOS.ENV_TOS_ACCESS_KEY_ID),
System.getenv(TOS.ENV_TOS_SECRET_ACCESS_KEY),
System.getenv(TOS.ENV_TOS_SESSION_TOKEN),
Long.MAX_VALUE
);
}
}
| EnvironmentCredentialsProvider |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/parallel/ParallelReduceFullTest.java | {
"start": 1117,
"end": 5175
} | class ____ extends RxJavaTest {
@Test
public void cancel() {
PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = pp
.parallel()
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test();
assertTrue(pp.hasSubscribers());
ts.cancel();
assertFalse(pp.hasSubscribers());
}
@Test
public void error() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Flowable.<Integer>error(new TestException())
.parallel()
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test()
.assertFailure(TestException.class);
assertTrue(errors.isEmpty());
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void error2() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
ParallelFlowable.fromArray(Flowable.<Integer>error(new IOException()), Flowable.<Integer>error(new TestException()))
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test()
.assertFailure(IOException.class);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void empty() {
Flowable.<Integer>empty()
.parallel()
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test()
.assertResult();
}
@Test
public void doubleError() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new ParallelInvalid()
.reduce(new BiFunction<Object, Object, Object>() {
@Override
public Object apply(Object a, Object b) throws Exception {
return "" + a + b;
}
})
.test()
.assertFailure(TestException.class);
assertFalse(errors.isEmpty());
for (Throwable ex : errors) {
assertTrue(ex.toString(), ex.getCause() instanceof TestException);
}
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void reducerCrash() {
Flowable.range(1, 4)
.parallel(2)
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
if (b == 3) {
throw new TestException();
}
return a + b;
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void reducerCrash2() {
Flowable.range(1, 4)
.parallel(2)
.reduce(new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
if (a == 1 + 3) {
throw new TestException();
}
return a + b;
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeParallelToFlowable(
pf -> pf.reduce((a, b) -> a)
);
}
}
| ParallelReduceFullTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/int2darrays/Int2DArrays_assertNumberOfRows_Test.java | {
"start": 798,
"end": 1070
} | class ____ extends Int2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// WHEN
int2DArrays.assertNumberOfRows(info, actual, 2);
// THEN
verify(arrays2d).assertNumberOfRows(info, failures, actual, 2);
}
}
| Int2DArrays_assertNumberOfRows_Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/TwoInputStreamOperator.java | {
"start": 1503,
"end": 1728
} | class ____ you want to
* implement a custom operator.
*
* @param <IN1> The input type of the operator
* @param <IN2> The input type of the operator
* @param <OUT> The output type of the operator
*/
@PublicEvolving
public | if |
java | apache__camel | components/camel-mustache/src/test/java/org/apache/camel/component/mustache/MustacheLetterTest.java | {
"start": 1156,
"end": 2286
} | class ____ extends CamelTestSupport {
// START SNIPPET: e1
private Exchange createLetter() {
Exchange exchange = context.getEndpoint("direct:a").createExchange();
Message msg = exchange.getIn();
msg.setHeader("firstName", "Claus");
msg.setHeader("lastName", "Ibsen");
msg.setHeader("item", "Camel in Action");
msg.setBody("PS: Next beer is on me, James");
return exchange;
}
@Test
public void testMustacheLetter() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.message(0).body().contains("Thanks for the order of Camel in Action");
template.send("direct:a", createLetter());
mock.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:a")
.to("mustache:letter.mustache")
.to("mock:result");
}
};
}
// END SNIPPET: e1
}
| MustacheLetterTest |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/aot/QueriesFactoryUnitTests.java | {
"start": 1986,
"end": 3472
} | class ____ {
QueriesFactory factory;
@BeforeEach
void setUp() {
RepositoryConfigurationSource configSource = mock(RepositoryConfigurationSource.class);
EntityManagerFactory entityManagerFactory = mock(EntityManagerFactory.class);
factory = new QueriesFactory(configSource, entityManagerFactory, this.getClass().getClassLoader());
}
@Test // GH-4029
void stringQueryShouldResolveEntityNameFromJakartaAnnotationIfPresent() throws NoSuchMethodException {
RepositoryInformation repositoryInformation = new AotRepositoryInformation(
AbstractRepositoryMetadata.getMetadata(MyRepository.class), MyRepository.class, Collections.emptyList());
Method method = MyRepository.class.getMethod("someFind");
JpaQueryMethod queryMethod = new JpaQueryMethod(method, repositoryInformation,
new SpelAwareProxyProjectionFactory(), mock(QueryExtractor.class));
AotQueries generatedQueries = factory.createQueries(repositoryInformation,
queryMethod.getResultProcessor().getReturnedType(), QueryEnhancerSelector.DEFAULT_SELECTOR,
MergedAnnotations.from(method).get(Query.class), queryMethod);
assertThat(generatedQueries.result()).asInstanceOf(type(StringAotQuery.class))
.extracting(StringAotQuery::getQueryString).isEqualTo("select t from CustomNamed t");
assertThat(generatedQueries.count()).asInstanceOf(type(StringAotQuery.class))
.extracting(StringAotQuery::getQueryString).isEqualTo("select count(t) from CustomNamed t");
}
| QueriesFactoryUnitTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferBuilderAndConsumerTest.java | {
"start": 12859,
"end": 13117
} | class ____ implements BufferRecycler {
int recycleInvocationCounter;
@Override
public void recycle(MemorySegment memorySegment) {
recycleInvocationCounter++;
memorySegment.free();
}
}
}
| CountedRecycler |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/AnnotationMetadataTests.java | {
"start": 32092,
"end": 32315
} | interface ____ {
}
@TestComponentScan(basePackages = "A", basePackageClasses = String.class)
@ScanPackageC
@ScanPackageD
@TestComponentScan(basePackages = "B", basePackageClasses = Integer.class)
static | ScanPackagesCandD |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java | {
"start": 3044,
"end": 34239
} | class ____ tests. Visible for testing.
*/
ShardRouting(
ShardId shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state,
RecoverySource recoverySource,
UnassignedInfo unassignedInfo,
RelocationFailureInfo relocationFailureInfo,
AllocationId allocationId,
long expectedShardSize,
Role role
) {
this.shardId = shardId;
this.currentNodeId = currentNodeId;
this.relocatingNodeId = relocatingNodeId;
this.primary = primary;
this.state = state;
this.recoverySource = recoverySource;
this.unassignedInfo = unassignedInfo;
this.relocationFailureInfo = relocationFailureInfo;
this.allocationId = allocationId;
this.expectedShardSize = expectedShardSize;
this.role = role;
this.targetRelocatingShard = initializeTargetRelocatingShard();
assert assertConsistent();
}
private boolean assertConsistent() {
assert relocationFailureInfo != null : "relocation failure info must be always set";
assert role != null : "role must be always set";
assert primary == false || role.isPromotableToPrimary() : "shard with unpromotable role was promoted to primary: " + this;
switch (state) {
case UNASSIGNED -> {
assert currentNodeId == null : state + " shard must not be assigned to a node " + this;
assert relocatingNodeId == null : state + " shard must not be relocating to a node " + this;
assert unassignedInfo != null : state + " shard must be created with unassigned info " + this;
assert recoverySource != null : state + " shard must be created with a recovery source" + this;
assert primary ^ recoverySource == PeerRecoverySource.INSTANCE : "replica shards always recover from primary" + this;
}
case INITIALIZING -> {
assert currentNodeId != null : state + " shard must be assigned to a node " + this;
// relocatingNodeId is not set for initializing shard but set for relocating shard counterpart
// unassignedInfo is kept after starting unassigned shard but not present for relocating shard counterpart
assert recoverySource != null : state + "shard must be created with a recovery source" + this;
assert primary || recoverySource == PeerRecoverySource.INSTANCE : "replica shards always recover from primary" + this;
}
case STARTED -> {
assert currentNodeId != null : state + " shard must be assigned to a node " + this;
assert relocatingNodeId == null : state + " shard must not be relocating to a node " + this;
assert unassignedInfo == null : state + " shard must be created without unassigned info " + this;
assert recoverySource == null : state + " shard must be created without a recovery source" + this;
}
case RELOCATING -> {
assert currentNodeId != null : state + " shard must be assigned to a node " + this;
assert relocatingNodeId != null : state + " shard must be relocating to a node " + this;
assert unassignedInfo == null : state + " shard must be created without unassigned info " + this;
assert recoverySource == null : state + " shard must be created without a recovery source" + this;
}
}
return true;
}
@Nullable
private ShardRouting initializeTargetRelocatingShard() {
if (state == ShardRoutingState.RELOCATING) {
return new ShardRouting(
shardId,
relocatingNodeId,
currentNodeId,
primary,
ShardRoutingState.INITIALIZING,
PeerRecoverySource.INSTANCE,
unassignedInfo,
RelocationFailureInfo.NO_FAILURES,
AllocationId.newTargetRelocation(allocationId),
expectedShardSize,
role
);
} else {
return null;
}
}
/**
* Creates a new unassigned shard.
*/
public static ShardRouting newUnassigned(
ShardId shardId,
boolean primary,
RecoverySource recoverySource,
UnassignedInfo unassignedInfo,
Role role
) {
return new ShardRouting(
shardId,
null,
null,
primary,
ShardRoutingState.UNASSIGNED,
recoverySource,
unassignedInfo,
RelocationFailureInfo.NO_FAILURES,
null,
UNAVAILABLE_EXPECTED_SHARD_SIZE,
role
);
}
public Index index() {
return shardId.getIndex();
}
/**
* The index name.
*/
public String getIndexName() {
return shardId.getIndexName();
}
/**
* The shard id.
*/
public int id() {
return shardId.id();
}
/**
* The shard id.
*/
public int getId() {
return id();
}
/**
* The shard is unassigned (not allocated to any node).
*/
public boolean unassigned() {
return state == ShardRoutingState.UNASSIGNED;
}
/**
* The shard is initializing (usually recovering either from peer shard
* or from gateway).
*/
public boolean initializing() {
return state == ShardRoutingState.INITIALIZING;
}
/**
* Returns <code>true</code> iff the this shard is currently
* {@link ShardRoutingState#STARTED started} or
* {@link ShardRoutingState#RELOCATING relocating} to another node.
* Otherwise <code>false</code>
*/
public boolean active() {
return started() || relocating();
}
/**
* The shard is in started mode.
*/
public boolean started() {
return state == ShardRoutingState.STARTED;
}
/**
* Returns <code>true</code> iff this shard is currently relocating to
* another node. Otherwise <code>false</code>
*
* @see ShardRoutingState#RELOCATING
*/
public boolean relocating() {
return state == ShardRoutingState.RELOCATING;
}
/**
* Returns <code>true</code> iff this shard is assigned to a node ie. not
* {@link ShardRoutingState#UNASSIGNED unassigned}. Otherwise <code>false</code>
*/
public boolean assignedToNode() {
return currentNodeId != null;
}
/**
* The current node id the shard is allocated on.
*/
public String currentNodeId() {
return this.currentNodeId;
}
/**
* The relocating node id the shard is either relocating to or relocating from.
*/
public String relocatingNodeId() {
return this.relocatingNodeId;
}
/**
* Returns a shard routing representing the target shard.
* The target shard routing will be the INITIALIZING state and have relocatingNodeId set to the
* source node.
*/
public ShardRouting getTargetRelocatingShard() {
assert relocating();
return targetRelocatingShard;
}
/**
* Additional metadata on why the shard is/was unassigned. The metadata is kept around
* until the shard moves to STARTED.
*/
@Nullable
public UnassignedInfo unassignedInfo() {
return unassignedInfo;
}
@Nullable
public RelocationFailureInfo relocationFailureInfo() {
return relocationFailureInfo;
}
/**
* An id that uniquely identifies an allocation.
*/
@Nullable
public AllocationId allocationId() {
return this.allocationId;
}
/**
* Returns <code>true</code> iff this shard is a primary.
*/
public boolean primary() {
return this.primary;
}
/**
* The shard state.
*/
public ShardRoutingState state() {
return this.state;
}
/**
* The shard id.
*/
public ShardId shardId() {
return shardId;
}
/**
* A shard iterator with just this shard in it.
*/
public ShardIterator shardsIt() {
return new ShardIterator(shardId, List.of(this));
}
public ShardRouting(ShardId shardId, StreamInput in) throws IOException {
this.shardId = shardId;
currentNodeId = DiscoveryNode.deduplicateNodeIdentifier(in.readOptionalString());
relocatingNodeId = DiscoveryNode.deduplicateNodeIdentifier(in.readOptionalString());
primary = in.readBoolean();
state = ShardRoutingState.fromValue(in.readByte());
if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
recoverySource = RecoverySource.readFrom(in);
} else {
recoverySource = null;
}
unassignedInfo = in.readOptionalWriteable(UnassignedInfo::fromStreamInput);
if (in.getTransportVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) {
relocationFailureInfo = RelocationFailureInfo.readFrom(in);
} else {
relocationFailureInfo = RelocationFailureInfo.NO_FAILURES;
}
allocationId = in.readOptionalWriteable(AllocationId::new);
if (state == ShardRoutingState.RELOCATING
|| state == ShardRoutingState.INITIALIZING
|| (state == ShardRoutingState.STARTED && in.getTransportVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) {
expectedShardSize = in.readLong();
} else {
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
}
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
role = Role.readFrom(in);
} else {
role = Role.DEFAULT;
}
targetRelocatingShard = initializeTargetRelocatingShard();
}
public ShardRouting(StreamInput in) throws IOException {
this(new ShardId(in), in);
}
/**
* Writes shard information to {@link StreamOutput} without writing index name and shard id
*
* @param out {@link StreamOutput} to write shard information to
* @throws IOException if something happens during write
*/
public void writeToThin(StreamOutput out) throws IOException {
out.writeOptionalString(currentNodeId);
out.writeOptionalString(relocatingNodeId);
out.writeBoolean(primary);
out.writeByte(state.value());
if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
recoverySource.writeTo(out);
}
out.writeOptionalWriteable(unassignedInfo);
if (out.getTransportVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) {
relocationFailureInfo.writeTo(out);
}
out.writeOptionalWriteable(allocationId);
if (state == ShardRoutingState.RELOCATING
|| state == ShardRoutingState.INITIALIZING
|| (state == ShardRoutingState.STARTED && out.getTransportVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) {
out.writeLong(expectedShardSize);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
role.writeTo(out);
} else if (role != Role.DEFAULT) {
throw new IllegalStateException(
Strings.format("cannot send role [%s] to node with version [%s]", role, out.getTransportVersion().toReleaseVersion())
);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
shardId.writeTo(out);
writeToThin(out);
}
public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource) {
assert this.unassignedInfo != null : "can only update unassigned info if it is already set";
assert this.unassignedInfo.delayed() || (unassignedInfo.delayed() == false) : "cannot transition from non-delayed to delayed";
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
state,
recoverySource,
unassignedInfo,
relocationFailureInfo,
allocationId,
expectedShardSize,
role
);
}
public ShardRouting updateRelocationFailure(RelocationFailureInfo relocationFailureInfo) {
assert this.relocationFailureInfo != null : "can only update relocation failure info info if it is already set";
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
state,
recoverySource,
unassignedInfo,
relocationFailureInfo,
allocationId,
expectedShardSize,
role
);
}
/**
* Moves the shard to unassigned state.
*/
public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
assert state != ShardRoutingState.UNASSIGNED : this;
final RecoverySource recoverySource;
if (active()) {
if (primary()) {
recoverySource = ExistingStoreRecoverySource.INSTANCE;
} else {
recoverySource = PeerRecoverySource.INSTANCE;
}
} else {
recoverySource = recoverySource();
}
return new ShardRouting(
shardId,
null,
null,
primary,
ShardRoutingState.UNASSIGNED,
recoverySource,
unassignedInfo,
RelocationFailureInfo.NO_FAILURES,
null,
UNAVAILABLE_EXPECTED_SHARD_SIZE,
role
);
}
/**
* Initializes an unassigned shard on a node.
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
assert state == ShardRoutingState.UNASSIGNED : this;
assert relocatingNodeId == null : this;
final AllocationId allocationId;
if (existingAllocationId == null) {
allocationId = AllocationId.newInitializing();
} else {
allocationId = AllocationId.newInitializing(existingAllocationId);
}
return new ShardRouting(
shardId,
nodeId,
null,
primary,
ShardRoutingState.INITIALIZING,
recoverySource,
unassignedInfo,
RelocationFailureInfo.NO_FAILURES,
allocationId,
expectedShardSize,
role
);
}
/**
* Relocate the shard to another node.
*
* @param relocatingNodeId id of the node to relocate the shard
*/
public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) {
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
ShardRoutingState.RELOCATING,
recoverySource,
null,
relocationFailureInfo,
AllocationId.newRelocation(allocationId),
expectedShardSize,
role
);
}
/**
* Cancel relocation of a shard. The shards state must be set
* to <code>RELOCATING</code>.
*/
public ShardRouting cancelRelocation() {
assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
ShardRoutingState.STARTED,
recoverySource,
null,
relocationFailureInfo.incFailedRelocations(),
AllocationId.cancelRelocation(allocationId),
UNAVAILABLE_EXPECTED_SHARD_SIZE,
role
);
}
/**
* Removes relocation source of a non-primary shard. The shard state must be <code>INITIALIZING</code>.
* This allows the non-primary shard to continue recovery from the primary even though its non-primary
* relocation source has failed.
*/
public ShardRouting removeRelocationSource() {
assert primary == false : this;
assert state == ShardRoutingState.INITIALIZING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
state,
recoverySource,
unassignedInfo,
relocationFailureInfo,
AllocationId.finishRelocation(allocationId),
expectedShardSize,
role
);
}
/**
* Reinitializes a replica shard, giving it a fresh allocation id
*/
public ShardRouting reinitializeReplicaShard() {
assert state == ShardRoutingState.INITIALIZING : this;
assert primary == false : this;
assert isRelocationTarget() == false : this;
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
ShardRoutingState.INITIALIZING,
recoverySource,
unassignedInfo,
relocationFailureInfo,
AllocationId.newInitializing(),
expectedShardSize,
role
);
}
/**
* Set the shards state to <code>STARTED</code>. The shards state must be
* <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
* canceled.
*/
public ShardRouting moveToStarted(long expectedShardSize) {
assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this;
AllocationId allocationId = this.allocationId;
if (allocationId.getRelocationId() != null) {
// relocation target
allocationId = AllocationId.finishRelocation(allocationId);
}
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
ShardRoutingState.STARTED,
null,
null,
RelocationFailureInfo.NO_FAILURES,
allocationId,
expectedShardSize,
role
);
}
/**
* Make the active shard primary unless it's not primary
*
* @throws IllegalShardRoutingStateException if shard is already a primary
*/
public ShardRouting moveActiveReplicaToPrimary() {
assert active() : "expected an active shard " + this;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
true,
state,
recoverySource,
unassignedInfo,
relocationFailureInfo,
allocationId,
expectedShardSize,
role
);
}
/**
* Set the unassigned primary shard to non-primary
*
* @throws IllegalShardRoutingStateException if shard is already a replica
*/
public ShardRouting moveUnassignedFromPrimary() {
assert state == ShardRoutingState.UNASSIGNED : "expected an unassigned shard " + this;
if (primary == false) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
false,
state,
PeerRecoverySource.INSTANCE,
unassignedInfo,
relocationFailureInfo,
allocationId,
expectedShardSize,
role
);
}
/**
* returns true if this routing has the same allocation ID as another.
* <p>
* Note: if both shard routing has a null as their {@link #allocationId()}, this method returns false as the routing describe
* no allocation at all..
**/
public boolean isSameAllocation(ShardRouting other) {
boolean b = this.allocationId != null && other.allocationId != null && this.allocationId.getId().equals(other.allocationId.getId());
assert b == false || this.currentNodeId.equals(other.currentNodeId)
: "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]";
return b;
}
/**
* Returns <code>true</code> if this shard is a relocation target for another shard
* (i.e., was created with {@link #initializeTargetRelocatingShard()}
*/
public boolean isRelocationTarget() {
return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null;
}
/** returns true if the routing is the relocation target of the given routing */
public boolean isRelocationTargetOf(ShardRouting other) {
boolean b = this.allocationId != null
&& other.allocationId != null
&& this.state == ShardRoutingState.INITIALIZING
&& this.allocationId.getId().equals(other.allocationId.getRelocationId());
assert b == false || other.state == ShardRoutingState.RELOCATING
: "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + this + "], other [" + other + "]";
assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId())
: "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || other.currentNodeId().equals(this.relocatingNodeId)
: "ShardRouting is a relocation target but source current node id isn't equal to target relocating node."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || this.currentNodeId().equals(other.relocatingNodeId)
: "ShardRouting is a relocation target but current node id isn't equal to source relocating node."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || this.shardId.equals(other.shardId)
: "ShardRouting is a relocation target but both indexRoutings are not of the same shard id."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || this.primary == other.primary
: "ShardRouting is a relocation target but primary flag is different." + " This [" + this + "], target [" + other + "]";
return b;
}
/** returns true if the routing is the relocation source for the given routing */
public boolean isRelocationSourceOf(ShardRouting other) {
boolean b = this.allocationId != null
&& other.allocationId != null
&& other.state == ShardRoutingState.INITIALIZING
&& other.allocationId.getId().equals(this.allocationId.getRelocationId());
assert b == false || this.state == ShardRoutingState.RELOCATING
: "ShardRouting is a relocation source but shard state isn't relocating. This [" + this + "], other [" + other + "]";
assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId())
: "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || this.currentNodeId().equals(other.relocatingNodeId)
: "ShardRouting is a relocation source but current node isn't equal to other's relocating node."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || other.currentNodeId().equals(this.relocatingNodeId)
: "ShardRouting is a relocation source but relocating node isn't equal to other's current node."
+ " This ["
+ this
+ "], other ["
+ other
+ "]";
assert b == false || this.shardId.equals(other.shardId)
: "ShardRouting is a relocation source but both indexRoutings are not of the same shard."
+ " This ["
+ this
+ "], target ["
+ other
+ "]";
assert b == false || this.primary == other.primary
: "ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]";
return b;
}
/** returns true if the current routing is identical to the other routing in all but meta fields, i.e., unassigned info */
public boolean equalsIgnoringMetadata(ShardRouting other) {
return primary == other.primary
&& shardId.equals(other.shardId)
&& Objects.equals(currentNodeId, other.currentNodeId)
&& Objects.equals(relocatingNodeId, other.relocatingNodeId)
&& Objects.equals(allocationId, other.allocationId)
&& state == other.state
&& Objects.equals(recoverySource, other.recoverySource)
&& role == other.role;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ShardRouting that = (ShardRouting) o;
return equalsIgnoringMetadata(that)
&& Objects.equals(unassignedInfo, that.unassignedInfo)
&& Objects.equals(relocationFailureInfo, that.relocationFailureInfo);
}
/**
 * Cache hash code in the same way as {@link String#hashCode()}) using racy single-check idiom
 * as it is mainly used in single-threaded code ({@link BalancedShardsAllocator}).
 */
private int hashCode; // default to 0; 0 doubles as the "not yet computed" sentinel
@Override
public int hashCode() {
// racy single-check: concurrent callers may each compute h, but the result is
// deterministic for immutable state, so publishing it without synchronization is benign
int h = hashCode;
if (h == 0) {
// classic 31-multiplier accumulation over the same fields equals() compares
h = shardId.hashCode();
h = 31 * h + (currentNodeId != null ? currentNodeId.hashCode() : 0);
h = 31 * h + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
h = 31 * h + (primary ? 1 : 0);
h = 31 * h + (state != null ? state.hashCode() : 0);
h = 31 * h + (recoverySource != null ? recoverySource.hashCode() : 0);
h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0);
h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
h = 31 * h + (relocationFailureInfo != null ? relocationFailureInfo.hashCode() : 0);
h = 31 * h + role.hashCode();
hashCode = h;
}
return h;
}
@Override
public String toString() {
// delegate to the compact single-line description used throughout logging
return shortSummary();
}
/**
 * A short description of the shard, e.g. {@code [index][0], node[abc], [P], s[STARTED]}.
 * Optional segments (relocation target, non-default role, recovery source, allocation id,
 * unassigned info, expected size) are appended only when present.
 */
public String shortSummary() {
    StringBuilder summary = new StringBuilder();
    summary.append('[').append(shardId.getIndexName()).append(']').append('[').append(shardId.getId()).append(']');
    summary.append(", node[").append(currentNodeId).append("], ");
    if (relocatingNodeId != null) {
        summary.append("relocating [").append(relocatingNodeId).append("], ");
    }
    if (role != Role.DEFAULT) {
        summary.append("[").append(role).append("], ");
    }
    // primary vs. replica marker
    summary.append(primary ? "[P]" : "[R]");
    if (recoverySource != null) {
        summary.append(", recovery_source[").append(recoverySource).append("]");
    }
    summary.append(", s[").append(state).append("]");
    if (allocationId != null) {
        summary.append(", a").append(allocationId);
    }
    if (unassignedInfo != null) {
        summary.append(", ").append(unassignedInfo);
    }
    summary.append(", ").append(relocationFailureInfo);
    if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
        summary.append(", expected_shard_size[").append(expectedShardSize).append("]");
    }
    return summary.toString();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// always-present identity fields
builder.startObject()
.field("state", state())
.field("primary", primary())
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", id())
.field("index", getIndexName());
// expected size is only meaningful while the shard is still moving/initializing
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE && state != ShardRoutingState.STARTED) {
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}
if (recoverySource != null) {
builder.field("recovery_source", recoverySource);
}
if (allocationId != null) {
builder.field("allocation_id");
allocationId.toXContent(builder, params);
}
if (unassignedInfo != null) {
unassignedInfo.toXContent(builder, params);
}
relocationFailureInfo.toXContent(builder, params);
role.toXContent(builder, params);
return builder.endObject();
}
/**
 * Returns the expected shard size for {@link ShardRoutingState#RELOCATING} and {@link ShardRoutingState#INITIALIZING}
 * shards. If it's size is not available {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} will be returned.
 *
 * @return the expected shard size in bytes, or {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} when unknown
 */
public long getExpectedShardSize() {
return expectedShardSize;
}
/**
 * Returns recovery source for the given shard. Replica shards always recover from the primary {@link PeerRecoverySource}.
 *
 * @return recovery source or null if shard is {@link #active()}
 */
@Nullable
public RecoverySource recoverySource() {
return recoverySource;
}
/**
 * Returns the {@link Role} assigned to this shard copy.
 */
public Role role() {
return role;
}
/**
 * Returns whether this shard copy's role allows it to be promoted to primary.
 * Delegates to {@link Role#isPromotableToPrimary()}.
 */
public boolean isPromotableToPrimary() {
return role.isPromotableToPrimary();
}
/**
 * Determine if role searchable. Consumers should prefer {@link IndexRoutingTable#readyForSearch()} to determine if an index
 * is ready to be searched.
 *
 * @return whether this copy's role is searchable (role-level check only; does not imply the shard is started)
 */
public boolean isSearchable() {
return role.isSearchable();
}
public | or |
java | netty__netty | transport/src/main/java/io/netty/channel/AbstractChannel.java | {
"start": 37832,
"end": 38570
} | class ____ extends DefaultChannelPromise {
CloseFuture(AbstractChannel ch) {
super(ch);
}
@Override
public ChannelPromise setSuccess() {
throw new IllegalStateException();
}
@Override
public ChannelPromise setFailure(Throwable cause) {
throw new IllegalStateException();
}
@Override
public boolean trySuccess() {
throw new IllegalStateException();
}
@Override
public boolean tryFailure(Throwable cause) {
throw new IllegalStateException();
}
boolean setClosed() {
return super.trySuccess();
}
}
private static final | CloseFuture |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/transaction/JtaPlatformSettingProvider.java | {
"start": 298,
"end": 497
} | class ____ implements SettingProvider.Provider<TestingJtaPlatformImpl> {
@Override
public TestingJtaPlatformImpl getSetting() {
return TestingJtaPlatformImpl.INSTANCE;
}
}
| JtaPlatformSettingProvider |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ExceptionHandler.java | {
"start": 985,
"end": 1225
} | interface ____<E extends Throwable> {
/**
* This method is called when the handler should deal with an exception.
*
* @param exception The exception to handle.
*/
void handleException(E exception);
}
| ExceptionHandler |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/ActiveReadersTests.java | {
"start": 1181,
"end": 8477
} | class ____ extends ESTestCase {
public void testAddAndGetReader() {
int numberOfTestContexts = 50;
AtomicLong idGenerator = new AtomicLong();
final String sessionId = UUIDs.randomBase64UUID();
List<String> relocatedSessionIds = randomList(5, 5, UUIDs::randomBase64UUID);
ActiveReaders activeReaders = new ActiveReaders(sessionId);
// add a couple of readers, both from same session and relocated ones (different sessionId)
Map<ShardSearchContextId, ReaderContext> controlData = new HashMap<>();
Queue<Long> randomUniqueLongs = new LinkedList<>(
randomSet(numberOfTestContexts, numberOfTestContexts, () -> randomLongBetween(1, 3 * numberOfTestContexts))
);
for (int i = 0; i < numberOfTestContexts; i++) {
final ShardSearchContextId id;
final ReaderContext readerContext;
if (randomBoolean()) {
// normal context from same session
id = new ShardSearchContextId(sessionId, idGenerator.incrementAndGet());
readerContext = createRandomReaderContext(id);
activeReaders.put(readerContext);
} else {
// relocated context from different session
id = new ShardSearchContextId(
randomFrom(relocatedSessionIds),
requireNonNull(randomUniqueLongs.poll()),
UUIDs.randomBase64UUID()
);
long mappingKey = idGenerator.incrementAndGet();
activeReaders.generateRelocationMapping(id, mappingKey);
readerContext = createRandomReaderContext(new ShardSearchContextId(sessionId, mappingKey, id.getSearcherId()));
activeReaders.put(readerContext);
}
controlData.put(id, readerContext);
}
// check that we can retrieve all of them again correctly
for (ShardSearchContextId contextId : controlData.keySet()) {
assertSame(controlData.get(contextId), activeReaders.get(contextId));
}
// check a few non-existing context ids
assertNull(activeReaders.get(new ShardSearchContextId(sessionId, idGenerator.get() + randomLongBetween(1, 100))));
assertNull(activeReaders.get(new ShardSearchContextId(UUIDs.randomBase64UUID(), randomLongBetween(0, idGenerator.get() * 2))));
}
public void testAddPreventAddingSameIdTwice() {
final String primarySessionId = UUIDs.randomBase64UUID();
AtomicLong idGenerator = new AtomicLong();
ActiveReaders activeReaders = new ActiveReaders(primarySessionId);
long id = randomLongBetween(0, 1000);
String readerId = randomBoolean() ? null : UUIDs.randomBase64UUID();
ReaderContext readerContext = createRandomReaderContext(new ShardSearchContextId(primarySessionId, id, readerId));
activeReaders.put(readerContext);
// putting same context should throw error
expectThrows(AssertionError.class, () -> activeReaders.put(readerContext));
// putting context with same id should also throw
ReaderContext anotherReaderContext = createRandomReaderContext(new ShardSearchContextId(primarySessionId, id, readerId));
expectThrows(AssertionError.class, () -> activeReaders.put(anotherReaderContext));
}
public void testRemove() {
final String sessionId = UUIDs.randomBase64UUID();
List<String> relocatedSessionIds = randomList(5, 5, UUIDs::randomBase64UUID);
int numberOfTestContexts = 50;
Queue<Long> randomUniqueLongs = new LinkedList<>(
randomSet(numberOfTestContexts, numberOfTestContexts, () -> randomLongBetween(1, 3 * numberOfTestContexts))
);
AtomicLong idGenerator = new AtomicLong();
ActiveReaders activeReaders = new ActiveReaders(sessionId);
// add a couple of readers, both from same session and relocated ones (different sessionId)
Map<ShardSearchContextId, ReaderContext> controlData = new HashMap<>();
int activeRelocatedContexts = 0;
for (int i = 0; i < numberOfTestContexts; i++) {
final ShardSearchContextId id;
final ReaderContext readerContext;
if (randomBoolean()) {
// normal context from same session
id = new ShardSearchContextId(sessionId, idGenerator.incrementAndGet());
readerContext = createRandomReaderContext(id);
activeReaders.put(readerContext);
} else {
// relocated context from different session
id = new ShardSearchContextId(
randomFrom(relocatedSessionIds),
requireNonNull(randomUniqueLongs.poll()),
UUIDs.randomBase64UUID()
);
long mappingKey = idGenerator.incrementAndGet();
activeReaders.generateRelocationMapping(id, mappingKey);
readerContext = createRandomReaderContext(new ShardSearchContextId(sessionId, mappingKey, id.getSearcherId()));
activeReaders.put(readerContext);
activeRelocatedContexts++;
}
controlData.put(id, readerContext);
}
assertEquals(controlData.size(), activeReaders.size());
assertEquals(activeRelocatedContexts, activeReaders.relocationMapSize());
// remove all contexts in random order
while (controlData.isEmpty() == false) {
int lastReaderCount = activeReaders.size();
int lastRelocatopnMapCount = activeReaders.relocationMapSize();
ShardSearchContextId contextId = randomFrom(controlData.keySet());
assertSame(controlData.remove(contextId), activeReaders.remove(contextId));
assertEquals(lastReaderCount - 1, activeReaders.size());
if (contextId.getSessionId().equals(sessionId) == false) {
assertEquals(lastRelocatopnMapCount - 1, activeReaders.relocationMapSize());
} else {
assertEquals(lastRelocatopnMapCount, activeReaders.relocationMapSize());
}
// trying to remove same id twice should not throw error but return null
assertNull(activeReaders.remove(contextId));
}
assertEquals(0, activeReaders.size());
assertEquals(0, activeReaders.relocationMapSize());
}
private static ReaderContext createRandomReaderContext(ShardSearchContextId id) {
IndexShard mockShard = Mockito.mock(IndexShard.class);
ThreadPool mockThreadPool = Mockito.mock(ThreadPool.class);
Mockito.when(mockThreadPool.relativeTimeInMillis()).thenReturn(System.currentTimeMillis());
Mockito.when(mockShard.getThreadPool()).thenReturn(mockThreadPool);
return randomBoolean() || id.isRetryable()
? new ReaderContext(id, null, mockShard, null, randomPositiveTimeValue().millis(), randomBoolean())
: new LegacyReaderContext(
id,
null,
mockShard,
null,
Mockito.mock(ShardSearchRequest.class),
randomPositiveTimeValue().millis()
);
}
}
| ActiveReadersTests |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/show/MySqlShowTest_16_createView.java | {
"start": 929,
"end": 1811
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SHOW CREATE VIEW view_name";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertEquals(0, visitor.getOrderByColumns().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("mytable")));
assertEquals("SHOW CREATE VIEW view_name", stmt.toString());
}
}
| MySqlShowTest_16_createView |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/NotJavadocTest.java | {
"start": 1904,
"end": 2049
} | class ____ {
void test() {
// BUG: Diagnostic contains: nested
/** Not Javadoc. */
| Test |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/AbstractReferenceConfigTest.java | {
"start": 9400,
"end": 9459
} | class ____ extends AbstractReferenceConfig {}
}
| ReferenceConfig |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/RouteOnDemandReloadStrategy.java | {
"start": 1465,
"end": 5243
} | class ____ extends RouteWatcherReloadStrategy {
private static final Logger LOG = LoggerFactory.getLogger(RouteOnDemandReloadStrategy.class);
public RouteOnDemandReloadStrategy() {
setScheduler(false);
}
public RouteOnDemandReloadStrategy(String directory) {
super(directory);
setScheduler(false);
}
public RouteOnDemandReloadStrategy(String directory, boolean recursive) {
super(directory, recursive);
setScheduler(false);
}
/**
* Triggers on-demand reloading
*/
@ManagedOperation(description = "Trigger on-demand reloading")
public void onReload() {
onReload("JMX Management");
}
/**
* Triggers on-demand reloading
*/
@Override
public void onReload(Object source) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
try {
setLastError(null);
// use bootstrap classloader from camel so its consistent
ClassLoader acl = getCamelContext().getApplicationContextClassLoader();
if (acl != null) {
Thread.currentThread().setContextClassLoader(acl);
}
doOnReload(source);
incSucceededCounter();
} catch (Exception e) {
setLastError(e);
incFailedCounter();
LOG.warn("Error reloading routes due to {}. This exception is ignored.", e.getMessage(), e);
} finally {
if (cl != null) {
Thread.currentThread().setContextClassLoader(cl);
}
}
}
protected void doOnReload(Object source) throws Exception {
List<Resource> properties = new ArrayList<>();
List<Resource> groovy = new ArrayList<>();
List<Resource> routes = new ArrayList<>();
for (Resource res : findReloadedResources(source)) {
String ext = FileUtil.onlyExt(res.getLocation());
if ("properties".equals(ext)) {
properties.add(res);
} else if ("groovy".equals(ext)) {
groovy.add(res);
} else {
routes.add(res);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("On-demand reload scanned {} files (properties: {}, routes: {}, groovy: {})",
properties.size() + routes.size(), properties.size(), routes.size(), groovy.size());
}
// reload properties first
boolean reloaded = false;
for (Resource res : properties) {
reloaded |= onPropertiesReload(res, false);
}
for (Resource res : groovy) {
reloaded |= onGroovyReload(res, false);
}
boolean removeEverything = isRemoveEverything(routes);
if (reloaded || !routes.isEmpty()) {
// trigger routes to also reload if properties was reloaded
onRouteReload(routes, removeEverything);
} else {
// rare situation where all routes are deleted
onRemoveEverything(removeEverything);
}
}
protected boolean isRemoveEverything(List<Resource> routes) {
return routes.isEmpty();
}
protected void onRemoveEverything(boolean removeEverything) {
onRouteReload(null, removeEverything);
}
protected List<Resource> findReloadedResources(Object source) throws Exception {
List<Resource> answer = new ArrayList<>();
File dir = new File(getFolder());
for (Path path : ResourceHelper.findInFileSystem(dir.toPath(), getPattern())) {
Resource res = ResourceHelper.resolveResource(getCamelContext(), "file:" + path.toString());
answer.add(res);
}
return answer;
}
}
| RouteOnDemandReloadStrategy |
java | apache__camel | components/camel-microprofile/camel-microprofile-health/src/test/java/org/apache/camel/microprofile/health/CamelMicroProfileHealthTestHelper.java | {
"start": 4880,
"end": 5329
} | class ____ extends DefaultComponent {
private final MyHealthCheck check = new MyHealthCheck("my-hc");
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
return new MyEndpoint(uri, this, check);
}
public void setState(HealthCheck.State state) {
check.setState(state);
}
}
public static | MyComponent |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/ConsulComponentBuilderFactory.java | {
"start": 16883,
"end": 21408
} | class ____
extends AbstractComponentBuilder<ConsulComponent>
implements ConsulComponentBuilder {
@Override
protected ConsulComponent buildConcreteComponent() {
return new ConsulComponent();
}
private org.apache.camel.component.consul.ConsulConfiguration getOrCreateConfiguration(ConsulComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.consul.ConsulConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "connectTimeout": getOrCreateConfiguration((ConsulComponent) component).setConnectTimeout((java.time.Duration) value); return true;
case "key": getOrCreateConfiguration((ConsulComponent) component).setKey((java.lang.String) value); return true;
case "pingInstance": getOrCreateConfiguration((ConsulComponent) component).setPingInstance((boolean) value); return true;
case "readTimeout": getOrCreateConfiguration((ConsulComponent) component).setReadTimeout((java.time.Duration) value); return true;
case "tags": getOrCreateConfiguration((ConsulComponent) component).setTags((java.lang.String) value); return true;
case "url": getOrCreateConfiguration((ConsulComponent) component).setUrl((java.lang.String) value); return true;
case "valueAsString": getOrCreateConfiguration((ConsulComponent) component).setValueAsString((boolean) value); return true;
case "writeTimeout": getOrCreateConfiguration((ConsulComponent) component).setWriteTimeout((java.time.Duration) value); return true;
case "bridgeErrorHandler": ((ConsulComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "action": getOrCreateConfiguration((ConsulComponent) component).setAction((java.lang.String) value); return true;
case "lazyStartProducer": ((ConsulComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((ConsulComponent) component).setAutowiredEnabled((boolean) value); return true;
case "configuration": ((ConsulComponent) component).setConfiguration((org.apache.camel.component.consul.ConsulConfiguration) value); return true;
case "consistencyMode": getOrCreateConfiguration((ConsulComponent) component).setConsistencyMode((org.kiwiproject.consul.option.ConsistencyMode) value); return true;
case "consulClient": getOrCreateConfiguration((ConsulComponent) component).setConsulClient((org.kiwiproject.consul.Consul) value); return true;
case "datacenter": getOrCreateConfiguration((ConsulComponent) component).setDatacenter((java.lang.String) value); return true;
case "nearNode": getOrCreateConfiguration((ConsulComponent) component).setNearNode((java.lang.String) value); return true;
case "nodeMeta": getOrCreateConfiguration((ConsulComponent) component).setNodeMeta((java.lang.String) value); return true;
case "aclToken": getOrCreateConfiguration((ConsulComponent) component).setAclToken((java.lang.String) value); return true;
case "password": getOrCreateConfiguration((ConsulComponent) component).setPassword((java.lang.String) value); return true;
case "sslContextParameters": getOrCreateConfiguration((ConsulComponent) component).setSslContextParameters((org.apache.camel.support.jsse.SSLContextParameters) value); return true;
case "useGlobalSslContextParameters": ((ConsulComponent) component).setUseGlobalSslContextParameters((boolean) value); return true;
case "userName": getOrCreateConfiguration((ConsulComponent) component).setUserName((java.lang.String) value); return true;
case "blockSeconds": getOrCreateConfiguration((ConsulComponent) component).setBlockSeconds((java.lang.Integer) value); return true;
case "firstIndex": getOrCreateConfiguration((ConsulComponent) component).setFirstIndex((java.math.BigInteger) value); return true;
case "recursive": getOrCreateConfiguration((ConsulComponent) component).setRecursive((boolean) value); return true;
default: return false;
}
}
}
} | ConsulComponentBuilderImpl |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableTakeTimedTest.java | {
"start": 1141,
"end": 4281
} | class ____ extends RxJavaTest {
@Test
public void takeTimed() {
TestScheduler scheduler = new TestScheduler();
PublishProcessor<Integer> source = PublishProcessor.create();
Flowable<Integer> result = source.take(1, TimeUnit.SECONDS, scheduler);
Subscriber<Object> subscriber = TestHelper.mockSubscriber();
result.subscribe(subscriber);
source.onNext(1);
source.onNext(2);
source.onNext(3);
scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
source.onNext(4);
InOrder inOrder = inOrder(subscriber);
inOrder.verify(subscriber).onNext(1);
inOrder.verify(subscriber).onNext(2);
inOrder.verify(subscriber).onNext(3);
inOrder.verify(subscriber).onComplete();
inOrder.verifyNoMoreInteractions();
verify(subscriber, never()).onNext(4);
verify(subscriber, never()).onError(any(Throwable.class));
}
@Test
public void takeTimedErrorBeforeTime() {
TestScheduler scheduler = new TestScheduler();
PublishProcessor<Integer> source = PublishProcessor.create();
Flowable<Integer> result = source.take(1, TimeUnit.SECONDS, scheduler);
Subscriber<Object> subscriber = TestHelper.mockSubscriber();
result.subscribe(subscriber);
source.onNext(1);
source.onNext(2);
source.onNext(3);
source.onError(new TestException());
scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
source.onNext(4);
InOrder inOrder = inOrder(subscriber);
inOrder.verify(subscriber).onNext(1);
inOrder.verify(subscriber).onNext(2);
inOrder.verify(subscriber).onNext(3);
inOrder.verify(subscriber).onError(any(TestException.class));
inOrder.verifyNoMoreInteractions();
verify(subscriber, never()).onComplete();
verify(subscriber, never()).onNext(4);
}
@Test
public void takeTimedErrorAfterTime() {
TestScheduler scheduler = new TestScheduler();
PublishProcessor<Integer> source = PublishProcessor.create();
Flowable<Integer> result = source.take(1, TimeUnit.SECONDS, scheduler);
Subscriber<Object> subscriber = TestHelper.mockSubscriber();
result.subscribe(subscriber);
source.onNext(1);
source.onNext(2);
source.onNext(3);
scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
source.onNext(4);
source.onError(new TestException());
InOrder inOrder = inOrder(subscriber);
inOrder.verify(subscriber).onNext(1);
inOrder.verify(subscriber).onNext(2);
inOrder.verify(subscriber).onNext(3);
inOrder.verify(subscriber).onComplete();
inOrder.verifyNoMoreInteractions();
verify(subscriber, never()).onNext(4);
verify(subscriber, never()).onError(any(TestException.class));
}
@Test
public void timedDefaultScheduler() {
Flowable.range(1, 5).take(1, TimeUnit.MINUTES)
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5);
}
}
| FlowableTakeTimedTest |
java | quarkusio__quarkus | test-framework/google-cloud-functions/src/main/java/io/quarkus/google/cloud/functions/test/CloudFunctionsInvoker.java | {
"start": 108,
"end": 922
} | class ____ {
private final Invoker invoker;
CloudFunctionsInvoker(FunctionType functionType) {
this(functionType, 8081);
}
CloudFunctionsInvoker(FunctionType functionType, int port) {
int realPort = port == 0 ? SocketUtil.findAvailablePort() : port;
if (realPort != port) {
System.setProperty("quarkus.http.test-port", String.valueOf(realPort));
}
this.invoker = new Invoker(
realPort,
functionType.getTarget(),
functionType.getSignatureType(),
Thread.currentThread().getContextClassLoader());
}
void start() throws Exception {
this.invoker.startTestServer();
}
void stop() throws Exception {
this.invoker.stopServer();
}
}
| CloudFunctionsInvoker |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableWindowTimed.java | {
"start": 2926,
"end": 5518
} | class ____<T>
extends AtomicInteger
implements FlowableSubscriber<T>, Subscription {
private static final long serialVersionUID = 5724293814035355511L;
final Subscriber<? super Flowable<T>> downstream;
final SimplePlainQueue<Object> queue;
final long timespan;
final TimeUnit unit;
final int bufferSize;
final AtomicLong requested;
long emitted;
volatile boolean done;
Throwable error;
Subscription upstream;
final AtomicBoolean downstreamCancelled;
volatile boolean upstreamCancelled;
final AtomicInteger windowCount;
AbstractWindowSubscriber(Subscriber<? super Flowable<T>> downstream, long timespan, TimeUnit unit, int bufferSize) {
this.downstream = downstream;
this.queue = new MpscLinkedQueue<>();
this.timespan = timespan;
this.unit = unit;
this.bufferSize = bufferSize;
this.requested = new AtomicLong();
this.downstreamCancelled = new AtomicBoolean();
this.windowCount = new AtomicInteger(1);
}
@Override
public final void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
createFirstWindow();
}
}
abstract void createFirstWindow();
@Override
public final void onNext(T t) {
queue.offer(t);
drain();
}
@Override
public final void onError(Throwable t) {
error = t;
done = true;
drain();
}
@Override
public final void onComplete() {
done = true;
drain();
}
@Override
public final void request(long n) {
if (SubscriptionHelper.validate(n)) {
BackpressureHelper.add(requested, n);
}
}
@Override
public final void cancel() {
if (downstreamCancelled.compareAndSet(false, true)) {
windowDone();
}
}
final void windowDone() {
if (windowCount.decrementAndGet() == 0) {
cleanupResources();
upstream.cancel();
upstreamCancelled = true;
drain();
}
}
abstract void cleanupResources();
abstract void drain();
}
static final | AbstractWindowSubscriber |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/ByteBufferDestination.java | {
"start": 2987,
"end": 3182
} | interface ____ requiring
* the method to be public breaks source compatibility.
*
* @since 2.9 (see LOG4J2-1874)
*/
void writeBytes(byte[] data, int offset, int length);
}
| and |
java | netty__netty | codec-http/src/test/java/io/netty/handler/codec/http/HttpHeaderValidationUtilTest.java | {
"start": 1765,
"end": 24582
} | class ____ {
@SuppressWarnings("deprecation") // We need to check for deprecated headers as well.
public static List<Arguments> connectionRelatedHeaders() {
List<Arguments> list = new ArrayList<Arguments>();
list.add(header(false, HttpHeaderNames.ACCEPT));
list.add(header(false, HttpHeaderNames.ACCEPT_CHARSET));
list.add(header(false, HttpHeaderNames.ACCEPT_ENCODING));
list.add(header(false, HttpHeaderNames.ACCEPT_LANGUAGE));
list.add(header(false, HttpHeaderNames.ACCEPT_RANGES));
list.add(header(false, HttpHeaderNames.ACCEPT_PATCH));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_ALLOW_PRIVATE_NETWORK));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_EXPOSE_HEADERS));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_MAX_AGE));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_REQUEST_HEADERS));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD));
list.add(header(false, HttpHeaderNames.ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK));
list.add(header(false, HttpHeaderNames.AGE));
list.add(header(false, HttpHeaderNames.ALLOW));
list.add(header(false, HttpHeaderNames.AUTHORIZATION));
list.add(header(false, HttpHeaderNames.CACHE_CONTROL));
list.add(header(true, HttpHeaderNames.CONNECTION));
list.add(header(false, HttpHeaderNames.CONTENT_BASE));
list.add(header(false, HttpHeaderNames.CONTENT_ENCODING));
list.add(header(false, HttpHeaderNames.CONTENT_LANGUAGE));
list.add(header(false, HttpHeaderNames.CONTENT_LENGTH));
list.add(header(false, HttpHeaderNames.CONTENT_LOCATION));
list.add(header(false, HttpHeaderNames.CONTENT_TRANSFER_ENCODING));
list.add(header(false, HttpHeaderNames.CONTENT_DISPOSITION));
list.add(header(false, HttpHeaderNames.CONTENT_MD5));
list.add(header(false, HttpHeaderNames.CONTENT_RANGE));
list.add(header(false, HttpHeaderNames.CONTENT_SECURITY_POLICY));
list.add(header(false, HttpHeaderNames.CONTENT_TYPE));
list.add(header(false, HttpHeaderNames.COOKIE));
list.add(header(false, HttpHeaderNames.DATE));
list.add(header(false, HttpHeaderNames.DNT));
list.add(header(false, HttpHeaderNames.ETAG));
list.add(header(false, HttpHeaderNames.EXPECT));
list.add(header(false, HttpHeaderNames.EXPIRES));
list.add(header(false, HttpHeaderNames.FROM));
list.add(header(false, HttpHeaderNames.HOST));
list.add(header(false, HttpHeaderNames.IF_MATCH));
list.add(header(false, HttpHeaderNames.IF_MODIFIED_SINCE));
list.add(header(false, HttpHeaderNames.IF_NONE_MATCH));
list.add(header(false, HttpHeaderNames.IF_RANGE));
list.add(header(false, HttpHeaderNames.IF_UNMODIFIED_SINCE));
list.add(header(true, HttpHeaderNames.KEEP_ALIVE));
list.add(header(false, HttpHeaderNames.LAST_MODIFIED));
list.add(header(false, HttpHeaderNames.LOCATION));
list.add(header(false, HttpHeaderNames.MAX_FORWARDS));
list.add(header(false, HttpHeaderNames.ORIGIN));
list.add(header(false, HttpHeaderNames.PRAGMA));
list.add(header(false, HttpHeaderNames.PROXY_AUTHENTICATE));
list.add(header(false, HttpHeaderNames.PROXY_AUTHORIZATION));
list.add(header(true, HttpHeaderNames.PROXY_CONNECTION));
list.add(header(false, HttpHeaderNames.RANGE));
list.add(header(false, HttpHeaderNames.REFERER));
list.add(header(false, HttpHeaderNames.RETRY_AFTER));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_KEY1));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_KEY2));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_LOCATION));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_ORIGIN));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_VERSION));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_KEY));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_ACCEPT));
list.add(header(false, HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
list.add(header(false, HttpHeaderNames.SERVER));
list.add(header(false, HttpHeaderNames.SET_COOKIE));
list.add(header(false, HttpHeaderNames.SET_COOKIE2));
list.add(header(true, HttpHeaderNames.TE));
list.add(header(false, HttpHeaderNames.TRAILER));
list.add(header(true, HttpHeaderNames.TRANSFER_ENCODING));
list.add(header(true, HttpHeaderNames.UPGRADE));
list.add(header(false, HttpHeaderNames.UPGRADE_INSECURE_REQUESTS));
list.add(header(false, HttpHeaderNames.USER_AGENT));
list.add(header(false, HttpHeaderNames.VARY));
list.add(header(false, HttpHeaderNames.VIA));
list.add(header(false, HttpHeaderNames.WARNING));
list.add(header(false, HttpHeaderNames.WEBSOCKET_LOCATION));
list.add(header(false, HttpHeaderNames.WEBSOCKET_ORIGIN));
list.add(header(false, HttpHeaderNames.WEBSOCKET_PROTOCOL));
list.add(header(false, HttpHeaderNames.WWW_AUTHENTICATE));
list.add(header(false, HttpHeaderNames.X_FRAME_OPTIONS));
list.add(header(false, HttpHeaderNames.X_REQUESTED_WITH));
return list;
}
private static Arguments header(final boolean isConnectionRelated, final AsciiString headerName) {
return new Arguments() {
@Override
public Object[] get() {
return new Object[]{headerName, isConnectionRelated};
}
};
}
@ParameterizedTest
@MethodSource("connectionRelatedHeaders")
void mustIdentifyConnectionRelatedHeadersAsciiString(AsciiString headerName, boolean isConnectionRelated) {
assertEquals(isConnectionRelated, HttpHeaderValidationUtil.isConnectionHeader(headerName, false));
}
@ParameterizedTest
@MethodSource("connectionRelatedHeaders")
void mustIdentifyConnectionRelatedHeadersString(AsciiString headerName, boolean isConnectionRelated) {
assertEquals(isConnectionRelated, HttpHeaderValidationUtil.isConnectionHeader(headerName.toString(), false));
}
@Test
void teHeaderIsNotConnectionRelatedWhenIgnoredAsciiString() {
assertFalse(HttpHeaderValidationUtil.isConnectionHeader(HttpHeaderNames.TE, true));
}
@Test
void teHeaderIsNotConnectionRelatedWhenIgnoredString() {
assertFalse(HttpHeaderValidationUtil.isConnectionHeader(HttpHeaderNames.TE.toString(), true));
}
public static List<Arguments> teIsTrailersTruthTable() {
List<Arguments> list = new ArrayList<Arguments>();
list.add(teIsTrailter(HttpHeaderNames.TE, HttpHeaderValues.TRAILERS, false));
list.add(teIsTrailter(HttpHeaderNames.TE, HttpHeaderValues.CHUNKED, true));
list.add(teIsTrailter(HttpHeaderNames.COOKIE, HttpHeaderValues.CHUNKED, false));
list.add(teIsTrailter(HttpHeaderNames.COOKIE, HttpHeaderValues.TRAILERS, false));
list.add(teIsTrailter(HttpHeaderNames.TRAILER, HttpHeaderValues.TRAILERS, false));
list.add(teIsTrailter(HttpHeaderNames.TRAILER, HttpHeaderValues.CHUNKED, false));
return list;
}
private static Arguments teIsTrailter(
final AsciiString headerName, final AsciiString headerValue, final boolean result) {
return new Arguments() {
@Override
public Object[] get() {
return new Object[]{headerName, headerValue, result};
}
};
}
@ParameterizedTest
@MethodSource("teIsTrailersTruthTable")
void whenTeIsNotTrailerOrNotWithNameAndValueAsciiString(
AsciiString headerName, AsciiString headerValue, boolean result) {
assertEquals(result, HttpHeaderValidationUtil.isTeNotTrailers(headerName, headerValue));
}
@ParameterizedTest
@MethodSource("teIsTrailersTruthTable")
void whenTeIsNotTrailerOrNotSWithNameAndValueString(
AsciiString headerName, AsciiString headerValue, boolean result) {
assertEquals(result, HttpHeaderValidationUtil.isTeNotTrailers(headerName.toString(), headerValue.toString()));
}
@ParameterizedTest
@MethodSource("teIsTrailersTruthTable")
void whenTeIsNotTrailerOrNotSWithNameAsciiStringAndValueString(
AsciiString headerName, AsciiString headerValue, boolean result) {
assertEquals(result, HttpHeaderValidationUtil.isTeNotTrailers(headerName, headerValue.toString()));
}
@ParameterizedTest
@MethodSource("teIsTrailersTruthTable")
void whenTeIsNotTrailerOrNotSWithNametringAndValueAsciiString(
AsciiString headerName, AsciiString headerValue, boolean result) {
assertEquals(result, HttpHeaderValidationUtil.isTeNotTrailers(headerName.toString(), headerValue));
}
public static List<AsciiString> illegalFirstChar() {
List<AsciiString> list = new ArrayList<AsciiString>();
for (byte i = 0; i < 0x21; i++) {
asciiStrings(new byte[]{i, 'a'}, list);
}
asciiStrings(new byte[]{0x7F, 'a'}, list);
return list;
}
private static void asciiStrings(byte[] chars, List<AsciiString> out) {
out.add(new AsciiString(chars));
out.add(new AsciiString(Arrays.copyOf(chars, chars.length + 1), 0, chars.length, false));
byte[] cs = Arrays.copyOf(chars, chars.length + 1);
System.arraycopy(cs, 0, cs, 1, chars.length);
out.add(new AsciiString(cs, 1, chars.length, false));
cs = Arrays.copyOf(chars, chars.length + 2);
System.arraycopy(cs, 0, cs, 1, chars.length);
out.add(new AsciiString(cs, 1, chars.length, false));
}
@ParameterizedTest
@MethodSource("illegalFirstChar")
void decodingInvalidHeaderValuesMustFailIfFirstCharIsIllegalAsciiString(AsciiString value) {
assertEquals(0, validateValidHeaderValue(value));
}
@ParameterizedTest
@MethodSource("illegalFirstChar")
void decodingInvalidHeaderValuesMustFailIfFirstCharIsIllegalCharSequence(AsciiString value) {
assertEquals(0, validateValidHeaderValue(asCharSequence(value)));
}
public static List<AsciiString> legalFirstChar() {
List<AsciiString> list = new ArrayList<AsciiString>();
for (int i = 0x21; i <= 0xFF; i++) {
if (i == 0x7F) {
continue;
}
asciiStrings(new byte[]{(byte) i, 'a'}, list);
}
return list;
}
@ParameterizedTest
@MethodSource("legalFirstChar")
void allOtherCharsAreLegalFirstCharsAsciiString(AsciiString value) {
assertEquals(-1, validateValidHeaderValue(value));
}
@ParameterizedTest
@MethodSource("legalFirstChar")
void allOtherCharsAreLegalFirstCharsCharSequence(AsciiString value) {
assertEquals(-1, validateValidHeaderValue(value));
}
public static List<AsciiString> illegalNotFirstChar() {
ArrayList<AsciiString> list = new ArrayList<AsciiString>();
for (byte i = 0; i < 0x21; i++) {
if (i == ' ' || i == '\t') {
continue; // Space and horizontal tab are only illegal as first chars.
}
asciiStrings(new byte[]{'a', i}, list);
}
asciiStrings(new byte[]{'a', 0x7F}, list);
return list;
}
@ParameterizedTest
@MethodSource("illegalNotFirstChar")
void decodingInvalidHeaderValuesMustFailIfNotFirstCharIsIllegalAsciiString(AsciiString value) {
assertEquals(1, validateValidHeaderValue(value));
}
@ParameterizedTest
@MethodSource("illegalNotFirstChar")
void decodingInvalidHeaderValuesMustFailIfNotFirstCharIsIllegalCharSequence(AsciiString value) {
assertEquals(1, validateValidHeaderValue(asCharSequence(value)));
}
public static List<AsciiString> legalNotFirstChar() {
List<AsciiString> list = new ArrayList<AsciiString>();
for (int i = 0; i < 0xFF; i++) {
if (i == 0x7F || i < 0x21 && (i != ' ' || i != '\t')) {
continue;
}
asciiStrings(new byte[] {'a', (byte) i}, list);
}
return list;
}
@ParameterizedTest
@MethodSource("legalNotFirstChar")
void allOtherCharsArgLegalNotFirstCharsAsciiString(AsciiString value) {
assertEquals(-1, validateValidHeaderValue(value));
}
@ParameterizedTest
@MethodSource("legalNotFirstChar")
void allOtherCharsArgLegalNotFirstCharsCharSequence(AsciiString value) {
assertEquals(-1, validateValidHeaderValue(asCharSequence(value)));
}
@Test
void emptyValuesHaveNoIllegalCharsAsciiString() {
assertEquals(-1, validateValidHeaderValue(AsciiString.EMPTY_STRING));
}
@Test
void emptyValuesHaveNoIllegalCharsCharSequence() {
assertEquals(-1, validateValidHeaderValue(asCharSequence(AsciiString.EMPTY_STRING)));
}
@Test
void headerValuesCannotEndWithNewlinesAsciiString() {
assertEquals(1, validateValidHeaderValue(AsciiString.of("a\n")));
assertEquals(1, validateValidHeaderValue(AsciiString.of("a\r")));
}
@Test
void headerValuesCannotEndWithNewlinesCharSequence() {
assertEquals(1, validateValidHeaderValue("a\n"));
assertEquals(1, validateValidHeaderValue("a\r"));
}
/**
* This method returns a {@link CharSequence} instance that has the same contents as the given {@link AsciiString},
* but which is, critically, <em>not</em> itself an {@link AsciiString}.
* <p>
* Some methods specialise on {@link AsciiString}, while having a {@link CharSequence} based fallback.
* <p>
* This method exist to test those fallback methods.
*
* @param value The {@link AsciiString} instance to wrap.
* @return A new {@link CharSequence} instance which backed by the given {@link AsciiString},
* but which is itself not an {@link AsciiString}.
*/
private static CharSequence asCharSequence(final AsciiString value) {
return new CharSequence() {
@Override
public int length() {
return value.length();
}
@Override
public char charAt(int index) {
return value.charAt(index);
}
@Override
public CharSequence subSequence(int start, int end) {
return asCharSequence(value.subSequence(start, end));
}
};
}
private static final IllegalArgumentException VALIDATION_EXCEPTION = new IllegalArgumentException() {
private static final long serialVersionUID = -8857428534361331089L;
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
};
@DisabledForJreRange(max = JRE.JAVA_17) // This test is much too slow on older Java versions.
@Test
void headerValueValidationMustRejectAllValuesRejectedByOldAlgorithm() {
byte[] array = new byte[4];
final ByteBuffer buffer = ByteBuffer.wrap(array);
final AsciiString asciiString = new AsciiString(buffer, false);
CharSequence charSequence = asCharSequence(asciiString);
int i = Integer.MIN_VALUE;
Supplier<String> failureMessageSupplier = new Supplier<String>() {
@Override
public String get() {
return "validation mismatch on string '" + asciiString + "', iteration " + buffer.getInt(0);
}
};
do {
buffer.putInt(0, i);
try {
oldHeaderValueValidationAlgorithm(asciiString);
} catch (IllegalArgumentException ignore) {
assertNotEquals(-1, validateValidHeaderValue(asciiString), failureMessageSupplier);
assertNotEquals(-1, validateValidHeaderValue(charSequence), failureMessageSupplier);
}
i++;
} while (i != Integer.MIN_VALUE);
}
private static void oldHeaderValueValidationAlgorithm(CharSequence seq) {
int state = 0;
// Start looping through each of the character
for (int index = 0; index < seq.length(); index++) {
state = oldValidationAlgorithmValidateValueChar(state, seq.charAt(index));
}
if (state != 0) {
throw VALIDATION_EXCEPTION;
}
}
private static int oldValidationAlgorithmValidateValueChar(int state, char character) {
/*
* State:
* 0: Previous character was neither CR nor LF
* 1: The previous character was CR
* 2: The previous character was LF
*/
if ((character & ~15) == 0) {
// Check the absolutely prohibited characters.
switch (character) {
case 0x0: // NULL
throw VALIDATION_EXCEPTION;
case 0x0b: // Vertical tab
throw VALIDATION_EXCEPTION;
case '\f':
throw VALIDATION_EXCEPTION;
default:
break;
}
}
// Check the CRLF (HT | SP) pattern
switch (state) {
case 0:
switch (character) {
case '\r':
return 1;
case '\n':
return 2;
default:
break;
}
break;
case 1:
if (character == '\n') {
return 2;
}
throw VALIDATION_EXCEPTION;
case 2:
switch (character) {
case '\t':
case ' ':
return 0;
default:
throw VALIDATION_EXCEPTION;
}
default:
break;
}
return state;
}
@DisabledForJreRange(max = JRE.JAVA_17) // This test is much too slow on older Java versions.
@Test
void headerNameValidationMustRejectAllNamesRejectedByOldAlgorithm() throws Exception {
byte[] array = new byte[4];
final ByteBuffer buffer = ByteBuffer.wrap(array);
final AsciiString asciiString = new AsciiString(buffer, false);
CharSequence charSequence = asCharSequence(asciiString);
int i = Integer.MIN_VALUE;
Supplier<String> failureMessageSupplier = new Supplier<String>() {
@Override
public String get() {
return "validation mismatch on string '" + asciiString + "', iteration " + buffer.getInt(0);
}
};
do {
buffer.putInt(0, i);
try {
oldHeaderNameValidationAlgorithmAsciiString(asciiString);
} catch (IllegalArgumentException ignore) {
assertNotEquals(-1, validateToken(asciiString), failureMessageSupplier);
assertNotEquals(-1, validateToken(charSequence), failureMessageSupplier);
}
i++;
} while (i != Integer.MIN_VALUE);
}
private static void oldHeaderNameValidationAlgorithmAsciiString(AsciiString name) throws Exception {
byte[] array = name.array();
for (int i = name.arrayOffset(), len = name.arrayOffset() + name.length(); i < len; i++) {
validateHeaderNameElement(array[i]);
}
}
private static void validateHeaderNameElement(byte value) {
switch (value) {
case 0x1c:
case 0x1d:
case 0x1e:
case 0x1f:
case 0x00:
case '\t':
case '\n':
case 0x0b:
case '\f':
case '\r':
case ' ':
case ',':
case ':':
case ';':
case '=':
throw VALIDATION_EXCEPTION;
default:
// Check to see if the character is not an ASCII character, or invalid
if (value < 0) {
throw VALIDATION_EXCEPTION;
}
}
}
public static List<Arguments> validTokenChars() {
List<Character> charList = new ArrayList<Character>();
for (char c = '0'; c <= '9'; c++) {
charList.add(c);
}
for (char c = 'a'; c <= 'z'; c++) {
charList.add(c);
}
for (char c = 'A'; c <= 'Z'; c++) {
charList.add(c);
}
// Unreserved characters:
charList.add('-');
charList.add('.');
charList.add('_');
charList.add('~');
// Token special characters:
charList.add('!');
charList.add('#');
charList.add('$');
charList.add('%');
charList.add('&');
charList.add('\'');
charList.add('*');
charList.add('+');
charList.add('^');
charList.add('`');
charList.add('|');
List<AsciiString> asciiStrings = new ArrayList<AsciiString>();
List<Arguments> list = new ArrayList<Arguments>();
for (char tokenChar : charList) {
for (byte[] cs : Arrays.asList(new byte[] {(byte) tokenChar, 'a'}, new byte[] {'a', (byte) tokenChar})) {
asciiStrings.clear();
asciiStrings(cs, asciiStrings);
for (AsciiString asciiString : asciiStrings) {
list.add(args(asciiString, new String(cs)));
}
for (AsciiString asciiString : asciiStrings) {
list.add(args(asciiString, asciiString.toString()));
}
}
}
return list;
}
private static Arguments args(final Object... objs) {
return new Arguments() {
@Override
public Object[] get() {
return objs;
}
};
}
@ParameterizedTest
@MethodSource("validTokenChars")
void allTokenCharsAreValidInHeaderName(AsciiString asciiString, String string) {
CharSequence charSequence = asCharSequence(asciiString);
assertEquals(-1, validateToken(asciiString));
assertEquals(-1, validateToken(charSequence));
assertEquals(-1, validateToken(string));
}
}
| HttpHeaderValidationUtilTest |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/config/SortedResourcesFactoryBean.java | {
"start": 1565,
"end": 3474
} | class ____ extends AbstractFactoryBean<Resource[]> implements ResourceLoaderAware {
private final List<String> locations;
private ResourcePatternResolver resourcePatternResolver;
public SortedResourcesFactoryBean(List<String> locations) {
this.locations = locations;
this.resourcePatternResolver = new PathMatchingResourcePatternResolver();
}
public SortedResourcesFactoryBean(ResourceLoader resourceLoader, List<String> locations) {
this.locations = locations;
this.resourcePatternResolver = ResourcePatternUtils.getResourcePatternResolver(resourceLoader);
}
@Override
public void setResourceLoader(ResourceLoader resourceLoader) {
this.resourcePatternResolver = ResourcePatternUtils.getResourcePatternResolver(resourceLoader);
}
@Override
public Class<? extends Resource[]> getObjectType() {
return Resource[].class;
}
@Override
protected Resource[] createInstance() throws Exception {
List<Resource> result = new ArrayList<>();
for (String location : this.locations) {
Resource[] resources = this.resourcePatternResolver.getResources(location);
// Cache URLs to avoid repeated I/O during sorting
Map<Resource, String> urlCache = new LinkedHashMap<>(resources.length);
List<Resource> failingResources = new ArrayList<>();
for (Resource resource : resources) {
try {
urlCache.put(resource, resource.getURL().toString());
}
catch (IOException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to resolve " + resource + " for sorting purposes: " + ex);
}
failingResources.add(resource);
}
}
// Sort using cached URLs
List<Resource> sortedResources = new ArrayList<>(urlCache.keySet());
sortedResources.sort(Comparator.comparing(urlCache::get));
result.addAll(sortedResources);
result.addAll(failingResources);
}
return result.toArray(new Resource[0]);
}
}
| SortedResourcesFactoryBean |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/writer/BinaryWriter.java | {
"start": 10612,
"end": 10737
} | interface ____ extends Serializable {
void setValue(BinaryArrayWriter writer, int pos, Object value);
}
}
| ValueSetter |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java | {
"start": 90562,
"end": 92642
} | class ____.elasticsearch.action.search.SearchRequest] is not compatible with version")
);
assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled."));
assertEquals("Not serializable to " + transportVersion, ex.getCause().getMessage());
latch.countDown();
}
});
latch.await();
} finally {
assertTrue(ESTestCase.terminate(threadPool));
}
}
public void testIgnoreIndicesWithIndexRefreshBlock() {
int numIndices = randomIntBetween(1, 10);
String[] concreteIndices = new String[numIndices];
for (int i = 0; i < numIndices; i++) {
concreteIndices[i] = "index" + i;
}
List<String> shuffledIndices = Arrays.asList(concreteIndices);
Collections.shuffle(shuffledIndices, random());
concreteIndices = shuffledIndices.toArray(new String[0]);
final ProjectId projectId = randomProjectIdOrDefault();
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder();
int numBlockedIndices = randomIntBetween(0, numIndices);
for (int i = 0; i < numBlockedIndices; i++) {
blocksBuilder.addIndexBlock(projectId, concreteIndices[i], IndexMetadata.INDEX_REFRESH_BLOCK);
}
final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(ProjectMetadata.builder(projectId).build())
.blocks(blocksBuilder)
.build();
final ProjectState projectState = clusterState.projectState(projectId);
String[] actual = TransportSearchAction.ignoreBlockedIndices(projectState, concreteIndices);
String[] expected = Arrays.stream(concreteIndices)
.filter(index -> clusterState.blocks().hasIndexBlock(projectId, index, IndexMetadata.INDEX_REFRESH_BLOCK) == false)
.toArray(String[]::new);
assertThat(Arrays.asList(actual), containsInAnyOrder(expected));
}
}
| org |
java | apache__spark | core/src/main/java/org/apache/spark/shuffle/api/SingleSpillShuffleMapOutputWriter.java | {
"start": 1078,
"end": 1362
} | interface ____ {
/**
* Transfer a file that contains the bytes of all the partitions written by this map task.
*/
void transferMapSpillFile(
File mapOutputFile,
long[] partitionLengths,
long[] checksums) throws IOException;
}
| SingleSpillShuffleMapOutputWriter |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java | {
"start": 28590,
"end": 28786
} | class ____ an inheritance hierarchy actually declares
* an {@link Annotation}, so we need to handle this explicitly.
* @param annotationType the annotation type to look for
* @param clazz the | in |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java | {
"start": 5632,
"end": 7605
} | class ____ extends WatcherExecutorServiceBenchmark {
public static void main(String[] args) throws Exception {
start();
int numAlerts = 1000;
for (int i = 0; i < numAlerts; i++) {
final String name = "_name" + i;
PutWatchRequest putAlertRequest = new PutWatchRequest(
name,
new WatchSourceBuilder().trigger(schedule(interval("5s")))
.input(searchInput(templateRequest(new SearchSourceBuilder(), "test")).extractKeys("hits.total.value"))
.condition(new ScriptCondition(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "1 == 1", emptyMap())))
.addAction("_id", indexAction("index"))
.buildAsBytes(XContentType.JSON),
XContentType.JSON
);
putAlertRequest.setId(name);
client.execute(PutWatchAction.INSTANCE, putAlertRequest).actionGet();
}
int numThreads = 50;
int watchersPerThread = numAlerts / numThreads;
Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
final int begin = i * watchersPerThread;
final int end = (i + 1) * watchersPerThread;
Runnable r = new Runnable() {
@Override
public void run() {
while (true) {
for (int j = begin; j < end; j++) {
scheduler.trigger("_name" + j);
}
}
}
};
threads[i] = new Thread(r);
threads[i].start();
}
for (Thread thread : threads) {
thread.join();
}
}
}
public static final | BigSearchInput |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/resource/bitmap/UnitBitmapDecoder.java | {
"start": 849,
"end": 1387
} | class ____ implements Resource<Bitmap> {
private final Bitmap bitmap;
NonOwnedBitmapResource(@NonNull Bitmap bitmap) {
this.bitmap = bitmap;
}
@NonNull
@Override
public Class<Bitmap> getResourceClass() {
return Bitmap.class;
}
@NonNull
@Override
public Bitmap get() {
return bitmap;
}
@Override
public int getSize() {
return Util.getBitmapByteSize(bitmap);
}
@Override
public void recycle() {
// Do nothing.
}
}
}
| NonOwnedBitmapResource |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedmethodcall/ObjectFactory.java | {
"start": 310,
"end": 488
} | class ____ {
public JAXBElement<String> createDate(String date) {
return new JAXBElement<>( new QName( "dont-care" ), String.class, "06.07.2013" );
}
}
| ObjectFactory |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallStatTest_WhiteList_disable.java | {
"start": 270,
"end": 1388
} | class ____ extends TestCase {
protected void setUp() throws Exception {
WallContext.clearContext();
}
protected void tearDown() throws Exception {
WallContext.clearContext();
}
public void testMySql() throws Exception {
WallProvider provider = new MySqlWallProvider();
provider.setBlackListEnable(false);
provider.setWhiteListEnable(false);
for (int i = 0; i < 301; ++i) {
String sql = "select * from t where id = " + i;
assertTrue(provider.checkValid(sql));
}
for (int i = 0; i < 301; ++i) {
String sql = "select * from t where id = " + i + " OR 1 = 1";
assertFalse(provider.checkValid(sql));
}
WallTableStat tableStat = provider.getTableStat("t");
assertEquals(602, tableStat.getSelectCount());
assertEquals(0, provider.getBlackListHitCount());
assertEquals(0, provider.getWhiteListHitCount());
assertEquals(0, provider.getWhiteList().size());
assertEquals(602, provider.getCheckCount());
}
}
| WallStatTest_WhiteList_disable |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/generator/values/GeneratedValues.java | {
"start": 541,
"end": 1116
} | interface ____ {
/**
* Register a generated value for the corresponding {@link ModelPart}
*/
void addGeneratedValue(ModelPart modelPart, Object value);
/**
* Retrieve a generated value for the requested {@link ModelPart}.
*/
Object getGeneratedValue(ModelPart modelPart);
/**
* Retrieves a list of generated values corresponding to the list of requested {@link ModelPart}s.
* Ensures the order of the values in the returned list corresponds to the input properties.
*/
List<Object> getGeneratedValues(List<? extends ModelPart> modelParts);
}
| GeneratedValues |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsageTests.java | {
"start": 1028,
"end": 5786
} | class ____ extends AbstractWireSerializingTestCase<HealthApiFeatureSetUsage> {
@Override
protected HealthApiFeatureSetUsage createTestInstance() {
return new HealthApiFeatureSetUsage(true, true, randomCounters());
}
@Override
protected HealthApiFeatureSetUsage mutateInstance(HealthApiFeatureSetUsage instance) {
Map<String, Object> originalStats = instance.stats();
Counters newStats = randomCounters(false);
while (originalStats.equals(newStats.toMutableNestedMap())) {
newStats = randomCounters(false);
}
return new HealthApiFeatureSetUsage(true, true, newStats);
}
@Override
protected Writeable.Reader<HealthApiFeatureSetUsage> instanceReader() {
return HealthApiFeatureSetUsage::new;
}
private Counters randomCounters() {
return randomCounters(true);
}
private Counters randomCounters(boolean allowNull) {
if (allowNull && rarely()) {
return null;
}
Counters counters = new Counters();
for (int i = 0; i < randomInt(20); i++) {
if (randomBoolean()) {
counters.inc(randomAlphaOfLength(10), randomInt(20));
} else {
counters.inc(randomAlphaOfLength(10) + "." + randomAlphaOfLength(10), randomInt(20));
}
}
return counters;
}
public void testEnrichingValuesEmptyStats() {
Map<String, Object> noRequestStats = Map.of("invocations", Map.of("total", 0));
enrichUsageStatsWithValues(noRequestStats);
assertThat(noRequestStats.keySet(), containsInAnyOrder("invocations"));
}
@SuppressWarnings("unchecked")
public void testEnrichValuesGreenStats() {
Map<String, Object> greenStats = new HashMap<>();
int greenCounter = randomInt();
greenStats.put("statuses", new HashMap<>(Map.of("green", greenCounter)));
enrichUsageStatsWithValues(greenStats);
assertThat(greenStats.keySet(), containsInAnyOrder("statuses"));
Map<String, Object> enriched = (Map<String, Object>) greenStats.get("statuses");
assertThat(enriched.get("green"), equalTo(greenCounter));
assertThat((List<String>) enriched.get("values"), containsInAnyOrder("green"));
}
@SuppressWarnings("unchecked")
public void testEnrichingValuesUnhealthyStats() {
Map<String, Object> stats = new HashMap<>();
Map<String, Object> statuses = generateStatsMap(
Arrays.stream(HealthStatus.values()).map(HealthStatus::xContentValue).collect(Collectors.toList())
);
stats.put("statuses", statuses);
List<String> indicatorLabels = List.of("ilm", "slm", "disk", "master_stability", "shards_availability");
Map<String, Object> redIndicators = generateStatsMap(indicatorLabels);
Map<String, Object> yellowIndicators = generateStatsMap(indicatorLabels);
stats.put("indicators", Map.of("red", redIndicators, "yellow", yellowIndicators));
List<String> diagnosisLabels = IntStream.of(randomIntBetween(1, 50)).mapToObj(i -> "diagnosis:" + i).toList();
Map<String, Object> redDiagnoses = generateStatsMap(diagnosisLabels);
Map<String, Object> yellowDiagnoses = generateStatsMap(diagnosisLabels);
stats.put("diagnoses", Map.of("red", redDiagnoses, "yellow", yellowDiagnoses));
enrichUsageStatsWithValues(stats);
assertThat(stats.keySet(), containsInAnyOrder("statuses", "indicators", "diagnoses"));
assertValues((Map<String, Object>) stats.get("statuses"));
assertValues(((Map<String, Map<String, Object>>) stats.get("indicators")).get("red"));
assertValues(((Map<String, Map<String, Object>>) stats.get("indicators")).get("yellow"));
assertValues(((Map<String, Map<String, Object>>) stats.get("diagnoses")).get("red"));
assertValues(((Map<String, Map<String, Object>>) stats.get("diagnoses")).get("yellow"));
}
@SuppressWarnings("unchecked")
private static void assertValues(Map<String, Object> map) {
Set<String> expectedValues = Sets.difference(map.keySet(), Set.of("values"));
assertThat(map.get("values"), notNullValue());
assertThat(
(List<String>) map.get("values"),
containsInAnyOrder(expectedValues.stream().map(Matchers::equalTo).collect(Collectors.toList()))
);
}
private static Map<String, Object> generateStatsMap(List<String> statLabels) {
Map<String, Object> statsMap = new HashMap<>();
for (String stat : randomNonEmptySubsetOf(statLabels)) {
statsMap.put(stat, randomIntBetween(1, 1_000));
}
return statsMap;
}
}
| HealthApiFeatureSetUsageTests |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/testFixtures/java/org/springframework/boot/autoconfigure/cache/support/MockCachingProvider.java | {
"start": 1212,
"end": 2239
} | class ____ implements CachingProvider {
@Override
public CacheManager getCacheManager(URI uri, ClassLoader classLoader, Properties properties) {
return new MockCacheManager(uri, classLoader, properties);
}
@Override
public ClassLoader getDefaultClassLoader() {
return mock(ClassLoader.class);
}
@Override
public URI getDefaultURI() {
return null;
}
@Override
public Properties getDefaultProperties() {
return new Properties();
}
@Override
public CacheManager getCacheManager(URI uri, ClassLoader classLoader) {
return getCacheManager(uri, classLoader, getDefaultProperties());
}
@Override
public CacheManager getCacheManager() {
return getCacheManager(getDefaultURI(), getDefaultClassLoader());
}
@Override
public void close() {
}
@Override
public void close(ClassLoader classLoader) {
}
@Override
public void close(URI uri, ClassLoader classLoader) {
}
@Override
public boolean isSupported(OptionalFeature optionalFeature) {
return false;
}
public static | MockCachingProvider |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/transaction/jdbc/JdbcTransactionFactory.java | {
"start": 1067,
"end": 1758
} | class ____ implements TransactionFactory {
private boolean skipSetAutoCommitOnClose;
@Override
public void setProperties(Properties props) {
if (props == null) {
return;
}
String value = props.getProperty("skipSetAutoCommitOnClose");
if (value != null) {
skipSetAutoCommitOnClose = Boolean.parseBoolean(value);
}
}
@Override
public Transaction newTransaction(Connection conn) {
return new JdbcTransaction(conn);
}
@Override
public Transaction newTransaction(DataSource ds, TransactionIsolationLevel level, boolean autoCommit) {
return new JdbcTransaction(ds, level, autoCommit, skipSetAutoCommitOnClose);
}
}
| JdbcTransactionFactory |
java | bumptech__glide | benchmark/src/androidTest/java/com/bumptech/glide/benchmark/BenchmarkModels.java | {
"start": 1222,
"end": 7614
} | class ____ {
private final Application app = ApplicationProvider.getApplicationContext();
private final int smallResourceId = R.raw.small;
private final int hugeHeaderResourceId = R.raw.huge_header;
@Rule public final GlideBenchmarkRule glideBenchmarkRule = new GlideBenchmarkRule();
@Test
public void smallAsCacheFile() throws Exception {
benchmarkAsCacheFile(smallResourceId);
}
@Test
public void hugeHeaderAsCacheFile() throws Exception {
benchmarkAsCacheFile(hugeHeaderResourceId);
}
@Test
public void smallAsResourceId() throws Exception {
benchmarkModel(smallResourceId);
}
@Test
public void hugeHeaderAsResourceId() throws Exception {
benchmarkModel(hugeHeaderResourceId);
}
@Test
public void smallAsResourceUri() throws Exception {
Uri uri = resourceUriFromId(smallResourceId);
benchmarkModel(uri);
}
@Test
public void hugeHeaderAsResourceUri() throws Exception {
Uri uri = resourceUriFromId(hugeHeaderResourceId);
benchmarkModel(uri);
}
@Test
public void smallAsMediaStoreUri() throws Exception {
benchmarkAsMediaStoreUri(smallResourceId);
}
@Test
public void hugeHeaderAsMediaStoreUri() throws Exception {
benchmarkAsMediaStoreUri(hugeHeaderResourceId);
}
@Test
public void pixel3aAsMediaStoreUri() throws Exception {
benchmarkAsMediaStoreUri(R.raw.pixel3a_portrait);
}
@Test
public void pixel3aExifRotatedAsMediaStoreUri() throws Exception {
benchmarkAsMediaStoreUri(R.raw.pixel3a_exif_rotated);
}
@Test
public void pixel3aMvimgExifRotatedAsMediaStoreUri() throws Exception {
benchmarkAsMediaStoreUri(R.raw.pixel3a_mvimg_exif_rotated);
}
@Test
public void smallAsMediaStoreFilepath() throws Exception {
benchmarkAsMediaStoreFilepath(smallResourceId);
}
@Test
public void pixel3aAsMediaStoreFilepath() throws Exception {
benchmarkAsMediaStoreFilepath(R.raw.pixel3a_portrait);
}
@Test
public void pixel3aExifRotatedAsMediaStoreFilepath() throws Exception {
benchmarkAsMediaStoreFilepath(R.raw.pixel3a_exif_rotated);
}
@Test
public void pixel3aMvimgExifRotatedAsMediaStoreFilepath() throws Exception {
benchmarkAsMediaStoreFilepath(R.raw.pixel3a_mvimg_exif_rotated);
}
@Test
public void hugeHeaderAsMediaStoreFilepath() throws Exception {
benchmarkAsMediaStoreFilepath(hugeHeaderResourceId);
}
private Uri resourceUriFromId(@RawRes int resourceId) {
glideBenchmarkRule.pauseTiming();
try {
return new Uri.Builder()
.scheme(ContentResolver.SCHEME_ANDROID_RESOURCE)
.authority(app.getPackageName())
.appendPath(app.getResources().getResourceTypeName(resourceId))
.appendPath(app.getResources().getResourceEntryName(resourceId))
.build();
} finally {
glideBenchmarkRule.resumeTiming();
}
}
private Uri mediaStoreUriFromId(@RawRes int resourceId) throws IOException {
glideBenchmarkRule.pauseTiming();
try {
Uri mediaStoreUri =
app.getContentResolver()
.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, new ContentValues());
InputStream is = null;
OutputStream os = null;
try {
is = app.getResources().openRawResource(resourceId);
os = app.getContentResolver().openOutputStream(mediaStoreUri);
byte[] buffer = new byte[1024 * 1024];
int read;
while ((read = is.read(buffer, /* off= */ 0, buffer.length)) != -1) {
os.write(buffer, /* off= */ 0, read);
}
// Make sure we actually write all of the data or fail by throwing immediately.
os.close();
return mediaStoreUri;
} finally {
if (is != null) {
try {
is.close();
} catch (IOException e) {
// Ignored.
}
}
if (os != null) {
try {
os.close();
} catch (IOException e) {
// Ignored.
}
}
}
} finally {
glideBenchmarkRule.resumeTiming();
}
}
private void benchmarkAsMediaStoreUri(@RawRes int resourceId) throws Exception {
Uri mediaStoreUri = mediaStoreUriFromId(resourceId);
try {
benchmarkModel(mediaStoreUri);
} finally {
cleanupMediaStoreUri(mediaStoreUri);
}
}
private void cleanupMediaStoreUri(Uri mediaStoreUri) {
glideBenchmarkRule.pauseTiming();
int result = app.getContentResolver().delete(mediaStoreUri, /* extras= */ null);
Preconditions.checkState(result == 1);
glideBenchmarkRule.resumeTiming();
}
private void benchmarkAsMediaStoreFilepath(@RawRes int resourceId) throws Exception {
Uri mediaStoreUri = mediaStoreUriFromId(resourceId);
try {
benchmarkModel(getMediaStoreFilepath(mediaStoreUri));
} finally {
cleanupMediaStoreUri(mediaStoreUri);
}
}
private String getMediaStoreFilepath(Uri mediaStoreUri) {
glideBenchmarkRule.pauseTiming();
String[] projection = new String[] {MediaStore.Images.Media.DATA};
Cursor cursor =
app.getContentResolver()
.query(
mediaStoreUri,
projection,
/* selection= */ null,
/* selectionArgs= */ null,
/* sortOrder= */ null);
try {
Preconditions.checkState(cursor.moveToFirst());
return cursor.getString(0);
} finally {
cursor.close();
glideBenchmarkRule.resumeTiming();
}
}
private void benchmarkAsCacheFile(@RawRes final int resourceId) throws Exception {
final FileOpener fileOpener = new FileOpener();
glideBenchmarkRule.runBenchmark(
new BeforeStep<File>() {
@Override
public File act() throws IOException {
return fileOpener.acquire(resourceId);
}
},
new AfterStep<File>() {
@Override
public void act(File beforeData) {
fileOpener.close(beforeData);
}
});
}
private void benchmarkModel(final Object model) throws Exception {
glideBenchmarkRule.runBenchmark(
new BeforeStep<Object>() {
@Override
public Object act() {
return model;
}
},
new AfterStep<Object>() {
@Override
public void act(Object beforeData) {}
});
}
}
| BenchmarkModels |
java | google__guice | core/test/com/google/inject/EagerSingletonTest.java | {
"start": 5647,
"end": 6112
} | class ____ load a new one in
// this loader.
if (name.equals(cls.getName())) {
Class<?> c = findLoadedClass(name);
if (c == null) {
return super.findClass(name);
}
return c;
}
return super.loadClass(name);
}
}.loadClass(cls.getName());
} catch (ClassNotFoundException cnfe) {
throw new AssertionError(cnfe);
}
}
@Singleton
static | we |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/subscribers/ResourceSubscriber.java | {
"start": 4308,
"end": 6994
} | class ____<T> implements FlowableSubscriber<T>, Disposable {
/** The active subscription. */
private final AtomicReference<Subscription> upstream = new AtomicReference<>();
/** The resource composite, can never be null. */
private final ListCompositeDisposable resources = new ListCompositeDisposable();
/** Remembers the request(n) counts until a subscription arrives. */
private final AtomicLong missedRequested = new AtomicLong();
/**
* Adds a resource to this {@code ResourceSubscriber}.
*
* @param resource the resource to add
*
* @throws NullPointerException if {@code resource} is {@code null}
*/
public final void add(Disposable resource) {
Objects.requireNonNull(resource, "resource is null");
resources.add(resource);
}
@Override
public final void onSubscribe(Subscription s) {
if (EndConsumerHelper.setOnce(this.upstream, s, getClass())) {
long r = missedRequested.getAndSet(0L);
if (r != 0L) {
s.request(r);
}
onStart();
}
}
/**
* Called once the upstream sets a {@link Subscription} on this {@code ResourceSubscriber}.
*
* <p>You can perform initialization at this moment. The default
* implementation requests {@link Long#MAX_VALUE} from upstream.
*/
protected void onStart() {
request(Long.MAX_VALUE);
}
/**
* Request the specified amount of elements from upstream.
*
* <p>This method can be called before the upstream calls {@link #onSubscribe(Subscription)}.
* When the subscription happens, all missed requests are requested.
*
* @param n the request amount, must be positive
*/
protected final void request(long n) {
SubscriptionHelper.deferredRequest(upstream, missedRequested, n);
}
/**
* Cancels the subscription (if any) and disposes the resources associated with
* this {@code ResourceSubscriber} (if any).
*
* <p>This method can be called before the upstream calls {@link #onSubscribe(Subscription)} at which
* case the {@link Subscription} will be immediately cancelled.
*/
@Override
public final void dispose() {
if (SubscriptionHelper.cancel(upstream)) {
resources.dispose();
}
}
/**
* Returns true if this {@code ResourceSubscriber} has been disposed/cancelled.
* @return true if this {@code ResourceSubscriber} has been disposed/cancelled
*/
@Override
public final boolean isDisposed() {
return upstream.get() == SubscriptionHelper.CANCELLED;
}
}
| ResourceSubscriber |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/usertype/UserTypeTest.java | {
"start": 623,
"end": 947
} | class ____ {
@Test
@WithClasses({ ContactDetails.class, PhoneNumber.class })
void testCustomUserTypeInMetaModel() {
assertMetamodelClassGeneratedFor( ContactDetails.class );
assertPresenceOfFieldInMetamodelFor(
ContactDetails.class, "phoneNumber", "@Type annotated field should be in metamodel"
);
}
}
| UserTypeTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jaxrs/deployment/src/main/java/io/quarkus/jaxrs/client/reactive/deployment/JaxrsClientReactiveProcessor.java | {
"start": 44161,
"end": 50658
} | class ____$$QuarkusRestClientInterface implements Closeable, BaseClient {
final WebTarget target1;
private final Method javaMethod1;
private final HeaderFiller headerFiller1;
final WebTarget target2;
private final Method javaMethod2;
private final HeaderFiller headerFiller2;
public BaseClient$$QuarkusRestClientInterface(WebTarget var1) {
WebTarget var3 = var1.path("");
DefaultClientHeadersFactoryImpl var2 = new DefaultClientHeadersFactoryImpl();
MicroProfileRestClientRequestFilter var4 = new MicroProfileRestClientRequestFilter((ClientHeadersFactory)var2);
var3 = (WebTarget)((Configurable)var3).register(var4);
String var6 = "/base";
WebTarget var5 = var3.path(var6);
this.target1 = var5;
Class[] var7 = new Class[0];
Method var8 = BaseClient.class.getMethod("executeBasePost", var7);
this.javaMethod1 = var8;
NoOpHeaderFiller var9 = NoOpHeaderFiller.INSTANCE;
this.headerFiller1 = (HeaderFiller)var9;
String var11 = "/base";
WebTarget var10 = var3.path(var11);
this.target2 = var10;
Class[] var12 = new Class[0];
Method var13 = BaseClient.class.getMethod("executeBaseGet", var12);
this.javaMethod2 = var13;
NoOpHeaderFiller var14 = NoOpHeaderFiller.INSTANCE;
this.headerFiller2 = (HeaderFiller)var14;
}
public Response executeBasePost() {
WebTarget var1 = this.target1;
String[] var2 = new String[]{"application/json"};
Builder var3 = var1.request(var2);
Method var4 = this.javaMethod1;
var3 = var3.property("org.eclipse.microprofile.rest.client.invokedMethod", var4);
HeaderFiller var5 = this.headerFiller1;
var3 = var3.property("io.quarkus.resteasy.reactive.client.microprofile.HeaderFiller", var5);
try {
return (Response)var3.method("POST", Response.class);
} catch (ProcessingException var8) {
Throwable var7 = ((Throwable)var8).getCause();
if (!(var7 instanceof WebApplicationException)) {
throw (Throwable)var8;
} else {
throw var7;
}
}
}
public Response executeBaseGet() {
WebTarget var1 = this.target2;
String[] var2 = new String[]{"application/json"};
Builder var3 = var1.request(var2);
Method var4 = this.javaMethod2;
var3 = var3.property("org.eclipse.microprofile.rest.client.invokedMethod", var4);
HeaderFiller var5 = this.headerFiller2;
var3 = var3.property("io.quarkus.resteasy.reactive.client.microprofile.HeaderFiller", var5);
try {
return (Response)var3.method("GET", Response.class);
} catch (ProcessingException var8) {
Throwable var7 = ((Throwable)var8).getCause();
if (!(var7 instanceof WebApplicationException)) {
throw (Throwable)var8;
} else {
throw var7;
}
}
}
public void close() {
((WebTargetImpl)this.target1).getRestClient().close();
((WebTargetImpl)this.target2).getRestClient().close();
}
}
```
@formatter:on
A more full example of generated client (with sub-resource) can is at the bottom of
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/subresource/SubResourceTest.java
*/
private RuntimeValue<BiFunction<WebTarget, List<ParamConverterProvider>, ?>> generateClientInvoker(
RecorderContext recorderContext,
RestClientInterface restClientInterface, List<JaxrsClientReactiveEnricherBuildItem> enrichers,
BuildProducer<GeneratedClassBuildItem> generatedClasses, ClassInfo interfaceClass,
IndexView index, String defaultMediaType, Map<DotName, String> httpAnnotationToMethod,
boolean observabilityIntegrationNeeded, Set<ClassInfo> multipartResponseTypes,
Map<GeneratedSubResourceKey, String> generatedSubResources) {
String creatorName = restClientInterface.getClassName() + "$$QuarkusRestClientInterfaceCreator";
String name = restClientInterface.getClassName() + "$$QuarkusRestClientInterface";
MethodDescriptor constructorDesc = MethodDescriptor.ofConstructor(name, WebTarget.class.getName(), List.class);
try (ClassRestClientContext classContext = new ClassRestClientContext(name, constructorDesc, generatedClasses,
RestClientBase.class, Closeable.class.getName(), restClientInterface.getClassName())) {
classContext.constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(RestClientBase.class, List.class),
classContext.constructor.getThis(), classContext.constructor.getMethodParam(1));
AssignableResultHandle effectiveInputTarget = classContext.constructor.createVariable(WebTargetImpl.class);
ResultHandle inputTarget = classContext.constructor.getMethodParam(0);
if (restClientInterface.isEncoded()) {
classContext.constructor.assign(effectiveInputTarget,
disableEncodingForWebTarget(classContext.constructor, inputTarget));
} else {
classContext.constructor.assign(effectiveInputTarget, inputTarget);
}
// field that holds the initial value passed to the constructor (with encoding taken care of)
FieldDescriptor inputTargetField = classContext.classCreator
.getFieldCreator("inputTarget", WebTargetImpl.class.getName())
.setModifiers(Modifier.FINAL)
.getFieldDescriptor();
classContext.constructor.writeInstanceField(inputTargetField, classContext.constructor.getThis(),
effectiveInputTarget);
AssignableResultHandle baseTarget = classContext.constructor.createVariable(WebTargetImpl.class);
classContext.constructor.assign(baseTarget, effectiveInputTarget);
// method that takes the WebTarget provided as a parameter and produces the base WebTarget of the | BaseClient |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/resilience/annotation/ResilientMethodsConfiguration.java | {
"start": 1226,
"end": 1628
} | class ____ registers the Spring infrastructure beans necessary
* to enable proxy-based method invocations with retry and concurrency limit behavior.
*
* @author Juergen Hoeller
* @since 7.0
* @see EnableResilientMethods
* @see RetryAnnotationBeanPostProcessor
* @see ConcurrencyLimitBeanPostProcessor
*/
@Configuration(proxyBeanMethods = false)
@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
public | that |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/connector/sink2/InitContext.java | {
"start": 1210,
"end": 1925
} | interface ____ {
/**
* The first checkpoint id when an application is started and not recovered from a previously
* taken checkpoint or savepoint.
*/
long INITIAL_CHECKPOINT_ID = 1;
/**
* Returns id of the restored checkpoint, if state was restored from the snapshot of a previous
* execution.
*/
OptionalLong getRestoredCheckpointId();
/**
* Get the meta information of current job.
*
* @return the job meta information.
*/
@PublicEvolving
JobInfo getJobInfo();
/**
* Get the meta information of current task.
*
* @return the task meta information.
*/
@PublicEvolving
TaskInfo getTaskInfo();
}
| InitContext |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/internal/DigestDiff.java | {
"start": 763,
"end": 1316
} | class ____ {
private final MessageDigest digest;
private final String expected;
private final String actual;
public DigestDiff(String actual, String expected, MessageDigest digest) {
this.digest = digest;
this.expected = expected;
this.actual = actual;
}
public String getExpected() {
return expected;
}
public String getActual() {
return actual;
}
public boolean digestsDiffer() {
return !expected.equals(actual);
}
public String getDigestAlgorithm() {
return digest.getAlgorithm();
}
}
| DigestDiff |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/SynchronizedPartitionGroup.java | {
"start": 1045,
"end": 3331
} | class ____ extends AbstractPartitionGroup {
private final AbstractPartitionGroup wrapped;
public SynchronizedPartitionGroup(final AbstractPartitionGroup wrapped) {
this.wrapped = wrapped;
}
@Override
synchronized ReadyToProcessResult readyToProcess(final long wallClockTime) {
return wrapped.readyToProcess(wallClockTime);
}
@Override
synchronized void updatePartitions(final Set<TopicPartition> inputPartitions, final Function<TopicPartition, RecordQueue> recordQueueCreator) {
wrapped.updatePartitions(inputPartitions, recordQueueCreator);
}
@Override
synchronized void setPartitionTime(final TopicPartition partition, final long partitionTime) {
wrapped.setPartitionTime(partition, partitionTime);
}
@Override
synchronized StampedRecord nextRecord(final RecordInfo info, final long wallClockTime) {
return wrapped.nextRecord(info, wallClockTime);
}
@Override
synchronized int addRawRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) {
return wrapped.addRawRecords(partition, rawRecords);
}
@Override
synchronized long partitionTimestamp(final TopicPartition partition) {
return wrapped.partitionTimestamp(partition);
}
@Override
synchronized long streamTime() {
return wrapped.streamTime();
}
@Override
synchronized Long headRecordOffset(final TopicPartition partition) {
return wrapped.headRecordOffset(partition);
}
@Override
Optional<Integer> headRecordLeaderEpoch(final TopicPartition partition) {
return Optional.empty();
}
@Override
synchronized int numBuffered() {
return wrapped.numBuffered();
}
@Override
synchronized int numBuffered(final TopicPartition tp) {
return wrapped.numBuffered(tp);
}
@Override
synchronized void clear() {
wrapped.clear();
}
@Override
synchronized void updateLags() {
wrapped.updateLags();
}
@Override
synchronized void close() {
wrapped.close();
}
@Override
synchronized Set<TopicPartition> partitions() {
return wrapped.partitions();
}
}
| SynchronizedPartitionGroup |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimerServiceAsyncImplTest.java | {
"start": 17126,
"end": 17635
} | class ____ implements Triggerable<Integer, String> {
private static int eventTriggerCount = 0;
private static int processingTriggerCount = 0;
@Override
public void onEventTime(InternalTimer<Integer, String> timer) throws Exception {
eventTriggerCount++;
}
@Override
public void onProcessingTime(InternalTimer<Integer, String> timer) throws Exception {
processingTriggerCount++;
}
}
private static | TestTriggerable |
java | mapstruct__mapstruct | processor/src/test/resources/fixtures/org/mapstruct/ap/test/updatemethods/selection/ExternalMapperImpl.java | {
"start": 562,
"end": 820
} | class ____ implements ExternalMapper {
@Override
public void toDepartmentEntity(DepartmentDto dto, DepartmentEntity entity) {
if ( dto == null ) {
return;
}
entity.setName( dto.getName() );
}
}
| ExternalMapperImpl |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/DynamicPropertySourceNestedTests.java | {
"start": 2833,
"end": 3214
} | class ____ extends DynamicPropertySourceSuperclass {
@Test
@DisplayName("@Service has values injected from @DynamicPropertySource in superclass")
void serviceHasInjectedValues(@Autowired Service service) {
assertServiceHasInjectedValues(service);
}
}
@Nested
@NestedTestConfiguration(OVERRIDE)
@SpringJUnitConfig(Config.class)
| DynamicPropertySourceFromSuperclassTests |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/AuthorizeHttpRequestsConfigurerTests.java | {
"start": 58772,
"end": 59106
} | class ____ {
@RequestMapping("/user/{username}")
String path(@PathVariable("username") String username) {
return username;
}
@RequestMapping("/v2/user/{username}")
String pathV2(@PathVariable("username") String username) {
return username;
}
}
}
@Configuration
@EnableWebSecurity
static | PathController |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/OpenshiftBuildsComponentBuilderFactory.java | {
"start": 1386,
"end": 1902
} | interface ____ {
/**
* OpenShift Builds (camel-kubernetes)
* Perform operations on OpenShift Builds.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* @return the dsl builder
*/
static OpenshiftBuildsComponentBuilder openshiftBuilds() {
return new OpenshiftBuildsComponentBuilderImpl();
}
/**
* Builder for the OpenShift Builds component.
*/
| OpenshiftBuildsComponentBuilderFactory |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/enums/EnumSetDeserializationWithDefaultTyping4849Test.java | {
"start": 884,
"end": 2962
} | enum ____ {
TEST_ENUM_VALUE
}
private final ObjectMapper MAPPER = configureMapper4849();
private ObjectMapper configureMapper4849()
{
final PolymorphicTypeValidator validator = BasicPolymorphicTypeValidator.builder()
.allowIfSubType("com.fasterxml.jackson")
.allowIfSubType("java")
.build();
DefaultTypeResolverBuilder resolverBuilder
= new DefaultTypeResolverBuilder(validator, DefaultTyping.NON_FINAL,
JsonTypeInfo.As.PROPERTY) {
@Override
public boolean useForType(JavaType t) {
return true;
}
};
StdTypeResolverBuilder stdTypeResolverBuilder = resolverBuilder
.init(JsonTypeInfo.Value.construct(JsonTypeInfo.Id.CLASS, JsonTypeInfo.As.PROPERTY,
"", Object.class, false, null),
null);
return jsonMapperBuilder()
.setDefaultTyping(stdTypeResolverBuilder)
.build();
}
@Test
public void testSerializationDeserializationRoundTrip4849()
throws Exception
{
// Given
EnumSet<TestEnum4849> input = EnumSet.of(TestEnum4849.TEST_ENUM_VALUE);
// When : Serialize and deserialize
String inputJson = MAPPER.writeValueAsString(input);
Object inputDeserialized = MAPPER.readValue(inputJson, Object.class);
// Then
assertEquals(input, inputDeserialized);
}
@Test
public void testHardCodedDeserializationFromPreviousJackson4849()
throws Exception
{
// Given : Hard-coded output from Jackson 2.15.4
String input = String.format("[\"java.util.EnumSet<%s>\",[\"%s\"]]",
TestEnum4849.class.getName(),
TestEnum4849.TEST_ENUM_VALUE.name());
// When
Object deserialized = MAPPER.readValue(input, Object.class);
// Then
assertEquals(EnumSet.of(TestEnum4849.TEST_ENUM_VALUE), deserialized);
}
}
| TestEnum4849 |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java | {
"start": 1882,
"end": 2307
} | class ____ extends ValuesReader
implements VectorizedValuesReader {
// Current decoding mode. The encoded data contains groups of either run length encoded data
// (RLE) or bit packed data. Each group contains a header that indicates which group it is and
// the number of values in the group.
// More details here: https://github.com/apache/parquet-format/blob/master/Encodings.md
private | VectorizedRleValuesReader |
java | micronaut-projects__micronaut-core | context-propagation/src/test/groovy/io/micronaut/context/propagation/MyTracingInterceptor.java | {
"start": 1979,
"end": 2237
} | class ____ {
private final Map<String, String> tags = new HashMap<>();
public void tag(String s1, String s2) {
tags.put(s1, s2);
}
public Map<String, String> tags() {
return tags;
}
}
}
| Trace |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/IntValueComparator.java | {
"start": 1331,
"end": 4672
} | class ____ extends TypeComparator<IntValue> {
private static final long serialVersionUID = 1L;
private final boolean ascendingComparison;
private final IntValue reference = new IntValue();
private final IntValue tempReference = new IntValue();
private final TypeComparator<?>[] comparators = new TypeComparator[] {this};
public IntValueComparator(boolean ascending) {
this.ascendingComparison = ascending;
}
@Override
public int hash(IntValue record) {
return record.hashCode();
}
@Override
public void setReference(IntValue toCompare) {
toCompare.copyTo(reference);
}
@Override
public boolean equalToReference(IntValue candidate) {
return candidate.equals(this.reference);
}
@Override
public int compareToReference(TypeComparator<IntValue> referencedComparator) {
IntValue otherRef = ((IntValueComparator) referencedComparator).reference;
int comp = otherRef.compareTo(reference);
return ascendingComparison ? comp : -comp;
}
@Override
public int compare(IntValue first, IntValue second) {
int comp = first.compareTo(second);
return ascendingComparison ? comp : -comp;
}
@Override
public int compareSerialized(DataInputView firstSource, DataInputView secondSource)
throws IOException {
reference.read(firstSource);
tempReference.read(secondSource);
int comp = reference.compareTo(tempReference);
return ascendingComparison ? comp : -comp;
}
@Override
public boolean supportsNormalizedKey() {
return NormalizableKey.class.isAssignableFrom(IntValue.class);
}
@Override
public int getNormalizeKeyLen() {
return reference.getMaxNormalizedKeyLen();
}
@Override
public boolean isNormalizedKeyPrefixOnly(int keyBytes) {
return keyBytes < getNormalizeKeyLen();
}
@Override
public void putNormalizedKey(IntValue record, MemorySegment target, int offset, int numBytes) {
record.copyNormalizedKey(target, offset, numBytes);
}
@Override
public boolean invertNormalizedKey() {
return !ascendingComparison;
}
@Override
public TypeComparator<IntValue> duplicate() {
return new IntValueComparator(ascendingComparison);
}
@Override
public int extractKeys(Object record, Object[] target, int index) {
target[index] = record;
return 1;
}
@Override
public TypeComparator<?>[] getFlatComparators() {
return comparators;
}
// --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
}
@Override
public void writeWithKeyNormalization(IntValue record, DataOutputView target)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public IntValue readWithKeyDenormalization(IntValue reuse, DataInputView source)
throws IOException {
throw new UnsupportedOperationException();
}
}
| IntValueComparator |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/asm/TestASM_long.java | {
"start": 437,
"end": 619
} | class ____ {
private long i = 12;
public long getI() {
return i;
}
public void setI(long i) {
this.i = i;
}
}
}
| V0 |
java | elastic__elasticsearch | modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java | {
"start": 2579,
"end": 3931
} | class ____ extends FieldMapper {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ParentJoinFieldMapper.class);
public static final String NAME = "join";
public static final String CONTENT_TYPE = "join";
private static void checkIndexCompatibility(IndexSettings settings, String name) {
String indexName = settings.getIndex().getName();
if (settings.getIndexMetadata().isRoutingPartitionedIndex()) {
throw new IllegalStateException("cannot create join field [" + name + "] for the partitioned index [" + indexName + "]");
}
if (settings.getIndexMetadata().getRoutingPaths().isEmpty() == false) {
throw new IllegalStateException("cannot create join field [" + name + "] for the index [" + indexName + "] with routing_path");
}
}
private static void checkObjectOrNested(MapperBuilderContext context, String name) {
String fullName = context.buildFullName(name);
if (fullName.equals(name) == false) {
throw new IllegalArgumentException("join field [" + fullName + "] " + "cannot be added inside an object or in a multi-field");
}
}
private static ParentJoinFieldMapper toType(FieldMapper in) {
return (ParentJoinFieldMapper) in;
}
public static | ParentJoinFieldMapper |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/interceptor/TransactedRetryWhileStackSizeTest.java | {
"start": 2571,
"end": 3062
} | class ____ {
private int counter;
public String areWeCool() {
int size = currentStackSize();
if (size > MAX_DEPTH) {
LOG.error("Stacktrace max depth: {}", size);
return "no";
}
if (counter++ < 1000) {
return "no";
} else {
return "yes";
}
}
public int getCounter() {
return counter;
}
}
}
| MyCoolDude |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/SecurityConfigurerAdapterClosureTests.java | {
"start": 999,
"end": 1594
} | class ____ {
ConcereteSecurityConfigurerAdapter conf = new ConcereteSecurityConfigurerAdapter();
@Test
public void addPostProcessorClosureWhenPostProcessThenGetsApplied() throws Exception {
SecurityBuilder<Object> builder = mock(SecurityBuilder.class);
this.conf.addObjectPostProcessor(new ObjectPostProcessor<List<String>>() {
@Override
public <O extends List<String>> O postProcess(O l) {
l.add("a");
return l;
}
});
this.conf.init(builder);
this.conf.configure(builder);
assertThat(this.conf.list).contains("a");
}
static | SecurityConfigurerAdapterClosureTests |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/SqlAlterMaterializedTableModifyDistributionConverter.java | {
"start": 1636,
"end": 3132
} | class ____
extends AbstractAlterMaterializedTableConverter<
SqlAlterMaterializedTableModifyDistribution> {
@Override
public Operation convertSqlNode(
SqlAlterMaterializedTableModifyDistribution node, ConvertContext context) {
ObjectIdentifier identifier = resolveIdentifier(node, context);
ResolvedCatalogMaterializedTable oldTable =
getResolvedMaterializedTable(
context,
identifier,
() -> "Operation is supported only for materialized tables");
if (oldTable.getDistribution().isEmpty()) {
throw new ValidationException(
String.format(
"Materialized table %s does not have a distribution to modify.",
identifier));
}
TableDistribution tableDistribution =
OperationConverterUtils.getDistributionFromSqlDistribution(
node.getDistribution().get());
// Build new materialized table and apply changes
CatalogMaterializedTable updatedTable =
buildUpdatedMaterializedTable(
oldTable, builder -> builder.distribution(tableDistribution));
return new AlterMaterializedTableChangeOperation(
identifier, List.of(TableChange.modify(tableDistribution)), updatedTable);
}
}
| SqlAlterMaterializedTableModifyDistributionConverter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jpa/boot/spi/JpaSettings.java | {
"start": 438,
"end": 1356
} | class ____ {
/**
* Names a {@link IntegratorProvider}
*/
public static final String INTEGRATOR_PROVIDER = "hibernate.integrator_provider";
/**
* Names a {@link StrategyRegistrationProviderList}
*/
public static final String STRATEGY_REGISTRATION_PROVIDERS = "hibernate.strategy_registration_provider";
/**
* Names a {@link TypeContributorList}
*
* @deprecated Consider using {@linkplain java.util.ServiceLoader discovery} instead to
* dynamically locate {@linkplain TypeContributor contributors}.
*/
@Deprecated(forRemoval = true)
public static final String TYPE_CONTRIBUTORS = "hibernate.type_contributors";
/**
* Names a {@link MetadataBuilderContributor}
*
* @deprecated Use {@linkplain java.util.ServiceLoader discovery} instead.
*/
@Deprecated(forRemoval = true)
public static final String METADATA_BUILDER_CONTRIBUTOR = "hibernate.metadata_builder_contributor";
}
| JpaSettings |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/env/PropertySource.java | {
"start": 8420,
"end": 9041
} | class ____ extends StubPropertySource {
private static final String USAGE_ERROR =
"ComparisonPropertySource instances are for use with collection comparison only";
public ComparisonPropertySource(String name) {
super(name);
}
@Override
public Object getSource() {
throw new UnsupportedOperationException(USAGE_ERROR);
}
@Override
public boolean containsProperty(String name) {
throw new UnsupportedOperationException(USAGE_ERROR);
}
@Override
public @Nullable String getProperty(String name) {
throw new UnsupportedOperationException(USAGE_ERROR);
}
}
}
| ComparisonPropertySource |
java | elastic__elasticsearch | x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/patterntext/PatternTextSyntheticFieldLoaderLayer.java | {
"start": 1868,
"end": 2643
} | class ____ implements DocValuesLoader {
private final BinaryDocValues docValues;
private boolean hasValue = false;
PatternTextSyntheticFieldLoader(BinaryDocValues docValues) {
this.docValues = docValues;
}
public boolean hasValue() {
assert docValues.docID() != DocIdSetIterator.NO_MORE_DOCS;
return hasValue;
}
@Override
public boolean advanceToDoc(int docId) throws IOException {
return hasValue = docValues.advanceExact(docId);
}
public void write(XContentBuilder b) throws IOException {
if (hasValue) {
b.value(docValues.binaryValue().utf8ToString());
}
}
}
}
| PatternTextSyntheticFieldLoader |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryIntegTestCase.java | {
"start": 1749,
"end": 17127
} | class ____ extends ESIntegTestCase {
/**
* Get the mapping for this test. It should contain a field called `location' that
* supports GeoBoundingBox queries.
*/
public abstract XContentBuilder getMapping() throws IOException;
/**
* Provides a supported version when the mapping was created.
*/
public abstract IndexVersion randomSupportedVersion() throws IOException;
@Override
protected boolean forbidPrivateIndexSettings() {
return false;
}
public void testSimpleBoundingBoxTest() throws Exception {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, randomSupportedVersion()).build();
XContentBuilder xContentBuilder = getMapping();
assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
ensureGreen();
prepareIndex("test").setId("1")
.setSource(jsonBuilder().startObject().field("name", "New York").field("location", "POINT(-74.0059731 40.7143528)").endObject())
.get();
// to NY: 5.286 km
prepareIndex("test").setId("2")
.setSource(
jsonBuilder().startObject().field("name", "Times Square").field("location", "POINT(-73.9844722 40.759011)").endObject()
)
.get();
// to NY: 0.4621 km
prepareIndex("test").setId("3")
.setSource(jsonBuilder().startObject().field("name", "Tribeca").field("location", "POINT(-74.007819 40.718266)").endObject())
.get();
// to NY: 1.055 km
prepareIndex("test").setId("4")
.setSource(
jsonBuilder().startObject().field("name", "Wall Street").field("location", "POINT(-74.0088305 40.7051157)").endObject()
)
.get();
// to NY: 1.258 km
prepareIndex("test").setId("5")
.setSource(jsonBuilder().startObject().field("name", "Soho").field("location", "POINT(-74 40.7247222)").endObject())
.get();
// to NY: 2.029 km
prepareIndex("test").setId("6")
.setSource(
jsonBuilder().startObject().field("name", "Greenwich Village").field("location", "POINT(-73.9962255 40.731033)").endObject()
)
.get();
// to NY: 8.572 km
prepareIndex("test").setId("7")
.setSource(jsonBuilder().startObject().field("name", "Brooklyn").field("location", "POINT(-73.95 40.65)").endObject())
.get();
client().admin().indices().prepareRefresh().get();
assertResponse(
client().prepareSearch() // from NY
.setQuery(geoBoundingBoxQuery("location").setCorners(40.73, -74.1, 40.717, -73.99)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getHits().length, equalTo(2));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
}
}
);
assertResponse(
client().prepareSearch() // from NY
.setQuery(geoBoundingBoxQuery("location").setCorners(40.73, -74.1, 40.717, -73.99)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getHits().length, equalTo(2));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
}
}
);
assertResponse(
client().prepareSearch() // top == bottom && left == right
.setQuery(geoBoundingBoxQuery("location").setCorners(40.7143528, -74.0059731, 40.7143528, -74.0059731)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(1L));
assertThat(response.getHits().getHits().length, equalTo(1));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), equalTo("1"));
}
}
);
assertResponse(
client().prepareSearch() // top == bottom
.setQuery(geoBoundingBoxQuery("location").setCorners(40.759011, -74.00009, 40.759011, -73.0059731)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(1L));
assertThat(response.getHits().getHits().length, equalTo(1));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), equalTo("2"));
}
}
);
assertResponse(
client().prepareSearch() // left == right
.setQuery(geoBoundingBoxQuery("location").setCorners(41.8, -73.9844722, 40.7, -73.9844722)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(1L));
assertThat(response.getHits().getHits().length, equalTo(1));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), equalTo("2"));
}
}
);
// Distance query
assertResponse(
client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").point(40.5, -73.9).distance(25, DistanceUnit.KILOMETERS)),
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getHits().length, equalTo(2));
for (SearchHit hit : response.getHits()) {
assertThat(hit.getId(), anyOf(equalTo("7"), equalTo("4")));
}
}
);
}
public void testLimit2BoundingBox() throws Exception {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, randomSupportedVersion()).build();
XContentBuilder xContentBuilder = getMapping();
assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
ensureGreen();
prepareIndex("test").setId("1")
.setSource(
jsonBuilder().startObject()
.field("userid", 880)
.field("title", "Place in Stockholm")
.field("location", "POINT(59.328355000000002 18.036842)")
.endObject()
)
.setRefreshPolicy(IMMEDIATE)
.get();
prepareIndex("test").setId("2")
.setSource(
jsonBuilder().startObject()
.field("userid", 534)
.field("title", "Place in Montreal")
.field("location", "POINT(-73.570986000000005 45.509526999999999)")
.endObject()
)
.setRefreshPolicy(IMMEDIATE)
.get();
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 880))
.filter(geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875))
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875))
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875))
),
1L
);
// top == bottom && left == right
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 880))
.filter(geoBoundingBoxQuery("location").setCorners(18.036842, 59.328355000000002, 18.036842, 59.328355000000002))
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(
geoBoundingBoxQuery("location").setCorners(
45.509526999999999,
-73.570986000000005,
45.509526999999999,
-73.570986000000005
)
)
),
1L
);
// top == bottom
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 880))
.filter(geoBoundingBoxQuery("location").setCorners(18.036842, 143.5, 18.036842, 113.96875))
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(geoBoundingBoxQuery("location").setCorners(45.509526999999999, 143.5, 45.509526999999999, 113.96875))
),
1L
);
// left == right
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 880))
.filter(
geoBoundingBoxQuery("location").setCorners(
74.579421999999994,
59.328355000000002,
-66.668903999999998,
59.328355000000002
)
)
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(
geoBoundingBoxQuery("location").setCorners(
74.579421999999994,
-73.570986000000005,
-66.668903999999998,
-73.570986000000005
)
)
),
1L
);
// Distance query
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 880))
.filter(geoDistanceQuery("location").point(20, 60.0).distance(500, DistanceUnit.MILES))
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
boolQuery().must(termQuery("userid", 534))
.filter(geoDistanceQuery("location").point(45.0, -73.0).distance(500, DistanceUnit.MILES))
),
1L
);
}
public void testCompleteLonRange() throws Exception {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, randomSupportedVersion()).build();
XContentBuilder xContentBuilder = getMapping();
assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
ensureGreen();
prepareIndex("test").setId("1")
.setSource(
jsonBuilder().startObject()
.field("userid", 880)
.field("title", "Place in Stockholm")
.field("location", "POINT(18.036842 59.328355000000002)")
.endObject()
)
.setRefreshPolicy(IMMEDIATE)
.get();
prepareIndex("test").setId("2")
.setSource(
jsonBuilder().startObject()
.field("userid", 534)
.field("title", "Place in Montreal")
.field("location", "POINT(-73.570986000000005 45.509526999999999)")
.endObject()
)
.setRefreshPolicy(IMMEDIATE)
.get();
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, -180, -50, 180)),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, -180, -50, 180)),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, -180, -90, 180)),
2L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, -180, -90, 180)),
2L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, 0, -50, 360)),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, 0, -50, 360)),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, 0, -90, 360)),
2L
);
assertHitCount(
client().prepareSearch()
.setQuery(geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, 0, -90, 360)),
2L
);
// top == bottom
assertHitCount(
client().prepareSearch()
.setQuery(
geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE)
.setCorners(59.328355000000002, 0, 59.328355000000002, 360)
),
1L
);
assertHitCount(
client().prepareSearch()
.setQuery(
geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE)
.setCorners(59.328355000000002, -180, 59.328355000000002, 180)
),
1L
);
// Distance query
assertHitCount(
client().prepareSearch().setQuery(geoDistanceQuery("location").point(60.0, -20.0).distance(1800, DistanceUnit.MILES)),
1L
);
}
}
| GeoBoundingBoxQueryIntegTestCase |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/sasl/TSaslNegotiationException.java | {
"start": 1746,
"end": 2418
} | enum ____ {
// Unexpected system internal error during negotiation (e.g. sasl initialization failure)
INTERNAL_ERROR(NegotiationStatus.ERROR),
// Cannot read correct sasl frames from the connection => Send "ERROR" status byte to peer
PROTOCOL_ERROR(NegotiationStatus.ERROR),
// Peer is using unsupported sasl mechanisms => Send "BAD" status byte to peer
MECHANISME_MISMATCH(NegotiationStatus.BAD),
// Sasl authentication failure => Send "BAD" status byte to peer
AUTHENTICATION_FAILURE(NegotiationStatus.BAD),
;
public final NegotiationStatus code;
ErrorType(NegotiationStatus code) {
this.code = code;
}
}
}
| ErrorType |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ContextResolvedTableSerdeTest.java | {
"start": 13062,
"end": 15811
} | class ____ {
private final SerdeContext ctx =
serdeContext(
TableConfigOptions.CatalogPlanCompilation.IDENTIFIER,
TableConfigOptions.CatalogPlanRestore.ALL);
@Test
void withPermanentTable() throws Exception {
final Tuple2<JsonNode, ContextResolvedTable> result =
serDe(ctx, PERMANENT_PLAN_CONTEXT_RESOLVED_TABLE);
assertThatJsonDoesNotContain(result.f0, FIELD_NAME_CATALOG_TABLE);
assertThat(result.f1).isEqualTo(PERMANENT_CATALOG_CONTEXT_RESOLVED_TABLE);
}
@Test
void withMissingIdentifierInCatalog() throws Exception {
final SerdeContext serdeCtx =
serdeContext(
TableConfigOptions.CatalogPlanCompilation.IDENTIFIER,
TableConfigOptions.CatalogPlanRestore.ALL);
final ObjectIdentifier objectIdentifier =
ObjectIdentifier.of(DEFAULT_CATALOG, "db2", "some-invalid-table");
final ContextResolvedTable spec =
ContextResolvedTable.permanent(
objectIdentifier,
CATALOG,
new ResolvedCatalogTable(
CatalogTable.newBuilder()
.schema(CATALOG_TABLE_SCHEMA)
.options(PLAN_OPTIONS)
.build(),
CATALOG_TABLE_RESOLVED_SCHEMA));
final byte[] actualSerialized =
createJsonObjectWriter(serdeCtx).writeValueAsBytes(spec);
assertThatThrownBy(
() ->
createJsonObjectReader(serdeCtx)
.readValue(
actualSerialized,
ContextResolvedTable.class))
.satisfies(
anyCauseMatches(
TableException.class,
ContextResolvedTableJsonDeserializer
.missingTableFromCatalog(objectIdentifier, false)
.getMessage()));
}
}
@Nested
@DisplayName("and CatalogPlanRestore == ALL_ENFORCED")
| TestRestoreAll |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/id/uuid/UuidVersion6Strategy.java | {
"start": 1512,
"end": 3898
} | class ____ {
static final SecureRandom numberGenerator = new SecureRandom();
static final long EPOCH_1582_SECONDS = LocalDate.of( 1582, 10, 15 )
.atStartOfDay( ZoneId.of( "UTC" ) )
.toInstant().getEpochSecond();
}
private record State(long lastTimestamp, int lastSequence) {
public State getNextState() {
final long now = instantToTimestamp();
if ( lastTimestamp < now ) {
return new State( now, randomSequence() );
}
else if ( lastSequence == 0x3FFF ) {
return new State( lastTimestamp + 1, randomSequence() );
}
else {
return new State( lastTimestamp, lastSequence + 1 );
}
}
private static int randomSequence() {
return Holder.numberGenerator.nextInt( 1 << 14 );
}
private static long instantToTimestamp() {
final var instant = Instant.now();
final long seconds = instant.getEpochSecond() - Holder.EPOCH_1582_SECONDS;
return seconds * 10_000_000 + instant.getNano() / 100;
}
}
private final AtomicReference<State> lastState;
@Internal
public UuidVersion6Strategy() {
this( Long.MIN_VALUE, Integer.MIN_VALUE );
}
@Internal
public UuidVersion6Strategy(final long initialTimestamp, final int initialSequence) {
this.lastState = new AtomicReference<>( new State( initialTimestamp, initialSequence ) );
}
/**
* Version 6
*/
@Override
public int getGeneratedVersion() {
return 6;
}
@Override
public UUID generateUUID(final SharedSessionContractImplementor session) {
return generateUuid( session );
}
@Override
public UUID generateUuid(final SharedSessionContractImplementor session) {
final var state = lastState.updateAndGet( State::getNextState );
return new UUID(
// MSB bits 0-47 - the most significant 32 bits of the 60-bit starting timestamp
state.lastTimestamp << 4 & 0xFFFF_FFFF_FFFF_0000L
// MSB bits 48-51 - version = 6
| 0x6000L
// MSB bits 52-63 - the least significant 12 bits from the 60-bit starting timestamp
| state.lastTimestamp & 0x0FFFL,
// LSB bits 0-1 - variant = 4
0x8000_0000_0000_0000L
// LSB bits 2-15 - clock sequence
| (long) state.lastSequence << 48
// LSB bits 16-63 - pseudorandom data, the least significant bit of the first octet is set to 1
| randomNode()
);
}
private static long randomNode() {
return Holder.numberGenerator.nextLong( 0x1_0000_0000_0000L ) | 0x1000_0000_0000L;
}
}
| Holder |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java | {
"start": 24150,
"end": 25552
} | class ____ implements BiConsumer<String, TransportRequest> {
private final AtomicBoolean recoveryStarted;
private final AtomicBoolean finalizeReceived;
private final String indexName;
private SingleStartEnforcer(String indexName, AtomicBoolean recoveryStarted, AtomicBoolean finalizeReceived) {
this.indexName = indexName;
this.recoveryStarted = recoveryStarted;
this.finalizeReceived = finalizeReceived;
}
@Override
public void accept(String action, TransportRequest request) {
// The cluster state applier will immediately attempt to retry the recovery on a cluster state
// update. We want to assert that the first and only recovery attempt succeeds
if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action)) {
StartRecoveryRequest startRecoveryRequest = (StartRecoveryRequest) request;
ShardId shardId = startRecoveryRequest.shardId();
logger.info("--> attempting to send start_recovery request for shard: " + shardId);
if (indexName.equals(shardId.getIndexName()) && recoveryStarted.get() && finalizeReceived.get() == false) {
throw new IllegalStateException("Recovery cannot be started twice");
}
}
}
}
private | SingleStartEnforcer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | {
"start": 6138,
"end": 6616
} | class ____ extends TaskManagerMetricGroup {
private static final String DEFAULT_HOST_NAME = "UnregisteredHost";
private static final String DEFAULT_TASKMANAGER_ID = "0";
protected UnregisteredTaskManagerMetricGroup() {
super(NoOpMetricRegistry.INSTANCE, DEFAULT_HOST_NAME, DEFAULT_TASKMANAGER_ID);
}
}
/** A safe drop-in replacement for {@link TaskManagerJobMetricGroup}s. */
public static | UnregisteredTaskManagerMetricGroup |
java | alibaba__nacos | plugin/control/src/main/java/com/alibaba/nacos/plugin/control/rule/storage/ExternalRuleStorage.java | {
"start": 737,
"end": 792
} | interface ____ extends RuleStorage {
}
| ExternalRuleStorage |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java | {
"start": 918,
"end": 1121
} | class ____<B extends InternalGeoGridBucket, T extends InternalGeoGrid<B>> extends
InternalMultiBucketAggregationTestCase<T> {
/**
* Instantiate a {@link InternalGeoGrid}-derived | GeoGridTestCase |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/IndexModule.java | {
"start": 7104,
"end": 19861
} | interface ____ {
/**
* Wrap a given {@link Directory}
*
* @param directory the {@link Directory} to wrap
* @param shardRouting the {@link ShardRouting} associated with the {@link Directory} or {@code null} is unknown
* @return a {@link Directory}
* @throws IOException
*/
Directory wrap(Directory directory, @Nullable ShardRouting shardRouting) throws IOException;
}
private final IndexSettings indexSettings;
private final AnalysisRegistry analysisRegistry;
private final EngineFactory engineFactory;
private final SetOnce<DirectoryWrapper> indexDirectoryWrapper = new SetOnce<>();
private final SetOnce<Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>>> indexReaderWrapper =
new SetOnce<>();
private final Set<IndexEventListener> indexEventListeners = new HashSet<>();
private final Map<String, TriFunction<Settings, IndexVersion, ScriptService, Similarity>> similarities = new HashMap<>();
private final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories;
private final SetOnce<BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> forceQueryCacheProvider = new SetOnce<>();
private final List<SearchOperationListener> searchOperationListeners;
private final List<IndexingOperationListener> indexOperationListeners = new ArrayList<>();
private final IndexNameExpressionResolver expressionResolver;
private final AtomicBoolean frozen = new AtomicBoolean(false);
private final BooleanSupplier allowExpensiveQueries;
private final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories;
private final SetOnce<Engine.IndexCommitListener> indexCommitListener = new SetOnce<>();
private final MapperMetrics mapperMetrics;
private final IndexingStatsSettings indexingStatsSettings;
private final SearchStatsSettings searchStatsSettings;
private final MergeMetrics mergeMetrics;
/**
* Construct the index module for the index with the specified index settings. The index module contains extension points for plugins
* via {@link org.elasticsearch.plugins.Plugin#onIndexModule(IndexModule)}.
*
* @param indexSettings the index settings
* @param analysisRegistry the analysis registry
* @param engineFactory the engine factory
* @param directoryFactories the available store types
* @param mergeMetrics
*/
public IndexModule(
final IndexSettings indexSettings,
final AnalysisRegistry analysisRegistry,
final EngineFactory engineFactory,
final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories,
final BooleanSupplier allowExpensiveQueries,
final IndexNameExpressionResolver expressionResolver,
final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories,
final SlowLogFieldProvider slowLogFieldProvider,
final MapperMetrics mapperMetrics,
final List<SearchOperationListener> searchOperationListeners,
final IndexingStatsSettings indexingStatsSettings,
final SearchStatsSettings searchStatsSettings,
final MergeMetrics mergeMetrics
) {
this.indexSettings = indexSettings;
this.analysisRegistry = analysisRegistry;
this.engineFactory = Objects.requireNonNull(engineFactory);
// Need to have a mutable arraylist for plugins to add listeners to it
this.searchOperationListeners = new ArrayList<>(searchOperationListeners);
SlowLogFields slowLogFields = slowLogFieldProvider.create(indexSettings);
this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFields));
this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFields));
this.directoryFactories = Collections.unmodifiableMap(directoryFactories);
this.allowExpensiveQueries = allowExpensiveQueries;
this.expressionResolver = expressionResolver;
this.recoveryStateFactories = recoveryStateFactories;
this.mapperMetrics = mapperMetrics;
this.indexingStatsSettings = indexingStatsSettings;
this.searchStatsSettings = searchStatsSettings;
this.mergeMetrics = mergeMetrics;
}
/**
* Adds a Setting and it's consumer for this index.
*/
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
ensureNotFrozen();
if (setting == null) {
throw new IllegalArgumentException("setting must not be null");
}
indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer);
}
/**
* Adds a Setting, it's consumer and validator for this index.
*/
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
ensureNotFrozen();
if (setting == null) {
throw new IllegalArgumentException("setting must not be null");
}
indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer, validator);
}
/**
* Returns the index {@link Settings} for this index
*/
public Settings getSettings() {
return indexSettings.getSettings();
}
/**
* Returns the {@link IndexSettings} for this index
*/
public IndexSettings indexSettings() {
return indexSettings;
}
/**
* Returns the index this module is associated with
*/
public Index getIndex() {
return indexSettings.getIndex();
}
/**
* The engine factory provided during construction of this index module.
*
* @return the engine factory
*/
EngineFactory getEngineFactory() {
return engineFactory;
}
/**
* Adds an {@link IndexEventListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addIndexEventListener(IndexEventListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (indexEventListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.indexEventListeners.add(listener);
}
/**
* Adds an {@link SearchOperationListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addSearchOperationListener(SearchOperationListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (searchOperationListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.searchOperationListeners.add(listener);
}
/**
* Adds an {@link IndexingOperationListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addIndexOperationListener(IndexingOperationListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (indexOperationListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.indexOperationListeners.add(listener);
}
/**
* Registers the given {@link Similarity} with the given name.
* The function takes as parameters:<ul>
* <li>settings for this similarity
* <li>version of Elasticsearch when the index was created
* <li>ScriptService, for script-based similarities
* </ul>
*
* @param name Name of the SimilarityProvider
* @param similarity SimilarityProvider to register
*/
public void addSimilarity(String name, TriFunction<Settings, IndexVersion, ScriptService, Similarity> similarity) {
ensureNotFrozen();
if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) {
throw new IllegalArgumentException("similarity for name: [" + name + " is already registered");
}
similarities.put(name, similarity);
}
/**
* Sets the factory for creating new {@link DirectoryReader} wrapper instances.
* The factory ({@link Function}) is called once the IndexService is fully constructed.
* NOTE: this method can only be called once per index. Multiple wrappers are not supported.
* <p>
* The {@link CheckedFunction} is invoked each time a {@link Engine.Searcher} is requested to do an operation,
* for example search, and must return a new directory reader wrapping the provided directory reader or if no
* wrapping was performed the provided directory reader.
* The wrapped reader can filter out document just like delete documents etc. but must not change any term or
* document content.
* NOTE: The index reader wrapper ({@link CheckedFunction}) has a per-request lifecycle,
* must delegate {@link IndexReader#getReaderCacheHelper()}, {@link LeafReader#getCoreCacheHelper()}
* and must be an instance of {@link FilterDirectoryReader} that eventually exposes the original reader
* via {@link FilterDirectoryReader#getDelegate()}.
* The returned reader is closed once it goes out of scope.
* </p>
*/
public void setReaderWrapper(
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> indexReaderWrapperFactory
) {
ensureNotFrozen();
this.indexReaderWrapper.set(indexReaderWrapperFactory);
}
/**
* Sets a {@link Directory} wrapping method that allows to apply a function to the Lucene directory instance
* created by {@link org.elasticsearch.plugins.IndexStorePlugin.DirectoryFactory}.
*
* @param wrapper the wrapping function
*/
public void setDirectoryWrapper(DirectoryWrapper wrapper) {
ensureNotFrozen();
this.indexDirectoryWrapper.set(Objects.requireNonNull(wrapper));
}
public void setIndexCommitListener(Engine.IndexCommitListener listener) {
ensureNotFrozen();
this.indexCommitListener.set(Objects.requireNonNull(listener));
}
IndexEventListener freeze() { // pkg private for testing
if (this.frozen.compareAndSet(false, true)) {
return new CompositeIndexEventListener(indexSettings, indexEventListeners);
} else {
throw new IllegalStateException("already frozen");
}
}
public static boolean isBuiltinType(String storeType) {
for (Type type : Type.values()) {
if (type.match(storeType)) {
return true;
}
}
return false;
}
public | DirectoryWrapper |
java | apache__rocketmq | tieredstore/src/test/java/org/apache/rocketmq/tieredstore/metrics/TieredStoreMetricsManagerTest.java | {
"start": 1323,
"end": 2368
} | class ____ {
@Test
public void getMetricsView() {
TieredStoreMetricsManager.getMetricsView();
}
@Test
public void init() {
MessageStoreConfig storeConfig = new MessageStoreConfig();
storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName());
TieredMessageStore messageStore = Mockito.mock(TieredMessageStore.class);
Mockito.when(messageStore.getStoreConfig()).thenReturn(storeConfig);
Mockito.when(messageStore.getFlatFileStore()).thenReturn(Mockito.mock(FlatFileStore.class));
MessageStoreFetcherImpl fetcher = Mockito.spy(new MessageStoreFetcherImpl(messageStore));
TieredStoreMetricsManager.init(
OpenTelemetrySdk.builder().build().getMeter(""),
null, storeConfig, fetcher,
Mockito.mock(FlatFileStore.class), Mockito.mock(DefaultMessageStore.class));
}
@Test
public void newAttributesBuilder() {
TieredStoreMetricsManager.newAttributesBuilder();
}
}
| TieredStoreMetricsManagerTest |
java | quarkusio__quarkus | integration-tests/micrometer-opentelemetry/src/main/java/io/quarkus/micrometer/opentelemetry/ExporterResource.java | {
"start": 3600,
"end": 3829
} | class ____ {
@Produces
@Singleton
InMemorySpanExporter inMemorySpanExporter() {
return InMemorySpanExporter.create();
}
}
@ApplicationScoped
static | InMemorySpanExporterProducer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cdi/converters/legacy/ConvertBetweenTest.java | {
"start": 621,
"end": 2314
} | class ____ extends AbstractJPATest {
@Override
public String[] getOrmXmlFiles() {
return new String[0];
}
@Override
protected Class<?>[] getAnnotatedClasses() {
return new Class[] { Item.class };
}
@BeforeEach
public void fillData() {
inTransaction(
session -> {
final Item i0 = new Item();
i0.setPrice( new BigDecimal( "12.05" ) );
i0.setQuantity( 10 );
session.persist( i0 );
final Item i1 = new Item();
i1.setPrice( new BigDecimal( "5.35" ) );
i1.setQuantity( 5 );
session.persist( i1 );
final Item i2 = new Item();
i2.setPrice( new BigDecimal( "99.99" ) );
i2.setQuantity( 15 );
session.persist( i2 );
}
);
}
@AfterEach
public void cleanUpData() {
sessionFactoryScope().getSessionFactory().getSchemaManager().truncate();
}
@Test
@JiraKey(value = "HHH-9356")
public void testBetweenLiteral() {
inTransaction(
session -> {
@SuppressWarnings("unchecked") final List<Item> result = session.createQuery(
"select i from Item i where quantity between 9 and 11" ).list();
assertEquals( 1, result.size() );
assertEquals( 10, result.get( 0 ).getQuantity().intValue() );
}
);
}
@Test
public void testBetweenParameters() {
inTransaction(
session -> {
final Query query = session.createQuery(
"select i from Item i where quantity between :low and :high" );
query.setParameter( "low", 9 );
query.setParameter( "high", 11 );
@SuppressWarnings("unchecked") final List<Item> result = query.list();
assertEquals( 1, result.size() );
assertEquals( 10, result.get( 0 ).getQuantity().intValue() );
}
);
}
}
| ConvertBetweenTest |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java | {
"start": 1908,
"end": 18544
} | class ____ {
/**
* .http has been chosen for handling HTTP filters, which are not part of the profiles
* The profiles are only handled for the transport protocol, so we need an own kind of profile
* for HTTP. This name starts withs a dot, because no profile name can ever start like that due to
* how we handle settings
*/
public static final String HTTP_PROFILE_NAME = HttpServerTransport.HTTP_PROFILE_NAME;
public static final Setting<Boolean> ALLOW_BOUND_ADDRESSES_SETTING = Setting.boolSetting(
setting("filter.always_allow_bound_address"),
true,
Property.NodeScope
);
public static final Setting<Boolean> IP_FILTER_ENABLED_HTTP_SETTING = Setting.boolSetting(
setting("http.filter.enabled"),
true,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Setting<Boolean> IP_FILTER_ENABLED_SETTING = Setting.boolSetting(
setting("transport.filter.enabled"),
true,
Property.OperatorDynamic,
Property.NodeScope
);
private static final IPFilterValidator ALLOW_VALIDATOR = new IPFilterValidator(true);
private static final IPFilterValidator DENY_VALIDATOR = new IPFilterValidator(false);
public static final Setting<List<String>> TRANSPORT_FILTER_ALLOW_SETTING = Setting.stringListSetting(
setting("transport.filter.allow"),
ALLOW_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Setting<List<String>> TRANSPORT_FILTER_DENY_SETTING = Setting.stringListSetting(
setting("transport.filter.deny"),
DENY_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Setting<List<String>> REMOTE_CLUSTER_FILTER_ALLOW_SETTING = Setting.listSetting(
setting(REMOTE_CLUSTER_PREFIX + "filter.allow"),
TRANSPORT_FILTER_ALLOW_SETTING,
Function.identity(),
TRANSPORT_FILTER_ALLOW_SETTING::get,
ALLOW_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Setting<List<String>> REMOTE_CLUSTER_FILTER_DENY_SETTING = Setting.listSetting(
setting(REMOTE_CLUSTER_PREFIX + "filter.deny"),
TRANSPORT_FILTER_DENY_SETTING,
Function.identity(),
TRANSPORT_FILTER_DENY_SETTING::get,
DENY_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Setting.AffixSetting<List<String>> PROFILE_FILTER_DENY_SETTING = Setting.affixKeySetting(
"transport.profiles.",
"xpack.security.filter.deny",
key -> Setting.stringListSetting(key, DENY_VALIDATOR, Property.OperatorDynamic, Property.NodeScope)
);
public static final Setting.AffixSetting<List<String>> PROFILE_FILTER_ALLOW_SETTING = Setting.affixKeySetting(
"transport.profiles.",
"xpack.security.filter.allow",
key -> Setting.stringListSetting(key, ALLOW_VALIDATOR, Property.OperatorDynamic, Property.NodeScope)
);
private static final Setting<List<String>> HTTP_FILTER_ALLOW_FALLBACK = Setting.listSetting(
"transport.profiles.default.xpack.security.filter.allow",
TRANSPORT_FILTER_ALLOW_SETTING,
Function.identity(),
TRANSPORT_FILTER_ALLOW_SETTING::get,
ALLOW_VALIDATOR,
Property.NodeScope
);
public static final Setting<List<String>> HTTP_FILTER_ALLOW_SETTING = Setting.listSetting(
setting("http.filter.allow"),
HTTP_FILTER_ALLOW_FALLBACK,
Function.identity(),
HTTP_FILTER_ALLOW_FALLBACK::get,
ALLOW_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
private static final Setting<List<String>> HTTP_FILTER_DENY_FALLBACK = Setting.listSetting(
"transport.profiles.default.xpack.security.filter.deny",
TRANSPORT_FILTER_DENY_SETTING,
Function.identity(),
TRANSPORT_FILTER_DENY_SETTING::get,
DENY_VALIDATOR,
Property.NodeScope
);
public static final Setting<List<String>> HTTP_FILTER_DENY_SETTING = Setting.listSetting(
setting("http.filter.deny"),
HTTP_FILTER_DENY_FALLBACK,
Function.identity(),
HTTP_FILTER_DENY_FALLBACK::get,
DENY_VALIDATOR,
Property.OperatorDynamic,
Property.NodeScope
);
public static final Map<String, Object> DISABLED_USAGE_STATS = Map.of("http", false, "transport", false);
public static final SecurityIpFilterRule DEFAULT_PROFILE_ACCEPT_ALL = new SecurityIpFilterRule(true, "default:accept_all") {
@Override
public boolean matches(InetSocketAddress remoteAddress) {
return true;
}
@Override
public IpFilterRuleType ruleType() {
return IpFilterRuleType.ACCEPT;
}
};
private static final Logger logger = LogManager.getLogger(IPFilter.class);
private final AuditTrailService auditTrailService;
private final XPackLicenseState licenseState;
private final boolean alwaysAllowBoundAddresses;
private volatile Map<String, SecurityIpFilterRule[]> rules = Collections.emptyMap();
private volatile boolean isIpFilterEnabled;
private volatile boolean isHttpFilterEnabled;
private final Set<String> profiles;
private volatile List<String> transportAllowFilter;
private volatile List<String> transportDenyFilter;
private volatile List<String> httpAllowFilter;
private volatile List<String> httpDenyFilter;
private final SetOnce<BoundTransportAddress> boundTransportAddress = new SetOnce<>();
private final SetOnce<BoundTransportAddress> boundHttpTransportAddress = new SetOnce<>();
private final SetOnce<Map<String, BoundTransportAddress>> profileBoundAddress = new SetOnce<>();
private final Map<String, List<String>> profileAllowRules = Collections.synchronizedMap(new HashMap<>());
private final Map<String, List<String>> profileDenyRules = Collections.synchronizedMap(new HashMap<>());
/**
 * Builds the initial IP filter state from node {@code settings} and registers
 * dynamic-setting consumers so the compiled rules are rebuilt whenever a
 * relevant filter setting changes at runtime.
 *
 * @param settings          node settings providing the initial filter configuration
 * @param auditTrailService used by {@link #accept} to audit granted/denied connections
 * @param clusterSettings   registry on which the dynamic update consumers are installed
 * @param licenseState      consulted to decide whether filtering is licensed at all
 */
public IPFilter(
    final Settings settings,
    AuditTrailService auditTrailService,
    ClusterSettings clusterSettings,
    XPackLicenseState licenseState
) {
    this.auditTrailService = auditTrailService;
    this.licenseState = licenseState;
    this.alwaysAllowBoundAddresses = ALLOW_BOUND_ADDRESSES_SETTING.get(settings);
    // Snapshot the configured allow/deny lists and enabled flags for HTTP and transport.
    httpDenyFilter = HTTP_FILTER_DENY_SETTING.get(settings);
    httpAllowFilter = HTTP_FILTER_ALLOW_SETTING.get(settings);
    transportAllowFilter = TRANSPORT_FILTER_ALLOW_SETTING.get(settings);
    transportDenyFilter = TRANSPORT_FILTER_DENY_SETTING.get(settings);
    isHttpFilterEnabled = IP_FILTER_ENABLED_HTTP_SETTING.get(settings);
    isIpFilterEnabled = IP_FILTER_ENABLED_SETTING.get(settings);
    // Collect the explicitly configured transport profiles (mutable set: the
    // remote-cluster pseudo-profile may be added below).
    Set<String> profiles = settings.getGroups("transport.profiles.", true)
        .keySet()
        .stream()
        .filter(k -> TransportSettings.DEFAULT_PROFILE.equals(k) == false) // exclude default profile -- it's handled differently
        .collect(Collectors.toCollection(HashSet::new));
    assert false == profiles.contains(REMOTE_CLUSTER_PROFILE);
    // Per-profile allow/deny rules come from the affix settings.
    for (String profile : profiles) {
        Setting<List<String>> allowSetting = PROFILE_FILTER_ALLOW_SETTING.getConcreteSettingForNamespace(profile);
        profileAllowRules.put(profile, allowSetting.get(settings));
        Setting<List<String>> denySetting = PROFILE_FILTER_DENY_SETTING.getConcreteSettingForNamespace(profile);
        profileDenyRules.put(profile, denySetting.get(settings));
    }
    // The remote-cluster server gets its own pseudo-profile with dedicated settings.
    if (REMOTE_CLUSTER_SERVER_ENABLED.get(settings)) {
        logger.debug(
            "Remote access is enabled, populating filters for profile [{}] with contents of [{}] and [{}]",
            REMOTE_CLUSTER_PROFILE,
            REMOTE_CLUSTER_FILTER_ALLOW_SETTING.getKey(),
            REMOTE_CLUSTER_FILTER_DENY_SETTING.getKey()
        );
        profiles.add(REMOTE_CLUSTER_PROFILE);
        profileAllowRules.put(REMOTE_CLUSTER_PROFILE, REMOTE_CLUSTER_FILTER_ALLOW_SETTING.get(settings));
        profileDenyRules.put(REMOTE_CLUSTER_PROFILE, REMOTE_CLUSTER_FILTER_DENY_SETTING.get(settings));
    }
    this.profiles = Collections.unmodifiableSet(profiles);
    // Every dynamic setting change funnels into updateRules() via these consumers.
    clusterSettings.addSettingsUpdateConsumer(IP_FILTER_ENABLED_HTTP_SETTING, this::setHttpFiltering);
    clusterSettings.addSettingsUpdateConsumer(IP_FILTER_ENABLED_SETTING, this::setTransportFiltering);
    clusterSettings.addSettingsUpdateConsumer(TRANSPORT_FILTER_ALLOW_SETTING, this::setTransportAllowFilter);
    clusterSettings.addSettingsUpdateConsumer(TRANSPORT_FILTER_DENY_SETTING, this::setTransportDenyFilter);
    clusterSettings.addSettingsUpdateConsumer(REMOTE_CLUSTER_FILTER_ALLOW_SETTING, this::setRemoteAccessAllowFilter);
    clusterSettings.addSettingsUpdateConsumer(REMOTE_CLUSTER_FILTER_DENY_SETTING, this::setRemoteAccessDenyFilter);
    clusterSettings.addSettingsUpdateConsumer(HTTP_FILTER_ALLOW_SETTING, this::setHttpAllowFilter);
    clusterSettings.addSettingsUpdateConsumer(HTTP_FILTER_DENY_SETTING, this::setHttpDenyFilter);
    clusterSettings.addAffixUpdateConsumer(PROFILE_FILTER_ALLOW_SETTING, this::setProfileAllowRules, (a, b) -> {});
    clusterSettings.addAffixUpdateConsumer(PROFILE_FILTER_DENY_SETTING, this::setProfileDenyRules, (a, b) -> {});
    updateRules();
}
/**
 * Reports whether IP filtering is effectively active for HTTP and transport:
 * a channel counts as "filtered" only when its enabled flag is set AND at least
 * one allow or deny rule is configured for it.
 *
 * @return a two-entry map with boolean values under the keys "http" and "transport"
 */
public Map<String, Object> usageStats() {
    Map<String, Object> map = Maps.newMapWithExpectedSize(2);
    boolean anyHttpRules = httpAllowFilter.isEmpty() == false || httpDenyFilter.isEmpty() == false;
    boolean anyTransportRules = transportAllowFilter.isEmpty() == false || transportDenyFilter.isEmpty() == false;
    map.put("http", isHttpFilterEnabled && anyHttpRules);
    map.put("transport", isIpFilterEnabled && anyTransportRules);
    return map;
}
// ---------------------------------------------------------------------------
// Dynamic-setting update callbacks. Each one stores the new value and then
// rebuilds the compiled rule map via updateRules(), so a settings change takes
// effect immediately.
// ---------------------------------------------------------------------------

// Replaces the allow rules of a named transport profile.
private void setProfileAllowRules(String profile, List<String> rules) {
    profileAllowRules.put(profile, rules);
    updateRules();
}

// Replaces the deny rules of a named transport profile.
private void setProfileDenyRules(String profile, List<String> rules) {
    profileDenyRules.put(profile, rules);
    updateRules();
}

// Remote-cluster (remote access) filters are stored under the dedicated pseudo-profile.
private void setRemoteAccessAllowFilter(List<String> filter) {
    profileAllowRules.put(REMOTE_CLUSTER_PROFILE, filter);
    updateRules();
}

private void setRemoteAccessDenyFilter(List<String> filter) {
    profileDenyRules.put(REMOTE_CLUSTER_PROFILE, filter);
    updateRules();
}

private void setHttpDenyFilter(List<String> filter) {
    this.httpDenyFilter = filter;
    updateRules();
}

private void setHttpAllowFilter(List<String> filter) {
    this.httpAllowFilter = filter;
    updateRules();
}

private void setTransportDenyFilter(List<String> filter) {
    this.transportDenyFilter = filter;
    updateRules();
}

private void setTransportAllowFilter(List<String> filter) {
    this.transportAllowFilter = filter;
    updateRules();
}

// Toggles filtering for the transport layer as a whole.
private void setTransportFiltering(boolean enabled) {
    this.isIpFilterEnabled = enabled;
    updateRules();
}

// Toggles filtering for the HTTP layer as a whole.
private void setHttpFiltering(boolean enabled) {
    this.isHttpFilterEnabled = enabled;
    updateRules();
}
/**
 * Decides whether a connection from {@code peerAddress} on the given profile is
 * allowed. The first rule that matches the peer address wins; if no rule matches,
 * the connection is accepted and audited against the implicit accept-all rule.
 *
 * @param profile     the transport/HTTP profile the connection arrived on
 * @param peerAddress the remote address of the connecting peer
 * @return true if the connection should be accepted
 */
public boolean accept(String profile, InetSocketAddress peerAddress) {
    // Without the IP filtering license feature, everything is accepted (not audited).
    if (Security.IP_FILTERING_FEATURE.checkWithoutTracking(licenseState) == false) {
        return true;
    }
    // No compiled rules for this profile (e.g. filtering disabled) -> accept.
    if (rules.containsKey(profile) == false) {
        // FIXME we need to audit here
        return true;
    }
    AuditTrail auditTrail = auditTrailService.get();
    for (SecurityIpFilterRule rule : rules.get(profile)) {
        if (rule.matches(peerAddress)) {
            // First match decides; ACCEPT rules grant, anything else denies.
            boolean isAllowed = rule.ruleType() == IpFilterRuleType.ACCEPT;
            if (isAllowed) {
                auditTrail.connectionGranted(peerAddress, profile, rule);
            } else {
                auditTrail.connectionDenied(peerAddress, profile, rule);
            }
            return isAllowed;
        }
    }
    // No rule matched: fall through to the default accept-all behavior.
    auditTrail.connectionGranted(peerAddress, profile, DEFAULT_PROFILE_ACCEPT_ALL);
    return true;
}
// Recompiles the rule map from the current settings snapshot and publishes it
// via the volatile 'rules' field. Synchronized so concurrent setting updates
// cannot interleave their recompilations.
private synchronized void updateRules() {
    this.rules = parseSettings();
}

// Builds the per-profile rule arrays. Returns an empty map when filtering is
// disabled for both HTTP and transport. Profiles whose bound addresses are not
// known yet are skipped (rules for them are built once addresses are set).
private Map<String, SecurityIpFilterRule[]> parseSettings() {
    if (isIpFilterEnabled || isHttpFilterEnabled) {
        Map<String, SecurityIpFilterRule[]> profileRules = new HashMap<>();
        if (isHttpFilterEnabled && boundHttpTransportAddress.get() != null) {
            TransportAddress[] localAddresses = boundHttpTransportAddress.get().boundAddresses();
            profileRules.put(HTTP_PROFILE_NAME, createRules(httpAllowFilter, httpDenyFilter, localAddresses));
        }
        if (isIpFilterEnabled && boundTransportAddress.get() != null) {
            TransportAddress[] localAddresses = boundTransportAddress.get().boundAddresses();
            profileRules.put(TransportSettings.DEFAULT_PROFILE, createRules(transportAllowFilter, transportDenyFilter, localAddresses));
            for (String profile : profiles) {
                BoundTransportAddress profileBoundTransportAddress = profileBoundAddress.get().get(profile);
                if (profileBoundTransportAddress == null) {
                    // this could happen if a user updates the settings dynamically with a new profile
                    logger.warn("skipping ip filter rules for profile [{}] since the profile is not bound to any addresses", profile);
                    continue;
                }
                final List<String> allowRules = this.profileAllowRules.getOrDefault(profile, Collections.emptyList());
                final List<String> denyRules = this.profileDenyRules.getOrDefault(profile, Collections.emptyList());
                profileRules.put(profile, createRules(allowRules, denyRules, profileBoundTransportAddress.boundAddresses()));
            }
        }
        logger.debug("loaded ip filtering profiles: {}", profileRules.keySet());
        return unmodifiableMap(profileRules);
    } else {
        return Collections.emptyMap();
    }
}

// Compiles an ordered rule array for one profile: (optional) bound-address
// accept rule first, then all allow rules, then all deny rules. Order matters
// because accept() stops at the first matching rule.
private SecurityIpFilterRule[] createRules(List<String> allow, List<String> deny, TransportAddress[] boundAddresses) {
    List<SecurityIpFilterRule> rules = new ArrayList<>();
    // if we are always going to allow the bound addresses, then the rule for them should be the first rule in the list
    if (alwaysAllowBoundAddresses) {
        assert boundAddresses != null && boundAddresses.length > 0;
        rules.add(new SecurityIpFilterRule(true, boundAddresses));
    }
    // add all rules to the same list. Allow takes precedence so they must come first!
    for (String value : allow) {
        rules.add(new SecurityIpFilterRule(true, value));
    }
    for (String value : deny) {
        rules.add(new SecurityIpFilterRule(false, value));
    }
    return rules.toArray(new SecurityIpFilterRule[rules.size()]);
}
/**
 * Records the node's bound transport addresses (overall and per profile) once
 * the transport has started, then recompiles the rules so the bound-address
 * accept rules can be created. The SetOnce fields allow this exactly once.
 */
public void setBoundTransportAddress(
    BoundTransportAddress boundTransportAddress,
    Map<String, BoundTransportAddress> profileBoundAddress
) {
    this.boundTransportAddress.set(boundTransportAddress);
    this.profileBoundAddress.set(profileBoundAddress);
    updateRules();
}

/**
 * Records the HTTP server's bound addresses once the HTTP transport has started
 * and recompiles the rules. May only be called once (SetOnce).
 */
public void setBoundHttpTransportAddress(BoundTransportAddress boundHttpTransportAddress) {
    this.boundHttpTransportAddress.set(boundHttpTransportAddress);
    updateRules();
}
/**
 * Registers every IP-filter related setting with the given settings list so the
 * node recognizes and validates them.
 *
 * @param settings mutable list the filter settings are appended to
 */
public static void addSettings(List<Setting<?>> settings) {
    Collections.addAll(
        settings,
        ALLOW_BOUND_ADDRESSES_SETTING,
        IP_FILTER_ENABLED_SETTING,
        IP_FILTER_ENABLED_HTTP_SETTING,
        HTTP_FILTER_ALLOW_SETTING,
        HTTP_FILTER_DENY_SETTING,
        TRANSPORT_FILTER_ALLOW_SETTING,
        TRANSPORT_FILTER_DENY_SETTING,
        REMOTE_CLUSTER_FILTER_ALLOW_SETTING,
        REMOTE_CLUSTER_FILTER_DENY_SETTING,
        PROFILE_FILTER_ALLOW_SETTING,
        PROFILE_FILTER_DENY_SETTING
    );
}
private static | IPFilter |
java | quarkusio__quarkus | integration-tests/devmode/src/test/java/io/quarkus/test/no/src/main/NoSrcMainDevModeTest.java | {
"start": 408,
"end": 1296
// Dev-mode test verifying that Quarkus live reload works for a project that has
// no src/main directory on disk (sources come only from the test archive).
class ____ {
    @RegisterExtension
    static final QuarkusDevModeTest TEST = new QuarkusDevModeTest()
        .withApplicationRoot((jar) -> jar
            .addClass(NoSrcMainResource.class)
            .addAsResource(new StringAsset("test.message = Hello from NoSrcMainDevModeTest"),
                "application.properties"));

    @Test
    public void validateConfigBean() {
        // The whole point of this test is the missing src/main layout; fail fast if it exists.
        Assertions.assertFalse(Files.exists(Paths.get("src/main")), "Non-existence of src/main is a prerequisite of this test");
        RestAssured.get("/message").then().body(is("Hello from NoSrcMainDevModeTest"));
        // Modify the resource source and expect dev mode to hot-reload the change.
        TEST.modifySourceFile(NoSrcMainResource.class,
            oldSource -> oldSource.replace("return message;", "return \"Changed on the fly!\";"));
        RestAssured.get("/message").then().body(is("Changed on the fly!"));
    }
}
| NoSrcMainDevModeTest |
java | elastic__elasticsearch | x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java | {
"start": 3816,
"end": 15392
} | class ____ extends ESTestCase {
// Registry under test plus the scaffolding it needs (thread pool, cluster
// service, and a client whose behavior each test scripts via setVerifier).
private SnapshotLifecycleTemplateRegistry registry;
private NamedXContentRegistry xContentRegistry;
private ClusterService clusterService;
private ThreadPool threadPool;
private VerifyingClient client;

// Builds a fresh registry with an xContent registry that can parse the ILM
// lifecycle types referenced by the SLM history policy.
@Before
public void createRegistryAndClient() {
    threadPool = new TestThreadPool(this.getClass().getName());
    client = new VerifyingClient(threadPool);
    clusterService = ClusterServiceUtils.createClusterService(threadPool);
    List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
    entries.addAll(
        Arrays.asList(
            new NamedXContentRegistry.Entry(
                LifecycleType.class,
                new ParseField(TimeseriesLifecycleType.TYPE),
                (p) -> TimeseriesLifecycleType.INSTANCE
            ),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)
        )
    );
    xContentRegistry = new NamedXContentRegistry(entries);
    registry = new SnapshotLifecycleTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry);
}

// Tears down the per-test thread pool (cluster service is bound to it).
@After
@Override
public void tearDown() throws Exception {
    super.tearDown();
    threadPool.shutdownNow();
}
// With SLM history disabled, the registry must expose no templates or policies.
public void testDisabledDoesNotAddTemplates() {
    Settings settings = Settings.builder().put(SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), false).build();
    SnapshotLifecycleTemplateRegistry disabledRegistry = new SnapshotLifecycleTemplateRegistry(
        settings,
        clusterService,
        threadPool,
        client,
        xContentRegistry
    );
    assertThat(disabledRegistry.getComposableTemplateConfigs(), anEmptyMap());
    assertThat(disabledRegistry.getLifecyclePolicies(), hasSize(0));
}

// A cluster state without the SLM template should trigger an immediate PUT of
// every registry template on a cluster-changed event.
public void testThatNonExistingTemplatesAreAddedImmediately() throws Exception {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), nodes);
    AtomicInteger calledTimes = new AtomicInteger(0);
    client.setVerifier((action, request, listener) -> verifyTemplateInstalled(calledTimes, action, request, listener));
    registry.clusterChanged(event);
    assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getComposableTemplateConfigs().size())));

    calledTimes.set(0);

    // attempting to register the event multiple times as a race condition can yield this test flaky, namely:
    // when calling registry.clusterChanged(newEvent) the templateCreationsInProgress state that the IndexTemplateRegistry maintains
    // might've not yet been updated to reflect that the first template registration was complete, so a second template registration
    // will not be issued anymore, leaving calledTimes to 0
    assertBusy(() -> {
        // now delete one template from the cluster state and lets retry
        ClusterChangedEvent newEvent = createClusterChangedEvent(Collections.emptyMap(), nodes);
        registry.clusterChanged(newEvent);
        assertThat(calledTimes.get(), greaterThan(1));
    });
}

// A cluster state without the SLM policy should trigger exactly one ILM PUT for
// the history lifecycle policy; template PUTs are ignored here.
public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    AtomicInteger calledTimes = new AtomicInteger(0);
    client.setVerifier((action, request, listener) -> {
        if (action == ILMActions.PUT) {
            calledTimes.incrementAndGet();
            assertThat(request, instanceOf(PutLifecycleRequest.class));
            final PutLifecycleRequest putRequest = (PutLifecycleRequest) request;
            assertThat(putRequest.getPolicy().getName(), equalTo(SLM_POLICY_NAME));
            assertNotNull(listener);
            return AcknowledgedResponse.TRUE;
        } else if (action == TransportPutComposableIndexTemplateAction.TYPE) {
            // Ignore this, it's verified in another test
            return new TestPutIndexTemplateResponse(true);
        } else {
            fail("client called with unexpected request:" + request.toString());
            return null;
        }
    });

    ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), nodes);
    registry.clusterChanged(event);
    assertBusy(() -> assertThat(calledTimes.get(), equalTo(1)));
}
// If the exact policy already exists in the cluster state, the registry must
// not re-put it (the verifier fails on any ILM PUT).
public void testPolicyAlreadyExists() {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    Map<String, LifecyclePolicy> policyMap = new HashMap<>();
    List<LifecyclePolicy> policies = registry.getLifecyclePolicies();
    assertThat(policies, hasSize(1));
    LifecyclePolicy policy = policies.get(0);
    policyMap.put(policy.getName(), policy);
    client.setVerifier((action, request, listener) -> {
        if (action == TransportPutComposableIndexTemplateAction.TYPE) {
            // Ignore this, it's verified in another test
            return new TestPutIndexTemplateResponse(true);
        } else if (action == ILMActions.PUT) {
            fail("if the policy already exists it should be re-put");
        } else {
            fail("client called with unexpected request:" + request.toString());
        }
        return null;
    });

    ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), policyMap, nodes);
    registry.clusterChanged(event);
}

// Even if the existing policy under the same name DIFFERS from the registry's
// version, the registry must not overwrite user modifications with a re-put.
public void testPolicyAlreadyExistsButDiffers() throws IOException {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    Map<String, LifecyclePolicy> policyMap = new HashMap<>();
    String policyStr = "{\"phases\":{\"delete\":{\"min_age\":\"1m\",\"actions\":{\"delete\":{}}}}}";
    List<LifecyclePolicy> policies = registry.getLifecyclePolicies();
    assertThat(policies, hasSize(1));
    LifecyclePolicy policy = policies.get(0);
    client.setVerifier((action, request, listener) -> {
        if (action == TransportPutComposableIndexTemplateAction.TYPE) {
            // Ignore this, it's verified in another test
            return new TestPutIndexTemplateResponse(true);
        } else if (action == ILMActions.PUT) {
            fail("if the policy already exists it should be re-put");
        } else {
            fail("client called with unexpected request:" + request.toString());
        }
        return null;
    });
    try (
        XContentParser parser = XContentType.JSON.xContent()
            .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry), policyStr)
    ) {
        // Parse a deliberately different policy body under the registry policy's name.
        LifecyclePolicy different = LifecyclePolicy.parse(parser, policy.getName());
        policyMap.put(policy.getName(), different);
        ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), policyMap, nodes);
        registry.clusterChanged(event);
    }
}
// A template with a LOWER version in the cluster state must be re-installed.
public void testThatVersionedOldTemplatesAreUpgraded() throws Exception {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    ClusterChangedEvent event = createClusterChangedEvent(
        Collections.singletonMap(SLM_TEMPLATE_NAME, INDEX_TEMPLATE_VERSION - 1),
        nodes
    );
    AtomicInteger calledTimes = new AtomicInteger(0);
    client.setVerifier((action, request, listener) -> verifyTemplateInstalled(calledTimes, action, request, listener));
    registry.clusterChanged(event);
    assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getComposableTemplateConfigs().size())));
}

// A template with NO version (null) in the cluster state must also be upgraded.
public void testThatUnversionedOldTemplatesAreUpgraded() throws Exception {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    ClusterChangedEvent event = createClusterChangedEvent(Collections.singletonMap(SLM_TEMPLATE_NAME, null), nodes);
    AtomicInteger calledTimes = new AtomicInteger(0);
    client.setVerifier((action, request, listener) -> verifyTemplateInstalled(calledTimes, action, request, listener));
    registry.clusterChanged(event);
    assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getComposableTemplateConfigs().size())));
}

// Templates at the same or a higher version must be left alone (the verifier
// fails on any template PUT).
public void testSameOrHigherVersionTemplateNotUpgraded() throws Exception {
    DiscoveryNode node = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

    ClusterChangedEvent sameVersionEvent = createClusterChangedEvent(
        Collections.singletonMap(SLM_TEMPLATE_NAME, INDEX_TEMPLATE_VERSION),
        nodes
    );
    AtomicInteger calledTimes = new AtomicInteger(0);
    client.setVerifier((action, request, listener) -> {
        if (action == TransportPutComposableIndexTemplateAction.TYPE) {
            fail("template should not have been re-installed");
            return null;
        } else if (action == ILMActions.PUT) {
            // Ignore this, it's verified in another test
            return AcknowledgedResponse.TRUE;
        } else {
            fail("client called with unexpected request:" + request.toString());
            return null;
        }
    });
    registry.clusterChanged(sameVersionEvent);
    ClusterChangedEvent higherVersionEvent = createClusterChangedEvent(
        Collections.singletonMap(SLM_TEMPLATE_NAME, INDEX_TEMPLATE_VERSION + randomIntBetween(1, 1000)),
        nodes
    );
    registry.clusterChanged(higherVersionEvent);
}

// Without an elected master in the node's view, the registry must do nothing.
public void testThatMissingMasterNodeDoesNothing() {
    DiscoveryNode localNode = DiscoveryNodeUtils.create("node");
    DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").add(localNode).build();

    client.setVerifier((a, r, l) -> {
        fail("if the master is missing nothing should happen");
        return null;
    });

    ClusterChangedEvent event = createClusterChangedEvent(Collections.singletonMap(SLM_TEMPLATE_NAME, null), nodes);
    registry.clusterChanged(event);
}

// The template name must embed its version so upgrades create a new name.
public void testTemplateNameIsVersioned() {
    assertThat(SLM_TEMPLATE_NAME, endsWith("-" + INDEX_TEMPLATE_VERSION));
}
// -------------
/**
* A client that delegates to a verifying function for action/request/listener
*/
public static | SnapshotLifecycleTemplateRegistryTests |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/ByteBufferFileLoader.java | {
"start": 1093,
"end": 1427
// Factory that supplies ByteBufferFileLoader instances for loading File models
// as ByteBuffers. Stateless, so build() can return a fresh loader each time and
// teardown() has nothing to release.
class ____ implements ModelLoaderFactory<File, ByteBuffer> {

    @NonNull
    @Override
    public ModelLoader<File, ByteBuffer> build(@NonNull MultiModelLoaderFactory multiFactory) {
        return new ByteBufferFileLoader();
    }

    @Override
    public void teardown() {
        // Do nothing.
    }
}
private static final | Factory |
java | google__guava | android/guava/src/com/google/common/collect/ArrayTable.java | {
"start": 3683,
"end": 4861
} | class ____ methods involving the underlying array structure, where the array indices
* correspond to the position of a row or column in the lists of allowed keys and values. See the
* {@link #at}, {@link #set}, {@link #toArray}, {@link #rowKeyList}, and {@link #columnKeyList}
* methods for more details.
*
* <p>Note that this implementation is not synchronized. If multiple threads access the same cell of
* an {@code ArrayTable} concurrently and one of the threads modifies its value, there is no
* guarantee that the new value will be fully visible to the other threads. To guarantee that
* modifications are visible, synchronize access to the table. Unlike other {@code Table}
* implementations, synchronization is unnecessary between a thread that writes to one cell and a
* thread that reads from another.
*
* <p>See the Guava User Guide article on <a href=
* "https://github.com/google/guava/wiki/NewCollectionTypesExplained#table">{@code Table}</a>.
*
* @author Jared Levy
* @since 10.0
*/
// We explicitly list `implements Table<...>` so that its `@Nullable V` appears in Javadoc.
@SuppressWarnings("RedundancyRemover")
@GwtCompatible
public final | provides |
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/MvelHelper.java | {
"start": 995,
"end": 2853
// String helpers exposed to MVEL templates used by the packaging plugin.
class ____ {

    public static final MvelHelper INSTANCE = new MvelHelper();

    // Matches "{name}" placeholders so their closing brace can be escaped.
    private static final Pattern CURLY_BRACKET_ESCAPE = Pattern.compile("(\\{[a-zA-Z0-9]+?)\\}");

    // Matches bare http(s)/(s)ftp(s) URL prefixes that are not already inside an href attribute.
    private static final Pattern URL_ESCAPE = Pattern.compile("(?<!href=\")(http(:?s)?://|(:?s)?ftp(?:s)?://)");

    private MvelHelper() {
        // utility class
    }

    /**
     * Escapes curly-bracket placeholders and bare URL schemes in {@code raw} so
     * they render literally instead of being interpreted. Returns null for null input.
     */
    public static String escape(final String raw) {
        if (raw == null) {
            return null;
        }

        final String escapedCurlyBrackets = CURLY_BRACKET_ESCAPE.matcher(raw).replaceAll("\\\\$1\\}");

        return URL_ESCAPE.matcher(escapedCurlyBrackets).replaceAll("\\\\$1");
    }

    /**
     * Converts a dash-separated component scheme to an upper-camel-case name,
     * e.g. "my-comp" -> "MyComp".
     * NOTE(review): charAt(0) assumes a non-empty scheme — empty input would throw.
     */
    public static String componentName(String scheme) {
        String text = SchemaHelper.dashToCamelCase(scheme);
        // first char should be upper cased
        return Character.toUpperCase(text.charAt(0)) + text.substring(1);
    }

    /** Replaces inner-class '$' separators with '.' and terminates the signature with ';'. */
    public static String formatSignature(String signature) {
        signature = signature.replace('$', '.');
        return signature + ";";
    }

    /**
     * Returns the alias method name for an API method by matching its name
     * (case-insensitively) against the API's "pattern=alias" entries, or "" when
     * no pattern matches.
     */
    public static String apiMethodAlias(ApiModel api, ApiMethodModel method) {
        String name = method.getName();
        for (String alias : api.getAliases()) {
            int pos = alias.indexOf('=');
            String pattern = alias.substring(0, pos);
            String aliasMethod = alias.substring(pos + 1);
            // match ignore case
            if (Pattern.compile(pattern, Pattern.CASE_INSENSITIVE).matcher(name).matches()) {
                return aliasMethod;
            }
        }
        // empty if no alias
        return "";
    }

    /** Describes the API's direction: "Consumer", "Producer", or "Both". */
    public static String producerOrConsumer(ApiModel api) {
        if (api.isConsumerOnly()) {
            return "Consumer";
        } else if (api.isProducerOnly()) {
            return "Producer";
        }
        return "Both";
    }
}
| MvelHelper |
java | netty__netty | handler/src/main/java/io/netty/handler/traffic/TrafficCounter.java | {
"start": 4500,
"end": 20582
// Periodic task scheduled by start(): on each tick it rolls the per-interval
// counters and notifies the owning traffic shaping handler, but only while
// monitoring is active.
class ____ implements Runnable {
    @Override
    public void run() {
        if (!monitorActive) {
            return;
        }
        resetAccounting(milliSecondFromNano());
        if (trafficShapingHandler != null) {
            trafficShapingHandler.doAccounting(TrafficCounter.this);
        }
    }
}
/**
 * Start the monitoring process. No-op if already active. Scheduling only
 * happens when the check interval is positive and an executor is available.
 */
public synchronized void start() {
    if (monitorActive) {
        return;
    }
    lastTime.set(milliSecondFromNano());
    long localCheckInterval = checkInterval.get();
    // if executor is null, it means it is piloted by a GlobalChannelTrafficCounter, so no executor
    if (localCheckInterval > 0 && executor != null) {
        monitorActive = true;
        monitor = new TrafficMonitoringTask();
        scheduledFuture =
            executor.scheduleAtFixedRate(monitor, 0, localCheckInterval, TimeUnit.MILLISECONDS);
    }
}

/**
 * Stop the monitoring process. No-op if not active. Performs a final
 * accounting pass and cancels the scheduled task.
 */
public synchronized void stop() {
    if (!monitorActive) {
        return;
    }
    monitorActive = false;
    resetAccounting(milliSecondFromNano());
    if (trafficShapingHandler != null) {
        trafficShapingHandler.doAccounting(this);
    }
    if (scheduledFuture != null) {
        scheduledFuture.cancel(true);
    }
}

/**
 * Reset the accounting on Read and Write: snapshots the current-interval byte
 * counters into the "last*" fields, derives the per-second throughputs from the
 * elapsed interval, and advances the interval start time.
 *
 * @param newLastTime the milliseconds unix timestamp that we should be considered up-to-date for.
 */
synchronized void resetAccounting(long newLastTime) {
    long interval = newLastTime - lastTime.getAndSet(newLastTime);
    if (interval == 0) {
        // nothing to do
        return;
    }
    // Warn (at debug) when the scheduler lagged more than twice the configured interval.
    if (logger.isDebugEnabled() && interval > checkInterval() << 1) {
        logger.debug("Acct schedule not ok: " + interval + " > 2*" + checkInterval() + " from " + name);
    }
    lastReadBytes = currentReadBytes.getAndSet(0);
    lastWrittenBytes = currentWrittenBytes.getAndSet(0);
    lastReadThroughput = lastReadBytes * 1000 / interval;
    // nb byte / checkInterval in ms * 1000 (1s)
    lastWriteThroughput = lastWrittenBytes * 1000 / interval;
    // nb byte / checkInterval in ms * 1000 (1s)
    realWriteThroughput = realWrittenBytes.getAndSet(0) * 1000 / interval;
    lastWritingTime = Math.max(lastWritingTime, writingTime);
    lastReadingTime = Math.max(lastReadingTime, readingTime);
}
/**
 * Constructor with the {@link AbstractTrafficShapingHandler} that hosts it, the {@link ScheduledExecutorService}
 * to use, its name, the checkInterval between two computations in milliseconds.
 * This variant has no associated handler (used standalone).
 *
 * @param executor
 *            the underlying executor service for scheduling checks, might be null when used
 *            from {@link GlobalChannelTrafficCounter}.
 * @param name
 *            the name given to this monitor.
 * @param checkInterval
 *            the checkInterval in millisecond between two computations.
 */
public TrafficCounter(ScheduledExecutorService executor, String name, long checkInterval) {
    this.name = checkNotNull(name, "name");
    trafficShapingHandler = null;
    this.executor = executor;
    init(checkInterval);
}

/**
 * Constructor with the {@link AbstractTrafficShapingHandler} that hosts it, the Timer to use, its
 * name, the checkInterval between two computations in millisecond.
 *
 * @param trafficShapingHandler
 *            the associated AbstractTrafficShapingHandler.
 * @param executor
 *            the underlying executor service for scheduling checks, might be null when used
 *            from {@link GlobalChannelTrafficCounter}.
 * @param name
 *            the name given to this monitor.
 * @param checkInterval
 *            the checkInterval in millisecond between two computations.
 */
public TrafficCounter(
    AbstractTrafficShapingHandler trafficShapingHandler, ScheduledExecutorService executor,
    String name, long checkInterval) {
    this.name = checkNotNull(name, "name");
    this.trafficShapingHandler = checkNotNullWithIAE(trafficShapingHandler, "trafficShapingHandler");
    this.executor = executor;
    init(checkInterval);
}

// Shared constructor tail: seeds all timestamps and applies the initial interval.
private void init(long checkInterval) {
    // absolute time: informative only
    lastCumulativeTime = System.currentTimeMillis();
    writingTime = milliSecondFromNano();
    readingTime = writingTime;
    lastWritingTime = writingTime;
    lastReadingTime = writingTime;
    configure(checkInterval);
}

/**
 * Change checkInterval between two computations in millisecond. The value is
 * rounded down to a multiple of 10 ms; a non-positive result stops monitoring,
 * otherwise monitoring is restarted with the new interval.
 *
 * @param newCheckInterval The new check interval (in milliseconds)
 */
public void configure(long newCheckInterval) {
    long newInterval = newCheckInterval / 10 * 10;
    if (checkInterval.getAndSet(newInterval) != newInterval) {
        if (newInterval <= 0) {
            stop();
            // No more active monitoring
            lastTime.set(milliSecondFromNano());
        } else {
            // Restart
            stop();
            start();
        }
    }
}
/**
 * Computes counters for Read: adds {@code recv} to both the current-interval
 * and cumulative read counters.
 *
 * @param recv
 *            the size in bytes to read
 */
void bytesRecvFlowControl(long recv) {
    currentReadBytes.addAndGet(recv);
    cumulativeReadBytes.addAndGet(recv);
}

/**
 * Computes counters for Write: adds {@code write} to both the current-interval
 * and cumulative written counters.
 *
 * @param write
 *            the size in bytes to write
 */
void bytesWriteFlowControl(long write) {
    currentWrittenBytes.addAndGet(write);
    cumulativeWrittenBytes.addAndGet(write);
}

/**
 * Computes counters for Real Write: bytes actually flushed to the wire during
 * the current interval (tracked separately from the queued write counters).
 *
 * @param write
 *            the size in bytes to write
 */
void bytesRealWriteFlowControl(long write) {
    realWrittenBytes.addAndGet(write);
}
    /**
     * @return the current checkInterval between two computations of the traffic counter,
     *     in milliseconds.
     */
    public long checkInterval() {
        return checkInterval.get();
    }
    /**
     * @return the read throughput in bytes/s computed in the last check interval.
     */
    public long lastReadThroughput() {
        return lastReadThroughput;
    }
    /**
     * @return the write throughput in bytes/s computed in the last check interval.
     */
    public long lastWriteThroughput() {
        return lastWriteThroughput;
    }
    /**
     * @return the number of bytes read during the last check interval.
     */
    public long lastReadBytes() {
        return lastReadBytes;
    }
    /**
     * @return the number of bytes written during the last check interval.
     */
    public long lastWrittenBytes() {
        return lastWrittenBytes;
    }
    /**
     * @return the current number of bytes read since the last check interval.
     */
    public long currentReadBytes() {
        return currentReadBytes.get();
    }
    /**
     * @return the current number of bytes written since the last check interval.
     */
    public long currentWrittenBytes() {
        return currentWrittenBytes.get();
    }
    /**
     * @return the time in milliseconds of the last check as of System.currentTimeMillis().
     */
    public long lastTime() {
        return lastTime.get();
    }
    /**
     * @return the cumulative number of bytes written since the counters were last reset.
     */
    public long cumulativeWrittenBytes() {
        return cumulativeWrittenBytes.get();
    }
    /**
     * @return the cumulative number of bytes read since the counters were last reset.
     */
    public long cumulativeReadBytes() {
        return cumulativeReadBytes.get();
    }
    /**
     * @return the lastCumulativeTime in milliseconds, as of System.currentTimeMillis(),
     *     when the cumulative counters were reset to 0.
     */
    public long lastCumulativeTime() {
        return lastCumulativeTime;
    }
    /**
     * @return the counter of bytes actually written out (not merely queued).
     */
    public AtomicLong getRealWrittenBytes() {
        return realWrittenBytes;
    }
    /**
     * @return the real write throughput in bytes/s computed in the last check interval.
     */
    public long getRealWriteThroughput() {
        return realWriteThroughput;
    }
    /**
     * Reset both read and written cumulative bytes counters and the associated absolute time
     * from System.currentTimeMillis().
     */
    public void resetCumulativeTime() {
        lastCumulativeTime = System.currentTimeMillis();
        cumulativeReadBytes.set(0);
        cumulativeWrittenBytes.set(0);
    }
    /**
     * @return the name of this TrafficCounter.
     */
    public String name() {
        return name;
    }
    /**
     * Returns the time to wait (if any) for the given length message, using the given limitTraffic
     * and the max wait time.
     *
     * @param size
     *            the recv size
     * @param limitTraffic
     *            the traffic limit in bytes per second.
     * @param maxTime
     *            the max time in ms to wait in case of excess of traffic.
     * @return the current time to wait (in ms) if needed for Read operation.
     * @deprecated use {@link #readTimeToWait(long, long, long, long)} and pass the current time
     *             explicitly.
     */
    @Deprecated
    public long readTimeToWait(final long size, final long limitTraffic, final long maxTime) {
        // Delegates with 'now' taken from the nano-based millisecond clock.
        return readTimeToWait(size, limitTraffic, maxTime, milliSecondFromNano());
    }
    /**
     * Returns the time to wait (if any) for the given length message, using the given limitTraffic
     * and the max wait time.
     *
     * @param size
     *            the recv size
     * @param limitTraffic
     *            the traffic limit in bytes per second
     * @param maxTime
     *            the max time in ms to wait in case of excess of traffic.
     * @param now the current time, in ms, as produced by {@code milliSecondFromNano()}
     * @return the current time to wait (in ms) if needed for Read operation.
     */
    public long readTimeToWait(final long size, final long limitTraffic, final long maxTime, final long now) {
        // Always account the bytes, even when no throttling will be applied.
        bytesRecvFlowControl(size);
        if (size == 0 || limitTraffic == 0) {
            // Nothing read, or shaping disabled: never wait.
            return 0;
        }
        final long lastTimeCheck = lastTime.get();
        long sum = currentReadBytes.get();
        long localReadingTime = readingTime;
        long lastRB = lastReadBytes;
        final long interval = now - lastTimeCheck;
        // Delay already scheduled beyond the last check, if any.
        long pastDelay = Math.max(lastReadingTime - lastTimeCheck, 0);
        if (interval > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
            // Enough interval time to compute shaping
            // Ideal duration for 'sum' bytes at the limit, minus time already elapsed,
            // plus the delay already imposed.
            long time = sum * 1000 / limitTraffic - interval + pastDelay;
            if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Time: " + time + ':' + sum + ':' + interval + ':' + pastDelay);
                }
                // Cap at maxTime when the projected overall wait also exceeds it.
                if (time > maxTime && now + time - localReadingTime > maxTime) {
                    time = maxTime;
                }
                readingTime = Math.max(localReadingTime, now + time);
                return time;
            }
            readingTime = Math.max(localReadingTime, now);
            return 0;
        }
        // take the last read interval check to get enough interval time
        long lastsum = sum + lastRB;
        long lastinterval = interval + checkInterval.get();
        long time = lastsum * 1000 / limitTraffic - lastinterval + pastDelay;
        if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
            if (logger.isDebugEnabled()) {
                logger.debug("Time: " + time + ':' + lastsum + ':' + lastinterval + ':' + pastDelay);
            }
            // Cap at maxTime when the projected overall wait also exceeds it.
            if (time > maxTime && now + time - localReadingTime > maxTime) {
                time = maxTime;
            }
            readingTime = Math.max(localReadingTime, now + time);
            return time;
        }
        readingTime = Math.max(localReadingTime, now);
        return 0;
    }
    /**
     * Returns the time to wait (if any) for the given length message, using the given limitTraffic
     * and the max wait time.
     *
     * @param size
     *            the write size
     * @param limitTraffic
     *            the traffic limit in bytes per second.
     * @param maxTime
     *            the max time in ms to wait in case of excess of traffic.
     * @return the current time to wait (in ms) if needed for Write operation.
     * @deprecated use {@link #writeTimeToWait(long, long, long, long)} and pass the current time
     *             explicitly.
     */
    @Deprecated
    public long writeTimeToWait(final long size, final long limitTraffic, final long maxTime) {
        // Delegates with 'now' taken from the nano-based millisecond clock.
        return writeTimeToWait(size, limitTraffic, maxTime, milliSecondFromNano());
    }
    /**
     * Returns the time to wait (if any) for the given length message, using the given limitTraffic
     * and the max wait time.
     *
     * @param size
     *            the write size
     * @param limitTraffic
     *            the traffic limit in bytes per second.
     * @param maxTime
     *            the max time in ms to wait in case of excess of traffic.
     * @param now the current time, in ms, as produced by {@code milliSecondFromNano()}
     * @return the current time to wait (in ms) if needed for Write operation.
     */
    public long writeTimeToWait(final long size, final long limitTraffic, final long maxTime, final long now) {
        // Always account the bytes, even when no throttling will be applied.
        bytesWriteFlowControl(size);
        if (size == 0 || limitTraffic == 0) {
            // Nothing to write, or shaping disabled: never wait.
            return 0;
        }
        final long lastTimeCheck = lastTime.get();
        long sum = currentWrittenBytes.get();
        long lastWB = lastWrittenBytes;
        long localWritingTime = writingTime;
        // Delay already scheduled beyond the last check, if any.
        long pastDelay = Math.max(lastWritingTime - lastTimeCheck, 0);
        final long interval = now - lastTimeCheck;
        if (interval > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
            // Enough interval time to compute shaping
            // Ideal duration for 'sum' bytes at the limit, minus time already elapsed,
            // plus the delay already imposed.
            long time = sum * 1000 / limitTraffic - interval + pastDelay;
            if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Time: " + time + ':' + sum + ':' + interval + ':' + pastDelay);
                }
                // Cap at maxTime when the projected overall wait also exceeds it.
                if (time > maxTime && now + time - localWritingTime > maxTime) {
                    time = maxTime;
                }
                writingTime = Math.max(localWritingTime, now + time);
                return time;
            }
            writingTime = Math.max(localWritingTime, now);
            return 0;
        }
        // take the last write interval check to get enough interval time
        long lastsum = sum + lastWB;
        long lastinterval = interval + checkInterval.get();
        long time = lastsum * 1000 / limitTraffic - lastinterval + pastDelay;
        if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) {
            if (logger.isDebugEnabled()) {
                logger.debug("Time: " + time + ':' + lastsum + ':' + lastinterval + ':' + pastDelay);
            }
            // Cap at maxTime when the projected overall wait also exceeds it.
            if (time > maxTime && now + time - localWritingTime > maxTime) {
                time = maxTime;
            }
            writingTime = Math.max(localWritingTime, now + time);
            return time;
        }
        writingTime = Math.max(localWritingTime, now);
        return 0;
    }
@Override
public String toString() {
return new StringBuilder(165).append("Monitor ").append(name)
.append(" Current Speed Read: ").append(lastReadThroughput >> 10).append(" KB/s, ")
.append("Asked Write: ").append(lastWriteThroughput >> 10).append(" KB/s, ")
.append("Real Write: ").append(realWriteThroughput >> 10).append(" KB/s, ")
.append("Current Read: ").append(currentReadBytes.get() >> 10).append(" KB, ")
.append("Current asked Write: ").append(currentWrittenBytes.get() >> 10).append(" KB, ")
.append("Current real Write: ").append(realWrittenBytes.get() >> 10).append(" KB").toString();
}
}
| TrafficMonitoringTask |
java | elastic__elasticsearch | libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java | {
"start": 5976,
"end": 6577
} | interface ____ {
void setLimitFlags(int v);
void setActiveProcessLimit(int v);
}
JobObjectBasicLimitInformation newJobObjectBasicLimitInformation();
/**
* Get job limit and state information
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
* Note: The infoLength parameter is omitted because implementions handle passing it
* Note: The returnLength parameter is omitted because all implementations pass null
*
* @param job job handle
* @param infoClass information | JobObjectBasicLimitInformation |
java | google__guava | android/guava-tests/test/com/google/common/collect/ForwardingTableTest.java | {
"start": 953,
"end": 1945
} | class ____ extends TestCase {
@SuppressWarnings("rawtypes")
public void testForwarding() {
new ForwardingWrapperTester()
.testForwarding(
Table.class,
new Function<Table, Table<?, ?, ?>>() {
@Override
public Table<?, ?, ?> apply(Table delegate) {
return wrap((Table<?, ?, ?>) delegate);
}
});
}
public void testEquals() {
Table<Integer, Integer, String> table1 = ImmutableTable.of(1, 1, "one");
Table<Integer, Integer, String> table2 = ImmutableTable.of(2, 2, "two");
new EqualsTester()
.addEqualityGroup(table1, wrap(table1), wrap(table1))
.addEqualityGroup(table2, wrap(table2))
.testEquals();
}
private static <R, C, V> Table<R, C, V> wrap(Table<R, C, V> delegate) {
return new ForwardingTable<R, C, V>() {
@Override
protected Table<R, C, V> delegate() {
return delegate;
}
};
}
}
| ForwardingTableTest |
java | apache__hadoop | hadoop-common-project/hadoop-annotations/src/main/java8/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java | {
"start": 1573,
"end": 2464
} | class ____ {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
}
public static boolean start(RootDoc root) {
System.out.println(
IncludePublicAnnotationsJDiffDoclet.class.getSimpleName());
RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
return JDiff.start(RootDocProcessor.process(root));
}
public static int optionLength(String option) {
Integer length = StabilityOptions.optionLength(option);
if (length != null) {
return length;
}
return JDiff.optionLength(option);
}
public static boolean validOptions(String[][] options,
DocErrorReporter reporter) {
StabilityOptions.validOptions(options, reporter);
String[][] filteredOptions = StabilityOptions.filterOptions(options);
return JDiff.validOptions(filteredOptions, reporter);
}
}
| IncludePublicAnnotationsJDiffDoclet |
java | quarkusio__quarkus | integration-tests/picocli-native/src/main/java/io/quarkus/it/picocli/CommandUsedAsParent.java | {
"start": 164,
"end": 304
} | class ____ {
@CommandLine.Option(names = "-p", description = "Value read by child command.")
String parentValue;
}
| CommandUsedAsParent |
java | square__javapoet | src/test/java/com/squareup/javapoet/MethodSpecTest.java | {
"start": 9350,
"end": 17901
} | interface ____{ }
abstract void foo(@PrivateAnnotation final String bar);
}
@Test public void overrideDoesNotCopyParameterAnnotations() {
TypeElement abstractTypeElement = getElement(AbstractClassWithPrivateAnnotation.class);
ExecutableElement fooElement = ElementFilter.methodsIn(abstractTypeElement.getEnclosedElements()).get(0);
ClassName implClassName = ClassName.get("com.squareup.javapoet", "Impl");
TypeSpec type = TypeSpec.classBuilder(implClassName)
.superclass(abstractTypeElement.asType())
.addMethod(MethodSpec.overriding(fooElement).build())
.build();
JavaFileObject jfo = JavaFile.builder(implClassName.packageName, type).build().toJavaFileObject();
Compilation compilation = javac().compile(jfo);
assertThat(compilation).succeeded();
}
@Test public void equalsAndHashCode() {
MethodSpec a = MethodSpec.constructorBuilder().build();
MethodSpec b = MethodSpec.constructorBuilder().build();
assertThat(a.equals(b)).isTrue();
assertThat(a.hashCode()).isEqualTo(b.hashCode());
a = MethodSpec.methodBuilder("taco").build();
b = MethodSpec.methodBuilder("taco").build();
assertThat(a.equals(b)).isTrue();
assertThat(a.hashCode()).isEqualTo(b.hashCode());
TypeElement classElement = getElement(Everything.class);
ExecutableElement methodElement = getOnlyElement(methodsIn(classElement.getEnclosedElements()));
a = MethodSpec.overriding(methodElement).build();
b = MethodSpec.overriding(methodElement).build();
assertThat(a.equals(b)).isTrue();
assertThat(a.hashCode()).isEqualTo(b.hashCode());
}
@Test public void withoutParameterJavaDoc() {
MethodSpec methodSpec = MethodSpec.methodBuilder("getTaco")
.addModifiers(Modifier.PRIVATE)
.addParameter(TypeName.DOUBLE, "money")
.addJavadoc("Gets the best Taco\n")
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "/**\n"
+ " * Gets the best Taco\n"
+ " */\n"
+ "private void getTaco(double money) {\n"
+ "}\n");
}
@Test public void withParameterJavaDoc() {
MethodSpec methodSpec = MethodSpec.methodBuilder("getTaco")
.addParameter(ParameterSpec.builder(TypeName.DOUBLE, "money")
.addJavadoc("the amount required to buy the taco.\n")
.build())
.addParameter(ParameterSpec.builder(TypeName.INT, "count")
.addJavadoc("the number of Tacos to buy.\n")
.build())
.addJavadoc("Gets the best Taco money can buy.\n")
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "/**\n"
+ " * Gets the best Taco money can buy.\n"
+ " *\n"
+ " * @param money the amount required to buy the taco.\n"
+ " * @param count the number of Tacos to buy.\n"
+ " */\n"
+ "void getTaco(double money, int count) {\n"
+ "}\n");
}
@Test public void withParameterJavaDocAndWithoutMethodJavadoc() {
MethodSpec methodSpec = MethodSpec.methodBuilder("getTaco")
.addParameter(ParameterSpec.builder(TypeName.DOUBLE, "money")
.addJavadoc("the amount required to buy the taco.\n")
.build())
.addParameter(ParameterSpec.builder(TypeName.INT, "count")
.addJavadoc("the number of Tacos to buy.\n")
.build())
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "/**\n"
+ " * @param money the amount required to buy the taco.\n"
+ " * @param count the number of Tacos to buy.\n"
+ " */\n"
+ "void getTaco(double money, int count) {\n"
+ "}\n");
}
@Test public void duplicateExceptionsIgnored() {
ClassName ioException = ClassName.get(IOException.class);
ClassName timeoutException = ClassName.get(TimeoutException.class);
MethodSpec methodSpec = MethodSpec.methodBuilder("duplicateExceptions")
.addException(ioException)
.addException(timeoutException)
.addException(timeoutException)
.addException(ioException)
.build();
assertThat(methodSpec.exceptions).isEqualTo(Arrays.asList(ioException, timeoutException));
assertThat(methodSpec.toBuilder().addException(ioException).build().exceptions)
.isEqualTo(Arrays.asList(ioException, timeoutException));
}
@Test public void nullIsNotAValidMethodName() {
try {
MethodSpec.methodBuilder(null);
fail("NullPointerException expected");
} catch (NullPointerException e) {
assertThat(e.getMessage()).isEqualTo("name == null");
}
}
@Test public void addModifiersVarargsShouldNotBeNull() {
try {
MethodSpec.methodBuilder("taco")
.addModifiers((Modifier[]) null);
fail("NullPointerException expected");
} catch (NullPointerException e) {
assertThat(e.getMessage()).isEqualTo("modifiers == null");
}
}
@Test public void modifyMethodName() {
MethodSpec methodSpec = MethodSpec.methodBuilder("initialMethod")
.build()
.toBuilder()
.setName("revisedMethod")
.build();
assertThat(methodSpec.toString()).isEqualTo("" + "void revisedMethod() {\n" + "}\n");
}
@Test public void modifyAnnotations() {
MethodSpec.Builder builder = MethodSpec.methodBuilder("foo")
.addAnnotation(Override.class)
.addAnnotation(SuppressWarnings.class);
builder.annotations.remove(1);
assertThat(builder.build().annotations).hasSize(1);
}
@Test public void modifyModifiers() {
MethodSpec.Builder builder = MethodSpec.methodBuilder("foo")
.addModifiers(Modifier.PUBLIC, Modifier.STATIC);
builder.modifiers.remove(1);
assertThat(builder.build().modifiers).containsExactly(Modifier.PUBLIC);
}
@Test public void modifyParameters() {
MethodSpec.Builder builder = MethodSpec.methodBuilder("foo")
.addParameter(int.class, "source");
builder.parameters.remove(0);
assertThat(builder.build().parameters).isEmpty();
}
@Test public void modifyTypeVariables() {
TypeVariableName t = TypeVariableName.get("T");
MethodSpec.Builder builder = MethodSpec.methodBuilder("foo")
.addTypeVariable(t)
.addTypeVariable(TypeVariableName.get("V"));
builder.typeVariables.remove(1);
assertThat(builder.build().typeVariables).containsExactly(t);
}
@Test public void ensureTrailingNewline() {
MethodSpec methodSpec = MethodSpec.methodBuilder("method")
.addCode("codeWithNoNewline();")
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "void method() {\n"
+ " codeWithNoNewline();\n"
+ "}\n");
}
/** Ensures that we don't add a duplicate newline if one is already present. */
@Test public void ensureTrailingNewlineWithExistingNewline() {
MethodSpec methodSpec = MethodSpec.methodBuilder("method")
.addCode("codeWithNoNewline();\n") // Have a newline already, so ensure we're not adding one
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "void method() {\n"
+ " codeWithNoNewline();\n"
+ "}\n");
}
@Test public void controlFlowWithNamedCodeBlocks() {
Map<String, Object> m = new HashMap<>();
m.put("field", "valueField");
m.put("threshold", "5");
MethodSpec methodSpec = MethodSpec.methodBuilder("method")
.beginControlFlow(named("if ($field:N > $threshold:L)", m))
.nextControlFlow(named("else if ($field:N == $threshold:L)", m))
.endControlFlow()
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "void method() {\n"
+ " if (valueField > 5) {\n"
+ " } else if (valueField == 5) {\n"
+ " }\n"
+ "}\n");
}
@Test public void doWhileWithNamedCodeBlocks() {
Map<String, Object> m = new HashMap<>();
m.put("field", "valueField");
m.put("threshold", "5");
MethodSpec methodSpec = MethodSpec.methodBuilder("method")
.beginControlFlow("do")
.addStatement(named("$field:N--", m))
.endControlFlow(named("while ($field:N > $threshold:L)", m))
.build();
assertThat(methodSpec.toString()).isEqualTo(""
+ "void method() {\n" +
" do {\n" +
" valueField--;\n" +
" } while (valueField > 5);\n" +
"}\n");
}
private static CodeBlock named(String format, Map<String, ?> args){
return CodeBlock.builder().addNamed(format, args).build();
}
}
| PrivateAnnotation |
java | quarkusio__quarkus | independent-projects/qute/generator/src/test/java/io/quarkus/qute/generator/hierarchy/Level2.java | {
"start": 53,
"end": 162
} | class ____ extends Level1 implements SecondLevel {
public int getLevel2() {
return 2;
}
}
| Level2 |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-guava-tests/src/test/java/org/assertj/tests/guava/api/InstanceOfAssertFactoriesTest.java | {
"start": 2512,
"end": 6215
} | class ____ {
@Test
void byte_source_factory_should_allow_byte_source_assertions() throws IOException {
// GIVEN
Object value = ByteSource.empty();
// WHEN
ByteSourceAssert result = assertThat(value).asInstanceOf(BYTE_SOURCE);
// THEN
result.isEmpty();
}
@Test
void multimap_factory_should_allow_multimap_assertions() {
// GIVEN
Object value = ImmutableMultimap.of("key", "value");
// WHEN
MultimapAssert<Object, Object> result = assertThat(value).asInstanceOf(MULTIMAP);
// THEN
result.contains(Assertions.entry("key", "value"));
}
@Test
void multimap_typed_factory_should_allow_multimap_typed_assertions() {
// GIVEN
Object value = ImmutableMultimap.of("key", "value");
// WHEN
MultimapAssert<String, String> result = assertThat(value).asInstanceOf(multimap(String.class, String.class));
// THEN
result.contains(Assertions.entry("key", "value"));
}
@Test
void optional_factory_should_allow_optional_assertions() {
// GIVEN
Object value = Optional.of("something");
// WHEN
OptionalAssert<Object> result = assertThat(value).asInstanceOf(OPTIONAL);
// THEN
result.isPresent();
}
@Test
void optional_typed_factory_should_allow_optional_typed_assertions() {
// GIVEN
Object value = Optional.of("something");
// WHEN
OptionalAssert<String> result = assertThat(value).asInstanceOf(optional(String.class));
// THEN
result.isPresent();
}
@Test
void range_factory_should_allow_range_assertions() {
// GIVEN
Object value = Range.atLeast(0);
// WHEN
RangeAssert<Integer> result = assertThat(value).asInstanceOf(range(Integer.class));
// THEN
result.contains(0);
}
@Test
void range_map_factory_should_allow_range_map_assertions() {
// GIVEN
Object value = ImmutableRangeMap.of(Range.atLeast(0), "value");
// WHEN
RangeMapAssert<Integer, String> result = assertThat(value).asInstanceOf(rangeMap(Integer.class, String.class));
// THEN
result.contains(entry(0, "value"));
}
@Test
void range_set_factory_should_allow_range_set_assertions() {
// GIVEN
Object value = ImmutableRangeSet.of(Range.closed(0, 1));
// WHEN
RangeSetAssert<Integer> result = assertThat(value).asInstanceOf(rangeSet(Integer.class));
// THEN
result.contains(0);
}
@Test
void table_factory_should_allow_table_assertions() {
// GIVEN
Object value = ImmutableTable.of(0, 0.0, "value");
// WHEN
TableAssert<Object, Object, Object> result = assertThat(value).asInstanceOf(TABLE);
// THEN
result.containsCell(0, 0.0, "value");
}
@Test
void table_typed_factory_should_allow_table_typed_assertions() {
// GIVEN
Object value = ImmutableTable.of(0, 0.0, "value");
// WHEN
TableAssert<Integer, Double, String> result = assertThat(value).asInstanceOf(table(Integer.class, Double.class,
String.class));
// THEN
result.containsCell(0, 0.0, "value");
}
@Test
void multiset_factory_should_allow_multiset_assertions() {
// GIVEN
Object value = ImmutableMultiset.of("value");
// WHEN
MultisetAssert<Object> result = assertThat(value).asInstanceOf(MULTISET);
// THEN
result.containsAtLeast(1, "value");
}
@Test
void multiset_typed_factory_should_allow_multiset_typed_assertions() {
// GIVEN
Object value = ImmutableMultiset.of("value");
// WHEN
MultisetAssert<String> result = assertThat(value).asInstanceOf(multiset(String.class));
// THEN
result.containsAtLeast(1, "value");
}
}
| InstanceOfAssertFactoriesTest |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/annotation/Controller.java | {
"start": 1344,
"end": 2534
} | interface ____ {
/**
* <p>This attribute returns the base URI of the controller</p>
*
* <p>A value of {@code /} can be used to map a controller
* to the root URI.</p>
*
* @return The base URI of the controller in the case of web applications
*/
@AliasFor(annotation = UriMapping.class, member = "value")
String value() default UriMapping.DEFAULT_URI;
/**
* @return The produced MediaType values. Defaults to application/json
*/
@AliasFor(annotation = Produces.class, member = "value")
String[] produces() default MediaType.APPLICATION_JSON;
/**
* @return The consumed MediaType for request bodies Defaults to application/json
*/
@AliasFor(annotation = Consumes.class, member = "value")
String[] consumes() default MediaType.APPLICATION_JSON;
/**
* Allows specifying an alternate port to run the controller on. Setting this member will
* cause.
*
* <p>The member is defined as a string to allow resolving the port value from configuration. For example: {@code member = "${my.port.number}"}</p>
* @return The port to use.
*/
String port() default "";
}
| Controller |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/attribute/AttributeUtil.java | {
"start": 1208,
"end": 5152
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.COMMON_LOGGER_NAME);
public static Map<String, String> alterCurrentAttributes(boolean create, Map<String, Attribute> all,
ImmutableMap<String, String> currentAttributes, ImmutableMap<String, String> newAttributes) {
Map<String, String> init = new HashMap<>();
Map<String, String> add = new HashMap<>();
Map<String, String> update = new HashMap<>();
Map<String, String> delete = new HashMap<>();
Set<String> keys = new HashSet<>();
for (Map.Entry<String, String> attribute : newAttributes.entrySet()) {
String key = attribute.getKey();
String realKey = realKey(key);
String value = attribute.getValue();
validate(realKey);
duplicationCheck(keys, realKey);
if (create) {
if (key.startsWith("+")) {
init.put(realKey, value);
} else {
throw new RuntimeException("only add attribute is supported while creating topic. key: " + realKey);
}
} else {
if (key.startsWith("+")) {
if (!currentAttributes.containsKey(realKey)) {
add.put(realKey, value);
} else {
update.put(realKey, value);
}
} else if (key.startsWith("-")) {
if (!currentAttributes.containsKey(realKey)) {
throw new RuntimeException("attempt to delete a nonexistent key: " + realKey);
}
delete.put(realKey, value);
} else {
throw new RuntimeException("wrong format key: " + realKey);
}
}
}
validateAlter(all, init, true, false);
validateAlter(all, add, false, false);
validateAlter(all, update, false, false);
validateAlter(all, delete, false, true);
log.info("add: {}, update: {}, delete: {}", add, update, delete);
HashMap<String, String> finalAttributes = new HashMap<>(currentAttributes);
finalAttributes.putAll(init);
finalAttributes.putAll(add);
finalAttributes.putAll(update);
for (String s : delete.keySet()) {
finalAttributes.remove(s);
}
return finalAttributes;
}
private static void duplicationCheck(Set<String> keys, String key) {
boolean notExist = keys.add(key);
if (!notExist) {
throw new RuntimeException("alter duplication key. key: " + key);
}
}
private static void validate(String kvAttribute) {
if (Strings.isNullOrEmpty(kvAttribute)) {
throw new RuntimeException("kv string format wrong.");
}
if (kvAttribute.contains("+")) {
throw new RuntimeException("kv string format wrong.");
}
if (kvAttribute.contains("-")) {
throw new RuntimeException("kv string format wrong.");
}
}
private static void validateAlter(Map<String, Attribute> all, Map<String, String> alter, boolean init, boolean delete) {
for (Map.Entry<String, String> entry : alter.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
Attribute attribute = all.get(key);
if (attribute == null) {
throw new RuntimeException("unsupported key: " + key);
}
if (!init && !attribute.isChangeable()) {
throw new RuntimeException("attempt to update an unchangeable attribute. key: " + key);
}
if (!delete) {
attribute.verify(value);
}
}
}
private static String realKey(String key) {
return key.substring(1);
}
}
| AttributeUtil |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/test/java/org/apache/flink/table/api/bridge/java/internal/StreamTableEnvironmentImplTest.java | {
"start": 4203,
"end": 4614
} | class ____ extends PlannerMock {
private final Transformation<?> transformation;
private TestPlanner(Transformation<?> transformation) {
this.transformation = transformation;
}
@Override
public List<Transformation<?>> translate(List<ModifyOperation> modifyOperations) {
return Collections.singletonList(transformation);
}
}
}
| TestPlanner |
java | apache__flink | flink-formats/flink-orc/src/test/java/org/apache/flink/orc/OrcFormatStatisticsReportTest.java | {
"start": 1696,
"end": 10311
} | class ____ extends StatisticsReportTestBase {
private static OrcFileFormatFactory.OrcBulkDecodingFormat orcBulkDecodingFormat;
@BeforeEach
public void setup(@TempDir File file) throws Exception {
super.setup(file);
createFileSystemSource();
Configuration configuration = new Configuration();
orcBulkDecodingFormat = new OrcFileFormatFactory.OrcBulkDecodingFormat(configuration);
}
@Override
protected String[] properties() {
List<String> ret = new ArrayList<>();
ret.add("'format'='orc'");
ret.add("'orc.compress'='snappy'");
return ret.toArray(new String[0]);
}
@Test
public void testOrcFormatStatsReportWithSingleFile() throws Exception {
// insert data and get statistics.
DataType dataType = tEnv.from("sourceTable").getResolvedSchema().toPhysicalRowDataType();
tEnv.fromValues(dataType, getData()).executeInsert("sourceTable").await();
assertThat(folder.listFiles()).hasSize(1);
File[] files = folder.listFiles();
assertThat(files).isNotNull();
TableStats tableStats =
orcBulkDecodingFormat.reportStatistics(
Collections.singletonList(new Path(files[0].toURI().toString())), dataType);
assertOrcFormatTableStatsEquals(tableStats, 3, 1L);
}
@Test
public void testOrcFormatStatsReportWithMultiFile() throws Exception {
// insert data and get statistics.
DataType dataType = tEnv.from("sourceTable").getResolvedSchema().toPhysicalRowDataType();
tEnv.fromValues(dataType, getData()).executeInsert("sourceTable").await();
tEnv.fromValues(dataType, getData()).executeInsert("sourceTable").await();
assertThat(folder.listFiles()).isNotNull().hasSize(2);
File[] files = folder.listFiles();
List<Path> paths = new ArrayList<>();
assert files != null;
paths.add(new Path(files[0].toURI().toString()));
paths.add(new Path(files[1].toURI().toString()));
TableStats tableStats = orcBulkDecodingFormat.reportStatistics(paths, dataType);
assertOrcFormatTableStatsEquals(tableStats, 6, 2L);
}
@Test
public void testOrcFormatStatsReportWithEmptyFile() {
TableStats tableStats = orcBulkDecodingFormat.reportStatistics(null, null);
assertThat(tableStats).isEqualTo(TableStats.UNKNOWN);
}
@Override
protected Map<String, String> ddlTypesMap() {
// now orc format don't support TIME(), BINARY(), VARBINARY() and
// TIMESTAMP_WITH_LOCAL_TIME_ZONE types, so we remove these types.
Map<String, String> ddlTypes = super.ddlTypesMap();
ddlTypes.remove("timestamp with local time zone");
ddlTypes.remove("binary(1)");
ddlTypes.remove("varbinary(1)");
ddlTypes.remove("time");
return ddlTypes;
}
@Override
protected Map<String, List<Object>> getDataMap() {
// now orc format don't support TIME(), BINARY(), VARBINARY() and
// TIMESTAMP_WITH_LOCAL_TIME_ZONE types, so we remove data belong to these types.
Map<String, List<Object>> dataMap = super.getDataMap();
dataMap.remove("timestamp with local time zone");
dataMap.remove("binary(1)");
dataMap.remove("varbinary(1)");
dataMap.remove("time");
return dataMap;
}
protected static void assertOrcFormatTableStatsEquals(
TableStats tableStats, int expectedRowCount, long nullCount) {
Map<String, ColumnStats> expectedColumnStatsMap = new HashMap<>();
expectedColumnStatsMap.put(
"f_boolean", new ColumnStats.Builder().setNullCount(nullCount).build());
expectedColumnStatsMap.put(
"f_tinyint",
new ColumnStats.Builder().setMax(3L).setMin(1L).setNullCount(0L).build());
expectedColumnStatsMap.put(
"f_smallint",
new ColumnStats.Builder().setMax(128L).setMin(100L).setNullCount(0L).build());
expectedColumnStatsMap.put(
"f_int",
new ColumnStats.Builder()
.setMax(45536L)
.setMin(31000L)
.setNullCount(nullCount)
.build());
expectedColumnStatsMap.put(
"f_bigint",
new ColumnStats.Builder()
.setMax(1238123899121L)
.setMin(1238123899000L)
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_float",
new ColumnStats.Builder()
.setMax(33.33300018310547D)
.setMin(33.31100082397461D)
.setNullCount(nullCount)
.build());
expectedColumnStatsMap.put(
"f_double",
new ColumnStats.Builder().setMax(10.1D).setMin(1.1D).setNullCount(0L).build());
expectedColumnStatsMap.put(
"f_string",
new ColumnStats.Builder().setMax("def").setMin("abcd").setNullCount(0L).build());
expectedColumnStatsMap.put(
"f_decimal5",
new ColumnStats.Builder()
.setMax(new BigDecimal("223.45"))
.setMin(new BigDecimal("123.45"))
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_decimal14",
new ColumnStats.Builder()
.setMax(new BigDecimal("123333333355.33"))
.setMin(new BigDecimal("123333333333.33"))
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_decimal38",
new ColumnStats.Builder()
.setMax(new BigDecimal("123433343334333433343334333433343334.34"))
.setMin(new BigDecimal("123433343334333433343334333433343334.33"))
.setNullCount(nullCount)
.build());
expectedColumnStatsMap.put(
"f_date",
new ColumnStats.Builder()
.setMax(Date.valueOf("1990-10-16"))
.setMin(Date.valueOf("1990-10-14"))
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_timestamp3",
new ColumnStats.Builder()
.setMax(
DateTimeUtils.parseTimestampData("1990-10-16 12:12:43.123", 3)
.toTimestamp())
.setMin(
DateTimeUtils.parseTimestampData("1990-10-14 12:12:43.123", 3)
.toTimestamp())
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_timestamp9",
new ColumnStats.Builder()
.setMax(
DateTimeUtils.parseTimestampData("1990-10-16 12:12:43.123", 3)
.toTimestamp())
.setMin(
DateTimeUtils.parseTimestampData("1990-10-14 12:12:43.123", 3)
.toTimestamp())
.setNullCount(0L)
.build());
expectedColumnStatsMap.put(
"f_timestamp_wtz",
new ColumnStats.Builder()
.setMax(
DateTimeUtils.parseTimestampData("1990-10-16 12:12:43.123", 3)
.toTimestamp())
.setMin(
DateTimeUtils.parseTimestampData("1990-10-14 12:12:43.123", 3)
.toTimestamp())
.setNullCount(0L)
.build());
// For complex types: ROW, ARRAY, MAP. The returned statistics have wrong null count
// value, so now complex types stats return null.
expectedColumnStatsMap.put("f_row", null);
expectedColumnStatsMap.put("f_array", null);
expectedColumnStatsMap.put("f_map", null);
assertThat(tableStats).isEqualTo(new TableStats(expectedRowCount, expectedColumnStatsMap));
}
}
| OrcFormatStatisticsReportTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/inlinedirtychecking/SamplingOrder.java | {
"start": 443,
"end": 960
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String note;
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn(name = "customerId")
private Customer customer;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getNote() {
return note;
}
public void setNote(String note) {
this.note = note;
}
public Customer getCustomer() {
return customer;
}
public void setCustomer(Customer customer) {
this.customer = customer;
}
}
| SamplingOrder |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java | {
"start": 29116,
"end": 34637
} | class ____ the job output data.
*/
public void setOutputKeyClass(Class<?> theClass) {
setClass(JobContext.OUTPUT_KEY_CLASS, theClass, Object.class);
}
/**
* Get the {@link RawComparator} comparator used to compare keys.
*
* @return the {@link RawComparator} comparator used to compare keys.
*/
public RawComparator getOutputKeyComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.KEY_COMPARATOR, null, RawComparator.class);
if (theClass != null)
return ReflectionUtils.newInstance(theClass, this);
return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
/**
* Set the {@link RawComparator} comparator used to compare keys.
*
* @param theClass the {@link RawComparator} comparator used to
* compare keys.
* @see #setOutputValueGroupingComparator(Class)
*/
public void setOutputKeyComparatorClass(Class<? extends RawComparator> theClass) {
setClass(JobContext.KEY_COMPARATOR,
theClass, RawComparator.class);
}
/**
* Set the {@link KeyFieldBasedComparator} options used to compare keys.
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field). opts are ordering options. The supported options
* are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
*/
public void setKeyFieldComparatorOptions(String keySpec) {
setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
set(KeyFieldBasedComparator.COMPARATOR_OPTIONS, keySpec);
}
/**
* Get the {@link KeyFieldBasedComparator} options
*/
public String getKeyFieldComparatorOption() {
return get(KeyFieldBasedComparator.COMPARATOR_OPTIONS);
}
/**
* Set the {@link KeyFieldBasedPartitioner} options used for
* {@link Partitioner}
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field).
*/
public void setKeyFieldPartitionerOptions(String keySpec) {
setPartitionerClass(KeyFieldBasedPartitioner.class);
set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, keySpec);
}
/**
* Get the {@link KeyFieldBasedPartitioner} options
*/
public String getKeyFieldPartitionerOption() {
return get(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS);
}
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see #setCombinerKeyGroupingComparator(Class) for details.
*/
public RawComparator getCombinerKeyGroupingComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.COMBINER_GROUP_COMPARATOR_CLASS, null, RawComparator.class);
if (theClass == null) {
return getOutputKeyComparator();
}
return ReflectionUtils.newInstance(theClass, this);
}
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see #setOutputValueGroupingComparator(Class) for details.
*/
public RawComparator getOutputValueGroupingComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.GROUP_COMPARATOR_CLASS, null, RawComparator.class);
if (theClass == null) {
return getOutputKeyComparator();
}
return ReflectionUtils.newInstance(theClass, this);
}
/**
* Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the combiner.
*
* <p>This comparator should be provided if the equivalence rules for keys
* for sorting the intermediates are different from those for grouping keys
* before each call to
* {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
*
* <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
* in a single call to the reduce function if K1 and K2 compare as equal.</p>
*
* <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
* how keys are sorted, this can be used in conjunction to simulate
* <i>secondary sort on values</i>.</p>
*
* <p><i>Note</i>: This is not a guarantee of the combiner sort being
* <i>stable</i> in any sense. (In any case, with the order of available
* map-outputs to the combiner being non-deterministic, it wouldn't make
* that much sense.)</p>
*
* @param theClass the comparator | for |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/util/context/ReactorContextAccessor.java | {
"start": 998,
"end": 1199
} | class ____ the {@code libs.micrometer.contextPropagation}
* SPI library, which is an optional dependency.
*
* @author Rossen Stoyanchev
* @author Simon Baslé
* @since 3.5.0
*/
public final | implements |
java | quarkusio__quarkus | extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/health/GrpcHealthEndpoint.java | {
"start": 630,
"end": 2012
} | class ____ extends MutinyHealthGrpc.HealthImplBase {
@Inject
GrpcHealthStorage healthStorage;
@Override
public Uni<HealthOuterClass.HealthCheckResponse> check(HealthOuterClass.HealthCheckRequest request) {
return Uni.createFrom().item(healthStorage.statusForService(request.getService()));
}
@Override
public Multi<HealthOuterClass.HealthCheckResponse> watch(HealthOuterClass.HealthCheckRequest request) {
String service = request.getService();
BroadcastProcessor<ServingStatus> broadcastProcessor = healthStorage.createStatusBroadcastProcessor(service);
return Multi.createBy().concatenating().streams(
Multi.createFrom().item(new Supplier<HealthOuterClass.HealthCheckResponse>() {
@Override
public HealthOuterClass.HealthCheckResponse get() {
return healthStorage.statusForService(service);
}
}),
broadcastProcessor.map(new Function<ServingStatus, HealthOuterClass.HealthCheckResponse>() {
@Override
public HealthOuterClass.HealthCheckResponse apply(ServingStatus servingStatus) {
return healthStorage.resultForStatus(servingStatus);
}
})).skip().repetitions();
}
}
| GrpcHealthEndpoint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.