language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/Assumptions.java | {
"start": 1921,
"end": 2077
} | class ____ used via <em>static imports</em>.
*
* @since 5.0
* @see TestAbortedException
* @see Assertions
*/
@API(status = STABLE, since = "5.0")
public | be |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/compiler/support/JavaCodeTest.java | {
"start": 904,
"end": 1442
} | class ____ {
public static final AtomicInteger SUBFIX = new AtomicInteger(8);
boolean shouldIgnoreWithoutPackage() {
String jdkVersion = System.getProperty("java.specification.version");
try {
return Integer.parseInt(jdkVersion) > 15;
} catch (Throwable t) {
return false;
}
}
String getSimpleCode() {
StringBuilder code = new StringBuilder();
code.append("package org.apache.dubbo.common.compiler.support;");
code.append("public | JavaCodeTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ThymeleafEndpointBuilderFactory.java | {
"start": 8791,
"end": 14872
} | interface ____
extends
EndpointProducerBuilder {
default ThymeleafEndpointBuilder basic() {
return (ThymeleafEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The character encoding to be used for reading template resources.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param encoding the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder encoding(String encoding) {
doSetProperty("encoding", encoding);
return this;
}
/**
* The order in which this template will be resolved as part of the
* resolver chain.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*
* @param order the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder order(Integer order) {
doSetProperty("order", order);
return this;
}
/**
* The order in which this template will be resolved as part of the
* resolver chain.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*
* @param order the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder order(String order) {
doSetProperty("order", order);
return this;
}
/**
* An optional prefix added to template names to convert them into
* resource names.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param prefix the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder prefix(String prefix) {
doSetProperty("prefix", prefix);
return this;
}
/**
* The type of resolver to be used by the template engine.
*
* The option is a:
* <code>org.apache.camel.component.thymeleaf.ThymeleafResolverType</code> type.
*
* Default: CLASS_LOADER
* Group: advanced
*
* @param resolver the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder resolver(org.apache.camel.component.thymeleaf.ThymeleafResolverType resolver) {
doSetProperty("resolver", resolver);
return this;
}
/**
* The type of resolver to be used by the template engine.
*
* The option will be converted to a
* <code>org.apache.camel.component.thymeleaf.ThymeleafResolverType</code> type.
*
* Default: CLASS_LOADER
* Group: advanced
*
* @param resolver the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder resolver(String resolver) {
doSetProperty("resolver", resolver);
return this;
}
/**
* An optional suffix added to template names to convert them into
* resource names.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param suffix the value to set
* @return the dsl builder
*/
default AdvancedThymeleafEndpointBuilder suffix(String suffix) {
doSetProperty("suffix", suffix);
return this;
}
}
public | AdvancedThymeleafEndpointBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/TreatableDomainType.java | {
"start": 184,
"end": 265
} | interface ____<J> extends ManagedDomainType<J>, PathSource<J> {
}
| TreatableDomainType |
java | google__dagger | dagger-runtime/main/java/dagger/multibindings/LazyClassKey.java | {
"start": 1053,
"end": 1251
} | class ____ use under the hood, which prevents loading unused classes at runtime.
*/
@Target({ElementType.METHOD, ElementType.FIELD, ElementType.TYPE})
@Retention(RUNTIME)
@Documented
@MapKey
public @ | to |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/sampling/GetSampleAction.java | {
"start": 8139,
"end": 9305
} | class ____ extends BaseNodeResponse {
private final List<SamplingService.RawDocument> sample;
protected NodeResponse(StreamInput in) throws IOException {
super(in);
sample = in.readCollectionAsList(SamplingService.RawDocument::new);
}
protected NodeResponse(DiscoveryNode node, List<SamplingService.RawDocument> sample) {
super(node);
this.sample = sample;
}
public List<SamplingService.RawDocument> getSample() {
return sample;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeCollection(sample);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NodeResponse other = (NodeResponse) o;
return getNode().equals(other.getNode()) && sample.equals(other.sample);
}
@Override
public int hashCode() {
return Objects.hash(getNode(), sample);
}
}
}
| NodeResponse |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/read/NumberCoercionTest.java | {
"start": 470,
"end": 12524
} | class ____ extends JacksonCoreTestBase
{
/*
/**********************************************************
/* Numeric coercions, integral
/**********************************************************
*/
@Test
void toIntCoercion() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
JsonParser p;
// long->int
p = createParser(mode, "1");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(1L, p.getLongValue());
assertEquals(1, p.getIntValue());
p.close();
// BigInteger->int
p = createParser(mode, "10");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigInteger.TEN, p.getBigIntegerValue());
assertEquals(10, p.getIntValue());
p.close();
// double->int
p = createParser(mode, "2");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(2.0, p.getDoubleValue());
assertEquals(2, p.getIntValue());
p.close();
// BigDecimal->int
p = createParser(mode, "10");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigDecimal.TEN, p.getDecimalValue());
assertEquals(10, p.getIntValue());
p.close();
}
}
@SuppressWarnings("resource")
@Test
void toIntFailing() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
// long -> error
long big = 1L + Integer.MAX_VALUE;
try (JsonParser p = createParser(mode, String.valueOf(big))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(big, p.getLongValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
long small = -1L + Integer.MIN_VALUE;
try (JsonParser p = createParser(mode, String.valueOf(small))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(Long.valueOf(small), p.getNumberValue());
assertEquals(small, p.getLongValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
// double -> error
try (JsonParser p = createParser(mode, String.valueOf(big)+".0")) {
assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken());
assertEquals((double) big, p.getDoubleValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_FLOAT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
try (JsonParser p = createParser(mode, String.valueOf(small)+".0")) {
assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken());
assertEquals((double) small, p.getDoubleValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_FLOAT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
// BigInteger -> error
try (JsonParser p = createParser(mode, String.valueOf(big))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigInteger.valueOf(big), p.getBigIntegerValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
try (JsonParser p = createParser(mode, String.valueOf(small))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigInteger.valueOf(small), p.getBigIntegerValue());
try {
p.getIntValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `int`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Integer.TYPE, e.getTargetType());
}
}
}
}
@Test
void toLongCoercion() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
JsonParser p;
// int->long
p = createParser(mode, "1");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(1, p.getIntValue());
assertEquals(1L, p.getLongValue());
p.close();
// BigInteger->long
long biggish = 12345678901L;
p = createParser(mode, String.valueOf(biggish));
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigInteger.valueOf(biggish), p.getBigIntegerValue());
assertEquals(biggish, p.getLongValue());
p.close();
// double->long
p = createParser(mode, "2");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(2.0, p.getDoubleValue());
assertEquals(2L, p.getLongValue());
p.close();
// BigDecimal->long
p = createParser(mode, "10");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigDecimal.TEN, p.getDecimalValue());
assertEquals(10, p.getLongValue());
p.close();
}
}
@SuppressWarnings("resource")
@Test
void toLongFailing() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
// BigInteger -> error
BigInteger big = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.TEN);
try (JsonParser p = createParser(mode, String.valueOf(big))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(NumberType.BIG_INTEGER, p.getNumberType());
assertEquals(big, p.getBigIntegerValue());
assertEquals(big, p.getNumberValue());
try {
p.getLongValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `long`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Long.TYPE, e.getTargetType());
}
}
BigInteger small = BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.TEN);
try (JsonParser p = createParser(mode, String.valueOf(small))) {
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(small, p.getBigIntegerValue());
try {
p.getLongValue();
fail("Should not pass");
} catch (InputCoercionException e) {
verifyException(e, "out of range of `long`");
assertEquals(JsonToken.VALUE_NUMBER_INT, e.getInputType());
assertEquals(Long.TYPE, e.getTargetType());
}
}
}
}
@Test
void toBigIntegerCoercion() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
JsonParser p;
p = createParser(mode, "1");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
// int to BigInteger
assertEquals(1, p.getIntValue());
assertEquals(BigInteger.ONE, p.getBigIntegerValue());
p.close();
p = createParser(mode, "2.0");
assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken());
// double to BigInteger
assertEquals(2.0, p.getDoubleValue());
assertEquals(BigInteger.valueOf(2L), p.getBigIntegerValue());
p.close();
p = createParser(mode, String.valueOf(Long.MAX_VALUE));
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
// long to BigInteger
assertEquals(Long.MAX_VALUE, p.getLongValue());
assertEquals(BigInteger.valueOf(Long.MAX_VALUE), p.getBigIntegerValue());
p.close();
p = createParser(mode, " 200.0");
assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken());
// BigDecimal to BigInteger
assertEquals(new BigDecimal("200.0"), p.getDecimalValue());
assertEquals(BigInteger.valueOf(200L), p.getBigIntegerValue());
p.close();
}
}
/*
/**********************************************************
/* Numeric coercions, floating point
/**********************************************************
*/
@Test
void toDoubleCoercion() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
JsonParser p;
// BigDecimal->double
p = createParser(mode, "100.5");
assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken());
assertEquals(new BigDecimal("100.5"), p.getDecimalValue());
assertEquals(100.5, p.getDoubleValue());
p.close();
p = createParser(mode, "10");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(BigInteger.TEN, p.getBigIntegerValue());
assertEquals(10.0, p.getDoubleValue());
p.close();
}
}
@Test
void toBigDecimalCoercion() throws Exception
{
for (int mode : ALL_STREAMING_MODES) {
JsonParser p;
p = createParser(mode, "1");
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
// int to BigDecimal
assertEquals(1, p.getIntValue());
assertEquals(BigDecimal.ONE, p.getDecimalValue());
p.close();
p = createParser(mode, String.valueOf(Long.MAX_VALUE));
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
// long to BigDecimal
assertEquals(Long.MAX_VALUE, p.getLongValue());
assertEquals(BigDecimal.valueOf(Long.MAX_VALUE), p.getDecimalValue());
p.close();
BigInteger biggie = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TEN);
p = createParser(mode, String.valueOf(biggie));
assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken());
// BigInteger to BigDecimal
assertEquals(biggie, p.getBigIntegerValue());
assertEquals(new BigDecimal(biggie), p.getDecimalValue());
p.close();
}
}
}
| NumberCoercionTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/BatchJobRecoveryContext.java | {
"start": 1727,
"end": 5364
} | interface ____ {
/**
* Provides the {@code ExecutionGraph} associated with the job.
*
* @return The execution graph.
*/
ExecutionGraph getExecutionGraph();
/**
* Provides the {@code ShuffleMaster} associated with the job.
*
* @return The shuffle master.
*/
ShuffleMaster<?> getShuffleMaster();
/**
* Provides the main thread executor.
*
* @return The main thread executor.
*/
ComponentMainThreadExecutor getMainThreadExecutor();
/**
* Retrieves a set of vertices that need to be restarted. If result consumption is considered
* (`basedOnResultConsumable` is true), the set will include all downstream vertices that have
* finished and upstream vertices that have missed partitions. Otherwise, only include
* downstream finished vertices.
*
* @param vertexId The ID of the vertex from which to compute the restart set.
* @param considerResultConsumable Indicates whether to consider result partition consumption
* while computing the vertices needing restart.
* @return A set of vertex IDs that need to be restarted.
*/
Set<ExecutionVertexID> getTasksNeedingRestart(
ExecutionVertexID vertexId, boolean considerResultConsumable);
/**
* Resets vertices specified by their IDs during recovery process.
*
* @param verticesToReset The set of vertices that require resetting.
*/
void resetVerticesInRecovering(Set<ExecutionVertexID> verticesToReset) throws Exception;
/**
* Updates the metrics related to the result partition sizes.
*
* @param resultPartitionBytes Mapping of partition IDs to their respective result partition
* bytes.
*/
void updateResultPartitionBytesMetrics(
Map<IntermediateResultPartitionID, ResultPartitionBytes> resultPartitionBytes);
/**
* Initializes a given job vertex with the specified parallelism and input information.
*
* @param jobVertex The job vertex to initialize.
* @param parallelism The parallelism to set for the job vertex.
* @param jobVertexInputInfos The input information for the job vertex.
* @param createTimestamp The timestamp marking the creation of the job vertex.
*/
void initializeJobVertex(
ExecutionJobVertex jobVertex,
int parallelism,
Map<IntermediateDataSetID, JobVertexInputInfo> jobVertexInputInfos,
long createTimestamp)
throws JobException;
/**
* Updates the job topology with new job vertices that were initialized.
*
* @param newlyInitializedJobVertices List of job vertices that have been initialized.
*/
void updateTopology(List<ExecutionJobVertex> newlyInitializedJobVertices);
/**
* Notifies the recovery finished.
*
* @param jobVerticesWithUnRecoveredCoordinators A set of job vertex Ids is associated with job
* vertices whose operatorCoordinators did not successfully recover their state. If any
* execution within these job vertices needs to be restarted in the future, all other
* executions within the same job vertex must also be restarted to ensure the consistency
* and correctness of the state.
*/
void onRecoveringFinished(Set<JobVertexID> jobVerticesWithUnRecoveredCoordinators);
/** Notifies the recovery failed. */
void onRecoveringFailed();
/** Trigger job failure. */
void failJob(
Throwable cause, long timestamp, CompletableFuture<Map<String, String>> failureLabels);
}
| BatchJobRecoveryContext |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/NoArgsAllowedService.java | {
"start": 1249,
"end": 2241
} | class ____ extends AbstractLaunchableService {
private static final Logger LOG =
LoggerFactory.getLogger(NoArgsAllowedService.class);
public NoArgsAllowedService() {
super("NoArgsAllowedService");
}
public static final String NAME =
"org.apache.hadoop.service.launcher.testservices.NoArgsAllowedService";
@Override
public Configuration bindArgs(Configuration config, List<String> args)
throws Exception {
Configuration configuration = super.bindArgs(config, args);
if (!args.isEmpty()) {
StringBuilder argsList = new StringBuilder();
for (String arg : args) {
argsList.append('"').append(arg).append("\" ");
}
LOG.error("Got {} arguments: {}", args.size(), argsList);
throw new ServiceLaunchException(
LauncherExitCodes.EXIT_COMMAND_ARGUMENT_ERROR,
"Expected 0 arguments but got %d: %s",
args.size(),
argsList);
}
return configuration;
}
}
| NoArgsAllowedService |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java | {
"start": 2712,
"end": 2759
} | enum ____ disk failure type.
*/
public | defines |
java | quarkusio__quarkus | extensions/devservices/common/src/main/java/io/quarkus/devservices/common/ConfigureUtil.java | {
"start": 883,
"end": 5674
} | class ____ {
private static final Map<String, Properties> DEVSERVICES_PROPS = new ConcurrentHashMap<>();
private ConfigureUtil() {
}
public static String configureNetwork(GenericContainer<?> container,
String defaultNetworkId,
boolean useSharedNetwork,
String hostNamePrefix) {
if (defaultNetworkId != null) {
// Set the network`without creating the network
container.setNetworkMode(defaultNetworkId);
return setGeneratedHostname(container, hostNamePrefix);
} else if (useSharedNetwork) {
return configureSharedNetwork(container, hostNamePrefix);
}
return container.getHost();
}
public static boolean shouldConfigureSharedServiceLabel(LaunchMode launchMode) {
return launchMode == LaunchMode.DEVELOPMENT;
}
public static <T extends GenericContainer<T>> T configureSharedServiceLabel(T container, LaunchMode launchMode,
String serviceLabel, String serviceName) {
if (shouldConfigureSharedServiceLabel(launchMode)) {
return container.withLabel(serviceLabel, serviceName);
}
return container;
}
public static void configureLabels(GenericContainer<?> container, LaunchMode launchMode) {
// Configure the labels for the container
container.withLabel(QUARKUS_PROCESS_UUID, RunningDevServicesRegistry.APPLICATION_UUID);
container.withLabel(QUARKUS_LAUNCH_MODE, launchMode.toString());
}
public static String configureSharedNetwork(GenericContainer<?> container, String hostNamePrefix) {
// When a shared network is requested for the launched containers, we need to configure
// the container to use it. We also need to create a hostname that will be applied to the returned
// URL
var tccl = Thread.currentThread().getContextClassLoader();
if (tccl.getName().contains("Deployment")) {
// we need to use the shared network loaded from the Augmentation ClassLoader because that ClassLoader
// is what the test launching process (that has access to the curated application) has access to
// FIXME: This is an ugly hack, but there is not much we can do...
try {
Class<?> networkClass = tccl.getParent()
.loadClass("org.testcontainers.containers.Network");
Object sharedNetwork = networkClass.getField("SHARED").get(null);
Consumer<CreateNetworkCmd> addDevservicesLabel = cmd -> cmd
.withLabels(Map.of("quarkus.devservices.network", "shared"));
Field createNetworkCmdModifiersField = sharedNetwork.getClass().getSuperclass()
.getDeclaredField("createNetworkCmdModifiers");
createNetworkCmdModifiersField.setAccessible(true);
createNetworkCmdModifiersField.set(sharedNetwork, Set.of(addDevservicesLabel));
container.setNetwork((Network) sharedNetwork);
} catch (Exception e) {
throw new IllegalStateException("Unable to obtain SHARED network from testcontainers", e);
}
} else {
container.setNetwork(Network.SHARED);
}
return setGeneratedHostname(container, hostNamePrefix);
}
public static String setGeneratedHostname(GenericContainer<?> container, String hostNamePrefix) {
String hostName = (hostNamePrefix + "-" + Base58.randomString(5)).toLowerCase(Locale.ROOT);
// some containers might try to add their own aliases on start, so we want to keep this list modifiable:
container.setNetworkAliases(new ArrayList<>(List.of(hostName)));
return hostName;
}
public static String getDefaultImageNameFor(String devserviceName) {
var imageName = DEVSERVICES_PROPS.computeIfAbsent(devserviceName, ConfigureUtil::loadProperties)
.getProperty("default.image");
if (imageName == null) {
throw new IllegalArgumentException("No default.image configured for " + devserviceName);
}
return imageName;
}
private static Properties loadProperties(String devserviceName) {
var fileName = devserviceName + "-devservice.properties";
try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(fileName)) {
if (in == null) {
throw new IllegalArgumentException(fileName + " not found on classpath");
}
var properties = new Properties();
properties.load(in);
return properties;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
| ConfigureUtil |
java | google__guice | core/test/com/google/inject/BoundInstanceInjectionTest.java | {
"start": 1527,
"end": 3658
} | class ____ {
int fromMethod;
@Inject
void setInt(int i) {
this.fromMethod = i;
}
}
public void testProvidersAreInjected() throws CreationException {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(O.class)
.toProvider(
new Provider<O>() {
@Inject int i;
@Override
public O get() {
O o = new O();
o.setInt(i);
return o;
}
});
bind(int.class).toInstance(5);
}
});
assertEquals(5, injector.getInstance(O.class).fromMethod);
}
public void testMalformedInstance() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(Object.class).toInstance(new MalformedInjectable());
}
});
fail();
} catch (CreationException expected) {
Asserts.assertContains(
expected.getMessage(),
"BoundInstanceInjectionTest$MalformedInjectable.doublyAnnotated() has more than one ",
"annotation annotated with @BindingAnnotation: ",
"Named and BoundInstanceInjectionTest$Another");
}
}
public void testMalformedProvider() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toProvider(new MalformedProvider());
}
});
fail();
} catch (CreationException expected) {
Asserts.assertContains(
expected.getMessage(),
"BoundInstanceInjectionTest$MalformedProvider.doublyAnnotated() has more than one ",
"annotation annotated with @BindingAnnotation: ",
"Named and BoundInstanceInjectionTest$Another");
}
}
static | O |
java | playframework__playframework | documentation/manual/working/javaGuide/advanced/routing/code/javaguide/binder/models/AgeRange.java | {
"start": 353,
"end": 1135
} | class ____ implements QueryStringBindable<AgeRange> {
public Integer from;
public Integer to;
// #declaration
// #bind
@Override
public Optional<AgeRange> bind(String key, Map<String, String[]> data) {
try {
from = Integer.valueOf(data.get("from")[0]);
to = Integer.valueOf(data.get("to")[0]);
return Optional.of(this);
} catch (Exception e) { // no parameter match return None
return Optional.empty();
}
}
@Override
public String unbind(String key) {
return new StringBuilder().append("from=").append(from).append("&to=").append(to).toString();
}
// #bind
@Override
public String javascriptUnbind() {
return new StringBuilder().append("from=").append(from).append(";to=").append(to).toString();
}
}
| AgeRange |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/main/java/io/github/resilience4j/circuitbreaker/autoconfigure/AbstractCircuitBreakerConfigurationOnMissingBean.java | {
"start": 2205,
"end": 5524
} | class ____ {
protected final CircuitBreakerConfiguration circuitBreakerConfiguration;
protected final CircuitBreakerConfigurationProperties circuitBreakerProperties;
public AbstractCircuitBreakerConfigurationOnMissingBean(
CircuitBreakerConfigurationProperties circuitBreakerProperties) {
this.circuitBreakerProperties = circuitBreakerProperties;
this.circuitBreakerConfiguration = new CircuitBreakerConfiguration(
circuitBreakerProperties);
}
@Bean
@ConditionalOnMissingBean(name = "compositeCircuitBreakerCustomizer")
@Qualifier("compositeCircuitBreakerCustomizer")
public CompositeCustomizer<CircuitBreakerConfigCustomizer> compositeCircuitBreakerCustomizer(
@Autowired(required = false) List<CircuitBreakerConfigCustomizer> customizers) {
return new CompositeCustomizer<>(customizers);
}
@Bean
@ConditionalOnMissingBean
public CircuitBreakerRegistry circuitBreakerRegistry(
EventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry,
RegistryEventConsumer<CircuitBreaker> circuitBreakerRegistryEventConsumer,
@Qualifier("compositeCircuitBreakerCustomizer") CompositeCustomizer<CircuitBreakerConfigCustomizer> compositeCircuitBreakerCustomizer) {
return circuitBreakerConfiguration
.circuitBreakerRegistry(eventConsumerRegistry, circuitBreakerRegistryEventConsumer,
compositeCircuitBreakerCustomizer);
}
@Bean
@Primary
public RegistryEventConsumer<CircuitBreaker> circuitBreakerRegistryEventConsumer(
Optional<List<RegistryEventConsumer<CircuitBreaker>>> optionalRegistryEventConsumers) {
return circuitBreakerConfiguration
.circuitBreakerRegistryEventConsumer(optionalRegistryEventConsumers);
}
@Bean
@ConditionalOnMissingBean
@Conditional(value = {AspectJOnClasspathCondition.class})
public CircuitBreakerAspect circuitBreakerAspect(
CircuitBreakerRegistry circuitBreakerRegistry,
@Autowired(required = false) List<CircuitBreakerAspectExt> circuitBreakerAspectExtList,
FallbackExecutor fallbackExecutor,
SpelResolver spelResolver
) {
return circuitBreakerConfiguration
.circuitBreakerAspect(circuitBreakerRegistry, circuitBreakerAspectExtList,
fallbackExecutor, spelResolver);
}
@Bean
@Conditional(value = {RxJava2OnClasspathCondition.class, AspectJOnClasspathCondition.class})
@ConditionalOnMissingBean
public RxJava2CircuitBreakerAspectExt rxJava2CircuitBreakerAspect() {
return circuitBreakerConfiguration.rxJava2CircuitBreakerAspect();
}
@Bean
@Conditional(value = {RxJava3OnClasspathCondition.class, AspectJOnClasspathCondition.class})
@ConditionalOnMissingBean
public RxJava3CircuitBreakerAspectExt rxJava3CircuitBreakerAspect() {
return circuitBreakerConfiguration.rxJava3CircuitBreakerAspect();
}
@Bean
@Conditional(value = {ReactorOnClasspathCondition.class, AspectJOnClasspathCondition.class})
@ConditionalOnMissingBean
public ReactorCircuitBreakerAspectExt reactorCircuitBreakerAspect() {
return circuitBreakerConfiguration.reactorCircuitBreakerAspect();
}
}
| AbstractCircuitBreakerConfigurationOnMissingBean |
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/UnicodeHelper.java | {
"start": 1248,
"end": 1290
} | class ____ equally
* immutable.
*/
public | is |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/LastValueWithRetractAggFunctionWithOrderTest.java | {
"start": 6013,
"end": 7398
} | class ____
extends LastValueWithRetractAggFunctionWithOrderTestBase<Boolean> {
@Override
protected List<List<Boolean>> getInputValueSets() {
return Arrays.asList(
Arrays.asList(false, false, false),
Arrays.asList(true, true, true),
Arrays.asList(true, false, null, true, false, true, null),
Arrays.asList(null, null, null),
Arrays.asList(null, true));
}
@Override
protected List<List<Long>> getInputOrderSets() {
return Arrays.asList(
Arrays.asList(6L, 2L, 3L),
Arrays.asList(1L, 2L, 3L),
Arrays.asList(10L, 2L, 5L, 11L, 3L, 7L, 5L),
Arrays.asList(6L, 9L, 5L),
Arrays.asList(4L, 3L));
}
@Override
protected List<Boolean> getExpectedResults() {
return Arrays.asList(false, true, true, null, true);
}
@Override
protected AggregateFunction<Boolean, LastValueWithRetractAccumulator<Boolean>>
getAggregator() {
return new LastValueWithRetractAggFunction<>(DataTypes.BOOLEAN().getLogicalType());
}
}
/** Test for {@link DecimalType}. */
@Nested
final | BooleanLastValueWithRetractAggFunctionWithOrderTest |
java | apache__flink | flink-python/src/main/java/org/apache/flink/streaming/api/utils/ByteArrayWrapperSerializer.java | {
"start": 1344,
"end": 3880
} | class ____ extends TypeSerializerSingleton<ByteArrayWrapper> {
private static final long serialVersionUID = 1L;
public static final ByteArrayWrapperSerializer INSTANCE = new ByteArrayWrapperSerializer();
@Override
public boolean isImmutableType() {
return false;
}
@Override
public ByteArrayWrapper createInstance() {
return new ByteArrayWrapper(new byte[0]);
}
@Override
public ByteArrayWrapper copy(ByteArrayWrapper from) {
byte[] data = Arrays.copyOfRange(from.getData(), from.getOffset(), from.getLimit());
return new ByteArrayWrapper(data);
}
@Override
public ByteArrayWrapper copy(ByteArrayWrapper from, ByteArrayWrapper reuse) {
byte[] data = Arrays.copyOfRange(from.getData(), from.getOffset(), from.getLimit());
reuse.setData(data);
reuse.setOffset(0);
reuse.setLimit(data.length);
return reuse;
}
@Override
public int getLength() {
return -1;
}
@Override
public void serialize(ByteArrayWrapper record, DataOutputView target) throws IOException {
target.writeInt(record.getLimit() - record.getOffset());
target.write(record.getData(), record.getOffset(), record.getLimit() - record.getOffset());
}
@Override
public ByteArrayWrapper deserialize(DataInputView source) throws IOException {
int length = source.readInt();
byte[] result = new byte[length];
source.readFully(result);
return new ByteArrayWrapper(result);
}
@Override
public ByteArrayWrapper deserialize(ByteArrayWrapper reuse, DataInputView source)
throws IOException {
int length = source.readInt();
byte[] result = new byte[length];
source.readFully(result);
reuse.setData(result);
reuse.setOffset(0);
reuse.setLimit(result.length);
return reuse;
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
int length = source.readInt();
byte[] result = new byte[length];
source.readFully(result);
target.writeInt(length);
target.write(result);
}
@Override
public TypeSerializerSnapshot<ByteArrayWrapper> snapshotConfiguration() {
return new ByteArrayWrapperSerializerSnapshot();
}
/** Serializer configuration snapshot for compatibility and format evolution. */
@SuppressWarnings("WeakerAccess")
public static | ByteArrayWrapperSerializer |
java | elastic__elasticsearch | qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/MockServer.java | {
"start": 2262,
"end": 7838
} | class ____ {
protected final Logger logger = LogManager.getLogger(getClass());
private static final int CONTAINER_PORT = 1080; // default for image
private final Shell shell;
private final HttpClient client;
private ExecutorService executorService;
private String containerId;
/**
* Create a new mockserver, and execute the supplied {@code runnable}. The mockserver will
* be cleaned up afterwards.
* @param runnable the code to run e.g. the test case
*/
public static void withMockServer(CheckedConsumer<MockServer, Exception> runnable) {
final MockServer mockServer = new MockServer();
try {
mockServer.start();
runnable.accept(mockServer);
mockServer.close();
} catch (Throwable e) {
mockServer.close();
}
}
private MockServer() {
this.shell = new Shell();
this.executorService = Executors.newSingleThreadExecutor();
this.client = HttpClient.newBuilder().executor(executorService).build();
}
private void start() throws Exception {
final String command = "docker run -t --detach --rm -p " + CONTAINER_PORT + ":" + CONTAINER_PORT + " mockserver/mockserver:latest";
this.containerId = this.shell.run(command).stdout().trim();
// It's a Java app, so give it a chance to wake up. I'd add a healthcheck to the above command,
// but the image doesn't have any CLI utils at all.
PackagingTestCase.assertBusy(() -> {
try {
this.reset();
} catch (Exception e) {
// Only assertions are retried.
throw new AssertionError(e);
}
}, 20, TimeUnit.SECONDS);
}
public void clearExpectations() throws Exception {
doRequest("http://localhost:" + CONTAINER_PORT + "/mockserver/clear?type=EXPECTATIONS", "{ \"path\": \"/*\" }");
}
public void reset() throws Exception {
doRequest("http://localhost:" + CONTAINER_PORT + "/mockserver/reset", null);
}
/**
* Returns all interactions with the mockserver since startup, the last call to {@link #reset()} or the
* last call to {@link #clearExpectations()}. The JSON returned by the mockserver is flattened, so that
* the period-seperated keys in each map represent the structure of the JSON.
*
* @return a list of interactions
* @throws Exception if anything goes wrong
*/
public List<Map<String, String>> getInteractions() throws Exception {
final String url = "http://localhost:" + CONTAINER_PORT + "/mockserver/retrieve?type=REQUEST_RESPONSES";
final String result = doRequest(url, null);
final ObjectMapper objectMapper = new ObjectMapper();
final JsonNode jsonNode = objectMapper.readTree(result);
assertThat("Response from mockserver is not a JSON array", jsonNode.isArray(), is(true));
final List<Map<String, String>> interactions = new ArrayList<>();
for (JsonNode node : jsonNode) {
final Map<String, String> interaction = new HashMap<>();
addKeys("", node, interaction);
interactions.add(interaction);
}
return interactions;
}
private void close() {
if (this.containerId != null) {
this.shell.run("docker rm -f " + this.containerId);
this.containerId = null;
}
if (this.executorService != null) {
this.executorService.shutdown();
this.executorService = null;
}
}
public String getContainerId() {
return containerId;
}
public int getPort() {
return CONTAINER_PORT;
}
/**
* Recursively flattens a JsonNode into a map, to make it easier to pick out entries and make assertions.
* Keys are concatenated with periods.
*
* @param currentPath used recursively to construct the key
* @param jsonNode the current node to flatten
* @param map entries are added into this map
*/
private void addKeys(String currentPath, JsonNode jsonNode, Map<String, String> map) {
if (jsonNode.isObject()) {
ObjectNode objectNode = (ObjectNode) jsonNode;
Iterator<Map.Entry<String, JsonNode>> iter = objectNode.fields();
String pathPrefix = currentPath.isEmpty() ? "" : currentPath + ".";
while (iter.hasNext()) {
Map.Entry<String, JsonNode> entry = iter.next();
addKeys(pathPrefix + entry.getKey(), entry.getValue(), map);
}
} else if (jsonNode.isArray()) {
ArrayNode arrayNode = (ArrayNode) jsonNode;
for (int i = 0; i < arrayNode.size(); i++) {
addKeys(currentPath + "[" + i + "]", arrayNode.get(i), map);
}
} else if (jsonNode.isValueNode()) {
ValueNode valueNode = (ValueNode) jsonNode;
map.put(currentPath, valueNode.asText());
}
}
private String doRequest(String urlString, String body) throws Exception {
final HttpRequest.Builder request = HttpRequest.newBuilder(URI.create(urlString));
if (body == null) {
request.method("PUT", BodyPublishers.noBody());
} else {
request.method("PUT", BodyPublishers.ofString(body)).header("Content-Type", "application/json");
}
final HttpResponse<String> response = client.send(request.build(), BodyHandlers.ofString());
return response.body();
}
}
| MockServer |
java | apache__kafka | coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinatorTest.java | {
"start": 1333,
"end": 6676
} | class ____ {
@Test
public void testUpdateLastWrittenOffset() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
assertTrue(coordinator.snapshotRegistry().hasSnapshot(0L));
coordinator.updateLastWrittenOffset(100L);
assertEquals(100L, coordinator.lastWrittenOffset());
assertTrue(coordinator.snapshotRegistry().hasSnapshot(100L));
assertTrue(coordinator.snapshotRegistry().hasSnapshot(0L));
}
@Test
public void testUpdateLastWrittenOffsetFailed() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
assertEquals(0L, coordinator.lastWrittenOffset());
assertThrows(IllegalStateException.class, () -> coordinator.updateLastWrittenOffset(0L));
assertEquals(0L, coordinator.lastWrittenOffset());
}
@Test
public void testRevertWrittenOffset() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
coordinator.updateLastWrittenOffset(100L);
coordinator.updateLastWrittenOffset(200L);
assertTrue(coordinator.snapshotRegistry().hasSnapshot(0L));
assertTrue(coordinator.snapshotRegistry().hasSnapshot(100L));
assertTrue(coordinator.snapshotRegistry().hasSnapshot(200L));
coordinator.revertLastWrittenOffset(100L);
assertEquals(100L, coordinator.lastWrittenOffset());
assertTrue(coordinator.snapshotRegistry().hasSnapshot(100L));
assertFalse(coordinator.snapshotRegistry().hasSnapshot(200L));
}
@Test
public void testRevertLastWrittenOffsetFailed() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
assertEquals(0, coordinator.lastWrittenOffset());
assertThrows(IllegalStateException.class, () -> coordinator.revertLastWrittenOffset(1L));
assertEquals(0, coordinator.lastWrittenOffset());
}
@Test
public void testUpdateLastCommittedOffset() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
coordinator.updateLastWrittenOffset(100L);
assertTrue(coordinator.snapshotRegistry().hasSnapshot(0L));
assertTrue(coordinator.snapshotRegistry().hasSnapshot(100L));
coordinator.updateLastCommittedOffset(100L);
assertEquals(100L, coordinator.lastCommittedOffset());
assertFalse(coordinator.snapshotRegistry().hasSnapshot(0L));
assertTrue(coordinator.snapshotRegistry().hasSnapshot(100L));
}
@Test
public void testUpdateLastCommittedOffsetFailed() {
LogContext logContext = new LogContext();
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
SnapshottableCoordinator<MockCoordinatorShard, String> coordinator = new SnapshottableCoordinator<>(
logContext,
snapshotRegistry,
new MockCoordinatorShard(snapshotRegistry, new MockCoordinatorTimer<>(new MockTime())),
new TopicPartition("test-topic", 0)
);
coordinator.updateLastWrittenOffset(100L);
coordinator.updateLastCommittedOffset(100L);
assertEquals(100L, coordinator.lastCommittedOffset());
assertThrows(IllegalStateException.class, () -> coordinator.updateLastCommittedOffset(99L));
assertEquals(100L, coordinator.lastCommittedOffset());
assertThrows(IllegalStateException.class, () -> coordinator.updateLastCommittedOffset(101L));
}
}
| SnapshottableCoordinatorTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retryable.java | {
"start": 405,
"end": 554
} | interface ____ determining if an error should be retried and a way to modify
* the request to based on the type of failure that occurred.
*/
public | for |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/BZip2TextFileWriter.java | {
"start": 1380,
"end": 4107
} | class ____ implements Closeable {
// Use minimum block size to reduce amount of data to require to be written
// to CBZip2OutputStream before a new block is created.
private static final int BLOCK_SIZE_100K = MIN_BLOCKSIZE;
/**
* The amount of bytes of run-length encoded data that needs to be written
* to this writer in order for the next byte written starts a new BZip2 block.
*/
public static final int BLOCK_SIZE =
// The + 1 is needed because of how CBZip2OutputStream checks whether the
// last offset written is less than allowable block size. Because the last
// offset is one less of the amount of bytes written to the block, we need
// to write an extra byte to trigger writing a new block.
CBZip2OutputStream.getAllowableBlockSize(BLOCK_SIZE_100K) + 1;
private final CBZip2OutputStream out;
public BZip2TextFileWriter(Path path, Configuration conf) throws IOException {
this(path.getFileSystem(conf).create(path));
}
public BZip2TextFileWriter(OutputStream rawOut) throws IOException {
try {
BZip2Codec.writeHeader(rawOut);
out = new CBZip2OutputStream(rawOut, BLOCK_SIZE_100K);
} catch (Throwable e) {
rawOut.close();
throw e;
}
}
public void writeManyRecords(int totalSize, int numRecords, byte[] delimiter)
throws IOException {
checkArgument(numRecords > 0);
checkArgument(delimiter.length > 0);
int minRecordSize = totalSize / numRecords;
checkArgument(minRecordSize >= delimiter.length);
int lastRecordExtraSize = totalSize % numRecords;
for (int i = 0; i < numRecords - 1; i++) {
writeRecord(minRecordSize, delimiter);
}
writeRecord(minRecordSize + lastRecordExtraSize, delimiter);
}
public void writeRecord(int totalSize, byte[] delimiter) throws IOException {
checkArgument(delimiter.length > 0);
checkArgument(totalSize >= delimiter.length);
int contentSize = totalSize - delimiter.length;
for (int i = 0; i < contentSize; i++) {
// Alternate between characters so that internals of CBZip2OutputStream
// cannot condensed the written bytes using run-length encoding. This
// allows the caller to use #BLOCK_SIZE in order to know whether the next
// write will end just before the end of the current block, or exceed it,
// and by how much.
out.write(i % 2 == 0 ? 'a' : 'b');
}
write(delimiter);
}
public void write(String bytes) throws IOException {
write(bytes.getBytes(StandardCharsets.UTF_8));
}
public void write(byte[] bytes) throws IOException {
out.write(bytes);
}
@Override
public void close() throws IOException {
out.close();
}
}
| BZip2TextFileWriter |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/StaticEndpointBuilders.java | {
"start": 496704,
"end": 497505
} | class ____ or
* location in the file system.
* This option can also be loaded from an existing file, by prefixing with
* file: or classpath: followed by the location of the file.
*
* @param path path
* @return the dsl builder
*/
public static SchematronEndpointBuilderFactory.SchematronEndpointBuilder schematron(String path) {
return schematron("schematron", path);
}
/**
* Schematron (camel-schematron)
* Validate XML payload using the Schematron Library.
*
* Category: validation
* Since: 2.15
* Maven coordinates: org.apache.camel:camel-schematron
*
* Syntax: <code>schematron:path</code>
*
* Path parameter: path (required)
* The path to the schematron rules file. Can either be in | path |
java | jhy__jsoup | src/main/java/org/jsoup/safety/Safelist.java | {
"start": 24977,
"end": 25223
} | class ____ extends TypedValue {
AttributeValue(String value) {
super(value);
}
static AttributeValue valueOf(String value) {
return new AttributeValue(value);
}
}
static | AttributeValue |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/main/java/org/apache/camel/test/main/junit5/CamelMainContext.java | {
"start": 7851,
"end": 9533
} | class ____ used.
*/
private void mockEndpointsIfNeeded(ExtendedCamelContext context) {
boolean mockEndpointsSet = false;
boolean mockEndpointsAndSkipSet = false;
for (int i = annotations.size() - 1; i >= 0; i--) {
final CamelMainTest annotation = annotations.get(i);
// enable auto mocking if enabled
final String mockEndpoints = annotation.mockEndpoints();
if (!mockEndpointsSet && !mockEndpoints.isEmpty()) {
mockEndpointsSet = true;
context.registerEndpointCallback(new InterceptSendToMockEndpointStrategy(mockEndpoints));
}
final String mockEndpointsAndSkip = annotation.mockEndpointsAndSkip();
if (!mockEndpointsAndSkipSet && !mockEndpointsAndSkip.isEmpty()) {
mockEndpointsAndSkipSet = true;
context.registerEndpointCallback(new InterceptSendToMockEndpointStrategy(mockEndpointsAndSkip, true));
}
}
}
/**
* Configure the debug mode if the test instance is of type {@link DebuggerCallback} in a such way that the
* callback methods {@link DebuggerCallback#debugBefore} and {@link DebuggerCallback#debugAfter} are called when
* executing the routes.
* <p/>
* In case of {@code @Nested} test classes, the instance used to check if the debug mode needs to be enabled is
* the instance of the outer class.
*/
private void configureDebuggerIfNeeded(ModelCamelContext context) {
// Get the instance of the outer | is |
java | apache__camel | components/camel-smpp/src/test/java/org/apache/camel/component/smpp/SmppProducerLazySessionCreationTest.java | {
"start": 1496,
"end": 5295
} | class ____ {
private SmppProducer producer;
private SmppConfiguration configuration;
private SmppEndpoint endpoint;
private SMPPSession session;
@BeforeEach
public void setUp() {
configuration = new SmppConfiguration();
configuration.setLazySessionCreation(true);
configuration.setServiceType("CMT");
configuration.setSystemType("cp");
configuration.setPassword("password");
endpoint = mock(SmppEndpoint.class);
session = mock(SMPPSession.class);
producer = new SmppProducer(endpoint, configuration) {
SMPPSession createSMPPSession() {
return session;
}
};
}
@Test
public void processShouldCreateTheSmppSession() throws Exception {
when(endpoint.getConnectionString())
.thenReturn("smpp://smppclient@localhost:2775");
BindParameter expectedBindParameter = new BindParameter(
BindType.BIND_TX,
"smppclient",
"password",
"cp",
TypeOfNumber.UNKNOWN,
NumberingPlanIndicator.UNKNOWN,
"");
when(session.connectAndBind("localhost", Integer.valueOf(2775), expectedBindParameter))
.thenReturn("1");
when(endpoint.isSingleton()).thenReturn(true);
SmppBinding binding = mock(SmppBinding.class);
Exchange exchange = mock(Exchange.class);
Message in = mock(Message.class);
SmppCommand command = mock(SmppCommand.class);
when(endpoint.getBinding()).thenReturn(binding);
when(binding.createSmppCommand(session, exchange)).thenReturn(command);
when(exchange.getIn()).thenReturn(in);
when(in.getHeader("CamelSmppSystemId", String.class)).thenReturn(null);
when(in.getHeader("CamelSmppPassword", String.class)).thenReturn(null);
command.execute(exchange);
producer.doStart();
producer.process(exchange);
verify(session).setEnquireLinkTimer(60000);
verify(session).setTransactionTimer(10000);
verify(session).addSessionStateListener(isA(SessionStateListener.class));
verify(session).connectAndBind("localhost", Integer.valueOf(2775), expectedBindParameter);
}
@Test
public void processShouldCreateTheSmppSessionWithTheSystemIdAndPasswordFromTheExchange() throws Exception {
when(endpoint.getConnectionString())
.thenReturn("smpp://localhost:2775");
BindParameter expectedBindParameter = new BindParameter(
BindType.BIND_TX,
"smppclient2",
"password2",
"cp",
TypeOfNumber.UNKNOWN,
NumberingPlanIndicator.UNKNOWN,
"");
when(session.connectAndBind("localhost", Integer.valueOf(2775), expectedBindParameter))
.thenReturn("1");
SmppBinding binding = mock(SmppBinding.class);
Exchange exchange = mock(Exchange.class);
Message in = mock(Message.class);
SmppCommand command = mock(SmppCommand.class);
when(endpoint.getBinding()).thenReturn(binding);
when(endpoint.isSingleton()).thenReturn(true);
when(binding.createSmppCommand(session, exchange)).thenReturn(command);
when(exchange.getIn()).thenReturn(in);
when(in.getHeader("CamelSmppSystemId", String.class)).thenReturn("smppclient2");
when(in.getHeader("CamelSmppPassword", String.class)).thenReturn("password2");
command.execute(exchange);
producer.doStart();
producer.process(exchange);
verify(session).connectAndBind("localhost", Integer.valueOf(2775), expectedBindParameter);
}
}
| SmppProducerLazySessionCreationTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/transport/TransportKeepAliveTests.java | {
"start": 1344,
"end": 8606
} | class ____ extends ESTestCase {
private final ConnectionProfile defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY);
private BytesReference expectedPingMessage;
private AsyncBiFunction<TcpChannel, BytesReference, Void> pingSender;
private TransportKeepAlive keepAlive;
private CapturingThreadPool threadPool;
@Override
@SuppressWarnings("unchecked")
public void setUp() throws Exception {
super.setUp();
pingSender = mock(AsyncBiFunction.class);
threadPool = new CapturingThreadPool();
keepAlive = new TransportKeepAlive(threadPool, pingSender);
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.writeByte((byte) 'E');
out.writeByte((byte) 'S');
out.writeInt(-1);
expectedPingMessage = out.bytes();
} catch (IOException e) {
throw new AssertionError(e.getMessage(), e); // won't happen
}
}
@Override
public void tearDown() throws Exception {
threadPool.shutdown();
super.tearDown();
}
public void testRegisterNodeConnectionSchedulesKeepAlive() {
TimeValue pingInterval = TimeValue.timeValueSeconds(randomLongBetween(1, 60));
ConnectionProfile connectionProfile = new ConnectionProfile.Builder(defaultProfile).setPingInterval(pingInterval).build();
assertEquals(0, threadPool.scheduledTasks.size());
TcpChannel channel1 = new FakeTcpChannel();
TcpChannel channel2 = new FakeTcpChannel();
channel1.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
channel2.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.registerNodeConnection(Arrays.asList(channel1, channel2), connectionProfile);
assertEquals(1, threadPool.scheduledTasks.size());
Tuple<TimeValue, Runnable> taskTuple = threadPool.scheduledTasks.poll();
assertEquals(pingInterval, taskTuple.v1());
Runnable keepAliveTask = taskTuple.v2();
assertEquals(0, threadPool.scheduledTasks.size());
keepAliveTask.run();
verify(pingSender, times(1)).apply(same(channel1), eq(expectedPingMessage), any());
verify(pingSender, times(1)).apply(same(channel2), eq(expectedPingMessage), any());
// Test that the task has rescheduled itself
assertEquals(1, threadPool.scheduledTasks.size());
Tuple<TimeValue, Runnable> rescheduledTask = threadPool.scheduledTasks.poll();
assertEquals(pingInterval, rescheduledTask.v1());
}
public void testRegisterMultipleKeepAliveIntervals() {
TimeValue pingInterval1 = TimeValue.timeValueSeconds(randomLongBetween(1, 30));
ConnectionProfile connectionProfile1 = new ConnectionProfile.Builder(defaultProfile).setPingInterval(pingInterval1).build();
TimeValue pingInterval2 = TimeValue.timeValueSeconds(randomLongBetween(31, 60));
ConnectionProfile connectionProfile2 = new ConnectionProfile.Builder(defaultProfile).setPingInterval(pingInterval2).build();
assertEquals(0, threadPool.scheduledTasks.size());
TcpChannel channel1 = new FakeTcpChannel();
TcpChannel channel2 = new FakeTcpChannel();
channel1.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
channel2.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.registerNodeConnection(Collections.singletonList(channel1), connectionProfile1);
keepAlive.registerNodeConnection(Collections.singletonList(channel2), connectionProfile2);
assertEquals(2, threadPool.scheduledTasks.size());
Tuple<TimeValue, Runnable> taskTuple1 = threadPool.scheduledTasks.poll();
Tuple<TimeValue, Runnable> taskTuple2 = threadPool.scheduledTasks.poll();
assertEquals(pingInterval1, taskTuple1.v1());
assertEquals(pingInterval2, taskTuple2.v1());
Runnable keepAliveTask1 = taskTuple1.v2();
Runnable keepAliveTask2 = taskTuple1.v2();
assertEquals(0, threadPool.scheduledTasks.size());
keepAliveTask1.run();
assertEquals(1, threadPool.scheduledTasks.size());
keepAliveTask2.run();
assertEquals(2, threadPool.scheduledTasks.size());
}
public void testClosingChannelUnregistersItFromKeepAlive() {
TimeValue pingInterval1 = TimeValue.timeValueSeconds(randomLongBetween(1, 30));
ConnectionProfile connectionProfile = new ConnectionProfile.Builder(defaultProfile).setPingInterval(pingInterval1).build();
TcpChannel channel1 = new FakeTcpChannel();
TcpChannel channel2 = new FakeTcpChannel();
channel1.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
channel2.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.registerNodeConnection(Collections.singletonList(channel1), connectionProfile);
keepAlive.registerNodeConnection(Collections.singletonList(channel2), connectionProfile);
channel1.close();
Runnable task = threadPool.scheduledTasks.poll().v2();
task.run();
verify(pingSender, times(0)).apply(same(channel1), eq(expectedPingMessage), any());
verify(pingSender, times(1)).apply(same(channel2), eq(expectedPingMessage), any());
}
public void testKeepAliveResponseIfServer() {
TcpChannel channel = new FakeTcpChannel(true);
channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.receiveKeepAlive(channel);
verify(pingSender, times(1)).apply(same(channel), eq(expectedPingMessage), any());
}
public void testNoKeepAliveResponseIfClient() {
TcpChannel channel = new FakeTcpChannel(false);
channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.receiveKeepAlive(channel);
verify(pingSender, times(0)).apply(same(channel), eq(expectedPingMessage), any());
}
public void testOnlySendPingIfWeHaveNotWrittenAndReadSinceLastPing() {
TimeValue pingInterval = TimeValue.timeValueSeconds(15);
ConnectionProfile connectionProfile = new ConnectionProfile.Builder(defaultProfile).setPingInterval(pingInterval).build();
TcpChannel channel1 = new FakeTcpChannel();
TcpChannel channel2 = new FakeTcpChannel();
channel1.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
channel2.getChannelStats().markAccessed(threadPool.relativeTimeInMillis());
keepAlive.registerNodeConnection(Arrays.asList(channel1, channel2), connectionProfile);
Tuple<TimeValue, Runnable> taskTuple = threadPool.scheduledTasks.poll();
taskTuple.v2().run();
TcpChannel.ChannelStats stats = channel1.getChannelStats();
stats.markAccessed(threadPool.relativeTimeInMillis() + (pingInterval.millis() / 2));
taskTuple = threadPool.scheduledTasks.poll();
taskTuple.v2().run();
verify(pingSender, times(1)).apply(same(channel1), eq(expectedPingMessage), any());
verify(pingSender, times(2)).apply(same(channel2), eq(expectedPingMessage), any());
}
private | TransportKeepAliveTests |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/hateoas/JsonErrorSerdeTest.java | {
"start": 1455,
"end": 3534
} | class ____ {
private static final String JSON_ERROR = """
{"_links":{"self":[{"href":"/resolve","templated":false}]},"_embedded":{"errors":[{"message":"Internal Server Error: Something bad happened"}]},"message":"Internal Server Error"}""";
private static final String SPEC_NAME = "JsonErrorSerdeTest";
/**
* @throws IOException Exception thrown while getting the server under test.
*/
@Test
void canDeserializeAJsonErrorAsAGenericResource() throws IOException {
try (ServerUnderTest server = ServerUnderTestProviderUtils.getServerUnderTestProvider().getServer(SPEC_NAME, Collections.emptyMap())) {
JsonMapper jsonMapper = server.getApplicationContext().getBean(JsonMapper.class);
//when:
Resource resource = jsonMapper.readValue(JSON_ERROR, Resource.class);
//then:
testResource(resource);
}
}
/**
* @throws IOException Exception thrown while getting the server under test.
*/
@Test
void jsonErrorShouldBeDeserializableFromAString() throws IOException {
try (ServerUnderTest server = ServerUnderTestProviderUtils.getServerUnderTestProvider().getServer(SPEC_NAME, Collections.emptyMap())) {
JsonMapper jsonMapper = server.getApplicationContext().getBean(JsonMapper.class);
//when:
JsonError jsonError = jsonMapper.readValue(JSON_ERROR, JsonError.class);
//then:
testResource(jsonError);
}
}
private <T extends Resource> void testResource(T resource) {
assertNotNull(resource);
assertTrue(resource.getEmbedded().getFirst("errors").isPresent(), "errors should be present");
assertTrue(resource.getLinks().getFirst("self").isPresent(), "self link should be present");
assertEquals("/resolve", resource.getLinks().getFirst("self").map(Link::getHref).orElse(null));
assertFalse(resource.getLinks().getFirst("self").map(Link::isTemplated).orElse(true), "self link should not be templated");
}
}
| JsonErrorSerdeTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/fetchprofile/Customer.java | {
"start": 988,
"end": 2042
} | class ____ {
@Id
@GeneratedValue
private long id;
private String name;
private long customerNumber;
@OneToMany
private Set<Order> orders = new HashSet<>();
@ManyToOne(fetch = FetchType.LAZY)
private Order lastOrder;
public Order getLastOrder() {
return lastOrder;
}
public void setLastOrder(Order lastOrder) {
this.lastOrder = lastOrder;
}
public Set<SupportTickets> getTickets() {
return tickets;
}
public void setTickets(Set<SupportTickets> tickets) {
this.tickets = tickets;
}
@OneToMany
private Set<SupportTickets> tickets;
public long getCustomerNumber() {
return customerNumber;
}
public void setCustomerNumber(long customerNumber) {
this.customerNumber = customerNumber;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<Order> getOrders() {
return orders;
}
public void setOrders(Set<Order> orders) {
this.orders = orders;
}
}
| Customer |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/xml/AbstractXMLReader.java | {
"start": 1455,
"end": 4772
} | class ____ implements XMLReader {
private @Nullable DTDHandler dtdHandler;
private @Nullable ContentHandler contentHandler;
private @Nullable EntityResolver entityResolver;
private @Nullable ErrorHandler errorHandler;
private @Nullable LexicalHandler lexicalHandler;
@Override
public void setContentHandler(@Nullable ContentHandler contentHandler) {
this.contentHandler = contentHandler;
}
@Override
public @Nullable ContentHandler getContentHandler() {
return this.contentHandler;
}
@Override
public void setDTDHandler(@Nullable DTDHandler dtdHandler) {
this.dtdHandler = dtdHandler;
}
@Override
public @Nullable DTDHandler getDTDHandler() {
return this.dtdHandler;
}
@Override
public void setEntityResolver(@Nullable EntityResolver entityResolver) {
this.entityResolver = entityResolver;
}
@Override
public @Nullable EntityResolver getEntityResolver() {
return this.entityResolver;
}
@Override
public void setErrorHandler(@Nullable ErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
@Override
public @Nullable ErrorHandler getErrorHandler() {
return this.errorHandler;
}
protected @Nullable LexicalHandler getLexicalHandler() {
return this.lexicalHandler;
}
/**
* This implementation throws a {@code SAXNotRecognizedException} exception
* for any feature outside the "http://xml.org/sax/features/" namespace
* and returns {@code false} for any feature within.
*/
@Override
public boolean getFeature(String name) throws SAXNotRecognizedException, SAXNotSupportedException {
if (name.startsWith("http://xml.org/sax/features/")) {
return false;
}
else {
throw new SAXNotRecognizedException(name);
}
}
/**
* This implementation throws a {@code SAXNotRecognizedException} exception
* for any feature outside the "http://xml.org/sax/features/" namespace
* and accepts a {@code false} value for any feature within.
*/
@Override
public void setFeature(String name, boolean value) throws SAXNotRecognizedException, SAXNotSupportedException {
if (name.startsWith("http://xml.org/sax/features/")) {
if (value) {
throw new SAXNotSupportedException(name);
}
}
else {
throw new SAXNotRecognizedException(name);
}
}
/**
* Throws a {@code SAXNotRecognizedException} exception when the given property does not signify a lexical
* handler. The property name for a lexical handler is {@code http://xml.org/sax/properties/lexical-handler}.
*/
@Override
public @Nullable Object getProperty(String name) throws SAXNotRecognizedException, SAXNotSupportedException {
if ("http://xml.org/sax/properties/lexical-handler".equals(name)) {
return this.lexicalHandler;
}
else {
throw new SAXNotRecognizedException(name);
}
}
/**
* Throws a {@code SAXNotRecognizedException} exception when the given property does not signify a lexical
* handler. The property name for a lexical handler is {@code http://xml.org/sax/properties/lexical-handler}.
*/
@Override
public void setProperty(String name, Object value) throws SAXNotRecognizedException, SAXNotSupportedException {
if ("http://xml.org/sax/properties/lexical-handler".equals(name)) {
this.lexicalHandler = (LexicalHandler) value;
}
else {
throw new SAXNotRecognizedException(name);
}
}
}
| AbstractXMLReader |
java | elastic__elasticsearch | libs/cli/src/main/java/org/elasticsearch/cli/Command.java | {
"start": 950,
"end": 5815
} | class ____ implements Closeable {
/** A description of the command, used in the help output. */
protected final String description;
/** The option parser for this command. */
protected final OptionParser parser = new OptionParser();
private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "Show help").forHelp();
private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "Show minimal output");
private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "Show verbose output")
.availableUnless(silentOption);
/**
* Construct the command with the specified command description and runnable to execute before main is invoked.
* @param description the command description
*
*/
public Command(final String description) {
this.description = description;
}
/** Parses options for this command from args and executes it. */
public final int main(String[] args, Terminal terminal, ProcessInfo processInfo) throws IOException {
try {
mainWithoutErrorHandling(args, terminal, processInfo);
} catch (OptionException e) {
// print help to stderr on exceptions
printHelp(terminal, true);
terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
return ExitCodes.USAGE;
} catch (UserException e) {
if (e.exitCode == ExitCodes.USAGE) {
printHelp(terminal, true);
}
printUserException(terminal, e);
return e.exitCode;
} catch (IOException ioe) {
terminal.errorPrintln(ioe);
return ExitCodes.IO_ERROR;
} catch (Throwable t) {
// It's acceptable to catch Throwable at this point:
// We're about to exit and only want to print the stacktrace with appropriate formatting (e.g. JSON).
terminal.errorPrintln(t);
return ExitCodes.CODE_ERROR;
}
return ExitCodes.OK;
}
/**
* Executes the command, but all errors are thrown.
*/
protected void mainWithoutErrorHandling(String[] args, Terminal terminal, ProcessInfo processInfo) throws Exception {
final OptionSet options = parseOptions(args);
if (options.has(helpOption)) {
printHelp(terminal, false);
return;
}
LoggerFactory loggerFactory = LoggerFactory.provider();
if (options.has(silentOption)) {
terminal.setVerbosity(Terminal.Verbosity.SILENT);
loggerFactory.setRootLevel(Level.OFF);
} else if (options.has(verboseOption)) {
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
loggerFactory.setRootLevel(Level.DEBUG);
} else {
terminal.setVerbosity(Terminal.Verbosity.NORMAL);
loggerFactory.setRootLevel(Level.INFO);
}
execute(terminal, options, processInfo);
}
/**
* Parse command line arguments for this command.
* @param args The string arguments passed to the command
* @return A set of parsed options
*/
public OptionSet parseOptions(String[] args) {
return parser.parse(args);
}
/** Prints a help message for the command to the terminal. */
private void printHelp(Terminal terminal, boolean toStdError) throws IOException {
StringWriter writer = new StringWriter();
parser.printHelpOn(writer);
if (toStdError) {
terminal.errorPrintln(description);
terminal.errorPrintln("");
terminal.errorPrintln(writer.toString());
} else {
terminal.println(description);
terminal.println("");
printAdditionalHelp(terminal);
terminal.println(writer.toString());
}
}
/** Prints additional help information, specific to the command */
protected void printAdditionalHelp(Terminal terminal) {}
protected void printUserException(Terminal terminal, UserException e) {
if (e.getMessage() != null) {
terminal.errorPrintln("");
terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage() + ", with exit code " + e.exitCode);
}
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
protected static void exit(int status) {
System.exit(status);
}
/**
* Executes this command.
*
* Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */
protected abstract void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception;
@Override
public void close() throws IOException {
}
}
| Command |
java | apache__camel | components/camel-infinispan/camel-infinispan-embedded/src/main/java/org/apache/camel/component/infinispan/embedded/InfinispanEmbeddedEventListeners.java | {
"start": 1006,
"end": 1299
} | class ____ {
private InfinispanEmbeddedEventListeners() {
}
// ******************************************
//
// Clustered
//
// ******************************************
@Listener(clustered = true, sync = false)
public static | InfinispanEmbeddedEventListeners |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapKeyValueStateIterator.java | {
"start": 8716,
"end": 9182
} | interface ____ {
boolean hasNext();
/**
* Sets the {@link #currentKey} and {@link #currentValue} to the value of the next entry in
* the state.
*
* @return false if an entry was empty. It can be the case if we try to serialize an empty
* Map or List. In that case we should skip to a next entry.
*/
boolean writeOutNext() throws IOException;
}
private final | SingleStateIterator |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/io/ClassPathResourceTests.java | {
"start": 4093,
"end": 4934
} | class ____ extends OverridingClassLoader {
SimpleThrowawayClassLoader(ClassLoader parent) {
super(parent);
}
}
ClassPathResource resource1 = new ClassPathResource("Resource.class", getClass());
ClassPathResource resource2 = new ClassPathResource("org/springframework/core/io/Resource.class",
new SimpleThrowawayClassLoader(getClass().getClassLoader()));
assertThat(resource1.getPath()).isEqualTo(resource2.getPath());
assertThat(resource1).isNotEqualTo(resource2);
assertThat(resource2).isNotEqualTo(resource1);
}
@Test
void relativeResourcesAreEqual() throws Exception {
Resource resource = new ClassPathResource("dir/");
Resource relative = resource.createRelative("subdir");
assertThat(relative).isEqualTo(new ClassPathResource("dir/subdir"));
}
}
@Nested
| SimpleThrowawayClassLoader |
java | apache__kafka | share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareGroupOffset.java | {
"start": 6408,
"end": 10043
} | class ____ {
private int snapshotEpoch;
private int stateEpoch;
private int leaderEpoch;
private long startOffset;
private int deliveryCompleteCount;
private List<PersisterStateBatch> stateBatches;
private long createTimestamp = NO_TIMESTAMP;
private long writeTimestamp = NO_TIMESTAMP;
public Builder setSnapshotEpoch(int snapshotEpoch) {
this.snapshotEpoch = snapshotEpoch;
return this;
}
public Builder setStateEpoch(int stateEpoch) {
this.stateEpoch = stateEpoch;
return this;
}
public Builder setLeaderEpoch(int leaderEpoch) {
this.leaderEpoch = leaderEpoch;
return this;
}
public Builder setStartOffset(long startOffset) {
this.startOffset = startOffset;
return this;
}
public Builder setDeliveryCompleteCount(int deliveryCompleteCount) {
this.deliveryCompleteCount = deliveryCompleteCount;
return this;
}
public Builder setStateBatches(List<PersisterStateBatch> stateBatches) {
this.stateBatches = stateBatches == null ? List.of() : stateBatches.stream().toList();
return this;
}
public Builder setCreateTimestamp(long createTimestamp) {
this.createTimestamp = createTimestamp;
return this;
}
public Builder setWriteTimestamp(long writeTimestamp) {
this.writeTimestamp = writeTimestamp;
return this;
}
public ShareGroupOffset build() {
return new ShareGroupOffset(snapshotEpoch, stateEpoch, leaderEpoch, startOffset, deliveryCompleteCount, stateBatches, createTimestamp, writeTimestamp);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShareGroupOffset that = (ShareGroupOffset) o;
return snapshotEpoch == that.snapshotEpoch &&
stateEpoch == that.stateEpoch &&
leaderEpoch == that.leaderEpoch &&
startOffset == that.startOffset &&
deliveryCompleteCount == that.deliveryCompleteCount &&
Objects.equals(stateBatches, that.stateBatches) &&
createTimestamp == that.createTimestamp &&
writeTimestamp == that.writeTimestamp;
}
@Override
public int hashCode() {
return Objects.hash(snapshotEpoch, stateEpoch, leaderEpoch, startOffset, deliveryCompleteCount, stateBatches, createTimestamp, writeTimestamp);
}
@Override
public String toString() {
return "ShareGroupOffset{" +
"snapshotEpoch=" + snapshotEpoch +
", stateEpoch=" + stateEpoch +
", leaderEpoch=" + leaderEpoch +
", startOffset=" + startOffset +
", deliveryCompleteCount=" + deliveryCompleteCount +
", createTimestamp=" + createTimestamp +
", writeTimestamp=" + writeTimestamp +
", stateBatches=" + stateBatches +
'}';
}
public Builder builderSupplier() {
return new Builder()
.setSnapshotEpoch(snapshotEpoch())
.setStateEpoch(stateEpoch())
.setLeaderEpoch(leaderEpoch())
.setStartOffset(startOffset())
.setDeliveryCompleteCount(deliveryCompleteCount())
.setStateBatches(stateBatches())
.setCreateTimestamp(createTimestamp())
.setWriteTimestamp(writeTimestamp());
}
}
| Builder |
java | elastic__elasticsearch | modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java | {
"start": 1314,
"end": 19802
} | class ____ extends ESRestTestCase {
public record TestRepository(
String repositoryName,
String clientName,
String bucketName,
String basePath,
Settings extraRepositorySettings
) {
public Closeable register(UnaryOperator<Settings> settingsUnaryOperator) throws IOException {
assertOK(client().performRequest(getRegisterRequest(settingsUnaryOperator)));
return () -> assertOK(client().performRequest(new Request("DELETE", "/_snapshot/" + repositoryName())));
}
private Request getRegisterRequest(UnaryOperator<Settings> settingsUnaryOperator) throws IOException {
return newXContentRequest(
HttpMethod.PUT,
"/_snapshot/" + repositoryName(),
(b, p) -> b.field("type", S3Repository.TYPE)
.startObject("settings")
.value(
settingsUnaryOperator.apply(
Settings.builder()
.put("bucket", bucketName())
.put("base_path", basePath())
.put("client", clientName())
.put("canned_acl", "private")
.put("storage_class", "standard")
.put("disable_chunked_encoding", randomBoolean())
.put(
randomFrom(
Settings.EMPTY,
Settings.builder().put("add_purpose_custom_query_parameter", randomBoolean()).build()
)
)
.put(extraRepositorySettings)
.build()
)
)
.endObject()
);
}
}
protected Settings extraRepositorySettings() {
return Settings.EMPTY;
}
protected abstract String getBucketName();
protected abstract String getBasePath();
protected abstract String getClientName();
protected static String getIdentifierPrefix(String testSuiteName) {
return testSuiteName + "-" + Integer.toString(Murmur3HashFunction.hash(testSuiteName + System.getProperty("tests.seed")), 16) + "-";
}
private TestRepository newTestRepository() {
return new TestRepository(randomIdentifier(), getClientName(), getBucketName(), getBasePath(), extraRepositorySettings());
}
private static UnaryOperator<Settings> readonlyOperator(Boolean readonly) {
return readonly == null
? UnaryOperator.identity()
: s -> Settings.builder().put(s).put(BlobStoreRepository.READONLY_SETTING_KEY, readonly).build();
}
public void testGetRepository() throws IOException {
testGetRepository(null);
}
public void testGetRepositoryReadonlyTrue() throws IOException {
testGetRepository(Boolean.TRUE);
}
public void testGetRepositoryReadonlyFalse() throws IOException {
testGetRepository(Boolean.FALSE);
}
private void testGetRepository(Boolean readonly) throws IOException {
final var repository = newTestRepository();
try (var ignored = repository.register(readonlyOperator(readonly))) {
final var repositoryName = repository.repositoryName();
final var responseObjectPath = assertOKAndCreateObjectPath(
client().performRequest(new Request("GET", "/_snapshot/" + repositoryName))
);
assertEquals("s3", responseObjectPath.evaluate(repositoryName + ".type"));
assertNotNull(responseObjectPath.evaluate(repositoryName + ".settings"));
assertEquals(repository.bucketName(), responseObjectPath.evaluate(repositoryName + ".settings.bucket"));
assertEquals(repository.clientName(), responseObjectPath.evaluate(repositoryName + ".settings.client"));
assertEquals(repository.basePath(), responseObjectPath.evaluate(repositoryName + ".settings.base_path"));
assertEquals("private", responseObjectPath.evaluate(repositoryName + ".settings.canned_acl"));
assertEquals("standard", responseObjectPath.evaluate(repositoryName + ".settings.storage_class"));
assertNull(responseObjectPath.evaluate(repositoryName + ".settings.access_key"));
assertNull(responseObjectPath.evaluate(repositoryName + ".settings.secret_key"));
assertNull(responseObjectPath.evaluate(repositoryName + ".settings.session_token"));
if (readonly == null) {
assertNull(responseObjectPath.evaluate(repositoryName + ".settings." + BlobStoreRepository.READONLY_SETTING_KEY));
} else {
assertEquals(
Boolean.toString(readonly),
responseObjectPath.evaluate(repositoryName + ".settings." + BlobStoreRepository.READONLY_SETTING_KEY)
);
}
}
}
public void testNonexistentBucket() throws Exception {
testNonexistentBucket(null);
}
public void testNonexistentBucketReadonlyTrue() throws Exception {
testNonexistentBucket(Boolean.TRUE);
}
public void testNonexistentBucketReadonlyFalse() throws Exception {
testNonexistentBucket(Boolean.FALSE);
}
private void testNonexistentBucket(Boolean readonly) throws Exception {
final var repository = new TestRepository(
randomIdentifier(),
getClientName(),
randomValueOtherThan(getBucketName(), ESTestCase::randomIdentifier),
getBasePath(),
extraRepositorySettings()
);
final var registerRequest = repository.getRegisterRequest(readonlyOperator(readonly));
final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest));
assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode());
final var responseObjectPath = ObjectPath.createFromResponse(responseException.getResponse());
assertThat(responseObjectPath.evaluate("error.type"), equalTo("repository_verification_exception"));
assertThat(responseObjectPath.evaluate("error.reason"), containsString("is not accessible on master node"));
}
public void testNonexistentClient() throws Exception {
testNonexistentClient(null);
}
public void testNonexistentClientReadonlyTrue() throws Exception {
testNonexistentClient(Boolean.TRUE);
}
public void testNonexistentClientReadonlyFalse() throws Exception {
testNonexistentClient(Boolean.FALSE);
}
private void testNonexistentClient(Boolean readonly) throws Exception {
final var repository = new TestRepository(
randomIdentifier(),
randomValueOtherThanMany(c -> c.equals(getClientName()) || c.equals("default"), ESTestCase::randomIdentifier),
getBucketName(),
getBasePath(),
extraRepositorySettings()
);
final var registerRequest = repository.getRegisterRequest(readonlyOperator(readonly));
final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest));
assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode());
final var responseObjectPath = ObjectPath.createFromResponse(responseException.getResponse());
assertThat(responseObjectPath.evaluate("error.type"), equalTo("repository_verification_exception"));
assertThat(responseObjectPath.evaluate("error.reason"), containsString("is not accessible on master node"));
assertThat(responseObjectPath.evaluate("error.caused_by.type"), equalTo("repository_exception"));
assertThat(responseObjectPath.evaluate("error.caused_by.reason"), containsString("cannot create blob store"));
assertThat(responseObjectPath.evaluate("error.caused_by.caused_by.type"), equalTo("illegal_argument_exception"));
assertThat(responseObjectPath.evaluate("error.caused_by.caused_by.reason"), containsString("Unknown s3 client name"));
}
public void testNonexistentSnapshot() throws Exception {
testNonexistentSnapshot(null);
}
public void testNonexistentSnapshotReadonlyTrue() throws Exception {
testNonexistentSnapshot(Boolean.TRUE);
}
public void testNonexistentSnapshotReadonlyFalse() throws Exception {
testNonexistentSnapshot(Boolean.FALSE);
}
private void testNonexistentSnapshot(Boolean readonly) throws Exception {
final var repository = newTestRepository();
try (var ignored = repository.register(readonlyOperator(readonly))) {
final var repositoryName = repository.repositoryName();
final var getSnapshotRequest = new Request("GET", "/_snapshot/" + repositoryName + "/" + randomIdentifier());
final var getSnapshotException = expectThrows(ResponseException.class, () -> client().performRequest(getSnapshotRequest));
assertEquals(RestStatus.NOT_FOUND.getStatus(), getSnapshotException.getResponse().getStatusLine().getStatusCode());
final var getResponseObjectPath = ObjectPath.createFromResponse(getSnapshotException.getResponse());
assertThat(getResponseObjectPath.evaluate("error.type"), equalTo("snapshot_missing_exception"));
final var restoreRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + randomIdentifier() + "/_restore");
if (randomBoolean()) {
restoreRequest.addParameter("wait_for_completion", Boolean.toString(randomBoolean()));
}
final var restoreException = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest));
assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), restoreException.getResponse().getStatusLine().getStatusCode());
final var restoreResponseObjectPath = ObjectPath.createFromResponse(restoreException.getResponse());
assertThat(restoreResponseObjectPath.evaluate("error.type"), equalTo("snapshot_restore_exception"));
if (readonly != Boolean.TRUE) {
final var deleteRequest = new Request("DELETE", "/_snapshot/" + repositoryName + "/" + randomIdentifier());
final var deleteException = expectThrows(ResponseException.class, () -> client().performRequest(deleteRequest));
assertEquals(RestStatus.NOT_FOUND.getStatus(), deleteException.getResponse().getStatusLine().getStatusCode());
final var deleteResponseObjectPath = ObjectPath.createFromResponse(deleteException.getResponse());
assertThat(deleteResponseObjectPath.evaluate("error.type"), equalTo("snapshot_missing_exception"));
}
}
}
public void testUsageStats() throws Exception {
testUsageStats(null);
}
public void testUsageStatsReadonlyTrue() throws Exception {
testUsageStats(Boolean.TRUE);
}
public void testUsageStatsReadonlyFalse() throws Exception {
testUsageStats(Boolean.FALSE);
}
private void testUsageStats(Boolean readonly) throws Exception {
final var repository = newTestRepository();
try (var ignored = repository.register(readonlyOperator(readonly))) {
final var responseObjectPath = assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/_cluster/stats")));
assertThat(responseObjectPath.evaluate("repositories.s3.count"), equalTo(1));
if (readonly == Boolean.TRUE) {
assertThat(responseObjectPath.evaluate("repositories.s3.read_only"), equalTo(1));
assertNull(responseObjectPath.evaluate("repositories.s3.read_write"));
} else {
assertNull(responseObjectPath.evaluate("repositories.s3.read_only"));
assertThat(responseObjectPath.evaluate("repositories.s3.read_write"), equalTo(1));
}
}
}
public void testSnapshotAndRestore() throws Exception {
final var repository = newTestRepository();
try (var ignored = repository.register(UnaryOperator.identity())) {
final var repositoryName = repository.repositoryName();
final var indexName = randomIdentifier();
final var snapshotsToDelete = new ArrayList<String>(2);
try {
indexDocuments(indexName, """
{"index":{"_id":"1"}}
{"snapshot":"one"}
{"index":{"_id":"2"}}
{"snapshot":"one"}
{"index":{"_id":"3"}}
{"snapshot":"one"}
""", 3);
// create the first snapshot
final var snapshot1Name = randomIdentifier();
createSnapshot(repositoryName, snapshotsToDelete, snapshot1Name);
// check the first snapshot's status
{
final var snapshotStatusResponse = assertOKAndCreateObjectPath(
client().performRequest(new Request("GET", "/_snapshot/" + repositoryName + "/" + snapshot1Name + "/_status"))
);
assertEquals(snapshot1Name, snapshotStatusResponse.evaluate("snapshots.0.snapshot"));
assertEquals("SUCCESS", snapshotStatusResponse.evaluate("snapshots.0.state"));
}
// add more documents to the index
indexDocuments(indexName, """
{"index":{"_id":"4"}}
{"snapshot":"one"}
{"index":{"_id":"5"}}
{"snapshot":"one"}
{"index":{"_id":"6"}}
{"snapshot":"one"}
{"index":{"_id":"7"}}
{"snapshot":"one"}
""", 7);
// create the second snapshot
final var snapshot2Name = randomValueOtherThan(snapshot1Name, ESTestCase::randomIdentifier);
createSnapshot(repositoryName, snapshotsToDelete, snapshot2Name);
// list the snapshots
{
final var listSnapshotsResponse = assertOKAndCreateObjectPath(
client().performRequest(
new Request("GET", "/_snapshot/" + repositoryName + "/" + snapshot1Name + "," + snapshot2Name)
)
);
assertEquals(2, listSnapshotsResponse.evaluateArraySize("snapshots"));
assertEquals(
Set.of(snapshot1Name, snapshot2Name),
Set.of(
listSnapshotsResponse.evaluate("snapshots.0.snapshot"),
listSnapshotsResponse.evaluate("snapshots.1.snapshot")
)
);
assertEquals("SUCCESS", listSnapshotsResponse.evaluate("snapshots.0.state"));
assertEquals("SUCCESS", listSnapshotsResponse.evaluate("snapshots.1.state"));
}
// delete and restore the index from snapshot 2
deleteAndRestoreIndex(indexName, repositoryName, snapshot2Name, 7);
// delete and restore the index from snapshot 1
deleteAndRestoreIndex(indexName, repositoryName, snapshot1Name, 3);
} finally {
if (snapshotsToDelete.isEmpty() == false) {
assertOK(
client().performRequest(
new Request(
"DELETE",
"/_snapshot/" + repositoryName + "/" + snapshotsToDelete.stream().collect(Collectors.joining(","))
)
)
);
}
}
}
}
private static void deleteAndRestoreIndex(String indexName, String repositoryName, String snapshot2Name, int expectedDocCount)
throws IOException {
assertOK(client().performRequest(new Request("DELETE", "/" + indexName)));
final var restoreRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + snapshot2Name + "/_restore");
restoreRequest.addParameter("wait_for_completion", "true");
assertOK(client().performRequest(restoreRequest));
assertIndexDocCount(indexName, expectedDocCount);
}
private static void indexDocuments(String indexName, String body, int expectedDocCount) throws IOException {
// create and populate an index
final var indexDocsRequest = new Request("POST", "/" + indexName + "/_bulk");
indexDocsRequest.addParameter("refresh", "true");
indexDocsRequest.setJsonEntity(body);
assertFalse(assertOKAndCreateObjectPath(client().performRequest(indexDocsRequest)).evaluate("errors"));
// check the index contents
assertIndexDocCount(indexName, expectedDocCount);
}
private static void createSnapshot(String repositoryName, ArrayList<String> snapshotsToDelete, String snapshotName) throws IOException {
final var createSnapshotRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + snapshotName);
createSnapshotRequest.addParameter("wait_for_completion", "true");
final var createSnapshotResponse = assertOKAndCreateObjectPath(client().performRequest(createSnapshotRequest));
snapshotsToDelete.add(snapshotName);
assertEquals(snapshotName, createSnapshotResponse.evaluate("snapshot.snapshot"));
assertEquals("SUCCESS", createSnapshotResponse.evaluate("snapshot.state"));
assertThat(createSnapshotResponse.evaluate("snapshot.shards.failed"), equalTo(0));
}
private static void assertIndexDocCount(String indexName, int expectedCount) throws IOException {
assertThat(
assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/" + indexName + "/_count"))).evaluate("count"),
equalTo(expectedCount)
);
}
}
| AbstractRepositoryS3RestTestCase |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/pl/Oracle_pl_for_0.java | {
"start": 922,
"end": 4277
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "PROCEDURE display_multiple_years (\n" +
" start_year_in IN PLS_INTEGER\n" +
" ,end_year_in IN PLS_INTEGER\n" +
")\n" +
"IS\n" +
"BEGIN\n" +
" FOR l_current_year \n" +
" IN start_year_in .. end_year_in\n" +
" LOOP\n" +
" display_total_sales \n" +
" (l_current_year);\n" +
" END LOOP;\n" +
"END display_multiple_years;";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
SQLStatement stmt = statementList.get(0);
assertEquals(1, statementList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
for (SQLStatement statement : statementList) {
statement.accept(visitor);
}
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("relationships : " + visitor.getRelationships());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("employees")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("emp_name")));
// assertEquals(7, visitor.getColumns().size());
// assertEquals(3, visitor.getConditions().size());
// assertEquals(1, visitor.getRelationships().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "salary")));
{
String output = SQLUtils.toOracleString(stmt);
assertEquals("PROCEDURE display_multiple_years (\n" +
"\tstart_year_in IN PLS_INTEGER, \n" +
"\tend_year_in IN PLS_INTEGER\n" +
")\n" +
"IS\n" +
"BEGIN\n" +
"\tFOR l_current_year IN start_year_in..end_year_in\n" +
"\tLOOP\n" +
"\t\tdisplay_total_sales(l_current_year);\n" +
"\tEND LOOP;\n" +
"END;", //
output);
}
{
String output = SQLUtils.toOracleString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("procedure display_multiple_years (\n" +
"\tstart_year_in in PLS_INTEGER, \n" +
"\tend_year_in in PLS_INTEGER\n" +
")\n" +
"IS\n" +
"begin\n" +
"\tfor l_current_year in start_year_in..end_year_in\n" +
"\tloop\n" +
"\t\tdisplay_total_sales(l_current_year);\n" +
"\tend loop;\n" +
"end;", //
output);
}
}
}
| Oracle_pl_for_0 |
java | apache__camel | components/camel-google/camel-google-mail/src/generated/java/org/apache/camel/component/google/mail/GmailUsersDraftsEndpointConfigurationConfigurer.java | {
"start": 749,
"end": 9016
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("AccessToken", java.lang.String.class);
map.put("ApiName", org.apache.camel.component.google.mail.internal.GoogleMailApiName.class);
map.put("ApplicationName", java.lang.String.class);
map.put("ClientId", java.lang.String.class);
map.put("ClientSecret", java.lang.String.class);
map.put("Content", com.google.api.services.gmail.model.Draft.class);
map.put("Delegate", java.lang.String.class);
map.put("Format", java.lang.String.class);
map.put("Id", java.lang.String.class);
map.put("IncludeSpamTrash", java.lang.Boolean.class);
map.put("MaxResults", java.lang.Long.class);
map.put("MediaContent", com.google.api.client.http.AbstractInputStreamContent.class);
map.put("MethodName", java.lang.String.class);
map.put("PageToken", java.lang.String.class);
map.put("Q", java.lang.String.class);
map.put("RefreshToken", java.lang.String.class);
map.put("Scopes", java.lang.String.class);
map.put("ServiceAccountKey", java.lang.String.class);
map.put("UserId", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.google.mail.GmailUsersDraftsEndpointConfiguration target = (org.apache.camel.component.google.mail.GmailUsersDraftsEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": target.setAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.google.mail.internal.GoogleMailApiName.class, value)); return true;
case "applicationname":
case "applicationName": target.setApplicationName(property(camelContext, java.lang.String.class, value)); return true;
case "clientid":
case "clientId": target.setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "clientsecret":
case "clientSecret": target.setClientSecret(property(camelContext, java.lang.String.class, value)); return true;
case "content": target.setContent(property(camelContext, com.google.api.services.gmail.model.Draft.class, value)); return true;
case "delegate": target.setDelegate(property(camelContext, java.lang.String.class, value)); return true;
case "format": target.setFormat(property(camelContext, java.lang.String.class, value)); return true;
case "id": target.setId(property(camelContext, java.lang.String.class, value)); return true;
case "includespamtrash":
case "includeSpamTrash": target.setIncludeSpamTrash(property(camelContext, java.lang.Boolean.class, value)); return true;
case "maxresults":
case "maxResults": target.setMaxResults(property(camelContext, java.lang.Long.class, value)); return true;
case "mediacontent":
case "mediaContent": target.setMediaContent(property(camelContext, com.google.api.client.http.AbstractInputStreamContent.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "pagetoken":
case "pageToken": target.setPageToken(property(camelContext, java.lang.String.class, value)); return true;
case "q": target.setQ(property(camelContext, java.lang.String.class, value)); return true;
case "refreshtoken":
case "refreshToken": target.setRefreshToken(property(camelContext, java.lang.String.class, value)); return true;
case "scopes": target.setScopes(property(camelContext, java.lang.String.class, value)); return true;
case "serviceaccountkey":
case "serviceAccountKey": target.setServiceAccountKey(property(camelContext, java.lang.String.class, value)); return true;
case "userid":
case "userId": target.setUserId(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return java.lang.String.class;
case "apiname":
case "apiName": return org.apache.camel.component.google.mail.internal.GoogleMailApiName.class;
case "applicationname":
case "applicationName": return java.lang.String.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "clientsecret":
case "clientSecret": return java.lang.String.class;
case "content": return com.google.api.services.gmail.model.Draft.class;
case "delegate": return java.lang.String.class;
case "format": return java.lang.String.class;
case "id": return java.lang.String.class;
case "includespamtrash":
case "includeSpamTrash": return java.lang.Boolean.class;
case "maxresults":
case "maxResults": return java.lang.Long.class;
case "mediacontent":
case "mediaContent": return com.google.api.client.http.AbstractInputStreamContent.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "pagetoken":
case "pageToken": return java.lang.String.class;
case "q": return java.lang.String.class;
case "refreshtoken":
case "refreshToken": return java.lang.String.class;
case "scopes": return java.lang.String.class;
case "serviceaccountkey":
case "serviceAccountKey": return java.lang.String.class;
case "userid":
case "userId": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.google.mail.GmailUsersDraftsEndpointConfiguration target = (org.apache.camel.component.google.mail.GmailUsersDraftsEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return target.getAccessToken();
case "apiname":
case "apiName": return target.getApiName();
case "applicationname":
case "applicationName": return target.getApplicationName();
case "clientid":
case "clientId": return target.getClientId();
case "clientsecret":
case "clientSecret": return target.getClientSecret();
case "content": return target.getContent();
case "delegate": return target.getDelegate();
case "format": return target.getFormat();
case "id": return target.getId();
case "includespamtrash":
case "includeSpamTrash": return target.getIncludeSpamTrash();
case "maxresults":
case "maxResults": return target.getMaxResults();
case "mediacontent":
case "mediaContent": return target.getMediaContent();
case "methodname":
case "methodName": return target.getMethodName();
case "pagetoken":
case "pageToken": return target.getPageToken();
case "q": return target.getQ();
case "refreshtoken":
case "refreshToken": return target.getRefreshToken();
case "scopes": return target.getScopes();
case "serviceaccountkey":
case "serviceAccountKey": return target.getServiceAccountKey();
case "userid":
case "userId": return target.getUserId();
default: return null;
}
}
}
| GmailUsersDraftsEndpointConfigurationConfigurer |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java | {
"start": 1151,
"end": 5824
} | class ____ extends EngineTestCase {
@Override
protected Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
}
Translog.Index toIndexOp(String source) throws IOException {
XContentParser parser = createParser(XContentType.JSON.xContent(), source);
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.copyCurrentStructure(parser);
return new Translog.Index(
"1",
0,
1,
1,
new BytesArray(Strings.toString(builder)),
null,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP
);
}
EngineConfig engineConfig(boolean useSyntheticSource) {
EngineConfig config = engine.config();
Settings.Builder settings = Settings.builder().put(config.getIndexSettings().getSettings());
if (useSyntheticSource) {
settings.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name());
settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true);
} else {
settings.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.name());
settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), false);
}
IndexMetadata imd = IndexMetadata.builder(config.getIndexSettings().getIndexMetadata()).settings(settings).build();
return config(
new IndexSettings(imd, Settings.EMPTY),
config.getStore(),
config.getTranslogConfig().getTranslogPath(),
config.getMergePolicy(),
null
);
}
public void testBasic() throws Exception {
TranslogOperationAsserter syntheticAsserter = TranslogOperationAsserter.withEngineConfig(engineConfig(true));
TranslogOperationAsserter regularAsserter = TranslogOperationAsserter.withEngineConfig(engineConfig(false));
{
var o1 = toIndexOp("""
{
"value": "value-1"
}
""");
var o2 = toIndexOp("""
{
"value": [ "value-1" ]
}
""");
assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2));
assertFalse(regularAsserter.assertSameIndexOperation(o1, o2));
}
{
var o1 = toIndexOp("""
{
"value": [ "value-1", "value-2" ]
}
""");
var o2 = toIndexOp("""
{
"value": [ "value-1", "value-2" ]
}
""");
assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2));
assertTrue(regularAsserter.assertSameIndexOperation(o1, o2));
}
{
var o1 = toIndexOp("""
{
"value": [ "value-2", "value-1" ]
}
""");
var o2 = toIndexOp("""
{
"value": [ "value-1", "value-2" ]
}
""");
assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2));
assertFalse(regularAsserter.assertSameIndexOperation(o1, o2));
}
{
var o1 = toIndexOp("""
{
"value": [ "value-1", "value-2" ]
}
""");
var o2 = toIndexOp("""
{
"value": [ "value-1", "value-2", "value-2" ]
}
""");
assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2));
assertFalse(regularAsserter.assertSameIndexOperation(o1, o2));
}
{
var o1 = toIndexOp("""
{
"value": [ "value-1", "value-2" ]
}
""");
var o2 = toIndexOp("""
{
"value": [ "value-1", "value-2", "value-3" ]
}
""");
assertFalse(syntheticAsserter.assertSameIndexOperation(o1, o2));
assertFalse(regularAsserter.assertSameIndexOperation(o1, o2));
}
}
}
| TranslogOperationAsserterTests |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/discovery/DefaultMethodOrdererContext.java | {
"start": 711,
"end": 1671
} | class ____ implements MethodOrdererContext {
private final Class<?> testClass;
private final List<? extends MethodDescriptor> methodDescriptors;
private final JupiterConfiguration configuration;
DefaultMethodOrdererContext(Class<?> testClass, List<? extends MethodDescriptor> methodDescriptors,
JupiterConfiguration configuration) {
this.testClass = testClass;
this.methodDescriptors = methodDescriptors;
this.configuration = configuration;
}
@Override
public final Class<?> getTestClass() {
return this.testClass;
}
@Override
public List<? extends MethodDescriptor> getMethodDescriptors() {
return this.methodDescriptors;
}
@Override
public Optional<String> getConfigurationParameter(String key) {
return this.configuration.getRawConfigurationParameter(key);
}
@Override
public String toString() {
return new ToStringBuilder(this).append("methodDescriptors", methodDescriptors).toString();
}
}
| DefaultMethodOrdererContext |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java | {
"start": 13206,
"end": 13392
} | class ____ be subclassed within the hadoop codebase
* if custom iterators are needed.
* @param <S> source type
* @param <T> type of returned value
*/
public static abstract | may |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng5389LifecycleParticipantAfterSessionEnd.java | {
"start": 903,
"end": 1781
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-5389-lifecycleParticipant-afterSession");
File extensionDir = new File(testDir, "extension");
File projectDir = new File(testDir, "basic");
Verifier verifier;
// install the test plugin
verifier = newVerifier(extensionDir.getAbsolutePath());
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// build the test project
verifier = newVerifier(projectDir.getAbsolutePath());
verifier.addCliArgument("package");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier.verifyFilePresent("target/afterSessionEnd.txt");
}
}
| MavenITmng5389LifecycleParticipantAfterSessionEnd |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/CallAdapterTest.java | {
"start": 3697,
"end": 3758
} | class ____<T> {
T method() {
return null;
}
}
}
| A |
java | quarkusio__quarkus | integration-tests/elytron-security-jdbc/src/main/java/io/quarkus/elytron/security/jdbc/it/WorkdayPermission.java | {
"start": 828,
"end": 2438
} | class ____ have a formal parameter {@link String} named 'day'.
*
* @param name name of the Permission object being created.
* @param actions Permission actions
* @param day workday
*/
public WorkdayPermission(String name, String[] actions, String day) {
super(name);
this.actions = actions;
this.day = day;
}
@Override
public boolean implies(Permission permission) {
if (permission instanceof WorkdayPermission) {
WorkdayPermission that = (WorkdayPermission) permission;
// verify Permission name and actions has been passed to the constructor
if (that.getName().equals("worker") && that.getActions().contains("adult")) {
// verify we can obtain bean instance
final WorkdayEvaluator workdayEvaluator = Arc.container().instance(WorkdayEvaluator.class).get();
return workdayEvaluator.isWorkday(that.day);
}
}
return false;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
WorkdayPermission that = (WorkdayPermission) o;
return Arrays.equals(actions, that.actions) && Objects.equals(day, that.day);
}
@Override
public int hashCode() {
int result = Objects.hash(day);
result = 31 * result + Arrays.hashCode(actions);
return result;
}
@Override
public String getActions() {
return String.join(",", actions);
}
}
| must |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/mapping/meta/ServiceMeta.java | {
"start": 1267,
"end": 4214
} | class ____ extends AnnotationSupport {
private final List<Class<?>> hierarchy;
private final Class<?> type;
private final Object service;
private final ServiceDescriptor serviceDescriptor;
private final URL url;
private final String contextPath;
private List<MethodMeta> exceptionHandlers;
public ServiceMeta(
Collection<Class<?>> hierarchy,
ServiceDescriptor serviceDescriptor,
Object service,
URL url,
RestToolKit toolKit) {
super(toolKit);
this.hierarchy = new ArrayList<>(hierarchy);
this.serviceDescriptor = serviceDescriptor;
type = this.hierarchy.get(0);
this.service = service;
this.url = url;
contextPath = PathUtils.getContextPath(url);
}
public List<Class<?>> getHierarchy() {
return hierarchy;
}
public Class<?> getType() {
return type;
}
public ServiceDescriptor getServiceDescriptor() {
return serviceDescriptor;
}
public Object getService() {
return service;
}
public URL getUrl() {
return url;
}
public String getServiceInterface() {
return url.getServiceInterface();
}
public String getServiceGroup() {
return url.getGroup();
}
public String getServiceVersion() {
return url.getVersion();
}
public String getContextPath() {
return contextPath;
}
public List<MethodMeta> getExceptionHandlers() {
return exceptionHandlers;
}
@Override
public List<? extends AnnotatedElement> getAnnotatedElements() {
return hierarchy;
}
@Override
protected AnnotatedElement getAnnotatedElement() {
return hierarchy.get(0);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(64);
sb.append("ServiceMeta{interface=")
.append(getServiceInterface())
.append(", service=")
.append(toShortString());
if (StringUtils.isNotEmpty(contextPath)) {
sb.append(", contextPath='").append(contextPath).append('\'');
}
String group = getServiceGroup();
if (StringUtils.isNotEmpty(group)) {
sb.append(", group='").append(group).append('\'');
}
String version = getServiceVersion();
if (StringUtils.isNotEmpty(version)) {
sb.append(", version='").append(version).append('\'');
}
sb.append('}');
return sb.toString();
}
public String toShortString() {
return type.getSimpleName() + '@' + Integer.toHexString(System.identityHashCode(service));
}
public void addExceptionHandler(MethodMeta methodMeta) {
if (exceptionHandlers == null) {
exceptionHandlers = new ArrayList<>();
}
exceptionHandlers.add(methodMeta);
}
}
| ServiceMeta |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MountTableStore.java | {
"start": 2273,
"end": 3386
} | class ____ extends CachedRecordStore<MountTable>
implements MountTableManager {
private static final Logger LOG =
LoggerFactory.getLogger(MountTableStore.class);
private MountTableRefresherService refreshService;
/** Router quota manager to update quota usage in mount table. */
private RouterQuotaManager quotaManager;
public MountTableStore(StateStoreDriver driver) {
super(MountTable.class, driver);
}
public void setRefreshService(MountTableRefresherService refreshService) {
this.refreshService = refreshService;
}
public void setQuotaManager(RouterQuotaManager quotaManager) {
this.quotaManager = quotaManager;
}
public RouterQuotaManager getQuotaManager() {
return quotaManager;
}
/**
* Update mount table cache of this router as well as all other routers.
*/
protected void updateCacheAllRouters() {
if (refreshService != null) {
try {
refreshService.refresh();
} catch (StateStoreUnavailableException e) {
LOG.error("Cannot refresh mount table: state store not available", e);
}
}
}
} | MountTableStore |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/handler/invocation/reactive/InvocableHandlerMethodTests.java | {
"start": 7004,
"end": 7624
} | class ____ {
private AtomicReference<String> result = new AtomicReference<>();
public Handler() {
}
public String getResult() {
return this.result.get();
}
String handle(Integer intArg, String stringArg) {
return intArg + "-" + stringArg;
}
void handle(double amount) {
this.result.set(String.valueOf(amount));
}
void handleWithException(Throwable ex) throws Throwable {
throw ex;
}
Mono<Void> handleAsync() {
return Mono.delay(Duration.ofMillis(100)).thenEmpty(Mono.defer(() -> {
this.result.set("success");
return Mono.empty();
}));
}
}
private static | Handler |
java | spring-projects__spring-security | config/src/integration-test/java/org/springframework/security/config/annotation/rsocket/HelloRSocketWithWebFluxITests.java | {
"start": 2505,
"end": 4045
} | class ____ {
@Autowired
RSocketMessageHandler handler;
@Autowired
SecuritySocketAcceptorInterceptor interceptor;
@Autowired
ServerController controller;
private CloseableChannel server;
private RSocketRequester requester;
@BeforeEach
public void setup() {
// @formatter:off
this.server = RSocketServer.create()
.payloadDecoder(PayloadDecoder.ZERO_COPY)
.interceptors((registry) -> registry.forSocketAcceptor(this.interceptor)
)
.acceptor(this.handler.responder())
.bind(TcpServerTransport.create("localhost", 0))
.block();
// @formatter:on
}
@AfterEach
public void dispose() {
this.requester.rsocket().dispose();
this.server.dispose();
this.controller.payloads.clear();
}
// gh-16161
@Test
public void retrieveMonoWhenSecureThenDenied() {
// @formatter:off
this.requester = RSocketRequester.builder()
.rsocketStrategies(this.handler.getRSocketStrategies())
.connectTcp("localhost", this.server.address().getPort())
.block();
// @formatter:on
String data = "rob";
// @formatter:off
assertThatExceptionOfType(Exception.class).isThrownBy(
() -> this.requester.route("secure.retrieve-mono")
.data(data)
.retrieveMono(String.class)
.block()
)
.matches((ex) -> ex instanceof RejectedSetupException
|| ex.getClass().toString().contains("ReactiveException"));
// @formatter:on
assertThat(this.controller.payloads).isEmpty();
}
@Configuration
@EnableRSocketSecurity
@EnableWebFluxSecurity
static | HelloRSocketWithWebFluxITests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/collectionincompatibletype/CollectionIncompatibleTypeTest.java | {
"start": 16575,
"end": 17684
} | class ____<A, B> {
public A first;
public B second;
}
public boolean declaredTypeVsExpressionType(Pair<Integer, String> pair, List<Integer> list) {
return list.contains(pair.first);
}
public boolean containsParameterizedType(
Collection<Class<? extends String>> collection, Class<?> clazz) {
return collection.contains(clazz);
}
public boolean containsWildcard(Collection<String> collection, Optional<?> optional) {
return collection.contains(optional.get());
}
public <T extends String> T subclassHasDifferentTypeParameters(
ClassToInstanceMap<String> map, Class<T> klass) {
return klass.cast(map.get(klass));
}
// Ensure we don't match Hashtable.contains and ConcurrentHashtable.contains because there is a
// separate check, HashtableContains, specifically for them.
public boolean hashtableContains() {
Hashtable<Integer, String> hashtable = new Hashtable<>();
ConcurrentHashMap<Integer, String> concurrentHashMap = new ConcurrentHashMap<>();
return hashtable.contains(1) || concurrentHashMap.contains(1);
}
private static | Pair |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java | {
"start": 6273,
"end": 90184
} | class ____ extends InferenceServiceTestCase {
private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
private final MockWebServer webServer = new MockWebServer();
private ThreadPool threadPool;
private HttpClientManager clientManager;
@Before
public void init() throws Exception {
webServer.start();
threadPool = createThreadPool(inferenceUtilityExecutors());
clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class));
}
@After
public void shutdown() throws IOException {
clientManager.close();
terminate(threadPool);
webServer.close();
}
public void testParseRequestConfig_CreatesAnAzureAiStudioEmbeddingsModel() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
}, exception -> fail("Unexpected exception: " + exception));
service.parseRequestConfig(
"id",
TaskType.TEXT_EMBEDDING,
getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null),
getEmbeddingsTaskSettingsMap("user"),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_CreatesAnAzureAiStudioEmbeddingsModelWhenChunkingSettingsProvided() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
}, exception -> fail("Unexpected exception: " + exception));
service.parseRequestConfig(
"id",
TaskType.TEXT_EMBEDDING,
getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null),
getEmbeddingsTaskSettingsMap("user"),
createRandomChunkingSettingsMap(),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_CreatesAnAzureAiStudioEmbeddingsModelWhenChunkingSettingsNotProvided() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
}, exception -> fail("Unexpected exception: " + exception));
service.parseRequestConfig(
"id",
TaskType.TEXT_EMBEDDING,
getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null),
getEmbeddingsTaskSettingsMap("user"),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_CreatesAnAzureAiStudioChatCompletionModel() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
var completionModel = (AzureAiStudioChatCompletionModel) model;
assertThat(completionModel.getServiceSettings().target(), is("http://target.local"));
assertThat(completionModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
assertThat(completionModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
assertThat(completionModel.getSecretSettings().apiKey().toString(), is("secret"));
assertNull(completionModel.getTaskSettings().temperature());
assertTrue(completionModel.getTaskSettings().doSample());
}, exception -> fail("Unexpected exception: " + exception));
service.parseRequestConfig(
"id",
TaskType.COMPLETION,
getRequestConfigMap(
getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
getChatCompletionTaskSettingsMap(null, null, true, null),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_CreatesAnAzureAiStudioRerankModel() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(AzureAiStudioRerankModel.class));
var rerankModel = (AzureAiStudioRerankModel) model;
assertThat(rerankModel.getServiceSettings().target(), is("http://target.local"));
assertThat(rerankModel.getServiceSettings().provider(), is(AzureAiStudioProvider.COHERE));
assertThat(rerankModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
assertThat(rerankModel.getSecretSettings().apiKey().toString(), is("secret"));
assertNull(rerankModel.getTaskSettings().returnDocuments());
assertNull(rerankModel.getTaskSettings().topN());
}, exception -> fail("Unexpected exception: " + exception));
service.parseRequestConfig(
"id",
TaskType.RERANK,
getRequestConfigMap(
getRerankServiceSettingsMap("http://target.local", "cohere", "token"),
getRerankTaskSettingsMap(null, null),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException {
try (var service = createService()) {
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(exception.getMessage(), is("The [azureaistudio] service does not support task type [sparse_embedding]"));
}
);
service.parseRequestConfig(
"id",
TaskType.SPARSE_EMBEDDING,
getRequestConfigMap(
getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
getChatCompletionTaskSettingsMap(null, null, true, null),
getSecretSettingsMap("secret")
),
modelVerificationListener
);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException {
try (var service = createService()) {
var config = getRequestConfigMap(
getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
getChatCompletionTaskSettingsMap(null, null, true, null),
getSecretSettingsMap("secret")
);
config.put("extra_key", "value");
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.COMPLETION, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInEmbeddingServiceSettingsMap() throws IOException {
try (var service = createService()) {
var serviceSettings = getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null);
serviceSettings.put("extra_key", "value");
var config = getRequestConfigMap(serviceSettings, getEmbeddingsTaskSettingsMap("user"), getSecretSettingsMap("secret"));
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenDimsSetByUserExistsInEmbeddingServiceSettingsMap() throws IOException {
try (var service = createService()) {
var config = getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, null, null),
getEmbeddingsTaskSettingsMap("user"),
getSecretSettingsMap("secret")
);
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ValidationException.class));
assertThat(
exception.getMessage(),
containsString("[service_settings] does not allow the setting [dimensions_set_by_user]")
);
}
);
service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInEmbeddingTaskSettingsMap() throws IOException {
try (var service = createService()) {
var taskSettings = getEmbeddingsTaskSettingsMap("user");
taskSettings.put("extra_key", "value");
var config = getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null),
taskSettings,
getSecretSettingsMap("secret")
);
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInEmbeddingSecretSettingsMap() throws IOException {
try (var service = createService()) {
var secretSettings = getSecretSettingsMap("secret");
secretSettings.put("extra_key", "value");
var config = getRequestConfigMap(
getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", null, null, null, null),
getEmbeddingsTaskSettingsMap("user"),
secretSettings
);
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInChatCompletionServiceSettingsMap() throws IOException {
try (var service = createService()) {
var serviceSettings = getChatCompletionServiceSettingsMap("http://target.local", "openai", "token");
serviceSettings.put("extra_key", "value");
var config = getRequestConfigMap(
serviceSettings,
getChatCompletionTaskSettingsMap(null, 2.0, null, null),
getSecretSettingsMap("secret")
);
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.COMPLETION, config, modelVerificationListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInChatCompletionTaskSettingsMap() throws IOException {
try (var service = createService()) {
var taskSettings = getChatCompletionTaskSettingsMap(null, 2.0, null, null);
taskSettings.put("extra_key", "value");
var config = getRequestConfigMap(
getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
taskSettings,
getSecretSettingsMap("secret")
);
ActionListener<Model> modelVerificationListener = ActionListener.wrap(
model -> fail("Expected exception, but got model: " + model),
exception -> {
assertThat(exception, instanceOf(ElasticsearchStatusException.class));
assertThat(
exception.getMessage(),
is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
);
}
);
service.parseRequestConfig("id", TaskType.COMPLETION, config, modelVerificationListener);
}
}
/** parseRequestConfig must reject an unknown key inside the completion secret settings map. */
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInChatCompletionSecretSettingsMap() throws IOException {
    try (var service = createService()) {
        var secretSettings = getSecretSettingsMap("secret");
        secretSettings.put("extra_key", "value");
        var config = getRequestConfigMap(
            getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
            getChatCompletionTaskSettingsMap(null, 2.0, null, null),
            secretSettings
        );
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.COMPLETION, config, modelVerificationListener);
    }
}
/** parseRequestConfig must reject an unknown key inside the rerank service settings map. */
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInRerankServiceSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getRerankServiceSettingsMap("http://target.local", "cohere", "token");
        serviceSettings.put("extra_key", "value");
        var config = getRequestConfigMap(serviceSettings, getRerankTaskSettingsMap(null, null), getSecretSettingsMap("secret"));
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.RERANK, config, modelVerificationListener);
    }
}
/** parseRequestConfig must reject an unknown key inside the rerank task settings map. */
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInRerankTaskSettingsMap() throws IOException {
    try (var service = createService()) {
        var taskSettings = getRerankTaskSettingsMap(null, null);
        taskSettings.put("extra_key", "value");
        var config = getRequestConfigMap(
            getRerankServiceSettingsMap("http://target.local", "cohere", "token"),
            taskSettings,
            getSecretSettingsMap("secret")
        );
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.RERANK, config, modelVerificationListener);
    }
}
/** parseRequestConfig must reject an unknown key inside the rerank secret settings map. */
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInRerankSecretSettingsMap() throws IOException {
    try (var service = createService()) {
        var secretSettings = getSecretSettingsMap("secret");
        secretSettings.put("extra_key", "value");
        var config = getRequestConfigMap(
            getRerankServiceSettingsMap("http://target.local", "cohere", "token"),
            getRerankTaskSettingsMap(null, null),
            secretSettings
        );
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("Configuration contains settings [{extra_key=value}] unknown to the [azureaistudio] service")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.RERANK, config, modelVerificationListener);
    }
}
/** The "databricks" provider does not support text_embedding; parseRequestConfig must fail with a status exception. */
public void testParseRequestConfig_ThrowsWhenProviderIsNotValidForEmbeddings() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getEmbeddingsServiceSettingsMap("http://target.local", "databricks", "token", null, null, null, null);
        var config = getRequestConfigMap(serviceSettings, getEmbeddingsTaskSettingsMap("user"), getSecretSettingsMap("secret"));
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(exception.getMessage(), is("The [text_embedding] task type for provider [databricks] is not available"));
            }
        );
        service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
    }
}
/** The "realtime" endpoint type is invalid for openai embeddings; parseRequestConfig must fail. */
public void testParseRequestConfig_ThrowsWhenEndpointTypeIsNotValidForEmbeddingsProvider() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getEmbeddingsServiceSettingsMap("http://target.local", "openai", "realtime", null, null, null, null);
        var config = getRequestConfigMap(serviceSettings, getEmbeddingsTaskSettingsMap("user"), getSecretSettingsMap("secret"));
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("The [realtime] endpoint type with [text_embedding] task type for provider [openai] is not available")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, modelVerificationListener);
    }
}
/** The "realtime" endpoint type is invalid for openai completion; parseRequestConfig must fail. */
public void testParseRequestConfig_ThrowsWhenEndpointTypeIsNotValidForChatCompletionProvider() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getChatCompletionServiceSettingsMap("http://target.local", "openai", "realtime");
        var config = getRequestConfigMap(
            serviceSettings,
            getChatCompletionTaskSettingsMap(null, null, null, null),
            getSecretSettingsMap("secret")
        );
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("The [realtime] endpoint type with [completion] task type for provider [openai] is not available")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.COMPLETION, config, modelVerificationListener);
    }
}
/** The "databricks" provider does not support rerank; parseRequestConfig must fail with a status exception. */
public void testParseRequestConfig_ThrowsWhenProviderIsNotValidForRerank() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getRerankServiceSettingsMap("http://target.local", "databricks", "token");
        var config = getRequestConfigMap(serviceSettings, getRerankTaskSettingsMap(null, null), getSecretSettingsMap("secret"));
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(exception.getMessage(), is("The [rerank] task type for provider [databricks] is not available"));
            }
        );
        service.parseRequestConfig("id", TaskType.RERANK, config, modelVerificationListener);
    }
}
/** The "realtime" endpoint type is invalid for cohere rerank; parseRequestConfig must fail. */
public void testParseRequestConfig_ThrowsWhenEndpointTypeIsNotValidForRerankProvider() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getRerankServiceSettingsMap("http://target.local", "cohere", "realtime");
        var config = getRequestConfigMap(serviceSettings, getRerankTaskSettingsMap(null, null), getSecretSettingsMap("secret"));
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(
                    exception.getMessage(),
                    is("The [realtime] endpoint type with [rerank] task type for provider [cohere] is not available")
                );
            }
        );
        service.parseRequestConfig("id", TaskType.RERANK, config, modelVerificationListener);
    }
}
/** parsePersistedConfigWithSecrets rebuilds an embeddings model with every persisted field intact. */
public void testParsePersistedConfig_CreatesAnAzureAiStudioEmbeddingsModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            getSecretSettingsMap("secret")
        );
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
    }
}
/** Persisted chunking settings must survive the round trip into the rebuilt embeddings model. */
public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChunkingSettingsProvided() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            createRandomChunkingSettingsMap(),
            getSecretSettingsMap("secret")
        );
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
        assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
    }
}
/**
 * Without persisted chunking settings the rebuilt model still exposes a ChunkingSettings instance —
 * presumably the service supplies defaults when none are persisted (TODO confirm against AzureAiStudioService).
 */
public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChunkingSettingsNotProvided() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            getSecretSettingsMap("secret")
        );
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret"));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
        assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
    }
}
/** parsePersistedConfigWithSecrets rebuilds a chat completion model with all task settings intact. */
public void testParsePersistedConfig_CreatesAnAzureAiStudioChatCompletionModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
            getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512),
            getSecretSettingsMap("secret")
        );
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.COMPLETION, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
        var chatCompletionModel = (AzureAiStudioChatCompletionModel) model;
        assertThat(chatCompletionModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(chatCompletionModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(chatCompletionModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(chatCompletionModel.getTaskSettings().temperature(), is(1.0));
        assertThat(chatCompletionModel.getTaskSettings().topP(), is(2.0));
        assertThat(chatCompletionModel.getTaskSettings().doSample(), is(true));
        assertThat(chatCompletionModel.getTaskSettings().maxNewTokens(), is(512));
    }
}
/** parsePersistedConfigWithSecrets rebuilds a rerank model with its service and task settings intact. */
public void testParsePersistedConfig_CreatesAnAzureAiStudioRerankModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getRerankServiceSettingsMap("http://target.local", "cohere", "token"),
            getRerankTaskSettingsMap(true, 2),
            getSecretSettingsMap("secret")
        );
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.RERANK, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioRerankModel.class));
        // Renamed local from the misleading "chatCompletionModel" (copy-paste leftover) to match
        // the other rerank tests in this file.
        var rerankModel = (AzureAiStudioRerankModel) model;
        assertThat(rerankModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(rerankModel.getServiceSettings().provider(), is(AzureAiStudioProvider.COHERE));
        assertThat(rerankModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(rerankModel.getTaskSettings().returnDocuments(), is(true));
        assertThat(rerankModel.getTaskSettings().topN(), is(2));
    }
}
/**
 * Unsupported task types (sparse_embedding) must be rejected.
 * NOTE(review): despite the "ParsePersistedConfig" name, this test exercises parseRequestConfig —
 * consider renaming or switching the call; confirm intent before changing.
 */
public void testParsePersistedConfig_ThrowsUnsupportedModelType() throws IOException {
    try (var service = createService()) {
        ActionListener<Model> modelVerificationListener = ActionListener.wrap(
            model -> fail("Expected exception, but got model: " + model),
            exception -> {
                assertThat(exception, instanceOf(ElasticsearchStatusException.class));
                assertThat(exception.getMessage(), is("The [azureaistudio] service does not support task type [sparse_embedding]"));
            }
        );
        service.parseRequestConfig(
            "id",
            TaskType.SPARSE_EMBEDDING,
            getRequestConfigMap(
                getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
                getChatCompletionTaskSettingsMap(null, null, true, null),
                getSecretSettingsMap("secret")
            ),
            modelVerificationListener
        );
    }
}
/** Parsing a persisted config under an unsupported task type must throw, wrapping the unsupported-task message. */
public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
            getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512),
            getSecretSettingsMap("secret")
        );
        var thrownException = expectThrows(
            ElasticsearchStatusException.class,
            () -> service.parsePersistedConfigWithSecrets("id", TaskType.SPARSE_EMBEDDING, config.config(), config.secrets())
        );
        assertThat(thrownException.getMessage(), containsString("Failed to parse stored model [id] for [azureaistudio] service"));
        assertThat(
            thrownException.getMessage(),
            containsString("The [azureaistudio] service does not support task type [sparse_embedding]")
        );
    }
}
/** Unknown keys in a persisted top-level config are tolerated (unlike request configs, which reject them). */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException {
    try (var service = createService()) {
        var persisted = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            getSecretSettingsMap("secret")
        );
        persisted.config().put("extra_key", "value");

        var parsedModel = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, persisted.config(), persisted.secrets());

        assertThat(parsedModel, instanceOf(AzureAiStudioEmbeddingsModel.class));
    }
}
/** Unknown keys in persisted embedding service settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenExtraKeyExistsInEmbeddingServiceSettingsMap() throws IOException {
    try (var service = createService()) {
        var settingsWithUnknownKey = getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null);
        settingsWithUnknownKey.put("extra_key", "value");

        var persisted = getPersistedConfigMap(settingsWithUnknownKey, getEmbeddingsTaskSettingsMap("user"), getSecretSettingsMap("secret"));
        var parsedModel = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, persisted.config(), persisted.secrets());

        assertThat(parsedModel, instanceOf(AzureAiStudioEmbeddingsModel.class));
    }
}
/** Unknown keys in persisted embedding task settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInEmbeddingTaskSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null);
        var taskSettings = getEmbeddingsTaskSettingsMap("user");
        taskSettings.put("extra_key", "value");
        var secretSettings = getSecretSettingsMap("secret");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
    }
}
/** Unknown keys in persisted embedding secret settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInEmbeddingSecretSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null);
        var taskSettings = getEmbeddingsTaskSettingsMap("user");
        var secretSettings = getSecretSettingsMap("secret");
        secretSettings.put("extra_key", "value");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.TEXT_EMBEDDING, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
    }
}
/** Unknown keys in persisted completion service settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInChatCompletionServiceSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getChatCompletionServiceSettingsMap("http://target.local", "openai", "token");
        serviceSettings.put("extra_key", "value");
        var taskSettings = getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512);
        var secretSettings = getSecretSettingsMap("secret");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.COMPLETION, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
    }
}
/** Unknown keys in persisted completion task settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInChatCompletionTaskSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getChatCompletionServiceSettingsMap("http://target.local", "openai", "token");
        var taskSettings = getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512);
        taskSettings.put("extra_key", "value");
        var secretSettings = getSecretSettingsMap("secret");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.COMPLETION, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
    }
}
/** Unknown keys in persisted completion secret settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInChatCompletionSecretSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getChatCompletionServiceSettingsMap("http://target.local", "openai", "token");
        var taskSettings = getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512);
        var secretSettings = getSecretSettingsMap("secret");
        secretSettings.put("extra_key", "value");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.COMPLETION, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
    }
}
/** Unknown keys in persisted rerank service settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInRerankServiceSettingsMap() throws IOException {
    try (var service = createService()) {
        var settingsWithUnknownKey = getRerankServiceSettingsMap("http://target.local", "cohere", "token");
        settingsWithUnknownKey.put("extra_key", "value");

        var persisted = getPersistedConfigMap(settingsWithUnknownKey, getRerankTaskSettingsMap(true, 2), getSecretSettingsMap("secret"));
        var parsedModel = service.parsePersistedConfigWithSecrets("id", TaskType.RERANK, persisted.config(), persisted.secrets());

        assertThat(parsedModel, instanceOf(AzureAiStudioRerankModel.class));
    }
}
/** Unknown keys in persisted rerank task settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInRerankTaskSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getRerankServiceSettingsMap("http://target.local", "cohere", "token");
        var taskSettings = getRerankTaskSettingsMap(true, 2);
        taskSettings.put("extra_key", "value");
        var secretSettings = getSecretSettingsMap("secret");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.RERANK, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioRerankModel.class));
    }
}
/** Unknown keys in persisted rerank secret settings are tolerated when rebuilding the model. */
public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInRerankSecretSettingsMap() throws IOException {
    try (var service = createService()) {
        var serviceSettings = getRerankServiceSettingsMap("http://target.local", "cohere", "token");
        var taskSettings = getRerankTaskSettingsMap(true, 2);
        var secretSettings = getSecretSettingsMap("secret");
        secretSettings.put("extra_key", "value");
        var config = getPersistedConfigMap(serviceSettings, taskSettings, secretSettings);
        var model = service.parsePersistedConfigWithSecrets("id", TaskType.RERANK, config.config(), config.secrets());
        assertThat(model, instanceOf(AzureAiStudioRerankModel.class));
    }
}
/** parsePersistedConfig (no secrets variant) rebuilds an embeddings model from persisted settings alone. */
public void testParsePersistedConfig_WithoutSecretsCreatesEmbeddingsModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            Map.of()
        );
        var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, config.config());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
    }
}
/** Secrets-free parsing keeps persisted chunking settings on the rebuilt embeddings model. */
public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSettingsProvided() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            createRandomChunkingSettingsMap(),
            Map.of()
        );
        var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, config.config());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
        assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
    }
}
/**
 * Secrets-free parsing still exposes a ChunkingSettings instance when none was persisted —
 * presumably defaults are applied (TODO confirm against AzureAiStudioService).
 */
public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSettingsNotProvided() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getEmbeddingsServiceSettingsMap("http://target.local", "openai", "token", 1024, true, 512, null),
            getEmbeddingsTaskSettingsMap("user"),
            Map.of()
        );
        var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, config.config());
        assertThat(model, instanceOf(AzureAiStudioEmbeddingsModel.class));
        var embeddingsModel = (AzureAiStudioEmbeddingsModel) model;
        assertThat(embeddingsModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(embeddingsModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(embeddingsModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(embeddingsModel.getServiceSettings().dimensions(), is(1024));
        assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(true));
        assertThat(embeddingsModel.getServiceSettings().maxInputTokens(), is(512));
        assertThat(embeddingsModel.getTaskSettings().user(), is("user"));
        assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class));
    }
}
/** parsePersistedConfig (no secrets variant) rebuilds a chat completion model with all task settings intact. */
public void testParsePersistedConfig_WithoutSecretsCreatesChatCompletionModel() throws IOException {
    try (var service = createService()) {
        var config = getPersistedConfigMap(
            getChatCompletionServiceSettingsMap("http://target.local", "openai", "token"),
            getChatCompletionTaskSettingsMap(1.0, 2.0, true, 512),
            Map.of()
        );
        var model = service.parsePersistedConfig("id", TaskType.COMPLETION, config.config());
        assertThat(model, instanceOf(AzureAiStudioChatCompletionModel.class));
        var chatCompletionModel = (AzureAiStudioChatCompletionModel) model;
        assertThat(chatCompletionModel.getServiceSettings().target(), is("http://target.local"));
        assertThat(chatCompletionModel.getServiceSettings().provider(), is(AzureAiStudioProvider.OPENAI));
        assertThat(chatCompletionModel.getServiceSettings().endpointType(), is(AzureAiStudioEndpointType.TOKEN));
        assertThat(chatCompletionModel.getTaskSettings().temperature(), is(1.0));
        assertThat(chatCompletionModel.getTaskSettings().topP(), is(2.0));
        assertThat(chatCompletionModel.getTaskSettings().doSample(), is(true));
        assertThat(chatCompletionModel.getTaskSettings().maxNewTokens(), is(512));
    }
}
/** parsePersistedConfig (no secrets variant) rebuilds a rerank model with its settings intact. */
public void testParsePersistedConfig_WithoutSecretsCreatesRerankModel() throws IOException {
    try (var service = createService()) {
        var persisted = getPersistedConfigMap(
            getRerankServiceSettingsMap("http://target.local", "cohere", "token"),
            getRerankTaskSettingsMap(true, 2),
            Map.of()
        );

        var parsed = service.parsePersistedConfig("id", TaskType.RERANK, persisted.config());
        assertThat(parsed, instanceOf(AzureAiStudioRerankModel.class));

        var rerankModel = (AzureAiStudioRerankModel) parsed;
        var rerankServiceSettings = rerankModel.getServiceSettings();
        assertThat(rerankServiceSettings.target(), is("http://target.local"));
        assertThat(rerankServiceSettings.provider(), is(AzureAiStudioProvider.COHERE));
        assertThat(rerankServiceSettings.endpointType(), is(AzureAiStudioEndpointType.TOKEN));

        var rerankTaskSettings = rerankModel.getTaskSettings();
        assertThat(rerankTaskSettings.returnDocuments(), is(true));
        assertThat(rerankTaskSettings.topN(), is(2));
    }
}
/** updateModelWithEmbeddingDetails must reject a non-embeddings model (a chat completion model here). */
public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        var model = AzureAiStudioChatCompletionModelTests.createModel(
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomFrom(AzureAiStudioProvider.values()),
            randomFrom(AzureAiStudioEndpointType.values()),
            randomAlphaOfLength(10)
        );
        assertThrows(
            ElasticsearchStatusException.class,
            () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); }
        );
    }
}
/** A null similarity on the original model should be defaulted by updateModelWithEmbeddingDetails. */
public void testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException {
    testUpdateModelWithEmbeddingDetails_Successful(null);
}
/** An explicit similarity on the original model should be preserved by updateModelWithEmbeddingDetails. */
public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException {
    testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values()));
}
/**
 * Shared driver: builds a random embeddings model with the given similarity (possibly null) and checks
 * that updateModelWithEmbeddingDetails sets the embedding size and defaults null similarity to DOT_PRODUCT.
 */
private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        var embeddingSize = randomNonNegativeInt();
        var model = AzureAiStudioEmbeddingsModelTests.createModel(
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomFrom(AzureAiStudioProvider.values()),
            randomFrom(AzureAiStudioEndpointType.values()),
            randomAlphaOfLength(10),
            randomNonNegativeInt(),
            randomBoolean(),
            randomNonNegativeInt(),
            similarityMeasure,
            randomAlphaOfLength(10),
            RateLimitSettingsTests.createRandom()
        );
        Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize);
        // Null similarity is expected to default to DOT_PRODUCT.
        SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null ? SimilarityMeasure.DOT_PRODUCT : similarityMeasure;
        assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity());
        assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue());
    }
}
/** updateModelWithChatCompletionDetails must reject a non-completion model (an embeddings model here). */
public void testUpdateModelWithChatCompletionDetails_InvalidModelProvided() throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        var model = AzureAiStudioEmbeddingsModelTests.createModel(
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomFrom(AzureAiStudioProvider.values()),
            randomFrom(AzureAiStudioEndpointType.values()),
            randomAlphaOfLength(10),
            randomNonNegativeInt(),
            randomBoolean(),
            randomNonNegativeInt(),
            randomFrom(SimilarityMeasure.values()),
            randomAlphaOfLength(10),
            RateLimitSettingsTests.createRandom()
        );
        assertThrows(ElasticsearchStatusException.class, () -> { service.updateModelWithChatCompletionDetails(model); });
    }
}
/**
 * NOTE(review): despite the "Similarity" name, the delegated parameter is maxNewTokens —
 * the name looks copied from the embedding tests; consider renaming.
 */
public void testUpdateModelWithChatCompletionDetails_NullSimilarityInOriginalModel() throws IOException {
    testUpdateModelWithChatCompletionDetails_Successful(null);
}
/**
 * NOTE(review): despite the "Similarity" name, the delegated parameter is maxNewTokens —
 * the name looks copied from the embedding tests; consider renaming.
 */
public void testUpdateModelWithChatCompletionDetails_NonNullSimilarityInOriginalModel() throws IOException {
    testUpdateModelWithChatCompletionDetails_Successful(randomNonNegativeInt());
}
/**
 * Shared driver: builds a random chat completion model with the given maxNewTokens (possibly null) and
 * checks that updateModelWithChatCompletionDetails defaults null maxNewTokens to DEFAULT_MAX_NEW_TOKENS.
 */
private void testUpdateModelWithChatCompletionDetails_Successful(Integer maxNewTokens) throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        var model = AzureAiStudioChatCompletionModelTests.createModel(
            randomAlphaOfLength(10),
            randomAlphaOfLength(10),
            randomFrom(AzureAiStudioProvider.values()),
            randomFrom(AzureAiStudioEndpointType.values()),
            randomAlphaOfLength(10),
            randomDouble(),
            randomDouble(),
            randomBoolean(),
            maxNewTokens,
            RateLimitSettingsTests.createRandom()
        );
        Model updatedModel = service.updateModelWithChatCompletionDetails(model);
        assertThat(updatedModel, instanceOf(AzureAiStudioChatCompletionModel.class));
        AzureAiStudioChatCompletionTaskSettings updatedTaskSettings = (AzureAiStudioChatCompletionTaskSettings) updatedModel
            .getTaskSettings();
        // Null maxNewTokens is expected to be replaced by the task-settings default.
        Integer expectedMaxNewTokens = maxNewTokens == null
            ? AzureAiStudioChatCompletionTaskSettings.DEFAULT_MAX_NEW_TOKENS
            : maxNewTokens;
        assertEquals(expectedMaxNewTokens, updatedTaskSettings.maxNewTokens());
    }
}
/**
 * Inferring with a model that is not an Azure AI Studio model must fail with an
 * {@link ElasticsearchStatusException} naming the offending service and model id,
 * and the sender must be closed when the service is closed.
 */
public void testInfer_ThrowsErrorWhenModelIsNotAzureAiStudioModel() throws IOException {
    var sender = createMockSender();
    var factory = mock(HttpRequestSender.Factory.class);
    when(factory.createSender()).thenReturn(sender);
    var mockModel = getInvalidModel("model_id", "service_name");
    try (var service = new AzureAiStudioService(factory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
        service.infer(
            mockModel,
            null,
            null,
            null,
            List.of(""),
            false,
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            listener
        );
        var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT));
        assertThat(
            thrownException.getMessage(),
            is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.")
        );
        // The sender is created and started even though the request ultimately fails.
        verify(factory, times(1)).createSender();
        verify(sender, times(1)).startAsynchronously(any());
    }
    // Outside the try: closing the service must close the sender, and nothing else may
    // have touched the mocks.
    verify(sender, times(1)).close();
    verifyNoMoreInteractions(factory);
    verifyNoMoreInteractions(sender);
}
/**
 * {@code InputType.CLASSIFICATION} is not supported by this service: infer must fail with a
 * {@link ValidationException} before any model-type validation, and the sender must still be
 * closed when the service is closed.
 */
public void testInfer_ThrowsValidationErrorForInvalidInputType() throws IOException {
    var sender = createMockSender();
    var factory = mock(HttpRequestSender.Factory.class);
    when(factory.createSender()).thenReturn(sender);
    var mockModel = getInvalidModel("model_id", "service_name");
    try (var service = new AzureAiStudioService(factory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
        service.infer(
            mockModel,
            null,
            null,
            null,
            List.of(""),
            false,
            new HashMap<>(),
            InputType.CLASSIFICATION,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            listener
        );
        var thrownException = expectThrows(ValidationException.class, () -> listener.actionGet(TIMEOUT));
        assertThat(
            thrownException.getMessage(),
            is("Validation Failed: 1: Input type [classification] is not supported for [Azure AI Studio];")
        );
        verify(factory, times(1)).createSender();
        verify(sender, times(1)).startAsynchronously(any());
    }
    // Closing the service closes the sender; no other interactions are expected.
    verify(sender, times(1)).close();
    verifyNoMoreInteractions(factory);
    verifyNoMoreInteractions(sender);
}
/**
 * Chunked inference must work when explicit chunking settings are configured on the
 * embeddings model; delegates the shared assertions to {@link #testChunkedInfer}.
 */
public void testChunkedInfer_ChunkingSettingsSet() throws IOException {
    testChunkedInfer(
        AzureAiStudioEmbeddingsModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.OPENAI,
            AzureAiStudioEndpointType.TOKEN,
            createRandomChunkingSettings(),
            "apikey",
            null,
            false,
            null,
            null,
            "user",
            null
        )
    );
}
/**
 * Chunked inference must also work when no chunking settings are configured (null), i.e.
 * with the service's default chunking; shared assertions live in {@link #testChunkedInfer}.
 */
public void testChunkedInfer_ChunkingSettingsNotSet() throws IOException {
    testChunkedInfer(
        AzureAiStudioEmbeddingsModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.OPENAI,
            AzureAiStudioEndpointType.TOKEN,
            null,
            "apikey",
            null,
            false,
            null,
            null,
            "user",
            null
        )
    );
}
/**
 * Runs a chunked-inference request for the given embeddings model against the mock web
 * server and verifies:
 * <ul>
 *   <li>the response is split into one {@link ChunkedInferenceEmbedding} per input,</li>
 *   <li>each result has a single chunk with the expected text offset and float values,</li>
 *   <li>the single outbound HTTP request carries the expected headers and JSON body.</li>
 * </ul>
 */
private void testChunkedInfer(AzureAiStudioEmbeddingsModel model) throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        // Canned OpenAI-style embeddings response: one embedding per input ("a" and "bb").
        String responseJson = """
            {
                "object": "list",
                "data": [
                    {
                        "object": "embedding",
                        "index": 0,
                        "embedding": [
                            0.0123,
                            -0.0123
                        ]
                    },
                    {
                        "object": "embedding",
                        "index": 1,
                        "embedding": [
                            1.0123,
                            -1.0123
                        ]
                    }
                ],
                "model": "text-embedding-ada-002-v2",
                "usage": {
                    "prompt_tokens": 8,
                    "total_tokens": 8
                }
            }
            """;
        webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
        PlainActionFuture<List<ChunkedInference>> listener = new PlainActionFuture<>();
        service.chunkedInfer(
            model,
            null,
            List.of(new ChunkInferenceInput("a"), new ChunkInferenceInput("bb")),
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            listener
        );
        var results = listener.actionGet(TIMEOUT);
        assertThat(results, hasSize(2));
        {
            // First input "a": one chunk covering offsets [0, 1) with the first embedding.
            assertThat(results.get(0), CoreMatchers.instanceOf(ChunkedInferenceEmbedding.class));
            var floatResult = (ChunkedInferenceEmbedding) results.get(0);
            assertThat(floatResult.chunks(), hasSize(1));
            assertEquals(new ChunkedInference.TextOffset(0, 1), floatResult.chunks().get(0).offset());
            assertThat(floatResult.chunks().get(0).embedding(), instanceOf(DenseEmbeddingFloatResults.Embedding.class));
            assertArrayEquals(
                new float[] { 0.0123f, -0.0123f },
                ((DenseEmbeddingFloatResults.Embedding) floatResult.chunks().get(0).embedding()).values(),
                0.0f
            );
        }
        {
            // Second input "bb": one chunk covering offsets [0, 2) with the second embedding.
            assertThat(results.get(1), CoreMatchers.instanceOf(ChunkedInferenceEmbedding.class));
            var floatResult = (ChunkedInferenceEmbedding) results.get(1);
            assertThat(floatResult.chunks(), hasSize(1));
            assertEquals(new ChunkedInference.TextOffset(0, 2), floatResult.chunks().get(0).offset());
            assertThat(floatResult.chunks().get(0).embedding(), instanceOf(DenseEmbeddingFloatResults.Embedding.class));
            assertArrayEquals(
                new float[] { 1.0123f, -1.0123f },
                ((DenseEmbeddingFloatResults.Embedding) floatResult.chunks().get(0).embedding()).values(),
                0.0f
            );
        }
        // Exactly one outbound request: JSON content type, api-key header, and a 3-field
        // body with both inputs, the user, and input_type "document" (sent for INGEST).
        assertThat(webServer.requests(), hasSize(1));
        assertNull(webServer.requests().get(0).getUri().getQuery());
        assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType()));
        assertThat(webServer.requests().get(0).getHeader(API_KEY_HEADER), equalTo("apikey"));
        var requestMap = entityAsMap(webServer.requests().get(0).getBody());
        assertThat(requestMap.size(), Matchers.is(3));
        assertThat(requestMap.get("input"), Matchers.is(List.of("a", "bb")));
        assertThat(requestMap.get("user"), Matchers.is("user"));
        assertThat(requestMap.get("input_type"), Matchers.is("document"));
    }
}
/**
 * Happy-path completion inference: a chat-completion model pointed at the mock server must
 * produce a {@link ChatCompletionResults} containing the canned completion text.
 */
public void testInfer_WithChatCompletionModel() throws IOException {
    var requestSenderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(requestSenderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        // Canned response fixture defined at the bottom of this class.
        webServer.enqueue(new MockResponse().setResponseCode(200).setBody(testChatCompletionResultJson));
        var completionModel = AzureAiStudioChatCompletionModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.OPENAI,
            AzureAiStudioEndpointType.TOKEN,
            "apikey"
        );
        var resultFuture = new PlainActionFuture<InferenceServiceResults>();
        service.infer(
            completionModel,
            null,
            null,
            null,
            List.of("abc"),
            false,
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            resultFuture
        );
        var inferenceResult = resultFuture.actionGet(TIMEOUT);
        assertThat(inferenceResult, CoreMatchers.instanceOf(ChatCompletionResults.class));
        var completions = (ChatCompletionResults) inferenceResult;
        assertThat(completions.getResults().size(), is(1));
        assertThat(completions.getResults().get(0).content(), is("test completion content"));
    }
}
/**
 * Happy-path rerank inference: a Cohere-provider rerank model pointed at the mock server
 * must produce {@link RankedDocsResults} whose scores and indices match the canned
 * {@code testRerankTokenResponseJson} fixture.
 */
public void testInfer_WithRerankModel() throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        webServer.enqueue(new MockResponse().setResponseCode(200).setBody(testRerankTokenResponseJson));
        var model = AzureAiStudioRerankModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.COHERE,
            AzureAiStudioEndpointType.TOKEN,
            "apikey"
        );
        PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
        // Rerank-specific arguments: query string, returnDocuments=false, topN=2.
        service.infer(
            model,
            "query",
            false,
            2,
            List.of("abc"),
            false,
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            listener
        );
        var result = listener.actionGet(TIMEOUT);
        assertThat(result, CoreMatchers.instanceOf(RankedDocsResults.class));
        var rankedDocsResults = (RankedDocsResults) result;
        var rankedDocs = rankedDocsResults.getRankedDocs();
        // Scores/indices come straight from the canned fixture.
        assertThat(rankedDocs.size(), is(2));
        assertThat(rankedDocs.get(0).relevanceScore(), is(0.1111111F));
        assertThat(rankedDocs.get(0).index(), is(0));
        assertThat(rankedDocs.get(1).relevanceScore(), is(0.2222222F));
        assertThat(rankedDocs.get(1).index(), is(1));
    }
}
/**
 * A 401 from the provider must surface as an {@link ElasticsearchException} whose message
 * identifies it as an authentication error and includes the provider's error message.
 */
public void testInfer_UnauthorisedResponse() throws IOException {
    var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(senderFactory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        // OpenAI-style error payload returned alongside the 401 status.
        String responseJson = """
            {
                "error": {
                    "message": "Incorrect API key provided:",
                    "type": "invalid_request_error",
                    "param": null,
                    "code": "invalid_api_key"
                }
            }
            """;
        webServer.enqueue(new MockResponse().setResponseCode(401).setBody(responseJson));
        var model = AzureAiStudioEmbeddingsModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.OPENAI,
            AzureAiStudioEndpointType.TOKEN,
            "apikey",
            null,
            false,
            null,
            null,
            "user",
            null
        );
        PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
        service.infer(
            model,
            null,
            null,
            null,
            List.of("abc"),
            false,
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            listener
        );
        var error = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT));
        assertThat(error.getMessage(), containsString("Received an authentication error status code for request"));
        assertThat(error.getMessage(), containsString("Error message: [Incorrect API key provided:]"));
        // The failing request was still sent exactly once.
        assertThat(webServer.requests(), hasSize(1));
    }
}
/**
 * Streaming completion: a single server-sent event carrying one delta must be surfaced as a
 * completion event with that delta and no errors.
 */
public void testInfer_StreamRequest() throws Exception {
    // The trailing backslashes are text-block line continuations: they join the physical
    // lines into a single "data: {...}" SSE line, exactly as a streaming server would
    // deliver it. Do not reformat.
    String responseJson = """
        data: {\
        "id":"12345",\
        "object":"chat.completion.chunk",\
        "created":123456789,\
        "model":"gpt-4o-mini",\
        "system_fingerprint": "123456789",\
        "choices":[\
        {\
        "index":0,\
        "delta":{\
        "content":"hello, world"\
        },\
        "logprobs":null,\
        "finish_reason":null\
        }\
        ]\
        }
        """;
    webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
    streamChatCompletion().hasNoErrors().hasEvent("""
        {"completion":[{"delta":"hello, world"}]}""");
}
/**
 * Issues a streaming completion request ("abc") against the mock server with a
 * chat-completion model and returns an assertion handle over the finished event stream.
 */
private InferenceEventsAssertion streamChatCompletion() throws Exception {
    var factory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
    try (var service = new AzureAiStudioService(factory, createWithEmptySettings(threadPool), mockClusterServiceEmpty())) {
        var completionModel = AzureAiStudioChatCompletionModelTests.createModel(
            "id",
            getUrl(webServer),
            AzureAiStudioProvider.OPENAI,
            AzureAiStudioEndpointType.TOKEN,
            "apikey"
        );
        var resultFuture = new PlainActionFuture<InferenceServiceResults>();
        // stream=true is the only difference from the non-streaming infer calls above.
        service.infer(
            completionModel,
            null,
            null,
            null,
            List.of("abc"),
            true,
            new HashMap<>(),
            InputType.INGEST,
            InferenceAction.Request.DEFAULT_TIMEOUT,
            resultFuture
        );
        return InferenceEventsAssertion.assertThat(resultFuture.actionGet(TIMEOUT)).hasFinishedStream();
    }
}
/**
 * A 401 on a streaming request must fail fast with an {@link ElasticsearchStatusException}
 * that names the inference entity id, the status, and the provider's error message.
 */
public void testInfer_StreamRequest_ErrorResponse() throws Exception {
    String responseJson = """
        {
            "error": {
                "message": "You didn't provide an API key...",
                "type": "invalid_request_error",
                "param": null,
                "code": null
            }
        }""";
    webServer.enqueue(new MockResponse().setResponseCode(401).setBody(responseJson));
    var e = assertThrows(ElasticsearchStatusException.class, this::streamChatCompletion);
    assertThat(
        e.getMessage(),
        equalTo(
            "Received an authentication error status code for request from inference entity id [id] status [401]. "
                + "Error message: [You didn't provide an API key...]"
        )
    );
}
@SuppressWarnings("checkstyle:LineLength")
/**
 * The service's advertised configuration (task types and per-field metadata) must match the
 * expected JSON exactly, modulo field order — compared via shuffled-XContent equivalence.
 */
public void testGetConfiguration() throws Exception {
    try (var service = createService()) {
        String content = XContentHelper.stripWhitespace(
            """
                {
                       "service": "azureaistudio",
                       "name": "Azure AI Studio",
                       "task_types": ["text_embedding", "rerank", "completion"],
                       "configurations": {
                           "dimensions": {
                               "description": "The number of dimensions the resulting embeddings should have. For more information refer to https://learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-embeddings.",
                               "label": "Dimensions",
                               "required": false,
                               "sensitive": false,
                               "updatable": false,
                               "type": "int",
                               "supported_task_types": ["text_embedding"]
                           },
                           "endpoint_type": {
                               "description": "Specifies the type of endpoint that is used in your model deployment.",
                               "label": "Endpoint Type",
                               "required": true,
                               "sensitive": false,
                               "updatable": false,
                               "type": "str",
                               "supported_task_types": ["text_embedding", "rerank", "completion"]
                           },
                           "provider": {
                               "description": "The model provider for your deployment.",
                               "label": "Provider",
                               "required": true,
                               "sensitive": false,
                               "updatable": false,
                               "type": "str",
                               "supported_task_types": ["text_embedding", "rerank", "completion"]
                           },
                           "api_key": {
                               "description": "API Key for the provider you're connecting to.",
                               "label": "API Key",
                               "required": true,
                               "sensitive": true,
                               "updatable": true,
                               "type": "str",
                               "supported_task_types": ["text_embedding", "rerank", "completion"]
                           },
                           "rate_limit.requests_per_minute": {
                               "description": "Minimize the number of rate limit errors.",
                               "label": "Rate Limit",
                               "required": false,
                               "sensitive": false,
                               "updatable": false,
                               "type": "int",
                               "supported_task_types": ["text_embedding", "rerank", "completion"]
                           },
                           "target": {
                               "description": "The target URL of your Azure AI Studio model deployment.",
                               "label": "Target",
                               "required": true,
                               "sensitive": false,
                               "updatable": false,
                               "type": "str",
                               "supported_task_types": ["text_embedding", "rerank", "completion"]
                           }
                       }
                   }
                """
        );
        InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes(
            new BytesArray(content),
            XContentType.JSON
        );
        boolean humanReadable = true;
        // Shuffle the expected content so the comparison is order-insensitive.
        BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
        InferenceServiceConfiguration serviceConfiguration = service.getConfiguration();
        assertToXContentEquivalent(
            originalBytes,
            toXContent(serviceConfiguration, XContentType.JSON, humanReadable),
            XContentType.JSON
        );
    }
}
/** Streaming is supported for completion only; the catch-all ANY task type must not stream. */
public void testSupportsStreaming() throws IOException {
    try (var service = new AzureAiStudioService(mock(), createWithEmptySettings(mock()), mockClusterServiceEmpty())) {
        assertFalse(service.canStream(TaskType.ANY));
        assertThat(service.supportedStreamingTasks(), is(EnumSet.of(TaskType.COMPLETION)));
    }
}
// ----------------------------------------------------------------
/**
 * Builds a service instance with a mocked sender factory and empty settings — suitable for
 * tests that never perform real HTTP traffic.
 */
private AzureAiStudioService createService() {
    var senderFactory = mock(HttpRequestSender.Factory.class);
    var serviceComponents = createWithEmptySettings(threadPool);
    return new AzureAiStudioService(senderFactory, serviceComponents, mockClusterServiceEmpty());
}
/** Base-class hook: supplies the service under test for the shared test-suite checks. */
@Override
public InferenceService createInferenceService() {
    return createService();
}

/** The reranker window size is a fixed 300 regardless of the model name. */
@Override
protected void assertRerankerWindowSize(RerankingInferenceService rerankingInferenceService) {
    assertThat(rerankingInferenceService.rerankerWindowSize("Any model"), is(300));
}
/**
 * Builds a request-config map including chunking settings, by extending the three-argument
 * overload's result with a {@code chunking_settings} entry.
 */
private Map<String, Object> getRequestConfigMap(
    Map<String, Object> serviceSettings,
    Map<String, Object> taskSettings,
    Map<String, Object> chunkingSettings,
    Map<String, Object> secretSettings
) {
    var config = getRequestConfigMap(serviceSettings, taskSettings, secretSettings);
    config.put(ModelConfigurations.CHUNKING_SETTINGS, chunkingSettings);
    return config;
}
/**
 * Builds a request-config map: secret settings are folded into the service settings (as the
 * request parser expects), task settings stay separate. The result is mutable so callers
 * can add further entries.
 */
private Map<String, Object> getRequestConfigMap(
    Map<String, Object> serviceSettings,
    Map<String, Object> taskSettings,
    Map<String, Object> secretSettings
) {
    var combinedServiceSettings = new HashMap<String, Object>();
    combinedServiceSettings.putAll(serviceSettings);
    combinedServiceSettings.putAll(secretSettings);
    var config = new HashMap<String, Object>();
    config.put(ModelConfigurations.SERVICE_SETTINGS, combinedServiceSettings);
    config.put(ModelConfigurations.TASK_SETTINGS, taskSettings);
    return config;
}
/**
 * Builds the embeddings service-settings request map; thin delegation to the settings
 * test helper. Nullable parameters are passed through as provided.
 */
private static Map<String, Object> getEmbeddingsServiceSettingsMap(
    String target,
    String provider,
    String endpointType,
    @Nullable Integer dimensions,
    @Nullable Boolean dimensionsSetByUser,
    @Nullable Integer maxTokens,
    @Nullable SimilarityMeasure similarityMeasure
) {
    return AzureAiStudioEmbeddingsServiceSettingsTests.createRequestSettingsMap(
        target,
        provider,
        endpointType,
        dimensions,
        dimensionsSetByUser,
        maxTokens,
        similarityMeasure
    );
}
/** Embeddings task-settings map; {@code user} may be null. Delegates to the settings tests. */
private static Map<String, Object> getEmbeddingsTaskSettingsMap(@Nullable String user) {
    return AzureAiStudioEmbeddingsTaskSettingsTests.getTaskSettingsMap(user);
}

/** Chat-completion service-settings map. Delegates to the settings tests. */
private static HashMap<String, Object> getChatCompletionServiceSettingsMap(String target, String provider, String endpointType) {
    return AzureAiStudioChatCompletionServiceSettingsTests.createRequestSettingsMap(target, provider, endpointType);
}

/** Rerank service-settings map. Delegates to the settings tests. */
private static HashMap<String, Object> getRerankServiceSettingsMap(String target, String provider, String endpointType) {
    return AzureAiStudioRerankServiceSettingsTests.createRequestSettingsMap(target, provider, endpointType);
}

/** Chat-completion task-settings map; every parameter may be null. Delegates to the settings tests. */
public static Map<String, Object> getChatCompletionTaskSettingsMap(
    @Nullable Double temperature,
    @Nullable Double topP,
    @Nullable Boolean doSample,
    @Nullable Integer maxNewTokens
) {
    return AzureAiStudioChatCompletionTaskSettingsTests.getTaskSettingsMap(temperature, topP, doSample, maxNewTokens);
}

/** Rerank task-settings map; both parameters may be null. Delegates to the settings tests. */
public static Map<String, Object> getRerankTaskSettingsMap(@Nullable Boolean returnDocuments, @Nullable Integer topN) {
    return AzureAiStudioRerankTaskSettingsTests.getTaskSettingsMap(returnDocuments, topN);
}
/** Mutable secrets map with the single api-key entry; {@code apiKey} must be non-null. */
private static Map<String, Object> getSecretSettingsMap(String apiKey) {
    Map<String, Object> secrets = Map.of(API_KEY_FIELD, apiKey);
    return new HashMap<>(secrets);
}
// OpenAI-style embeddings response fixture: a single two-component embedding.
private static final String testEmbeddingResultJson = """
    {
        "object": "list",
        "data": [
            {
                "object": "embedding",
                "index": 0,
                "embedding": [
                    0.0123,
                    -0.0123
                ]
            }
        ],
        "model": "text-embedding-ada-002-v2",
        "usage": {
            "prompt_tokens": 8,
            "total_tokens": 8
        }
    }
    """;

// Chat-completion response fixture: one choice with content "test completion content".
private static final String testChatCompletionResultJson = """
    {
        "choices": [
            {
                "finish_reason": "stop",
                "index": 0,
                "message": {
                    "content": "test completion content",
                    "role": "assistant",
                    "tool_calls": null
                }
            }
        ],
        "created": 1714006424,
        "id": "f92b5b4d-0de3-4152-a3c6-5aae8a74555c",
        "model": "",
        "object": "chat.completion",
        "usage": {
            "completion_tokens": 35,
            "prompt_tokens": 8,
            "total_tokens": 43
        }
    }
    """;

// Cohere-style rerank response fixture: two ranked documents (scores 0.1111111, 0.2222222).
private static final String testRerankTokenResponseJson = """
    {
        "id": "ff2feb42-5d3a-45d7-ba29-c3dabf59988b",
        "results": [
            {
                "index": 0,
                "relevance_score": 0.1111111
            },
            {
                "index": 1,
                "relevance_score": 0.2222222
            }
        ],
        "meta": {
            "api_version": {
                "version": "1"
            },
            "billed_units": {
                "search_units": 1
            }
        }
    }
    """;
}
| AzureAiStudioServiceTests |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/RequestLeakDetectionTest.java | {
"start": 11161,
"end": 11505
} | class ____ {
private int value = -1;
public void setValue(int v) {
if (value != -1) {
throw new IllegalStateException("Already initialized");
}
value = v;
}
public int getValue() {
return value;
}
}
public static | MyRequestScopeBean |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-rest-data-panache/deployment/src/test/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/openapi/AbstractItem.java | {
"start": 306,
"end": 654
} | class ____<IdType extends Number> extends AbstractEntity<IdType> {
private String name;
@ManyToOne(optional = false)
@JsonProperty(access = Access.WRITE_ONLY)
private Collection collection;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| AbstractItem |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/FluxPublishOnStressTest.java | {
"start": 1211,
"end": 1413
} | class ____ {
@JCStressTest
@Outcome(id = {"0, 1", "0, 0"}, expect = ACCEPTABLE, desc = "no errors propagated after cancellation because of disposed worker")
@State
public static | FluxPublishOnStressTest |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java | {
"start": 16568,
"end": 17777
} | class ____'t be used with version " + version);
}
}
private ByteBuffer encode() {
final byte[] endPointBytes = LegacySubscriptionInfoSerde.prepareUserEndPoint(userEndPoint);
final ByteBuffer buf = ByteBuffer.allocate(
4 + // used version
4 + // latest supported version version
16 + // client ID
4 + activeTasks.size() * 8 + // length + active tasks
4 + standbyTasks.size() * 8 + // length + standby tasks
4 + endPointBytes.length + // length + endpoint
4 + //uniqueField
4 //assignment error code
);
buf.putInt(version); // used version
buf.putInt(version); // supported version
LegacySubscriptionInfoSerde.encodeClientUUID(buf, processId);
LegacySubscriptionInfoSerde.encodeTasks(buf, activeTasks, version);
LegacySubscriptionInfoSerde.encodeTasks(buf, standbyTasks, version);
LegacySubscriptionInfoSerde.encodeUserEndPoint(buf, endPointBytes);
buf.rewind();
return buf;
}
}
private static | can |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/graph/entity/internal/BatchEntitySelectFetchInitializer.java | {
"start": 951,
"end": 1284
} | class ____ extends AbstractBatchEntitySelectFetchInitializer<BatchEntitySelectFetchInitializer.BatchEntitySelectFetchInitializerData> {
protected final AttributeMapping[] parentAttributes;
protected final Setter referencedModelPartSetter;
protected final Type referencedModelPartType;
public static | BatchEntitySelectFetchInitializer |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/resource/basic/MediaTypeNegotiationClientQualityTest.java | {
"start": 2253,
"end": 3895
} | class ____ {
@GET
public Object nothing() {
return new Object();
}
}
private static Client client;
private static final String DEP = "MediaTypeNegotiationClientQualityTest";
@RegisterExtension
static ResteasyReactiveUnitTest testExtension = new ResteasyReactiveUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
JavaArchive war = ShrinkWrap.create(JavaArchive.class);
war.addClasses(PortProviderUtil.class, CustomMessageBodyWriter1.class, Resource.class);
return war;
}
});
@BeforeAll
public static void setup() {
client = ClientBuilder.newClient();
}
@AfterAll
public static void cleanup() {
client.close();
}
private String generateURL() {
return PortProviderUtil.generateBaseUrl();
}
@Test
@DisplayName("Test Client Quality")
public void testClientQuality() throws Exception {
Invocation.Builder request = client.target(generateURL()).path("echo").request("application/x;q=0.7",
"application/y;q=0.9");
Response response = request.get();
try {
Assertions.assertEquals(Status.OK.getStatusCode(), response.getStatus());
MediaType mediaType = response.getMediaType();
Assertions.assertEquals(mediaType.getType(), "application");
Assertions.assertEquals(mediaType.getSubtype(), "y");
} finally {
response.close();
}
}
}
| Resource |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/AbstractPropertyAccessorTests.java | {
"start": 68301,
"end": 68517
} | class ____ {
private int[] array;
public int[] getArray() {
return array;
}
public void setArray(int[] array) {
this.array = array;
}
}
@SuppressWarnings("unused")
private static | PrimitiveArrayBean |
java | micronaut-projects__micronaut-core | discovery-core/src/main/java/io/micronaut/discovery/cloud/ComputeInstanceMetadataResolver.java | {
"start": 848,
"end": 1172
} | interface ____ {
/**
* Resolves {@link ComputeInstanceMetadata} for the current environment if possible.
*
* @param environment The environment
* @return The {@link ComputeInstanceMetadata}
*/
Optional<ComputeInstanceMetadata> resolve(Environment environment);
}
| ComputeInstanceMetadataResolver |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueJava8Test.java | {
"start": 39171,
"end": 39582
} | class ____'t know what the actual type argument is, so it can't know whether
// it is @Nullable. Because of the @Nullable bound, it omits an explicit null check, under the
// assumption that some static-checking framework is validating type uses.
NullableBound<@Nullable String> x = NullableBound.create(null);
assertThat(x.maybeNullable()).isNull();
}
@AutoValue
public abstract static | doesn |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java | {
"start": 4495,
"end": 69692
} | class ____ {
protected final int defaultRequestTimeoutMs = 1000;
protected final MockTime time = new MockTime();
protected final MockSelector selector = new MockSelector(time);
protected final Node node = TestUtils.singletonCluster().nodes().iterator().next();
protected final long reconnectBackoffMsTest = 10 * 1000;
protected final long reconnectBackoffMaxMsTest = 10 * 10000;
protected final long connectionSetupTimeoutMsTest = 5 * 1000;
protected final long connectionSetupTimeoutMaxMsTest = 127 * 1000;
private final int reconnectBackoffExpBase = ClusterConnectionStates.RECONNECT_BACKOFF_EXP_BASE;
private final double reconnectBackoffJitter = ClusterConnectionStates.RECONNECT_BACKOFF_JITTER;
private final TestMetadataUpdater metadataUpdater = new TestMetadataUpdater(Collections.singletonList(node));
private final NetworkClient client = createNetworkClient(reconnectBackoffMaxMsTest);
private final NetworkClient clientWithNoExponentialBackoff = createNetworkClient(reconnectBackoffMsTest);
private final NetworkClient clientWithStaticNodes = createNetworkClientWithStaticNodes();
private final NetworkClient clientWithNoVersionDiscovery = createNetworkClientWithNoVersionDiscovery();
private static ArrayList<InetAddress> initialAddresses;
private static ArrayList<InetAddress> newAddresses;
static {
try {
initialAddresses = new ArrayList<>(Arrays.asList(
InetAddress.getByName("10.200.20.100"),
InetAddress.getByName("10.200.20.101"),
InetAddress.getByName("10.200.20.102")
));
newAddresses = new ArrayList<>(Arrays.asList(
InetAddress.getByName("10.200.20.103"),
InetAddress.getByName("10.200.20.104"),
InetAddress.getByName("10.200.20.105")
));
} catch (UnknownHostException e) {
fail("Attempted to create an invalid InetAddress, this should not happen");
}
}
private NetworkClient createNetworkClient(long reconnectBackoffMaxMs) {
return new NetworkClient(selector, metadataUpdater, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMs, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
private NetworkClient createNetworkClientWithMaxInFlightRequestsPerConnection(
int maxInFlightRequestsPerConnection, long reconnectBackoffMaxMs) {
return new NetworkClient(selector, metadataUpdater, "mock", maxInFlightRequestsPerConnection,
reconnectBackoffMsTest, reconnectBackoffMaxMs, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
private NetworkClient createNetworkClientWithMultipleNodes(long reconnectBackoffMaxMs, long connectionSetupTimeoutMsTest, int nodeNumber) {
List<Node> nodes = TestUtils.clusterWith(nodeNumber).nodes();
TestMetadataUpdater metadataUpdater = new TestMetadataUpdater(nodes);
return new NetworkClient(selector, metadataUpdater, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMs, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
private NetworkClient createNetworkClientWithStaticNodes() {
return new NetworkClient(selector, metadataUpdater,
"mock-static", Integer.MAX_VALUE, 0, 0, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs,
connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
private NetworkClient createNetworkClientWithNoVersionDiscovery(Metadata metadata) {
return new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, 0, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
private NetworkClient createNetworkClientWithNoVersionDiscovery() {
return new NetworkClient(selector, metadataUpdater, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMsTest,
64 * 1024, 64 * 1024, defaultRequestTimeoutMs,
connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(),
MetadataRecoveryStrategy.NONE);
}
@BeforeEach
public void setup() {
selector.reset();
}
@Test
public void testSendToUnreadyNode() {
MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.singletonList("test"), true);
long now = time.milliseconds();
ClientRequest request = client.newClientRequest("5", builder, now, false);
assertThrows(IllegalStateException.class, () -> client.send(request, now));
}
@Test
public void testSimpleRequestResponse() {
checkSimpleRequestResponse(client);
}
@Test
public void testSimpleRequestResponseWithStaticNodes() {
checkSimpleRequestResponse(clientWithStaticNodes);
}
@Test
public void testSimpleRequestResponseWithNoBrokerDiscovery() {
checkSimpleRequestResponse(clientWithNoVersionDiscovery);
}
@Test
public void testDnsLookupFailure() {
/* Fail cleanly when the node has a bad hostname */
assertFalse(client.ready(new Node(1234, "badhost", 1234), time.milliseconds()));
}
@Test
public void testClose() {
client.ready(node, time.milliseconds());
awaitReady(client, node);
client.poll(1, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()), "The client should be ready");
ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks((short) 1)
.setTimeoutMs(1000));
ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
client.send(request, time.milliseconds());
assertEquals(1, client.inFlightRequestCount(node.idString()),
"There should be 1 in-flight request after send");
assertTrue(client.hasInFlightRequests(node.idString()));
assertTrue(client.hasInFlightRequests());
client.close(node.idString());
assertEquals(0, client.inFlightRequestCount(node.idString()), "There should be no in-flight request after close");
assertFalse(client.hasInFlightRequests(node.idString()));
assertFalse(client.hasInFlightRequests());
assertFalse(client.isReady(node, 0), "Connection should not be ready after close");
}
@Test
public void testUnsupportedVersionDuringInternalMetadataRequest() {
List<String> topics = Collections.singletonList("topic_1");
// disabling auto topic creation for versions less than 4 is not supported
MetadataRequest.Builder builder = new MetadataRequest.Builder(topics, false, (short) 3);
client.sendInternalMetadataRequest(builder, node.idString(), time.milliseconds());
assertEquals(UnsupportedVersionException.class, metadataUpdater.getAndClearFailure().getClass());
}
@Test
public void testRebootstrap() {
long rebootstrapTriggerMs = 1000;
AtomicInteger rebootstrapCount = new AtomicInteger();
Metadata metadata = new Metadata(50, 50, 5000, new LogContext(), new ClusterResourceListeners()) {
@Override
public synchronized void rebootstrap() {
super.rebootstrap();
rebootstrapCount.incrementAndGet();
}
};
NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, 0, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(),
rebootstrapTriggerMs,
MetadataRecoveryStrategy.REBOOTSTRAP);
MetadataUpdater metadataUpdater = TestUtils.fieldValue(client, NetworkClient.class, "metadataUpdater");
metadata.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9999)));
metadata.requestUpdate(true);
client.poll(0, time.milliseconds());
time.sleep(rebootstrapTriggerMs + 1);
client.poll(0, time.milliseconds());
assertEquals(1, rebootstrapCount.get());
time.sleep(1);
client.poll(0, time.milliseconds());
assertEquals(1, rebootstrapCount.get());
metadata.requestUpdate(true);
client.poll(0, time.milliseconds());
assertEquals(1, rebootstrapCount.get());
metadataUpdater.handleFailedRequest(time.milliseconds(), Optional.of(new KafkaException()));
client.poll(0, time.milliseconds());
assertEquals(1, rebootstrapCount.get());
time.sleep(rebootstrapTriggerMs);
client.poll(0, time.milliseconds());
assertEquals(2, rebootstrapCount.get());
metadata.requestUpdate(true);
client.poll(0, time.milliseconds());
assertEquals(2, rebootstrapCount.get());
MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.emptyList(), true);
ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
MetadataResponse rebootstrapResponse = (MetadataResponse) builder.build().getErrorResponse(0, new RebootstrapRequiredException("rebootstrap"));
metadataUpdater.handleSuccessfulResponse(request.makeHeader(builder.latestAllowedVersion()), time.milliseconds(), rebootstrapResponse);
assertEquals(2, rebootstrapCount.get());
time.sleep(50);
client.poll(0, time.milliseconds());
assertEquals(3, rebootstrapCount.get());
}
@Test
public void testInflightRequestsDuringRebootstrap() {
    // Verifies that when the rebootstrap trigger fires, any requests still in flight are
    // aborted with a disconnection and every existing broker connection is torn down.
    long refreshBackoffMs = 50;
    long rebootstrapTriggerMs = 1000;
    int defaultRequestTimeoutMs = 5000;
    AtomicInteger rebootstrapCount = new AtomicInteger();
    // Subclass Metadata purely to count how many times rebootstrap() is invoked.
    Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMs, 5000, new LogContext(), new ClusterResourceListeners()) {
        @Override
        public synchronized void rebootstrap() {
            super.rebootstrap();
            rebootstrapCount.incrementAndGet();
        }
    };
    metadata.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9999)));
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
            reconnectBackoffMsTest, 0, 64 * 1024, 64 * 1024,
            defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(),
            rebootstrapTriggerMs, MetadataRecoveryStrategy.REBOOTSTRAP);
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    List<Node> nodes = metadata.fetch().nodes();
    nodes.forEach(node -> {
        client.ready(node, time.milliseconds());
        awaitReady(client, node);
    });
    // Queue a request
    sendEmptyProduceRequest(client, nodes.get(0).idString());
    List<ClientResponse> responses = client.poll(0, time.milliseconds());
    assertEquals(0, responses.size());
    assertEquals(1, client.inFlightRequestCount());
    // Trigger rebootstrap
    metadata.requestUpdate(true);
    time.sleep(refreshBackoffMs);
    responses = client.poll(0, time.milliseconds());
    assertEquals(0, responses.size());
    // Both the produce request and the metadata request are now in flight.
    assertEquals(2, client.inFlightRequestCount());
    // Let the rebootstrap deadline elapse without a successful metadata update.
    time.sleep(rebootstrapTriggerMs + 1);
    responses = client.poll(0, time.milliseconds());
    // Verify that inflight produce request was aborted with disconnection
    assertEquals(1, responses.size());
    assertEquals(PRODUCE, responses.get(0).requestHeader().apiKey());
    assertTrue(responses.get(0).wasDisconnected());
    assertEquals(0, client.inFlightRequestCount());
    // Rebootstrap must have failed (closed) the connection to every known node.
    assertEquals(Collections.emptySet(), nodes.stream().filter(node -> !client.connectionFailed(node)).collect(Collectors.toSet()));
}
// Sends a produce request via the given client, feeds a canned response through the mock
// selector, and asserts the response is correlated back to the original request and that
// the callback handler ran.
private void checkSimpleRequestResponse(NetworkClient networkClient) {
    awaitReady(networkClient, node); // has to be before creating any request, as it may send ApiVersionsRequest and its response is mocked with correlation id 0
    short requestVersion = PRODUCE.latestVersion();
    ProduceRequest.Builder builder = new ProduceRequest.Builder(
        requestVersion,
        requestVersion,
        new ProduceRequestData()
            .setAcks((short) 1)
            .setTimeoutMs(1000));
    TestCallbackHandler handler = new TestCallbackHandler();
    ClientRequest request = networkClient.newClientRequest(node.idString(), builder, time.milliseconds(),
            true, defaultRequestTimeoutMs, handler);
    networkClient.send(request, time.milliseconds());
    networkClient.poll(1, time.milliseconds());
    assertEquals(1, networkClient.inFlightRequestCount());
    // Hand a serialized response to the selector as if the broker had replied.
    ProduceResponse produceResponse = new ProduceResponse(new ProduceResponseData());
    ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(produceResponse, requestVersion, request.correlationId());
    selector.completeReceive(new NetworkReceive(node.idString(), buffer));
    List<ClientResponse> responses = networkClient.poll(1, time.milliseconds());
    assertEquals(1, responses.size());
    assertTrue(handler.executed, "The handler should have executed.");
    assertTrue(handler.response.hasResponse(), "Should have a response body.");
    assertEquals(request.correlationId(), handler.response.requestHeader().correlationId(),
        "Should be correlated to the original request");
}
// Queues a serialized ApiVersionsResponse on the mock selector so that it is
// delivered to the client on a subsequent poll, keyed by the given correlation id.
private void delayedApiVersionsResponse(int correlationId, short version, ApiVersionsResponse response) {
    String destination = node.idString();
    ByteBuffer payload = RequestTestUtils.serializeResponseWithHeader(response, version, correlationId);
    NetworkReceive receive = new NetworkReceive(destination, payload);
    selector.delayedReceive(new DelayedReceive(destination, receive));
}
// Registers the ApiVersionsResponse for the very first request (correlation id 0),
// serialized at the maximum ApiVersions version that the response itself advertises.
private void setExpectedApiVersionsResponse(ApiVersionsResponse response) {
    short maxSupportedVersion = response.apiVersion(ApiKeys.API_VERSIONS.id).maxVersion();
    delayedApiVersionsResponse(0, maxSupportedVersion, response);
}
// Drives the given client until the connection to the target node is ready.
// When broker version discovery is enabled, the client will first send an
// ApiVersionsRequest, so a canned reply is queued for it up front.
private void awaitReady(NetworkClient networkClient, Node target) {
    if (networkClient.discoverBrokerVersions()) {
        setExpectedApiVersionsResponse(TestUtils.defaultApiVersionsResponse(
            ApiMessageType.ListenerType.BROKER));
    }
    for (;;) {
        if (networkClient.ready(target, time.milliseconds()))
            break;
        networkClient.poll(1, time.milliseconds());
    }
    selector.clear();
}
@Test
public void testInvalidApiVersionsRequest() {
    // An error in the ApiVersions handshake must leave the connection not ready.
    // initiate the connection
    client.ready(node, time.milliseconds());
    // handle the connection, send the ApiVersionsRequest
    client.poll(0, time.milliseconds());
    // check that the ApiVersionsRequest has been initiated
    assertTrue(client.hasInFlightRequests(node.idString()));
    // prepare response with an INVALID_REQUEST error code
    delayedApiVersionsResponse(0, ApiKeys.API_VERSIONS.latestVersion(),
        new ApiVersionsResponse(
            new ApiVersionsResponseData()
                .setErrorCode(Errors.INVALID_REQUEST.code())
                .setThrottleTimeMs(0)
        ));
    // handle completed receives
    client.poll(0, time.milliseconds());
    // the ApiVersionsRequest is gone
    assertFalse(client.hasInFlightRequests(node.idString()));
    // the failed handshake must not produce a ready connection
    assertFalse(client.isReady(node, time.milliseconds()));
}
@Test
public void testApiVersionsRequest() {
    // Happy path: a successful ApiVersions handshake makes the connection ready.
    // initiate the connection
    client.ready(node, time.milliseconds());
    // handle the connection, send the ApiVersionsRequest
    client.poll(0, time.milliseconds());
    // check that the ApiVersionsRequest has been initiated
    assertTrue(client.hasInFlightRequests(node.idString()));
    // prepare a successful response
    delayedApiVersionsResponse(0, ApiKeys.API_VERSIONS.latestVersion(), defaultApiVersionsResponse());
    // handle completed receives
    client.poll(0, time.milliseconds());
    // the ApiVersionsRequest is gone
    assertFalse(client.hasInFlightRequests(node.idString()));
    // the handshake completed, so the connection is usable
    assertTrue(client.isReady(node, time.milliseconds()));
}
@Test
public void testUnsupportedApiVersionsRequestWithVersionProvidedByTheBroker() {
    // When the broker rejects the client's ApiVersionsRequest version but advertises the
    // range it does support (0..2 here), the client must retry with the broker's maximum
    // supported version (2) and then complete the handshake.
    // initiate the connection
    client.ready(node, time.milliseconds());
    // handle the connection, initiate first ApiVersionsRequest
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest is in flight but not sent yet
    assertTrue(client.hasInFlightRequests(node.idString()));
    // completes initiated sends
    client.poll(0, time.milliseconds());
    assertEquals(1, selector.completedSends().size());
    ByteBuffer buffer = selector.completedSendBuffers().get(0).buffer();
    RequestHeader header = parseHeader(buffer);
    assertEquals(ApiKeys.API_VERSIONS, header.apiKey());
    // first attempt goes out at the client's latest ApiVersions version
    assertEquals(4, header.apiVersion());
    // prepare UNSUPPORTED_VERSION response that advertises versions 0..2
    ApiVersionCollection apiKeys = new ApiVersionCollection();
    apiKeys.add(new ApiVersion()
        .setApiKey(ApiKeys.API_VERSIONS.id)
        .setMinVersion((short) 0)
        .setMaxVersion((short) 2));
    delayedApiVersionsResponse(0, (short) 0,
        new ApiVersionsResponse(
            new ApiVersionsResponseData()
                .setErrorCode(Errors.UNSUPPORTED_VERSION.code())
                .setApiKeys(apiKeys)
        ));
    // handle ApiVersionResponse, initiate second ApiVersionRequest
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest is in flight but not sent yet
    assertTrue(client.hasInFlightRequests(node.idString()));
    // ApiVersionsResponse has been received
    assertEquals(1, selector.completedReceives().size());
    // clean up the buffers
    selector.completedSends().clear();
    selector.completedSendBuffers().clear();
    selector.completedReceives().clear();
    // completes initiated sends
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest has been sent
    assertEquals(1, selector.completedSends().size());
    buffer = selector.completedSendBuffers().get(0).buffer();
    header = parseHeader(buffer);
    assertEquals(ApiKeys.API_VERSIONS, header.apiKey());
    // retry uses the highest version the broker said it supports
    assertEquals(2, header.apiVersion());
    // prepare a successful response for the retry
    delayedApiVersionsResponse(1, (short) 0, defaultApiVersionsResponse());
    // handle completed receives
    client.poll(0, time.milliseconds());
    // the ApiVersionsRequest is gone
    assertFalse(client.hasInFlightRequests(node.idString()));
    assertEquals(1, selector.completedReceives().size());
    // the client is ready
    assertTrue(client.isReady(node, time.milliseconds()));
}
@Test
public void testUnsupportedApiVersionsRequestWithoutVersionProvidedByTheBroker() {
    // When the broker rejects the ApiVersionsRequest without advertising any supported
    // range, the client must fall back to version 0 on the retry.
    // initiate the connection
    client.ready(node, time.milliseconds());
    // handle the connection, initiate first ApiVersionsRequest
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest is in flight but not sent yet
    assertTrue(client.hasInFlightRequests(node.idString()));
    // completes initiated sends
    client.poll(0, time.milliseconds());
    assertEquals(1, selector.completedSends().size());
    ByteBuffer buffer = selector.completedSendBuffers().get(0).buffer();
    RequestHeader header = parseHeader(buffer);
    assertEquals(ApiKeys.API_VERSIONS, header.apiKey());
    // first attempt goes out at the client's latest ApiVersions version
    assertEquals(4, header.apiVersion());
    // prepare UNSUPPORTED_VERSION response with no advertised version range
    delayedApiVersionsResponse(0, (short) 0,
        new ApiVersionsResponse(
            new ApiVersionsResponseData()
                .setErrorCode(Errors.UNSUPPORTED_VERSION.code())
        ));
    // handle ApiVersionResponse, initiate second ApiVersionRequest
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest is in flight but not sent yet
    assertTrue(client.hasInFlightRequests(node.idString()));
    // ApiVersionsResponse has been received
    assertEquals(1, selector.completedReceives().size());
    // clean up the buffers
    selector.completedSends().clear();
    selector.completedSendBuffers().clear();
    selector.completedReceives().clear();
    // completes initiated sends
    client.poll(0, time.milliseconds());
    // ApiVersionsRequest has been sent
    assertEquals(1, selector.completedSends().size());
    buffer = selector.completedSendBuffers().get(0).buffer();
    header = parseHeader(buffer);
    assertEquals(ApiKeys.API_VERSIONS, header.apiKey());
    // with no range advertised, the retry falls back to version 0
    assertEquals(0, header.apiVersion());
    // prepare a successful response for the retry
    delayedApiVersionsResponse(1, (short) 0, defaultApiVersionsResponse());
    // handle completed receives
    client.poll(0, time.milliseconds());
    // the ApiVersionsRequest is gone
    assertFalse(client.hasInFlightRequests(node.idString()));
    assertEquals(1, selector.completedReceives().size());
    // the client is ready
    assertTrue(client.isReady(node, time.milliseconds()));
}
@Test
public void testRequestTimeout() {
    // Exercise the timeout path with a per-request timeout larger than the client default.
    int requestTimeoutMs = defaultRequestTimeoutMs + 5000;
    testRequestTimeout(requestTimeoutMs);
}
@Test
public void testDefaultRequestTimeout() {
    // Exercise the timeout path using exactly the client's default request timeout.
    final int requestTimeoutMs = defaultRequestTimeoutMs;
    testRequestTimeout(requestTimeoutMs);
}
/**
* This is a helper method that will execute two produce calls. The first call is expected to work and the
* second produce call is intentionally made to emulate a request timeout. In the case that a timeout occurs
* during a request, we want to ensure that we {@link Metadata#requestUpdate(boolean) request a metadata update} so that
* on a subsequent invocation of {@link NetworkClient#poll(long, long) poll}, the metadata request will be sent.
*
* <p/>
*
* The {@link MetadataUpdater} has a specific method to handle
* {@link NetworkClient.DefaultMetadataUpdater#handleServerDisconnect(long, String, Optional) server disconnects}
* which is where we {@link Metadata#requestUpdate(boolean) request a metadata update}. This test helper method ensures
* that is invoked by checking {@link Metadata#updateRequested()} after the simulated timeout.
*
* @param requestTimeoutMs Timeout in ms
*/
private void testRequestTimeout(int requestTimeoutMs) {
Metadata metadata = new Metadata(50, 50, 5000, new LogContext(), new ClusterResourceListeners());
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
NetworkClient client = createNetworkClientWithNoVersionDiscovery(metadata);
// Send first produce without any timeout.
ClientResponse clientResponse = produce(client, requestTimeoutMs, false);
assertEquals(node.idString(), clientResponse.destination());
assertFalse(clientResponse.wasDisconnected(), "Expected response to succeed and not disconnect");
assertFalse(clientResponse.wasTimedOut(), "Expected response to succeed and not time out");
assertFalse(metadata.updateRequested(), "Expected NetworkClient to not need to update metadata");
// Send second request, but emulate a timeout.
clientResponse = produce(client, requestTimeoutMs, true);
assertEquals(node.idString(), clientResponse.destination());
assertTrue(clientResponse.wasDisconnected(), "Expected response to fail due to disconnection");
assertTrue(clientResponse.wasTimedOut(), "Expected response to fail due to timeout");
assertTrue(metadata.updateRequested(), "Expected NetworkClient to have called requestUpdate on metadata on timeout");
}
private ClientResponse produce(NetworkClient client, int requestTimeoutMs, boolean shouldEmulateTimeout) {
awaitReady(client, node); // has to be before creating any request, as it may send ApiVersionsRequest and its response is mocked with correlation id 0
ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks((short) 1)
.setTimeoutMs(1000));
TestCallbackHandler handler = new TestCallbackHandler();
ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true,
requestTimeoutMs, handler);
client.send(request, time.milliseconds());
if (shouldEmulateTimeout) {
// For a delay of slightly more than our timeout threshold to emulate the request timing out.
time.sleep(requestTimeoutMs + 1);
} else {
ProduceResponse produceResponse = new ProduceResponse(new ProduceResponseData());
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(produceResponse, PRODUCE.latestVersion(), request.correlationId());
selector.completeReceive(new NetworkReceive(node.idString(), buffer));
}
List<ClientResponse> responses = client.poll(0, time.milliseconds());
assertEquals(1, responses.size());
return responses.get(0);
}
@Test
public void testConnectionSetupTimeout() {
// Use two nodes to ensure that the logic iterate over a set of more than one
// element. ConcurrentModificationException is not triggered otherwise.
final Cluster cluster = TestUtils.clusterWith(2);
final Node node0 = cluster.nodeById(0);
final Node node1 = cluster.nodeById(1);
client.ready(node0, time.milliseconds());
selector.serverConnectionBlocked(node0.idString());
client.ready(node1, time.milliseconds());
selector.serverConnectionBlocked(node1.idString());
client.poll(0, time.milliseconds());
assertFalse(client.connectionFailed(node),
"The connections should not fail before the socket connection setup timeout elapsed");
time.sleep((long) (connectionSetupTimeoutMsTest * 1.2) + 1);
client.poll(0, time.milliseconds());
assertTrue(client.connectionFailed(node),
"Expected the connections to fail due to the socket connection setup timeout");
}
@Test
public void testConnectionTimeoutAfterThrottling() {
awaitReady(client, node);
short requestVersion = PRODUCE.latestVersion();
int timeoutMs = 1000;
ProduceRequest.Builder builder = new ProduceRequest.Builder(
requestVersion,
requestVersion,
new ProduceRequestData()
.setAcks((short) 1)
.setTimeoutMs(timeoutMs));
TestCallbackHandler handler = new TestCallbackHandler();
ClientRequest r1 = client.newClientRequest(node.idString(), builder, time.milliseconds(), true,
defaultRequestTimeoutMs, handler);
client.send(r1, time.milliseconds());
client.poll(0, time.milliseconds());
// Throttle long enough to ensure other inFlight requests timeout.
ProduceResponse pr = new ProduceResponse(new ProduceResponseData().setThrottleTimeMs(timeoutMs));
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(pr, requestVersion, r1.correlationId());
selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
ClientRequest r2 = client.newClientRequest(node.idString(), builder, time.milliseconds(), true,
defaultRequestTimeoutMs, handler);
client.send(r2, time.milliseconds());
time.sleep(timeoutMs);
client.poll(0, time.milliseconds());
assertEquals(1, client.inFlightRequestCount(node.idString()));
assertFalse(client.connectionFailed(node), "Connection should not have failed due to the extra time spent throttling.");
}
@Test
public void testConnectionThrottling() {
// Instrument the test to return a response with a 100ms throttle delay.
awaitReady(client, node);
short requestVersion = PRODUCE.latestVersion();
ProduceRequest.Builder builder = new ProduceRequest.Builder(
requestVersion,
requestVersion,
new ProduceRequestData()
.setAcks((short) 1)
.setTimeoutMs(1000));
TestCallbackHandler handler = new TestCallbackHandler();
ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true,
defaultRequestTimeoutMs, handler);
client.send(request, time.milliseconds());
client.poll(1, time.milliseconds());
int throttleTime = 100;
ProduceResponse produceResponse = new ProduceResponse(new ProduceResponseData().setThrottleTimeMs(throttleTime));
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(produceResponse, requestVersion, request.correlationId());
selector.completeReceive(new NetworkReceive(node.idString(), buffer));
client.poll(1, time.milliseconds());
// The connection is not ready due to throttling.
assertFalse(client.ready(node, time.milliseconds()));
assertEquals(100, client.throttleDelayMs(node, time.milliseconds()));
// After 50ms, the connection is not ready yet.
time.sleep(50);
assertFalse(client.ready(node, time.milliseconds()));
assertEquals(50, client.throttleDelayMs(node, time.milliseconds()));
// After another 50ms, the throttling is done and the connection becomes ready again.
time.sleep(50);
assertTrue(client.ready(node, time.milliseconds()));
assertEquals(0, client.throttleDelayMs(node, time.milliseconds()));
}
private int sendEmptyProduceRequest() {
return sendEmptyProduceRequest(client, node.idString());
}
private int sendEmptyProduceRequest(NetworkClient client, String nodeId) {
ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks((short) 1)
.setTimeoutMs(1000));
TestCallbackHandler handler = new TestCallbackHandler();
ClientRequest request = client.newClientRequest(nodeId, builder, time.milliseconds(), true,
defaultRequestTimeoutMs, handler);
client.send(request, time.milliseconds());
return request.correlationId();
}
private void sendResponse(AbstractResponse response, short version, int correlationId) {
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(response, version, correlationId);
selector.completeReceive(new NetworkReceive(node.idString(), buffer));
}
private void sendThrottledProduceResponse(int correlationId, int throttleMs, short version) {
ProduceResponse response = new ProduceResponse(new ProduceResponseData().setThrottleTimeMs(throttleMs));
sendResponse(response, version, correlationId);
}
@Test
public void testLeastLoadedNode() {
client.ready(node, time.milliseconds());
assertFalse(client.isReady(node, time.milliseconds()));
LeastLoadedNode leastLoadedNode = client.leastLoadedNode(time.milliseconds());
assertEquals(node, leastLoadedNode.node());
assertTrue(leastLoadedNode.hasNodeAvailableOrConnectionReady());
awaitReady(client, node);
client.poll(1, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()), "The client should be ready");
// leastloadednode should be our single node
leastLoadedNode = client.leastLoadedNode(time.milliseconds());
assertTrue(leastLoadedNode.hasNodeAvailableOrConnectionReady());
Node leastNode = leastLoadedNode.node();
assertEquals(leastNode.id(), node.id(), "There should be one leastloadednode");
// sleep for longer than reconnect backoff
time.sleep(reconnectBackoffMsTest);
// CLOSE node
selector.serverDisconnect(node.idString());
client.poll(1, time.milliseconds());
assertFalse(client.ready(node, time.milliseconds()), "After we forced the disconnection the client is no longer ready.");
leastLoadedNode = client.leastLoadedNode(time.milliseconds());
assertFalse(leastLoadedNode.hasNodeAvailableOrConnectionReady());
assertNull(leastLoadedNode.node(), "There should be NO leastloadednode");
}
@Test
public void testHasNodeAvailableOrConnectionReady() {
NetworkClient client = createNetworkClientWithMaxInFlightRequestsPerConnection(1, reconnectBackoffMaxMsTest);
awaitReady(client, node);
long now = time.milliseconds();
LeastLoadedNode leastLoadedNode = client.leastLoadedNode(now);
assertEquals(node, leastLoadedNode.node());
assertTrue(leastLoadedNode.hasNodeAvailableOrConnectionReady());
MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.emptyList(), true);
ClientRequest request = client.newClientRequest(node.idString(), builder, now, true);
client.send(request, now);
client.poll(defaultRequestTimeoutMs, now);
leastLoadedNode = client.leastLoadedNode(now);
assertNull(leastLoadedNode.node());
assertTrue(leastLoadedNode.hasNodeAvailableOrConnectionReady());
}
@Test
public void testLeastLoadedNodeProvideDisconnectedNodesPrioritizedByLastConnectionTimestamp() {
int nodeNumber = 3;
NetworkClient client = createNetworkClientWithMultipleNodes(0, connectionSetupTimeoutMsTest, nodeNumber);
Set<Node> providedNodeIds = new HashSet<>();
for (int i = 0; i < nodeNumber * 10; i++) {
Node node = client.leastLoadedNode(time.milliseconds()).node();
assertNotNull(node, "Should provide a node");
providedNodeIds.add(node);
client.ready(node, time.milliseconds());
client.disconnect(node.idString());
time.sleep(connectionSetupTimeoutMsTest + 1);
client.poll(0, time.milliseconds());
// Define a round as nodeNumber of nodes have been provided
// In each round every node should be provided exactly once
if ((i + 1) % nodeNumber == 0) {
assertEquals(nodeNumber, providedNodeIds.size(), "All the nodes should be provided");
providedNodeIds.clear();
}
}
}
@Test
public void testAuthenticationFailureWithInFlightMetadataRequest() {
int refreshBackoffMs = 50;
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMs, 5000, new LogContext(), new ClusterResourceListeners());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
Cluster cluster = metadata.fetch();
Node node1 = cluster.nodes().get(0);
Node node2 = cluster.nodes().get(1);
NetworkClient client = createNetworkClientWithNoVersionDiscovery(metadata);
awaitReady(client, node1);
metadata.requestUpdate(true);
time.sleep(refreshBackoffMs);
client.poll(0, time.milliseconds());
Optional<Node> nodeWithPendingMetadataOpt = cluster.nodes().stream()
.filter(node -> client.hasInFlightRequests(node.idString()))
.findFirst();
assertEquals(Optional.of(node1), nodeWithPendingMetadataOpt);
assertFalse(client.ready(node2, time.milliseconds()));
selector.serverAuthenticationFailed(node2.idString());
client.poll(0, time.milliseconds());
assertNotNull(client.authenticationException(node2));
ByteBuffer requestBuffer = selector.completedSendBuffers().get(0).buffer();
RequestHeader header = parseHeader(requestBuffer);
assertEquals(ApiKeys.METADATA, header.apiKey());
ByteBuffer responseBuffer = RequestTestUtils.serializeResponseWithHeader(metadataResponse, header.apiVersion(), header.correlationId());
selector.delayedReceive(new DelayedReceive(node1.idString(), new NetworkReceive(node1.idString(), responseBuffer)));
int initialUpdateVersion = metadata.updateVersion();
client.poll(0, time.milliseconds());
assertEquals(initialUpdateVersion + 1, metadata.updateVersion());
}
@Test
public void testLeastLoadedNodeConsidersThrottledConnections() {
client.ready(node, time.milliseconds());
awaitReady(client, node);
client.poll(1, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()), "The client should be ready");
int correlationId = sendEmptyProduceRequest();
client.poll(1, time.milliseconds());
sendThrottledProduceResponse(correlationId, 100, PRODUCE.latestVersion());
client.poll(1, time.milliseconds());
// leastloadednode should return null since the node is throttled
assertNull(client.leastLoadedNode(time.milliseconds()).node());
}
@Test
public void testConnectionDelayWithNoExponentialBackoff() {
long now = time.milliseconds();
long delay = clientWithNoExponentialBackoff.connectionDelay(node, now);
assertEquals(0, delay);
}
@Test
public void testConnectionDelayConnectedWithNoExponentialBackoff() {
awaitReady(clientWithNoExponentialBackoff, node);
long now = time.milliseconds();
long delay = clientWithNoExponentialBackoff.connectionDelay(node, now);
assertEquals(Long.MAX_VALUE, delay);
}
@Test
public void testConnectionDelayDisconnectedWithNoExponentialBackoff() {
awaitReady(clientWithNoExponentialBackoff, node);
selector.serverDisconnect(node.idString());
clientWithNoExponentialBackoff.poll(defaultRequestTimeoutMs, time.milliseconds());
long delay = clientWithNoExponentialBackoff.connectionDelay(node, time.milliseconds());
assertEquals(reconnectBackoffMsTest, delay);
// Sleep until there is no connection delay
time.sleep(delay);
assertEquals(0, clientWithNoExponentialBackoff.connectionDelay(node, time.milliseconds()));
// Start connecting and disconnect before the connection is established
client.ready(node, time.milliseconds());
selector.serverDisconnect(node.idString());
client.poll(defaultRequestTimeoutMs, time.milliseconds());
// Second attempt should have the same behaviour as exponential backoff is disabled
assertEquals(reconnectBackoffMsTest, delay);
}
@Test
public void testConnectionDelay() {
long now = time.milliseconds();
long delay = client.connectionDelay(node, now);
assertEquals(0, delay);
}
@Test
public void testConnectionDelayConnected() {
awaitReady(client, node);
long now = time.milliseconds();
long delay = client.connectionDelay(node, now);
assertEquals(Long.MAX_VALUE, delay);
}
@Test
public void testConnectionDelayDisconnected() {
awaitReady(client, node);
// First disconnection
selector.serverDisconnect(node.idString());
client.poll(defaultRequestTimeoutMs, time.milliseconds());
long delay = client.connectionDelay(node, time.milliseconds());
long expectedDelay = reconnectBackoffMsTest;
double jitter = 0.3;
assertEquals(expectedDelay, delay, expectedDelay * jitter);
// Sleep until there is no connection delay
time.sleep(delay);
assertEquals(0, client.connectionDelay(node, time.milliseconds()));
// Start connecting and disconnect before the connection is established
client.ready(node, time.milliseconds());
selector.serverDisconnect(node.idString());
client.poll(defaultRequestTimeoutMs, time.milliseconds());
// Second attempt should take twice as long with twice the jitter
expectedDelay = Math.round(delay * 2);
delay = client.connectionDelay(node, time.milliseconds());
jitter = 0.6;
assertEquals(expectedDelay, delay, expectedDelay * jitter);
}
@Test
public void testDisconnectDuringUserMetadataRequest() {
// this test ensures that the default metadata updater does not intercept a user-initiated
// metadata request when the remote node disconnects with the request in-flight.
awaitReady(client, node);
MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.emptyList(), true);
long now = time.milliseconds();
ClientRequest request = client.newClientRequest(node.idString(), builder, now, true);
client.send(request, now);
client.poll(defaultRequestTimeoutMs, now);
assertEquals(1, client.inFlightRequestCount(node.idString()));
assertTrue(client.hasInFlightRequests(node.idString()));
assertTrue(client.hasInFlightRequests());
selector.close(node.idString());
List<ClientResponse> responses = client.poll(defaultRequestTimeoutMs, time.milliseconds());
assertEquals(1, responses.size());
assertTrue(responses.iterator().next().wasDisconnected());
}
@Test
public void testServerDisconnectAfterInternalApiVersionRequest() throws Exception {
final long numIterations = 5;
double reconnectBackoffMaxExp = Math.log(reconnectBackoffMaxMsTest / (double) Math.max(reconnectBackoffMsTest, 1))
/ Math.log(reconnectBackoffExpBase);
for (int i = 0; i < numIterations; i++) {
selector.clear();
awaitInFlightApiVersionRequest();
selector.serverDisconnect(node.idString());
// The failed ApiVersion request should not be forwarded to upper layers
List<ClientResponse> responses = client.poll(0, time.milliseconds());
assertFalse(client.hasInFlightRequests(node.idString()));
assertTrue(responses.isEmpty());
long expectedBackoff = Math.round(Math.pow(reconnectBackoffExpBase, Math.min(i, reconnectBackoffMaxExp))
* reconnectBackoffMsTest);
long delay = client.connectionDelay(node, time.milliseconds());
assertEquals(expectedBackoff, delay, reconnectBackoffJitter * expectedBackoff);
if (i == numIterations - 1) {
break;
}
time.sleep(delay + 1);
}
}
@Test
public void testClientDisconnectAfterInternalApiVersionRequest() throws Exception {
awaitInFlightApiVersionRequest();
client.disconnect(node.idString());
assertFalse(client.hasInFlightRequests(node.idString()));
// The failed ApiVersion request should not be forwarded to upper layers
List<ClientResponse> responses = client.poll(0, time.milliseconds());
assertTrue(responses.isEmpty());
}
@Test
public void testDisconnectWithMultipleInFlights() {
NetworkClient client = this.clientWithNoVersionDiscovery;
awaitReady(client, node);
assertTrue(client.isReady(node, time.milliseconds()),
"Expected NetworkClient to be ready to send to node " + node.idString());
MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.emptyList(), true);
long now = time.milliseconds();
final List<ClientResponse> callbackResponses = new ArrayList<>();
RequestCompletionHandler callback = callbackResponses::add;
ClientRequest request1 = client.newClientRequest(node.idString(), builder, now, true, defaultRequestTimeoutMs, callback);
client.send(request1, now);
client.poll(0, now);
ClientRequest request2 = client.newClientRequest(node.idString(), builder, now, true, defaultRequestTimeoutMs, callback);
client.send(request2, now);
client.poll(0, now);
assertNotEquals(request1.correlationId(), request2.correlationId());
assertEquals(2, client.inFlightRequestCount());
assertEquals(2, client.inFlightRequestCount(node.idString()));
client.disconnect(node.idString());
List<ClientResponse> responses = client.poll(0, time.milliseconds());
assertEquals(2, responses.size());
assertEquals(responses, callbackResponses);
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, client.inFlightRequestCount(node.idString()));
// Ensure that the responses are returned in the order they were sent
ClientResponse response1 = responses.get(0);
assertTrue(response1.wasDisconnected());
assertEquals(request1.correlationId(), response1.requestHeader().correlationId());
ClientResponse response2 = responses.get(1);
assertTrue(response2.wasDisconnected());
assertEquals(request2.correlationId(), response2.requestHeader().correlationId());
}
@Test
public void testCallDisconnect() {
awaitReady(client, node);
assertTrue(client.isReady(node, time.milliseconds()),
"Expected NetworkClient to be ready to send to node " + node.idString());
assertFalse(client.connectionFailed(node),
"Did not expect connection to node " + node.idString() + " to be failed");
client.disconnect(node.idString());
assertFalse(client.isReady(node, time.milliseconds()),
"Expected node " + node.idString() + " to be disconnected.");
assertTrue(client.connectionFailed(node),
"Expected connection to node " + node.idString() + " to be failed after disconnect");
assertFalse(client.canConnect(node, time.milliseconds()));
// ensure disconnect does not reset backoff period if already disconnected
time.sleep(reconnectBackoffMaxMsTest);
assertTrue(client.canConnect(node, time.milliseconds()));
client.disconnect(node.idString());
assertTrue(client.canConnect(node, time.milliseconds()));
}
@Test
public void testCorrelationId() {
int count = 100;
Set<Integer> ids = IntStream.range(0, count)
.mapToObj(i -> client.nextCorrelationId())
.collect(Collectors.toSet());
assertEquals(count, ids.size());
ids.forEach(id -> assertTrue(id < SaslClientAuthenticator.MIN_RESERVED_CORRELATION_ID));
}
@Test
public void testReconnectAfterAddressChange() {
AddressChangeHostResolver mockHostResolver = new AddressChangeHostResolver(
initialAddresses.toArray(new InetAddress[0]), newAddresses.toArray(new InetAddress[0]));
AtomicInteger initialAddressConns = new AtomicInteger();
AtomicInteger newAddressConns = new AtomicInteger();
MockSelector selector = new MockSelector(this.time, inetSocketAddress -> {
InetAddress inetAddress = inetSocketAddress.getAddress();
if (initialAddresses.contains(inetAddress)) {
initialAddressConns.incrementAndGet();
} else if (newAddresses.contains(inetAddress)) {
newAddressConns.incrementAndGet();
}
return (mockHostResolver.useNewAddresses() && newAddresses.contains(inetAddress)) ||
(!mockHostResolver.useNewAddresses() && initialAddresses.contains(inetAddress));
});
ClientTelemetrySender mockClientTelemetrySender = mock(ClientTelemetrySender.class);
when(mockClientTelemetrySender.timeToNextUpdate(anyLong())).thenReturn(0L);
NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest,
time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender,
Long.MAX_VALUE, MetadataRecoveryStrategy.NONE);
// Connect to one the initial addresses, then change the addresses and disconnect
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
// First poll should try to update the node but couldn't because node remains in connecting state
// i.e. connection handling is completed after telemetry update.
assertNull(client.telemetryConnectedNode());
client.poll(0, time.milliseconds());
assertEquals(node, client.telemetryConnectedNode());
mockHostResolver.changeAddresses();
selector.serverDisconnect(node.idString());
client.poll(0, time.milliseconds());
assertFalse(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
time.sleep(reconnectBackoffMaxMsTest);
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
client.poll(0, time.milliseconds());
assertEquals(node, client.telemetryConnectedNode());
// We should have tried to connect to one initial address and one new address, and resolved DNS twice
assertEquals(1, initialAddressConns.get());
assertEquals(1, newAddressConns.get());
assertEquals(2, mockHostResolver.resolutionCount());
verify(mockClientTelemetrySender, times(5)).timeToNextUpdate(anyLong());
}
@Test
public void testFailedConnectionToFirstAddress() {
AddressChangeHostResolver mockHostResolver = new AddressChangeHostResolver(
initialAddresses.toArray(new InetAddress[0]), newAddresses.toArray(new InetAddress[0]));
AtomicInteger initialAddressConns = new AtomicInteger();
AtomicInteger newAddressConns = new AtomicInteger();
MockSelector selector = new MockSelector(this.time, inetSocketAddress -> {
InetAddress inetAddress = inetSocketAddress.getAddress();
if (initialAddresses.contains(inetAddress)) {
initialAddressConns.incrementAndGet();
} else if (newAddresses.contains(inetAddress)) {
newAddressConns.incrementAndGet();
}
// Refuse first connection attempt
return initialAddressConns.get() > 1;
});
ClientTelemetrySender mockClientTelemetrySender = mock(ClientTelemetrySender.class);
when(mockClientTelemetrySender.timeToNextUpdate(anyLong())).thenReturn(0L);
NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest,
time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender,
Long.MAX_VALUE, MetadataRecoveryStrategy.NONE);
// First connection attempt should fail
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertFalse(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// Second connection attempt should succeed
time.sleep(reconnectBackoffMaxMsTest);
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// Next client poll after handling connection setup should update telemetry node.
client.poll(0, time.milliseconds());
assertEquals(node, client.telemetryConnectedNode());
// We should have tried to connect to two of the initial addresses, none of the new address, and should
// only have resolved DNS once
assertEquals(2, initialAddressConns.get());
assertEquals(0, newAddressConns.get());
assertEquals(1, mockHostResolver.resolutionCount());
verify(mockClientTelemetrySender, times(3)).timeToNextUpdate(anyLong());
}
@Test
public void testFailedConnectionToFirstAddressAfterReconnect() {
AddressChangeHostResolver mockHostResolver = new AddressChangeHostResolver(
initialAddresses.toArray(new InetAddress[0]), newAddresses.toArray(new InetAddress[0]));
AtomicInteger initialAddressConns = new AtomicInteger();
AtomicInteger newAddressConns = new AtomicInteger();
MockSelector selector = new MockSelector(this.time, inetSocketAddress -> {
InetAddress inetAddress = inetSocketAddress.getAddress();
if (initialAddresses.contains(inetAddress)) {
initialAddressConns.incrementAndGet();
} else if (newAddresses.contains(inetAddress)) {
newAddressConns.incrementAndGet();
}
// Refuse first connection attempt to the new addresses
return initialAddresses.contains(inetAddress) || newAddressConns.get() > 1;
});
ClientTelemetrySender mockClientTelemetrySender = mock(ClientTelemetrySender.class);
when(mockClientTelemetrySender.timeToNextUpdate(anyLong())).thenReturn(0L);
NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest,
time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender,
Long.MAX_VALUE, MetadataRecoveryStrategy.NONE);
// Connect to one the initial addresses, then change the addresses and disconnect
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// Next client poll after handling connection setup should update telemetry node.
client.poll(0, time.milliseconds());
assertEquals(node, client.telemetryConnectedNode());
mockHostResolver.changeAddresses();
selector.serverDisconnect(node.idString());
client.poll(0, time.milliseconds());
assertFalse(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// First connection attempt to new addresses should fail
time.sleep(reconnectBackoffMaxMsTest);
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertFalse(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// Second connection attempt to new addresses should succeed
time.sleep(reconnectBackoffMaxMsTest);
client.ready(node, time.milliseconds());
time.sleep(connectionSetupTimeoutMaxMsTest);
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertNull(client.telemetryConnectedNode());
// Next client poll after handling connection setup should update telemetry node.
client.poll(0, time.milliseconds());
assertEquals(node, client.telemetryConnectedNode());
// We should have tried to connect to one of the initial addresses and two of the new addresses (the first one
// failed), and resolved DNS twice, once for each set of addresses
assertEquals(1, initialAddressConns.get());
assertEquals(2, newAddressConns.get());
assertEquals(2, mockHostResolver.resolutionCount());
verify(mockClientTelemetrySender, times(6)).timeToNextUpdate(anyLong());
}
@Test
public void testCloseConnectingNode() {
Cluster cluster = TestUtils.clusterWith(2);
Node node0 = cluster.nodeById(0);
Node node1 = cluster.nodeById(1);
client.ready(node0, time.milliseconds());
selector.serverConnectionBlocked(node0.idString());
client.poll(1, time.milliseconds());
client.close(node0.idString());
// Poll without any connections should return without exceptions
client.poll(0, time.milliseconds());
assertFalse(NetworkClientUtils.isReady(client, node0, time.milliseconds()));
assertFalse(NetworkClientUtils.isReady(client, node1, time.milliseconds()));
// Connection to new node should work
client.ready(node1, time.milliseconds());
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(defaultApiVersionsResponse(), ApiKeys.API_VERSIONS.latestVersion(), 0);
selector.delayedReceive(new DelayedReceive(node1.idString(), new NetworkReceive(node1.idString(), buffer)));
while (!client.ready(node1, time.milliseconds()))
client.poll(1, time.milliseconds());
assertTrue(client.isReady(node1, time.milliseconds()));
selector.clear();
// New connection to node closed earlier should work
client.ready(node0, time.milliseconds());
buffer = RequestTestUtils.serializeResponseWithHeader(defaultApiVersionsResponse(), ApiKeys.API_VERSIONS.latestVersion(), 1);
selector.delayedReceive(new DelayedReceive(node0.idString(), new NetworkReceive(node0.idString(), buffer)));
while (!client.ready(node0, time.milliseconds()))
client.poll(1, time.milliseconds());
assertTrue(client.isReady(node0, time.milliseconds()));
}
@Test
public void testConnectionDoesNotRemainStuckInCheckingApiVersionsStateIfChannelNeverBecomesReady() {
final Cluster cluster = TestUtils.clusterWith(1);
final Node node = cluster.nodeById(0);
// Channel is ready by default so we mark it as not ready.
client.ready(node, time.milliseconds());
selector.channelNotReady(node.idString());
// Channel should not be ready.
client.poll(0, time.milliseconds());
assertFalse(NetworkClientUtils.isReady(client, node, time.milliseconds()));
// Connection should time out if the channel does not become ready within
// the connection setup timeout. This ensures that the client does not remain
// stuck in the CHECKING_API_VERSIONS state.
time.sleep((long) (connectionSetupTimeoutMsTest * 1.2) + 1);
client.poll(0, time.milliseconds());
assertTrue(client.connectionFailed(node));
}
@Test
public void testTelemetryRequest() {
ClientTelemetrySender mockClientTelemetrySender = mock(ClientTelemetrySender.class);
when(mockClientTelemetrySender.timeToNextUpdate(anyLong())).thenReturn(0L);
NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE,
reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024,
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest,
time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender,
Long.MAX_VALUE, MetadataRecoveryStrategy.NONE);
// Send the ApiVersionsRequest
client.ready(node, time.milliseconds());
client.poll(0, time.milliseconds());
assertNull(client.telemetryConnectedNode());
assertTrue(client.hasInFlightRequests(node.idString()));
delayedApiVersionsResponse(0, ApiKeys.API_VERSIONS.latestVersion(), TestUtils.defaultApiVersionsResponse(
ApiMessageType.ListenerType.BROKER));
// handle ApiVersionsResponse
client.poll(0, time.milliseconds());
// the ApiVersionsRequest is gone
assertFalse(client.hasInFlightRequests(node.idString()));
selector.clear();
GetTelemetrySubscriptionsRequest.Builder getRequest = new GetTelemetrySubscriptionsRequest.Builder(
new GetTelemetrySubscriptionsRequestData(), true);
when(mockClientTelemetrySender.createRequest()).thenReturn(Optional.of(getRequest));
GetTelemetrySubscriptionsResponse getResponse = new GetTelemetrySubscriptionsResponse(new GetTelemetrySubscriptionsResponseData());
ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(getResponse, ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS.latestVersion(), 1);
selector.completeReceive(new NetworkReceive(node.idString(), buffer));
// Initiate poll to send GetTelemetrySubscriptions request
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertEquals(node, client.telemetryConnectedNode());
verify(mockClientTelemetrySender, times(1)).handleResponse(any(GetTelemetrySubscriptionsResponse.class));
selector.clear();
PushTelemetryRequest.Builder pushRequest = new PushTelemetryRequest.Builder(
new PushTelemetryRequestData(), true);
when(mockClientTelemetrySender.createRequest()).thenReturn(Optional.of(pushRequest));
PushTelemetryResponse pushResponse = new PushTelemetryResponse(new PushTelemetryResponseData());
ByteBuffer pushBuffer = RequestTestUtils.serializeResponseWithHeader(pushResponse, ApiKeys.PUSH_TELEMETRY.latestVersion(), 2);
selector.completeReceive(new NetworkReceive(node.idString(), pushBuffer));
// Initiate poll to send PushTelemetry request
client.poll(0, time.milliseconds());
assertTrue(client.isReady(node, time.milliseconds()));
assertEquals(node, client.telemetryConnectedNode());
verify(mockClientTelemetrySender, times(1)).handleResponse(any(PushTelemetryResponse.class));
verify(mockClientTelemetrySender, times(4)).timeToNextUpdate(anyLong());
verify(mockClientTelemetrySender, times(2)).createRequest();
}
private RequestHeader parseHeader(ByteBuffer buffer) {
buffer.getInt(); // skip size
return RequestHeader.parse(buffer.slice());
}
private void awaitInFlightApiVersionRequest() throws Exception {
client.ready(node, time.milliseconds());
TestUtils.waitForCondition(() -> {
client.poll(0, time.milliseconds());
return client.hasInFlightRequests(node.idString());
}, 1000, "");
assertFalse(client.isReady(node, time.milliseconds()));
}
private ApiVersionsResponse defaultApiVersionsResponse() {
return TestUtils.defaultApiVersionsResponse(ApiMessageType.ListenerType.BROKER);
}
private static | NetworkClientTest |
java | google__dagger | javatests/dagger/internal/codegen/AssistedFactoryTest.java | {
"start": 2108,
"end": 2392
} | interface ____ {",
" Foo create(String factoryStr);",
"}");
Source bar =
CompilerTests.javaSource(
"test.Bar",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | FooFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/ingest/IngestPipelineFieldAccessPattern.java | {
"start": 534,
"end": 2103
} | enum ____ {
/**
* Field names will be split on the `.` character into their contingent parts. Resolution will strictly check
* for nested objects following the field path.
*/
CLASSIC("classic"),
/**
* Field names will be split on the `.` character into their contingent parts. Resolution will flexibly check
* for nested objects following the field path. If nested objects are not found for a key, the access pattern
* will fall back to joining subsequent path elements together until it finds the next object that matches the
* concatenated path. Allows for simple resolution of dotted field names.
*/
FLEXIBLE("flexible");
private final String key;
IngestPipelineFieldAccessPattern(String key) {
this.key = key;
}
public String getKey() {
return key;
}
private static final Map<String, IngestPipelineFieldAccessPattern> NAME_REGISTRY = Map.of(CLASSIC.key, CLASSIC, FLEXIBLE.key, FLEXIBLE);
public static boolean isValidAccessPattern(String accessPatternName) {
return NAME_REGISTRY.containsKey(accessPatternName);
}
public static IngestPipelineFieldAccessPattern getAccessPattern(String accessPatternName) {
IngestPipelineFieldAccessPattern accessPattern = NAME_REGISTRY.get(accessPatternName);
if (accessPattern == null) {
throw new IllegalArgumentException("Invalid ingest pipeline access pattern name [" + accessPatternName + "] given");
}
return accessPattern;
}
}
| IngestPipelineFieldAccessPattern |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java | {
"start": 94366,
"end": 94656
} | class ____ extends NoOpBufferPool {
@Override
public Buffer requestBuffer() {
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);
return new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
}
}
}
| TestBufferPool |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/formatstring/FormatStringShouldUsePlaceholdersTest.java | {
"start": 4048,
"end": 4400
} | class ____ {
public void checkElementIndex(int i, int j) {
Preconditions.checkElementIndex(i, j + 10);
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void negativeCheckPositionIndex() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.common.base.Preconditions;
public | Test |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/factories/TableOptionsBuilder.java | {
"start": 1078,
"end": 2171
} | class ____ {
private final Map<String, String> options;
private final String connector;
private final String format;
public TableOptionsBuilder(String connector, String format) {
this.options = new HashMap<>();
this.connector = connector;
this.format = format;
}
public TableOptionsBuilder withTableOption(ConfigOption<?> option, String value) {
return withTableOption(option.key(), value);
}
public TableOptionsBuilder withFormatOption(ConfigOption<?> option, String value) {
return withFormatOption(format + "." + option.key(), value);
}
public TableOptionsBuilder withTableOption(String key, String value) {
options.put(key, value);
return this;
}
public TableOptionsBuilder withFormatOption(String key, String value) {
options.put(key, value);
return this;
}
public Map<String, String> build() {
withTableOption(FactoryUtil.CONNECTOR, connector);
withTableOption(FactoryUtil.FORMAT, format);
return options;
}
}
| TableOptionsBuilder |
java | apache__camel | components/camel-dhis2/camel-dhis2-component/src/main/java/org/apache/camel/component/dhis2/Dhis2Endpoint.java | {
"start": 2214,
"end": 5100
} | class ____ extends AbstractApiEndpoint<Dhis2ApiName, Dhis2Configuration> implements EndpointServiceLocation {
@UriParam
private final Dhis2Configuration configuration;
private Object apiProxy;
public Dhis2Endpoint(String uri, Dhis2Component component,
Dhis2ApiName apiName, String methodName, Dhis2Configuration endpointConfiguration) {
super(uri, component, apiName, methodName, Dhis2ApiCollection.getCollection().getHelper(apiName),
endpointConfiguration);
this.configuration = endpointConfiguration;
}
public Producer createProducer() throws Exception {
return new Dhis2Producer(this);
}
public Consumer createConsumer(Processor processor) throws Exception {
// make sure inBody is not set for consumers
if (inBody != null) {
throw new IllegalArgumentException("Option inBody is not supported for consumer endpoint");
}
final Dhis2Consumer consumer = new Dhis2Consumer(this, processor);
// also set consumer.* properties
configureConsumer(consumer);
return consumer;
}
@Override
protected ApiMethodPropertiesHelper<Dhis2Configuration> getPropertiesHelper() {
return Dhis2PropertiesHelper.getHelper(getCamelContext());
}
@Override
public String getServiceUrl() {
return configuration.getBaseApiUrl();
}
@Override
public Map<String, String> getServiceMetadata() {
if (configuration.getUsername() != null) {
return Map.of("username", configuration.getUsername());
}
return null;
}
@Override
public String getServiceProtocol() {
return "http";
}
protected String getThreadProfileName() {
return Dhis2Constants.THREAD_PROFILE_NAME;
}
@Override
protected void afterConfigureProperties() {
Dhis2Client dhis2Client = this.getClient();
switch (apiName) {
case GET:
apiProxy = new Dhis2Get(dhis2Client);
break;
case POST:
apiProxy = new Dhis2Post(dhis2Client);
break;
case DELETE:
apiProxy = new Dhis2Delete(dhis2Client);
break;
case PUT:
apiProxy = new Dhis2Put(dhis2Client);
break;
case RESOURCE_TABLES:
apiProxy = new Dhis2ResourceTables(dhis2Client);
break;
default:
throw new IllegalArgumentException("Invalid API name " + apiName);
}
}
@Override
public Object getApiProxy(ApiMethod method, Map<String, Object> args) {
return apiProxy;
}
protected Dhis2Client getClient() {
return ((Dhis2Component) this.getComponent()).getClient(this.configuration);
}
}
| Dhis2Endpoint |
java | quarkusio__quarkus | integration-tests/jpa-mssql/src/main/java/io/quarkus/it/jpa/mssql/JPAFunctionalityTestEndpoint.java | {
"start": 780,
"end": 3659
} | class ____ {
@Inject
EntityManager em;
@GET
public String test() throws IOException {
if (!Charset.isSupported("Cp1252"))
throw new IllegalStateException("You will very likely need support for Codepage Cp1252 to connect to SQL Server");
cleanUpData();
//Store some well known Person instances we can then test on:
QuarkusTransaction.requiringNew().run(() -> {
persistNewPerson("Gizmo");
persistNewPerson("Quarkus");
persistNewPerson("Hibernate ORM");
});
//Load all persons and run some checks on the query results:
QuarkusTransaction.requiringNew().run(() -> {
CriteriaBuilder cb = em.getCriteriaBuilder();
CriteriaQuery<Person> cq = cb.createQuery(Person.class);
Root<Person> from = cq.from(Person.class);
cq.select(from).orderBy(cb.asc(from.get("name")));
TypedQuery<Person> q = em.createQuery(cq);
List<Person> allpersons = q.getResultList();
if (allpersons.size() != 3) {
throw new RuntimeException("Incorrect number of results");
}
if (!allpersons.get(0).getName().equals("Gizmo")) {
throw new RuntimeException("Incorrect order of results");
}
StringBuilder sb = new StringBuilder("list of stored Person names:\n\t");
for (Person p : allpersons) {
p.describeFully(sb);
}
sb.append("\nList complete.\n");
System.out.print(sb);
});
//Try a JPA named query:
QuarkusTransaction.requiringNew().run(() -> {
TypedQuery<Person> typedQuery = em.createNamedQuery(
"get_person_by_name", Person.class);
typedQuery.setParameter("name", "Quarkus");
final Person singleResult = typedQuery.getSingleResult();
if (!singleResult.getName().equals("Quarkus")) {
throw new RuntimeException("Wrong result from named JPA query");
}
});
//Check that HQL fetch does not throw an exception
QuarkusTransaction.requiringNew()
.run(() -> em.createQuery("from Person p left join fetch p.address a").getResultList());
cleanUpData();
return "OK";
}
private void cleanUpData() {
QuarkusTransaction.requiringNew()
.run(() -> em.createNativeQuery("Delete from Person").executeUpdate());
}
private void persistNewPerson(String name) {
Person person = new Person();
person.setName(name);
person.setAddress(new SequencedAddress("Street " + randomName()));
em.persist(person);
}
private static String randomName() {
return UUID.randomUUID().toString();
}
}
| JPAFunctionalityTestEndpoint |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponse.java | {
"start": 866,
"end": 2298
} | class ____ extends ActionResponse implements ToXContentObject {
private boolean succeeded;
public SqlClearCursorResponse(boolean succeeded) {
this.succeeded = succeeded;
}
SqlClearCursorResponse(StreamInput in) throws IOException {
succeeded = in.readBoolean();
}
/**
* @return Whether the attempt to clear a cursor was successful.
*/
public boolean isSucceeded() {
return succeeded;
}
public SqlClearCursorResponse setSucceeded(boolean succeeded) {
this.succeeded = succeeded;
return this;
}
public RestStatus status() {
return succeeded ? NOT_FOUND : OK;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("succeeded", succeeded);
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(succeeded);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SqlClearCursorResponse response = (SqlClearCursorResponse) o;
return succeeded == response.succeeded;
}
@Override
public int hashCode() {
return Objects.hash(succeeded);
}
}
| SqlClearCursorResponse |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManager.java | {
"start": 2040,
"end": 2215
} | class ____ not implemented in a thread-safe manner and relies on the fact
* that any method call happens within a single thread.
*
* @see Executing
*/
@NotThreadSafe
public | is |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java | {
"start": 1429,
"end": 3244
} | class ____ extends ValuesSourceAggregatorFactory {
private final GeoBoundsAggregatorSupplier aggregatorSupplier;
private final boolean wrapLongitude;
GeoBoundsAggregatorFactory(
String name,
ValuesSourceConfig config,
boolean wrapLongitude,
AggregationContext context,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder,
Map<String, Object> metadata,
GeoBoundsAggregatorSupplier aggregatorSupplier
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metadata);
this.wrapLongitude = wrapLongitude;
this.aggregatorSupplier = aggregatorSupplier;
}
@Override
protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
final InternalAggregation empty = InternalGeoBounds.empty(name, wrapLongitude, metadata);
return new NonCollectingAggregator(name, context, parent, factories, metadata) {
@Override
public InternalAggregation buildEmptyAggregation() {
return empty;
}
};
}
@Override
protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
throws IOException {
return aggregatorSupplier.build(name, context, parent, config, wrapLongitude, metadata);
}
static void registerAggregators(ValuesSourceRegistry.Builder builder) {
builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoBoundsAggregator::new, true);
builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, TimeSeriesValuesSourceType.POSITION, GeoBoundsAggregator::new, true);
}
}
| GeoBoundsAggregatorFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/naturalid/mutable/MutableNaturalIdTest.java | {
"start": 1760,
"end": 10167
} | class ____ {
@Test
@JiraKey( value = "HHH-10360")
public void testNaturalIdNullability(SessionFactoryScope scope) {
final SessionFactoryImplementor sessionFactory = scope.getSessionFactory();
final EntityMappingType entityMappingType = sessionFactory.getRuntimeMetamodels().getEntityMappingType( User.class );
final EntityPersister persister = entityMappingType.getEntityPersister();
// nullability is not specified, so it should be non-nullable by hbm-specific default
assertFalse( persister.getPropertyNullability()[persister.getPropertyIndex( "name" )] );
assertFalse( persister.getPropertyNullability()[persister.getPropertyIndex( "org" )] );
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testCacheSynchronizationOnMutation(SessionFactoryScope scope) {
final Long id = scope.fromTransaction(
(session) -> {
final User user = new User( "gavin", "hb", "secret" );
session.persist( user );
return user.getId();
}
);
scope.inTransaction(
(session) -> {
final User user = session.byId( User.class ).getReference( id );
user.setOrg( "ceylon" );
final User original = session.byNaturalId( User.class )
.using( "name", "gavin" )
.using( "org", "hb" )
.load();
assertNull( original );
assertNotSame( user, original );
}
);
}
@Test
public void testReattachmentNaturalIdCheck(SessionFactoryScope scope) throws Throwable {
final User created = scope.fromTransaction(
(session) -> {
final User user = new User( "gavin", "hb", "secret" );
session.persist( user );
return user;
}
);
final Field name = User.class.getDeclaredField( "name" );
name.setAccessible( true );
name.set( created, "Gavin" );
scope.inTransaction(
(session) -> {
try {
session.merge( created );
final User loaded = session
.byNaturalId( User.class )
.using( "name", "Gavin" )
.using( "org", "hb" )
.load();
assertNotNull( loaded );
}
catch( HibernateException expected ) {
session.getTransaction().markRollbackOnly();
}
catch( Throwable t ) {
try {
session.getTransaction().markRollbackOnly();
}
catch ( Throwable ignore ) {
}
throw t;
}
}
);
}
@Test
public void testReattachmentUnmodifiedNaturalIdCheck(SessionFactoryScope scope) throws Throwable {
final User created = scope.fromTransaction(
(session) -> {
final User user = new User( "gavin", "hb", "secret" );
session.persist( user );
return user;
}
);
final Field name = User.class.getDeclaredField( "name" );
name.setAccessible( true );
scope.inTransaction(
(session) -> {
try {
session.lock( created, LockMode.NONE );
name.set( created, "Gavin" );
final User loaded = session
.byNaturalId( User.class )
.using( "name", "Gavin" )
.using( "org", "hb" )
.load();
assertNotNull( loaded );
}
catch (Throwable t) {
try {
session.getTransaction().markRollbackOnly();
}
catch (Throwable ignore) {
// ignore
}
if ( t instanceof AssertionError ) {
throw (AssertionError) t;
}
}
} );
}
@Test
public void testNonexistentNaturalIdCache(SessionFactoryScope scope) {
final StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
statistics.clear();
scope.inTransaction(
(session) -> {
Object nullUser = session.byNaturalId( User.class )
.using( "name", "gavin" )
.using( "org", "hb" )
.load();
assertNull( nullUser );
}
);
assertEquals( 0, statistics.getNaturalIdCacheHitCount(), 0 );
assertEquals( 0, statistics.getNaturalIdCachePutCount(), 0 );
scope.inTransaction(
(session) -> session.persist( new User("gavin", "hb", "secret") )
);
statistics.clear();
scope.inTransaction(
(session) -> {
final User user = session.byNaturalId( User.class )
.using( "name", "gavin" )
.using( "org", "hb" )
.load();
assertNotNull( user );
}
);
assertEquals( 0, statistics.getNaturalIdCacheHitCount() );
assertEquals( 0, statistics.getNaturalIdCachePutCount() );
}
@Test
public void testNaturalIdCache(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User("gavin", "hb", "secret") )
);
scope.inTransaction(
(session) -> {
final User user = session.byNaturalId( User.class )
.using( "name", "gavin" )
.using( "org", "hb" )
.load();
assertNotNull( user );
}
);
}
@Test
public void testNaturalIdDeleteUsingCache(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User( "steve", "hb", "superSecret" ) )
);
scope.inTransaction(
(session) -> {
final User user = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( user );
}
);
scope.inTransaction(
(session) -> {
final User user = session.bySimpleNaturalId( User.class )
.load( new Object[] { "steve", "hb" } );
assertNotNull( user );
session.remove( user );
}
);
scope.inTransaction(
(session) -> {
final User user = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNull( user );
final User user2 = session.bySimpleNaturalId( User.class )
.load( new Object[] { "steve", "hb" } );
assertNull( user2 );
}
);
}
@Test
public void testNaturalIdRecreateUsingCache(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User( "steve", "hb", "superSecret" ) )
);
scope.inTransaction(
(session) -> {
final User user = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( user );
session.remove( user );
}
);
}
@Test
public void testQuerying(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User( "steve", "hb", "superSecret" ) )
);
scope.inTransaction(
(session) -> {
final User user = (User) session.createQuery( "from User u where u.name = :name" )
.setParameter( "name", "steve" ).uniqueResult();
assertNotNull( user );
assertEquals( "steve", user.getName() );
}
);
}
@Test
public void testClear(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User( "steve", "hb", "superSecret" ) )
);
final StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
statistics.clear();
scope.inTransaction(
(session) -> {
final User beforeClear = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( beforeClear );
assertEquals( 1, statistics.getPrepareStatementCount() );
session.clear();
final User afterClear = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( afterClear );
assertEquals( 2, statistics.getPrepareStatementCount() );
assertNotSame( beforeClear, afterClear );
}
);
}
@Test
public void testEviction(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> session.persist( new User( "steve", "hb", "superSecret" ) )
);
final StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
statistics.clear();
scope.inTransaction(
(session) -> {
final User beforeEvict = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( beforeEvict );
assertEquals( 1, statistics.getPrepareStatementCount() );
session.evict( beforeEvict );
final User afterEvict = session.byNaturalId( User.class )
.using( "name", "steve" )
.using( "org", "hb" )
.load();
assertNotNull( afterEvict );
assertEquals( 2, statistics.getPrepareStatementCount() );
assertNotSame( beforeEvict, afterEvict );
}
);
}
}
| MutableNaturalIdTest |
java | quarkusio__quarkus | test-framework/junit5/src/test/java/io/quarkus/test/junit/TestResourceUtilTest.java | {
"start": 2171,
"end": 2513
} | class ____ implements QuarkusTestProfile {
public AnotherProfileClassWithResources() {
}
@Override
public List<TestResourceEntry> testResources() {
return Collections.singletonList(
new TestResourceEntry(
Dummy.class, Map.of()));
}
}
abstract | AnotherProfileClassWithResources |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java | {
"start": 15800,
"end": 15965
} | class ____ the IP address of the given InetAddress directly, therefore no
* reverse DNS lookup is required.
*/
@VisibleForTesting
public static | returns |
java | elastic__elasticsearch | libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java | {
"start": 11100,
"end": 12817
} | class ____ extends Terminal {
private static final int JDK_VERSION_WITH_IS_TERMINAL = 22;
private static final Console CONSOLE = detectTerminal();
ConsoleTerminal() {
super(CONSOLE.reader(), CONSOLE.writer(), ERROR_WRITER);
}
static boolean isSupported() {
return CONSOLE != null;
}
static Console detectTerminal() {
// JDK >= 22 returns a console even if the terminal is redirected unless using -Djdk.console=java.base
// https://bugs.openjdk.org/browse/JDK-8308591
Console console = System.console();
if (console != null && Runtime.version().feature() >= JDK_VERSION_WITH_IS_TERMINAL) {
try {
// verify the console is a terminal using isTerminal() on JDK >= 22
// TODO: Remove reflection once Java 22 sources are supported, e.g. using a MRJAR
Method isTerminal = Console.class.getMethod("isTerminal");
return Boolean.TRUE.equals(isTerminal.invoke(console)) ? console : null;
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new AssertionError(e);
}
}
return console;
}
@Override
public String readText(String prompt) {
return CONSOLE.readLine("%s", prompt);
}
@Override
public char[] readSecret(String prompt) {
return CONSOLE.readPassword("%s", prompt);
}
}
/** visible for testing */
@SuppressForbidden(reason = "Access streams for construction")
static | ConsoleTerminal |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/indices/SystemIndices.java | {
"start": 32108,
"end": 40158
} | enum ____ {
/** Access level that skips system resource access checks. */
ALL,
/**
* Access level that should deny access to net-new system indices and system data streams, and issue deprecation warnings for
* backwards-compatible system indices.
*/
NONE,
/**
* At this access level, check the value of the {@link SystemIndices#EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY}. If the
* request has an allowed product origin, allow access. If not, deny access to net-new system indices and system data streams, and
* issue deprecation warnings for backwards-compatible system indices.
*/
RESTRICTED,
/**
* This value exists because there was a desire for "net-new" system indices to opt in to the post-8.0 behavior of having
* access blocked in most cases, but this caused problems with certain APIs (see
* <a href="https://github.com/elastic/elasticsearch/issues/74687">issue #74687</a>), so this access level was added as a
* workaround. Once we no longer have to support accessing existing system indices, this can and should be removed, along with the
* net-new property of system indices in general.
*/
BACKWARDS_COMPATIBLE_ONLY
}
/**
* Given a collection of {@link SystemIndexDescriptor}s and their sources, checks to see if the index patterns of the listed
* descriptors overlap with any of the other patterns. If any do, throws an exception.
*
* @param featureDescriptors A map of feature names to the Features that will provide SystemIndexDescriptors
* @throws IllegalStateException Thrown if any of the index patterns overlaps with another.
*/
static void checkForOverlappingPatterns(Map<String, Feature> featureDescriptors) {
List<Tuple<String, SystemIndexDescriptor>> sourceDescriptorPair = featureDescriptors.values()
.stream()
.flatMap(feature -> feature.getIndexDescriptors().stream().map(descriptor -> new Tuple<>(feature.getName(), descriptor)))
.sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message
.toList();
List<Tuple<String, SystemDataStreamDescriptor>> sourceDataStreamDescriptorPair = featureDescriptors.values()
.stream()
.filter(feature -> feature.getDataStreamDescriptors().isEmpty() == false)
.flatMap(feature -> feature.getDataStreamDescriptors().stream().map(descriptor -> new Tuple<>(feature.getName(), descriptor)))
.sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getDataStreamName())) // Consistent ordering -> consistent error message
.toList();
// This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the
// automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states
// per pattern should be low as well. If these assumptions change, this might need to be reworked.
sourceDescriptorPair.forEach(descriptorToCheck -> {
List<Tuple<String, SystemIndexDescriptor>> descriptorsMatchingThisPattern = sourceDescriptorPair.stream()
.filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked
.filter(
d -> overlaps(descriptorToCheck.v2(), d.v2())
|| (d.v2().getAliasName() != null && descriptorToCheck.v2().matchesIndexPattern(d.v2().getAliasName()))
)
.toList();
if (descriptorsMatchingThisPattern.isEmpty() == false) {
throw new IllegalStateException(
"a system index descriptor ["
+ descriptorToCheck.v2()
+ "] from ["
+ descriptorToCheck.v1()
+ "] overlaps with other system index descriptors: ["
+ descriptorsMatchingThisPattern.stream()
.map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]")
.collect(Collectors.joining(", "))
);
}
List<Tuple<String, SystemDataStreamDescriptor>> dataStreamsMatching = sourceDataStreamDescriptorPair.stream()
.filter(
dsTuple -> descriptorToCheck.v2().matchesIndexPattern(dsTuple.v2().getDataStreamName())
|| overlaps(descriptorToCheck.v2().getIndexPattern(), dsTuple.v2().getBackingIndexPattern())
)
.toList();
if (dataStreamsMatching.isEmpty() == false) {
throw new IllegalStateException(
"a system index descriptor ["
+ descriptorToCheck.v2()
+ "] from ["
+ descriptorToCheck.v1()
+ "] overlaps with one or more data stream descriptors: ["
+ dataStreamsMatching.stream()
.map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]")
.collect(Collectors.joining(", "))
);
}
});
}
private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) {
return overlaps(a1.getIndexPattern(), a2.getIndexPattern());
}
private static boolean overlaps(String pattern1, String pattern2) {
Automaton a1Automaton = SystemIndexDescriptor.buildAutomaton(pattern1, null);
Automaton a2Automaton = SystemIndexDescriptor.buildAutomaton(pattern2, null);
return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false;
}
private static Map<String, Feature> buildFeatureMap(List<Feature> features) {
final Map<String, Feature> map = Maps.newMapWithExpectedSize(features.size() + SERVER_SYSTEM_FEATURE_DESCRIPTORS.size());
features.forEach(feature -> map.put(feature.getName(), feature));
// put the server items last since we expect less of them
SERVER_SYSTEM_FEATURE_DESCRIPTORS.forEach((source, feature) -> {
if (map.putIfAbsent(source, feature) != null) {
throw new IllegalArgumentException(
"plugin or module attempted to define the same source [" + source + "] as a built-in system index"
);
}
});
return Map.copyOf(map);
}
public Collection<SystemIndexDescriptor> getSystemIndexDescriptors() {
return this.featureDescriptors.values().stream().flatMap(f -> f.getIndexDescriptors().stream()).toList();
}
public Map<String, SystemIndexDescriptor.MappingsVersion> getMappingsVersions() {
return getSystemIndexDescriptors().stream()
.filter(SystemIndexDescriptor::isAutomaticallyManaged)
.collect(Collectors.toMap(SystemIndexDescriptor::getPrimaryIndex, SystemIndexDescriptor::getMappingsVersion));
}
/**
* Check that a feature name is not reserved
* @param name Name of feature
* @param plugin Name of plugin providing the feature
*/
public static void validateFeatureName(String name, String plugin) {
if (SnapshotsService.NO_FEATURE_STATES_VALUE.equalsIgnoreCase(name)) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"feature name cannot be reserved name [\"%s\"], but was for plugin [%s]",
SnapshotsService.NO_FEATURE_STATES_VALUE,
plugin
)
);
}
}
/**
* Describes an Elasticsearch system feature that keeps state in protected indices and data streams.
*
* <p>This is an internal | SystemIndexAccessLevel |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java | {
"start": 3251,
"end": 3341
} | class ____ implements StatCallback, StringCallback {
/**
* Callback | ActiveStandbyElector |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/utils/SimpleReferenceCache.java | {
"start": 8376,
"end": 11870
} | interface ____
*/
@Override
@SuppressWarnings("unchecked")
public <T> T get(Class<T> type) {
List<ReferenceConfigBase<?>> referenceConfigBases = referenceTypeMap.get(type);
if (CollectionUtils.isNotEmpty(referenceConfigBases)) {
return (T) referenceConfigBases.get(0).get();
}
return null;
}
@Override
public void check(String key, Class<?> type, long timeout) {
List<ReferenceConfigBase<?>> referencesOfKey = referenceKeyMap.get(key);
if (CollectionUtils.isEmpty(referencesOfKey)) {
return;
}
List<ReferenceConfigBase<?>> referencesOfType = referenceTypeMap.get(type);
if (CollectionUtils.isEmpty(referencesOfType)) {
return;
}
for (ReferenceConfigBase<?> rc : referencesOfKey) {
rc.checkOrDestroy(timeout);
}
}
@Override
public <T> void check(ReferenceConfigBase<T> referenceConfig, long timeout) {
String key = generator.generateKey(referenceConfig);
Class<?> type = referenceConfig.getInterfaceClass();
check(key, type, timeout);
}
@Override
public void destroy(String key, Class<?> type) {
List<ReferenceConfigBase<?>> referencesOfKey = referenceKeyMap.remove(key);
if (CollectionUtils.isEmpty(referencesOfKey)) {
return;
}
List<ReferenceConfigBase<?>> referencesOfType = referenceTypeMap.get(type);
if (CollectionUtils.isEmpty(referencesOfType)) {
return;
}
for (ReferenceConfigBase<?> rc : referencesOfKey) {
referencesOfType.remove(rc);
destroyReference(rc);
}
}
@Override
public void destroy(Class<?> type) {
List<ReferenceConfigBase<?>> referencesOfType = referenceTypeMap.remove(type);
for (ReferenceConfigBase<?> rc : referencesOfType) {
String key = generator.generateKey(rc);
referenceKeyMap.remove(key);
destroyReference(rc);
}
}
/**
* clear and destroy one {@link ReferenceConfigBase} in the cache.
*
* @param referenceConfig use for create key.
*/
@Override
public <T> void destroy(ReferenceConfigBase<T> referenceConfig) {
String key = generator.generateKey(referenceConfig);
Class<?> type = referenceConfig.getInterfaceClass();
destroy(key, type);
}
/**
* clear and destroy all {@link ReferenceConfigBase} in the cache.
*/
@Override
public void destroyAll() {
if (CollectionUtils.isEmptyMap(referenceKeyMap)) {
return;
}
referenceKeyMap.forEach((_k, referencesOfKey) -> {
for (ReferenceConfigBase<?> rc : referencesOfKey) {
destroyReference(rc);
}
});
referenceKeyMap.clear();
referenceTypeMap.clear();
}
private void destroyReference(ReferenceConfigBase<?> rc) {
Destroyable proxy = (Destroyable) rc.get();
if (proxy != null) {
proxy.$destroy();
}
rc.destroy();
}
public Map<String, List<ReferenceConfigBase<?>>> getReferenceMap() {
return referenceKeyMap;
}
public Map<Class<?>, List<ReferenceConfigBase<?>>> getReferenceTypeMap() {
return referenceTypeMap;
}
@Override
public String toString() {
return "ReferenceCache(name: " + name + ")";
}
public | definition |
java | dropwizard__dropwizard | dropwizard-auth/src/main/java/io/dropwizard/auth/oauth/OAuthCredentialAuthFilter.java | {
"start": 469,
"end": 2398
} | class ____<P extends Principal> extends AuthFilter<String, P> {
/**
* Query parameter used to pass Bearer token
*
* @see <a href="https://tools.ietf.org/html/rfc6750#section-2.3">The OAuth 2.0 Authorization Framework: Bearer Token Usage</a>
*/
public static final String OAUTH_ACCESS_TOKEN_PARAM = "access_token";
private OAuthCredentialAuthFilter() {
}
@Override
public void filter(final ContainerRequestContext requestContext) throws IOException {
String credentials = getCredentials(requestContext.getHeaders().getFirst(HttpHeaders.AUTHORIZATION));
// If Authorization header is not used, check query parameter where token can be passed as well
if (credentials == null) {
credentials = requestContext.getUriInfo().getQueryParameters().getFirst(OAUTH_ACCESS_TOKEN_PARAM);
}
if (!authenticate(requestContext, credentials, SecurityContext.BASIC_AUTH)) {
throw unauthorizedHandler.buildException(prefix, realm);
}
}
/**
* Parses a value of the `Authorization` header in the form of `Bearer a892bf3e284da9bb40648ab10`.
*
* @param header the value of the `Authorization` header
* @return a token
*/
@Nullable
private String getCredentials(String header) {
if (header == null) {
return null;
}
final int space = header.indexOf(' ');
if (space <= 0) {
return null;
}
final String method = header.substring(0, space);
if (!prefix.equalsIgnoreCase(method)) {
return null;
}
return header.substring(space + 1);
}
/**
* Builder for {@link OAuthCredentialAuthFilter}.
* <p>An {@link Authenticator} must be provided during the building process.</p>
*
* @param <P> the type of the principal
*/
public static | OAuthCredentialAuthFilter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/joincolumn/embedded/CharArrayToStringInEmbeddedJoinColumnOrFormulaTest.java | {
"start": 1289,
"end": 2565
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Vehicle vehicle = new Vehicle();
vehicle.setId( 1L );
vehicle.setStringProp1( "VO" );
vehicle.setStringProp2( "2020" );
session.persist( vehicle );
VehicleInvoice invoice = new VehicleInvoice();
invoice.setId( new VehicleInvoiceId( "VO".toCharArray(), "2020".toCharArray() ) );
invoice.setVehicle( vehicle );
session.persist( invoice );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from VehicleInvoice" ).executeUpdate();
session.createMutationQuery( "delete from Vehicle" ).executeUpdate();
} );
}
@Test
public void testAssociation(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final VehicleInvoice vehicleInvoice = session.createQuery(
"from VehicleInvoice",
VehicleInvoice.class
).getSingleResult();
assertEquals( 1L, vehicleInvoice.getVehicle().getId() );
assertEquals( "VO", vehicleInvoice.getVehicle().getStringProp1() );
assertEquals( "2020", vehicleInvoice.getVehicle().getStringProp2() );
} );
}
@Embeddable
public static | CharArrayToStringInEmbeddedJoinColumnOrFormulaTest |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/multipleinput/input/OneInput.java | {
"start": 1583,
"end": 2915
} | class ____ extends InputBase implements AsyncKeyOrderedProcessing {
private final OneInputStreamOperator<RowData, RowData> operator;
public OneInput(OneInputStreamOperator<RowData, RowData> operator) {
this.operator = operator;
}
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
operator.processElement(element);
}
@Override
public void processWatermark(Watermark mark) throws Exception {
operator.processWatermark(mark);
}
@Override
public void processLatencyMarker(LatencyMarker latencyMarker) throws Exception {
operator.processLatencyMarker(latencyMarker);
}
@Override
public void processWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {
operator.processWatermarkStatus(watermarkStatus);
}
@Internal
@Override
public final boolean isAsyncKeyOrderedProcessingEnabled() {
return (operator instanceof AsyncKeyOrderedProcessing)
&& ((AsyncKeyOrderedProcessing) operator).isAsyncKeyOrderedProcessingEnabled();
}
@Internal
@Override
public final <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor(int inputId) {
return ((AsyncKeyOrderedProcessing) operator).getRecordProcessor(1);
}
}
| OneInput |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/HashtableContainsTest.java | {
"start": 4267,
"end": 4857
} | class ____<K, V> extends Hashtable<K, V> {
@Override
public boolean contains(Object v) {
// BUG: Diagnostic contains:
// Did you mean 'return containsValue(v);' or 'return containsKey(v);'?
return contains(v);
}
}
""")
.doTest();
}
@Test
public void negative_containsAmbiguous() {
compilationHelper
.addSourceLines(
"test/Test.java",
"""
package test;
import java.util.Hashtable;
| MyHashTable |
java | apache__logging-log4j2 | log4j-core-its/src/test/java/org/apache/logging/log4j/core/async/perftest/ResponseTimeTest.java | {
"start": 14879,
"end": 16228
} | class ____ extends DefaultAsyncQueueFullPolicy {
static AtomicLong ringbufferFull = new AtomicLong();
@Override
public EventRoute getRoute(final long backgroundThreadId, final Level level) {
ringbufferFull.incrementAndGet();
System.out.print('!');
return super.getRoute(backgroundThreadId, level);
}
}
/**
* Pacer determines the pace at which measurements are taken. Sample usage:
* <p/>
* <pre>
* - each thread has a Pacer instance
* - at start of test, call pacer.setInitialStartTime(System.nanoTime());
* - loop:
* - store result of pacer.expectedNextOperationNanoTime() as expectedStartTime
* - pacer.acquire(1);
* - before the measured operation: store System.nanoTime() as actualStartTime
* - perform the measured operation
* - store System.nanoTime() as doneTime
* - serviceTimeHistogram.recordValue(doneTime - actualStartTime);
* - responseTimeHistogram.recordValue(doneTime - expectedStartTime);
* </pre>
* <p>
* Borrowed with permission from Gil Tene's Cassandra stress test:
* https://github.com/LatencyUtils/cassandra-stress2/blob/trunk/tools/stress/src/org/apache/cassandra/stress/StressAction.java#L374
* </p>
*/
static | PrintingAsyncQueueFullPolicy |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSchedulerConfiguration.java | {
"start": 4551,
"end": 4636
} | class ____ with the queue
* @param queue name of the queue
* @return the | associated |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java | {
"start": 4797,
"end": 32382
} | enum ____ {
PRIORITY_FIRST, USERLIMIT_FIRST;
}
private static final Logger LOG =
LoggerFactory.getLogger(ProportionalCapacityPreemptionPolicy.class);
private final Clock clock;
// Configurable fields
private double maxIgnoredOverCapacity;
private long maxWaitTime;
private long monitoringInterval;
private float percentageClusterPreemptionAllowed;
private double naturalTerminationFactor;
private boolean observeOnly;
private boolean lazyPreempionEnabled;
private float maxAllowableLimitForIntraQueuePreemption;
private float minimumThresholdForIntraQueuePreemption;
private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
private boolean crossQueuePreemptionConservativeDRF;
private boolean inQueuePreemptionConservativeDRF;
// Current configuration
private CapacitySchedulerConfiguration csConfig;
// Pointer to other RM components
private RMContext rmContext;
private ResourceCalculator rc;
private CapacityScheduler scheduler;
private RMNodeLabelsManager nlm;
// Internal properties to make decisions of what to preempt
private final Map<RMContainer,Long> preemptionCandidates =
new HashMap<>();
private Map<String, Map<String, TempQueuePerPartition>> queueToPartitions =
new HashMap<>();
private Map<String, LinkedHashSet<String>> partitionToUnderServedQueues =
new HashMap<String, LinkedHashSet<String>>();
private List<PreemptionCandidatesSelector> candidatesSelectionPolicies;
private Set<String> allPartitions;
private Set<String> leafQueueNames;
Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
Set<RMContainer>>> pcsMap;
// Preemptable Entities, synced from scheduler at every run
private Map<String, PreemptableQueue> preemptableQueues;
private Set<ContainerId> killableContainers;
public ProportionalCapacityPreemptionPolicy() {
clock = SystemClock.getInstance();
allPartitions = Collections.emptySet();
leafQueueNames = Collections.emptySet();
preemptableQueues = Collections.emptyMap();
}
@VisibleForTesting
public ProportionalCapacityPreemptionPolicy(RMContext context,
CapacityScheduler scheduler, Clock clock) {
init(context.getYarnConfiguration(), context, scheduler);
this.clock = clock;
allPartitions = Collections.emptySet();
leafQueueNames = Collections.emptySet();
preemptableQueues = Collections.emptyMap();
}
public void init(Configuration config, RMContext context,
ResourceScheduler sched) {
LOG.info("Preemption monitor:" + this.getClass().getCanonicalName());
assert null == scheduler : "Unexpected duplicate call to init";
if (!(sched instanceof CapacityScheduler)) {
throw new YarnRuntimeException("Class " +
sched.getClass().getCanonicalName() + " not instance of " +
CapacityScheduler.class.getCanonicalName());
}
rmContext = context;
scheduler = (CapacityScheduler) sched;
rc = scheduler.getResourceCalculator();
nlm = scheduler.getRMContext().getNodeLabelManager();
updateConfigIfNeeded();
}
private void updateConfigIfNeeded() {
CapacitySchedulerConfiguration config = scheduler.getConfiguration();
if (config == csConfig) {
return;
}
maxIgnoredOverCapacity = config.getDouble(
CapacitySchedulerConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY);
naturalTerminationFactor = config.getDouble(
CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR);
maxWaitTime = config.getLong(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL);
monitoringInterval = config.getLong(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL);
percentageClusterPreemptionAllowed = config.getFloat(
CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND,
CapacitySchedulerConfiguration.DEFAULT_TOTAL_PREEMPTION_PER_ROUND);
observeOnly = config.getBoolean(
CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY);
lazyPreempionEnabled = config.getBoolean(
CapacitySchedulerConfiguration.LAZY_PREEMPTION_ENABLED,
CapacitySchedulerConfiguration.DEFAULT_LAZY_PREEMPTION_ENABLED);
maxAllowableLimitForIntraQueuePreemption = config.getFloat(
CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
CapacitySchedulerConfiguration.
DEFAULT_INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT);
minimumThresholdForIntraQueuePreemption = config.getFloat(
CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MINIMUM_THRESHOLD,
CapacitySchedulerConfiguration.
DEFAULT_INTRAQUEUE_PREEMPTION_MINIMUM_THRESHOLD);
intraQueuePreemptionOrderPolicy = IntraQueuePreemptionOrderPolicy
.valueOf(config
.get(
CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ORDER_POLICY,
CapacitySchedulerConfiguration.DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY)
.toUpperCase());
crossQueuePreemptionConservativeDRF = config.getBoolean(
CapacitySchedulerConfiguration.
CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF,
CapacitySchedulerConfiguration.
DEFAULT_CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF);
inQueuePreemptionConservativeDRF = config.getBoolean(
CapacitySchedulerConfiguration.
IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF,
CapacitySchedulerConfiguration.
DEFAULT_IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF);
candidatesSelectionPolicies = new ArrayList<>();
// Do we need white queue-priority preemption policy?
boolean isQueuePriorityPreemptionEnabled =
config.getPUOrderingPolicyUnderUtilizedPreemptionEnabled();
if (isQueuePriorityPreemptionEnabled) {
candidatesSelectionPolicies.add(
new QueuePriorityContainerCandidateSelector(this));
}
// Do we need to specially consider reserved containers?
boolean selectCandidatesForResevedContainers = config.getBoolean(
CapacitySchedulerConfiguration.
PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
CapacitySchedulerConfiguration.
DEFAULT_PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS);
if (selectCandidatesForResevedContainers) {
candidatesSelectionPolicies
.add(new ReservedContainerCandidatesSelector(this));
}
boolean additionalPreemptionBasedOnReservedResource = config.getBoolean(
CapacitySchedulerConfiguration.ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS,
CapacitySchedulerConfiguration.DEFAULT_ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS);
// initialize candidates preemption selection policies
candidatesSelectionPolicies.add(new FifoCandidatesSelector(this,
additionalPreemptionBasedOnReservedResource, false));
// Do we need to do preemption to balance queue even after queues get satisfied?
boolean isPreemptionToBalanceRequired = config.getBoolean(
CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED);
long maximumKillWaitTimeForPreemptionToQueueBalance = config.getLong(
CapacitySchedulerConfiguration.MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION,
CapacitySchedulerConfiguration.DEFAULT_MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION);
if (isPreemptionToBalanceRequired) {
PreemptionCandidatesSelector selector = new FifoCandidatesSelector(this,
false, true);
selector.setMaximumKillWaitTime(maximumKillWaitTimeForPreemptionToQueueBalance);
candidatesSelectionPolicies.add(selector);
}
// Do we need to specially consider intra queue
boolean isIntraQueuePreemptionEnabled = config.getBoolean(
CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED,
CapacitySchedulerConfiguration.DEFAULT_INTRAQUEUE_PREEMPTION_ENABLED);
if (isIntraQueuePreemptionEnabled) {
candidatesSelectionPolicies.add(new IntraQueueCandidatesSelector(this));
}
LOG.info("Capacity Scheduler configuration changed, updated preemption " +
"properties to:\n" +
"max_ignored_over_capacity = " + maxIgnoredOverCapacity + "\n" +
"natural_termination_factor = " + naturalTerminationFactor + "\n" +
"max_wait_before_kill = " + maxWaitTime + "\n" +
"monitoring_interval = " + monitoringInterval + "\n" +
"total_preemption_per_round = " + percentageClusterPreemptionAllowed +
"\n" +
"observe_only = " + observeOnly + "\n" +
"lazy-preemption-enabled = " + lazyPreempionEnabled + "\n" +
"intra-queue-preemption.enabled = " + isIntraQueuePreemptionEnabled +
"\n" +
"intra-queue-preemption.max-allowable-limit = " +
maxAllowableLimitForIntraQueuePreemption + "\n" +
"intra-queue-preemption.minimum-threshold = " +
minimumThresholdForIntraQueuePreemption + "\n" +
"intra-queue-preemption.preemption-order-policy = " +
intraQueuePreemptionOrderPolicy + "\n" +
"priority-utilization.underutilized-preemption.enabled = " +
isQueuePriorityPreemptionEnabled + "\n" +
"select_based_on_reserved_containers = " +
selectCandidatesForResevedContainers + "\n" +
"additional_res_balance_based_on_reserved_containers = " +
additionalPreemptionBasedOnReservedResource + "\n" +
"Preemption-to-balance-queue-enabled = " +
isPreemptionToBalanceRequired + "\n" +
"cross-queue-preemption.conservative-drf = " +
crossQueuePreemptionConservativeDRF + "\n" +
"in-queue-preemption.conservative-drf = " +
inQueuePreemptionConservativeDRF);
csConfig = config;
}
@Override
public ResourceCalculator getResourceCalculator() {
return rc;
}
@Override
public synchronized void editSchedule() {
updateConfigIfNeeded();
long startTs = clock.getTime();
CSQueue root = scheduler.getRootQueue();
Resource clusterResources = Resources.clone(scheduler.getClusterResource());
containerBasedPreemptOrKill(root, clusterResources);
if (LOG.isDebugEnabled()) {
LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
}
}
private void preemptOrkillSelectedContainerAfterWait(
Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
Set<RMContainer>>> toPreemptPerSelector, long currentTime) {
int toPreemptCount = 0;
for (Map<ApplicationAttemptId, Set<RMContainer>> containers :
toPreemptPerSelector.values()) {
toPreemptCount += containers.size();
}
LOG.debug(
"Starting to preempt containers for selectedCandidates and size:{}",
toPreemptCount);
// preempt (or kill) the selected containers
// We need toPreemptPerSelector here to match list of containers to
// its selector so that we can get custom timeout per selector when
// checking if current container should be killed or not
for (Map.Entry<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
Set<RMContainer>>> pc : toPreemptPerSelector
.entrySet()) {
Map<ApplicationAttemptId, Set<RMContainer>> cMap = pc.getValue();
if (cMap.size() > 0) {
for (Map.Entry<ApplicationAttemptId,
Set<RMContainer>> e : cMap.entrySet()) {
ApplicationAttemptId appAttemptId = e.getKey();
if (LOG.isDebugEnabled()) {
LOG.debug("Send to scheduler: in app=" + appAttemptId
+ " #containers-to-be-preemptionCandidates=" + e.getValue().size());
}
for (RMContainer container : e.getValue()) {
// if we tried to preempt this for more than maxWaitTime, this
// should be based on custom timeout per container per selector
if (preemptionCandidates.get(container) != null
&& preemptionCandidates.get(container)
+ pc.getKey().getMaximumKillWaitTimeMs() <= currentTime) {
// kill it
rmContext.getDispatcher().getEventHandler().handle(
new ContainerPreemptEvent(appAttemptId, container,
SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE));
preemptionCandidates.remove(container);
} else {
if (preemptionCandidates.get(container) != null) {
// We already updated the information to scheduler earlier, we need
// not have to raise another event.
continue;
}
//otherwise just send preemption events
rmContext.getDispatcher().getEventHandler().handle(
new ContainerPreemptEvent(appAttemptId, container,
SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION));
preemptionCandidates.put(container, currentTime);
}
}
}
}
}
}
private void syncKillableContainersFromScheduler() {
// sync preemptable entities from scheduler
preemptableQueues =
scheduler.getPreemptionManager().getShallowCopyOfPreemptableQueues();
killableContainers = new HashSet<>();
for (Map.Entry<String, PreemptableQueue> entry : preemptableQueues
.entrySet()) {
PreemptableQueue entity = entry.getValue();
for (Map<ContainerId, RMContainer> map : entity.getKillableContainers()
.values()) {
killableContainers.addAll(map.keySet());
}
}
}
private void cleanupStaledPreemptionCandidates(long currentTime) {
// Keep the preemptionCandidates list clean
// garbage collect containers that are irrelevant for preemption
// And avoid preempt selected containers for *this execution*
// or within 1 ms
preemptionCandidates.entrySet()
.removeIf(candidate ->
candidate.getValue() + 2 * maxWaitTime < currentTime);
}
private Set<String> getLeafQueueNames(TempQueuePerPartition q) {
// Only consider this a leaf queue if:
// It is a concrete leaf queue (not a childless parent)
if (CollectionUtils.isEmpty(q.children)) {
CSQueue queue = scheduler.getQueue(q.queueName);
if (queue instanceof AbstractLeafQueue) {
return ImmutableSet.of(q.queueName);
}
return Collections.emptySet();
}
Set<String> leafQueueNames = new HashSet<>();
for (TempQueuePerPartition child : q.children) {
leafQueueNames.addAll(getLeafQueueNames(child));
}
return leafQueueNames;
}
/**
* This method selects and tracks containers to be preemptionCandidates. If a container
* is in the target list for more than maxWaitTime it is killed.
*
* @param root the root of the CapacityScheduler queue hierarchy
* @param clusterResources the total amount of resources in the cluster
*/
private void containerBasedPreemptOrKill(CSQueue root,
Resource clusterResources) {
// Sync killable containers from scheduler when lazy preemption enabled
if (lazyPreempionEnabled) {
syncKillableContainersFromScheduler();
}
// All partitions to look at
Set<String> partitions = new HashSet<>();
partitions.addAll(scheduler.getRMContext()
.getNodeLabelManager().getClusterNodeLabelNames());
partitions.add(RMNodeLabelsManager.NO_LABEL);
this.allPartitions = ImmutableSet.copyOf(partitions);
// extract a summary of the queues from scheduler
synchronized (scheduler) {
queueToPartitions.clear();
for (String partitionToLookAt : allPartitions) {
cloneQueues(root, Resources
.clone(nlm.getResourceByLabel(partitionToLookAt, clusterResources)),
partitionToLookAt);
}
// Update effective priority of queues
}
this.leafQueueNames = ImmutableSet.copyOf(getLeafQueueNames(
getQueueByPartition(CapacitySchedulerConfiguration.ROOT,
RMNodeLabelsManager.NO_LABEL)));
// compute total preemption allowed
Resource totalPreemptionAllowed = Resources.multiply(clusterResources,
percentageClusterPreemptionAllowed);
//clear under served queues for every run
partitionToUnderServedQueues.clear();
// based on ideal allocation select containers to be preemptionCandidates from each
// queue and each application
Map<ApplicationAttemptId, Set<RMContainer>> toPreempt =
new HashMap<>();
Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
Set<RMContainer>>> toPreemptPerSelector = new HashMap<>();
for (PreemptionCandidatesSelector selector :
candidatesSelectionPolicies) {
long startTime = 0;
if (LOG.isDebugEnabled()) {
LOG.debug(MessageFormat
.format("Trying to use {0} to select preemption candidates",
selector.getClass().getName()));
startTime = clock.getTime();
}
Map<ApplicationAttemptId, Set<RMContainer>> curCandidates =
selector.selectCandidates(toPreempt, clusterResources,
totalPreemptionAllowed);
toPreemptPerSelector.putIfAbsent(selector, curCandidates);
if (LOG.isDebugEnabled()) {
LOG.debug(MessageFormat
.format("{0} uses {1} millisecond to run",
selector.getClass().getName(), clock.getTime() - startTime));
int totalSelected = 0;
int curSelected = 0;
for (Set<RMContainer> set : toPreempt.values()) {
totalSelected += set.size();
}
for (Set<RMContainer> set : curCandidates.values()) {
curSelected += set.size();
}
LOG.debug(MessageFormat
.format("So far, total {0} containers selected to be preempted, {1}"
+ " containers selected this round\n",
totalSelected, curSelected));
}
}
if (LOG.isDebugEnabled()) {
logToCSV(new ArrayList<>(leafQueueNames));
}
// if we are in observeOnly mode return before any action is taken
if (observeOnly) {
return;
}
// TODO: need consider revert killable containers when no more demandings.
// Since we could have several selectors to make decisions concurrently.
// So computed ideal-allocation varies between different selectors.
//
// We may need to "score" killable containers and revert the most preferred
// containers. The bottom line is, we shouldn't preempt a queue which is already
// below its guaranteed resource.
long currentTime = clock.getTime();
pcsMap = toPreemptPerSelector;
// preempt (or kill) the selected containers
preemptOrkillSelectedContainerAfterWait(toPreemptPerSelector, currentTime);
// cleanup staled preemption candidates
cleanupStaledPreemptionCandidates(currentTime);
}
@Override
public long getMonitoringInterval() {
return monitoringInterval;
}
@Override
public String getPolicyName() {
return "ProportionalCapacityPreemptionPolicy";
}
@VisibleForTesting
public Map<RMContainer, Long> getToPreemptContainers() {
return preemptionCandidates;
}
/**
* This method walks a tree of CSQueue and clones the portion of the state
* relevant for preemption in TempQueue(s). It also maintains a pointer to
* the leaves. Finally it aggregates pending resources in each queue and rolls
* it up to higher levels.
*
* @param curQueue current queue which I'm looking at now
* @param partitionResource the total amount of resources in the cluster
* @return the root of the cloned queue hierarchy
*/
private TempQueuePerPartition cloneQueues(CSQueue curQueue,
Resource partitionResource, String partitionToLookAt) {
TempQueuePerPartition ret;
ReadLock readLock = curQueue.getReadLock();
// Acquire a read lock from Parent/LeafQueue.
readLock.lock();
try {
String queuePath = curQueue.getQueuePath();
QueueCapacities qc = curQueue.getQueueCapacities();
float absCap = qc.getAbsoluteCapacity(partitionToLookAt);
float absMaxCap = qc.getAbsoluteMaximumCapacity(partitionToLookAt);
boolean preemptionDisabled = curQueue.getPreemptionDisabled();
QueueResourceQuotas queueResourceQuotas = curQueue
.getQueueResourceQuotas();
Resource effMinRes = queueResourceQuotas
.getEffectiveMinResource(partitionToLookAt);
Resource effMaxRes = queueResourceQuotas
.getEffectiveMaxResource(partitionToLookAt);
Resource current = Resources
.clone(curQueue.getQueueResourceUsage().getUsed(partitionToLookAt));
Resource killable = Resources.none();
Resource reserved = Resources.clone(
curQueue.getQueueResourceUsage().getReserved(partitionToLookAt));
if (null != preemptableQueues.get(queuePath)) {
killable = Resources.clone(preemptableQueues.get(queuePath)
.getKillableResource(partitionToLookAt));
}
// when partition is a non-exclusive partition, the actual maxCapacity
// could more than specified maxCapacity
try {
if (!scheduler.getRMContext().getNodeLabelManager()
.isExclusiveNodeLabel(partitionToLookAt)) {
absMaxCap = 1.0f;
}
} catch (IOException e) {
// This may cause by partition removed when running capacity monitor,
// just ignore the error, this will be corrected when doing next check.
}
ret = new TempQueuePerPartition(queuePath, current, preemptionDisabled,
partitionToLookAt, killable, absCap, absMaxCap, partitionResource,
reserved, curQueue, effMinRes, effMaxRes);
if (curQueue instanceof AbstractParentQueue) {
String configuredOrderingPolicy =
((AbstractParentQueue) curQueue).getQueueOrderingPolicy().getConfigName();
// Recursively add children
for (CSQueue c : curQueue.getChildQueues()) {
TempQueuePerPartition subq = cloneQueues(c, partitionResource,
partitionToLookAt);
// If we respect priority
if (StringUtils.equals(
CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY,
configuredOrderingPolicy)) {
subq.relativePriority = c.getPriority().getPriority();
}
ret.addChild(subq);
subq.parent = ret;
}
}
} finally {
readLock.unlock();
}
addTempQueuePartition(ret);
return ret;
}
// simple printout function that reports internal queue state (useful for
// plotting)
private void logToCSV(List<String> leafQueueNames){
Collections.sort(leafQueueNames);
String queueState = " QUEUESTATE: " + clock.getTime();
StringBuilder sb = new StringBuilder();
sb.append(queueState);
for (String queueName : leafQueueNames) {
TempQueuePerPartition tq =
getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL);
sb.append(", ");
tq.appendLogString(sb);
}
LOG.debug(sb.toString());
}
private void addTempQueuePartition(TempQueuePerPartition queuePartition) {
String queueName = queuePartition.queueName;
Map<String, TempQueuePerPartition> queuePartitions;
if (null == (queuePartitions = queueToPartitions.get(queueName))) {
queuePartitions = new HashMap<>();
queueToPartitions.put(queueName, queuePartitions);
}
queuePartitions.put(queuePartition.partition, queuePartition);
}
/**
* Get queue partition by given queueName and partitionName
*/
@Override
public TempQueuePerPartition getQueueByPartition(String queueName,
String partition) {
Map<String, TempQueuePerPartition> partitionToQueues;
if (null == (partitionToQueues = queueToPartitions.get(queueName))) {
throw new YarnRuntimeException("This shouldn't happen, cannot find "
+ "TempQueuePerPartition for queueName=" + queueName);
}
return partitionToQueues.get(partition);
}
/**
* Get all queue partitions by given queueName
*/
@Override
public Collection<TempQueuePerPartition> getQueuePartitions(String queueName) {
if (!queueToPartitions.containsKey(queueName)) {
throw new YarnRuntimeException("This shouldn't happen, cannot find "
+ "TempQueuePerPartition collection for queueName=" + queueName);
}
return queueToPartitions.get(queueName).values();
}
@Override
public CapacityScheduler getScheduler() {
return scheduler;
}
@Override
public RMContext getRMContext() {
return rmContext;
}
@Override
public boolean isObserveOnly() {
return observeOnly;
}
@Override
public Set<ContainerId> getKillableContainers() {
return killableContainers;
}
@Override
public double getMaxIgnoreOverCapacity() {
return maxIgnoredOverCapacity;
}
@Override
public double getNaturalTerminationFactor() {
return naturalTerminationFactor;
}
@Override
public Set<String> getLeafQueueNames() {
return leafQueueNames;
}
@Override
public Set<String> getAllPartitions() {
return allPartitions;
}
@VisibleForTesting
Map<String, Map<String, TempQueuePerPartition>> getQueuePartitions() {
return queueToPartitions;
}
@VisibleForTesting
Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
Set<RMContainer>>> getToPreemptCandidatesPerSelector() {
return pcsMap;
}
@Override
public int getClusterMaxApplicationPriority() {
return scheduler.getMaxClusterLevelAppPriority().getPriority();
}
@Override
public float getMaxAllowableLimitForIntraQueuePreemption() {
return maxAllowableLimitForIntraQueuePreemption;
}
@Override
public float getMinimumThresholdForIntraQueuePreemption() {
return minimumThresholdForIntraQueuePreemption;
}
@Override
public Resource getPartitionResource(String partition) {
return Resources.clone(nlm.getResourceByLabel(partition,
Resources.clone(scheduler.getClusterResource())));
}
public LinkedHashSet<String> getUnderServedQueuesPerPartition(
String partition) {
return partitionToUnderServedQueues.get(partition);
}
public void addPartitionToUnderServedQueues(String queueName,
String partition) {
LinkedHashSet<String> underServedQueues = partitionToUnderServedQueues
.get(partition);
if (null == underServedQueues) {
underServedQueues = new LinkedHashSet<String>();
partitionToUnderServedQueues.put(partition, underServedQueues);
}
underServedQueues.add(queueName);
}
@Override
public IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy() {
return intraQueuePreemptionOrderPolicy;
}
@Override
public boolean getCrossQueuePreemptionConservativeDRF() {
return crossQueuePreemptionConservativeDRF;
}
@Override
public boolean getInQueuePreemptionConservativeDRF() {
return inQueuePreemptionConservativeDRF;
}
@Override
public long getDefaultMaximumKillWaitTimeout() {
return maxWaitTime;
}
}
| IntraQueuePreemptionOrderPolicy |
java | apache__dubbo | dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/OpenAPIDefinitionResolver.java | {
"start": 1677,
"end": 1781
} | interface ____ {
OpenAPI resolve(OpenAPI openAPI, ServiceMeta serviceMeta);
}
| OpenAPIChain |
java | alibaba__nacos | auth/src/main/java/com/alibaba/nacos/auth/serveridentity/ServerIdentity.java | {
"start": 730,
"end": 1164
} | class ____ {
private final String identityKey;
private final String identityValue;
public ServerIdentity(String identityKey, String identityValue) {
this.identityKey = identityKey;
this.identityValue = identityValue;
}
public String getIdentityKey() {
return identityKey;
}
public String getIdentityValue() {
return identityValue;
}
}
| ServerIdentity |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java | {
"start": 32039,
"end": 35410
} | class ____ implements ToXContentObject, Writeable {
private static final String SHARD_COUNT = "shard_count";
private static final String SIZE = "size_in_bytes";
private static final String MAX_SEGMENTS_PER_SHARD = "max_segments_per_shard";
public static final IndexSnapshotDetails SKIPPED = new IndexSnapshotDetails(0, ByteSizeValue.ZERO, 0);
public static final ConstructingObjectParser<IndexSnapshotDetails, Void> PARSER = new ConstructingObjectParser<>(
IndexSnapshotDetails.class.getName(),
true,
a -> new IndexSnapshotDetails((int) a[0], ByteSizeValue.ofBytes((long) a[1]), (int) a[2])
);
static {
PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(SHARD_COUNT));
PARSER.declareLong(ConstructingObjectParser.constructorArg(), new ParseField(SIZE));
PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(MAX_SEGMENTS_PER_SHARD));
}
private final int shardCount;
private final ByteSizeValue size;
private final int maxSegmentsPerShard;
public IndexSnapshotDetails(int shardCount, ByteSizeValue size, int maxSegmentsPerShard) {
this.shardCount = shardCount;
this.size = Objects.requireNonNull(size);
this.maxSegmentsPerShard = maxSegmentsPerShard;
}
public IndexSnapshotDetails(StreamInput in) throws IOException {
shardCount = in.readVInt();
size = ByteSizeValue.readFrom(in);
maxSegmentsPerShard = in.readVInt();
}
public int getShardCount() {
return shardCount;
}
public ByteSizeValue getSize() {
return size;
}
public int getMaxSegmentsPerShard() {
return maxSegmentsPerShard;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexSnapshotDetails that = (IndexSnapshotDetails) o;
return shardCount == that.shardCount && maxSegmentsPerShard == that.maxSegmentsPerShard && size.equals(that.size);
}
@Override
public int hashCode() {
return Objects.hash(shardCount, size, maxSegmentsPerShard);
}
@Override
public String toString() {
return "IndexSnapshotDetails{"
+ "shardCount="
+ shardCount
+ ", size="
+ size
+ ", maxSegmentsPerShard="
+ maxSegmentsPerShard
+ '}';
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(shardCount);
size.writeTo(out);
out.writeVInt(maxSegmentsPerShard);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SHARD_COUNT, shardCount);
builder.humanReadableField(SIZE, "size", size);
builder.field(MAX_SEGMENTS_PER_SHARD, maxSegmentsPerShard);
builder.endObject();
return builder;
}
}
}
| IndexSnapshotDetails |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeohash.java | {
"start": 1541,
"end": 3869
} | class ____ extends AbstractConvertFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"ToGeohash",
ToGeohash::new
);
private static final Map<DataType, BuildFactory> EVALUATORS = Map.ofEntries(
Map.entry(GEOHASH, (source, fieldEval) -> fieldEval),
Map.entry(LONG, (source, fieldEval) -> fieldEval),
Map.entry(KEYWORD, ToGeohashFromStringEvaluator.Factory::new),
Map.entry(TEXT, ToGeohashFromStringEvaluator.Factory::new)
);
@FunctionInfo(
returnType = "geohash",
preview = true,
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW) },
description = """
Converts an input value to a `geohash` value.
A string will only be successfully converted if it respects the
`geohash` format, as described for the
[geohash grid aggregation](/reference/aggregations/search-aggregations-bucket-geohashgrid-aggregation.md).""",
examples = @Example(file = "spatial-grid", tag = "to_geohash")
)
public ToGeohash(
Source source,
@Param(
name = "field",
type = { "geohash", "long", "keyword", "text" },
description = "Input value. The input can be a single- or multi-valued column or an expression."
) Expression field
) {
super(source, field);
}
private ToGeohash(StreamInput in) throws IOException {
super(in);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected Map<DataType, BuildFactory> factories() {
return EVALUATORS;
}
@Override
public DataType dataType() {
return GEOHASH;
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new ToGeohash(source(), newChildren.get(0));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, ToGeohash::new, field());
}
@ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class })
static long fromString(BytesRef in) {
return Geohash.longEncode(in.utf8ToString());
}
}
| ToGeohash |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/function/array/ArrayAggregateTest.java | {
"start": 2985,
"end": 7820
} | class ____ implements AdditionalMappingContributor {
@Override
public void contribute(
AdditionalMappingContributions contributions,
InFlightMetadataCollector metadata,
ResourceStreamLocator resourceStreamLocator,
MetadataBuildingContext buildingContext) {
if ( metadata.getDatabase().getDialect() instanceof OracleDialect ) {
final TypeConfiguration typeConfiguration = metadata.getTypeConfiguration();
final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry();
final JdbcTypeRegistry jdbcTypeRegistry = typeConfiguration.getJdbcTypeRegistry();
new OracleArrayJdbcType(
jdbcTypeRegistry.getDescriptor( SqlTypes.VARCHAR ),
"StringArray"
).addAuxiliaryDatabaseObjects(
new ArrayJavaType<>( javaTypeRegistry.getDescriptor( String.class ) ),
null,
Size.nil(),
metadata.getDatabase(),
typeConfiguration.getCurrentBaseSqlTypeIndicators()
);
}
}
}
@BeforeEach
public void prepareData(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final EntityOfBasics e1 = new EntityOfBasics( 1 );
e1.setTheString( "abc" );
final EntityOfBasics e2 = new EntityOfBasics( 2 );
e2.setTheString( "def" );
final EntityOfBasics e3 = new EntityOfBasics( 3 );
em.persist( e1 );
em.persist( e2 );
em.persist( e3 );
} );
}
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testEmpty(SessionFactoryScope scope) {
scope.inSession( em -> {
//tag::hql-array-agg-example[]
List<String[]> results = em.createQuery( "select array_agg(e.data) within group (order by e.id) from BasicEntity e", String[].class )
.getResultList();
//end::hql-array-agg-example[]
assertEquals( 1, results.size() );
assertNull( results.get( 0 ) );
} );
}
@Test
public void testWithoutNull(SessionFactoryScope scope) {
scope.inSession( em -> {
List<String[]> results = em.createQuery( "select array_agg(e.theString) within group (order by e.theString) from EntityOfBasics e where e.theString is not null", String[].class )
.getResultList();
assertEquals( 1, results.size() );
assertArrayEquals( new String[]{ "abc", "def" }, results.get( 0 ) );
} );
}
@Test
public void testWithNull(SessionFactoryScope scope) {
scope.inSession( em -> {
List<String[]> results = em.createQuery( "select array_agg(e.theString) within group (order by e.theString asc nulls last) from EntityOfBasics e", String[].class )
.getResultList();
assertEquals( 1, results.size() );
assertArrayEquals( new String[]{ "abc", "def", null }, results.get( 0 ) );
} );
}
@Test
public void testCompareAgainstArray(SessionFactoryScope scope) {
scope.inSession( em -> {
List<Integer> results = em.createQuery( "select 1 where array('abc','def',null) is not distinct from (select array_agg(e.theString) within group (order by e.theString asc nulls last) from EntityOfBasics e)", Integer.class )
.getResultList();
assertEquals( 1, results.size() );
} );
}
@Test
public void testNodeBuilder(SessionFactoryScope scope) {
scope.inSession( em -> {
final NodeBuilder cb = (NodeBuilder) em.getCriteriaBuilder();
final JpaCriteriaQuery<String[]> cq = cb.createQuery( String[].class );
final JpaRoot<EntityOfBasics> root = cq.from( EntityOfBasics.class );
cq.select( cb.arrayAgg( cb.asc( root.get( "theString" ), false ), root.get( "theString" ) ) );
List<String[]> results = em.createQuery( cq ).getResultList();
assertEquals( 1, results.size() );
assertArrayEquals( new String[]{ "abc", "def", null }, results.get( 0 ) );
} );
}
@Test
@Jira("https://hibernate.atlassian.net/browse/HHH-19666")
public void testNonExistingArrayType(SessionFactoryScope scope) {
scope.inSession( em -> {
List<Integer[]> results = em.createQuery( "select array_agg(e.id) within group (order by e.id) from EntityOfBasics e", Integer[].class )
.getResultList();
assertEquals( 1, results.size() );
assertArrayEquals( new Integer[]{ 1, 2, 3 }, results.get( 0 ) );
} );
}
@Test
@Jira("https://hibernate.atlassian.net/browse/HHH-19681")
@RequiresDialect(PostgreSQLDialect.class)
public void testJsonBJdbcArray(SessionFactoryScope scope) {
scope.inTransaction( session -> {
String sql = "select groupId, array_agg(json_values) " +
"from (VALUES (1,'[1,2]'::jsonb),(1,'[10,20]'::jsonb)) as row(groupId,json_values) " +
"group by groupId";
List<Object[]> result = session.createNativeQuery(sql, Object[].class).getResultList();
assertEquals(1,result.size());
assertEquals(2, result.get(0).length);
assertEquals( 1,result.get(0)[0] );
assertEquals( "[[1, 2], [10, 20]]", Arrays.toString((String[])result.get(0)[1]) );
} );
}
}
| UdtContributor |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/test/java/io/quarkus/mongodb/reactive/MongoTestBase.java | {
"start": 940,
"end": 7010
} | class ____ {
private static final Logger LOGGER = Logger.getLogger(MongoTestBase.class);
public static final String COLLECTION_PREFIX = "mongo-extension-test-";
public static final String DATABASE = "mongo-extension-test-db";
private static TransitionWalker.ReachedState<RunningMongodProcess> MONGO;
protected static String getConfiguredConnectionString() {
return getProperty("connection_string");
}
protected static String getProperty(String name) {
String s = System.getProperty(name);
if (s != null) {
s = s.trim();
if (s.length() > 0) {
return s;
}
}
return null;
}
@BeforeAll
public static void startMongoDatabase() throws IOException {
fixIssue14424();
String uri = getConfiguredConnectionString();
// This switch allow testing against a running mongo database.
if (uri == null) {
Version.Main version = Version.Main.V7_0;
int port = 27018;
LOGGER.infof("Starting Mongo %s on port %s", version, port);
MONGO = Mongod.instance()
.withNet(Start.to(Net.class).initializedWith(Net.builder()
.from(Net.defaults())
.port(port)
.build()))
.withMongodArguments(Start.to(MongodArguments.class)
.initializedWith(MongodArguments.defaults().withUseNoJournal(false)))
.withProcessConfig(
Start.to(ProcessConfig.class)
.initializedWith(ProcessConfig.defaults().withStopTimeoutInMillis(15_000)))
.start(version);
} else {
LOGGER.infof("Using existing Mongo %s", uri);
}
}
@AfterAll
public static void stopMongoDatabase() {
if (MONGO != null) {
try {
MONGO.close();
} catch (Exception e) {
LOGGER.error("Unable to stop MongoDB", e);
}
}
}
protected String getConnectionString() {
if (getConfiguredConnectionString() != null) {
return getConfiguredConnectionString();
} else {
return "mongodb://localhost:27018";
}
}
public static String randomAlphaString(int length) {
StringBuilder builder = new StringBuilder(length);
for (int i = 0; i < length; ++i) {
char c = (char) ((int) (65.0D + 25.0D * Math.random()));
builder.append(c);
}
return builder.toString();
}
protected List<io.quarkus.mongodb.reactive.ReactiveMongoCollection<Document>> getOurCollections(
io.quarkus.mongodb.reactive.ReactiveMongoClient client) {
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
List<String> names = database.listCollectionNames().collect().asList().await().indefinitely();
return names
.stream()
.filter(c -> c.startsWith(COLLECTION_PREFIX))
.map(database::getCollection)
.collect(Collectors.toList());
}
protected void dropOurCollection(io.quarkus.mongodb.reactive.ReactiveMongoClient client) {
List<io.quarkus.mongodb.reactive.ReactiveMongoCollection<Document>> collections = getOurCollections(client);
for (ReactiveMongoCollection<Document> col : collections) {
col.drop().await().indefinitely();
}
}
protected String randomCollection() {
return COLLECTION_PREFIX + randomAlphaString(20);
}
protected Uni<Void> insertDocs(ReactiveMongoClient mongoClient, String collection, int num) {
io.quarkus.mongodb.reactive.ReactiveMongoDatabase database = mongoClient.getDatabase(DATABASE);
io.quarkus.mongodb.reactive.ReactiveMongoCollection<Document> mongoCollection = database
.getCollection(collection);
List<CompletableFuture<InsertOneResult>> list = new ArrayList<>();
for (int i = 0; i < num; i++) {
Document doc = createDoc(i);
list.add(mongoCollection.insertOne(doc).subscribeAsCompletionStage());
}
CompletableFuture<InsertOneResult>[] array = list.toArray(new CompletableFuture[0]);
return Uni.createFrom().completionStage(CompletableFuture.allOf(array));
}
protected Document createDoc() {
Document document = new Document();
document.put("foo", "bar");
document.put("num", 123);
document.put("big", true);
document.put("nullentry", null);
Document nested = new Document();
nested.put("wib", "wob");
document.put("arr", Arrays.asList("x", true, 1.23, null, nested));
document.put("date", new Date());
Document other = new Document();
other.put("quux", "flib");
other.put("myarr", Arrays.asList("blah", true, 312));
document.put("other", other);
return document;
}
protected Document createDoc(int num) {
Document document = new Document();
document.put("foo", "bar" + (num != -1 ? num : ""));
document.put("num", 123);
document.put("big", true);
document.put("nullentry", null);
Document nested = new Document();
nested.put("wib", "wob");
document.put("arr", Arrays.asList("x", true, 12, 1.23, null, nested));
document.put("date", new Date());
document.put("object_id", new ObjectId());
Document other = new Document();
other.put("quux", "flib");
other.put("myarr", Arrays.asList("blah", true, 312));
document.put("other", other);
document.put("longval", 123456789L);
document.put("dblval", 1.23);
return document;
}
public static void fixIssue14424() {
try {
//JDK bug workaround
//https://github.com/quarkusio/quarkus/issues/14424
//force | MongoTestBase |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/RuntimeReinitializedClassBuildItem.java | {
"start": 111,
"end": 452
} | class ____ will be reinitialized at runtime in native mode. This will result in the static
* initializer running twice.
*
* @deprecated Starting with Mandrel/GraalVM 23.1 for JDK 21 this is functionally the same with
* {@link RuntimeInitializedClassBuildItem}.
*/
@Deprecated(since = "3.18", forRemoval = true)
public final | that |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/AbstractFileSizeAssert.java | {
"start": 733,
"end": 1484
} | class ____<SELF extends AbstractFileAssert<SELF>>
extends AbstractLongAssert<AbstractFileSizeAssert<SELF>> {
protected AbstractFileSizeAssert(Long actualFileSize, Class<?> selfType) {
super(actualFileSize, selfType);
}
/**
* Returns to the file on which we ran size assertions on.
* <p>
* Example:
* <pre><code class='java'> File file = File.createTempFile("tmp", "bin");
* Files.write(file.toPath(), new byte[] {1, 1});
*
* assertThat(file).size().isGreaterThan(1L).isLessThan(5L)
* .returnToFile().hasBinaryContent(new byte[] {1, 1});</code></pre>
*
* @return file assertions.
*/
public abstract AbstractFileAssert<SELF> returnToFile();
}
| AbstractFileSizeAssert |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/JDKFromStringDeserializer.java | {
"start": 11838,
"end": 12701
} | class ____ extends JDKFromStringDeserializer
{
public StringBufferDeserializer() { super(StringBuffer.class, -1); }
@Override
public LogicalType logicalType() { return LogicalType.Textual;}
@Override
public Object getEmptyValue(DeserializationContext ctxt) { return new StringBuffer(); }
@Override
public Object deserialize(JsonParser p, DeserializationContext ctxt) throws JacksonException
{
String text = p.getValueAsString();
if (text != null) {
return _deserialize(text, ctxt);
}
return super.deserialize(p, ctxt);
}
@Override
public Object _deserialize(String value, DeserializationContext ctxt) {
return new StringBuffer(value);
}
}
private static | StringBufferDeserializer |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/config/ConfigConverterTest.java | {
"start": 723,
"end": 1074
} | class ____ {
@TestConfigProperty(key = "my.boolean", value = "jo")
@TestConfigProperty(key = "my.duration", value = "5s")
@Test
public void testConverters(Foo foo) {
assertEquals(TimeUnit.SECONDS.toMillis(5), foo.durationVal.toMillis());
assertTrue(foo.boolVal);
}
@Singleton
public static | ConfigConverterTest |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/test/issue5379/GoogleUserDetailsMapper.java | {
"start": 1157,
"end": 1279
} | class ____ {
@Singleton
AmazonDynamoDB amazonDynamoDB() {
return () -> "good";
}
}
| AmazonDynamoDBFactory |
java | quarkusio__quarkus | extensions/flyway/deployment/src/test/java/io/quarkus/flyway/test/FlywayExtensionCleanAtStartTest.java | {
"start": 638,
"end": 1722
} | class ____ {
@Inject
Flyway flyway;
@Inject
AgroalDataSource defaultDataSource;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource("db/migration/V1.0.0__Quarkus.sql")
.addAsResource("clean-at-start-config.properties", "application.properties"));
@Test
@DisplayName("Clean at start correctly")
public void testFlywayConfigInjection() throws SQLException {
try (Connection connection = defaultDataSource.getConnection(); Statement stat = connection.createStatement()) {
try (ResultSet executeQuery = stat.executeQuery("select * from fake_existing_tbl")) {
fail("fake_existing_tbl should not exist");
} catch (JdbcSQLSyntaxErrorException e) {
// expected fake_existing_tbl does not exist
}
}
MigrationInfo current = flyway.info().current();
assertNull(current, "Info is not null");
}
}
| FlywayExtensionCleanAtStartTest |
java | google__truth | extensions/proto/src/main/java/com/google/common/truth/extensions/proto/FieldScopeLogic.java | {
"start": 12979,
"end": 14342
} | class ____ extends FieldMatcherLogicBase {
private final ImmutableSet<Integer> fieldNumbers;
FieldNumbersLogic(Iterable<Integer> fieldNumbers, boolean isRecursive) {
super(isRecursive);
this.fieldNumbers = ImmutableSet.copyOf(fieldNumbers);
}
@Override
public void validate(
Descriptor rootDescriptor, FieldDescriptorValidator fieldDescriptorValidator) {
super.validate(rootDescriptor, fieldDescriptorValidator);
for (int fieldNumber : fieldNumbers) {
FieldDescriptor fieldDescriptor = rootDescriptor.findFieldByNumber(fieldNumber);
checkArgument(
fieldDescriptor != null,
"Message type %s has no field with number %s.",
rootDescriptor.getFullName(),
fieldNumber);
fieldDescriptorValidator.validate(fieldDescriptor);
}
}
@Override
boolean matchesFieldDescriptor(Descriptor descriptor, FieldDescriptor fieldDescriptor) {
return fieldDescriptor.getContainingType() == descriptor
&& fieldNumbers.contains(fieldDescriptor.getNumber());
}
@Override
public String toString() {
return String.format("FieldScopes.allowingFields(%s)", join(fieldNumbers));
}
}
// Matches any specific fields which fall under one of the specified FieldDescriptors.
private static final | FieldNumbersLogic |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java | {
"start": 1180,
"end": 1567
} | class ____
extends TestStoragePolicyCommands {
@BeforeEach
public void clusterSetUp() throws IOException, URISyntaxException {
super.clusterSetUp();
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
fs.getUri().toString());
}
}
| TestWebHDFSStoragePolicyCommands |
java | elastic__elasticsearch | libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyChecker.java | {
"start": 1144,
"end": 3244
} | interface ____ {
void checkAllNetworkAccess(Class<?> callerClass);
void checkChangeFilesHandling(Class<?> callerClass);
void checkChangeJVMGlobalState(Class<?> callerClass);
void checkChangeNetworkHandling(Class<?> callerClass);
void checkCreateClassLoader(Class<?> callerClass);
void checkCreateTempFile(Class<?> callerClass);
void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass);
void checkEntitlementForUrl(Class<?> callerClass, URL url);
void checkEntitlementForURLConnection(Class<?> callerClass, URLConnection urlConnection);
void checkExitVM(Class<?> callerClass);
void checkFileDescriptorRead(Class<?> callerClass);
void checkFileDescriptorWrite(Class<?> callerClass);
void checkFileRead(Class<?> callerClass, File file);
void checkFileRead(Class<?> callerClass, Path path, boolean followLinks) throws NoSuchFileException;
void checkFileRead(Class<?> callerClass, Path path);
void checkFileWithZipMode(Class<?> callerClass, File file, int zipMode);
void checkFileWrite(Class<?> callerClass, File file);
void checkFileWrite(Class<?> callerClass, Path path);
void checkGetFileAttributeView(Class<?> callerClass);
void checkInboundNetworkAccess(Class<?> callerClass);
void checkJarURLAccess(Class<?> callerClass, JarURLConnection connection);
void checkLoadingNativeLibraries(Class<?> callerClass);
void checkLoggingFileHandler(Class<?> callerClass);
void checkManageThreadsEntitlement(Class<?> callerClass);
void checkOutboundNetworkAccess(Class<?> callerClass);
void checkReadStoreAttributes(Class<?> callerClass);
void checkSetHttpsConnectionProperties(Class<?> callerClass);
void checkStartProcess(Class<?> callerClass);
void checkUnsupportedURLProtocolConnection(Class<?> callerClass, String protocol);
void checkURLFileRead(Class<?> callerClass, URL url);
void checkWriteProperty(Class<?> callerClass, String property);
void checkWriteStoreAttributes(Class<?> callerClass);
}
| PolicyChecker |
java | apache__dubbo | dubbo-plugin/dubbo-triple-servlet/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/support/servlet/ServletHttpMessageAdapterFactory.java | {
"start": 1399,
"end": 3022
} | class ____
implements HttpMessageAdapterFactory<ServletHttpRequestAdapter, HttpMetadata, Void> {
private final FrameworkModel frameworkModel;
private final ServletContext servletContext;
private final HttpSessionFactory httpSessionFactory;
public ServletHttpMessageAdapterFactory(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
servletContext = (ServletContext) createDummyServletContext(frameworkModel);
httpSessionFactory = getHttpSessionFactory(frameworkModel);
}
private HttpSessionFactory getHttpSessionFactory(FrameworkModel frameworkModel) {
for (RestExtension extension : frameworkModel.getActivateExtensions(RestExtension.class)) {
if (extension instanceof HttpSessionFactory) {
return (HttpSessionFactory) extension;
}
}
return null;
}
@Override
public ServletHttpRequestAdapter adaptRequest(HttpMetadata rawRequest, HttpChannel channel) {
return new ServletHttpRequestAdapter(rawRequest, channel, servletContext, httpSessionFactory);
}
@Override
public HttpResponse adaptResponse(ServletHttpRequestAdapter request, HttpMetadata rawRequest, Void rawResponse) {
return new ServletHttpResponseAdapter();
}
public Object adaptFilterConfig(String filterName) {
return new DummyFilterConfig(filterName, frameworkModel, servletContext);
}
private Object createDummyServletContext(FrameworkModel frameworkModel) {
return new DummyServletContext(frameworkModel);
}
}
| ServletHttpMessageAdapterFactory |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/converter/xml/MappingJackson2XmlHttpMessageConverterTests.java | {
"start": 1622,
"end": 8895
} | class ____ {
private final MappingJackson2XmlHttpMessageConverter converter = new MappingJackson2XmlHttpMessageConverter();
@Test
void canRead() {
assertThat(converter.canRead(MyBean.class, new MediaType("application", "xml"))).isTrue();
assertThat(converter.canRead(MyBean.class, new MediaType("text", "xml"))).isTrue();
assertThat(converter.canRead(MyBean.class, new MediaType("application", "soap+xml"))).isTrue();
assertThat(converter.canRead(MyBean.class, new MediaType("text", "xml", StandardCharsets.UTF_8))).isTrue();
assertThat(converter.canRead(MyBean.class, new MediaType("text", "xml", StandardCharsets.ISO_8859_1))).isTrue();
}
@Test
void canWrite() {
assertThat(converter.canWrite(MyBean.class, new MediaType("application", "xml"))).isTrue();
assertThat(converter.canWrite(MyBean.class, new MediaType("text", "xml"))).isTrue();
assertThat(converter.canWrite(MyBean.class, new MediaType("application", "soap+xml"))).isTrue();
assertThat(converter.canWrite(MyBean.class, new MediaType("text", "xml", StandardCharsets.UTF_8))).isTrue();
assertThat(converter.canWrite(MyBean.class, new MediaType("text", "xml", StandardCharsets.ISO_8859_1))).isFalse();
}
@Test
void read() throws IOException {
String body = "<MyBean>" +
"<string>Foo</string>" +
"<number>42</number>" +
"<fraction>42.0</fraction>" +
"<array><array>Foo</array>" +
"<array>Bar</array></array>" +
"<bool>true</bool>" +
"<bytes>AQI=</bytes></MyBean>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(StandardCharsets.UTF_8));
inputMessage.getHeaders().setContentType(new MediaType("application", "xml"));
MyBean result = (MyBean) converter.read(MyBean.class, inputMessage);
assertThat(result.getString()).isEqualTo("Foo");
assertThat(result.getNumber()).isEqualTo(42);
assertThat(result.getFraction()).isCloseTo(42F, within(0F));
assertThat(result.getArray()).isEqualTo(new String[]{"Foo", "Bar"});
assertThat(result.isBool()).isTrue();
assertThat(result.getBytes()).isEqualTo(new byte[]{0x1, 0x2});
}
@Test
void write() throws IOException {
MockHttpOutputMessage outputMessage = new MockHttpOutputMessage();
MyBean body = new MyBean();
body.setString("Foo");
body.setNumber(42);
body.setFraction(42F);
body.setArray(new String[]{"Foo", "Bar"});
body.setBool(true);
body.setBytes(new byte[]{0x1, 0x2});
converter.write(body, null, outputMessage);
String result = outputMessage.getBodyAsString(StandardCharsets.UTF_8);
assertThat(result).contains("<string>Foo</string>");
assertThat(result).contains("<number>42</number>");
assertThat(result).contains("<fraction>42.0</fraction>");
assertThat(result).contains("<array><array>Foo</array><array>Bar</array></array>");
assertThat(result).contains("<bool>true</bool>");
assertThat(result).contains("<bytes>AQI=</bytes>");
assertThat(outputMessage.getHeaders().getContentType())
.as("Invalid content-type").isEqualTo(new MediaType("application", "xml", StandardCharsets.UTF_8));
}
@Test
void readInvalidXml() {
String body = "FooBar";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(StandardCharsets.UTF_8));
inputMessage.getHeaders().setContentType(MediaType.APPLICATION_XML);
assertThatExceptionOfType(HttpMessageNotReadableException.class).isThrownBy(() ->
converter.read(MyBean.class, inputMessage));
}
@Test
void readValidXmlWithUnknownProperty() throws IOException {
String body = "<MyBean><string>string</string><unknownProperty>value</unknownProperty></MyBean>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(StandardCharsets.UTF_8));
inputMessage.getHeaders().setContentType(MediaType.APPLICATION_XML);
converter.read(MyBean.class, inputMessage);
// Assert no HttpMessageNotReadableException is thrown
}
@Test
void jsonView() throws Exception {
MockHttpOutputMessage outputMessage = new MockHttpOutputMessage();
JacksonViewBean bean = new JacksonViewBean();
bean.setWithView1("with");
bean.setWithView2("with");
bean.setWithoutView("without");
MappingJacksonValue jacksonValue = new MappingJacksonValue(bean);
jacksonValue.setSerializationView(MyJacksonView1.class);
this.converter.write(jacksonValue, null, outputMessage);
String result = outputMessage.getBodyAsString(StandardCharsets.UTF_8);
assertThat(result).contains("<withView1>with</withView1>");
assertThat(result).doesNotContain("<withView2>with</withView2>");
assertThat(result).doesNotContain("<withoutView>without</withoutView>");
}
@Test
void customXmlMapper() {
new MappingJackson2XmlHttpMessageConverter(new MyXmlMapper());
// Assert no exception is thrown
}
@Test
void readWithExternalReference() throws IOException {
String body = "<!DOCTYPE MyBean SYSTEM \"https://192.168.28.42/1.jsp\" [" +
" <!ELEMENT root ANY >\n" +
" <!ENTITY ext SYSTEM \"" +
new ClassPathResource("external.txt", getClass()).getURI() +
"\" >]><MyBean><string>&ext;</string></MyBean>";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(StandardCharsets.UTF_8));
inputMessage.getHeaders().setContentType(MediaType.APPLICATION_XML);
assertThatExceptionOfType(HttpMessageNotReadableException.class).isThrownBy(() ->
this.converter.read(MyBean.class, inputMessage));
}
@Test
void readWithXmlBomb() {
// https://en.wikipedia.org/wiki/Billion_laughs
// https://msdn.microsoft.com/en-us/magazine/ee335713.aspx
String body = """
<?xml version="1.0"?>
<!DOCTYPE lolz [
<!ENTITY lol "lol">
<!ELEMENT lolz (#PCDATA)>
<!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
<!ENTITY lol2 "&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;">
<!ENTITY lol3 "&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;">
<!ENTITY lol4 "&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;">
<!ENTITY lol5 "&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;">
<!ENTITY lol6 "&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;">
<!ENTITY lol7 "&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;">
<!ENTITY lol8 "&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;">
<!ENTITY lol9 "&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;">
]>
<MyBean>&lol9;</MyBean>""";
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(StandardCharsets.UTF_8));
inputMessage.getHeaders().setContentType(MediaType.APPLICATION_XML);
assertThatExceptionOfType(HttpMessageNotReadableException.class).isThrownBy(() ->
this.converter.read(MyBean.class, inputMessage));
}
@Test
void readNonUnicode() throws Exception {
String body = "<MyBean>" +
"<string>føø bår</string>" +
"</MyBean>";
Charset charset = StandardCharsets.ISO_8859_1;
MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(charset));
inputMessage.getHeaders().setContentType(new MediaType("application", "xml", charset));
MyBean result = (MyBean) converter.read(MyBean.class, inputMessage);
assertThat(result.getString()).isEqualTo("føø bår");
}
public static | MappingJackson2XmlHttpMessageConverterTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.