language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__rocketmq | test/src/main/java/org/apache/rocketmq/test/util/MQRandomUtils.java | {
"start": 855,
"end": 1080
} | class ____ {
public static String getRandomTopic() {
return RandomUtils.getStringByUUID();
}
public static String getRandomConsumerGroup() {
return RandomUtils.getStringByUUID();
}
}
| MQRandomUtils |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotatedElementUtilsTests.java | {
"start": 62883,
"end": 63115
} | interface ____ {
@AliasFor("alias")
String[] value() default {};
@AliasFor("value")
String[] alias() default {};
}
@Retention(RetentionPolicy.RUNTIME)
@ValueAttributeMeta("FromValueAttributeMetaMeta")
@ | ValueAttributeMeta |
java | alibaba__nacos | test/config-test/src/test/java/com/alibaba/nacos/test/config/EmbeddedStorageContextUtilsConfigITCase.java | {
"start": 1416,
"end": 2683
} | class ____ {
@BeforeAll
@AfterAll
static void cleanClientCache() throws Exception {
ConfigCleanUtils.cleanClientCache();
ConfigCleanUtils.changeToNewTestNacosHome(EmbeddedStorageContextUtilsConfigITCase.class.getSimpleName());
}
@Test
void testMultiThreadSqlContexts() throws Exception {
CountDownLatch latch = new CountDownLatch(3);
ExecutorService service = Executors.newFixedThreadPool(3);
for (int i = 1; i < 4; i++) {
final int j = i;
service.submit(() -> {
try {
EmbeddedStorageContextHolder.addSqlContext("test_" + j, j);
EmbeddedStorageContextHolder.addSqlContext("test_" + j * 10, j);
List<ModifyRequest> list = EmbeddedStorageContextHolder.getCurrentSqlContext();
System.out.println(list);
assertEquals("test_" + j, list.get(0).getSql());
assertEquals("test_" + j * 10, list.get(0).getSql());
} finally {
latch.countDown();
}
});
}
latch.await();
}
}
| EmbeddedStorageContextUtilsConfigITCase |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/doublearray/DoubleArrayAssert_containsExactly_Test.java | {
"start": 1219,
"end": 2430
} | class ____ extends DoubleArrayAssertBaseTest {
@Override
protected DoubleArrayAssert invoke_api_method() {
return assertions.containsExactly(1d, 2d);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsExactly(getInfo(assertions), getActual(assertions), arrayOf(1d, 2d));
}
@Test
void should_pass_with_precision_specified_as_last_argument() {
// GIVEN
double[] actual = arrayOf(1.0, 2.0);
// THEN
assertThat(actual).containsExactly(arrayOf(1.01, 2.0), withPrecision(0.1));
assertThat(actual).containsExactly(arrayOf(1.01, 2.0), withPrecision(0.1));
}
@Test
void should_pass_when_multiple_expected_values_are_the_same_according_to_the_given_precision() {
// GIVEN
double[] actual = arrayOf(-1.71, -1.51, -1.51);
// THEN
assertThat(actual).containsExactly(arrayOf(-1.7, -1.6, -1.4101), within(0.1));
}
@Test
void should_pass_with_precision_specified_in_comparator() {
// GIVEN
double[] actual = arrayOf(1.0, 2.0, 2.0, 2.09);
// THEN
assertThat(actual).usingComparatorWithPrecision(0.1)
.containsExactly(1.01, 2.0, 2.0, 2.0);
}
}
| DoubleArrayAssert_containsExactly_Test |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java | {
"start": 1433,
"end": 7892
} | class ____ extends JobTaskRequest<Request> implements ToXContentObject {
public static final ParseField CALC_INTERIM = new ParseField("calc_interim");
public static final ParseField START = new ParseField("start");
public static final ParseField END = new ParseField("end");
public static final ParseField ADVANCE_TIME = new ParseField("advance_time");
public static final ParseField SKIP_TIME = new ParseField("skip_time");
private static final ObjectParser<Request, Void> PARSER = new ObjectParser<>(NAME, Request::new);
static {
PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID);
PARSER.declareBoolean(Request::setCalcInterim, CALC_INTERIM);
PARSER.declareString(Request::setStart, START);
PARSER.declareString(Request::setEnd, END);
PARSER.declareString(Request::setAdvanceTime, ADVANCE_TIME);
PARSER.declareString(Request::setSkipTime, SKIP_TIME);
}
public static Request parseRequest(String jobId, XContentParser parser) {
Request request = PARSER.apply(parser, null);
if (jobId != null) {
request.jobId = jobId;
}
return request;
}
private boolean calcInterim = false;
private boolean waitForNormalization = true;
private boolean refreshRequired = true;
private String start;
private String end;
private String advanceTime;
private String skipTime;
public Request() {}
public Request(StreamInput in) throws IOException {
super(in);
calcInterim = in.readBoolean();
start = in.readOptionalString();
end = in.readOptionalString();
advanceTime = in.readOptionalString();
skipTime = in.readOptionalString();
waitForNormalization = in.readBoolean();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
refreshRequired = in.readBoolean();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(calcInterim);
out.writeOptionalString(start);
out.writeOptionalString(end);
out.writeOptionalString(advanceTime);
out.writeOptionalString(skipTime);
out.writeBoolean(waitForNormalization);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeBoolean(refreshRequired);
}
}
public Request(String jobId) {
super(jobId);
}
public boolean getCalcInterim() {
return calcInterim;
}
public void setCalcInterim(boolean calcInterim) {
this.calcInterim = calcInterim;
}
public String getStart() {
return start;
}
public void setStart(String start) {
this.start = start;
}
public String getEnd() {
return end;
}
public void setEnd(String end) {
this.end = end;
}
public String getAdvanceTime() {
return advanceTime;
}
public void setAdvanceTime(String advanceTime) {
this.advanceTime = advanceTime;
}
public String getSkipTime() {
return skipTime;
}
public void setSkipTime(String skipTime) {
this.skipTime = skipTime;
}
public boolean isWaitForNormalization() {
return waitForNormalization;
}
public boolean isRefreshRequired() {
return refreshRequired;
}
/**
* Used internally. Datafeeds do not need to wait for renormalization to complete before continuing.
*
* For large jobs, renormalization can take minutes, causing datafeeds to needlessly pause execution.
*/
public void setWaitForNormalization(boolean waitForNormalization) {
this.waitForNormalization = waitForNormalization;
}
/**
* Used internally. For datafeeds, there is no need for the results to be searchable after the flush,
* as the datafeed itself does not search them immediately.
*
* Particularly for short bucket spans these refreshes could be a significant cost.
**/
public void setRefreshRequired(boolean refreshRequired) {
this.refreshRequired = refreshRequired;
}
@Override
public int hashCode() {
return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime, waitForNormalization, refreshRequired);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(jobId, other.jobId)
&& calcInterim == other.calcInterim
&& waitForNormalization == other.waitForNormalization
&& refreshRequired == other.refreshRequired
&& Objects.equals(start, other.start)
&& Objects.equals(end, other.end)
&& Objects.equals(advanceTime, other.advanceTime)
&& Objects.equals(skipTime, other.skipTime);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(CALC_INTERIM.getPreferredName(), calcInterim);
if (start != null) {
builder.field(START.getPreferredName(), start);
}
if (end != null) {
builder.field(END.getPreferredName(), end);
}
if (advanceTime != null) {
builder.field(ADVANCE_TIME.getPreferredName(), advanceTime);
}
if (skipTime != null) {
builder.field(SKIP_TIME.getPreferredName(), skipTime);
}
builder.endObject();
return builder;
}
}
public static | Request |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java | {
"start": 3380,
"end": 3477
} | class ____ be regarded as thread-safe
* and used among multiple threads context. </p>
*/
public | can |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 70515,
"end": 70707
} | class ____ {
private @Nullable String name;
void setName(@Nullable String name) {
this.name = name;
}
}
}
@ConfigurationProperties(ignoreUnknownFields = false)
static | Nested |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java | {
"start": 6975,
"end": 8860
} | class ____<IN>
implements KeySelector<IN, Tuple>, ResultTypeQueryable<Tuple> {
private static final long serialVersionUID = 1L;
private final TypeComparator<IN> comparator;
private final int keyLength;
private transient TupleTypeInfo<Tuple> tupleTypeInfo;
/**
* Reusable array to hold the key objects. Since this is initially empty (all positions are
* null), it does not have any serialization problems
*/
@SuppressWarnings("NonSerializableFieldInSerializableClass")
private final Object[] keyArray;
ComparableKeySelector(
TypeComparator<IN> comparator, int keyLength, TupleTypeInfo<Tuple> tupleTypeInfo) {
this.comparator = comparator;
this.keyLength = keyLength;
this.tupleTypeInfo = tupleTypeInfo;
this.keyArray = new Object[keyLength];
}
@Override
public Tuple getKey(IN value) {
Tuple key = Tuple.newInstance(keyLength);
comparator.extractKeys(value, keyArray, 0);
for (int i = 0; i < keyLength; i++) {
key.setField(keyArray[i], i);
}
return key;
}
@Override
public TypeInformation<Tuple> getProducedType() {
if (tupleTypeInfo == null) {
throw new IllegalStateException(
"The return type information is not available after serialization");
}
return tupleTypeInfo;
}
}
// ------------------------------------------------------------------------
/**
* A key selector for selecting individual array fields as keys and returns them as a Tuple.
*
* @param <IN> The type from which the key is extracted, i.e., the array type.
*/
public static final | ComparableKeySelector |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/MatchersTest.java | {
"start": 13359,
"end": 14098
} | class ____ {
public void matches(Object... args) {}
public void callsMatch() {
Object obj = new Object();
matches(obj, "some arg");
}
}
""")
.doTest());
assertThat(thrown).hasMessageThat().contains("IndexOutOfBoundsException");
}
@Test
public void booleanConstantMatchesTrue() {
CompilationTestHelper.newInstance(BooleanConstantTrueChecker.class, getClass())
.addSourceLines(
"test/BooleanConstantTrueCheckerTest.java",
"""
package test;
public | SameArgumentCheckerTest |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/generictyperesolution/Mapper.java | {
"start": 808,
"end": 1101
} | interface ____ {
@Select("select * from users where id = #{id}")
User getUser(User criteria);
@Select("select * from users where name = #{name}")
User getUserByName(String name);
@Insert("insert into users (name, fld2) values (#{name}, #{fld2})")
void insertUser(User user);
}
| Mapper |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/TestExecutionListener.java | {
"start": 836,
"end": 1000
} | interface ____ a {@link Launcher}
* or {@link LauncherExecutionRequest} to be notified of events that occur
* during test execution.
*
* <p>All methods in this | with |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/KerberosMountDecorator.java | {
"start": 2097,
"end": 9470
} | class ____ extends AbstractKubernetesStepDecorator {
private static final Logger LOG = LoggerFactory.getLogger(KerberosMountDecorator.class);
private final AbstractKubernetesParameters kubernetesParameters;
private final SecurityConfiguration securityConfig;
public KerberosMountDecorator(AbstractKubernetesParameters kubernetesParameters) {
this.kubernetesParameters = checkNotNull(kubernetesParameters);
this.securityConfig =
new SecurityConfiguration(kubernetesParameters.getFlinkConfiguration());
}
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
PodBuilder podBuilder = new PodBuilder(flinkPod.getPodWithoutMainContainer());
ContainerBuilder containerBuilder = new ContainerBuilder(flinkPod.getMainContainer());
if (!StringUtils.isNullOrWhitespaceOnly(securityConfig.getKeytab())
&& !StringUtils.isNullOrWhitespaceOnly(securityConfig.getPrincipal())) {
podBuilder =
podBuilder
.editOrNewSpec()
.addNewVolume()
.withName(Constants.KERBEROS_KEYTAB_VOLUME)
.withNewSecret()
.withSecretName(
getKerberosKeytabSecretName(
kubernetesParameters.getClusterId()))
.endSecret()
.endVolume()
.endSpec();
containerBuilder =
containerBuilder
.addNewVolumeMount()
.withName(Constants.KERBEROS_KEYTAB_VOLUME)
.withMountPath(Constants.KERBEROS_KEYTAB_MOUNT_POINT)
.endVolumeMount();
}
if (!StringUtils.isNullOrWhitespaceOnly(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH))) {
final File krb5Conf =
new File(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH));
podBuilder =
podBuilder
.editOrNewSpec()
.addNewVolume()
.withName(Constants.KERBEROS_KRB5CONF_VOLUME)
.withNewConfigMap()
.withName(
getKerberosKrb5confConfigMapName(
kubernetesParameters.getClusterId()))
.withItems(
new KeyToPathBuilder()
.withKey(krb5Conf.getName())
.withPath(KERBEROS_KRB5CONF_FILE)
.build())
.endConfigMap()
.endVolume()
.endSpec();
containerBuilder =
containerBuilder
.addNewVolumeMount()
.withName(Constants.KERBEROS_KRB5CONF_VOLUME)
.withMountPath(
Constants.KERBEROS_KRB5CONF_MOUNT_DIR
+ "/"
+ KERBEROS_KRB5CONF_FILE)
.withSubPath(KERBEROS_KRB5CONF_FILE)
.endVolumeMount();
}
return new FlinkPod(podBuilder.build(), containerBuilder.build());
}
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
final List<HasMetadata> resources = new ArrayList<>();
if (!StringUtils.isNullOrWhitespaceOnly(securityConfig.getKeytab())
&& !StringUtils.isNullOrWhitespaceOnly(securityConfig.getPrincipal())) {
final File keytab = new File(securityConfig.getKeytab());
if (!keytab.exists()) {
LOG.warn(
"Could not found the kerberos keytab file in {}.",
keytab.getAbsolutePath());
} else {
resources.add(
new SecretBuilder()
.withNewMetadata()
.withName(
getKerberosKeytabSecretName(
kubernetesParameters.getClusterId()))
.endMetadata()
.addToData(
keytab.getName(),
Base64.getEncoder()
.encodeToString(Files.toByteArray(keytab)))
.build());
// Set keytab path in the container. One should make sure this decorator is
// triggered before FlinkConfMountDecorator.
kubernetesParameters
.getFlinkConfiguration()
.set(
SecurityOptions.KERBEROS_LOGIN_KEYTAB,
String.format(
"%s/%s",
Constants.KERBEROS_KEYTAB_MOUNT_POINT, keytab.getName()));
}
}
if (!StringUtils.isNullOrWhitespaceOnly(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH))) {
final File krb5Conf =
new File(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH));
if (!krb5Conf.exists()) {
LOG.warn(
"Could not found the kerberos config file in {}.",
krb5Conf.getAbsolutePath());
} else {
resources.add(
new ConfigMapBuilder()
.withNewMetadata()
.withName(
getKerberosKrb5confConfigMapName(
kubernetesParameters.getClusterId()))
.endMetadata()
.addToData(
krb5Conf.getName(),
Files.toString(krb5Conf, StandardCharsets.UTF_8))
.build());
}
}
return resources;
}
public static String getKerberosKeytabSecretName(String clusterId) {
return Constants.KERBEROS_KEYTAB_SECRET_PREFIX + clusterId;
}
public static String getKerberosKrb5confConfigMapName(String clusterID) {
return Constants.KERBEROS_KRB5CONF_CONFIG_MAP_PREFIX + clusterID;
}
}
| KerberosMountDecorator |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/DeferredResultReturnValueHandlerTests.java | {
"start": 1746,
"end": 4708
} | class ____ {
private DeferredResultMethodReturnValueHandler handler;
private MockHttpServletRequest request;
private NativeWebRequest webRequest;
@BeforeEach
void setup() throws Exception {
this.handler = new DeferredResultMethodReturnValueHandler();
this.request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
this.webRequest = new ServletWebRequest(this.request, response);
AsyncWebRequest asyncWebRequest = new StandardServletAsyncWebRequest(this.request, response);
WebAsyncUtils.getAsyncManager(this.webRequest).setAsyncWebRequest(asyncWebRequest);
this.request.setAsyncSupported(true);
}
@Test
public void supportsReturnType() throws Exception {
assertThat(this.handler.supportsReturnType(
on(TestController.class).resolveReturnType(DeferredResult.class, String.class))).isTrue();
assertThat(this.handler.supportsReturnType(
on(TestController.class).resolveReturnType(CompletableFuture.class, String.class))).isTrue();
}
@Test
void doesNotSupportReturnType() {
assertThat(this.handler.supportsReturnType(on(TestController.class).resolveReturnType(String.class))).isFalse();
}
@Test
void deferredResult() throws Exception {
DeferredResult<String> result = new DeferredResult<>();
IllegalStateException ex = new IllegalStateException();
testHandle(result, DeferredResult.class, () -> result.setErrorResult(ex), ex);
}
@Test
void completableFuture() throws Exception {
CompletableFuture<String> future = new CompletableFuture<>();
testHandle(future, CompletableFuture.class, () -> future.complete("foo"), "foo");
}
@Test
void deferredResultWithError() throws Exception {
DeferredResult<String> result = new DeferredResult<>();
testHandle(result, DeferredResult.class, () -> result.setResult("foo"), "foo");
}
@Test
void completableFutureWithError() throws Exception {
CompletableFuture<String> future = new CompletableFuture<>();
IllegalStateException ex = new IllegalStateException();
testHandle(future, CompletableFuture.class, () -> future.completeExceptionally(ex), ex);
}
private void testHandle(Object returnValue, Class<?> asyncType,
Runnable setResultTask, Object expectedValue) throws Exception {
ModelAndViewContainer mavContainer = new ModelAndViewContainer();
MethodParameter returnType = on(TestController.class).resolveReturnType(asyncType, String.class);
this.handler.handleReturnValue(returnValue, returnType, mavContainer, this.webRequest);
assertThat(this.request.isAsyncStarted()).isTrue();
assertThat(WebAsyncUtils.getAsyncManager(this.webRequest).hasConcurrentResult()).isFalse();
setResultTask.run();
assertThat(WebAsyncUtils.getAsyncManager(this.webRequest).hasConcurrentResult()).isTrue();
assertThat(WebAsyncUtils.getAsyncManager(this.webRequest).getConcurrentResult()).isEqualTo(expectedValue);
}
@SuppressWarnings("unused")
static | DeferredResultReturnValueHandlerTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IgniteIdGenEndpointBuilderFactory.java | {
"start": 10390,
"end": 12481
} | interface ____ {
/**
* Ignite ID Generator (camel-ignite)
* Interact with Ignite Atomic Sequences and ID Generators .
*
* Category: cache,clustering
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-ignite
*
* @return the dsl builder for the headers' name.
*/
default IgniteIdGenHeaderNameBuilder igniteIdgen() {
return IgniteIdGenHeaderNameBuilder.INSTANCE;
}
/**
* Ignite ID Generator (camel-ignite)
* Interact with Ignite Atomic Sequences and ID Generators .
*
* Category: cache,clustering
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-ignite
*
* Syntax: <code>ignite-idgen:name</code>
*
* Path parameter: name (required)
* The sequence name.
*
* @param path name
* @return the dsl builder
*/
default IgniteIdGenEndpointBuilder igniteIdgen(String path) {
return IgniteIdGenEndpointBuilderFactory.endpointBuilder("ignite-idgen", path);
}
/**
* Ignite ID Generator (camel-ignite)
* Interact with Ignite Atomic Sequences and ID Generators .
*
* Category: cache,clustering
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-ignite
*
* Syntax: <code>ignite-idgen:name</code>
*
* Path parameter: name (required)
* The sequence name.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path name
* @return the dsl builder
*/
default IgniteIdGenEndpointBuilder igniteIdgen(String componentName, String path) {
return IgniteIdGenEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Ignite ID Generator component.
*/
public static | IgniteIdGenBuilders |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilitiesBuilder.java | {
"start": 697,
"end": 2411
} | class ____ {
private final String name;
private final String type;
private boolean isMetadataField;
private boolean isSearchable;
private boolean isAggregatable;
private boolean isDimension;
private @Nullable TimeSeriesParams.MetricType metricType;
private Map<String, String> meta;
public IndexFieldCapabilitiesBuilder(String name, String type) {
this.name = name;
this.type = type;
this.isSearchable = true;
this.isAggregatable = true;
this.meta = Collections.emptyMap();
}
public IndexFieldCapabilitiesBuilder isMetadataField(boolean isMetadataField) {
this.isMetadataField = isMetadataField;
return this;
}
public IndexFieldCapabilitiesBuilder isSearchable(boolean isSearchable) {
this.isSearchable = isSearchable;
return this;
}
public IndexFieldCapabilitiesBuilder isAggregatable(boolean isAggregatable) {
this.isAggregatable = isAggregatable;
return this;
}
public IndexFieldCapabilitiesBuilder isDimension(boolean isDimension) {
this.isDimension = isDimension;
return this;
}
public IndexFieldCapabilitiesBuilder metricType(TimeSeriesParams.MetricType metricType) {
this.metricType = metricType;
return this;
}
public IndexFieldCapabilitiesBuilder meta(@Nullable Map<String, String> meta) {
this.meta = meta != null ? new TreeMap<>(meta) : null;
return this;
}
public IndexFieldCapabilities build() {
return new IndexFieldCapabilities(name, type, isMetadataField, isSearchable, isAggregatable, isDimension, metricType, meta);
}
}
| IndexFieldCapabilitiesBuilder |
java | qos-ch__slf4j | integration/src/test/java/integrator/Activator.java | {
"start": 1499,
"end": 2114
} | class ____ implements BundleActivator {
private BundleContext m_context = null;
public void start(BundleContext context) {
Logger logger = LoggerFactory.getLogger(this.getClass());
logger.info("Activator.start()");
m_context = context;
}
public void stop(BundleContext context) {
m_context = null;
Logger logger = LoggerFactory.getLogger(this.getClass());
logger.info("Activator.stop");
}
public Bundle[] getBundles() {
if (m_context != null) {
return m_context.getBundles();
}
return null;
}
} | Activator |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authorization/RequiredFactorErrorTests.java | {
"start": 1048,
"end": 2119
} | class ____ {
public static final RequiredFactor REQUIRED_FACTOR = RequiredFactor
.withAuthority(FactorGrantedAuthority.PASSWORD_AUTHORITY)
.validDuration(Duration.ofHours(1))
.build();
@Test
void createMissing() {
RequiredFactorError error = RequiredFactorError.createMissing(REQUIRED_FACTOR);
assertThat(error.isMissing()).isTrue();
assertThat(error.isExpired()).isFalse();
assertThat(error.getRequiredFactor()).isEqualTo(REQUIRED_FACTOR);
}
@Test
void createExpired() {
RequiredFactorError error = RequiredFactorError.createExpired(REQUIRED_FACTOR);
assertThat(error.isMissing()).isFalse();
assertThat(error.isExpired()).isTrue();
assertThat(error.getRequiredFactor()).isEqualTo(REQUIRED_FACTOR);
}
@Test
void createExpiredWhenNullValidDurationThenIllegalArgumentException() {
RequiredFactor requiredPassword = RequiredFactor.withAuthority(FactorGrantedAuthority.PASSWORD_AUTHORITY)
.build();
assertThatIllegalArgumentException().isThrownBy(() -> RequiredFactorError.createExpired(requiredPassword));
}
}
| RequiredFactorErrorTests |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/EmptyHostTest.java | {
"start": 544,
"end": 1038
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(BeanRegisteringRouteUsingObserves.class));
@Test
public void testWithEmptyHost() {
assertEquals(RestAssured
.given()
.header("Host", "")
.get("/hello")
.asString(), "Hello World! ");
}
@ApplicationScoped
static | EmptyHostTest |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/ValueResolverBuilder.java | {
"start": 644,
"end": 4590
} | class ____ assignable to the specified class.
* <p>
* The {@link ValueResolver#appliesTo(EvalContext)} logic defined earlier is replaced with a composite predicate.
*
* @param name
* @return self
*/
public ValueResolverBuilder applyToBaseClass(Class<?> baseClass) {
Predicate<EvalContext> p = new Predicate<EvalContext>() {
@Override
public boolean test(EvalContext ec) {
return ValueResolvers.matchClass(ec, baseClass);
}
};
if (appliesTo != null) {
appliesTo = appliesTo.and(p);
} else {
appliesTo = p;
}
return this;
}
/**
* And applies to a part of an expression where the name is equal to the specified value.
* <p>
* The {@link ValueResolver#appliesTo(EvalContext)} logic defined earlier is replaced with a composite predicate.
*
* @param name
* @return self
*/
public ValueResolverBuilder applyToName(String name) {
Predicate<EvalContext> p = new Predicate<EvalContext>() {
@Override
public boolean test(EvalContext ec) {
return ec.getName().equals(name);
}
};
if (appliesTo != null) {
appliesTo = appliesTo.and(p);
} else {
appliesTo = p;
}
return this;
}
/**
* And applies to a part of an expression where the number of parameters is equal to zero.
* <p>
* The {@link ValueResolver#appliesTo(EvalContext)} logic defined earlier is replaced with a composite predicate.
*
* @return self
*/
public ValueResolverBuilder applyToNoParameters() {
Predicate<EvalContext> p = new Predicate<EvalContext>() {
@Override
public boolean test(EvalContext ec) {
return ec.getParams().size() == 0;
}
};
if (appliesTo != null) {
appliesTo = appliesTo.and(p);
} else {
appliesTo = p;
}
return this;
}
/**
* And applies to a part of an expression where the number of parameters is equal to the specified size.
* <p>
* The {@link ValueResolver#appliesTo(EvalContext)} logic defined earlier is replaced with a composite predicate.
*
* @param size
* @return self
*/
public ValueResolverBuilder applyToParameters(int size) {
Predicate<EvalContext> p = new Predicate<EvalContext>() {
@Override
public boolean test(EvalContext ec) {
return ec.getParams().size() == size;
}
};
if (appliesTo != null) {
appliesTo = appliesTo.and(p);
} else {
appliesTo = p;
}
return this;
}
/**
* The {@link ValueResolver#appliesTo(EvalContext)} logic defined earlier is replaced with the specified predicate.
*
* @param predicate
* @return self
*/
public ValueResolverBuilder appliesTo(Predicate<EvalContext> predicate) {
this.appliesTo = predicate;
return this;
}
public ValueResolverBuilder resolveSync(Function<EvalContext, Object> fun) {
this.resolve = new Function<EvalContext, CompletionStage<Object>>() {
@Override
public CompletionStage<Object> apply(EvalContext context) {
return CompletedStage.of(fun.apply(context));
}
};
return this;
}
public ValueResolverBuilder resolveAsync(Function<EvalContext, CompletionStage<Object>> fun) {
this.resolve = fun;
return this;
}
public ValueResolverBuilder resolveWith(Object value) {
return resolveAsync(ec -> CompletedStage.of(value));
}
public ValueResolver build() {
return new ValueResolverImpl(priority, appliesTo, resolve);
}
private static final | is |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/internals/MemoryBatchReaderTest.java | {
"start": 1220,
"end": 2363
} | class ____ {
@Test
public void testIteration() {
Batch<String> batch1 = Batch.data(
0L, 1, 0L, 3, List.of("a", "b", "c")
);
Batch<String> batch2 = Batch.data(
3L, 2, 1L, 2, List.of("d", "e")
);
Batch<String> batch3 = Batch.data(
5L, 2, 3L, 4, List.of("f", "g", "h", "i")
);
@SuppressWarnings("unchecked")
CloseListener<BatchReader<String>> listener = Mockito.mock(CloseListener.class);
MemoryBatchReader<String> reader = MemoryBatchReader.of(
List.of(batch1, batch2, batch3),
listener
);
assertEquals(0L, reader.baseOffset());
assertEquals(OptionalLong.of(8L), reader.lastOffset());
assertTrue(reader.hasNext());
assertEquals(batch1, reader.next());
assertTrue(reader.hasNext());
assertEquals(batch2, reader.next());
assertTrue(reader.hasNext());
assertEquals(batch3, reader.next());
assertFalse(reader.hasNext());
reader.close();
Mockito.verify(listener).onClose(reader);
}
}
| MemoryBatchReaderTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/env/ExtendedDefaultPropertiesFileDetectionTestPropertySourceTests.java | {
"start": 949,
"end": 1030
} | class ____.
*
* @author Sam Brannen
* @since 4.1
*/
@TestPropertySource
| hierarchy |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/ObservationAuthenticationManager.java | {
"start": 1133,
"end": 2704
} | class ____ implements AuthenticationManager {
private final ObservationRegistry registry;
private final AuthenticationManager delegate;
private ObservationConvention<AuthenticationObservationContext> convention = new AuthenticationObservationConvention();
public ObservationAuthenticationManager(ObservationRegistry registry, AuthenticationManager delegate) {
Assert.notNull(registry, "observationRegistry cannot be null");
Assert.notNull(delegate, "authenticationManager cannot be null");
this.registry = registry;
this.delegate = delegate;
}
@Override
@SuppressWarnings("NullAway") // Dataflow analysis limitation
public Authentication authenticate(Authentication authentication) throws AuthenticationException {
AuthenticationObservationContext context = new AuthenticationObservationContext();
context.setAuthenticationRequest(authentication);
context.setAuthenticationManagerClass(this.delegate.getClass());
return Observation.createNotStarted(this.convention, () -> context, this.registry).observe(() -> {
Authentication result = this.delegate.authenticate(authentication);
context.setAuthenticationResult(result);
return result;
});
}
/**
* Use the provided convention for reporting observation data
* @param convention The provided convention
*
* @since 6.1
*/
public void setObservationConvention(ObservationConvention<AuthenticationObservationContext> convention) {
Assert.notNull(convention, "The observation convention cannot be null");
this.convention = convention;
}
}
| ObservationAuthenticationManager |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java | {
"start": 1232,
"end": 7533
} | class ____ {
private final Object object;
public static ObjectPath createFromResponse(Response response) throws IOException {
byte[] bytes = EntityUtils.toByteArray(response.getEntity());
String contentType = response.getHeader("Content-Type");
XContentType xContentType = XContentType.fromMediaType(contentType);
return ObjectPath.createFromXContent(xContentType.xContent(), new BytesArray(bytes));
}
public static ObjectPath createFromXContent(XContent xContent, BytesReference input) throws IOException {
try (XContentParser parser = XContentHelper.createParserNotCompressed(XContentParserConfiguration.EMPTY, input, xContent.type())) {
if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
return new ObjectPath(parser.listOrderedMap());
}
return new ObjectPath(parser.mapOrdered());
}
}
public ObjectPath(Object object) {
this.object = object;
}
/**
* A utility method that creates an {@link ObjectPath} via {@link #ObjectPath(Object)} returns
* the result of calling {@link #evaluate(String)} on it.
*/
public static <T> T evaluate(Object object, String path) throws IOException {
return new ObjectPath(object).evaluate(path, Stash.EMPTY);
}
/**
* Returns the object corresponding to the provided path if present, null otherwise
*/
public <T> T evaluate(String path) throws IOException {
return evaluate(path, Stash.EMPTY);
}
/**
* Returns the object corresponding to the provided path if present, null otherwise
*/
public <T> T evaluate(String path, Stash stash) throws IOException {
return evaluateExact(stash, parsePath(path));
}
/**
* Returns the object corresponding to the provided path if present, null otherwise
*/
public <T> T evaluateExact(String... path) throws IOException {
return evaluateExact(Stash.EMPTY, path);
}
/**
* Returns the object corresponding to the provided path if present, null otherwise
*/
@SuppressWarnings("unchecked")
public <T> T evaluateExact(Stash stash, String... path) throws IOException {
Object result = this.object;
for (String part : path) {
result = evaluate(part, result, stash);
if (result == null) {
return null;
}
}
return (T) result;
}
@SuppressWarnings("unchecked")
private static Object evaluate(String key, Object objectToEvaluate, Stash stash) throws IOException {
if (stash.containsStashedValue(key)) {
key = stash.getValue(key).toString();
}
if (objectToEvaluate instanceof Map) {
final Map<String, Object> objectAsMap = (Map<String, Object>) objectToEvaluate;
if ("_arbitrary_key_".equals(key)) {
if (objectAsMap.isEmpty()) {
throw new IllegalArgumentException("requested [" + key + "] but the map was empty");
}
if (objectAsMap.containsKey(key)) {
throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key");
}
return objectAsMap.keySet().iterator().next();
}
return objectAsMap.get(key);
}
if (objectToEvaluate instanceof List) {
List<Object> list = (List<Object>) objectToEvaluate;
try {
return list.get(Integer.parseInt(key));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e);
} catch (IndexOutOfBoundsException e) {
throw new IllegalArgumentException(
"element was a list with " + list.size() + " elements, but [" + key + "] was out of bounds",
e
);
}
}
throw new IllegalArgumentException(
"no object found for [" + key + "] within object of class [" + objectToEvaluate.getClass() + "]"
);
}
private static String[] parsePath(String path) {
List<String> list = new ArrayList<>();
StringBuilder current = new StringBuilder();
boolean escape = false;
for (int i = 0; i < path.length(); i++) {
char c = path.charAt(i);
if (c == '\\') {
escape = true;
continue;
}
if (c == '.') {
if (escape) {
escape = false;
} else {
if (current.length() > 0) {
list.add(current.toString());
current.setLength(0);
}
continue;
}
}
current.append(c);
}
if (current.length() > 0) {
list.add(current.toString());
}
return list.toArray(new String[0]);
}
/**
* Create a new {@link XContentBuilder} from the xContent object underlying this {@link ObjectPath}.
* This only works for {@link ObjectPath} instances created from an xContent object, not from nested
* substructures. We throw an {@link UnsupportedOperationException} in those cases.
*/
@SuppressWarnings("unchecked")
public XContentBuilder toXContentBuilder(XContent xContent) throws IOException {
XContentBuilder builder = XContentBuilder.builder(xContent);
if (this.object instanceof Map) {
builder.map((Map<String, Object>) this.object);
} else {
throw new UnsupportedOperationException("Only ObjectPath created from a map supported.");
}
return builder;
}
@Override
public String toString() {
return "ObjectPath[" + object + "]";
}
public int evaluateArraySize(String path) throws IOException {
final List<?> list = evaluate(path);
return list.size();
}
public Set<String> evaluateMapKeys(String path) throws IOException {
final Map<String, ?> map = evaluate(path);
return map.keySet();
}
}
| ObjectPath |
java | apache__flink | flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImplTest.java | {
"start": 10896,
"end": 14367
} | class ____ implements S3AccessHelper {
private final List<RecoverableMultiPartUploadImplTest.TestUploadPartResult>
completePartsUploaded = new ArrayList<>();
private final List<RecoverableMultiPartUploadImplTest.TestPutObjectResult>
incompletePartsUploaded = new ArrayList<>();
List<RecoverableMultiPartUploadImplTest.TestUploadPartResult> getCompletePartsUploaded() {
return completePartsUploaded;
}
List<RecoverableMultiPartUploadImplTest.TestPutObjectResult> getIncompletePartsUploaded() {
return incompletePartsUploaded;
}
@Override
public String startMultiPartUpload(String key) throws IOException {
return createMPUploadId(key);
}
@Override
public UploadPartResult uploadPart(
String key, String uploadId, int partNumber, File inputFile, long length)
throws IOException {
final byte[] content =
getFileContentBytes(inputFile, MathUtils.checkedDownCast(length));
return storeAndGetUploadPartResult(key, partNumber, content);
}
@Override
public PutObjectResult putObject(String key, File inputFile) throws IOException {
final byte[] content =
getFileContentBytes(inputFile, MathUtils.checkedDownCast(inputFile.length()));
return storeAndGetPutObjectResult(key, content);
}
@Override
public boolean deleteObject(String key) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long getObject(String key, File targetLocation) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public CompleteMultipartUploadResult commitMultiPartUpload(
String key,
String uploadId,
List<PartETag> partETags,
long length,
AtomicInteger errorCount)
throws IOException {
return null;
}
@Override
public ObjectMetadata getObjectMetadata(String key) throws IOException {
throw new UnsupportedOperationException();
}
private byte[] getFileContentBytes(File file, int length) throws IOException {
final byte[] content = new byte[length];
IOUtils.readFully(new FileInputStream(file), content, 0, length);
return content;
}
private RecoverableMultiPartUploadImplTest.TestUploadPartResult storeAndGetUploadPartResult(
String key, int number, byte[] payload) {
final RecoverableMultiPartUploadImplTest.TestUploadPartResult result =
createUploadPartResult(key, number, payload);
completePartsUploaded.add(result);
return result;
}
private RecoverableMultiPartUploadImplTest.TestPutObjectResult storeAndGetPutObjectResult(
String key, byte[] payload) {
final RecoverableMultiPartUploadImplTest.TestPutObjectResult result =
createPutObjectResult(key, payload);
incompletePartsUploaded.add(result);
return result;
}
}
/** A {@link PutObjectResult} that also contains the actual content of the uploaded part. */
private static | StubMultiPartUploader |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/reassign/LogDirMoveState.java | {
"start": 900,
"end": 1014
} | interface ____ {
/**
* True if the move is done without errors.
*/
boolean done();
}
| LogDirMoveState |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/jaxb/OrderShippingDetailsDto.java | {
"start": 235,
"end": 734
} | class ____ {
private String orderShippedFrom;
private String orderShippedTo;
public String getOrderShippedFrom() {
return orderShippedFrom;
}
public void setOrderShippedFrom(String orderShippedFrom) {
this.orderShippedFrom = orderShippedFrom;
}
public String getOrderShippedTo() {
return orderShippedTo;
}
public void setOrderShippedTo(String orderShippedTo) {
this.orderShippedTo = orderShippedTo;
}
}
| OrderShippingDetailsDto |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/multipart/support/StandardMultipartHttpServletRequest.java | {
"start": 2059,
"end": 6968
} | class ____ extends AbstractMultipartHttpServletRequest {
@SuppressWarnings("NullAway.Init")
private Set<String> multipartParameterNames;
/**
* Create a new StandardMultipartHttpServletRequest wrapper for the given request,
* immediately parsing the multipart content.
* @param request the servlet request to wrap
* @throws MultipartException if parsing failed
*/
public StandardMultipartHttpServletRequest(HttpServletRequest request) throws MultipartException {
this(request, false);
}
/**
* Create a new StandardMultipartHttpServletRequest wrapper for the given request.
* @param request the servlet request to wrap
* @param lazyParsing whether multipart parsing should be triggered lazily on
* first access of multipart files or parameters
* @throws MultipartException if an immediate parsing attempt failed
* @since 3.2.9
*/
public StandardMultipartHttpServletRequest(HttpServletRequest request, boolean lazyParsing)
throws MultipartException {
super(request);
if (!lazyParsing) {
parseRequest(request);
}
}
private void parseRequest(HttpServletRequest request) {
try {
Collection<Part> parts = request.getParts();
this.multipartParameterNames = CollectionUtils.newLinkedHashSet(parts.size());
MultiValueMap<String, MultipartFile> files = new LinkedMultiValueMap<>(parts.size());
for (Part part : parts) {
String headerValue = part.getHeader(HttpHeaders.CONTENT_DISPOSITION);
ContentDisposition disposition = ContentDisposition.parse(headerValue);
String filename = disposition.getFilename();
if (filename != null) {
files.add(part.getName(), new StandardMultipartFile(part, filename));
}
else {
this.multipartParameterNames.add(part.getName());
}
}
setMultipartFiles(files);
}
catch (Throwable ex) {
handleParseFailure(ex);
}
}
protected void handleParseFailure(Throwable ex) {
// MaxUploadSizeExceededException ?
Throwable cause = ex;
do {
String msg = cause.getMessage();
if (msg != null) {
msg = msg.toLowerCase(Locale.ROOT);
if ((msg.contains("exceed") && (msg.contains("size") || msg.contains("length"))) ||
(msg.contains("request") && (msg.contains("big") || msg.contains("large")))) {
throw new MaxUploadSizeExceededException(-1, ex);
}
}
cause = cause.getCause();
}
while (cause != null);
// General MultipartException
throw new MultipartException("Failed to parse multipart servlet request", ex);
}
@Override
protected void initializeMultipart() {
parseRequest(getRequest());
}
@Override
public Enumeration<String> getParameterNames() {
if (this.multipartParameterNames == null) {
initializeMultipart();
}
if (this.multipartParameterNames.isEmpty()) {
return super.getParameterNames();
}
// Servlet getParameterNames() not guaranteed to include multipart form items
// (for example, on WebLogic 12) -> need to merge them here to be on the safe side
Set<String> paramNames = new LinkedHashSet<>();
Enumeration<String> paramEnum = super.getParameterNames();
while (paramEnum.hasMoreElements()) {
paramNames.add(paramEnum.nextElement());
}
paramNames.addAll(this.multipartParameterNames);
return Collections.enumeration(paramNames);
}
@Override
public Map<String, String[]> getParameterMap() {
if (this.multipartParameterNames == null) {
initializeMultipart();
}
if (this.multipartParameterNames.isEmpty()) {
return super.getParameterMap();
}
// Servlet getParameterMap() not guaranteed to include multipart form items
// (for example, on WebLogic 12) -> need to merge them here to be on the safe side
Map<String, String[]> paramMap = new LinkedHashMap<>(super.getParameterMap());
for (String paramName : this.multipartParameterNames) {
if (!paramMap.containsKey(paramName)) {
paramMap.put(paramName, getParameterValues(paramName));
}
}
return paramMap;
}
@Override
public @Nullable String getMultipartContentType(String paramOrFileName) {
try {
Part part = getPart(paramOrFileName);
return (part != null ? part.getContentType() : null);
}
catch (Throwable ex) {
throw new MultipartException("Could not access multipart servlet request", ex);
}
}
@Override
public @Nullable HttpHeaders getMultipartHeaders(String paramOrFileName) {
try {
Part part = getPart(paramOrFileName);
if (part != null) {
HttpHeaders headers = new HttpHeaders();
for (String headerName : part.getHeaderNames()) {
headers.put(headerName, new ArrayList<>(part.getHeaders(headerName)));
}
return headers;
}
else {
return null;
}
}
catch (Throwable ex) {
throw new MultipartException("Could not access multipart servlet request", ex);
}
}
/**
* Spring MultipartFile adapter, wrapping a Servlet Part object.
*/
@SuppressWarnings("serial")
private static | StandardMultipartHttpServletRequest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/env/repeatable/DefaultPropertiesFileDetectionRepeatedTestPropertySourceTests.java | {
"start": 1020,
"end": 1278
} | class ____
extends AbstractRepeatableTestPropertySourceTests {
@Test
void test() {
assertEnvironmentValue("default.value", "default file");
assertEnvironmentValue("key1", "local file");
}
}
| DefaultPropertiesFileDetectionRepeatedTestPropertySourceTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java | {
"start": 1879,
"end": 7962
} | class ____ {
private double tokensPerMicros;
private double accumulatedTokensLimit;
private double accumulatedTokens;
private Instant nextTokenAvailability;
private final Sleeper sleeper;
private final Clock clock;
/**
* @param accumulatedTokensLimit the limit for tokens stashed in the bucket
* @param tokensPerTimeUnit the number of tokens to produce per the time unit passed in
* @param unit the time unit frequency for generating tokens
*/
public RateLimiter(double accumulatedTokensLimit, double tokensPerTimeUnit, TimeUnit unit) {
this(accumulatedTokensLimit, tokensPerTimeUnit, unit, new TimeUnitSleeper(), Clock.systemUTC());
}
// default for testing
RateLimiter(double accumulatedTokensLimit, double tokensPerTimeUnit, TimeUnit unit, Sleeper sleeper, Clock clock) {
this.sleeper = Objects.requireNonNull(sleeper);
this.clock = Objects.requireNonNull(clock);
nextTokenAvailability = Instant.MIN;
setRate(accumulatedTokensLimit, tokensPerTimeUnit, unit);
}
public synchronized void setRate(double newAccumulatedTokensLimit, double newTokensPerTimeUnit, TimeUnit newUnit) {
Objects.requireNonNull(newUnit);
if (newAccumulatedTokensLimit < 0) {
throw new IllegalArgumentException("Accumulated tokens limit must be greater than or equal to 0");
}
if (Double.isInfinite(newAccumulatedTokensLimit)) {
throw new IllegalArgumentException(
Strings.format("Accumulated tokens limit must be less than or equal to %s", Double.MAX_VALUE)
);
}
if (newTokensPerTimeUnit <= 0) {
throw new IllegalArgumentException("Tokens per time unit must be greater than 0");
}
if (newTokensPerTimeUnit == Double.POSITIVE_INFINITY) {
throw new IllegalArgumentException(Strings.format("Tokens per time unit must be less than or equal to %s", Double.MAX_VALUE));
}
// If the new token limit is smaller than what we've accumulated already we need to drop tokens to meet the new token limit
accumulatedTokens = Math.min(accumulatedTokens, newAccumulatedTokensLimit);
accumulatedTokensLimit = newAccumulatedTokensLimit;
var unitsInMicros = newUnit.toMicros(1);
tokensPerMicros = newTokensPerTimeUnit / unitsInMicros;
assert Double.isInfinite(tokensPerMicros) == false : "Tokens per microsecond should not be infinity";
accumulateTokens();
}
/**
* Causes the thread to wait until the tokens are available.
* This reserves token in advance leading to a reduction of accumulated tokens.
* @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here
* @throws InterruptedException _
*/
public void acquire(int tokens) throws InterruptedException {
sleeper.sleep(reserveInternal(tokens));
}
/**
* Returns the amount of time to wait for the tokens to become available but does not reserve them in advance.
* A caller will need to call {@link #reserve(int)} or {@link #acquire(int)} after this call.
* @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here. Must be greater than 0.
* @return the amount of time to wait
*/
public TimeValue timeToReserve(int tokens) {
var timeToReserveRes = timeToReserveInternal(tokens);
return new TimeValue((long) timeToReserveRes.microsToWait, TimeUnit.MICROSECONDS);
}
private TimeToReserve timeToReserveInternal(int tokens) {
validateTokenRequest(tokens);
double microsToWait;
accumulateTokens();
var accumulatedTokensToUse = Math.min(tokens, accumulatedTokens);
var additionalTokensRequired = tokens - accumulatedTokensToUse;
microsToWait = additionalTokensRequired / tokensPerMicros;
return new TimeToReserve(microsToWait, accumulatedTokensToUse);
}
private record TimeToReserve(double microsToWait, double accumulatedTokensToUse) {}
private static void validateTokenRequest(int tokens) {
if (tokens <= 0) {
throw new IllegalArgumentException("Requested tokens must be positive");
}
}
/**
* Returns the amount of time to wait for the tokens to become available.
* This reserves tokens in advance leading to a reduction of accumulated tokens.
* @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here. Must be greater than 0.
* @return the amount of time to wait
*/
public TimeValue reserve(int tokens) {
return new TimeValue(reserveInternal(tokens), TimeUnit.MICROSECONDS);
}
private synchronized long reserveInternal(int tokens) {
var timeToReserveRes = timeToReserveInternal(tokens);
accumulatedTokens -= timeToReserveRes.accumulatedTokensToUse;
nextTokenAvailability = nextTokenAvailability.plus((long) timeToReserveRes.microsToWait, ChronoUnit.MICROS);
return (long) timeToReserveRes.microsToWait;
}
private synchronized void accumulateTokens() {
var now = Instant.now(clock);
if (now.isAfter(nextTokenAvailability)) {
var elapsedTimeMicros = microsBetweenExact(nextTokenAvailability, now);
var newTokens = tokensPerMicros * elapsedTimeMicros;
accumulatedTokens = Math.min(accumulatedTokensLimit, accumulatedTokens + newTokens);
nextTokenAvailability = now;
}
}
private static long microsBetweenExact(Instant start, Instant end) {
try {
return ChronoUnit.MICROS.between(start, end);
} catch (ArithmeticException e) {
if (end.isAfter(start)) {
return Long.MAX_VALUE;
}
return 0;
}
}
// default for testing
Instant getNextTokenAvailability() {
return nextTokenAvailability;
}
public | RateLimiter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/StructNestedComponentAssociationErrorTest.java | {
"start": 3171,
"end": 3272
} | class ____ {
Person2 person;
}
@Embeddable
@Struct(name = "person_type")
public static | AuthorInfo2 |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java | {
"start": 2920,
"end": 12083
} | class ____ extends ESTestCase {
public void testBottomFieldSort() throws Exception {
testCase(false, false);
}
public void testScrollDisableBottomFieldSort() throws Exception {
testCase(true, false);
}
public void testCollapseDisableBottomFieldSort() throws Exception {
testCase(false, true);
}
private void testCase(boolean withScroll, boolean withCollapse) throws Exception {
final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(
0,
System.nanoTime(),
System::nanoTime
);
Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
DiscoveryNode primaryNode = DiscoveryNodeUtils.create("node1");
DiscoveryNode replicaNode = DiscoveryNodeUtils.create("node2");
lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
int numShards = randomIntBetween(10, 20);
int numConcurrent = randomIntBetween(1, 4);
AtomicInteger numWithTopDocs = new AtomicInteger();
AtomicInteger successfulOps = new AtomicInteger();
AtomicBoolean canReturnNullResponse = new AtomicBoolean(false);
var transportService = mock(TransportService.class);
when(transportService.getLocalNode()).thenReturn(primaryNode);
SearchTransportService searchTransportService = new SearchTransportService(transportService, null, null) {
@Override
public void sendExecuteQuery(
Transport.Connection connection,
ShardSearchRequest request,
SearchTask task,
ActionListener<SearchPhaseResult> listener
) {
int shardId = request.shardId().id();
if (request.canReturnNullResponseIfMatchNoDocs()) {
canReturnNullResponse.set(true);
}
if (request.getBottomSortValues() != null) {
assertNotEquals(shardId, (int) request.getBottomSortValues().getFormattedSortValues()[0]);
numWithTopDocs.incrementAndGet();
}
QuerySearchResult queryResult = new QuerySearchResult(
new ShardSearchContextId("N/A", 123),
new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null),
null
);
try {
SortField sortField = new SortField("timestamp", SortField.Type.LONG);
if (withCollapse) {
queryResult.topDocs(
new TopDocsAndMaxScore(
new TopFieldGroups(
"collapse_field",
new TotalHits(
1,
withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO
),
new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) },
new SortField[] { sortField },
new Object[] { 0L }
),
Float.NaN
),
new DocValueFormat[] { DocValueFormat.RAW }
);
} else {
queryResult.topDocs(
new TopDocsAndMaxScore(
new TopFieldDocs(
new TotalHits(
1,
withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO
),
new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) },
new SortField[] { sortField }
),
Float.NaN
),
new DocValueFormat[] { DocValueFormat.RAW }
);
}
queryResult.from(0);
queryResult.size(1);
successfulOps.incrementAndGet();
queryResult.incRef();
new Thread(() -> ActionListener.respondAndRelease(listener, queryResult)).start();
} finally {
queryResult.decRef();
}
}
};
CountDownLatch latch = new CountDownLatch(1);
List<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter(
"idx",
new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS),
numShards,
randomBoolean(),
primaryNode,
replicaNode
);
final SearchRequest searchRequest = new SearchRequest();
searchRequest.setMaxConcurrentShardRequests(numConcurrent);
searchRequest.setBatchedReduceSize(2);
searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp")));
if (withScroll) {
searchRequest.scroll(TimeValue.timeValueMillis(100));
} else {
searchRequest.source().trackTotalHitsUpTo(2);
}
if (withCollapse) {
searchRequest.source().collapse(new CollapseBuilder("collapse_field"));
}
searchRequest.allowPartialSearchResults(false);
SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder());
SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
try (
QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(
searchRequest,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
new NoopCircuitBreaker(CircuitBreaker.REQUEST),
controller,
task::isCancelled,
task.getProgressListener(),
shardsIter.size(),
exc -> {}
)
) {
SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction(
logger,
null,
searchTransportService,
(clusterAlias, node) -> lookup.get(node),
Collections.singletonMap("_na_", AliasFilter.EMPTY),
Collections.emptyMap(),
EsExecutors.DIRECT_EXECUTOR_SERVICE,
resultConsumer,
searchRequest,
null,
shardsIter,
timeProvider,
new ClusterState.Builder(new ClusterName("test")).build(),
task,
SearchResponse.Clusters.EMPTY,
null,
false,
new SearchResponseMetrics(TelemetryProvider.NOOP.getMeterRegistry()),
Map.of()
) {
@Override
protected SearchPhase getNextPhase() {
return new SearchPhase("test") {
@Override
protected void run() {
latch.countDown();
}
};
}
};
action.start();
latch.await();
assertThat(successfulOps.get(), equalTo(numShards));
if (withScroll) {
assertFalse(canReturnNullResponse.get());
assertThat(numWithTopDocs.get(), equalTo(0));
} else if (withCollapse) {
assertThat(numWithTopDocs.get(), equalTo(0));
}
SearchPhaseController.ReducedQueryPhase phase = action.results.reduce();
assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1));
if (withScroll) {
assertThat(phase.totalHits().value(), equalTo((long) numShards));
assertThat(phase.totalHits().relation(), equalTo(TotalHits.Relation.EQUAL_TO));
} else {
assertThat(phase.totalHits().value(), equalTo(2L));
assertThat(phase.totalHits().relation(), equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO));
}
assertThat(phase.sortedTopDocs().scoreDocs().length, equalTo(1));
assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class));
assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1));
assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0));
}
}
static | SearchQueryThenFetchAsyncActionTests |
java | apache__camel | tooling/camel-tooling-util/src/test/java/org/apache/camel/tooling/util/JavadocHelperTest.java | {
"start": 6778,
"end": 7033
} | class ____ not need to be instantiated directly. Instead, use";
String s2 = JavadocHelper.sanitizeDescription(s, false);
Assertions.assertEquals(
"Provides methods to create, delete, find, and update Customer objects. This | does |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestContextAnnotationUtilsTests.java | {
"start": 25225,
"end": 25281
} | class ____ {
}
@Meta1
| HasLocalAndMetaComponentAnnotation |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/qualifiers/defaultvalues/Cat.java | {
"start": 143,
"end": 242
} | class ____ implements Animal {
@Override
public int noOfLeg() {
return 4;
}
}
| Cat |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2478/Issue2478Mapper.java | {
"start": 515,
"end": 872
} | class ____ {
protected final String name;
protected final Shop shop;
public Product(String name, Shop shop) {
this.name = name;
this.shop = shop;
}
public String getName() {
return name;
}
public Shop getShop() {
return shop;
}
}
| Product |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/forms/FormsJacksonAnnotationsTest.java | {
"start": 1779,
"end": 3775
} | class ____ {
private static final String SPEC_NAME = "FormsJacksonAnnotationsTest";
private static final String JSON_WITH_PAGES = "{\"title\":\"Building Microservices\",\"paginas\":100}";
private static final String JSON_WITHOUT_PAGES = "{\"title\":\"Building Microservices\"}";
@Test
public void serverFormSubmissionsSupportJacksonAnnotations() throws IOException {
String body = "title=Building+Microservices&paginas=100";
assertWithBody(body, JSON_WITH_PAGES);
body = "title=Building+Microservices&pages=";
assertWithBody(body, JSON_WITHOUT_PAGES);
}
@Test
public void httpClientFormSubmissionsDoesNotSupportJacksonAnnotations() throws IOException {
Book book = new Book("Building Microservices", 100);
// Jackson annotations (@JsonProperty) are not supported by the HTTP Client and form-url encoded payload.
assertWithBody(book, JSON_WITHOUT_PAGES);
}
private static void assertWithBody(Object body, String expectedJson) throws IOException {
TestScenario.builder()
.specName(SPEC_NAME)
.request(HttpRequest.POST("/book/save", body).contentType(MediaType.APPLICATION_FORM_URLENCODED_TYPE))
.assertion((server, request) ->
AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.assertResponse(httpResponse -> {
Optional<String> bodyOptional = httpResponse.getBody(String.class);
assertTrue(bodyOptional.isPresent());
assertEquals(expectedJson, bodyOptional.get());
})
.build()))
.run();
}
@Requires(property = "spec.name", value = SPEC_NAME)
@Controller("/book")
static | FormsJacksonAnnotationsTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/StructComponentErrorTest.java | {
"start": 1511,
"end": 1802
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String title;
private String author;
@Struct( name = "publisher_type")
private Publisher1 ebookPublisher;
@Struct( name = "publisher_type")
private Publisher2 paperBackPublisher;
}
@Embeddable
public static | Book |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/SecurityAutoConfigurationTests.java | {
"start": 3816,
"end": 3996
} | class ____ {
@Bean
AuthenticationEventPublisher authenticationEventPublisher() {
return new TestAuthenticationEventPublisher();
}
| AuthenticationEventPublisherConfiguration |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/example/MyVmConsumer.java | {
"start": 1168,
"end": 1908
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(MyVmConsumer.class);
@EndpointInject("mock:result")
private ProducerTemplate destination;
@Consume("seda:start")
public void doSomething(String body, Exchange exchange) {
ObjectHelper.notNull(destination, "destination");
ObjectHelper.notNull(exchange, "exchange");
ObjectHelper.notNull(exchange.getContext(), "exchange.getContext");
LOG.info("Received body: {}", body);
destination.sendBody(body);
}
public ProducerTemplate getDestination() {
return destination;
}
public void setDestination(ProducerTemplate destination) {
this.destination = destination;
}
}
| MyVmConsumer |
java | netty__netty | transport-native-unix-common/src/main/java/io/netty/channel/unix/GenericUnixChannelOption.java | {
"start": 825,
"end": 1517
} | class ____<T> extends UnixChannelOption<T> {
private final int level;
private final int optname;
GenericUnixChannelOption(String name, int level, int optname) {
super(name);
this.level = level;
this.optname = optname;
}
/**
* Returns the level. See <a href="https://linux.die.net/man/2/setsockopt">man setsockopt</a>
*
* @return the level.
*/
public int level() {
return level;
}
/**
* Returns the optname. See <a href="https://linux.die.net/man/2/setsockopt">man setsockopt</a>
*
* @return the level.
*/
public int optname() {
return optname;
}
}
| GenericUnixChannelOption |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/jpa/PersistenceUnitInfoAdapter.java | {
"start": 915,
"end": 2563
} | class ____ implements PersistenceUnitInfo {
private final String name = "persistenceUnitInfoAdapter@" + identityHashCode( this );
private Properties properties;
public String getPersistenceUnitName() {
return name;
}
public String getPersistenceProviderClassName() {
return HibernatePersistenceProvider.class.getName();
}
@Override
public String getScopeAnnotationName() {
return null;
}
@Override
public List<String> getQualifierAnnotationNames() {
return List.of();
}
@Override @SuppressWarnings("removal")
public PersistenceUnitTransactionType getTransactionType() {
return null;
}
public DataSource getJtaDataSource() {
return null;
}
public DataSource getNonJtaDataSource() {
return null;
}
public List<String> getMappingFileNames() {
return emptyList();
}
public List<URL> getJarFileUrls() {
return emptyList();
}
public URL getPersistenceUnitRootUrl() {
return null;
}
public List<String> getManagedClassNames() {
return emptyList();
}
public boolean excludeUnlistedClasses() {
return false;
}
public SharedCacheMode getSharedCacheMode() {
return null;
}
public ValidationMode getValidationMode() {
return null;
}
public Properties getProperties() {
if ( properties == null ) {
properties = new Properties();
}
return properties;
}
public String getPersistenceXMLSchemaVersion() {
return null;
}
public ClassLoader getClassLoader() {
return Thread.currentThread().getContextClassLoader();
}
public void addTransformer(ClassTransformer transformer) {
}
public ClassLoader getNewTempClassLoader() {
return null;
}
}
| PersistenceUnitInfoAdapter |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxOnErrorResume.java | {
"start": 1889,
"end": 3184
} | class ____<T>
extends Operators.MultiSubscriptionSubscriber<T, T> {
final Function<? super Throwable, ? extends Publisher<? extends T>> nextFactory;
boolean second;
ResumeSubscriber(CoreSubscriber<? super T> actual,
Function<? super Throwable, ? extends Publisher<? extends T>> nextFactory) {
super(actual);
this.nextFactory = nextFactory;
}
@Override
public void onSubscribe(Subscription s) {
if (!second) {
actual.onSubscribe(this);
}
set(s);
}
@Override
public void onNext(T t) {
actual.onNext(t);
if (!second) {
producedOne();
}
}
@Override
public void onError(Throwable t) {
if (!second) {
second = true;
Publisher<? extends T> p;
try {
p = Operators.toFluxOrMono(Objects.requireNonNull(nextFactory.apply(t),
"The nextFactory returned a null Publisher"));
}
catch (Throwable e) {
Throwable _e = Operators.onOperatorError(e, actual.currentContext());
_e = Exceptions.addSuppressed(_e, t);
actual.onError(_e);
return;
}
p.subscribe(this);
}
else {
actual.onError(t);
}
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
}
}
| ResumeSubscriber |
java | spring-projects__spring-boot | module/spring-boot-webmvc/src/test/java/org/springframework/boot/webmvc/autoconfigure/DispatcherServletAutoConfigurationTests.java | {
"start": 9266,
"end": 9531
} | class ____ {
@Bean(name = DispatcherServletAutoConfiguration.DEFAULT_DISPATCHER_SERVLET_BEAN_NAME)
DispatcherServlet dispatcherServlet() {
return new DispatcherServlet();
}
}
@Configuration(proxyBeanMethods = false)
static | CustomDispatcherServletSameName |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/parallel/ParallelJoin.java | {
"start": 9305,
"end": 13946
} | class ____<T> extends JoinSubscriptionBase<T> {
private static final long serialVersionUID = -5737965195918321883L;
JoinSubscriptionDelayError(Subscriber<? super T> actual, int n, int prefetch) {
super(actual, n, prefetch);
}
@Override
void onNext(JoinInnerSubscriber<T> inner, T value) {
if (get() == 0 && compareAndSet(0, 1)) {
if (requested.get() != 0) {
downstream.onNext(value);
if (requested.get() != Long.MAX_VALUE) {
requested.decrementAndGet();
}
inner.request(1);
} else {
SimplePlainQueue<T> q = inner.getQueue();
if (!q.offer(value)) {
inner.cancel();
errors.tryAddThrowableOrReport(new QueueOverflowException());
done.decrementAndGet();
drainLoop();
return;
}
}
if (decrementAndGet() == 0) {
return;
}
} else {
SimplePlainQueue<T> q = inner.getQueue();
if (!q.offer(value)) {
inner.cancel();
errors.tryAddThrowableOrReport(new QueueOverflowException());
done.decrementAndGet();
}
if (getAndIncrement() != 0) {
return;
}
}
drainLoop();
}
@Override
void onError(Throwable e) {
if (errors.tryAddThrowableOrReport(e)) {
done.decrementAndGet();
drain();
}
}
@Override
void onComplete() {
done.decrementAndGet();
drain();
}
@Override
void drain() {
if (getAndIncrement() != 0) {
return;
}
drainLoop();
}
void drainLoop() {
int missed = 1;
JoinInnerSubscriber<T>[] s = this.subscribers;
int n = s.length;
Subscriber<? super T> a = this.downstream;
for (;;) {
long r = requested.get();
long e = 0;
middle:
while (e != r) {
if (cancelled) {
cleanup();
return;
}
boolean d = done.get() == 0;
boolean empty = true;
for (int i = 0; i < n; i++) {
JoinInnerSubscriber<T> inner = s[i];
SimplePlainQueue<T> q = inner.queue;
if (q != null) {
T v = q.poll();
if (v != null) {
empty = false;
a.onNext(v);
inner.requestOne();
if (++e == r) {
break middle;
}
}
}
}
if (d && empty) {
errors.tryTerminateConsumer(a);
return;
}
if (empty) {
break;
}
}
if (e == r) {
if (cancelled) {
cleanup();
return;
}
boolean d = done.get() == 0;
boolean empty = true;
for (int i = 0; i < n; i++) {
JoinInnerSubscriber<T> inner = s[i];
SimpleQueue<T> q = inner.queue;
if (q != null && !q.isEmpty()) {
empty = false;
break;
}
}
if (d && empty) {
errors.tryTerminateConsumer(a);
return;
}
}
if (e != 0) {
BackpressureHelper.produced(requested, e);
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
}
static final | JoinSubscriptionDelayError |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/proxy/Enhancer.java | {
"start": 23777,
"end": 28043
} | interface ____
// its superinterfaces.
List actualMethods = new ArrayList();
List interfaceMethods = new ArrayList();
final Set forcePublic = new HashSet();
getMethods(sc, interfaces, actualMethods, interfaceMethods, forcePublic);
List methods = CollectionUtils.transform(actualMethods, value -> {
Method method = (Method) value;
int modifiers = Constants.ACC_FINAL
| (method.getModifiers()
& ~Constants.ACC_ABSTRACT
& ~Constants.ACC_NATIVE
& ~Constants.ACC_SYNCHRONIZED);
if (forcePublic.contains(MethodWrapper.create(method))) {
modifiers = (modifiers & ~Constants.ACC_PROTECTED) | Constants.ACC_PUBLIC;
}
return ReflectUtils.getMethodInfo(method, modifiers);
});
ClassEmitter e = new ClassEmitter(v);
if (currentData == null) {
// Byte code level cannot be higher than 1.8 due to STATICHOOK methods
// which set static final fields outside the initializer method <clinit>.
e.begin_class(Constants.V1_8,
Constants.ACC_PUBLIC,
getClassName(),
Type.getType(sc),
(useFactory ?
TypeUtils.add(TypeUtils.getTypes(interfaces), FACTORY) :
TypeUtils.getTypes(interfaces)),
Constants.SOURCE_FILE);
}
else {
// Byte code level cannot be higher than 1.8 due to STATICHOOK methods
// which set static final fields outside the initializer method <clinit>.
e.begin_class(Constants.V1_8,
Constants.ACC_PUBLIC,
getClassName(),
null,
new Type[]{FACTORY},
Constants.SOURCE_FILE);
}
List constructorInfo = CollectionUtils.transform(constructors, MethodInfoTransformer.getInstance());
e.declare_field(Constants.ACC_PRIVATE, BOUND_FIELD, Type.BOOLEAN_TYPE, null);
e.declare_field(Constants.ACC_PUBLIC | Constants.ACC_STATIC, FACTORY_DATA_FIELD, OBJECT_TYPE, null);
if (!interceptDuringConstruction) {
e.declare_field(Constants.ACC_PRIVATE, CONSTRUCTED_FIELD, Type.BOOLEAN_TYPE, null);
}
e.declare_field(Constants.PRIVATE_FINAL_STATIC, THREAD_CALLBACKS_FIELD, THREAD_LOCAL, null);
e.declare_field(Constants.PRIVATE_FINAL_STATIC, STATIC_CALLBACKS_FIELD, CALLBACK_ARRAY, null);
if (serialVersionUID != null) {
e.declare_field(Constants.PRIVATE_FINAL_STATIC, Constants.SUID_FIELD_NAME, Type.LONG_TYPE, serialVersionUID);
}
for (int i = 0; i < callbackTypes.length; i++) {
e.declare_field(Constants.ACC_PRIVATE, getCallbackField(i), callbackTypes[i], null);
}
// This is declared private to avoid "public field" pollution
e.declare_field(Constants.ACC_PRIVATE | Constants.ACC_STATIC, CALLBACK_FILTER_FIELD, OBJECT_TYPE, null);
if (currentData == null) {
emitMethods(e, methods, actualMethods);
emitConstructors(e, constructorInfo);
}
else {
emitDefaultConstructor(e);
}
emitSetThreadCallbacks(e);
emitSetStaticCallbacks(e);
emitBindCallbacks(e);
if (useFactory || currentData != null) {
int[] keys = getCallbackKeys();
emitNewInstanceCallbacks(e);
emitNewInstanceCallback(e);
emitNewInstanceMultiarg(e, constructorInfo);
emitGetCallback(e, keys);
emitSetCallback(e, keys);
emitGetCallbacks(e);
emitSetCallbacks(e);
}
e.end_class();
}
/**
* Filter the list of constructors from the superclass. The
* constructors which remain will be included in the generated
* class. The default implementation is to filter out all private
* constructors, but subclasses may extend Enhancer to override this
* behavior.
* @param sc the superclass
* @param constructors the list of all declared constructors from the superclass
* @throws IllegalArgumentException if there are no non-private constructors
*/
protected void filterConstructors(Class sc, List constructors) {
CollectionUtils.filter(constructors, new VisibilityPredicate(sc, true));
if (constructors.size() == 0) {
throw new IllegalArgumentException("No visible constructors in " + sc);
}
}
/**
* This method should not be called in regular flow.
* Technically speaking {@link #wrapCachedClass(Class)} uses {@link Enhancer.EnhancerFactoryData} as a cache value,
* and the latter enables faster instantiation than plain old reflection lookup and invoke.
* This method is left intact for backward compatibility reasons: just in case it was ever used.
* @param type | and |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/test/java/org/jboss/resteasy/reactive/server/jaxrs/RestResponseBuilderImplTest.java | {
"start": 395,
"end": 1301
} | class ____ {
@Test
public void shouldBuildWithNonAbsoulteLocationAndIPv6Address() {
var context = Mockito.mock(ResteasyReactiveRequestContext.class, Mockito.RETURNS_DEEP_STUBS);
Mockito.when(context.getScheme()).thenReturn("https");
Mockito.when(context.getAuthority()).thenReturn("[0:0:0:0:0:0:0:1]");
Mockito.when(context.getDeployment().getPrefix()).thenReturn("/prefix");
CurrentRequestManager.set(context);
var response = RestResponseBuilderImpl.ok().location(URI.create("/host")).build();
assertEquals("https://[0:0:0:0:0:0:0:1]/prefix/host", response.getLocation().toString());
response = RestResponseBuilderImpl.ok().contentLocation(URI.create("/host")).build();
assertEquals("https://[0:0:0:0:0:0:0:1]/host", response.getHeaders().getFirst(HttpHeaders.CONTENT_LOCATION).toString());
}
}
| RestResponseBuilderImplTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableCollectSingle.java | {
"start": 2184,
"end": 3863
} | class ____<T, U> implements Observer<T>, Disposable {
final SingleObserver<? super U> downstream;
final BiConsumer<? super U, ? super T> collector;
final U u;
Disposable upstream;
boolean done;
CollectObserver(SingleObserver<? super U> actual, U u, BiConsumer<? super U, ? super T> collector) {
this.downstream = actual;
this.collector = collector;
this.u = u;
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void dispose() {
upstream.dispose();
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
@Override
public void onNext(T t) {
if (done) {
return;
}
try {
collector.accept(u, t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
upstream.dispose();
onError(e);
}
}
@Override
public void onError(Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
downstream.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
downstream.onSuccess(u);
}
}
}
| CollectObserver |
java | google__guava | android/guava-tests/test/com/google/common/io/ByteSourceTest.java | {
"start": 8919,
"end": 9378
} | class ____ extends ByteSource {
private byte[] bytes;
AppendableByteSource(byte[] initialBytes) {
this.bytes = initialBytes.clone();
}
@Override
public InputStream openStream() {
return new In();
}
void append(byte[] b) {
byte[] newBytes = Arrays.copyOf(bytes, bytes.length + b.length);
System.arraycopy(b, 0, newBytes, bytes.length, b.length);
bytes = newBytes;
}
private | AppendableByteSource |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 61146,
"end": 61319
} | class ____ {
}
// Attribute value intentionally matches attribute name:
@ImplicitAliasesContextConfig(location1 = "location1")
static | ValueImplicitAliasesContextConfigClass |
java | quarkusio__quarkus | extensions/reactive-pg-client/deployment/src/test/java/io/quarkus/reactive/pg/client/ConfigUrlMissingDefaultDatasourceDynamicInjectionTest.java | {
"start": 527,
"end": 2428
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
// The URL won't be missing if dev services are enabled
.overrideConfigKey("quarkus.devservices.enabled", "false");
@Inject
InjectableInstance<Pool> pool;
@Inject
InjectableInstance<io.vertx.mutiny.sqlclient.Pool> mutinyPool;
@Inject
InjectableInstance<PgPool> vendorPool;
@Inject
InjectableInstance<io.vertx.mutiny.pgclient.PgPool> mutinyVendorPool;
@Test
public void pool() {
doTest(pool, pool1 -> pool1.getConnection().toCompletionStage().toCompletableFuture().join());
}
@Test
public void mutinyPool() {
doTest(mutinyPool, pool1 -> pool1.getConnection().subscribe().asCompletionStage().join());
}
@Test
public void vendorPool() {
doTest(vendorPool, PGPool -> PGPool.getConnection().toCompletionStage().toCompletableFuture().join());
}
@Test
public void mutinyVendorPool() {
doTest(mutinyVendorPool, PGPool -> PGPool.getConnection().subscribe().asCompletionStage().join());
}
private <T> void doTest(InjectableInstance<T> instance, Consumer<T> action) {
var pool = instance.get();
assertThat(pool).isNotNull();
assertThatThrownBy(() -> action.accept(pool))
.isInstanceOf(InactiveBeanException.class)
.hasMessageContainingAll("Datasource '<default>' was deactivated automatically because its URL is not set.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.reactive.url'",
"Refer to https://quarkus.io/guides/datasource for guidance.");
}
}
| ConfigUrlMissingDefaultDatasourceDynamicInjectionTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/UnwrappedWithView1559Test.java | {
"start": 458,
"end": 625
} | class ____ {
@JsonUnwrapped(prefix="xxx.")
public Status status;
}
// NOTE: `final` is required to trigger [databind#1559]
static final | Health |
java | quarkusio__quarkus | extensions/spring-di/deployment/src/test/java/io/quarkus/spring/di/deployment/ListOfBeansTest.java | {
"start": 1952,
"end": 2298
} | class ____ {
@Autowired
List<Service> services;
final List<Converter> converters;
@Autowired
Foo(List<Converter> converters) {
this.converters = converters;
}
}
/**
* Test Spring with JSR-303 support
*/
@org.springframework.stereotype.Service
public static | Foo |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1800/Issue1834.java | {
"start": 1441,
"end": 1859
} | class ____{
List<? extends Number> keys;
public List<? extends Number> getKeys() {
return keys;
}
public void setKeys(List<? extends Number> keys) {
this.keys = keys;
}
@Override
public String toString() {
return "IndexQuery{" +
"keys=" + keys +
'}';
}
}
}
| IndexQuery_Number |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/reventity/CustomDateRevEntity.java | {
"start": 1009,
"end": 2062
} | class ____ {
@Id
@GeneratedValue(generator = "EnversTestingRevisionGenerator")
@RevisionNumber
private int customId;
@RevisionTimestamp
private Date dateTimestamp;
public int getCustomId() {
return customId;
}
public void setCustomId(int customId) {
this.customId = customId;
}
public Date getDateTimestamp() {
return dateTimestamp;
}
public void setDateTimestamp(Date dateTimestamp) {
this.dateTimestamp = dateTimestamp;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
CustomDateRevEntity that = (CustomDateRevEntity) o;
if ( customId != that.customId ) {
return false;
}
if ( dateTimestamp != null ? !dateTimestamp.equals( that.dateTimestamp ) : that.dateTimestamp != null ) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = customId;
result = 31 * result + (dateTimestamp != null ? dateTimestamp.hashCode() : 0);
return result;
}
}
| CustomDateRevEntity |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/GetObjectOutput.java | {
"start": 1179,
"end": 1980
} | class ____ {
private final GetObjectV2Output output;
private final byte[] checksum;
public GetObjectOutput(GetObjectV2Output output, byte[] checksum) {
Preconditions.checkNotNull(checksum, "Checksum should not be null.");
this.output = output;
this.checksum = checksum;
}
public GetObjectV2Output output() {
return output;
}
public byte[] checksum() {
return checksum;
}
public InputStream verifiedContent(byte[] expectedChecksum) throws IOException {
if (!Arrays.equals(expectedChecksum, checksum)) {
CommonUtils.runQuietly(this::forceClose);
throw new ChecksumMismatchException(expectedChecksum, checksum);
}
return output.getContent();
}
public void forceClose() throws IOException {
output.forceClose();
}
}
| GetObjectOutput |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/MonoDelayStressTest.java | {
"start": 2263,
"end": 4354
} | class ____ {
/*
Implementation notes: in this test we use the VirtualTimeScheduler to better coordinate
the triggering of the `run` method. We directly interpret the end STATE to track what
happened.
*/
final StressSubscriber<Long> subscriber = new StressSubscriber<>(0L);
final VirtualTimeScheduler virtualTimeScheduler;
final MonoDelay monoDelay;
MonoDelay.MonoDelayRunnable subscription;
{
virtualTimeScheduler = VirtualTimeScheduler.create();
monoDelay = new MonoDelay(Long.MAX_VALUE, TimeUnit.MILLISECONDS, virtualTimeScheduler);
monoDelay.doOnSubscribe(s -> subscription = (MonoDelay.MonoDelayRunnable) s).subscribe(subscriber);
}
@Actor
public void delayTrigger() {
subscription.run();
}
@Actor
public void request() {
subscriber.request(1);
}
@Arbiter
public void arbiter(III_Result r) {
r.r1 = subscriber.onNextCalls.get();
r.r2 = subscriber.onErrorCalls.get();
r.r3 = subscription.state;
}
}
@JCStressTest
@Outcome(id = {"1, 0, " + REQUEST_BEFORE_TICK}, expect = ACCEPTABLE, desc = "Tick was delivered, request happened before tick")
@Outcome(id = {"1, 0, " + REQUEST_AFTER_TICK}, expect = ACCEPTABLE, desc = "Tick was delivered, request happened after tick")
@Outcome(id = {"0, 0, " + CANCELLED_AFTER_REQUEST_FIRST}, expect = ACCEPTABLE, desc = "Cancelled after request, tick not done yet")
@Outcome(id = {"0, 0, " + CANCELLED_AFTER_REQUEST_FIRST_AND_DELAY_DONE}, expect = ACCEPTABLE, desc = "Cancelled after (request then tick)")
@Outcome(id = {"0, 0, " + CANCELLED_AFTER_REQUEST_SECOND}, expect = ACCEPTABLE, desc = "Cancelled after (tick then request)")
@Outcome(id = {"0, 0, " + CANCELLED_BEFORE_REQUEST_BUT_DELAY_DONE}, expect = ACCEPTABLE, desc = "Cancelled before request, but after tick happened")
@Outcome(id = {"0, 0, " + CANCELLED_EARLY}, expect = ACCEPTABLE, desc = "Cancelled before request and tick")
@Outcome(id = {"0, 0, " + CANCELLED_SUPER_EARLY}, expect = ACCEPTABLE_INTERESTING, desc = "Cancelled before even setCancel")
@State
public static | RequestAndRunStressTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/TemplatingTest.java | {
"start": 16575,
"end": 17192
} | class ____ {",
" public String example(Object o) {",
" return (String) o;",
" }",
"}");
assertThat(UTemplater.createTemplate(context, getMethodDeclaration("example")))
.isEqualTo(
ExpressionTemplate.create(
ImmutableMap.of("o", UClassType.create("java.lang.Object")),
UTypeCast.create(UClassIdent.create("java.lang.String"), UFreeIdent.create("o")),
UClassType.create("java.lang.String")));
}
@Test
public void constructor() {
compile(
"import java.util.ArrayList;",
" | ObjectCastExample |
java | apache__camel | components/camel-metrics/src/main/java/org/apache/camel/component/metrics/HistogramProducer.java | {
"start": 1154,
"end": 1971
} | class ____ extends AbstractMetricsProducer {
private static final Logger LOG = LoggerFactory.getLogger(HistogramProducer.class);
public HistogramProducer(MetricsEndpoint endpoint) {
super(endpoint);
}
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName)
throws Exception {
Message in = exchange.getIn();
Histogram histogram = registry.histogram(metricsName);
Long value = endpoint.getValue();
Long finalValue = getLongHeader(in, HEADER_HISTOGRAM_VALUE, value);
if (finalValue != null) {
histogram.update(finalValue);
} else {
LOG.warn("Cannot update histogram \"{}\" with null value", metricsName);
}
}
}
| HistogramProducer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTableImpl.java | {
"start": 27793,
"end": 29185
} | class ____ implements Iterator<T> {
private final Iterator<TaskSlot<T>> taskSlotIterator;
private Iterator<T> currentTasks;
private PayloadIterator(JobID jobId) {
this.taskSlotIterator = new TaskSlotIterator(jobId, TaskSlotState.ACTIVE);
this.currentTasks = null;
}
@Override
public boolean hasNext() {
while ((currentTasks == null || !currentTasks.hasNext())
&& taskSlotIterator.hasNext()) {
TaskSlot<T> taskSlot = taskSlotIterator.next();
currentTasks = taskSlot.getTasks();
}
return (currentTasks != null && currentTasks.hasNext());
}
@Override
public T next() {
while ((currentTasks == null || !currentTasks.hasNext())) {
TaskSlot<T> taskSlot;
try {
taskSlot = taskSlotIterator.next();
} catch (NoSuchElementException e) {
throw new NoSuchElementException("No more tasks.");
}
currentTasks = taskSlot.getTasks();
}
return currentTasks.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException("Cannot remove tasks via this iterator.");
}
}
private | PayloadIterator |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/OpenSslCachingKeyMaterialProvider.java | {
"start": 1054,
"end": 2863
} | class ____ extends OpenSslKeyMaterialProvider {
private final int maxCachedEntries;
private volatile boolean full;
private final ConcurrentMap<String, OpenSslKeyMaterial> cache = new ConcurrentHashMap<String, OpenSslKeyMaterial>();
OpenSslCachingKeyMaterialProvider(X509KeyManager keyManager, String password, int maxCachedEntries) {
super(keyManager, password);
this.maxCachedEntries = maxCachedEntries;
}
@Override
OpenSslKeyMaterial chooseKeyMaterial(ByteBufAllocator allocator, String alias) throws Exception {
OpenSslKeyMaterial material = cache.get(alias);
if (material == null) {
material = super.chooseKeyMaterial(allocator, alias);
if (material == null) {
// No keymaterial should be used.
return null;
}
if (full) {
return material;
}
if (cache.size() > maxCachedEntries) {
full = true;
// Do not cache...
return material;
}
OpenSslKeyMaterial old = cache.putIfAbsent(alias, material);
if (old != null) {
material.release();
material = old;
}
}
// We need to call retain() as we want to always have at least a refCnt() of 1 before destroy() was called.
return material.retain();
}
@Override
void destroy() {
// Remove and release all entries.
do {
Iterator<OpenSslKeyMaterial> iterator = cache.values().iterator();
while (iterator.hasNext()) {
iterator.next().release();
iterator.remove();
}
} while (!cache.isEmpty());
}
}
| OpenSslCachingKeyMaterialProvider |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/LprComponentBuilderFactory.java | {
"start": 1783,
"end": 3877
} | interface ____ extends ComponentBuilder<PrinterComponent> {
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default LprComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default LprComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| LprComponentBuilder |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/authx/EntraIdIntegrationTests.java | {
"start": 1065,
"end": 6996
} | class ____ {
private static final EntraIdTestContext testCtx = EntraIdTestContext.DEFAULT;
private RedisClient client;
private Endpoint standalone;
private ClientOptions clientOptions;
private TokenBasedRedisCredentialsProvider credentialsProvider;
@BeforeEach
public void setup() {
standalone = Endpoints.DEFAULT.getEndpoint("standalone-entraid-acl");
assumeTrue(standalone != null, "Skipping EntraID tests. Redis host with enabled EntraId not provided!");
Assumptions.assumeTrue(testCtx.getClientId() != null && testCtx.getClientSecret() != null,
"Skipping EntraID tests. Azure AD credentials not provided!");
clientOptions = ClientOptions.builder()
.socketOptions(SocketOptions.builder().connectTimeout(Duration.ofSeconds(1)).build())
.timeoutOptions(TimeoutOptions.enabled(Duration.ofSeconds(1)))
.reauthenticateBehavior(ClientOptions.ReauthenticateBehavior.ON_NEW_CREDENTIALS).build();
TokenAuthConfig tokenAuthConfig = EntraIDTokenAuthConfigBuilder.builder().clientId(testCtx.getClientId())
.secret(testCtx.getClientSecret()).authority(testCtx.getAuthority()).scopes(testCtx.getRedisScopes())
.expirationRefreshRatio(0.0000001F).build();
credentialsProvider = TokenBasedRedisCredentialsProvider.create(tokenAuthConfig);
client = createClient(credentialsProvider);
}
@AfterEach
public void cleanUp() {
if (credentialsProvider != null) {
credentialsProvider.close();
}
if (client != null) {
client.shutdown();
}
}
// T.1.1
// Verify authentication using Azure AD with service principals using Redis Standalone client
@Test
public void standaloneWithSecret_azureServicePrincipalIntegrationTest() throws ExecutionException, InterruptedException {
try (StatefulRedisConnection<String, String> connection = client.connect()) {
RedisCommands<String, String> sync = connection.sync();
String key = UUID.randomUUID().toString();
sync.set(key, "value");
assertThat(connection.sync().get(key)).isEqualTo("value");
assertThat(connection.async().get(key).get()).isEqualTo("value");
assertThat(connection.reactive().get(key).block()).isEqualTo("value");
sync.del(key);
}
}
// T.2.2
// Test that the Redis client is not blocked/interrupted during token renewal.
@Test
public void renewalDuringOperationsTest() throws InterruptedException {
AtomicInteger commandCycleCount = new AtomicInteger(0);
Thread commandThread = new Thread(() -> {
try (StatefulRedisConnection<String, String> connection = client.connect()) {
RedisAsyncCommands<String, String> async = connection.async();
for (int i = 1; i <= 10; i++) {
async.multi();
async.set("key", "1");
async.incrby("key", 1);
RedisFuture<TransactionResult> exec = async.exec();
TransactionResult results = exec.get(1, TimeUnit.SECONDS);
commandCycleCount.incrementAndGet();
assertThat(results).hasSize(2);
assertThat((String) results.get(0)).isEqualTo("OK");
assertThat((Long) results.get(1)).isEqualTo(2L);
}
} catch (Exception e) {
fail("Command execution failed during token refresh", e);
}
});
commandThread.start();
CountDownLatch latch = new CountDownLatch(10); // Wait for at least 10 token renewalss
credentialsProvider.credentials().subscribe(cred -> latch.countDown());
assertThat(latch.await(2, TimeUnit.SECONDS)).isTrue(); // Wait to reach 10 renewals
commandThread.join(); // Wait for the command thread to finish
assertThat(commandCycleCount.get()).isGreaterThanOrEqualTo(10);
}
// T.2.2
// Test basic Pub/Sub functionality is not blocked/interrupted during token renewal.
@Test
public void renewalDuringPubSubOperationsTest() throws InterruptedException {
assumeTrue(standalone != null, "Skipping EntraID tests. Redis host with enabled EntraId not provided!");
try (StatefulRedisPubSubConnection<String, String> connectionPubSub = client.connectPubSub();
StatefulRedisPubSubConnection<String, String> connectionPubSub1 = client.connectPubSub()) {
PubSubTestListener listener = new PubSubTestListener();
connectionPubSub.addListener(listener);
connectionPubSub.sync().subscribe("channel");
Thread pubsubThread = new Thread(() -> {
for (int i = 1; i <= 100; i++) {
connectionPubSub1.sync().publish("channel", "message");
}
});
pubsubThread.start();
CountDownLatch latch = new CountDownLatch(10);
credentialsProvider.credentials().subscribe(cred -> latch.countDown());
assertThat(latch.await(2, TimeUnit.SECONDS)).isTrue(); // Wait for at least 10 token renewals
pubsubThread.join(); // Wait for the pub/sub thread to finish
Wait.untilEquals(100, () -> listener.getMessages().size()).waitOrTimeout();
assertThat(listener.getMessages()).allMatch(msg -> msg.equals("message"));
}
}
private RedisClient createClient(TokenBasedRedisCredentialsProvider credentialsProvider) {
RedisURI uri = RedisURI.create((standalone.getEndpoints().get(0)));
uri.setCredentialsProvider(credentialsProvider);
RedisClient redis = RedisClient.create(uri);
redis.setOptions(clientOptions);
return redis;
}
}
| EntraIdIntegrationTests |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/FunctionalInterfaceClash.java | {
"start": 2802,
"end": 5811
} | interface
____<String, MethodSymbol> methodsByName = HashMultimap.create();
for (Symbol sym :
types.membersClosure(getType(tree), /* skipInterface= */ false).getSymbols()) {
if (!(sym instanceof MethodSymbol msym)) {
continue;
}
if (msym.getParameters().stream()
.noneMatch(p -> maybeFunctionalInterface(p.type, types, state))) {
continue;
}
if (msym.isConstructor() && !msym.owner.equals(origin)) {
continue;
}
methodsByName.put(msym.getSimpleName().toString(), msym);
}
// Only consider methods which don't have strictly more specific overloads; these won't actually
// clash.
SetMultimap<String, MethodSymbol> methodsBySignature = HashMultimap.create();
for (MethodSymbol msym : methodsByName.values()) {
if (methodsByName.get(msym.getSimpleName().toString()).stream()
.anyMatch(
o ->
!msym.overrides(o, (TypeSymbol) msym.owner, types, /* checkResult= */ true)
&& !o.equals(msym)
&& o.getParameters().length() == msym.getParameters().length()
&& zip(
msym.getParameters().stream(),
o.getParameters().stream(),
(a, b) -> isSubtype(a.type, b.type, state))
.allMatch(x -> x))) {
continue;
}
methodsBySignature.put(functionalInterfaceSignature(state, msym), msym);
}
// check if any declared members clash with another declared or inherited member
// (don't report clashes between inherited members)
for (Tree member : tree.getMembers()) {
if (!(member instanceof MethodTree methodTree)) {
continue;
}
MethodSymbol msym = getSymbol(methodTree);
if (msym.getParameters().stream()
.noneMatch(p -> maybeFunctionalInterface(p.type, types, state))) {
continue;
}
List<MethodSymbol> clash =
new ArrayList<>(methodsBySignature.removeAll(functionalInterfaceSignature(state, msym)));
// Ignore inherited methodsBySignature that are overridden in the original class. Note that we
// have to
// handle transitive inheritance explicitly to handle cases where the visibility of an
// overridden method is expanded somewhere in the type hierarchy.
Deque<MethodSymbol> worklist = new ArrayDeque<>();
worklist.push(msym);
clash.remove(msym);
while (!worklist.isEmpty()) {
MethodSymbol msym2 = worklist.removeFirst();
ImmutableList<MethodSymbol> overrides =
clash.stream()
.filter(m -> msym2.overrides(m, origin, types, /* checkResult= */ false))
.collect(toImmutableList());
worklist.addAll(overrides);
clash.removeAll(overrides);
}
if (!clash.isEmpty()) {
// ignore if there are overridden clashing methodsBySignature in | SetMultimap |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/SystemExitOutsideMainTest.java | {
"start": 2704,
"end": 3125
} | class ____ {
public static int main(String[] args) {
// BUG: Diagnostic contains: SystemExitOutsideMain
System.exit(0);
return 0;
}
}
""")
.doTest();
}
@Test
public void systemExitMainLookalikeDifferentVisibility() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/test-test-profile-run-main/src/main/java/org/acme/ConfigResource.java | {
"start": 308,
"end": 571
} | class ____ {
@Inject
SmallRyeConfig config;
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("/{name}")
public String configValue(@PathParam("name") final String name) {
return config.getConfigValue(name).getValue();
}
}
| ConfigResource |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanPipelineTest.java | {
"start": 2393,
"end": 2540
} | class ____ {
public void onlyPlainBody(Object body) {
assertEquals("Hello World", body);
}
}
public static | FooBean |
java | apache__kafka | raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotReader.java | {
"start": 1381,
"end": 5305
} | class ____<T> implements SnapshotReader<T> {
private final OffsetAndEpoch snapshotId;
private final RecordsIterator<T> iterator;
private Optional<Batch<T>> nextBatch = Optional.empty();
private OptionalLong lastContainedLogTimestamp = OptionalLong.empty();
private RecordsSnapshotReader(
OffsetAndEpoch snapshotId,
RecordsIterator<T> iterator
) {
this.snapshotId = snapshotId;
this.iterator = iterator;
}
@Override
public OffsetAndEpoch snapshotId() {
return snapshotId;
}
@Override
public long lastContainedLogOffset() {
return snapshotId.offset() - 1;
}
@Override
public int lastContainedLogEpoch() {
return snapshotId.epoch();
}
@Override
public long lastContainedLogTimestamp() {
if (lastContainedLogTimestamp.isEmpty()) {
nextBatch.ifPresent(batch -> {
throw new IllegalStateException(
String.format(
"nextBatch was present when last contained log timestamp was not present: Batch(baseOffset=%d" +
", epoch=%d, appendTimestamp=%d, sizeInBytes=%d, lastOffset=%d)",
batch.baseOffset(),
batch.epoch(),
batch.appendTimestamp(),
batch.sizeInBytes(),
batch.lastOffset()
)
);
});
nextBatch = nextBatch();
}
return lastContainedLogTimestamp.getAsLong();
}
@Override
public boolean hasNext() {
if (nextBatch.isEmpty()) {
nextBatch = nextBatch();
}
return nextBatch.isPresent();
}
@Override
public Batch<T> next() {
if (!hasNext()) {
throw new NoSuchElementException("Snapshot reader doesn't have any more elements");
}
Batch<T> batch = nextBatch.get();
nextBatch = Optional.empty();
return batch;
}
@Override
public void close() {
iterator.close();
}
public static <T> RecordsSnapshotReader<T> of(
RawSnapshotReader snapshot,
RecordSerde<T> serde,
BufferSupplier bufferSupplier,
int maxBatchSize,
boolean doCrcValidation,
LogContext logContext
) {
return new RecordsSnapshotReader<>(
snapshot.snapshotId(),
new RecordsIterator<>(snapshot.records(), serde, bufferSupplier, maxBatchSize, doCrcValidation, logContext)
);
}
/**
* Returns the next batch
*/
private Optional<Batch<T>> nextBatch() {
if (iterator.hasNext()) {
Batch<T> batch = iterator.next();
if (lastContainedLogTimestamp.isEmpty()) {
// This must be the first batch which is expected to be a control batch with at least one record for
// the snapshot header.
if (batch.controlRecords().isEmpty()) {
throw new IllegalStateException(
"First batch is not a control batch with at least one record"
);
} else if (ControlRecordType.SNAPSHOT_HEADER != batch.controlRecords().get(0).type()) {
throw new IllegalStateException(
String.format(
"First control record is not a snapshot header (%s)",
batch.controlRecords().get(0).type()
)
);
}
lastContainedLogTimestamp = OptionalLong.of(
((SnapshotHeaderRecord) batch.controlRecords().get(0).message()).lastContainedLogTimestamp()
);
}
return Optional.of(batch);
}
return Optional.empty();
}
}
| RecordsSnapshotReader |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/BeanDeserializerFactory.java | {
"start": 5363,
"end": 5865
} | interface ____'t have constructors, for one)
beanDescRef = ctxt.lazyIntrospectBeanDescription(concreteType);
return buildBeanDeserializer(ctxt, concreteType, beanDescRef);
}
}
// Otherwise, may want to check handlers for standard types, from superclass:
deser = findStdDeserializer(ctxt, type, beanDescRef);
if (deser != null) {
return (ValueDeserializer<Object>)deser;
}
// Otherwise: could the | doesn |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cfg/persister/Gate.java | {
"start": 259,
"end": 380
} | class ____ {
@Id
public Long getId() { return id; }
public void setId(Long id) { this.id = id; }
private Long id;
}
| Gate |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/CreateServiceAccountTokenAction.java | {
"start": 369,
"end": 747
} | class ____ extends ActionType<CreateServiceAccountTokenResponse> {
public static final String NAME = "cluster:admin/xpack/security/service_account/token/create";
public static final CreateServiceAccountTokenAction INSTANCE = new CreateServiceAccountTokenAction();
private CreateServiceAccountTokenAction() {
super(NAME);
}
}
| CreateServiceAccountTokenAction |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java | {
"start": 1726,
"end": 16397
} | class ____ extends SimpleDiffableSerializationTestCase<ComponentTemplate> {
@Override
protected ComponentTemplate makeTestChanges(ComponentTemplate testInstance) {
return mutateInstance(testInstance);
}
@Override
protected Writeable.Reader<Diff<ComponentTemplate>> diffReader() {
return ComponentTemplate::readComponentTemplateDiffFrom;
}
@Override
protected ComponentTemplate doParseInstance(XContentParser parser) throws IOException {
return ComponentTemplate.parse(parser);
}
@Override
protected Writeable.Reader<ComponentTemplate> instanceReader() {
return ComponentTemplate::new;
}
@Override
protected ComponentTemplate createTestInstance() {
return randomInstance(true);
}
// In many cases the index template is used with indices adding lifecycle would render it invalid that's why we
// do not always want to randomly add a lifecycle.
public static ComponentTemplate randomInstance() {
return randomInstance(false);
}
// Deprecated component templates may lead to deprecation warnings when used in non-deprecated index templates
// to avoid test failures due to unexpected deprecation warnings, returns a non-deprecated instance
public static ComponentTemplate randomNonDeprecatedInstance() {
return randomInstance(false, randomFrom(Boolean.FALSE, null));
}
public static ComponentTemplate randomInstance(boolean lifecycleAllowed) {
return randomInstance(lifecycleAllowed, randomOptionalBoolean());
}
public static ComponentTemplate randomInstance(boolean supportsDataStreams, Boolean deprecated) {
Template.Builder templateBuilder = Template.builder();
if (randomBoolean()) {
templateBuilder.settings(randomSettings());
}
if (randomBoolean()) {
templateBuilder.mappings(randomMappings());
}
if (randomBoolean()) {
templateBuilder.aliases(randomAliases());
}
if (randomBoolean() && supportsDataStreams) {
templateBuilder.lifecycle(DataStreamLifecycleTemplateTests.randomDataLifecycleTemplate());
}
if (randomBoolean() && supportsDataStreams) {
templateBuilder.dataStreamOptions(randomDataStreamOptionsTemplate());
}
Template template = templateBuilder.build();
Map<String, Object> meta = null;
if (randomBoolean()) {
meta = randomMeta();
}
final Long createdDate = randomBoolean() ? randomNonNegativeLong() : null;
final Long modifiedDate;
if (randomBoolean()) {
modifiedDate = createdDate == null ? randomNonNegativeLong() : randomLongBetween(createdDate, Long.MAX_VALUE);
} else {
modifiedDate = null;
}
return new ComponentTemplate(
template,
randomBoolean() ? null : randomNonNegativeLong(),
meta,
deprecated,
createdDate,
modifiedDate
);
}
public static ResettableValue<DataStreamOptions.Template> randomDataStreamOptionsTemplate() {
return switch (randomIntBetween(0, 2)) {
case 0 -> ResettableValue.undefined();
case 1 -> ResettableValue.reset();
case 2 -> ResettableValue.create(DataStreamOptionsTemplateTests.randomDataStreamOptions());
default -> throw new IllegalArgumentException("Illegal randomisation branch");
};
}
public static Map<String, AliasMetadata> randomAliases() {
String aliasName = randomAlphaOfLength(5);
AliasMetadata aliasMeta = AliasMetadata.builder(aliasName)
.filter("{\"term\":{\"year\":" + randomIntBetween(1, 3000) + "}}")
.routing(randomBoolean() ? null : randomAlphaOfLength(3))
.isHidden(randomBoolean() ? null : randomBoolean())
.writeIndex(randomBoolean() ? null : randomBoolean())
.build();
return Collections.singletonMap(aliasName, aliasMeta);
}
public static CompressedXContent randomMappings() {
try {
return new CompressedXContent("{\"properties\":{\"" + randomAlphaOfLength(5) + "\":{\"type\":\"keyword\"}}}");
} catch (IOException e) {
fail("got an IO exception creating fake mappings: " + e);
return null;
}
}
public static Settings randomSettings() {
return indexSettings(randomIntBetween(1, 10), randomIntBetween(0, 5)).put(IndexMetadata.SETTING_BLOCKS_READ, randomBoolean())
.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean())
.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean())
.put(IndexMetadata.SETTING_PRIORITY, randomIntBetween(0, 100000))
.build();
}
private static Map<String, Object> randomMeta() {
if (randomBoolean()) {
return Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4));
} else {
return Collections.singletonMap(
randomAlphaOfLength(5),
Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4))
);
}
}
@Override
protected ComponentTemplate mutateInstance(ComponentTemplate orig) {
return mutateTemplate(orig);
}
public static ComponentTemplate mutateTemplate(ComponentTemplate orig) {
return switch (randomIntBetween(0, 3)) {
case 0 -> {
Template ot = orig.template();
yield switch (randomIntBetween(0, 4)) {
case 0 -> new ComponentTemplate(
Template.builder(ot).settings(randomValueOtherThan(ot.settings(), ComponentTemplateTests::randomSettings)).build(),
orig.version(),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 1 -> new ComponentTemplate(
Template.builder(ot).mappings(randomValueOtherThan(ot.mappings(), ComponentTemplateTests::randomMappings)).build(),
orig.version(),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 2 -> new ComponentTemplate(
Template.builder(ot).aliases(randomValueOtherThan(ot.aliases(), ComponentTemplateTests::randomAliases)).build(),
orig.version(),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 3 -> new ComponentTemplate(
Template.builder(ot)
.lifecycle(randomValueOtherThan(ot.lifecycle(), DataStreamLifecycleTemplateTests::randomDataLifecycleTemplate))
.build(),
orig.version(),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 4 -> new ComponentTemplate(
Template.builder(ot)
.dataStreamOptions(
randomValueOtherThan(ot.dataStreamOptions(), DataStreamOptionsTemplateTests::randomDataStreamOptions)
)
.build(),
orig.version(),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
default -> throw new IllegalStateException("illegal randomization branch");
};
}
case 1 -> new ComponentTemplate(
orig.template(),
randomValueOtherThan(orig.version(), ESTestCase::randomNonNegativeLong),
orig.metadata(),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 2 -> new ComponentTemplate(
orig.template(),
orig.version(),
randomValueOtherThan(orig.metadata(), ComponentTemplateTests::randomMeta),
orig.deprecated(),
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
case 3 -> new ComponentTemplate(
orig.template(),
orig.version(),
orig.metadata(),
orig.isDeprecated() ? randomFrom(false, null) : true,
orig.createdDateMillis().orElse(null),
orig.modifiedDateMillis().orElse(null)
);
default -> throw new IllegalStateException("illegal randomization branch");
};
}
public void testMappingsEquals() throws IOException {
{
CompressedXContent mappings = randomMappings();
assertThat(Template.mappingsEquals(mappings, mappings), equalTo(true));
}
{
assertThat(Template.mappingsEquals(null, null), equalTo(true));
}
{
CompressedXContent mappings = randomMappings();
assertThat(Template.mappingsEquals(mappings, null), equalTo(false));
assertThat(Template.mappingsEquals(null, mappings), equalTo(false));
}
{
String randomString = randomAlphaOfLength(10);
CompressedXContent m1 = new CompressedXContent(Strings.format("""
{"properties":{"%s":{"type":"keyword"}}}
""", randomString));
CompressedXContent m2 = new CompressedXContent(Strings.format("""
{"properties":{"%s":{"type":"keyword"}}}
""", randomString));
assertThat(Template.mappingsEquals(m1, m2), equalTo(true));
}
{
CompressedXContent m1 = randomMappings();
CompressedXContent m2 = new CompressedXContent(Strings.format("""
{"properties":{"%s":{"type":"keyword"}}}
""", randomAlphaOfLength(10)));
assertThat(Template.mappingsEquals(m1, m2), equalTo(false));
}
{
Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(Strings.format("""
{"%s":{"properties":{"%s":{"type":"keyword"}}}}
""", MapperService.SINGLE_MAPPING_NAME, randomAlphaOfLength(10))), true, XContentType.JSON).v2();
Map<String, Object> reduceMap = Template.reduceMapping(map);
CompressedXContent m1 = new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(map)));
CompressedXContent m2 = new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(reduceMap)));
assertThat(Template.mappingsEquals(m1, m2), equalTo(true));
}
}
public void testXContentSerializationWithRolloverAndEffectiveRetention() throws IOException {
Settings settings = null;
CompressedXContent mappings = null;
Map<String, AliasMetadata> aliases = null;
DataStreamOptions.Template dataStreamOptions = null;
if (randomBoolean()) {
settings = randomSettings();
}
if (randomBoolean()) {
mappings = randomMappings();
}
if (randomBoolean()) {
aliases = randomAliases();
}
if (randomBoolean()) {
// Do not set random lifecycle to avoid having data_retention and effective_retention in the response.
dataStreamOptions = new DataStreamOptions.Template(DataStreamFailureStore.builder().enabled(randomBoolean()).buildTemplate());
}
DataStreamLifecycle.Template lifecycle = DataStreamLifecycle.Template.DATA_DEFAULT;
ComponentTemplate template = new ComponentTemplate(
new Template(settings, mappings, aliases, lifecycle, dataStreamOptions),
randomNonNegativeLong(),
null
);
try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {
builder.humanReadable(true);
RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConfiguration();
DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention();
ToXContent.Params withEffectiveRetention = new ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS);
template.toXContent(builder, withEffectiveRetention, rolloverConfiguration);
String serialized = Strings.toString(builder);
assertThat(serialized, containsString("rollover"));
for (String label : rolloverConfiguration.resolveRolloverConditions(
lifecycle.toDataStreamLifecycle().getEffectiveDataRetention(globalRetention, randomBoolean())
).getConditions().keySet()) {
assertThat(serialized, containsString(label));
}
/*
* A template does not have a global retention and the lifecycle has no retention, so there will be no data_retention or
* effective_retention.
*/
assertThat(serialized, not(containsString("data_retention")));
assertThat(serialized, not(containsString("effective_retention")));
}
}
public void testHangingParsing() throws IOException {
String cutDown = """
{
"template": {
"aliases": {
"foo": "bar"
},
"food": "eggplant"
},
"potato": true
}
""";
try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, cutDown)) {
expectThrows(Exception.class, () -> ComponentTemplate.parse(parser));
}
}
}
| ComponentTemplateTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDenseVector.java | {
"start": 1627,
"end": 4010
} | class ____ extends AbstractConvertFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"ToDenseVector",
ToDenseVector::new
);
private static final Map<DataType, BuildFactory> EVALUATORS = Map.ofEntries(
Map.entry(DENSE_VECTOR, (source, fieldEval) -> fieldEval),
Map.entry(LONG, ToDenseVectorFromLongEvaluator.Factory::new),
Map.entry(INTEGER, ToDenseVectorFromIntEvaluator.Factory::new),
Map.entry(DOUBLE, ToDenseVectorFromDoubleEvaluator.Factory::new),
Map.entry(KEYWORD, ToDenseVectorFromStringEvaluator.Factory::new)
);
@FunctionInfo(
returnType = "dense_vector",
description = "Converts a multi-valued input of numbers, or a hexadecimal string, to a dense_vector.",
preview = true,
examples = @Example(file = "dense_vector", tag = "to_dense_vector-ints"),
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW, version = "9.2.0") }
)
public ToDenseVector(
Source source,
@Param(
name = "field",
type = { "double", "long", "integer", "keyword" },
description = "multi-valued input of numbers or hexadecimal string to convert."
) Expression field
) {
super(source, field);
}
private ToDenseVector(StreamInput in) throws IOException {
super(in);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected Map<DataType, BuildFactory> factories() {
return EVALUATORS;
}
@Override
public DataType dataType() {
return DENSE_VECTOR;
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new ToDenseVector(source(), newChildren.get(0));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, ToDenseVector::new, field());
}
@ConvertEvaluator(extraName = "FromLong")
static float fromLong(long l) {
return l;
}
@ConvertEvaluator(extraName = "FromInt")
static float fromInt(int i) {
return i;
}
@ConvertEvaluator(extraName = "FromDouble")
static float fromDouble(double d) {
return (float) d;
}
}
| ToDenseVector |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/dialect/DbVersionInvalidPersistenceXmlTest.java | {
"start": 658,
"end": 2918
} | class ____ {
private static final String ACTUAL_H2_VERSION = DialectVersions.Defaults.H2;
// We will set the DB version to something higher than the actual version: this is invalid.
private static final String CONFIGURED_DB_VERSION = "999.999";
static {
assertThat(ACTUAL_H2_VERSION)
.as("Test setup - we need the required version to be different from the actual one")
.doesNotStartWith(CONFIGURED_DB_VERSION);
}
private static final String CONFIGURED_DB_VERSION_REPORTED;
static {
// For some reason Hibernate ORM infers a micro version of 0; no big deal.
CONFIGURED_DB_VERSION_REPORTED = CONFIGURED_DB_VERSION + ".0";
}
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(MyEntity.class)
.addAsManifestResource(new StringAsset(loadResourceAndReplacePlaceholders(
"META-INF/some-persistence-with-h2-version-placeholder.xml",
Map.of("H2_VERSION", "999.999"))),
"persistence.xml"))
.withConfigurationResource("application-datasource-only.properties")
.assertException(throwable -> assertThat(throwable)
.rootCause()
.hasMessageContainingAll(
"Persistence unit 'templatePU' was configured to run with a database version"
+ " of at least '" + CONFIGURED_DB_VERSION_REPORTED + "', but the actual version is '"
+ ACTUAL_H2_VERSION + "'",
"Consider upgrading your database",
"Alternatively, rebuild your application with 'jakarta.persistence.database-product-version="
+ ACTUAL_H2_VERSION + "'",
"this may disable some features and/or impact performance negatively"));
@Inject
SessionFactory sessionFactory;
@Inject
Session session;
@Test
public void test() {
Assertions.fail("Bootstrap should have failed");
}
}
| DbVersionInvalidPersistenceXmlTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/UnquoteNameTest.java | {
"start": 250,
"end": 2264
} | class ____ extends TestCase {
public void test_unquote() throws Exception {
String text = "{_id:1001}";
Model model = JSON.parseObject(text, Model.class);
Assert.assertEquals(1001, model._id);
}
public void test_unquote_parse() throws Exception {
String text = "{ _id:1001}";
JSONObject model = JSON.parseObject(text);
Assert.assertEquals(1001, model.get("_id"));
}
public void test_unquote_parse_1() throws Exception {
String text = "{ $id:1001}";
JSONObject model = JSON.parseObject(text);
Assert.assertEquals(1001, model.get("$id"));
}
public void test_unquote_reader() throws Exception {
String text = "{_id:1001}";
JSONReader reader = new JSONReader(new StringReader(text));
Model model = reader.readObject(Model.class);
Assert.assertEquals(1001, model._id);
reader.close();
}
public void test_unquote_reader_parse() throws Exception {
String text = "{_id:1001}";
JSONReader reader = new JSONReader(new StringReader(text));
JSONObject model = (JSONObject) reader.readObject();
Assert.assertEquals(1001, model.get("_id"));
reader.close();
}
public void test_obj() throws Exception {
JSONReader reader = new JSONReader(new StringReader("{_id:123}"));
reader.startObject();
Assert.assertEquals("_id", reader.readString());
Assert.assertEquals(Integer.valueOf(123), reader.readInteger());
reader.endObject();
reader.close();
}
public void test_obj_1() throws Exception {
JSONReader reader = new JSONReader(new StringReader("{$id:123}"));
reader.startObject();
Assert.assertEquals("$id", reader.readString());
Assert.assertEquals(Integer.valueOf(123), reader.readInteger());
reader.endObject();
reader.close();
}
public static | UnquoteNameTest |
java | apache__rocketmq | broker/src/test/java/org/apache/rocketmq/broker/filter/CommitLogDispatcherCalcBitMapTest.java | {
"start": 1243,
"end": 6698
} | class ____ {
@Test
public void testDispatch_filterDataIllegal() {
BrokerConfig brokerConfig = new BrokerConfig();
brokerConfig.setEnableCalcFilterBitMap(true);
ConsumerFilterManager filterManager = new ConsumerFilterManager();
filterManager.register("topic0", "CID_0", "a is not null and a >= 5",
ExpressionType.SQL92, System.currentTimeMillis());
filterManager.register("topic0", "CID_1", "a is not null and a >= 15",
ExpressionType.SQL92, System.currentTimeMillis());
ConsumerFilterData nullExpression = filterManager.get("topic0", "CID_0");
nullExpression.setExpression(null);
nullExpression.setCompiledExpression(null);
ConsumerFilterData nullBloomData = filterManager.get("topic0", "CID_1");
nullBloomData.setBloomFilterData(null);
CommitLogDispatcherCalcBitMap calcBitMap = new CommitLogDispatcherCalcBitMap(brokerConfig,
filterManager);
for (int i = 0; i < 1; i++) {
Map<String, String> properties = new HashMap<>(4);
properties.put("a", String.valueOf(i * 10 + 5));
String topic = "topic" + i;
DispatchRequest dispatchRequest = new DispatchRequest(
topic,
0,
i * 100 + 123,
100,
(long) ("tags" + i).hashCode(),
System.currentTimeMillis(),
i,
null,
UUID.randomUUID().toString(),
0,
0,
properties
);
calcBitMap.dispatch(dispatchRequest);
assertThat(dispatchRequest.getBitMap()).isNotNull();
BitsArray bitsArray = BitsArray.create(dispatchRequest.getBitMap(),
filterManager.getBloomFilter().getM());
for (int j = 0; j < bitsArray.bitLength(); j++) {
assertThat(bitsArray.getBit(j)).isFalse();
}
}
}
@Test
public void testDispatch_blankFilterData() {
BrokerConfig brokerConfig = new BrokerConfig();
brokerConfig.setEnableCalcFilterBitMap(true);
ConsumerFilterManager filterManager = new ConsumerFilterManager();
CommitLogDispatcherCalcBitMap calcBitMap = new CommitLogDispatcherCalcBitMap(brokerConfig,
filterManager);
for (int i = 0; i < 10; i++) {
Map<String, String> properties = new HashMap<>(4);
properties.put("a", String.valueOf(i * 10 + 5));
String topic = "topic" + i;
DispatchRequest dispatchRequest = new DispatchRequest(
topic,
0,
i * 100 + 123,
100,
(long) ("tags" + i).hashCode(),
System.currentTimeMillis(),
i,
null,
UUID.randomUUID().toString(),
0,
0,
properties
);
calcBitMap.dispatch(dispatchRequest);
assertThat(dispatchRequest.getBitMap()).isNull();
}
}
@Test
public void testDispatch() {
BrokerConfig brokerConfig = new BrokerConfig();
brokerConfig.setEnableCalcFilterBitMap(true);
ConsumerFilterManager filterManager = ConsumerFilterManagerTest.gen(10, 10);
CommitLogDispatcherCalcBitMap calcBitMap = new CommitLogDispatcherCalcBitMap(brokerConfig,
filterManager);
for (int i = 0; i < 10; i++) {
Map<String, String> properties = new HashMap<>(4);
properties.put("a", String.valueOf(i * 10 + 5));
String topic = "topic" + i;
DispatchRequest dispatchRequest = new DispatchRequest(
topic,
0,
i * 100 + 123,
100,
(long) ("tags" + i).hashCode(),
System.currentTimeMillis(),
i,
null,
UUID.randomUUID().toString(),
0,
0,
properties
);
calcBitMap.dispatch(dispatchRequest);
assertThat(dispatchRequest.getBitMap()).isNotNull();
BitsArray bits = BitsArray.create(dispatchRequest.getBitMap());
Collection<ConsumerFilterData> filterDatas = filterManager.get(topic);
for (ConsumerFilterData filterData : filterDatas) {
if (filterManager.getBloomFilter().isHit(filterData.getBloomFilterData(), bits)) {
try {
assertThat((Boolean) filterData.getCompiledExpression().evaluate(
new MessageEvaluationContext(properties)
)).isTrue();
} catch (Exception e) {
e.printStackTrace();
assertThat(true).isFalse();
}
} else {
try {
assertThat((Boolean) filterData.getCompiledExpression().evaluate(
new MessageEvaluationContext(properties)
)).isFalse();
} catch (Exception e) {
e.printStackTrace();
assertThat(true).isFalse();
}
}
}
}
}
}
| CommitLogDispatcherCalcBitMapTest |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/bitmap_recycle/LruBitmapPool.java | {
"start": 903,
"end": 10245
} | class ____ implements BitmapPool {
private static final String TAG = "LruBitmapPool";
private static final Bitmap.Config DEFAULT_CONFIG = Bitmap.Config.ARGB_8888;
private final LruPoolStrategy strategy;
private final Set<Bitmap.Config> allowedConfigs;
private final long initialMaxSize;
private final BitmapTracker tracker;
private long maxSize;
private long currentSize;
private int hits;
private int misses;
private int puts;
private int evictions;
// Exposed for testing only.
LruBitmapPool(long maxSize, LruPoolStrategy strategy, Set<Bitmap.Config> allowedConfigs) {
this.initialMaxSize = maxSize;
this.maxSize = maxSize;
this.strategy = strategy;
this.allowedConfigs = allowedConfigs;
this.tracker = new NullBitmapTracker();
}
/**
* Constructor for LruBitmapPool.
*
* @param maxSize The initial maximum size of the pool in bytes.
*/
public LruBitmapPool(long maxSize) {
this(maxSize, getDefaultStrategy(), getDefaultAllowedConfigs());
}
/**
* Constructor for LruBitmapPool.
*
* @param maxSize The initial maximum size of the pool in bytes.
* @param allowedConfigs A white listed put of {@link android.graphics.Bitmap.Config} that are
* allowed to be put into the pool. Configs not in the allowed put will be rejected.
*/
// Public API.
@SuppressWarnings("unused")
public LruBitmapPool(long maxSize, Set<Bitmap.Config> allowedConfigs) {
this(maxSize, getDefaultStrategy(), allowedConfigs);
}
/** Returns the number of cache hits for bitmaps in the pool. */
public long hitCount() {
return hits;
}
/** Returns the number of cache misses for bitmaps in the pool. */
public long missCount() {
return misses;
}
/** Returns the number of bitmaps that have been evicted from the pool. */
public long evictionCount() {
return evictions;
}
/** Returns the current size of the pool in bytes. */
public long getCurrentSize() {
return currentSize;
}
@Override
public long getMaxSize() {
return maxSize;
}
@Override
public synchronized void setSizeMultiplier(float sizeMultiplier) {
maxSize = Math.round(initialMaxSize * sizeMultiplier);
evict();
}
@Override
public synchronized void put(Bitmap bitmap) {
if (bitmap == null) {
throw new NullPointerException("Bitmap must not be null");
}
if (bitmap.isRecycled()) {
throw new IllegalStateException("Cannot pool recycled bitmap");
}
if (!bitmap.isMutable()
|| strategy.getSize(bitmap) > maxSize
|| !allowedConfigs.contains(bitmap.getConfig())) {
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(
TAG,
"Reject bitmap from pool"
+ ", bitmap: "
+ strategy.logBitmap(bitmap)
+ ", is mutable: "
+ bitmap.isMutable()
+ ", is allowed config: "
+ allowedConfigs.contains(bitmap.getConfig()));
}
bitmap.recycle();
return;
}
final int size = strategy.getSize(bitmap);
strategy.put(bitmap);
tracker.add(bitmap);
puts++;
currentSize += size;
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(TAG, "Put bitmap in pool=" + strategy.logBitmap(bitmap));
}
dump();
evict();
}
private void evict() {
trimToSize(maxSize);
}
@Override
@NonNull
public Bitmap get(int width, int height, Bitmap.Config config) {
Bitmap result = getDirtyOrNull(width, height, config);
if (result != null) {
// Bitmaps in the pool contain random data that in some cases must be cleared for an image
// to be rendered correctly. we shouldn't force all consumers to independently erase the
// contents individually, so we do so here. See issue #131.
result.eraseColor(Color.TRANSPARENT);
} else {
result = createBitmap(width, height, config);
}
return result;
}
@NonNull
@Override
public Bitmap getDirty(int width, int height, Bitmap.Config config) {
Bitmap result = getDirtyOrNull(width, height, config);
if (result == null) {
result = createBitmap(width, height, config);
}
return result;
}
@NonNull
private static Bitmap createBitmap(int width, int height, @Nullable Bitmap.Config config) {
return Bitmap.createBitmap(width, height, config != null ? config : DEFAULT_CONFIG);
}
@TargetApi(Build.VERSION_CODES.O)
private static void assertNotHardwareConfig(Bitmap.Config config) {
// Avoid short circuiting on sdk int since it breaks on some versions of Android.
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.O) {
return;
}
if (config == Bitmap.Config.HARDWARE) {
throw new IllegalArgumentException(
"Cannot create a mutable Bitmap with config: "
+ config
+ ". Consider setting Downsampler#ALLOW_HARDWARE_CONFIG to false in your"
+ " RequestOptions and/or in GlideBuilder.setDefaultRequestOptions");
}
}
@Nullable
private synchronized Bitmap getDirtyOrNull(
int width, int height, @Nullable Bitmap.Config config) {
assertNotHardwareConfig(config);
// Config will be null for non public config types, which can lead to transformations naively
// passing in null as the requested config here. See issue #194.
final Bitmap result = strategy.get(width, height, config != null ? config : DEFAULT_CONFIG);
if (result == null) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Missing bitmap=" + strategy.logBitmap(width, height, config));
}
misses++;
} else {
hits++;
currentSize -= strategy.getSize(result);
tracker.remove(result);
normalize(result);
}
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(TAG, "Get bitmap=" + strategy.logBitmap(width, height, config));
}
dump();
return result;
}
// Setting these two values provides Bitmaps that are essentially equivalent to those returned
// from Bitmap.createBitmap.
private static void normalize(Bitmap bitmap) {
bitmap.setHasAlpha(true);
maybeSetPreMultiplied(bitmap);
}
@TargetApi(Build.VERSION_CODES.KITKAT)
private static void maybeSetPreMultiplied(Bitmap bitmap) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
bitmap.setPremultiplied(true);
}
}
@Override
public void clearMemory() {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "clearMemory");
}
trimToSize(0);
}
@SuppressWarnings("checkstyle:UnnecessaryParentheses") // Readability
@SuppressLint("InlinedApi")
@Override
public void trimMemory(int level) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "trimMemory, level=" + level);
}
if (level >= ComponentCallbacks2.TRIM_MEMORY_BACKGROUND
|| (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M
&& level >= ComponentCallbacks2.TRIM_MEMORY_UI_HIDDEN)) {
clearMemory();
} else if (level >= ComponentCallbacks2.TRIM_MEMORY_UI_HIDDEN
|| level == ComponentCallbacks2.TRIM_MEMORY_RUNNING_CRITICAL) {
trimToSize(getMaxSize() / 2);
}
}
private synchronized void trimToSize(long size) {
while (currentSize > size) {
final Bitmap removed = strategy.removeLast();
// TODO: This shouldn't ever happen, see #331.
if (removed == null) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Size mismatch, resetting");
dumpUnchecked();
}
currentSize = 0;
return;
}
tracker.remove(removed);
currentSize -= strategy.getSize(removed);
evictions++;
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Evicting bitmap=" + strategy.logBitmap(removed));
}
dump();
removed.recycle();
}
}
private void dump() {
if (Log.isLoggable(TAG, Log.VERBOSE)) {
dumpUnchecked();
}
}
private void dumpUnchecked() {
Log.v(
TAG,
"Hits="
+ hits
+ ", misses="
+ misses
+ ", puts="
+ puts
+ ", evictions="
+ evictions
+ ", currentSize="
+ currentSize
+ ", maxSize="
+ maxSize
+ "\nStrategy="
+ strategy);
}
private static LruPoolStrategy getDefaultStrategy() {
final LruPoolStrategy strategy;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
strategy = new SizeConfigStrategy();
} else {
strategy = new AttributeStrategy();
}
return strategy;
}
@TargetApi(Build.VERSION_CODES.O)
private static Set<Bitmap.Config> getDefaultAllowedConfigs() {
Set<Bitmap.Config> configs = new HashSet<>(Arrays.asList(Bitmap.Config.values()));
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
// GIFs, among other types, end up with a native Bitmap config that doesn't map to a java
// config and is treated as null in java code. On KitKat+ these Bitmaps can be reconfigured
// and are suitable for re-use.
configs.add(null);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
configs.remove(Bitmap.Config.HARDWARE);
}
return Collections.unmodifiableSet(configs);
}
private | LruBitmapPool |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/search/AggregationReply.java | {
"start": 1513,
"end": 5497
} | class ____<K, V> {
long aggregationGroups = 1;
List<SearchReply<K, V>> replies = new ArrayList<>();
/**
* Optional Cursor metadata of the shard that created/owns the cursor. Present only when running in cluster mode, WITHCURSOR
* was used, and the server created a cursor (cursorId > 0).
*/
private Cursor cursor;
/**
* Creates a new empty AggregationReply. The reply is initialized with defaults.
*/
public AggregationReply() {
}
/**
* Returns the number of aggregation groups in this reply.
*
* <p>
* This value represents:
* </p>
* <ul>
* <li>For grouped aggregations: the number of distinct groups returned</li>
* <li>For non-grouped aggregations: typically 1, representing the entire result set</li>
* <li>For empty results: may be 0 or 1 depending on the aggregation type</li>
* </ul>
*
* <p>
* Note: This count may be different from {@code getReplies().size()} in some cases, particularly when dealing with
* cursor-based pagination where not all groups are returned in a single response.
* </p>
*
* @return the number of aggregation groups, typically a positive integer
*/
public long getAggregationGroups() {
return aggregationGroups;
}
/**
* Returns the list of search replies containing the aggregation results.
*
* <p>
* Each {@link SearchReply} in the list represents:
* </p>
* <ul>
* <li>For grouped aggregations: one aggregation group with its computed values</li>
* <li>For non-grouped aggregations: typically a single reply containing all results</li>
* <li>For cursor-based results: the current page of results</li>
* </ul>
*
* <p>
* The structure of each {@link SearchReply} depends on the aggregation operations performed:
* </p>
* <ul>
* <li>GROUP BY operations create separate replies for each group</li>
* <li>REDUCE operations add computed fields to each reply</li>
* <li>LOAD operations include specified fields in the results</li>
* <li>SORTBY operations determine the order of replies</li>
* </ul>
*
* <p>
* The returned list is mutable and reflects the current state of the aggregation results. Modifying this list will affect
* the aggregation reply.
* </p>
*
* @return a mutable list of {@link SearchReply} objects containing the aggregation results. Never {@code null}, but may be
* empty if no results were found.
*/
public List<SearchReply<K, V>> getReplies() {
return replies;
}
/**
* Returns the optional Cursor metadata for pagination and (in cluster) sticky routing, if applicable.
*
* <p>
* When {@code WITHCURSOR} is used and Redis returns a cursor, this method yields a {@link Cursor} containing the cursor id.
* In cluster mode, the cursor may additionally carry the node id on which the cursor resides for sticky FT.CURSOR READ/DEL
* routing.
* </p>
*
* @return an {@link Optional} with {@link Cursor} when a cursor was returned; otherwise an empty Optional.
*/
public Optional<Cursor> getCursor() {
return Optional.ofNullable(cursor);
}
/**
* Set the {@link Cursor} metadata carried by this reply. Intended for post-parse stamping of node affinity (cluster mode)
* or attaching the server-returned cursor id (standalone).
*/
public void setCursor(Cursor cursor) {
if (this.cursor == null) {
this.cursor = cursor;
}
}
void setGroupCount(long value) {
this.aggregationGroups = value;
}
void addSearchReply(SearchReply<K, V> searchReply) {
this.replies.add(searchReply);
}
/**
* Lightweight cursor handle containing the server-assigned cursor id and optional node id (cluster sticky routing).
*/
public static | AggregationReply |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/yearmonth/YearMonthAssert_isNotEqualTo_Test.java | {
"start": 1177,
"end": 2169
} | class ____ extends YearMonthAssertBaseTest {
@Test
void should_pass_if_actual_is_not_equal_to_year_month_as_string_parameter() {
assertThat(REFERENCE).isNotEqualTo(AFTER.toString());
}
@Test
void should_fail_if_actual_is_equal_to_year_month_as_string_parameter() {
// WHEN
ThrowingCallable code = () -> assertThat(REFERENCE).isNotEqualTo(REFERENCE.toString());
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessage(shouldNotBeEqual(REFERENCE, REFERENCE).create());
}
@Test
void should_fail_if_year_month_as_string_parameter_is_null() {
// GIVEN
String otherYearMonthAsString = null;
// WHEN
ThrowingCallable code = () -> assertThat(YearMonth.now()).isNotEqualTo(otherYearMonthAsString);
// THEN
assertThatIllegalArgumentException().isThrownBy(code)
.withMessage("The String representing the YearMonth to compare actual with should not be null");
}
}
| YearMonthAssert_isNotEqualTo_Test |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/Language.java | {
"start": 1050,
"end": 1462
} | enum ____ two defined values, {@link #NONE} and {@link #JAVA_FAMILY},
* but can be extended by registering a {@code org.apache.maven.api.spi.LanguageProvider}.
* <p>
* Implementation must have {@code equals()} and {@code hashCode()} implemented, so implementations of this interface
* can be used as keys.
*
* @since 4.0.0
*/
@Experimental
@Immutable
@SuppressWarnings("checkstyle:InterfaceIsType")
public | has |
java | elastic__elasticsearch | x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java | {
"start": 941,
"end": 4001
} | class ____ {
private static final TimeValue DEFAULT_TIMEOUT = new TimeValue(25, TimeUnit.SECONDS);
static void parse(RestRequest restRequest, AbstractFindStructureRequest request) {
if (request instanceof FindStructureAction.Request) {
request.setLinesToSample(
restRequest.paramAsInt(
FindStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(),
TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT
)
);
request.setLineMergeSizeLimit(
restRequest.paramAsInt(
FindStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(),
TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT
)
);
request.setCharset(restRequest.param(FindStructureAction.Request.CHARSET.getPreferredName()));
request.setHasHeaderRow(restRequest.paramAsBoolean(FindStructureAction.Request.HAS_HEADER_ROW.getPreferredName(), null));
} else if (request instanceof FindFieldStructureAction.Request) {
request.setLinesToSample(
restRequest.paramAsInt(
FindStructureAction.Request.DOCUMENTS_TO_SAMPLE.getPreferredName(),
TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT
)
);
}
request.setTimeout(
TimeValue.parseTimeValue(
restRequest.param(FindStructureAction.Request.TIMEOUT.getPreferredName()),
DEFAULT_TIMEOUT,
FindStructureAction.Request.TIMEOUT.getPreferredName()
)
);
request.setFormat(restRequest.param(FindStructureAction.Request.FORMAT.getPreferredName()));
request.setColumnNames(restRequest.paramAsStringArray(FindStructureAction.Request.COLUMN_NAMES.getPreferredName(), null));
request.setDelimiter(restRequest.param(FindStructureAction.Request.DELIMITER.getPreferredName()));
request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName()));
request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null));
request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName()));
request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName()));
request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName()));
request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName()));
if (request instanceof FindMessageStructureAction.Request || request instanceof FindFieldStructureAction.Request) {
if (TextStructure.Format.DELIMITED.equals(request.getFormat())) {
request.setHasHeaderRow(false);
}
}
}
}
| RestFindStructureArgumentsParser |
java | quarkusio__quarkus | test-framework/maven/src/main/java/io/quarkus/maven/it/continuoustesting/TestModeContinuousTestingMavenTestUtils.java | {
"start": 653,
"end": 910
} | class ____ extends ContinuousTestingMavenTestUtils {
// Example output we look for
// 1 test failed (1 passing, 0 skipped), 1 test was run in 217ms. Tests completed at 21:22:34 due to changes to HelloResource$Blah. | TestModeContinuousTestingMavenTestUtils |
java | apache__flink | flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/compactor/CompactorOperatorTest.java | {
"start": 3525,
"end": 28078
} | class ____ extends AbstractCompactTestBase {
@Test
void testCompact() throws Exception {
FileCompactor fileCompactor =
new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
try (OneInputStreamOperatorTestHarness<
CompactorRequest, CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
harness.processElement(
request(
"0",
Arrays.asList(committable("0", ".0", 5), committable("0", ".1", 5)),
null));
assertThat(harness.extractOutputValues()).isEmpty();
harness.prepareSnapshotPreBarrier(1);
harness.snapshot(1, 1L);
harness.notifyOfCompletedCheckpoint(1);
compactor.getAllTasksFuture().join();
assertThat(harness.extractOutputValues()).isEmpty();
harness.prepareSnapshotPreBarrier(2);
// 1summary+1compacted+2cleanup
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(4);
results.element(1, as(committableWithLineage()))
.hasCommittable(committable("0", "compacted-0", 10));
results.element(2, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".0"));
results.element(3, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".1"));
}
}
@Test
void testPassthrough() throws Exception {
FileCompactor fileCompactor =
new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
try (OneInputStreamOperatorTestHarness<
CompactorRequest, CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
FileSinkCommittable cleanupInprogressRequest = cleanupInprogress("0", "0", 1);
FileSinkCommittable cleanupPathRequest = cleanupPath("0", "1");
harness.processElement(
request("0", null, Collections.singletonList(cleanupInprogressRequest)));
harness.processElement(
request("0", null, Collections.singletonList(cleanupPathRequest)));
assertThat(harness.extractOutputValues()).isEmpty();
harness.prepareSnapshotPreBarrier(1);
harness.snapshot(1, 1L);
harness.notifyOfCompletedCheckpoint(1);
compactor.getAllTasksFuture().join();
assertThat(harness.extractOutputValues()).isEmpty();
harness.prepareSnapshotPreBarrier(2);
ListAssert<CommittableMessage<FileSinkCommittable>> messages =
assertThat(harness.extractOutputValues()).hasSize(3);
messages.element(1, as(committableWithLineage()))
.hasCommittable(cleanupInprogressRequest);
messages.element(2, as(committableWithLineage())).hasCommittable(cleanupPathRequest);
}
}
@Test
void testRestore() throws Exception {
FileCompactor fileCompactor =
new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
OperatorSubtaskState state;
try (OneInputStreamOperatorTestHarness<
CompactorRequest, CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
harness.processElement(
request(
"0",
Arrays.asList(committable("0", ".0", 5), committable("0", ".1", 5)),
null));
harness.snapshot(1, 1L);
harness.processElement(
request(
"0",
Arrays.asList(committable("0", ".2", 5), committable("0", ".3", 5)),
null));
harness.notifyOfCompletedCheckpoint(1);
// request 1 is submitted and request 2 is pending
state = harness.snapshot(2, 2L);
}
compactor = createTestOperator(fileCompactor);
try (OneInputStreamOperatorTestHarness<
CompactorRequest, CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.initializeState(state);
harness.open();
// request 1 should be submitted
compactor.getAllTasksFuture().join();
harness.prepareSnapshotPreBarrier(3);
// the result of request 1 should be emitted
assertThat(harness.extractOutputValues()).hasSize(4);
harness.snapshot(3, 3L);
harness.notifyOfCompletedCheckpoint(3L);
// request 2 should be submitted
compactor.getAllTasksFuture().join();
harness.prepareSnapshotPreBarrier(4);
// the result of request 2 should be emitted
assertThat(harness.extractOutputValues()).hasSize(8);
// 1summary+1compacted+2cleanup * 2
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(8);
results.element(0, as(committableSummary()));
results.element(1, as(committableWithLineage()))
.hasCommittable(committable("0", "compacted-0", 10));
results.element(2, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".0"));
results.element(3, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".1"));
results.element(4, as(committableSummary()));
results.element(5, as(committableWithLineage()))
.hasCommittable(committable("0", "compacted-2", 10));
results.element(6, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".2"));
results.element(7, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".3"));
}
}
@Test
void testStateHandler() throws Exception {
FileCompactor fileCompactor =
new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
OperatorSubtaskState state;
try (OneInputStreamOperatorTestHarness<
CompactorRequest, CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
harness.processElement(
request(
"0",
Arrays.asList(committable("0", ".0", 1), committable("0", ".1", 2)),
null));
harness.snapshot(1, 1L);
harness.processElement(
request(
"0",
Arrays.asList(committable("0", ".2", 3), committable("0", ".3", 4)),
null));
harness.notifyOfCompletedCheckpoint(1);
// request 1 is submitted and request 2 is pending
state = harness.snapshot(2, 2L);
}
CompactorOperatorStateHandler handler =
new CompactorOperatorStateHandler(
null, getTestCommittableSerializer(), createTestBucketWriter());
try (OneInputStreamOperatorTestHarness<
Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>,
CommittableMessage<FileSinkCommittable>>
harness = new OneInputStreamOperatorTestHarness<>(handler)) {
harness.setup();
harness.initializeState(state);
harness.open();
// remaining requests from coordinator
harness.processElement(
new StreamRecord<>(
Either.Right(
request(
"0",
Collections.singletonList(
committable("0", ".4", 5)),
null)
.getValue())));
harness.processElement(
new StreamRecord<>(
Either.Right(
request(
"0",
Collections.singletonList(
committable("0", ".5", 6)),
null)
.getValue())));
harness.processElement(
new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 3L, 2, 2, 0))));
// remaining in-progress file from file writer
harness.processElement(
new StreamRecord<>(
Either.Left(
new CommittableWithLineage<>(
committable("0", ".6", 7), 3L, 0))));
// new pending file written this time
harness.processElement(
new StreamRecord<>(
Either.Left(
new CommittableWithLineage<>(
committable("0", "7", 8), 3L, 0))));
harness.processElement(
new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 4L, 0, 0, 0))));
harness.processElement(
new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 5L, 3, 3, 0))));
// 1 summary + (1 compacted committable + 1 compacted cleanup) * 6 + 1 hidden + 1 normal
// + 1 summary + 1 cleanup + 1 summary
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(18);
results.element(0, as(committableSummary()));
List<FileSinkCommittable> expectedResult =
Arrays.asList(
committable("0", "compacted-0", 1),
cleanupPath("0", ".0"),
committable("0", "compacted-1", 2),
cleanupPath("0", ".1"),
committable("0", "compacted-2", 3),
cleanupPath("0", ".2"),
committable("0", "compacted-3", 4),
cleanupPath("0", ".3"),
committable("0", "compacted-4", 5),
cleanupPath("0", ".4"),
committable("0", "compacted-5", 6),
cleanupPath("0", ".5"),
committable("0", "compacted-6", 7),
committable("0", "7", 8));
for (int i = 0; i < expectedResult.size(); ++i) {
results.element(i + 1, as(committableWithLineage()))
.hasCommittable(expectedResult.get(i));
}
results.element(15, as(committableSummary()));
results.element(16, as(committableWithLineage()))
.hasCommittable(cleanupPath("0", ".6"));
results.element(17, as(committableSummary()));
}
}
@Test
void testStateHandlerRestore() throws Exception {
OperatorSubtaskState state;
try (OneInputStreamOperatorTestHarness<
Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>,
CommittableMessage<FileSinkCommittable>>
harness =
new OneInputStreamOperatorTestHarness<>(
new CompactorOperatorStateHandler(
null,
getTestCommittableSerializer(),
createTestBucketWriter()))) {
harness.setup();
harness.open();
// remaining request from coordinator
harness.processElement(
new StreamRecord<>(
Either.Right(
request(
"0",
Collections.singletonList(
committable("0", ".1", 1)),
null)
.getValue())));
// process only summary during cp1, unaligned barrier may be processed ahead of the
// elements
harness.processElement(
new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 1L, 2, 2, 0))));
state = harness.snapshot(1, 1L);
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(3);
results.element(0, as(committableSummary()));
results.element(1, as(committableWithLineage()))
.hasCommittable(committable("0", "compacted-1", 1));
results.element(2, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".1"));
}
try (OneInputStreamOperatorTestHarness<
Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>,
CommittableMessage<FileSinkCommittable>>
harness =
new OneInputStreamOperatorTestHarness<>(
new CompactorOperatorStateHandler(
null,
getTestCommittableSerializer(),
createTestBucketWriter()))) {
harness.setup();
harness.initializeState(state);
harness.open();
harness.processElement(
new StreamRecord<>(
Either.Left(
new CommittableWithLineage<>(
committable("0", ".2", 2), 1L, 0))));
harness.processElement(
new StreamRecord<>(
Either.Left(
new CommittableWithLineage<>(
committable("0", "3", 3), 1L, 0))));
state = harness.snapshot(2, 2L);
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(2);
results.element(0, as(committableWithLineage()))
.hasCommittable(committable("0", "2", 2));
results.element(1, as(committableWithLineage()))
.hasCommittable(committable("0", "3", 3));
}
try (OneInputStreamOperatorTestHarness<
Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>,
CommittableMessage<FileSinkCommittable>>
harness =
new OneInputStreamOperatorTestHarness<>(
new CompactorOperatorStateHandler(
null,
getTestCommittableSerializer(),
createTestBucketWriter()))) {
harness.setup();
harness.initializeState(state);
harness.open();
harness.processElement(
new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 2L, 0, 0, 0))));
ListAssert<CommittableMessage<FileSinkCommittable>> results =
assertThat(harness.extractOutputValues()).hasSize(2);
results.element(0, as(committableSummary()));
results.element(1, as(committableWithLineage())).hasCommittable(cleanupPath("0", ".2"));
}
}
private StreamRecord<CompactorRequest> request(
String bucketId,
List<FileSinkCommittable> toCompact,
List<FileSinkCommittable> toPassthrough) {
return new StreamRecord<>(
new CompactorRequest(
bucketId,
toCompact == null ? new ArrayList<>() : toCompact,
toPassthrough == null ? new ArrayList<>() : toPassthrough),
0L);
}
private FileSinkCommittable committable(String bucketId, String name, int size)
throws IOException {
// put bucketId after name to keep the possible '.' prefix in name
return new FileSinkCommittable(
bucketId,
new TestPendingFileRecoverable(
newFile(name + "_" + bucketId, size <= 0 ? 1 : size), size));
}
private FileSinkCommittable cleanupInprogress(String bucketId, String name, int size)
throws IOException {
Path toCleanup = newFile(name + "_" + bucketId, size);
return new FileSinkCommittable(
bucketId, new TestInProgressFileRecoverable(toCleanup, size));
}
private FileSinkCommittable cleanupPath(String bucketId, String name) throws IOException {
Path toCleanup = newFile(name + "_" + bucketId, 1);
return new FileSinkCommittable(bucketId, toCleanup);
}
private SimpleVersionedSerializer<FileSinkCommittable> getTestCommittableSerializer() {
return new FileSinkCommittableSerializer(
new FileSinkTestUtils.SimpleVersionedWrapperSerializer<>(
TestPendingFileRecoverable::new),
new FileSinkTestUtils.SimpleVersionedWrapperSerializer<>(
TestInProgressFileRecoverable::new));
}
private CompactorOperator createTestOperator(FileCompactor compactor) {
return new CompactorOperator(
null,
FileCompactStrategy.Builder.newBuilder()
.setNumCompactThreads(2)
.enableCompactionOnCheckpoint(1)
.build(),
getTestCommittableSerializer(),
compactor,
createTestBucketWriter());
}
private BucketWriter<?, String> createTestBucketWriter() {
return new BucketWriter<Integer, String>() {
@Override
public InProgressFileWriter<Integer, String> openNewInProgressFile(
String bucketId, Path path, long creationTime) throws IOException {
return new InProgressFileWriter<Integer, String>() {
BufferedWriter writer;
long size = 0L;
@Override
public void write(Integer element, long currentTime) throws IOException {
if (writer == null) {
writer = new BufferedWriter(new FileWriter(path.toString()));
}
writer.write(element);
size += 1;
}
@Override
public InProgressFileRecoverable persist() throws IOException {
return new TestInProgressFileRecoverable(path, size);
}
@Override
public PendingFileRecoverable closeForCommit() throws IOException {
return new TestPendingFileRecoverable(path, size);
}
@Override
public void dispose() {}
@Override
public String getBucketId() {
return bucketId;
}
@Override
public long getCreationTime() {
return 0;
}
@Override
public long getSize() throws IOException {
return size;
}
@Override
public long getLastUpdateTime() {
return 0;
}
};
}
@Override
public InProgressFileWriter<Integer, String> resumeInProgressFileFrom(
String s, InProgressFileRecoverable inProgressFileSnapshot, long creationTime)
throws IOException {
return null;
}
@Override
public WriterProperties getProperties() {
return null;
}
@Override
public PendingFile recoverPendingFile(PendingFileRecoverable pendingFileRecoverable)
throws IOException {
return new PendingFile() {
@Override
public void commit() throws IOException {
TestPendingFileRecoverable testRecoverable =
(TestPendingFileRecoverable) pendingFileRecoverable;
if (testRecoverable.getPath() != null) {
if (!testRecoverable
.getPath()
.equals(testRecoverable.getUncommittedPath())) {
testRecoverable
.getPath()
.getFileSystem()
.rename(
testRecoverable.getUncommittedPath(),
testRecoverable.getPath());
}
}
}
@Override
public void commitAfterRecovery() throws IOException {
commit();
}
};
}
@Override
public boolean cleanupInProgressFileRecoverable(
InProgressFileRecoverable inProgressFileRecoverable) throws IOException {
return false;
}
@Override
public CompactingFileWriter openNewCompactingFile(
CompactingFileWriter.Type type, String bucketId, Path path, long creationTime)
throws IOException {
if (type == CompactingFileWriter.Type.RECORD_WISE) {
return openNewInProgressFile(bucketId, path, creationTime);
} else {
FileOutputStream fileOutputStream = new FileOutputStream(path.toString());
return new OutputStreamBasedCompactingFileWriter() {
@Override
public OutputStream asOutputStream() throws IOException {
return fileOutputStream;
}
@Override
public PendingFileRecoverable closeForCommit() throws IOException {
fileOutputStream.flush();
return new TestPendingFileRecoverable(
path, fileOutputStream.getChannel().position());
}
};
}
}
};
}
}
| CompactorOperatorTest |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/core/JmsMessagingTemplate.java | {
"start": 2338,
"end": 18322
} | class ____ extends AbstractMessagingTemplate<Destination>
implements JmsMessageOperations, InitializingBean {
private @Nullable JmsOperations jmsTemplate;
private MessageConverter jmsMessageConverter;
private boolean converterSet;
private @Nullable String defaultDestinationName;
/**
* Constructor for use with bean properties.
* Requires {@link #setConnectionFactory} or {@link #setJmsTemplate} to be called.
*/
public JmsMessagingTemplate() {
this.jmsMessageConverter = new MessagingMessageConverter();
}
/**
* Create a {@code JmsMessagingTemplate} instance with the JMS {@link ConnectionFactory}
* to use, implicitly building a {@link JmsTemplate} based on it.
* @since 4.1.2
*/
public JmsMessagingTemplate(ConnectionFactory connectionFactory) {
this(new JmsTemplate(connectionFactory));
}
/**
* Create a {@code JmsMessagingTemplate} instance with the {@link JmsTemplate} to use.
*/
public JmsMessagingTemplate(JmsTemplate jmsTemplate) {
Assert.notNull(jmsTemplate, "JmsTemplate must not be null");
this.jmsTemplate = jmsTemplate;
this.jmsMessageConverter = new MessagingMessageConverter(jmsTemplate.getMessageConverter());
}
/**
* Create a {@code JmsMessagingTemplate} instance with the {@link JmsOperations} to use.
* @since 7.0
*/
public JmsMessagingTemplate(JmsOperations jmsTemplate) {
Assert.notNull(jmsTemplate, "JmsTemplate must not be null");
this.jmsTemplate = jmsTemplate;
this.jmsMessageConverter = (jmsTemplate instanceof JmsTemplate template ?
new MessagingMessageConverter(template.getMessageConverter()) : new MessagingMessageConverter());
}
/**
* Set the ConnectionFactory to use for the underlying {@link JmsTemplate}.
* @since 4.1.2
*/
public void setConnectionFactory(ConnectionFactory connectionFactory) {
if (this.jmsTemplate instanceof JmsAccessor accessor) {
JmsTemplate template = new JmsTemplate(accessor);
template.setConnectionFactory(connectionFactory);
this.jmsTemplate = template;
}
else {
this.jmsTemplate = new JmsTemplate(connectionFactory);
}
}
/**
* Return the ConnectionFactory that the underlying {@link JmsTemplate} uses.
* @since 4.1.2
*/
public @Nullable ConnectionFactory getConnectionFactory() {
return (this.jmsTemplate instanceof JmsTemplate template ? template.getConnectionFactory() : null);
}
/**
* Set the {@link JmsTemplate} to use.
*/
public void setJmsTemplate(@Nullable JmsTemplate jmsTemplate) {
this.jmsTemplate = jmsTemplate;
}
/**
* Return the configured {@link JmsTemplate}.
*/
public @Nullable JmsTemplate getJmsTemplate() {
return (this.jmsTemplate instanceof JmsTemplate template ? template : null);
}
/**
* Set the {@link MessageConverter} to use to convert a {@link Message}
* to and from a {@link jakarta.jms.Message}.
* <p>By default, a {@link MessagingMessageConverter} is defined using a
* {@link SimpleMessageConverter} to convert the payload of the message.
* <p>Consider configuring a {@link MessagingMessageConverter} with a different
* {@link MessagingMessageConverter#setPayloadConverter(MessageConverter) payload converter}
* for more advanced scenarios.
* @see org.springframework.jms.support.converter.MessagingMessageConverter
*/
public void setJmsMessageConverter(MessageConverter jmsMessageConverter) {
Assert.notNull(jmsMessageConverter, "MessageConverter must not be null");
this.jmsMessageConverter = jmsMessageConverter;
this.converterSet = true;
}
/**
* Return the {@link MessageConverter} to use to convert a {@link Message}
* to and from a {@link jakarta.jms.Message}.
*/
public MessageConverter getJmsMessageConverter() {
return this.jmsMessageConverter;
}
/**
* Configure the default destination name to use in send methods that don't have
* a destination argument. If a default destination is not configured, send methods
* without a destination argument will raise an exception if invoked.
* @see #setDefaultDestination(Object)
*/
public void setDefaultDestinationName(@Nullable String defaultDestinationName) {
this.defaultDestinationName = defaultDestinationName;
}
/**
* Return the configured default destination name.
*/
public @Nullable String getDefaultDestinationName() {
return this.defaultDestinationName;
}
@Override
public void afterPropertiesSet() {
Assert.notNull(this.jmsTemplate, "Property 'connectionFactory' or 'jmsTemplate' is required");
if (!this.converterSet && this.jmsTemplate instanceof JmsTemplate template) {
((MessagingMessageConverter) this.jmsMessageConverter).setPayloadConverter(template.getMessageConverter());
}
}
JmsOperations obtainJmsTemplate() {
Assert.state(this.jmsTemplate != null, "No JmsTemplate set");
return this.jmsTemplate;
}
@Override
public void send(Message<?> message) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
send(defaultDestination, message);
}
else {
send(getRequiredDefaultDestinationName(), message);
}
}
@Override
public void convertAndSend(Object payload, @Nullable Map<String, Object> headers,
@Nullable MessagePostProcessor postProcessor) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
convertAndSend(defaultDestination, payload, headers, postProcessor);
}
else {
convertAndSend(getRequiredDefaultDestinationName(), payload, headers, postProcessor);
}
}
@Override
public void send(String destinationName, Message<?> message) throws MessagingException {
doSend(destinationName, message);
}
@Override
public void convertAndSend(String destinationName, Object payload) throws MessagingException {
convertAndSend(destinationName, payload, (Map<String, Object>) null);
}
@Override
public void convertAndSend(String destinationName, Object payload, @Nullable Map<String, Object> headers)
throws MessagingException {
convertAndSend(destinationName, payload, headers, null);
}
@Override
public void convertAndSend(String destinationName, Object payload, @Nullable MessagePostProcessor postProcessor)
throws MessagingException {
convertAndSend(destinationName, payload, null, postProcessor);
}
@Override
public void convertAndSend(String destinationName, Object payload, @Nullable Map<String, Object> headers,
@Nullable MessagePostProcessor postProcessor) throws MessagingException {
Message<?> message = doConvert(payload, headers, postProcessor);
send(destinationName, message);
}
@Override
public @Nullable Message<?> receive() throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return receive(defaultDestination);
}
else {
return receive(getRequiredDefaultDestinationName());
}
}
@Override
public <T> @Nullable T receiveAndConvert(Class<T> targetClass) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return receiveAndConvert(defaultDestination, targetClass);
}
else {
return receiveAndConvert(getRequiredDefaultDestinationName(), targetClass);
}
}
@Override
public @Nullable Message<?> receive(String destinationName) throws MessagingException {
return doReceive(destinationName);
}
@Override
public <T> @Nullable T receiveAndConvert(String destinationName, Class<T> targetClass) throws MessagingException {
Message<?> message = doReceive(destinationName);
if (message != null) {
return doConvert(message, targetClass);
}
else {
return null;
}
}
@Override
public @Nullable Message<?> receiveSelected(@Nullable String messageSelector) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return receiveSelected(defaultDestination, messageSelector);
}
else {
return receiveSelected(getRequiredDefaultDestinationName(), messageSelector);
}
}
@Override
public @Nullable Message<?> receiveSelected(Destination destination, @Nullable String messageSelector)
throws MessagingException {
return doReceiveSelected(destination, messageSelector);
}
@Override
public @Nullable Message<?> receiveSelected(String destinationName, @Nullable String messageSelector)
throws MessagingException {
return doReceiveSelected(destinationName, messageSelector);
}
@Override
public <T> @Nullable T receiveSelectedAndConvert(@Nullable String messageSelector, Class<T> targetClass)
throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return receiveSelectedAndConvert(defaultDestination, messageSelector, targetClass);
}
else {
return receiveSelectedAndConvert(getRequiredDefaultDestinationName(), messageSelector, targetClass);
}
}
@Override
public <T> @Nullable T receiveSelectedAndConvert(Destination destination, @Nullable String messageSelector,
Class<T> targetClass) throws MessagingException {
Message<?> message = doReceiveSelected(destination, messageSelector);
if (message != null) {
return doConvert(message, targetClass);
}
else {
return null;
}
}
@Override
public <T> @Nullable T receiveSelectedAndConvert(String destinationName, @Nullable String messageSelector,
Class<T> targetClass) throws MessagingException {
Message<?> message = doReceiveSelected(destinationName, messageSelector);
if (message != null) {
return doConvert(message, targetClass);
}
else {
return null;
}
}
@Override
public @Nullable Message<?> sendAndReceive(Message<?> requestMessage) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return sendAndReceive(defaultDestination, requestMessage);
}
else {
return sendAndReceive(getRequiredDefaultDestinationName(), requestMessage);
}
}
@Override
public @Nullable Message<?> sendAndReceive(String destinationName, Message<?> requestMessage)
throws MessagingException {
return doSendAndReceive(destinationName, requestMessage);
}
@Override
public <T> @Nullable T convertSendAndReceive(String destinationName, Object request, Class<T> targetClass)
throws MessagingException {
return convertSendAndReceive(destinationName, request, null, targetClass);
}
@Override
public <T> @Nullable T convertSendAndReceive(Object request, Class<T> targetClass) throws MessagingException {
return convertSendAndReceive(request, targetClass, null);
}
@Override
public <T> @Nullable T convertSendAndReceive(String destinationName, Object request,
@Nullable Map<String, Object> headers, Class<T> targetClass) throws MessagingException {
return convertSendAndReceive(destinationName, request, headers, targetClass, null);
}
@Override
public <T> @Nullable T convertSendAndReceive(Object request, Class<T> targetClass,
@Nullable MessagePostProcessor postProcessor) throws MessagingException {
Destination defaultDestination = getDefaultDestination();
if (defaultDestination != null) {
return convertSendAndReceive(defaultDestination, request, targetClass, postProcessor);
}
else {
return convertSendAndReceive(getRequiredDefaultDestinationName(), request, targetClass, postProcessor);
}
}
@Override
public <T> @Nullable T convertSendAndReceive(String destinationName, Object request, Class<T> targetClass,
@Nullable MessagePostProcessor requestPostProcessor) throws MessagingException {
return convertSendAndReceive(destinationName, request, null, targetClass, requestPostProcessor);
}
@SuppressWarnings("unchecked")
@Override
public <T> @Nullable T convertSendAndReceive(String destinationName, Object request,
@Nullable Map<String, Object> headers, Class<T> targetClass,
@Nullable MessagePostProcessor postProcessor) throws MessagingException {
Message<?> requestMessage = doConvert(request, headers, postProcessor);
Message<?> replyMessage = sendAndReceive(destinationName, requestMessage);
return (replyMessage != null ? (T) getMessageConverter().fromMessage(replyMessage, targetClass) : null);
}
@Override
protected void doSend(Destination destination, Message<?> message) {
try {
obtainJmsTemplate().send(destination, createMessageCreator(message));
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
protected void doSend(String destinationName, Message<?> message) {
try {
obtainJmsTemplate().send(destinationName, createMessageCreator(message));
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
@Override
protected @Nullable Message<?> doReceive(Destination destination) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().receive(destination);
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
protected @Nullable Message<?> doReceive(String destinationName) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().receive(destinationName);
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
protected @Nullable Message<?> doReceiveSelected(Destination destination, @Nullable String messageSelector) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().receiveSelected(destination, messageSelector);
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
protected @Nullable Message<?> doReceiveSelected(String destinationName, @Nullable String messageSelector) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().receiveSelected(destinationName, messageSelector);
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
@Override
protected @Nullable Message<?> doSendAndReceive(Destination destination, Message<?> requestMessage) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().sendAndReceive(
destination, createMessageCreator(requestMessage));
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
protected @Nullable Message<?> doSendAndReceive(String destinationName, Message<?> requestMessage) {
try {
jakarta.jms.Message jmsMessage = obtainJmsTemplate().sendAndReceive(
destinationName, createMessageCreator(requestMessage));
return convertJmsMessage(jmsMessage);
}
catch (JmsException ex) {
throw convertJmsException(ex);
}
}
private MessagingMessageCreator createMessageCreator(Message<?> message) {
return new MessagingMessageCreator(message, getJmsMessageConverter());
}
protected String getRequiredDefaultDestinationName() {
String name = getDefaultDestinationName();
if (name == null) {
throw new IllegalStateException("No 'defaultDestination' or 'defaultDestinationName' specified. " +
"Check configuration of JmsMessagingTemplate.");
}
return name;
}
protected @Nullable Message<?> convertJmsMessage(jakarta.jms.@Nullable Message message) {
if (message == null) {
return null;
}
try {
return (Message<?>) getJmsMessageConverter().fromMessage(message);
}
catch (Exception ex) {
throw new MessageConversionException("Could not convert '" + message + "'", ex);
}
}
protected MessagingException convertJmsException(JmsException ex) {
if (ex instanceof org.springframework.jms.support.destination.DestinationResolutionException ||
ex instanceof InvalidDestinationException) {
return new DestinationResolutionException(ex.getMessage(), ex);
}
if (ex instanceof org.springframework.jms.support.converter.MessageConversionException) {
return new MessageConversionException(ex.getMessage(), ex);
}
// Fallback
return new MessagingException(ex.getMessage(), ex);
}
private static | JmsMessagingTemplate |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueue.java | {
"start": 1518,
"end": 12751
} | class ____ implements ConsumeQueueInterface {
private static final Logger log = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private static final Logger ERROR_LOG = LoggerFactory.getLogger(LoggerName.STORE_ERROR_LOGGER_NAME);
private final MessageStoreConfig messageStoreConfig;
private final RocksDBConsumeQueueStore consumeQueueStore;
private final String topic;
private final int queueId;
public RocksDBConsumeQueue(final MessageStoreConfig messageStoreConfig,
final RocksDBConsumeQueueStore consumeQueueStore,
final String topic, final int queueId) {
this.messageStoreConfig = messageStoreConfig;
this.consumeQueueStore = consumeQueueStore;
this.topic = topic;
this.queueId = queueId;
}
/**
* Only used to pass parameters when calling the destroy method
*
* @see RocksDBConsumeQueueStore#destroy(ConsumeQueueInterface)
*/
public RocksDBConsumeQueue(final String topic, final int queueId) {
this(null, null, topic, queueId);
}
@Override
public boolean load() {
return true;
}
@Override
public void recover() {
// ignore
}
@Override
public void checkSelf() {
// ignore
}
@Override
public boolean flush(final int flushLeastPages) {
return true;
}
@Override
public void destroy() {
// ignore
}
@Override
public void truncateDirtyLogicFiles(long maxCommitLogPos) {
// ignored
}
@Override
public int deleteExpiredFile(long minCommitLogPos) {
return 0;
}
@Override
public long rollNextFile(long nextBeginOffset) {
return 0;
}
@Override
public boolean isFirstFileAvailable() {
return true;
}
@Override
public boolean isFirstFileExist() {
return true;
}
@Override
public void swapMap(int reserveNum, long forceSwapIntervalMs, long normalSwapIntervalMs) {
// ignore
}
@Override
public void cleanSwappedMap(long forceCleanSwapIntervalMs) {
// ignore
}
@Override
public long getMaxOffsetInQueue() {
try {
return this.consumeQueueStore.getMaxOffsetInQueue(topic, queueId);
} catch (RocksDBException e) {
ERROR_LOG.error("getMaxOffsetInQueue Failed. topic: {}, queueId: {}", topic, queueId, e);
return 0;
}
}
@Override
public long getMessageTotalInQueue() {
try {
long maxOffsetInQueue = this.consumeQueueStore.getMaxOffsetInQueue(topic, queueId);
long minOffsetInQueue = this.consumeQueueStore.getMinOffsetInQueue(topic, queueId);
return maxOffsetInQueue - minOffsetInQueue;
} catch (RocksDBException e) {
ERROR_LOG.error("getMessageTotalInQueue Failed. topic: {}, queueId: {}, {}", topic, queueId, e);
}
return -1;
}
/**
* We already implement it in RocksDBConsumeQueueStore.
* @see RocksDBConsumeQueueStore#getOffsetInQueueByTime
* @param timestamp timestamp
* @return
*/
@Override
public long getOffsetInQueueByTime(long timestamp) {
return 0;
}
/**
* We already implement it in RocksDBConsumeQueueStore.
* @see RocksDBConsumeQueueStore#getOffsetInQueueByTime
* @param timestamp timestamp
* @param boundaryType Lower or Upper
* @return
*/
@Override
public long getOffsetInQueueByTime(long timestamp, BoundaryType boundaryType) {
return 0;
}
@Override
public long getMaxPhysicOffset() {
Long maxPhyOffset = this.consumeQueueStore.getMaxPhyOffsetInConsumeQueue(topic, queueId);
return maxPhyOffset == null ? -1 : maxPhyOffset;
}
@Override
public long getMinLogicOffset() {
return 0;
}
@Override
public CQType getCQType() {
return CQType.RocksDBCQ;
}
@Override
public long getTotalSize() {
// ignored
return 0;
}
@Override
public int getUnitSize() {
// attention: unitSize should equal to 'ConsumeQueue.CQ_STORE_UNIT_SIZE'
return ConsumeQueue.CQ_STORE_UNIT_SIZE;
}
/**
* Ignored, we already implement this method
* @see org.apache.rocketmq.store.queue.RocksDBConsumeQueueOffsetTable#getMinCqOffset(String, int)
*/
@Override
public void correctMinOffset(long minCommitLogOffset) {
}
/**
* Ignored, in rocksdb mode, we build cq in RocksDBConsumeQueueStore
*/
@Override
public void putMessagePositionInfoWrapper(DispatchRequest request) {
}
@Override
public void assignQueueOffset(QueueOffsetOperator queueOffsetOperator, MessageExtBrokerInner msg) throws RocksDBException {
String topicQueueKey = getTopic() + "-" + getQueueId();
Long queueOffset = queueOffsetOperator.getTopicQueueNextOffset(topicQueueKey);
if (queueOffset == null) {
// we will recover topic queue table from rocksdb when we use it.
queueOffset = this.consumeQueueStore.getMaxOffsetInQueue(topic, queueId);
queueOffsetOperator.updateQueueOffset(topicQueueKey, queueOffset);
}
msg.setQueueOffset(queueOffset);
}
@Override
public void increaseQueueOffset(QueueOffsetOperator queueOffsetOperator, MessageExtBrokerInner msg, short messageNum) {
String topicQueueKey = getTopic() + "-" + getQueueId();
queueOffsetOperator.increaseQueueOffset(topicQueueKey, messageNum);
}
@Override
public long estimateMessageCount(long from, long to, MessageFilter filter) {
// Check from and to offset validity
Pair<CqUnit, Long> fromUnit = getCqUnitAndStoreTime(from);
if (fromUnit == null) {
return -1;
}
if (from >= to) {
return -1;
}
if (to > getMaxOffsetInQueue()) {
to = getMaxOffsetInQueue();
}
int maxSampleSize = messageStoreConfig.getMaxConsumeQueueScan();
int sampleSize = to - from > maxSampleSize ? maxSampleSize : (int) (to - from);
int matchThreshold = messageStoreConfig.getSampleCountThreshold();
int matchSize = 0;
for (int i = 0; i < sampleSize; i++) {
long index = from + i;
Pair<CqUnit, Long> pair = getCqUnitAndStoreTime(index);
if (pair == null) {
continue;
}
CqUnit cqUnit = pair.getObject1();
if (filter.isMatchedByConsumeQueue(cqUnit.getTagsCode(), cqUnit.getCqExtUnit())) {
matchSize++;
// if matchSize is plenty, early exit estimate
if (matchSize > matchThreshold) {
sampleSize = i;
break;
}
}
}
// Make sure the second half is a floating point number, otherwise it will be truncated to 0
return sampleSize == 0 ? 0 : (long) ((to - from) * (matchSize / (sampleSize * 1.0)));
}
@Override
public long getMinOffsetInQueue() {
try {
return this.consumeQueueStore.getMinOffsetInQueue(topic, queueId);
} catch (RocksDBException e) {
ERROR_LOG.error("getMinOffsetInQueue Failed. topic: {}, queueId: {}", topic, queueId, e);
return -1;
}
}
private int pullNum(long cqOffset, long maxCqOffset) {
long diffLong = maxCqOffset - cqOffset;
if (diffLong < Integer.MAX_VALUE) {
return (int) diffLong;
}
return Integer.MAX_VALUE;
}
@Override
public ReferredIterator<CqUnit> iterateFrom(final long startIndex) {
long maxCqOffset = getMaxOffsetInQueue();
if (startIndex < maxCqOffset && startIndex >= 0) {
int num = pullNum(startIndex, maxCqOffset);
return new LargeRocksDBConsumeQueueIterator(startIndex, num);
}
return null;
}
@Override
public ReferredIterator<CqUnit> iterateFrom(long startIndex, int count) throws RocksDBException {
long maxCqOffset = getMaxOffsetInQueue();
if (startIndex < maxCqOffset) {
int num = Math.min((int)(maxCqOffset - startIndex), count);
return iterateFrom0(startIndex, num);
}
return null;
}
@Override
public CqUnit get(long index) {
Pair<CqUnit, Long> pair = getCqUnitAndStoreTime(index);
return pair == null ? null : pair.getObject1();
}
@Override
public Pair<CqUnit, Long> getCqUnitAndStoreTime(long index) {
ByteBuffer byteBuffer;
try {
byteBuffer = this.consumeQueueStore.get(topic, queueId, index);
} catch (RocksDBException e) {
ERROR_LOG.error("getUnitAndStoreTime Failed. topic: {}, queueId: {}", topic, queueId, e);
return null;
}
if (byteBuffer == null || byteBuffer.remaining() < RocksDBConsumeQueueTable.CQ_UNIT_SIZE) {
return null;
}
long phyOffset = byteBuffer.getLong();
int size = byteBuffer.getInt();
long tagCode = byteBuffer.getLong();
long messageStoreTime = byteBuffer.getLong();
return new Pair<>(new CqUnit(index, phyOffset, size, tagCode), messageStoreTime);
}
@Override
public Pair<CqUnit, Long> getEarliestUnitAndStoreTime() {
try {
long minOffset = this.consumeQueueStore.getMinOffsetInQueue(topic, queueId);
return getCqUnitAndStoreTime(minOffset);
} catch (RocksDBException e) {
ERROR_LOG.error("getEarliestUnitAndStoreTime Failed. topic: {}, queueId: {}", topic, queueId, e);
}
return null;
}
@Override
public CqUnit getEarliestUnit() {
Pair<CqUnit, Long> pair = getEarliestUnitAndStoreTime();
return pair == null ? null : pair.getObject1();
}
@Override
public CqUnit getLatestUnit() {
try {
long maxOffset = this.consumeQueueStore.getMaxOffsetInQueue(topic, queueId);
return get(maxOffset > 0 ? maxOffset - 1 : maxOffset);
} catch (RocksDBException e) {
ERROR_LOG.error("getLatestUnit Failed. topic: {}, queueId: {}, {}", topic, queueId, e.getMessage());
}
return null;
}
@Override
public long getLastOffset() {
return getMaxPhysicOffset();
}
private ReferredIterator<CqUnit> iterateFrom0(final long startIndex, final int count) throws RocksDBException {
List<ByteBuffer> byteBufferList = this.consumeQueueStore.rangeQuery(topic, queueId, startIndex, count);
if (byteBufferList == null || byteBufferList.isEmpty()) {
if (this.messageStoreConfig.isEnableRocksDBLog()) {
log.warn("iterateFrom0 - find nothing, startIndex:{}, count:{}", startIndex, count);
}
return null;
}
return new RocksDBConsumeQueueIterator(byteBufferList, startIndex);
}
@Override
public String getTopic() {
return topic;
}
@Override
public int getQueueId() {
return queueId;
}
private | RocksDBConsumeQueue |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/utils/UniqueTopicSerdeScope.java | {
"start": 2137,
"end": 3052
} | class ____<T> implements Serde<T> {
private final AtomicBoolean isKey = new AtomicBoolean(false);
private final Serde<T> delegate;
public UniqueTopicSerdeDecorator(final Serde<T> delegate) {
this.delegate = delegate;
}
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
delegate.configure(configs, isKey);
this.isKey.set(isKey);
}
@Override
public void close() {
delegate.close();
}
@Override
public Serializer<T> serializer() {
return new UniqueTopicSerializerDecorator<>(isKey, delegate.serializer());
}
@Override
public Deserializer<T> deserializer() {
return new UniqueTopicDeserializerDecorator<>(isKey, delegate.deserializer());
}
}
public | UniqueTopicSerdeDecorator |
java | micronaut-projects__micronaut-core | inject-java/src/main/java/io/micronaut/annotation/processing/visitor/JavaNativeElement.java | {
"start": 1307,
"end": 1843
} | class ____ element.
*
* @param element The element
* @param typeMirror The type mirror
* @param owner The owner
*/
record Class(TypeElement element, @Nullable TypeMirror typeMirror,
@Nullable JavaNativeElement owner) implements JavaNativeElement {
Class(TypeElement element) {
this(element, null, null);
}
Class(TypeElement element, @Nullable TypeMirror typeMirror) {
this(element, typeMirror, null);
}
}
/**
* The | native |
java | google__guice | core/test/com/googlecode/guice/BytecodeGenTest.java | {
"start": 16715,
"end": 16810
} | class ____ extends LogCreator {
@Inject
public PublicInject() {}
}
static | PublicInject |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/type/TypeFactoryTest.java | {
"start": 869,
"end": 941
} | class ____<K,V> implements Map<K,V> { }
abstract static | IntermediateMap |
java | apache__flink | flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/SupervisorActorTest.java | {
"start": 9879,
"end": 10206
} | class ____ {
private final Throwable cause;
private Fail(Throwable cause) {
this.cause = cause;
}
private Throwable getCause() {
return cause;
}
private static Fail exceptionally(Throwable cause) {
return new Fail(cause);
}
}
}
| Fail |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/context/support/ReactorContextTestExecutionListener.java | {
"start": 2496,
"end": 2996
} | class ____ extends AbstractTestExecutionListener {
@Override
public void beforeTestMethod(TestContext testContext) {
SecurityContext securityContext = TestSecurityContextHolder.getContext();
Hooks.onLastOperator(CONTEXT_OPERATOR_KEY,
Operators.lift((s, sub) -> new SecuritySubContext<>(sub, securityContext)));
}
@Override
public void afterTestMethod(TestContext testContext) {
Hooks.resetOnLastOperator(CONTEXT_OPERATOR_KEY);
}
private static | DelegateTestExecutionListener |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/actuator/cloudfoundry/customcontextpath/MyReactiveCloudFoundryConfiguration.java | {
"start": 1536,
"end": 1892
} | class ____ {
@Bean
public HttpHandler httpHandler(ApplicationContext applicationContext, WebFluxProperties properties) {
HttpHandler httpHandler = WebHttpHandlerBuilder.applicationContext(applicationContext).build();
return new CloudFoundryHttpHandler(properties.getBasePath(), httpHandler);
}
private static final | MyReactiveCloudFoundryConfiguration |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/internal/BinderTransport.java | {
"start": 6249,
"end": 22037
} | enum ____ {
NOT_STARTED, // We haven't been started yet.
SETUP, // We're setting up the connection.
READY, // The transport is ready.
SHUTDOWN, // We've been shutdown and won't accept any additional calls (thought existing calls
// may continue).
SHUTDOWN_TERMINATED // We've been shutdown completely (or we failed to start). We can't send or
// receive any data.
}
private final ObjectPool<ScheduledExecutorService> executorServicePool;
private final ScheduledExecutorService scheduledExecutorService;
private final InternalLogId logId;
@GuardedBy("this")
private final LeakSafeOneWayBinder incomingBinder;
protected final ConcurrentHashMap<Integer, Inbound<?>> ongoingCalls;
protected final OneWayBinderProxy.Decorator binderDecorator;
@GuardedBy("this")
private final LinkedHashSet<Integer> callIdsToNotifyWhenReady = new LinkedHashSet<>();
@GuardedBy("this")
private final List<Future<?>> ownedFutures = new ArrayList<>(); // To cancel upon terminate.
@GuardedBy("this")
protected Attributes attributes;
@GuardedBy("this")
private TransportState transportState = TransportState.NOT_STARTED;
@GuardedBy("this")
@Nullable
protected Status shutdownStatus;
@Nullable private OneWayBinderProxy outgoingBinder;
private final FlowController flowController;
/** The number of incoming bytes we've received. */
// Only read/written on @BinderThread.
private long numIncomingBytes;
/** The number of incoming bytes we've told our peer we've received. */
// Only read/written on @BinderThread.
private long acknowledgedIncomingBytes;
protected BinderTransport(
ObjectPool<ScheduledExecutorService> executorServicePool,
Attributes attributes,
OneWayBinderProxy.Decorator binderDecorator,
InternalLogId logId) {
this.binderDecorator = binderDecorator;
this.executorServicePool = executorServicePool;
this.attributes = attributes;
this.logId = logId;
scheduledExecutorService = executorServicePool.getObject();
incomingBinder = new LeakSafeOneWayBinder(this::handleTransaction);
ongoingCalls = new ConcurrentHashMap<>();
flowController = new FlowController(TRANSACTION_BYTES_WINDOW);
}
// Override in child class.
public final ScheduledExecutorService getScheduledExecutorService() {
return scheduledExecutorService;
}
// Override in child class.
public final ListenableFuture<SocketStats> getStats() {
Attributes attributes = getAttributes();
return immediateFuture(
new InternalChannelz.SocketStats(
/* data= */ null, // TODO: Keep track of these stats with TransportTracer or similar.
/* local= */ attributes.get(Grpc.TRANSPORT_ATTR_LOCAL_ADDR),
/* remote= */ attributes.get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR),
// TODO: SocketOptions are meaningless for binder but we're still forced to provide one.
new InternalChannelz.SocketOptions.Builder().build(),
/* security= */ null));
}
// Override in child class.
public final InternalLogId getLogId() {
return logId;
}
// Override in child class.
public final synchronized Attributes getAttributes() {
return attributes;
}
/**
* Returns whether this transport is able to send rpc transactions. Intentionally unsynchronized
* since this will be called while Outbound is held.
*/
final boolean isReady() {
return !flowController.isTransmitWindowFull();
}
@GuardedBy("this")
abstract void notifyShutdown(Status shutdownStatus);
@GuardedBy("this")
abstract void notifyTerminated();
void releaseExecutors() {
executorServicePool.returnObject(scheduledExecutorService);
}
// Registers the specified future for eventual safe cancellation upon shutdown/terminate.
@GuardedBy("this")
protected final <T extends Future<?>> T register(T future) {
ownedFutures.add(future);
return future;
}
@GuardedBy("this")
boolean inState(TransportState transportState) {
return this.transportState == transportState;
}
@GuardedBy("this")
boolean isShutdown() {
return inState(TransportState.SHUTDOWN) || inState(TransportState.SHUTDOWN_TERMINATED);
}
@GuardedBy("this")
final void setState(TransportState newState) {
checkTransition(transportState, newState);
transportState = newState;
}
/**
* Sets the binder to use for sending subsequent transactions to our peer.
*
* <p>Subclasses should call this as early as possible but not from a constructor.
*
* <p>Returns true for success, false if the process hosting 'binder' is already dead. Callers are
* responsible for handling this.
*/
@GuardedBy("this")
protected boolean setOutgoingBinder(OneWayBinderProxy binder) {
binder = binderDecorator.decorate(binder);
this.outgoingBinder = binder;
try {
binder.getDelegate().linkToDeath(this, 0);
return true;
} catch (RemoteException re) {
return false;
}
}
@Override
public synchronized void binderDied() {
shutdownInternal(
Status.UNAVAILABLE.withDescription(
"Peer process crashed, exited or was killed (binderDied)"),
true);
}
@GuardedBy("this")
final void shutdownInternal(Status shutdownStatus, boolean forceTerminate) {
if (!isShutdown()) {
this.shutdownStatus = shutdownStatus;
setState(TransportState.SHUTDOWN);
notifyShutdown(shutdownStatus);
}
if (!inState(TransportState.SHUTDOWN_TERMINATED)
&& (forceTerminate || ongoingCalls.isEmpty())) {
incomingBinder.detach();
setState(TransportState.SHUTDOWN_TERMINATED);
sendShutdownTransaction();
ArrayList<Inbound<?>> calls = new ArrayList<>(ongoingCalls.values());
ongoingCalls.clear();
ArrayList<Future<?>> futuresToCancel = new ArrayList<>(ownedFutures);
ownedFutures.clear();
scheduledExecutorService.execute(
() -> {
for (Inbound<?> inbound : calls) {
synchronized (inbound) {
inbound.closeAbnormal(shutdownStatus);
}
}
for (Future<?> future : futuresToCancel) {
// Not holding any locks here just in case some listener runs on a direct Executor.
future.cancel(false); // No effect if already isDone().
}
synchronized (this) {
notifyTerminated();
}
releaseExecutors();
});
}
}
@GuardedBy("this")
final void sendSetupTransaction() {
sendSetupTransaction(checkNotNull(outgoingBinder));
}
@GuardedBy("this")
final void sendSetupTransaction(OneWayBinderProxy iBinder) {
try (ParcelHolder parcel = ParcelHolder.obtain()) {
parcel.get().writeInt(WIRE_FORMAT_VERSION);
parcel.get().writeStrongBinder(incomingBinder);
iBinder.transact(SETUP_TRANSPORT, parcel);
} catch (RemoteException re) {
shutdownInternal(statusFromRemoteException(re), true);
}
}
@GuardedBy("this")
private final void sendShutdownTransaction() {
if (outgoingBinder != null) {
try {
outgoingBinder.getDelegate().unlinkToDeath(this, 0);
} catch (NoSuchElementException e) {
// Ignore.
}
try (ParcelHolder parcel = ParcelHolder.obtain()) {
// Send empty flags to avoid a memory leak linked to empty parcels (b/207778694).
parcel.get().writeInt(0);
outgoingBinder.transact(SHUTDOWN_TRANSPORT, parcel);
} catch (RemoteException re) {
// Ignore.
}
}
}
protected synchronized void sendPing(int id) throws StatusException {
if (inState(TransportState.SHUTDOWN_TERMINATED)) {
throw shutdownStatus.asException();
} else if (outgoingBinder == null) {
throw Status.FAILED_PRECONDITION.withDescription("Transport not ready.").asException();
} else {
try (ParcelHolder parcel = ParcelHolder.obtain()) {
parcel.get().writeInt(id);
outgoingBinder.transact(PING, parcel);
} catch (RemoteException re) {
throw statusFromRemoteException(re).asException();
}
}
}
protected void unregisterInbound(Inbound<?> inbound) {
unregisterCall(inbound.callId);
}
final void unregisterCall(int callId) {
boolean removed = (ongoingCalls.remove(callId) != null);
if (removed && ongoingCalls.isEmpty()) {
// Possibly shutdown (not synchronously, since inbound is held).
scheduledExecutorService.execute(
() -> {
synchronized (this) {
if (inState(TransportState.SHUTDOWN)) {
// No more ongoing calls, and we're shutdown. Finish the shutdown.
shutdownInternal(shutdownStatus, true);
}
}
});
}
}
final void sendTransaction(int callId, ParcelHolder parcel) throws StatusException {
int dataSize = parcel.get().dataSize();
try {
outgoingBinder.transact(callId, parcel);
} catch (RemoteException re) {
throw statusFromRemoteException(re).asException();
}
if (flowController.notifyBytesSent(dataSize)) {
logger.log(Level.FINE, "transmit window now full " + this);
}
}
  /**
   * Sends an out-of-band close transaction for {@code callId} carrying the given status.
   * Best effort: a send failure is logged at FINER and otherwise ignored.
   */
  final void sendOutOfBandClose(int callId, Status status) {
    try (ParcelHolder parcel = ParcelHolder.obtain()) {
      parcel.get().writeInt(0); // Placeholder for flags. Will be filled in below.
      int flags = TransactionUtils.writeStatus(parcel.get(), status);
      TransactionUtils.fillInFlags(parcel.get(), flags | TransactionUtils.FLAG_OUT_OF_BAND_CLOSE);
      sendTransaction(callId, parcel);
    } catch (StatusException e) {
      logger.log(Level.FINER, "Failed sending oob close transaction", e);
    }
  }
  /**
   * Entry point for every incoming binder transaction. Delegates to
   * {@link #handleTransactionInternal}; any uncaught RuntimeException force-terminates the whole
   * transport so both sides can retry on a fresh instance.
   *
   * @return whether the transaction code was recognized and handled.
   */
  @BinderThread
  @VisibleForTesting
  final boolean handleTransaction(int code, Parcel parcel) {
    try {
      return handleTransactionInternal(code, parcel);
    } catch (RuntimeException e) {
      logger.log(
          Level.SEVERE, "Terminating transport for uncaught Exception in transaction " + code, e);
      synchronized (this) {
        // This unhandled exception may have put us in an inconsistent state. Force terminate the
        // whole transport so our peer knows something is wrong and so that clients can retry with
        // a fresh transport instance on both sides.
        shutdownInternal(Status.INTERNAL.withCause(e), true);
        return false;
      }
    }
  }
  /**
   * Dispatches one incoming transaction. Codes below FIRST_CALL_ID are transport-control
   * transactions handled under the transport lock; all other codes are per-call data routed to
   * the matching {@link Inbound}, with flow-control accounting of the received bytes.
   *
   * @return whether the transaction code was recognized and handled.
   */
  @BinderThread
  private boolean handleTransactionInternal(int code, Parcel parcel) {
    if (code < FIRST_CALL_ID) {
      synchronized (this) {
        switch (code) {
          case ACKNOWLEDGE_BYTES:
            handleAcknowledgedBytes(parcel.readLong());
            break;
          case SHUTDOWN_TRANSPORT:
            shutdownInternal(
                Status.UNAVAILABLE.withDescription("transport shutdown by peer"), true);
            break;
          case SETUP_TRANSPORT:
            handleSetupTransport(parcel);
            break;
          case PING:
            handlePing(parcel);
            break;
          case PING_RESPONSE:
            handlePingResponse(parcel);
            break;
          default:
            return false;
        }
        return true;
      }
    } else {
      // Per-call data transaction. Record the payload size up front for flow-control accounting.
      int size = parcel.dataSize();
      Inbound<?> inbound = ongoingCalls.get(code);
      if (inbound == null) {
        // Unknown call id: lazily create and register an Inbound, unless we're shutting down.
        synchronized (this) {
          if (!isShutdown()) {
            inbound = createInbound(code);
            if (inbound != null) {
              Inbound<?> existing = ongoingCalls.put(code, inbound);
              // Can't happen as only one invocation of handleTransaction() is running at a time.
              Verify.verify(existing == null, "impossible appearance of %s", existing);
            }
          }
        }
      }
      if (inbound != null) {
        inbound.handleTransaction(parcel);
      }
      numIncomingBytes += size;
      if ((numIncomingBytes - acknowledgedIncomingBytes) > TRANSACTION_BYTES_WINDOW_FORCE_ACK) {
        // Enough unacknowledged bytes have accumulated; tell the peer so it can reopen its
        // transmit window (see sendAcknowledgeBytes).
        synchronized (this) {
          sendAcknowledgeBytes(checkNotNull(outgoingBinder), numIncomingBytes);
        }
        acknowledgedIncomingBytes = numIncomingBytes;
      }
      return true;
    }
  }
  /**
   * Wraps the incoming binder's current transaction handler with a filter that only accepts
   * transactions from {@code allowedCallingUid}. No-op if no handler is currently installed.
   */
  @BinderThread
  @GuardedBy("this")
  protected void restrictIncomingBinderToCallsFrom(int allowedCallingUid) {
    TransactionHandler currentHandler = incomingBinder.getHandler();
    if (currentHandler != null) {
      incomingBinder.setHandler(newCallerFilteringHandler(allowedCallingUid, currentHandler));
    }
  }
  /**
   * Creates an {@link Inbound} for a not-yet-registered call id. The base implementation returns
   * null (no new inbound calls accepted); intended as an override point.
   */
  @Nullable
  @GuardedBy("this")
  protected Inbound<?> createInbound(int callId) {
    return null;
  }
  /** Handles a SETUP_TRANSPORT control transaction. Base implementation does nothing. */
  @GuardedBy("this")
  protected void handleSetupTransport(Parcel parcel) {}
@GuardedBy("this")
private final void handlePing(Parcel requestParcel) {
int id = requestParcel.readInt();
if (transportState == TransportState.READY) {
try (ParcelHolder replyParcel = ParcelHolder.obtain()) {
replyParcel.get().writeInt(id);
outgoingBinder.transact(PING_RESPONSE, replyParcel);
} catch (RemoteException re) {
// Ignore.
}
}
}
  /** Handles a PING_RESPONSE control transaction. Base implementation does nothing. */
  @GuardedBy("this")
  protected void handlePingResponse(Parcel parcel) {}
  /**
   * Sends an ACKNOWLEDGE_BYTES transaction telling the peer we have received {@code n} bytes in
   * total. A RemoteException here terminates the transport.
   */
  @GuardedBy("this")
  private void sendAcknowledgeBytes(OneWayBinderProxy iBinder, long n) {
    // Send a transaction to acknowledge reception of incoming data.
    try (ParcelHolder parcel = ParcelHolder.obtain()) {
      parcel.get().writeLong(n);
      iBinder.transact(ACKNOWLEDGE_BYTES, parcel);
    } catch (RemoteException re) {
      shutdownInternal(statusFromRemoteException(re), true);
    }
  }
  /**
   * Processes a peer acknowledgement of {@code numBytes} received. If this reopens a previously
   * full transmit window, notifies waiting calls (in insertion order) that the transport is
   * ready again, stopping early if the window fills back up.
   */
  @GuardedBy("this")
  final void handleAcknowledgedBytes(long numBytes) {
    if (flowController.handleAcknowledgedBytes(numBytes)) {
      logger.log(
          Level.FINE,
          "handleAcknowledgedBytes: Transmit Window No-Longer Full. Unblock calls: " + this);
      // The LinkedHashSet contract guarantees that an id already present in this collection will
      // not lose its priority if we re-insert it here.
      callIdsToNotifyWhenReady.addAll(ongoingCalls.keySet());
      Iterator<Integer> i = callIdsToNotifyWhenReady.iterator();
      while (isReady() && i.hasNext()) {
        Inbound<?> inbound = ongoingCalls.get(i.next());
        i.remove();
        if (inbound != null) { // Calls can be removed out from under us.
          inbound.onTransportReady();
        }
      }
    }
  }
private static void checkTransition(TransportState current, TransportState next) {
switch (next) {
case SETUP:
checkState(current == TransportState.NOT_STARTED);
break;
case READY:
checkState(current == TransportState.NOT_STARTED || current == TransportState.SETUP);
break;
case SHUTDOWN:
checkState(
current == TransportState.NOT_STARTED
|| current == TransportState.SETUP
|| current == TransportState.READY);
break;
case SHUTDOWN_TERMINATED:
checkState(current == TransportState.SHUTDOWN);
break;
default:
throw new AssertionError();
}
}
  /** Returns the live map of ongoing calls, keyed by call id. For tests only. */
  @VisibleForTesting
  Map<Integer, Inbound<?>> getOngoingCalls() {
    return ongoingCalls;
  }
  /** Returns this transport's incoming binder. For tests only. */
  @VisibleForTesting
  synchronized LeakSafeOneWayBinder getIncomingBinderForTesting() {
    return this.incomingBinder;
  }
private static Status statusFromRemoteException(RemoteException e) {
if (e instanceof DeadObjectException || e instanceof TransactionTooLargeException) {
// These are to be expected from time to time and can simply be retried.
return Status.UNAVAILABLE.withCause(e);
}
// Otherwise, this exception from transact is unexpected.
return Status.INTERNAL.withCause(e);
}
}
| TransportState |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/project/ProjectDeletedListener.java | {
"start": 848,
"end": 1288
} | class ____ {
private final Consumer<ProjectId> consumer;
public ProjectDeletedListener(Consumer<ProjectId> consumer) {
this.consumer = consumer;
}
public void attach(ClusterService clusterService) {
clusterService.addListener(event -> {
final ClusterChangedEvent.ProjectsDelta delta = event.projectDelta();
delta.removed().forEach(consumer);
});
}
}
| ProjectDeletedListener |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/XmlVerifierEndpointBuilderFactory.java | {
"start": 23701,
"end": 24524
} | interface ____ the application to check the XML signature
* before the validation is executed. This step is recommended in
* http://www.w3.org/TR/xmldsig-bestpractices/#check-what-is-signed.
*
* The option will be converted to a
* <code>org.apache.camel.component.xmlsecurity.api.XmlSignatureChecker</code> type.
*
* Group: producer
*
* @param xmlSignatureChecker the value to set
* @return the dsl builder
*/
default XmlVerifierEndpointBuilder xmlSignatureChecker(String xmlSignatureChecker) {
doSetProperty("xmlSignatureChecker", xmlSignatureChecker);
return this;
}
}
/**
* Advanced builder for endpoint for the XML Security Verify component.
*/
public | allows |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/sql/internal/ArrayDdlTypeImpl.java | {
"start": 801,
"end": 4590
} | class ____ extends DdlTypeImpl {
private final boolean castRawElementType;
public ArrayDdlTypeImpl(Dialect dialect, boolean castRawElementType) {
super( ARRAY, "array", dialect );
this.castRawElementType = castRawElementType;
}
@Override
public String getCastTypeName(Size columnSize, SqlExpressible type, DdlTypeRegistry ddlTypeRegistry) {
final BasicPluralType<?, ?> pluralType = (BasicPluralType<?, ?>) type;
final BasicType<?> elementType = pluralType.getElementType();
String arrayElementTypeName;
if ( elementType.getJavaTypeDescriptor() instanceof EmbeddableAggregateJavaType<?> embeddableAggregateJavaType ) {
arrayElementTypeName = embeddableAggregateJavaType.getStructName();
}
else {
arrayElementTypeName = ddlTypeRegistry.getDescriptor( elementType.getJdbcType().getDdlTypeCode() )
.getCastTypeName(
dialect.getSizeStrategy().resolveSize(
elementType.getJdbcMapping().getJdbcType(),
elementType.getJavaTypeDescriptor(),
columnSize
),
elementType,
ddlTypeRegistry
);
}
if ( castRawElementType ) {
final int paren = arrayElementTypeName.indexOf( '(' );
if ( paren > 0 ) {
final int parenEnd = arrayElementTypeName.lastIndexOf( ')' );
arrayElementTypeName = parenEnd + 1 == arrayElementTypeName.length()
? arrayElementTypeName.substring( 0, paren )
: ( arrayElementTypeName.substring( 0, paren ) + arrayElementTypeName.substring( parenEnd + 1 ) );
}
}
return dialect.getArrayTypeName(
getElementTypeSimpleName( pluralType.getElementType(), dialect ),
arrayElementTypeName,
columnSize.getArrayLength()
);
}
@Override
public String getTypeName(Size columnSize, Type type, DdlTypeRegistry ddlTypeRegistry) {
final BasicPluralType<?, ?> pluralType = (BasicPluralType<?, ?>) type;
final BasicType<?> elementType = pluralType.getElementType();
final String arrayElementTypeName = ddlTypeRegistry.getTypeName(
elementType.getJdbcType().getDdlTypeCode(),
dialect.getSizeStrategy().resolveSize(
elementType.getJdbcMapping().getJdbcType(),
elementType.getJavaTypeDescriptor(),
columnSize
),
elementType
);
return dialect.getArrayTypeName(
getElementTypeSimpleName( pluralType.getElementType(), dialect ),
arrayElementTypeName,
columnSize.getArrayLength()
);
}
private static String getElementTypeSimpleName(BasicType<?> elementType, Dialect dialect) {
final BasicValueConverter<?, ?> converter = elementType.getValueConverter();
if ( converter != null ) {
if ( converter instanceof JpaAttributeConverter<?, ?> attributeConverter ) {
return attributeConverter.getConverterJavaType()
.getJavaTypeClass()
.getSimpleName();
}
else {
return converter.getClass().getSimpleName();
}
}
final JavaType<?> elementJavaType = elementType.getJavaTypeDescriptor();
if ( elementJavaType.getJavaTypeClass().isArray() ) {
return dialect.getArrayTypeName(
elementJavaType.getJavaTypeClass().getComponentType().getSimpleName(),
null,
null
);
}
else {
final Class<?> preferredJavaTypeClass = elementType.getJdbcType().getPreferredJavaTypeClass( null );
if ( preferredJavaTypeClass == null || preferredJavaTypeClass == elementJavaType.getJavaTypeClass() ) {
return elementJavaType.getJavaTypeClass().getSimpleName();
}
else {
if ( preferredJavaTypeClass.isArray() ) {
return elementJavaType.getJavaTypeClass().getSimpleName() + dialect.getArrayTypeName(
preferredJavaTypeClass.getComponentType().getSimpleName(),
null,
null
);
}
else {
return elementJavaType.getJavaTypeClass().getSimpleName() + preferredJavaTypeClass.getSimpleName();
}
}
}
}
}
| ArrayDdlTypeImpl |
java | apache__flink | flink-python/src/test/java/org/apache/flink/python/util/PythonConfigUtilTest.java | {
"start": 1368,
"end": 1897
} | class ____ {
@Test
void testJobName() {
String jobName = "MyTestJob";
Configuration config = new Configuration();
config.set(PipelineOptions.NAME, jobName);
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
env.fromData(Collections.singletonList("test")).sinkTo(new DiscardingSink<>());
StreamGraph streamGraph = env.getStreamGraph(true);
assertThat(streamGraph.getJobName()).isEqualTo(jobName);
}
}
| PythonConfigUtilTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java | {
"start": 1670,
"end": 2433
} | class ____ {
@TempDir private static Path tempDir;
private static final String ENUM_NAME = "EnumSerializerUpgradeTestEnum";
private static final String ENUM_A = "public enum " + ENUM_NAME + " { A, B, C }";
private static final String ENUM_B = "public enum " + ENUM_NAME + " { A, B, C, D }";
private static final String ENUM_C = "public enum " + ENUM_NAME + " { A, C }";
private static final String ENUM_D = "public enum " + ENUM_NAME + " { A, C, B }";
/** Check that identical enums don't require migration */
@Test
void checkIndenticalEnums() throws Exception {
assertThat(checkCompatibility(ENUM_A, ENUM_A).isCompatibleAsIs()).isTrue();
}
/** Check that appending fields to the | EnumSerializerCompatibilityTest |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java | {
"start": 1310,
"end": 7535
} | class ____ extends ConnectorActionRequest implements ToXContentObject {
@Nullable
private final String connectorId;
@Nullable
private final String description;
@Nullable
private final String indexName;
@Nullable
private final Boolean isNative;
@Nullable
private final String language;
@Nullable
private final String name;
@Nullable
private final String serviceType;
public Request(
String connectorId,
String description,
String indexName,
Boolean isNative,
String language,
String name,
String serviceType
) {
this.connectorId = connectorId;
this.description = description;
this.indexName = indexName;
this.isNative = isNative;
this.language = language;
this.name = name;
this.serviceType = serviceType;
}
public Request(String connectorId) {
this(connectorId, null, null, false, null, null, null);
}
public Request(StreamInput in) throws IOException {
super(in);
this.connectorId = in.readString();
this.description = in.readOptionalString();
this.indexName = in.readOptionalString();
this.isNative = in.readOptionalBoolean();
this.language = in.readOptionalString();
this.name = in.readOptionalString();
this.serviceType = in.readOptionalString();
}
private static final ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(
"connector_put_request",
false,
((args, connectorId) -> new Request(
connectorId,
(String) args[0],
(String) args[1],
(Boolean) args[2],
(String) args[3],
(String) args[4],
(String) args[5]
))
);
static {
PARSER.declareString(optionalConstructorArg(), new ParseField("description"));
PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("index_name"));
PARSER.declareBoolean(optionalConstructorArg(), new ParseField("is_native"));
PARSER.declareString(optionalConstructorArg(), new ParseField("language"));
PARSER.declareString(optionalConstructorArg(), new ParseField("name"));
PARSER.declareString(optionalConstructorArg(), new ParseField("service_type"));
}
public boolean isConnectorIdNullOrEmpty() {
return Strings.isNullOrEmpty(connectorId);
}
public static Request fromXContent(XContentParser parser, String connectorId) throws IOException {
return PARSER.parse(parser, connectorId);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
if (description != null) {
builder.field("description", description);
}
if (indexName != null) {
builder.field("index_name", indexName);
}
if (isNative != null) {
builder.field("is_native", isNative);
}
if (language != null) {
builder.field("language", language);
}
if (name != null) {
builder.field("name", name);
}
if (serviceType != null) {
builder.field("service_type", serviceType);
}
}
builder.endObject();
return builder;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
validationException = validateIndexName(indexName, validationException);
if (Boolean.TRUE.equals(isNative)) {
validationException = validateManagedConnectorIndexPrefix(indexName, validationException);
}
return validationException;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(connectorId);
out.writeOptionalString(description);
out.writeOptionalString(indexName);
out.writeOptionalBoolean(isNative);
out.writeOptionalString(language);
out.writeOptionalString(name);
out.writeOptionalString(serviceType);
}
public String getConnectorId() {
return connectorId;
}
public String getDescription() {
return description;
}
public String getIndexName() {
return indexName;
}
public Boolean getIsNative() {
return isNative;
}
public String getLanguage() {
return language;
}
public String getName() {
return name;
}
public String getServiceType() {
return serviceType;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(connectorId, request.connectorId)
&& Objects.equals(description, request.description)
&& Objects.equals(indexName, request.indexName)
&& Objects.equals(isNative, request.isNative)
&& Objects.equals(language, request.language)
&& Objects.equals(name, request.name)
&& Objects.equals(serviceType, request.serviceType);
}
@Override
public int hashCode() {
return Objects.hash(connectorId, description, indexName, isNative, language, name, serviceType);
}
}
}
| Request |
java | elastic__elasticsearch | libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java | {
"start": 4189,
"end": 4830
} | class ____ loaded, when the multi-release attribute
* is present and the versioned entry is less than or equal to the runtime version.
*/
public void testLoadWithMultiReleaseEnabled9() throws Exception {
assumeTrue("JDK version not greater than or equal to 9", Runtime.version().feature() >= 9);
Object foobar = newFooBar(true, 9);
// expect 9 version of FooBar to be loaded
assertThat(foobar.toString(), equalTo("FooBar " + 9));
foobar = newFooBar(true, 9, 8);
assertThat(foobar.toString(), equalTo("FooBar " + 9));
}
/*
* Tests that the specific, 11, version of a | is |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/HistogramMergeOverTime.java | {
"start": 1232,
"end": 3421
} | class ____ extends TimeSeriesAggregateFunction implements OptionalArgument {
// TODO Eventually we want to replace this with some increase/rate implementation
// for histograms to be consistent with counters on extrapolation.
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"HistogramMergeOverTime",
HistogramMergeOverTime::new
);
@FunctionInfo(returnType = "exponential_histogram", type = FunctionType.TIME_SERIES_AGGREGATE)
public HistogramMergeOverTime(
Source source,
@Param(name = "histogram", type = "exponential_histogram") Expression field,
@Param(name = "window", type = "time_duration", optional = true) Expression window
) {
this(source, field, Literal.TRUE, Objects.requireNonNullElse(window, NO_WINDOW));
}
public HistogramMergeOverTime(Source source, Expression field, Expression filter, Expression window) {
super(source, field, filter, window, emptyList());
}
private HistogramMergeOverTime(StreamInput in) throws IOException {
super(in);
}
@Override
protected TypeResolution resolveType() {
return perTimeSeriesAggregation().resolveType();
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public DataType dataType() {
return perTimeSeriesAggregation().dataType();
}
@Override
protected NodeInfo<HistogramMergeOverTime> info() {
return NodeInfo.create(this, HistogramMergeOverTime::new, field(), filter(), window());
}
@Override
public HistogramMergeOverTime replaceChildren(List<Expression> newChildren) {
return new HistogramMergeOverTime(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2));
}
@Override
public HistogramMergeOverTime withFilter(Expression filter) {
return new HistogramMergeOverTime(source(), field(), filter, window());
}
@Override
public AggregateFunction perTimeSeriesAggregation() {
return new HistogramMerge(source(), field(), filter(), window());
}
}
| HistogramMergeOverTime |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.