language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-boot | module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/ReactorHttpClientBuilder.java | {
"start": 1610,
"end": 5753
} | class ____ {
private final Supplier<HttpClient> factory;
private final UnaryOperator<HttpClient> customizer;
public ReactorHttpClientBuilder() {
this(HttpClient::create, UnaryOperator.identity());
}
private ReactorHttpClientBuilder(Supplier<HttpClient> httpClientFactory, UnaryOperator<HttpClient> customizer) {
this.factory = httpClientFactory;
this.customizer = customizer;
}
/**
* Return a new {@link ReactorHttpClientBuilder} that uses the given
* {@link ReactorResourceFactory} to create the {@link HttpClient}.
* @param reactorResourceFactory the {@link ReactorResourceFactory} to use
* @return a new {@link ReactorHttpClientBuilder} instance
*/
public ReactorHttpClientBuilder withReactorResourceFactory(ReactorResourceFactory reactorResourceFactory) {
Assert.notNull(reactorResourceFactory, "'reactorResourceFactory' must not be null");
return new ReactorHttpClientBuilder(() -> HttpClient.create(reactorResourceFactory.getConnectionProvider()),
(httpClient) -> this.customizer.apply(httpClient).runOn(reactorResourceFactory.getLoopResources()));
}
/**
* Return a new {@link ReactorHttpClientBuilder} that uses the given factory to create
* the {@link HttpClient}.
* @param factory the factory to use
* @return a new {@link ReactorHttpClientBuilder} instance
*/
public ReactorHttpClientBuilder withHttpClientFactory(Supplier<HttpClient> factory) {
Assert.notNull(factory, "'factory' must not be null");
return new ReactorHttpClientBuilder(factory, this.customizer);
}
/**
* Return a new {@link ReactorHttpClientBuilder} that applies additional customization
* to the underlying {@link HttpClient}.
* @param customizer the customizer to apply
* @return a new {@link ReactorHttpClientBuilder} instance
*/
public ReactorHttpClientBuilder withHttpClientCustomizer(UnaryOperator<HttpClient> customizer) {
Assert.notNull(customizer, "'customizer' must not be null");
return new ReactorHttpClientBuilder(this.factory,
(httpClient) -> customizer.apply(this.customizer.apply(httpClient)));
}
/**
* Build a new {@link HttpClient} instance with the given settings applied.
* @param settings the settings to apply
* @return a new {@link HttpClient} instance
*/
public HttpClient build(@Nullable HttpClientSettings settings) {
settings = (settings != null) ? settings : HttpClientSettings.defaults();
HttpClient httpClient = applyDefaults(this.factory.get());
PropertyMapper map = PropertyMapper.get();
httpClient = map.from(settings::connectTimeout).to(httpClient, this::setConnectTimeout);
httpClient = map.from(settings::readTimeout).to(httpClient, HttpClient::responseTimeout);
httpClient = map.from(settings::redirects)
.orFrom(() -> HttpRedirects.FOLLOW_WHEN_POSSIBLE)
.as(this::followRedirects)
.to(httpClient, HttpClient::followRedirect);
httpClient = map.from(settings::sslBundle).to(httpClient, this::secure);
return this.customizer.apply(httpClient);
}
HttpClient applyDefaults(HttpClient httpClient) {
// Aligns with Spring Framework defaults
return httpClient.compress(true);
}
private HttpClient setConnectTimeout(HttpClient httpClient, Duration timeout) {
return httpClient.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, (int) timeout.toMillis());
}
private boolean followRedirects(HttpRedirects redirects) {
return switch (redirects) {
case FOLLOW_WHEN_POSSIBLE, FOLLOW -> true;
case DONT_FOLLOW -> false;
};
}
private HttpClient secure(HttpClient httpClient, SslBundle sslBundle) {
return httpClient.secure((ThrowingConsumer.of((spec) -> configureSsl(spec, sslBundle))));
}
private void configureSsl(SslContextSpec spec, SslBundle sslBundle) throws SSLException {
SslOptions options = sslBundle.getOptions();
SslManagerBundle managers = sslBundle.getManagers();
SslContextBuilder builder = SslContextBuilder.forClient()
.keyManager(managers.getKeyManagerFactory())
.trustManager(managers.getTrustManagerFactory())
.ciphers(SslOptions.asSet(options.getCiphers()))
.protocols(options.getEnabledProtocols());
spec.sslContext(builder.build());
}
}
| ReactorHttpClientBuilder |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/allocator/SlotTaskExecutorWeight.java | {
"start": 1047,
"end": 1182
} | class ____ represent the slot and the loading or slots utilization weight info of the task
* executor where the slot is located at.
*/
| to |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IgniteQueueEndpointBuilderFactory.java | {
"start": 15985,
"end": 16327
} | class ____ extends AbstractEndpointBuilder implements IgniteQueueEndpointBuilder, AdvancedIgniteQueueEndpointBuilder {
public IgniteQueueEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new IgniteQueueEndpointBuilderImpl(path);
}
} | IgniteQueueEndpointBuilderImpl |
java | elastic__elasticsearch | modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureRepositoryMissingCredentialsIT.java | {
"start": 1332,
"end": 2938
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), AzureRepositoryPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
final MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("default").getKey(), "test-account");
return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).setSecureSettings(secureSettings).build();
}
public void testMissingCredentialsException() {
assertThat(
safeAwaitAndUnwrapFailure(
RepositoryVerificationException.class,
AcknowledgedResponse.class,
l -> client().execute(
TransportPutRepositoryAction.TYPE,
new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").type("azure"),
l
)
).getCause().getMessage(),
allOf(
containsString("EnvironmentCredential authentication unavailable"),
containsString("WorkloadIdentityCredential authentication unavailable"),
containsString("Managed Identity authentication is not available"),
containsString("SharedTokenCacheCredential authentication unavailable")
)
);
}
}
| AzureRepositoryMissingCredentialsIT |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java | {
"start": 19507,
"end": 19768
} | class ____ {
}
LauncherDiscoveryRequest request = builder.applySelectorsAndFiltersFromSuite(Suite.class).build();
assertTrue(request.getSelectorsByType(DirectorySelector.class).isEmpty());
}
@Test
void selectFile() {
@SelectFile("path/to/root")
| Suite |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/BuildProducerImpl.java | {
"start": 308,
"end": 709
} | class ____<T extends BuildItem> implements BuildProducer<T> {
private final Class<T> type;
private final BuildContext buildContext;
public BuildProducerImpl(Class<T> type, BuildContext buildContext) {
this.type = type;
this.buildContext = buildContext;
}
@Override
public void produce(T item) {
buildContext.produce(type, item);
}
}
| BuildProducerImpl |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/util/KeyedTwoInputStreamOperatorTestHarness.java | {
"start": 1631,
"end": 4897
} | class ____<K, IN1, IN2, OUT>
extends TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> {
public KeyedTwoInputStreamOperatorTestHarness(
TwoInputStreamOperator<IN1, IN2, OUT> operator,
KeySelector<IN1, K> keySelector1,
KeySelector<IN2, K> keySelector2,
TypeInformation<K> keyType,
int maxParallelism,
int numSubtasks,
int subtaskIndex)
throws Exception {
this(
operator,
keySelector1,
keySelector2,
keyType,
maxParallelism,
numSubtasks,
subtaskIndex,
null,
null);
}
public KeyedTwoInputStreamOperatorTestHarness(
TwoInputStreamOperator<IN1, IN2, OUT> operator,
KeySelector<IN1, K> keySelector1,
KeySelector<IN2, K> keySelector2,
TypeInformation<K> keyType,
int maxParallelism,
int numSubtasks,
int subtaskIndex,
@Nullable TypeSerializer<?> leftSerializer,
@Nullable TypeSerializer<?> rightSerializer)
throws Exception {
super(operator, maxParallelism, numSubtasks, subtaskIndex);
ClosureCleaner.clean(keySelector1, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, false);
ClosureCleaner.clean(keySelector2, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, false);
config.setStatePartitioner(0, keySelector1);
config.setStatePartitioner(1, keySelector2);
config.setStateKeySerializer(
keyType.createSerializer(executionConfig.getSerializerConfig()));
if (leftSerializer != null && rightSerializer != null) {
config.setupNetworkInputs(leftSerializer, rightSerializer);
}
config.serializeAllConfigs();
}
public KeyedTwoInputStreamOperatorTestHarness(
TwoInputStreamOperator<IN1, IN2, OUT> operator,
final KeySelector<IN1, K> keySelector1,
final KeySelector<IN2, K> keySelector2,
TypeInformation<K> keyType)
throws Exception {
this(operator, keySelector1, keySelector2, keyType, 1, 1, 0);
}
public int numKeyedStateEntries() {
AbstractStreamOperator<?> abstractStreamOperator = (AbstractStreamOperator<?>) operator;
KeyedStateBackend<Object> keyedStateBackend = abstractStreamOperator.getKeyedStateBackend();
if (keyedStateBackend instanceof HeapKeyedStateBackend) {
return ((HeapKeyedStateBackend) keyedStateBackend).numKeyValueStateEntries();
} else {
throw new UnsupportedOperationException(
String.format(
"Unsupported keyed state backend: %s",
keyedStateBackend.getClass().getCanonicalName()));
}
}
public void endAllInputs() throws Exception {
TwoInputStreamOperator<IN1, IN2, OUT> op = (TwoInputStreamOperator<IN1, IN2, OUT>) operator;
if (op instanceof BoundedMultiInput) {
((BoundedMultiInput) op).endInput(1);
((BoundedMultiInput) op).endInput(2);
}
}
}
| KeyedTwoInputStreamOperatorTestHarness |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/GenericFilterBean.java | {
"start": 3398,
"end": 11927
} | class ____ implements Filter, BeanNameAware, EnvironmentAware,
EnvironmentCapable, ServletContextAware, InitializingBean, DisposableBean {
/** Logger available to subclasses. */
protected final Log logger = LogFactory.getLog(getClass());
private @Nullable String beanName;
private @Nullable Environment environment;
private @Nullable ServletContext servletContext;
private @Nullable FilterConfig filterConfig;
private final Set<String> requiredProperties = new HashSet<>(4);
/**
* Stores the bean name as defined in the Spring bean factory.
* <p>Only relevant in case of initialization as bean, to have a name as
* fallback to the filter name usually provided by a FilterConfig instance.
* @see org.springframework.beans.factory.BeanNameAware
* @see #getFilterName()
*/
@Override
public void setBeanName(String beanName) {
this.beanName = beanName;
}
/**
* Set the {@code Environment} that this filter runs in.
* <p>Any environment set here overrides the {@link StandardServletEnvironment}
* provided by default.
* <p>This {@code Environment} object is used only for resolving placeholders in
* resource paths passed into init-parameters for this filter. If no init-params are
* used, this {@code Environment} can be essentially ignored.
*/
@Override
public void setEnvironment(Environment environment) {
this.environment = environment;
}
/**
* Return the {@link Environment} associated with this filter.
* <p>If none specified, a default environment will be initialized via
* {@link #createEnvironment()}.
* @since 4.3.9
*/
@Override
public Environment getEnvironment() {
if (this.environment == null) {
this.environment = createEnvironment();
}
return this.environment;
}
/**
* Create and return a new {@link StandardServletEnvironment}.
* <p>Subclasses may override this in order to configure the environment or
* specialize the environment type returned.
* @since 4.3.9
*/
protected Environment createEnvironment() {
return new StandardServletEnvironment();
}
/**
* Stores the ServletContext that the bean factory runs in.
* <p>Only relevant in case of initialization as bean, to have a ServletContext
* as fallback to the context usually provided by a FilterConfig instance.
* @see org.springframework.web.context.ServletContextAware
* @see #getServletContext()
*/
@Override
public void setServletContext(ServletContext servletContext) {
this.servletContext = servletContext;
}
/**
* Calls the {@code initFilterBean()} method that might
* contain custom initialization of a subclass.
* <p>Only relevant in case of initialization as bean, where the
* standard {@code init(FilterConfig)} method won't be called.
* @see #initFilterBean()
* @see #init(jakarta.servlet.FilterConfig)
*/
@Override
public void afterPropertiesSet() throws ServletException {
initFilterBean();
}
/**
* Subclasses may override this to perform custom filter shutdown.
* <p>Note: This method will be called from standard filter destruction
* as well as filter bean destruction in a Spring application context.
* <p>This default implementation is empty.
*/
@Override
public void destroy() {
}
/**
* Subclasses can invoke this method to specify that this property
* (which must match a JavaBean property they expose) is mandatory,
* and must be supplied as a config parameter. This should be called
* from the constructor of a subclass.
* <p>This method is only relevant in case of traditional initialization
* driven by a FilterConfig instance.
* @param property name of the required property
*/
protected final void addRequiredProperty(String property) {
this.requiredProperties.add(property);
}
/**
* Standard way of initializing this filter.
* Map config parameters onto bean properties of this filter, and
* invoke subclass initialization.
* @param filterConfig the configuration for this filter
* @throws ServletException if bean properties are invalid (or required
* properties are missing), or if subclass initialization fails.
* @see #initFilterBean
*/
@Override
public final void init(FilterConfig filterConfig) throws ServletException {
Assert.notNull(filterConfig, "FilterConfig must not be null");
this.filterConfig = filterConfig;
// Set bean properties from init parameters.
PropertyValues pvs = new FilterConfigPropertyValues(filterConfig, this.requiredProperties);
if (!pvs.isEmpty()) {
try {
BeanWrapper bw = PropertyAccessorFactory.forBeanPropertyAccess(this);
ResourceLoader resourceLoader = new ServletContextResourceLoader(filterConfig.getServletContext());
Environment env = this.environment;
if (env == null) {
env = new StandardServletEnvironment();
}
bw.registerCustomEditor(Resource.class, new ResourceEditor(resourceLoader, env));
initBeanWrapper(bw);
bw.setPropertyValues(pvs, true);
}
catch (BeansException ex) {
String msg = "Failed to set bean properties on filter '" +
filterConfig.getFilterName() + "': " + ex.getMessage();
logger.error(msg, ex);
throw new ServletException(msg, ex);
}
}
// Let subclasses do whatever initialization they like.
initFilterBean();
if (logger.isDebugEnabled()) {
logger.debug("Filter '" + filterConfig.getFilterName() + "' configured for use");
}
}
/**
* Initialize the BeanWrapper for this GenericFilterBean,
* possibly with custom editors.
* <p>This default implementation is empty.
* @param bw the BeanWrapper to initialize
* @throws BeansException if thrown by BeanWrapper methods
* @see org.springframework.beans.BeanWrapper#registerCustomEditor
*/
protected void initBeanWrapper(BeanWrapper bw) throws BeansException {
}
/**
* Subclasses may override this to perform custom initialization.
* All bean properties of this filter will have been set before this
* method is invoked.
* <p>Note: This method will be called from standard filter initialization
* as well as filter bean initialization in a Spring application context.
* Filter name and ServletContext will be available in both cases.
* <p>This default implementation is empty.
* @throws ServletException if subclass initialization fails
* @see #getFilterName()
* @see #getServletContext()
*/
protected void initFilterBean() throws ServletException {
}
/**
* Make the FilterConfig of this filter available, if any.
* Analogous to GenericServlet's {@code getServletConfig()}.
* <p>Public to resemble the {@code getFilterConfig()} method
* of the Servlet Filter version that shipped with WebLogic 6.1.
* @return the FilterConfig instance, or {@code null} if none available
* @see jakarta.servlet.GenericServlet#getServletConfig()
*/
public @Nullable FilterConfig getFilterConfig() {
return this.filterConfig;
}
/**
* Make the name of this filter available to subclasses.
* Analogous to GenericServlet's {@code getServletName()}.
* <p>Takes the FilterConfig's filter name by default.
* If initialized as bean in a Spring application context,
* it falls back to the bean name as defined in the bean factory.
* @return the filter name, or {@code null} if none available
* @see jakarta.servlet.GenericServlet#getServletName()
* @see jakarta.servlet.FilterConfig#getFilterName()
* @see #setBeanName
*/
protected @Nullable String getFilterName() {
return (this.filterConfig != null ? this.filterConfig.getFilterName() : this.beanName);
}
/**
* Make the ServletContext of this filter available to subclasses.
* Analogous to GenericServlet's {@code getServletContext()}.
* <p>Takes the FilterConfig's ServletContext by default.
* If initialized as bean in a Spring application context,
* it falls back to the ServletContext that the bean factory runs in.
* @return the ServletContext instance
* @throws IllegalStateException if no ServletContext is available
* @see jakarta.servlet.GenericServlet#getServletContext()
* @see jakarta.servlet.FilterConfig#getServletContext()
* @see #setServletContext
*/
protected ServletContext getServletContext() {
if (this.filterConfig != null) {
return this.filterConfig.getServletContext();
}
else if (this.servletContext != null) {
return this.servletContext;
}
else {
throw new IllegalStateException("No ServletContext");
}
}
/**
* PropertyValues implementation created from FilterConfig init parameters.
*/
@SuppressWarnings("serial")
private static | GenericFilterBean |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestHashBasedRouterPolicy.java | {
"start": 1406,
"end": 1517
} | class ____ the {@link HashBasedRouterPolicy}. Tests that one of
* the active sub-cluster is chosen.
*/
public | for |
java | apache__camel | test-infra/camel-test-infra-azure-storage-queue/src/test/java/org/apache/camel/test/infra/azure/storage/queue/services/AzureStorageQueueServiceFactory.java | {
"start": 1695,
"end": 1804
} | class ____ extends AzureStorageQueueInfraService implements AzureService {
}
}
| AzureStorageQueueRemoteService |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/integration/jmx/jmxexporting/IJmxTestBean.java | {
"start": 697,
"end": 861
} | interface ____ {
int getAge();
void setAge(int age);
void setName(String name);
String getName();
int add(int x, int y);
void dontExposeMe();
}
| IJmxTestBean |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/clientproxy/ClientProxyGetContextualInstanceTest.java | {
"start": 496,
"end": 990
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(Moo.class);
@Test
public void testProducer() throws IOException {
Moo moo = Arc.container().instance(Moo.class).get();
assertTrue(moo instanceof ClientProxy);
assertEquals(10, ((Moo) ((ClientProxy) moo).arc_contextualInstance()).val);
assertEquals(10, ClientProxy.unwrap(moo).val);
}
@ApplicationScoped
static | ClientProxyGetContextualInstanceTest |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/unproxyable/MultipleAddMissingNoargsConstructorTest.java | {
"start": 504,
"end": 1144
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyBean.class, MyBeanProducer.class));
@Inject
Instance<MyBean> myBeanInstances;
@Test
public void testBeansProperlyCreated() {
List<String> vals = new ArrayList<>(2);
for (MyBean myBeanInstance : myBeanInstances) {
vals.add(myBeanInstance.getVal());
}
Collections.sort(vals);
Assertions.assertEquals(Arrays.asList("val1", "val2"), vals);
}
static | MultipleAddMissingNoargsConstructorTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorEventHandler.java | {
"start": 1205,
"end": 1290
} | interface ____ {
void handleOperatorEvent(OperatorEvent evt);
}
| OperatorEventHandler |
java | resilience4j__resilience4j | resilience4j-retry/src/test/java/io/github/resilience4j/retry/utils/AsyncUtils.java | {
"start": 887,
"end": 1947
} | class ____ {
private static final long DEFAULT_TIMEOUT_SECONDS = 5;
public static <T> T awaitResult(CompletionStage<T> completionStage, long timeoutSeconds) {
try {
return completionStage.toCompletableFuture().get(timeoutSeconds, TimeUnit.SECONDS);
} catch (InterruptedException | TimeoutException e) {
throw new AssertionError(e);
} catch (ExecutionException e) {
throw new RuntimeExecutionException(e.getCause());
}
}
public static <T> T awaitResult(CompletionStage<T> completionStage) {
return awaitResult(completionStage, DEFAULT_TIMEOUT_SECONDS);
}
public static <T> T awaitResult(Supplier<CompletionStage<T>> completionStageSupplier,
long timeoutSeconds) {
return awaitResult(completionStageSupplier.get(), timeoutSeconds);
}
public static <T> T awaitResult(Supplier<CompletionStage<T>> completionStageSupplier) {
return awaitResult(completionStageSupplier, DEFAULT_TIMEOUT_SECONDS);
}
private static | AsyncUtils |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java | {
"start": 75310,
"end": 77050
} | class ____ extends OptimizerRule<Filter> {
@Override
protected LogicalPlan rule(Filter filter) {
Expression condition = filter.condition().transformUp(BinaryLogic.class, PruneFilters::foldBinaryLogic);
if (condition instanceof Literal) {
if (TRUE.equals(condition)) {
return filter.child();
}
if (FALSE.equals(condition) || Expressions.isNull(condition)) {
return skipPlan(filter);
}
}
if (condition.equals(filter.condition()) == false) {
return new Filter(filter.source(), filter.child(), condition);
}
return filter;
}
protected abstract LogicalPlan skipPlan(Filter filter);
private static Expression foldBinaryLogic(BinaryLogic binaryLogic) {
if (binaryLogic instanceof Or or) {
boolean nullLeft = Expressions.isNull(or.left());
boolean nullRight = Expressions.isNull(or.right());
if (nullLeft && nullRight) {
return new Literal(binaryLogic.source(), null, DataTypes.NULL);
}
if (nullLeft) {
return or.right();
}
if (nullRight) {
return or.left();
}
}
if (binaryLogic instanceof And and) {
if (Expressions.isNull(and.left()) || Expressions.isNull(and.right())) {
return new Literal(binaryLogic.source(), null, DataTypes.NULL);
}
}
return binaryLogic;
}
}
public static final | PruneFilters |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retryable.java | {
"start": 554,
"end": 652
} | interface ____ {
Request rebuildRequest(Request original);
boolean shouldRetry();
}
| Retryable |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/NodeFakeAvailabilityZoneMapperTests.java | {
"start": 1539,
"end": 14220
} | class ____ extends ESTestCase {
public void testBeforeClusterReady() {
Settings settings = Settings.builder().build();
ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);
NodeFakeAvailabilityZoneMapper nodeFakeAvailabilityZoneMapper = new NodeFakeAvailabilityZoneMapper(settings, clusterSettings);
assertThat(nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone(), anEmptyMap());
assertThat(nodeFakeAvailabilityZoneMapper.getNumAvailabilityZones(), is(OptionalInt.empty()));
}
public void testAvailabilityZonesAttributesNotConfiguredMultiRoleNodes() {
// Fake availability zones are populated completely independently of cluster settings,
// attributes etc. Here we test that even in the absence of zone attributes that the
// fake availability zone mapper still assigns each node to a single, unique availability
// zone
Settings settings = Settings.EMPTY;
ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);
DiscoveryNode node1 = DiscoveryNodeUtils.create(
"node-1",
new TransportAddress(InetAddress.getLoopbackAddress(), 9300),
Map.of(),
Set.of(MASTER_ROLE, DATA_ROLE, ML_ROLE)
);
DiscoveryNode node2 = DiscoveryNodeUtils.create(
"node-2",
new TransportAddress(InetAddress.getLoopbackAddress(), 9301),
Map.of(),
Set.of(MASTER_ROLE, DATA_ROLE, ML_ROLE)
);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(node1).add(node2).build();
NodeFakeAvailabilityZoneMapper nodeFakeAvailabilityZoneMapper = new NodeFakeAvailabilityZoneMapper(
settings,
clusterSettings,
discoveryNodes
);
DiscoveryNodes expectedDiscoveryNodes = DiscoveryNodes.builder().add(node1).add(node2).build();
assertThat(nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone(), aMapWithSize(2));
for (DiscoveryNode node : expectedDiscoveryNodes) {
assertThat(
nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone().get(List.of(node.getId())),
equalTo(new ArrayList<DiscoveryNode>(List.of(node)))
);
}
assertThat(nodeFakeAvailabilityZoneMapper.getNumAvailabilityZones().getAsInt(), is(2));
DiscoveryNodes expectedMlDiscoveryNodes = DiscoveryNodes.builder().add(node1).add(node2).build();
assertThat(nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone(), aMapWithSize(2));
for (DiscoveryNode node : expectedMlDiscoveryNodes) {
assertThat(
nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone().get(List.of(node.getId())),
equalTo(new ArrayList<DiscoveryNode>(List.of(node)))
);
}
assertThat(nodeFakeAvailabilityZoneMapper.getNumMlAvailabilityZones().getAsInt(), is(2));
}
public void testAvailabilityZonesAttributesNotConfiguredDedicatedNodes() {
// Fake availability zones are populated completely independently of cluster settings,
// attributes etc. Here we test that even in the absence of zone attributes that the
// fake availability zone mapper still assigns each node to a single, unique availability
// zone, and that dedicated ML nodes are identified and allocated to ML availability zones.
Settings settings = Settings.EMPTY;
ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);
DiscoveryNode mlNode = DiscoveryNodeUtils.create(
"node-1",
new TransportAddress(InetAddress.getLoopbackAddress(), 9300),
Map.of(),
Set.of(ML_ROLE)
);
DiscoveryNode node1 = DiscoveryNodeUtils.create(
"node-2",
new TransportAddress(InetAddress.getLoopbackAddress(), 9301),
Map.of(),
Set.of(MASTER_ROLE)
);
DiscoveryNode node2 = DiscoveryNodeUtils.create(
"node-3",
new TransportAddress(InetAddress.getLoopbackAddress(), 9202),
Map.of(),
Set.of(DATA_ROLE)
);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(mlNode).add(node1).add(node2).build();
NodeFakeAvailabilityZoneMapper nodeFakeAvailabilityZoneMapper = new NodeFakeAvailabilityZoneMapper(
settings,
clusterSettings,
discoveryNodes
);
DiscoveryNodes expectedDiscoveryNodes = DiscoveryNodes.builder().add(mlNode).add(node1).add(node2).build();
assertThat(nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone(), aMapWithSize(3));
for (DiscoveryNode node : expectedDiscoveryNodes) {
assertThat(
nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone().get(List.of(node.getId())),
equalTo(new ArrayList<DiscoveryNode>(List.of(node)))
);
}
assertThat(nodeFakeAvailabilityZoneMapper.getNumAvailabilityZones().getAsInt(), is(3));
DiscoveryNodes expectedMlDiscoveryNodes = DiscoveryNodes.builder().add(mlNode).build();
assertThat(nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone(), aMapWithSize(1));
assertThat(nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone().get(List.of("node-1")), contains(mlNode));
assertThat(nodeFakeAvailabilityZoneMapper.getNumMlAvailabilityZones().getAsInt(), is(1));
}
public void testAvailabilityZonesAttributesConfiguredMultiRoleNodes() {
// Fake availability zones are populated completely independently of cluster settings,
// attributes etc. Here we test that the fake availability zone mapper assigns each node
// to a single, unique availability zone, even when explicit allocation awareness attributes are
// configured in the cluster settings.
Settings settings = Settings.builder()
.putList(
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(),
List.of("region", "logical_availability_zone")
)
.build();
ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);
DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder();
int numNodes = randomIntBetween(2, 50);
// For fake availability zones, each node is mapped to a unique zone, i.e. the number of zones is equal to the number of nodes.
// This allocation of zones is performed completely independently of cluster settings, attributes etc.
int numZones = numNodes;
for (int nodeNum = 1; nodeNum <= numNodes; ++nodeNum) {
discoveryNodesBuilder.add(
DiscoveryNodeUtils.create(
"node-" + nodeNum,
new TransportAddress(InetAddress.getLoopbackAddress(), 9299 + nodeNum),
Map.of("region", "unknown-region", "logical_availability_zone", "zone-" + (nodeNum % numZones)),
Set.of(MASTER_ROLE, DATA_ROLE, ML_ROLE)
)
);
}
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
NodeFakeAvailabilityZoneMapper nodeFakeAvailabilityZoneMapper = new NodeFakeAvailabilityZoneMapper(
settings,
clusterSettings,
discoveryNodes
);
assertThat(nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone(), aMapWithSize(numNodes));
int totalNodesMapped = 0;
for (Map.Entry<List<String>, Collection<DiscoveryNode>> entry : nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone()
.entrySet()) {
List<String> key = entry.getKey();
assertThat(key, hasSize(1));
assertThat(entry.getValue().size(), is(1));
assertThat(key.get(0), equalTo(entry.getValue().iterator().next().getId()));
++totalNodesMapped;
}
assertThat(totalNodesMapped, is(numNodes));
assertThat(nodeFakeAvailabilityZoneMapper.getNumAvailabilityZones().getAsInt(), is(numZones));
totalNodesMapped = 0;
for (Map.Entry<List<String>, Collection<DiscoveryNode>> entry : nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone()
.entrySet()) {
List<String> key = entry.getKey();
assertThat(key, hasSize(1));
assertThat(entry.getValue().size(), is(1));
assertThat(key.get(0), equalTo(entry.getValue().iterator().next().getId()));
String zoneAttributeValue = key.get(0);
++totalNodesMapped;
}
assertThat(totalNodesMapped, is(numNodes));
assertThat(nodeFakeAvailabilityZoneMapper.getNumMlAvailabilityZones().getAsInt(), is(numZones));
}
public void testAvailabilityZonesAttributesConfiguredDedicatedNodes() {
// Fake availability zones are populated completely independently of cluster settings,
// attributes etc. Here we test that the fake availability zone mapper assigns each node
// to a single, unique availability zone, even when explicit allocation awareness attributes are
// configured in the cluster settings, and that dedicated ML nodes are identified and allocated
// to ML availability zones.
Settings settings = Settings.builder()
.putList(
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(),
List.of("region", "logical_availability_zone")
)
.build();
ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);
List<DiscoveryNode> mlNodes = new ArrayList<>();
Set<Integer> mlZones = new HashSet<>();
DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder();
int numNodes = randomIntBetween(10, 50);
int numZones = numNodes;
int numMlZones = randomIntBetween(1, numZones);
for (int nodeNum = 1; nodeNum <= numNodes; ++nodeNum) {
int zone = nodeNum % numZones;
DiscoveryNodeRole role = (zone < numMlZones) ? randomFrom(MASTER_ROLE, DATA_ROLE, ML_ROLE) : randomFrom(MASTER_ROLE, DATA_ROLE);
DiscoveryNode node = DiscoveryNodeUtils.create(
"node-" + nodeNum,
new TransportAddress(InetAddress.getLoopbackAddress(), 9199 + nodeNum),
Map.of("region", "unknown-region", "logical_availability_zone", "zone-" + zone),
Set.of(role)
);
if (role == ML_ROLE) {
mlNodes.add(node);
mlZones.add(zone);
}
discoveryNodesBuilder.add(node);
}
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
NodeFakeAvailabilityZoneMapper nodeFakeAvailabilityZoneMapper = new NodeFakeAvailabilityZoneMapper(
settings,
clusterSettings,
discoveryNodes
);
assertThat(nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone(), aMapWithSize(numZones));
int totalNodesMapped = 0;
for (Map.Entry<List<String>, Collection<DiscoveryNode>> entry : nodeFakeAvailabilityZoneMapper.getAllNodesByAvailabilityZone()
.entrySet()) {
List<String> key = entry.getKey();
assertThat(key, hasSize(1));
assertThat(entry.getValue().size(), is(1));
assertThat(key.get(0), equalTo(entry.getValue().iterator().next().getId()));
++totalNodesMapped;
}
assertThat(totalNodesMapped, is(numNodes));
assertThat(nodeFakeAvailabilityZoneMapper.getNumAvailabilityZones().getAsInt(), is(numZones));
int totalMlNodesMapped = 0;
for (Map.Entry<List<String>, Collection<DiscoveryNode>> entry : nodeFakeAvailabilityZoneMapper.getMlNodesByAvailabilityZone()
.entrySet()) {
List<String> key = entry.getKey();
assertThat(key, hasSize(1));
assertThat(entry.getValue().size(), is(1));
assertThat(key.get(0), equalTo(entry.getValue().iterator().next().getId()));
++totalMlNodesMapped;
}
assertThat(totalMlNodesMapped, is(mlNodes.size()));
assertThat(nodeFakeAvailabilityZoneMapper.getNumMlAvailabilityZones().getAsInt(), is(mlZones.size()));
}
}
| NodeFakeAvailabilityZoneMapperTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java | {
"start": 3448,
"end": 9573
} | class ____ {
RestrictedIndices restrictedIndices;
List<Group> groups = new ArrayList<>();
public Builder(RestrictedIndices restrictedIndices) {
this.restrictedIndices = restrictedIndices;
}
public Builder addGroup(
IndexPrivilege privilege,
FieldPermissions fieldPermissions,
@Nullable Set<BytesReference> query,
boolean allowRestrictedIndices,
String... indices
) {
groups.add(new Group(privilege, fieldPermissions, query, allowRestrictedIndices, restrictedIndices, indices));
return this;
}
public IndicesPermission build() {
return new IndicesPermission(restrictedIndices, groups.toArray(Group.EMPTY_ARRAY));
}
}
private IndicesPermission(RestrictedIndices restrictedIndices, Group[] groups) {
this.restrictedIndices = restrictedIndices;
this.groups = groups;
this.hasFieldOrDocumentLevelSecurity = Arrays.stream(groups).noneMatch(Group::isTotal)
&& Arrays.stream(groups).anyMatch(g -> g.hasQuery() || g.fieldPermissions.hasFieldLevelSecurity());
}
/**
* This function constructs an index matcher that can be used to find indices allowed by
* permissions groups.
*
* @param ordinaryIndices A list of ordinary indices. If this collection contains restricted indices,
* according to the restrictedNamesAutomaton, they will not be matched.
* @param restrictedIndices A list of restricted index names. All of these will be matched.
* @return A matcher that will match all non-restricted index names in the ordinaryIndices
* collection and all index names in the restrictedIndices collection.
*/
private StringMatcher indexMatcher(Collection<String> ordinaryIndices, Collection<String> restrictedIndices) {
StringMatcher matcher;
if (ordinaryIndices.isEmpty()) {
matcher = StringMatcher.of(restrictedIndices);
} else {
matcher = StringMatcher.of(ordinaryIndices);
if (this.restrictedIndices != null) {
matcher = matcher.and("<not-restricted>", name -> this.restrictedIndices.isRestricted(name) == false);
}
if (restrictedIndices.isEmpty() == false) {
matcher = StringMatcher.of(restrictedIndices).or(matcher);
}
}
return matcher;
}
public Group[] groups() {
return groups;
}
/**
* @return A predicate that will match all the indices that this permission
* has the privilege for executing the given action on.
*/
public IsResourceAuthorizedPredicate allowedIndicesMatcher(String action) {
return allowedIndicesMatchersForAction.computeIfAbsent(action, this::buildIndexMatcherPredicateForAction);
}
public boolean hasFieldOrDocumentLevelSecurity() {
return hasFieldOrDocumentLevelSecurity;
}
private IsResourceAuthorizedPredicate buildIndexMatcherPredicateForAction(String action) {
final Set<String> dataAccessOrdinaryIndices = new HashSet<>();
final Set<String> failuresAccessOrdinaryIndices = new HashSet<>();
final Set<String> dataAccessRestrictedIndices = new HashSet<>();
final Set<String> failuresAccessRestrictedIndices = new HashSet<>();
final Set<String> grantMappingUpdatesOnIndices = new HashSet<>();
final Set<String> grantMappingUpdatesOnRestrictedIndices = new HashSet<>();
final boolean isMappingUpdateAction = isMappingUpdateAction(action);
for (final Group group : groups) {
if (group.actionMatcher.test(action)) {
final List<String> indexList = Arrays.asList(group.indices());
final boolean dataAccess = group.checkSelector(IndexComponentSelector.DATA);
final boolean failuresAccess = group.checkSelector(IndexComponentSelector.FAILURES);
assert dataAccess || failuresAccess : "group must grant access at least one of [DATA, FAILURES] selectors";
if (group.allowRestrictedIndices) {
if (dataAccess) {
dataAccessRestrictedIndices.addAll(indexList);
}
if (failuresAccess) {
failuresAccessRestrictedIndices.addAll(indexList);
}
} else {
if (dataAccess) {
dataAccessOrdinaryIndices.addAll(indexList);
}
if (failuresAccess) {
failuresAccessOrdinaryIndices.addAll(indexList);
}
}
} else if (isMappingUpdateAction && containsPrivilegeThatGrantsMappingUpdatesForBwc(group)) {
// special BWC case for certain privileges: allow put mapping on indices and aliases (but not on data streams), even if
// the privilege definition does not currently allow it
if (group.allowRestrictedIndices) {
grantMappingUpdatesOnRestrictedIndices.addAll(Arrays.asList(group.indices()));
} else {
grantMappingUpdatesOnIndices.addAll(Arrays.asList(group.indices()));
}
}
}
final StringMatcher dataAccessNameMatcher = indexMatcher(dataAccessOrdinaryIndices, dataAccessRestrictedIndices);
final StringMatcher failuresAccessNameMatcher = indexMatcher(failuresAccessOrdinaryIndices, failuresAccessRestrictedIndices);
final StringMatcher bwcSpecialCaseMatcher = indexMatcher(grantMappingUpdatesOnIndices, grantMappingUpdatesOnRestrictedIndices);
return new IsResourceAuthorizedPredicate(dataAccessNameMatcher, failuresAccessNameMatcher, bwcSpecialCaseMatcher);
}
/**
* This encapsulates the authorization test for resources.
* There is an additional test for resources that are missing or that are not a datastream or a backing index.
*/
public static | Builder |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/AnnotationUtilsTests.java | {
"start": 28159,
"end": 28319
} | class ____ extends MultiExtensionClass {
}
@Extensions({ @ExtendWith("a"), @ExtendWith("b"), @ExtendWith("c"), @ExtendWith("a") })
static | SubMultiExtensionClass |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/ignorebydefaultsource/IgnoreByDefaultSourcesTest.java | {
"start": 629,
"end": 1704
} | class ____ {
@ProcessorTest
@WithClasses({ ErroneousSourceTargetMapperWithIgnoreByDefault.class, Source.class, Target.class })
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousSourceTargetMapperWithIgnoreByDefault.class,
kind = Kind.ERROR,
line = 23,
message = "Unmapped source property: \"other\".")
}
)
public void shouldRaiseErrorDueToNonIgnoredSourcePropertyWithBeanMappingIgnoreByDefault() {
}
@ProcessorTest
@WithClasses({ ErroneousSourceTargetMapper.class, Source.class, Target.class })
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousSourceTargetMapper.class,
kind = Kind.ERROR,
line = 20,
message = "Unmapped source property: \"other\".")
}
)
public void shouldRaiseErrorDueToNonIgnoredSourceProperty() {
}
}
| IgnoreByDefaultSourcesTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceFields.java | {
"start": 326,
"end": 596
} | class ____ {
public static final String RESOURCE_NAME = "resource_name";
public static final String DEPLOYMENT_ID = "deployment_id";
public static final String API_VERSION = "api_version";
public static final String USER = "user";
}
| AzureOpenAiServiceFields |
java | elastic__elasticsearch | modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java | {
"start": 2666,
"end": 11950
} | class ____ extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(AzureClientProvider.class);
private static final TimeValue DEFAULT_CONNECTION_TIMEOUT = TimeValue.timeValueSeconds(30);
private static final TimeValue DEFAULT_MAX_CONNECTION_IDLE_TIME = TimeValue.timeValueSeconds(60);
private static final int DEFAULT_MAX_CONNECTIONS = 50;
private static final int DEFAULT_EVENT_LOOP_THREAD_COUNT = 1;
private static final int PENDING_CONNECTION_QUEUE_SIZE = -1; // see ConnectionProvider.ConnectionPoolSpec.pendingAcquireMaxCount
/**
* Test-only system property to disable instance discovery for workload identity authentication in the Azure SDK.
* This is necessary since otherwise the SDK will attempt to verify identities via a real host
* (e.g. <a href="https://login.microsoft.com/">https://login.microsoft.com/</a>) for
* workload identity authentication. This is incompatible with our test environment.
*/
private static final boolean DISABLE_INSTANCE_DISCOVERY = System.getProperty(
"tests.azure.credentials.disable_instance_discovery",
"false"
).equals("true");
static final Setting<Integer> EVENT_LOOP_THREAD_COUNT = Setting.intSetting(
"repository.azure.http_client.event_loop_executor_thread_count",
DEFAULT_EVENT_LOOP_THREAD_COUNT,
1,
Setting.Property.NodeScope
);
static final Setting<Integer> MAX_OPEN_CONNECTIONS = Setting.intSetting(
"repository.azure.http_client.max_open_connections",
DEFAULT_MAX_CONNECTIONS,
1,
Setting.Property.NodeScope
);
static final Setting<TimeValue> OPEN_CONNECTION_TIMEOUT = Setting.timeSetting(
"repository.azure.http_client.connection_timeout",
DEFAULT_CONNECTION_TIMEOUT,
Setting.Property.NodeScope
);
static final Setting<TimeValue> MAX_IDLE_TIME = Setting.timeSetting(
"repository.azure.http_client.connection_max_idle_time",
DEFAULT_MAX_CONNECTION_IDLE_TIME,
Setting.Property.NodeScope
);
private final ThreadPool threadPool;
private final String reactorExecutorName;
private final EventLoopGroup eventLoopGroup;
private final ConnectionProvider connectionProvider;
private final ByteBufAllocator byteBufAllocator;
private final LoopResources nioLoopResources;
private final int multipartUploadMaxConcurrency;
private volatile boolean closed = false;
AzureClientProvider(
ThreadPool threadPool,
String reactorExecutorName,
EventLoopGroup eventLoopGroup,
ConnectionProvider connectionProvider,
ByteBufAllocator byteBufAllocator,
int multipartUploadMaxConcurrency
) {
this.threadPool = threadPool;
this.reactorExecutorName = reactorExecutorName;
this.eventLoopGroup = eventLoopGroup;
this.connectionProvider = connectionProvider;
this.byteBufAllocator = byteBufAllocator;
// The underlying http client uses this as part of the connection pool key,
// hence we need to use the same instance across all the client instances
// to avoid creating multiple connection pools.
this.nioLoopResources = useNative -> eventLoopGroup;
this.multipartUploadMaxConcurrency = multipartUploadMaxConcurrency;
}
static int eventLoopThreadsFromSettings(Settings settings) {
return EVENT_LOOP_THREAD_COUNT.get(settings);
}
static AzureClientProvider create(ThreadPool threadPool, Settings settings) {
final ExecutorService eventLoopExecutor = threadPool.executor(NETTY_EVENT_LOOP_THREAD_POOL_NAME);
// Most of the code that needs special permissions (i.e. jackson serializers generation) is executed
// in the event loop executor. That's the reason why we should provide an executor that allows the
// execution of privileged code
final EventLoopGroup eventLoopGroup = new NioEventLoopGroup(eventLoopThreadsFromSettings(settings), eventLoopExecutor);
final TimeValue openConnectionTimeout = OPEN_CONNECTION_TIMEOUT.get(settings);
final TimeValue maxIdleTime = MAX_IDLE_TIME.get(settings);
ConnectionProvider provider = ConnectionProvider.builder("azure-sdk-connection-pool")
.maxConnections(MAX_OPEN_CONNECTIONS.get(settings))
.pendingAcquireMaxCount(PENDING_CONNECTION_QUEUE_SIZE) // This determines the max outstanding queued requests
.pendingAcquireTimeout(Duration.ofMillis(openConnectionTimeout.millis()))
.maxIdleTime(Duration.ofMillis(maxIdleTime.millis()))
.build();
// Just to verify that this executor exists
threadPool.executor(REPOSITORY_THREAD_POOL_NAME);
return new AzureClientProvider(
threadPool,
REPOSITORY_THREAD_POOL_NAME,
eventLoopGroup,
provider,
NettyAllocator.getAllocator(),
threadPool.info(REPOSITORY_THREAD_POOL_NAME).getMax()
);
}
AzureBlobServiceClient createClient(
AzureStorageSettings settings,
LocationMode locationMode,
RequestRetryOptions retryOptions,
ProxyOptions proxyOptions,
RequestMetricsHandler requestMetricsHandler,
OperationPurpose purpose
) {
if (closed) {
throw new IllegalStateException("AzureClientProvider is already closed");
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create(connectionProvider);
nettyHttpClient = nettyHttpClient.port(80)
.wiretap(false)
.resolver(DefaultAddressResolverGroup.INSTANCE)
.runOn(nioLoopResources)
.option(ChannelOption.ALLOCATOR, byteBufAllocator);
final HttpClient httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).disableBufferCopy(true).proxy(proxyOptions).build();
final String connectionString = settings.getConnectString();
BlobServiceClientBuilder builder = new BlobServiceClientBuilder().connectionString(connectionString)
.httpClient(httpClient)
.retryOptions(retryOptions);
if (settings.hasCredentials() == false) {
final DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder().executorService(eventLoopGroup);
if (DISABLE_INSTANCE_DISCOVERY) {
credentialBuilder.disableInstanceDiscovery();
}
builder.credential(credentialBuilder.build());
}
if (requestMetricsHandler != null) {
builder.addPolicy(new RequestMetricsTracker(purpose, requestMetricsHandler));
builder.addPolicy(RetryMetricsTracker.INSTANCE);
}
if (locationMode.isSecondary()) {
String secondaryUri = settings.getStorageEndpoint().secondaryURI();
if (secondaryUri == null) {
throw new IllegalArgumentException(
"Unable to configure an AzureClient using a secondary location without a secondary endpoint"
);
}
builder.endpoint(secondaryUri);
}
BlobServiceClient blobServiceClient = builder.buildClient();
BlobServiceAsyncClient asyncClient = builder.buildAsyncClient();
return new AzureBlobServiceClient(blobServiceClient, asyncClient, settings.getMaxRetries(), byteBufAllocator);
}
@Override
protected void doStart() {
ReactorScheduledExecutorService executorService = new ReactorScheduledExecutorService(threadPool, reactorExecutorName);
// The only way to configure the schedulers used by the SDK is to inject a new global factory. This is a bit ugly...
// See https://github.com/Azure/azure-sdk-for-java/issues/17272 for a feature request to avoid this need.
Schedulers.setFactory(new Schedulers.Factory() {
@Override
public Scheduler newParallel(int parallelism, ThreadFactory threadFactory) {
return Schedulers.fromExecutor(executorService);
}
@Override
public Scheduler newElastic(int ttlSeconds, ThreadFactory threadFactory) {
return Schedulers.fromExecutor(executorService);
}
@Override
public Scheduler newBoundedElastic(int threadCap, int queuedTaskCap, ThreadFactory threadFactory, int ttlSeconds) {
return Schedulers.fromExecutor(executorService);
}
@Override
public Scheduler newSingle(ThreadFactory threadFactory) {
return Schedulers.fromExecutor(executorService);
}
});
}
@Override
protected void doStop() {
closed = true;
connectionProvider.dispose();
eventLoopGroup.shutdownGracefully();
Schedulers.resetFactory();
}
@Override
protected void doClose() {}
public int getMultipartUploadMaxConcurrency() {
return multipartUploadMaxConcurrency;
}
// visible for testing
ConnectionProvider getConnectionProvider() {
return connectionProvider;
}
static | AzureClientProvider |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/logging/jdk14/Jdk14LoggingImpl.java | {
"start": 837,
"end": 1584
} | class ____ implements Log {
private final Logger log;
public Jdk14LoggingImpl(String clazz) {
log = Logger.getLogger(clazz);
}
@Override
public boolean isDebugEnabled() {
return log.isLoggable(Level.FINE);
}
@Override
public boolean isTraceEnabled() {
return log.isLoggable(Level.FINER);
}
@Override
public void error(String s, Throwable e) {
log.log(Level.SEVERE, s, e);
}
@Override
public void error(String s) {
log.log(Level.SEVERE, s);
}
@Override
public void debug(String s) {
log.log(Level.FINE, s);
}
@Override
public void trace(String s) {
log.log(Level.FINER, s);
}
@Override
public void warn(String s) {
log.log(Level.WARNING, s);
}
}
| Jdk14LoggingImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java | {
"start": 53584,
"end": 54421
} | class ____ {
@GuardedBy("mu")
// BUG: Diagnostic contains: could not resolve guard
int x;
}
public void m(Baz b) {
synchronized (mu) {
// BUG: Diagnostic contains: 'mu', which could not be resolved
b.x++;
}
}
}
""")
.doTest();
}
// Ensure sure outer instance handling doesn't accidentally include enclosing classes of
// static member classes.
@Test
public void staticMemberClass_staticOuterClassLock() {
compilationHelper
.addSourceLines(
"threadsafety/Test.java",
"""
package threadsafety;
import com.google.errorprone.annotations.concurrent.GuardedBy;
public | Baz |
java | apache__camel | components/camel-ai/camel-langchain4j-tools/src/generated/java/org/apache/camel/component/langchain4j/tools/LangChain4jToolsComponentConfigurer.java | {
"start": 744,
"end": 3961
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
private org.apache.camel.component.langchain4j.tools.LangChain4jToolsConfiguration getOrCreateConfiguration(LangChain4jToolsComponent target) {
if (target.getConfiguration() == null) {
target.setConfiguration(new org.apache.camel.component.langchain4j.tools.LangChain4jToolsConfiguration());
}
return target.getConfiguration();
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
LangChain4jToolsComponent target = (LangChain4jToolsComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "chatmodel":
case "chatModel": getOrCreateConfiguration(target).setChatModel(property(camelContext, dev.langchain4j.model.chat.ChatModel.class, value)); return true;
case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.langchain4j.tools.LangChain4jToolsConfiguration.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"chatModel"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "chatmodel":
case "chatModel": return dev.langchain4j.model.chat.ChatModel.class;
case "configuration": return org.apache.camel.component.langchain4j.tools.LangChain4jToolsConfiguration.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
LangChain4jToolsComponent target = (LangChain4jToolsComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "chatmodel":
case "chatModel": return getOrCreateConfiguration(target).getChatModel();
case "configuration": return target.getConfiguration();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| LangChain4jToolsComponentConfigurer |
java | elastic__elasticsearch | x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ResourceDeprecationChecker.java | {
"start": 846,
"end": 1502
} | interface ____ {
/**
* This runs the checks for the current deprecation checker.
*
* @param project The project metadata provided for the checker
* @param request The deprecation request that triggered this check
* @param precomputedData Data that have been remotely retrieved and might be useful in the checks
*/
Map<String, List<DeprecationIssue>> check(
ProjectMetadata project,
DeprecationInfoAction.Request request,
TransportDeprecationInfoAction.PrecomputedData precomputedData
);
/**
* @return The name of the checker
*/
String getName();
}
| ResourceDeprecationChecker |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/benckmark/pool/model/TableOperator.java | {
"start": 207,
"end": 2374
} | class ____ {
private DataSource dataSource;
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
private static final int COUNT = 2;
public TableOperator() {
}
public void insert() throws Exception {
StringBuilder ddl = new StringBuilder();
ddl.append("INSERT INTO t_big (");
for (int i = 0; i < COUNT; ++i) {
if (i != 0) {
ddl.append(", ");
}
ddl.append("F" + i);
}
ddl.append(") VALUES (");
for (int i = 0; i < COUNT; ++i) {
if (i != 0) {
ddl.append(", ");
}
ddl.append("?");
}
ddl.append(")");
Connection conn = dataSource.getConnection();
// System.out.println(ddl.toString());
try {
PreparedStatement stmt = conn.prepareStatement(ddl.toString());
for (int i = 0; i < COUNT; ++i) {
stmt.setInt(i + 1, i);
}
stmt.execute();
stmt.close();
} finally {
conn.close();
}
}
public void dropTable() throws SQLException {
Connection conn = dataSource.getConnection();
try {
Statement stmt = conn.createStatement();
stmt.execute("DROP TABLE t_big");
stmt.close();
} finally {
conn.close();
}
}
public void createTable() throws SQLException {
StringBuilder ddl = new StringBuilder();
ddl.append("CREATE TABLE t_big (FID INT AUTO_INCREMENT PRIMARY KEY ");
for (int i = 0; i < COUNT; ++i) {
ddl.append(", ");
ddl.append("F" + i);
ddl.append(" BIGINT NULL");
}
ddl.append(")");
Connection conn = dataSource.getConnection();
try {
Statement stmt = conn.createStatement();
stmt.addBatch("DROP TABLE IF EXISTS t_big");
stmt.addBatch(ddl.toString());
stmt.executeBatch();
stmt.close();
} finally {
conn.close();
}
}
}
| TableOperator |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/YAMLLibrary.java | {
"start": 999,
"end": 1263
} | enum ____ {
SnakeYAML("snakeYaml");
private final String dataFormatName;
YAMLLibrary(String dataFormatName) {
this.dataFormatName = dataFormatName;
}
public String getDataFormatName() {
return dataFormatName;
}
}
| YAMLLibrary |
java | apache__flink | flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/IterativeConditionsITCase.java | {
"start": 1650,
"end": 6191
} | class ____ extends TestLogger {
////////////////////// Iterative BooleanConditions /////////////////////////
private final Event startEvent1 = new Event(40, "start", 1.0);
private final Event startEvent2 = new Event(40, "start", 2.0);
private final Event startEvent3 = new Event(40, "start", 3.0);
private final Event startEvent4 = new Event(40, "start", 4.0);
private final SubEvent middleEvent1 = new SubEvent(41, "foo1", 1.0, 10);
private final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10);
private final SubEvent middleEvent3 = new SubEvent(43, "foo3", 3.0, 10);
private final SubEvent middleEvent4 = new SubEvent(43, "foo4", 1.0, 10);
private final Event nextOne = new Event(44, "next-one", 1.0);
private final Event endEvent = new Event(46, "end", 1.0);
@Test
public void testIterativeWithBranchingPatternEager() throws Exception {
List<List<Event>> actual = testIterativeWithBranchingPattern(true);
comparePatterns(
actual,
Lists.<List<Event>>newArrayList(
Lists.newArrayList(
startEvent1, endEvent, middleEvent1, middleEvent2, middleEvent4),
Lists.newArrayList(startEvent1, endEvent, middleEvent2, middleEvent1),
Lists.newArrayList(startEvent1, endEvent, middleEvent1),
Lists.newArrayList(startEvent2, endEvent, middleEvent3, middleEvent4),
Lists.newArrayList(startEvent2, endEvent, middleEvent3)));
}
@Test
public void testIterativeWithBranchingPatternCombinations() throws Exception {
List<List<Event>> actual = testIterativeWithBranchingPattern(false);
comparePatterns(
actual,
Lists.<List<Event>>newArrayList(
Lists.newArrayList(
startEvent1, endEvent, middleEvent1, middleEvent2, middleEvent4),
Lists.newArrayList(startEvent1, endEvent, middleEvent2, middleEvent1),
Lists.newArrayList(startEvent1, endEvent, middleEvent3, middleEvent1),
Lists.newArrayList(startEvent2, endEvent, middleEvent3, middleEvent4),
Lists.newArrayList(startEvent1, endEvent, middleEvent4, middleEvent1),
Lists.newArrayList(startEvent1, endEvent, middleEvent1),
Lists.newArrayList(startEvent2, endEvent, middleEvent3)));
}
private List<List<Event>> testIterativeWithBranchingPattern(boolean eager) throws Exception {
List<StreamRecord<Event>> inputEvents = new ArrayList<>();
inputEvents.add(new StreamRecord<>(startEvent1, 1));
inputEvents.add(new StreamRecord<Event>(middleEvent1, 2));
inputEvents.add(new StreamRecord<Event>(middleEvent2, 3));
inputEvents.add(new StreamRecord<>(startEvent2, 4));
inputEvents.add(new StreamRecord<Event>(middleEvent3, 5));
inputEvents.add(new StreamRecord<Event>(middleEvent4, 5));
inputEvents.add(new StreamRecord<>(nextOne, 6));
inputEvents.add(new StreamRecord<>(endEvent, 8));
Pattern<Event, ?> pattern =
eager
? Pattern.<Event>begin("start")
.where(SimpleCondition.of(value -> value.getName().equals("start")))
.followedBy("middle")
.subtype(SubEvent.class)
.where(new MySubeventIterCondition())
.oneOrMore()
.followedBy("end")
.where(SimpleCondition.of(value -> value.getName().equals("end")))
: Pattern.<Event>begin("start")
.where(SimpleCondition.of(value -> value.getName().equals("start")))
.followedBy("middle")
.subtype(SubEvent.class)
.where(new MySubeventIterCondition())
.oneOrMore()
.allowCombinations()
.followedBy("end")
.where(SimpleCondition.of(value -> value.getName().equals("end")));
NFA<Event> nfa = compile(pattern, false);
return feedNFA(inputEvents, nfa);
}
private static | IterativeConditionsITCase |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/rsocket/service/RSocketServiceMethodTests.java | {
"start": 1483,
"end": 4542
} | class ____ {
private TestRSocket rsocket;
private RSocketServiceProxyFactory proxyFactory;
@BeforeEach
void setUp() {
this.rsocket = new TestRSocket();
RSocketRequester requester = RSocketRequester.wrap(this.rsocket, TEXT_PLAIN, TEXT_PLAIN, RSocketStrategies.create());
this.proxyFactory = RSocketServiceProxyFactory.builder(requester).build();
}
@Test
void fireAndForget() {
ReactorService service = this.proxyFactory.createClient(ReactorService.class);
String payload = "p1";
service.fireAndForget(Mono.just(payload)).block(Duration.ofSeconds(5));
assertThat(this.rsocket.getSavedMethodName()).isEqualTo("fireAndForget");
assertThat(this.rsocket.getSavedPayload().getMetadataUtf8()).isEqualTo("ff");
assertThat(this.rsocket.getSavedPayload().getDataUtf8()).isEqualTo(payload);
}
@Test
void requestResponse() {
ReactorService service = this.proxyFactory.createClient(ReactorService.class);
String payload1 = "p1";
String payload2 = "p2";
this.rsocket.setPayloadMonoToReturn(
Mono.just(DefaultPayload.create(payload2)));
String response = service.requestResponse(Mono.just(payload1)).block(Duration.ofSeconds(5));
assertThat(response).isEqualTo(payload2);
assertThat(this.rsocket.getSavedMethodName()).isEqualTo("requestResponse");
assertThat(this.rsocket.getSavedPayload().getMetadataUtf8()).isEqualTo("rr");
assertThat(this.rsocket.getSavedPayload().getDataUtf8()).isEqualTo(payload1);
}
@Test
void requestStream() {
ReactorService service = this.proxyFactory.createClient(ReactorService.class);
String payload1 = "p1";
String payload2 = "p2";
String payload3 = "p3";
this.rsocket.setPayloadFluxToReturn(
Flux.just(DefaultPayload.create(payload2), DefaultPayload.create(payload3)));
List<String> response = service.requestStream(Mono.just(payload1))
.collectList()
.block(Duration.ofSeconds(5));
assertThat(response).containsExactly(payload2, payload3);
assertThat(this.rsocket.getSavedMethodName()).isEqualTo("requestStream");
assertThat(this.rsocket.getSavedPayload().getMetadataUtf8()).isEqualTo("rs");
assertThat(this.rsocket.getSavedPayload().getDataUtf8()).isEqualTo(payload1);
}
@Test
void requestChannel() {
ReactorService service = this.proxyFactory.createClient(ReactorService.class);
String payload1 = "p1";
String payload2 = "p2";
String payload3 = "p3";
String payload4 = "p4";
this.rsocket.setPayloadFluxToReturn(
Flux.just(DefaultPayload.create(payload3), DefaultPayload.create(payload4)));
List<String> response = service.requestChannel(Flux.just(payload1, payload2))
.collectList()
.block(Duration.ofSeconds(5));
assertThat(response).containsExactly(payload3, payload4);
assertThat(this.rsocket.getSavedMethodName()).isEqualTo("requestChannel");
List<String> savedPayloads = this.rsocket.getSavedPayloadFlux()
.map(io.rsocket.Payload::getDataUtf8)
.collectList()
.block(Duration.ofSeconds(5));
assertThat(savedPayloads).containsExactly("p1", "p2");
}
private | RSocketServiceMethodTests |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletableCreate.java | {
"start": 1634,
"end": 4013
} | class ____
extends AtomicReference<Disposable>
implements CompletableEmitter, Disposable {
private static final long serialVersionUID = -2467358622224974244L;
final CompletableObserver downstream;
Emitter(CompletableObserver downstream) {
this.downstream = downstream;
}
@Override
public void onComplete() {
if (get() != DisposableHelper.DISPOSED) {
Disposable d = getAndSet(DisposableHelper.DISPOSED);
if (d != DisposableHelper.DISPOSED) {
try {
downstream.onComplete();
} finally {
if (d != null) {
d.dispose();
}
}
}
}
}
@Override
public void onError(Throwable t) {
if (!tryOnError(t)) {
RxJavaPlugins.onError(t);
}
}
@Override
public boolean tryOnError(Throwable t) {
if (t == null) {
t = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
if (get() != DisposableHelper.DISPOSED) {
Disposable d = getAndSet(DisposableHelper.DISPOSED);
if (d != DisposableHelper.DISPOSED) {
try {
downstream.onError(t);
} finally {
if (d != null) {
d.dispose();
}
}
return true;
}
}
return false;
}
@Override
public void setDisposable(Disposable d) {
DisposableHelper.set(this, d);
}
@Override
public void setCancellable(Cancellable c) {
setDisposable(new CancellableDisposable(c));
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
@Override
public String toString() {
return String.format("%s{%s}", getClass().getSimpleName(), super.toString());
}
}
}
| Emitter |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java | {
"start": 30855,
"end": 40988
} | interface
____ (!local) {
try {
local = NetworkInterface.getByInetAddress(addr) != null;
} catch (SocketException e) {
local = false;
}
}
return local;
}
/**
* Take an IOException , the local host port and remote host port details and
* return an IOException with the input exception as the cause and also
* include the host details. The new exception provides the stack trace of the
* place where the exception is thrown and some extra diagnostics information.
* If the exception is of type BindException, ConnectException,
* UnknownHostException, SocketTimeoutException or has a String constructor,
* return a new one of the same type; Otherwise return an IOException.
*
* @param destHost target host (nullable)
* @param destPort target port
* @param localHost local host (nullable)
* @param localPort local port
* @param exception the caught exception.
* @return an exception to throw
*/
public static IOException wrapException(final String destHost,
final int destPort,
final String localHost,
final int localPort,
final IOException exception) {
try {
if (exception instanceof BindException) {
return wrapWithMessage(exception,
"Problem binding to ["
+ localHost
+ ":"
+ localPort
+ "] "
+ exception
+ ";"
+ see("BindException"));
} else if (exception instanceof ConnectException) {
// Check if client was trying to connect to an unspecified IPv4 address
// (0.0.0.0) or IPv6 address(0:0:0:0:0:0:0:0 or ::)
if ((destHost != null && (destHost.equals("0.0.0.0") ||
destHost.equals("0:0:0:0:0:0:0:0") || destHost.equals("::")))
|| destPort == 0) {
return wrapWithMessage(exception, "Your endpoint configuration" +
" is wrong;" + see("UnsetHostnameOrPort"));
} else {
// connection refused; include the host:port in the error
return wrapWithMessage(exception,
"Call From "
+ localHost
+ " to "
+ destHost
+ ":"
+ destPort
+ " failed on connection exception: "
+ exception
+ ";"
+ see("ConnectionRefused"));
}
} else if (exception instanceof UnknownHostException) {
return wrapWithMessage(exception,
"Invalid host name: "
+ getHostDetailsAsString(destHost, destPort, localHost)
+ exception
+ ";"
+ see("UnknownHost"));
} else if (exception instanceof SocketTimeoutException) {
return wrapWithMessage(exception,
"Call From "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("SocketTimeout"));
} else if (exception instanceof NoRouteToHostException) {
return wrapWithMessage(exception,
"No Route to Host from "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("NoRouteToHost"));
} else if (exception instanceof EOFException) {
return wrapWithMessage(exception,
"End of File Exception between "
+ getHostDetailsAsString(destHost, destPort, localHost)
+ ": " + exception
+ ";"
+ see("EOFException"));
} else if (exception instanceof SocketException) {
// Many of the predecessor exceptions are subclasses of SocketException,
// so must be handled before this
return wrapWithMessage(exception,
"Call From "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket exception: " + exception
+ ";"
+ see("SocketException"));
} else if (exception instanceof AccessControlException) {
return wrapWithMessage(exception,
"Call From "
+ localHost + " to " + destHost + ":" + destPort
+ " failed: " + exception.getMessage());
} else {
// 1. Return instance of same type with exception msg if Exception has a
// String constructor.
// 2. Return instance of same type if Exception doesn't have a String
// constructor.
// Related HADOOP-16453.
return wrapWithMessage(exception,
"DestHost:destPort " + destHost + ":" + destPort
+ " , LocalHost:localPort " + localHost
+ ":" + localPort + ". Failed on local exception: " +
exception);
}
} catch (IOException ex) {
try {
return new IOException("Failed on local exception: "
+ exception + "; Host Details : "
+ getHostDetailsAsString(destHost, destPort, localHost), exception);
} catch (Exception ignore) {
// in worst case, return the original exception
return exception;
}
}
}
/**
* Return an @{@link IOException} of the same type as the input exception but with
* a modified exception message that includes the node name.
*
* @param ioe existing exception.
* @param nodeName name of the node.
* @return IOException
*/
public static IOException addNodeNameToIOException(final IOException ioe, final String nodeName) {
try {
final Throwable cause = ioe.getCause();
IOException newIoe = null;
if (cause != null) {
try {
DynConstructors.Ctor<? extends IOException> ctor =
new DynConstructors.Builder()
.impl(ioe.getClass(), String.class, Throwable.class)
.buildChecked();
newIoe = ctor.newInstance(nodeName + ": " + ioe.getMessage(), cause);
} catch (NoSuchMethodException e) {
// no matching constructor - try next approach below
}
}
if (newIoe == null) {
DynConstructors.Ctor<? extends IOException> ctor =
new DynConstructors.Builder()
.impl(ioe.getClass(), String.class)
.buildChecked();
newIoe = ctor.newInstance(nodeName + ": " + ioe.getMessage());
if (cause != null) {
try {
newIoe.initCause(cause);
} catch (Exception e) {
// Unable to initCause. Ignore the exception.
}
}
}
newIoe.setStackTrace(ioe.getStackTrace());
return newIoe;
} catch (Exception e) {
// Unable to create new exception. Return the original exception.
return ioe;
}
}
private static String see(final String entry) {
return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
}
@SuppressWarnings("unchecked")
private static <T extends IOException> T wrapWithMessage(
T exception, String msg) throws T {
Class<? extends Throwable> clazz = exception.getClass();
try {
try {
DynConstructors.Ctor<T> ctor =
new DynConstructors.Builder()
.impl(clazz, String.class, Throwable.class)
.buildChecked();
return ctor.newInstance(msg, exception);
} catch (NoSuchMethodException e) {
// no matching constructor - try next approach below
}
DynConstructors.Ctor<T> ctor =
new DynConstructors.Builder()
.impl(clazz, String.class)
.buildChecked();
T newException = ctor.newInstance(msg);
newException.initCause(exception);
return newException;
} catch (NoSuchMethodException e) {
return exception;
} catch (Throwable e) {
throw exception;
}
}
/**
* Get the host details as a string
* @param destHost destinatioon host (nullable)
* @param destPort destination port
* @param localHost local host (nullable)
* @return a string describing the destination host:port and the local host
*/
private static String getHostDetailsAsString(final String destHost,
final int destPort,
final String localHost) {
StringBuilder hostDetails = new StringBuilder(27);
hostDetails.append("local host is: ")
.append(quoteHost(localHost))
.append("; ")
.append("destination host is: ").append(quoteHost(destHost))
.append(":")
.append(destPort).append("; ");
return hostDetails.toString();
}
/**
* Quote a hostname if it is not null
* @param hostname the hostname; nullable
* @return a quoted hostname or {@link #UNKNOWN_HOST} if the hostname is null
*/
private static String quoteHost(final String hostname) {
return (hostname != null) ?
("\"" + hostname + "\"")
: UNKNOWN_HOST;
}
/**
* isValidSubnet.
* @param subnet subnet.
* @return true if the given string is a subnet specified
* using CIDR notation, false otherwise
*/
public static boolean isValidSubnet(String subnet) {
try {
new SubnetUtils(subnet);
return true;
} catch (IllegalArgumentException iae) {
return false;
}
}
/**
* Add all addresses associated with the given nif in the
* given subnet to the given list.
*/
private static void addMatchingAddrs(NetworkInterface nif,
SubnetInfo subnetInfo, List<InetAddress> addrs) {
Enumeration<InetAddress> ifAddrs = nif.getInetAddresses();
while (ifAddrs.hasMoreElements()) {
InetAddress ifAddr = ifAddrs.nextElement();
if (subnetInfo.isInRange(ifAddr.getHostAddress())) {
addrs.add(ifAddr);
}
}
}
/**
* Return an InetAddress for each | if |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/beanmapping/TargetReference.java | {
"start": 1534,
"end": 3712
} | class ____ {
private final List<String> pathProperties;
private final Parameter parameter;
private final List<String> propertyEntries;
public TargetReference(Parameter parameter, List<String> propertyEntries) {
this( parameter, propertyEntries, Collections.emptyList() );
}
public TargetReference(Parameter parameter, List<String> propertyEntries, List<String> pathProperties) {
this.pathProperties = pathProperties;
this.parameter = parameter;
this.propertyEntries = propertyEntries;
}
public List<String> getPathProperties() {
return pathProperties;
}
public List<String> getPropertyEntries() {
return propertyEntries;
}
public List<String> getElementNames() {
List<String> elementNames = new ArrayList<>();
if ( parameter != null ) {
// only relevant for source properties
elementNames.add( parameter.getName() );
}
elementNames.addAll( propertyEntries );
return elementNames;
}
/**
* @return the property name on the shallowest nesting level
*/
public String getShallowestPropertyName() {
if ( propertyEntries.isEmpty() ) {
return null;
}
return first( propertyEntries );
}
public boolean isNested() {
return propertyEntries.size() > 1;
}
@Override
public String toString() {
String result = "";
if ( propertyEntries.isEmpty() ) {
if ( parameter != null ) {
result = String.format( "parameter \"%s %s\"", parameter.getType(), parameter.getName() );
}
}
else if ( propertyEntries.size() == 1 ) {
String propertyEntry = propertyEntries.get( 0 );
result = String.format( "property \"%s\"", propertyEntry );
}
else {
result = String.format(
"property \"%s\"",
Strings.join( getElementNames(), "." )
);
}
return result;
}
/**
* Builds a {@link TargetReference} from an {@code @Mappping}.
*/
public static | TargetReference |
java | apache__camel | components/camel-thymeleaf/src/test/java/org/apache/camel/component/thymeleaf/ThymeleafUrlResolverTest.java | {
"start": 1852,
"end": 5208
} | class ____ extends ThymeleafAbstractBaseTest {
@Test
public void testThymeleaf() throws Exception {
stubFor(get("/dontcare.html").willReturn(ok(fragment())));
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedMessageCount(1);
mock.message(0).body().contains(YOU_WILL_NOTIFIED);
mock.message(0).header(ThymeleafConstants.THYMELEAF_TEMPLATE).isNull();
mock.message(0).header(FIRST_NAME).isEqualTo(JANE);
template.request(DIRECT_START, urlTemplateHeaderProcessor);
mock.assertIsSatisfied();
ThymeleafEndpoint thymeleafEndpoint = context.getEndpoint(
"thymeleaf:dontcare?allowTemplateFromHeader=true&allowContextMapAll=true&resolver=URL",
ThymeleafEndpoint.class);
assertAll("properties",
() -> assertNotNull(thymeleafEndpoint),
() -> assertTrue(thymeleafEndpoint.isAllowContextMapAll()),
() -> assertNull(thymeleafEndpoint.getCacheable()),
() -> assertNull(thymeleafEndpoint.getCacheTimeToLive()),
() -> assertNull(thymeleafEndpoint.getCheckExistence()),
() -> assertNull(thymeleafEndpoint.getEncoding()),
() -> assertEquals(ExchangePattern.InOut, thymeleafEndpoint.getExchangePattern()),
() -> assertNull(thymeleafEndpoint.getOrder()),
() -> assertNull(thymeleafEndpoint.getPrefix()),
() -> assertEquals(ThymeleafResolverType.URL, thymeleafEndpoint.getResolver()),
() -> assertNull(thymeleafEndpoint.getSuffix()),
() -> assertNotNull(thymeleafEndpoint.getTemplateEngine()),
() -> assertNull(thymeleafEndpoint.getTemplateMode()));
assertEquals(1, thymeleafEndpoint.getTemplateEngine().getTemplateResolvers().size());
ITemplateResolver resolver = thymeleafEndpoint.getTemplateEngine().getTemplateResolvers().stream().findFirst().get();
assertTrue(resolver instanceof UrlTemplateResolver);
UrlTemplateResolver templateResolver = (UrlTemplateResolver) resolver;
assertAll("templateResolver",
() -> assertTrue(templateResolver.isCacheable()),
() -> assertNull(templateResolver.getCacheTTLMs()),
() -> assertNull(templateResolver.getCharacterEncoding()),
() -> assertFalse(templateResolver.getCheckExistence()),
() -> assertNull(templateResolver.getOrder()),
() -> assertNull(templateResolver.getPrefix()),
() -> assertNull(templateResolver.getSuffix()),
() -> assertEquals(TemplateMode.HTML, templateResolver.getTemplateMode()));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(DIRECT_START)
.to("thymeleaf:dontcare?allowTemplateFromHeader=true&allowContextMapAll=true&resolver=URL")
.to(MOCK_RESULT);
}
};
}
protected String fragment() {
return """
<span th:fragment="test" th:remove="tag">
You will be notified when your order ships.
</span>
""";
}
}
| ThymeleafUrlResolverTest |
java | quarkusio__quarkus | independent-projects/bootstrap/app-model/src/main/java/io/quarkus/maven/dependency/GAV.java | {
"start": 159,
"end": 1470
} | class ____ implements WorkspaceModuleId, Serializable {
private static final long serialVersionUID = -1110768961345248967L;
private final String groupId;
private final String artifactId;
private final String version;
public GAV(String groupId, String artifactId, String version) {
super();
this.groupId = groupId;
this.artifactId = artifactId;
this.version = version;
}
@Override
public String getGroupId() {
return groupId;
}
@Override
public String getArtifactId() {
return artifactId;
}
@Override
public String getVersion() {
return version;
}
@Override
public int hashCode() {
return Objects.hash(artifactId, groupId, version);
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
GAV other = (GAV) obj;
return Objects.equals(artifactId, other.artifactId) && Objects.equals(groupId, other.groupId)
&& Objects.equals(version, other.version);
}
@Override
public String toString() {
return groupId + ":" + artifactId + ":" + version;
}
}
| GAV |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/ignore/PreditorDto.java | {
"start": 230,
"end": 350
} | class ____ {
private boolean hasClaws;
public boolean isHasClaws() {
return hasClaws;
}
}
| PreditorDto |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/path/JSONPath_set.java | {
"start": 1559,
"end": 1940
} | class ____ {
private Integer id;
private String name;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| Entity |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeinfo/TypeInformationTest.java | {
"start": 1309,
"end": 2546
} | class ____ {
@Test
void testOfClass() {
assertThat(TypeInformation.of(String.class)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
@Test
void testOfGenericClassForFlink() {
assertThatThrownBy(() -> TypeInformation.of(Tuple3.class))
.isInstanceOf(FlinkRuntimeException.class)
.hasMessageContaining("TypeHint");
}
@Test
void testOfGenericClassForGenericType() {
assertThat(TypeInformation.of(List.class)).isEqualTo(new GenericTypeInfo<>(List.class));
}
@Test
void testOfTypeHint() {
assertThat(TypeInformation.of(String.class)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(TypeInformation.of(new TypeHint<String>() {}))
.isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
TypeInformation<Tuple3<String, Double, Boolean>> tupleInfo =
new TupleTypeInfo<>(
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.DOUBLE_TYPE_INFO,
BasicTypeInfo.BOOLEAN_TYPE_INFO);
assertThat(TypeInformation.of(new TypeHint<Tuple3<String, Double, Boolean>>() {}))
.isEqualTo(tupleInfo);
}
}
| TypeInformationTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/ReadOnlyCollectionTest_final_field.java | {
"start": 164,
"end": 459
} | class ____ extends TestCase {
public void test_readOnlyNullList() throws Exception {
String text = "{\"list\":[1,2,3]}";
Entity entity = JSON.parseObject(text, Entity.class);
Assert.assertNull(entity.list);
}
public static | ReadOnlyCollectionTest_final_field |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mappedBy/OneToOneMappedByTypeTest.java | {
"start": 4509,
"end": 4674
} | class ____ {
@Id
private Long id;
@OneToOne( mappedBy = "parent" )
private EntityBWrong child;
}
@Entity( name = "EntityBWrong" )
public static | EntityAWrong |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/ingest/SamplingServiceSampleStatsTests.java | {
"start": 1196,
"end": 6349
} | class ____ extends AbstractWireSerializingTestCase<SampleStats> {
@Override
protected Writeable.Reader<SampleStats> instanceReader() {
return SampleStats::new;
}
@Override
protected SampleStats createTestInstance() {
SampleStats stats = new SampleStats();
stats.samples.add(randomReasonableLong());
stats.potentialSamples.add(randomReasonableLong());
stats.samplesRejectedForMaxSamplesExceeded.add(randomReasonableLong());
stats.samplesRejectedForCondition.add(randomReasonableLong());
stats.samplesRejectedForRate.add(randomReasonableLong());
stats.samplesRejectedForException.add(randomReasonableLong());
stats.samplesRejectedForSize.add(randomReasonableLong());
stats.timeSamplingInNanos.add(randomReasonableLong());
stats.timeEvaluatingConditionInNanos.add(randomReasonableLong());
stats.timeCompilingConditionInNanos.add(randomReasonableLong());
stats.lastException = randomBoolean() ? null : new ElasticsearchException(randomAlphanumericOfLength(10));
return stats;
}
/*
* This is to avoid overflow errors in these tests.
*/
private long randomReasonableLong() {
long randomLong = randomNonNegativeLong();
if (randomLong > Long.MAX_VALUE / 2) {
return randomLong / 2;
} else {
return randomLong;
}
}
@Override
protected SampleStats mutateInstance(SampleStats instance) throws IOException {
SampleStats mutated = instance.combine(new SampleStats());
switch (between(0, 10)) {
case 0 -> mutated.samples.add(1);
case 1 -> mutated.potentialSamples.add(1);
case 2 -> mutated.samplesRejectedForMaxSamplesExceeded.add(1);
case 3 -> mutated.samplesRejectedForCondition.add(1);
case 4 -> mutated.samplesRejectedForRate.add(1);
case 5 -> mutated.samplesRejectedForException.add(1);
case 6 -> mutated.samplesRejectedForSize.add(1);
case 7 -> mutated.timeSamplingInNanos.add(1);
case 8 -> mutated.timeEvaluatingConditionInNanos.add(1);
case 9 -> mutated.timeCompilingConditionInNanos.add(1);
case 10 -> mutated.lastException = mutated.lastException == null
? new ElasticsearchException(randomAlphanumericOfLength(10))
: null;
default -> throw new IllegalArgumentException("Should never get here");
}
return mutated;
}
public void testCombine() {
SampleStats stats1 = createTestInstance();
stats1.lastException = null;
SampleStats combinedWithEmpty = stats1.combine(new SampleStats());
assertThat(combinedWithEmpty, equalTo(stats1));
assertNotSame(stats1, combinedWithEmpty);
SampleStats stats2 = createTestInstance();
SampleStats stats1CombineStats2 = stats1.combine(stats2);
SampleStats stats2CombineStats1 = stats2.combine(stats1);
assertThat(stats1CombineStats2, equalTo(stats2CombineStats1));
assertThat(stats1CombineStats2.getSamples(), equalTo(stats1.getSamples() + stats2.getSamples()));
assertThat(stats1CombineStats2.getPotentialSamples(), equalTo(stats1.getPotentialSamples() + stats2.getPotentialSamples()));
assertThat(
stats1CombineStats2.getSamplesRejectedForMaxSamplesExceeded(),
equalTo(stats1.getSamplesRejectedForMaxSamplesExceeded() + stats2.getSamplesRejectedForMaxSamplesExceeded())
);
assertThat(
stats1CombineStats2.getSamplesRejectedForCondition(),
equalTo(stats1.getSamplesRejectedForCondition() + stats2.getSamplesRejectedForCondition())
);
assertThat(
stats1CombineStats2.getSamplesRejectedForRate(),
equalTo(stats1.getSamplesRejectedForRate() + stats2.getSamplesRejectedForRate())
);
assertThat(
stats1CombineStats2.getSamplesRejectedForException(),
equalTo(stats1.getSamplesRejectedForException() + stats2.getSamplesRejectedForException())
);
assertThat(
stats1CombineStats2.getSamplesRejectedForSize(),
equalTo(stats1.getSamplesRejectedForSize() + stats2.getSamplesRejectedForSize())
);
assertThat(
stats1CombineStats2.getTimeSampling(),
equalTo(TimeValue.timeValueNanos(stats1.getTimeSampling().nanos() + stats2.getTimeSampling().nanos()))
);
assertThat(
stats1CombineStats2.getTimeEvaluatingCondition(),
equalTo(TimeValue.timeValueNanos(stats1.getTimeEvaluatingCondition().nanos() + stats2.getTimeEvaluatingCondition().nanos()))
);
assertThat(
stats1CombineStats2.getTimeCompilingCondition(),
equalTo(TimeValue.timeValueNanos(stats1.getTimeCompilingCondition().nanos() + stats2.getTimeCompilingCondition().nanos()))
);
}
@SuppressWarnings("unchecked")
public void testToXContent() throws IOException {
/*
* SampleStats | SamplingServiceSampleStatsTests |
java | google__dagger | javatests/dagger/internal/codegen/ProductionGraphValidationTest.java | {
"start": 10994,
"end": 11707
} | interface ____");
});
}
@Test
public void monitoringDependsOnUnboundType() {
Source component =
CompilerTests.javaSource(
"test.TestClass",
"package test;",
"",
"import com.google.common.util.concurrent.ListenableFuture;",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.multibindings.IntoSet;",
"import dagger.producers.ProducerModule;",
"import dagger.producers.Produces;",
"import dagger.producers.ProductionComponent;",
"import dagger.producers.monitoring.ProductionComponentMonitor;",
"",
"final | AComponent |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/jdbc/SqlScriptsTestExecutionListenerTests.java | {
"start": 1752,
"end": 6230
} | class ____ {
private final SqlScriptsTestExecutionListener listener = new SqlScriptsTestExecutionListener();
private final TestContext testContext = mock();
@Test
void missingValueAndScriptsAndStatementsAtClassLevel() throws Exception {
Class<?> clazz = MissingValueAndScriptsAndStatementsAtClassLevel.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("foo"));
assertExceptionContains(clazz.getSimpleName() + ".sql");
}
@Test
void missingValueAndScriptsAndStatementsAtMethodLevel() throws Exception {
Class<?> clazz = MissingValueAndScriptsAndStatementsAtMethodLevel.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("foo"));
assertExceptionContains(clazz.getSimpleName() + ".foo" + ".sql");
}
@Test
void valueAndScriptsDeclared() throws Exception {
Class<?> clazz = ValueAndScriptsDeclared.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("foo"));
assertThatExceptionOfType(AnnotationConfigurationException.class)
.isThrownBy(() -> listener.beforeTestMethod(testContext))
.withMessageContainingAll(
"Different @AliasFor mirror values",
"attribute 'scripts' and its alias 'value'",
"values of [{bar}] and [{foo}]");
}
@Test
void isolatedTxModeDeclaredWithoutTxMgr() throws Exception {
ApplicationContext ctx = mock();
given(ctx.getResource(anyString())).willReturn(mock());
given(ctx.getAutowireCapableBeanFactory()).willReturn(mock());
given(ctx.getEnvironment()).willReturn(new MockEnvironment());
Class<?> clazz = IsolatedWithoutTxMgr.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("foo"));
given(testContext.getApplicationContext()).willReturn(ctx);
assertExceptionContains("cannot execute SQL scripts using Transaction Mode [ISOLATED] without a PlatformTransactionManager");
}
@Test
void missingDataSourceAndTxMgr() throws Exception {
ApplicationContext ctx = mock();
given(ctx.getResource(anyString())).willReturn(mock());
given(ctx.getAutowireCapableBeanFactory()).willReturn(mock());
given(ctx.getEnvironment()).willReturn(new MockEnvironment());
Class<?> clazz = MissingDataSourceAndTxMgr.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("foo"));
given(testContext.getApplicationContext()).willReturn(ctx);
assertExceptionContains("supply at least a DataSource or PlatformTransactionManager");
}
@Test
void beforeTestClassOnMethod() throws Exception {
Class<?> clazz = ClassLevelExecutionPhaseOnMethod.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("beforeTestClass"));
assertThatIllegalArgumentException()
.isThrownBy(() -> listener.beforeTestMethod(testContext))
.withMessage("@SQL execution phase BEFORE_TEST_CLASS cannot be used on methods");
assertThatIllegalArgumentException()
.isThrownBy(() -> listener.afterTestMethod(testContext))
.withMessage("@SQL execution phase BEFORE_TEST_CLASS cannot be used on methods");
}
@Test
void afterTestClassOnMethod() throws Exception {
Class<?> clazz = ClassLevelExecutionPhaseOnMethod.class;
BDDMockito.<Class<?>> given(testContext.getTestClass()).willReturn(clazz);
given(testContext.getTestMethod()).willReturn(clazz.getDeclaredMethod("afterTestClass"));
assertThatIllegalArgumentException()
.isThrownBy(() -> listener.beforeTestMethod(testContext))
.withMessage("@SQL execution phase AFTER_TEST_CLASS cannot be used on methods");
assertThatIllegalArgumentException()
.isThrownBy(() -> listener.afterTestMethod(testContext))
.withMessage("@SQL execution phase AFTER_TEST_CLASS cannot be used on methods");
}
private void assertExceptionContains(String msg) {
assertThatIllegalStateException()
.isThrownBy(() -> listener.beforeTestMethod(testContext))
.withMessageContaining(msg);
}
// -------------------------------------------------------------------------
@Sql
static | SqlScriptsTestExecutionListenerTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/Hibernate.java | {
"start": 11785,
"end": 12202
} | class ____ the instance
*
* @since 6.3
*/
@SuppressWarnings("unchecked")
public static <T> Class<? extends T> getClassLazy(T proxy) {
final var lazyInitializer = extractLazyInitializer( proxy );
final Class<?> result =
lazyInitializer != null
? lazyInitializer.getImplementationClass()
: proxy.getClass();
return (Class<? extends T>) result;
}
/**
* Determine if the true, underlying | of |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/JoinedInheritanceDiscriminatorSelectionTest.java | {
"start": 8389,
"end": 8646
} | class ____ extends ParentEntity {
private Integer propertyA;
public ChildA() {
}
public ChildA(Long id, String name, Integer propertyA) {
super( id, name );
this.propertyA = propertyA;
}
}
@Entity( name = "SubChildA" )
public static | ChildA |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/CompositeTypeSerializerUpgradeTest.java | {
"start": 4632,
"end": 5083
} | class ____
implements TypeSerializerUpgradeTestBase.PreUpgradeSetup<String[]> {
@Override
public TypeSerializer<String[]> createPriorSerializer() {
return new GenericArraySerializer<>(String.class, StringSerializer.INSTANCE);
}
@Override
public String[] createTestData() {
return new String[] {"Apache", "Flink"};
}
}
/**
* This | GenericArraySerializerSetup |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java | {
"start": 5701,
"end": 6544
} | class ____ implements HealthIndicatorService {
public static final String NAME = "test_indicator";
public static final String SYMPTOM = "Symptom";
@Override
public String name() {
return NAME;
}
@Override
public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
assertThat(healthInfo.diskInfoByNode().size(), equalTo(internalCluster().getNodeNames().length));
for (DiskHealthInfo diskHealthInfo : healthInfo.diskInfoByNode().values()) {
assertThat(diskHealthInfo.healthStatus(), equalTo(HealthStatus.GREEN));
}
return createIndicator(HealthStatus.RED, SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of());
}
}
}
| TestHealthIndicatorService |
java | apache__maven | compat/maven-builder-support/src/main/java/org/apache/maven/building/Problem.java | {
"start": 1232,
"end": 1346
} | interface ____ {
/**
* The different severity levels for a problem, in decreasing order.
*/
| Problem |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/datasource/pooled/PooledConnection.java | {
"start": 949,
"end": 7094
} | class ____ implements InvocationHandler {
private static final String CLOSE = "close";
private static final Class<?>[] IFACES = { Connection.class };
private final int hashCode;
private final PooledDataSource dataSource;
private final Connection realConnection;
private final Connection proxyConnection;
private long checkoutTimestamp;
private long createdTimestamp;
private long lastUsedTimestamp;
private int connectionTypeCode;
private boolean valid;
/**
* Constructor for SimplePooledConnection that uses the Connection and PooledDataSource passed in.
*
* @param connection
* - the connection that is to be presented as a pooled connection
* @param dataSource
* - the dataSource that the connection is from
*/
public PooledConnection(Connection connection, PooledDataSource dataSource) {
this.hashCode = connection.hashCode();
this.realConnection = connection;
this.dataSource = dataSource;
this.createdTimestamp = System.currentTimeMillis();
this.lastUsedTimestamp = System.currentTimeMillis();
this.valid = true;
this.proxyConnection = (Connection) Proxy.newProxyInstance(Connection.class.getClassLoader(), IFACES, this);
}
/**
* Invalidates the connection.
*/
public void invalidate() {
valid = false;
}
/**
* Method to see if the connection is usable.
*
* @return True if the connection is usable
*/
public boolean isValid() {
return valid && realConnection != null && dataSource.pingConnection(this);
}
/**
* Getter for the *real* connection that this wraps.
*
* @return The connection
*/
public Connection getRealConnection() {
return realConnection;
}
/**
* Getter for the proxy for the connection.
*
* @return The proxy
*/
public Connection getProxyConnection() {
return proxyConnection;
}
/**
* Gets the hashcode of the real connection (or 0 if it is null).
*
* @return The hashcode of the real connection (or 0 if it is null)
*/
public int getRealHashCode() {
return realConnection == null ? 0 : realConnection.hashCode();
}
/**
* Getter for the connection type (based on url + user + password).
*
* @return The connection type
*/
public int getConnectionTypeCode() {
return connectionTypeCode;
}
/**
* Setter for the connection type.
*
* @param connectionTypeCode
* - the connection type
*/
public void setConnectionTypeCode(int connectionTypeCode) {
this.connectionTypeCode = connectionTypeCode;
}
/**
* Getter for the time that the connection was created.
*
* @return The creation timestamp
*/
public long getCreatedTimestamp() {
return createdTimestamp;
}
/**
* Setter for the time that the connection was created.
*
* @param createdTimestamp
* - the timestamp
*/
public void setCreatedTimestamp(long createdTimestamp) {
this.createdTimestamp = createdTimestamp;
}
/**
* Getter for the time that the connection was last used.
*
* @return - the timestamp
*/
public long getLastUsedTimestamp() {
return lastUsedTimestamp;
}
/**
* Setter for the time that the connection was last used.
*
* @param lastUsedTimestamp
* - the timestamp
*/
public void setLastUsedTimestamp(long lastUsedTimestamp) {
this.lastUsedTimestamp = lastUsedTimestamp;
}
/**
* Getter for the time since this connection was last used.
*
* @return - the time since the last use
*/
public long getTimeElapsedSinceLastUse() {
return System.currentTimeMillis() - lastUsedTimestamp;
}
/**
* Getter for the age of the connection.
*
* @return the age
*/
public long getAge() {
return System.currentTimeMillis() - createdTimestamp;
}
/**
* Getter for the timestamp that this connection was checked out.
*
* @return the timestamp
*/
public long getCheckoutTimestamp() {
return checkoutTimestamp;
}
/**
* Setter for the timestamp that this connection was checked out.
*
* @param timestamp
* the timestamp
*/
public void setCheckoutTimestamp(long timestamp) {
this.checkoutTimestamp = timestamp;
}
/**
* Getter for the time that this connection has been checked out.
*
* @return the time
*/
public long getCheckoutTime() {
return System.currentTimeMillis() - checkoutTimestamp;
}
@Override
public int hashCode() {
return hashCode;
}
/**
* Allows comparing this connection to another.
*
* @param obj
* - the other connection to test for equality
*
* @see Object#equals(Object)
*/
@Override
public boolean equals(Object obj) {
if (obj instanceof PooledConnection) {
return realConnection.hashCode() == ((PooledConnection) obj).realConnection.hashCode();
}
if (obj instanceof Connection) {
return hashCode == obj.hashCode();
} else {
return false;
}
}
/**
* Required for InvocationHandler implementation.
*
* @param proxy
* - not used
* @param method
* - the method to be executed
* @param args
* - the parameters to be passed to the method
*
* @see java.lang.reflect.InvocationHandler#invoke(Object, java.lang.reflect.Method, Object[])
*/
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
String methodName = method.getName();
if (CLOSE.equals(methodName)) {
dataSource.pushConnection(this);
return null;
}
try {
if (!Object.class.equals(method.getDeclaringClass())) {
// issue #579 toString() should never fail
// throw an SQLException instead of a Runtime
checkConnection();
}
return method.invoke(realConnection, args);
} catch (Throwable t) {
throw ExceptionUtil.unwrapThrowable(t);
}
}
private void checkConnection() throws SQLException {
if (!valid) {
throw new SQLException("Error accessing PooledConnection. Connection is invalid.");
}
}
}
| PooledConnection |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/DefaultEndpointRegistryTest.java | {
"start": 1626,
"end": 7216
} | class ____ {
@Test
public void testRemoveEndpoint() {
DefaultCamelContext ctx = new DefaultCamelContext();
ctx.start();
ctx.getEndpoint("direct:one");
Endpoint e = ctx.getEndpoint("direct:two");
ctx.getEndpoint("direct:three");
Assertions.assertEquals(3, ctx.getEndpoints().size());
ctx.removeEndpoint(e);
Assertions.assertEquals(2, ctx.getEndpoints().size());
}
@Test
public void testRemoveEndpointWithHash() {
DefaultCamelContext ctx = new DefaultCamelContext();
ctx.start();
ctx.getEndpoint("direct:one");
Endpoint e = ctx.getEndpoint("stub:me?bean=#myBean");
ctx.getEndpoint("direct:three");
Assertions.assertEquals(3, ctx.getEndpoints().size());
ctx.removeEndpoint(e);
Assertions.assertEquals(2, ctx.getEndpoints().size());
}
@Test
public void testRemoveEndpointToD() throws Exception {
DefaultCamelContext ctx = new DefaultCamelContext();
ctx.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.toD().cacheSize(10).uri("mock:${header.foo}");
}
});
final AtomicInteger cnt = new AtomicInteger();
ctx.addLifecycleStrategy(new DummyLifecycleStrategy() {
@Override
public void onEndpointRemove(Endpoint endpoint) {
cnt.incrementAndGet();
}
});
ctx.start();
Assertions.assertEquals(0, cnt.get());
Assertions.assertEquals(1, ctx.getEndpoints().size());
FluentProducerTemplate template = ctx.createFluentProducerTemplate();
for (int i = 0; i < 100; i++) {
template.withBody("Hello").withHeader("foo", Integer.toString(i)).to("direct:start").send();
}
Awaitility.await().untilAsserted(() -> {
Assertions.assertEquals(11, ctx.getEndpoints().size());
});
Assertions.assertEquals(90, cnt.get());
ctx.stop();
}
@Test
public void testMigration() {
DefaultCamelContext ctx = new DefaultCamelContext();
ctx.start();
DefaultEndpointRegistry reg = (DefaultEndpointRegistry) ctx.getEndpointRegistry();
// creates a new endpoint after context is stated and therefore dynamic
ctx.getEndpoint("direct:error");
assertTrue(reg.isDynamic("direct:error"));
ctx.removeEndpoints("direct:error");
// mark we are setting up routes (done = false)
ctx.getCamelContextExtension().setupRoutes(false);
ctx.getEndpoint("direct:error");
assertTrue(reg.isStatic("direct:error"));
}
@Test
public void testMigrationRoute() throws Exception {
DefaultCamelContext ctx = new DefaultCamelContext();
ctx.addRoutes(new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("direct:error")
.maximumRedeliveries(2)
.redeliveryDelay(0));
from("direct:error")
.routeId("error")
.errorHandler(deadLetterChannel("log:dead?level=ERROR"))
.to("mock:error")
.to("file:error");
}
});
ctx.start();
EndpointRegistry reg = ctx.getEndpointRegistry();
assertTrue(reg.isStatic("direct:error"));
assertTrue(reg.isStatic("mock:error"));
assertTrue(reg.isStatic("file:error"));
}
//Testing the issue https://issues.apache.org/jira/browse/CAMEL-19295
@Test
public void testConcurrency() throws InterruptedException {
SimpleCamelContext context = new SimpleCamelContext();
context.start();
ProducerTemplate producerTemplate = context.createProducerTemplate();
EndpointRegistry endpointRegistry = context.getEndpointRegistry();
int nThreads = 4;
ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
int iterations = 500;
for (int j = 0; j < iterations; j++) {
CountDownLatch allThreadCompletionSemaphore = new CountDownLatch(nThreads);
for (int i = 0; i < nThreads; i++) {
executorService.submit(() -> {
producerTemplate.requestBody("controlbus:route?routeId=route1&action=ACTION_STATUS&loggingLevel=off", null,
ServiceStatus.class);
producerTemplate.requestBody("controlbus:route?routeId=route2&action=ACTION_STATUS&loggingLevel=off", null,
ServiceStatus.class);
producerTemplate.requestBody("controlbus:route?routeId=route3&action=ACTION_STATUS&loggingLevel=off", null,
ServiceStatus.class);
producerTemplate.requestBody("controlbus:route?routeId=route4&action=ACTION_STATUS&loggingLevel=off", null,
ServiceStatus.class);
producerTemplate.requestBody("controlbus:route?routeId=route5&action=ACTION_STATUS&loggingLevel=off", null,
ServiceStatus.class);
allThreadCompletionSemaphore.countDown();
});
}
allThreadCompletionSemaphore.await();
assertNotNull(endpointRegistry.values().toArray());
}
executorService.shutdown();
}
}
| DefaultEndpointRegistryTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/JavaTypeRegistrations.java | {
"start": 726,
"end": 795
} | interface ____ {
JavaTypeRegistration[] value();
}
| JavaTypeRegistrations |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/access/ConnectorDelegate.java | {
"start": 1171,
"end": 1257
} | class ____ managing a JMX connector.
*
* @author Juergen Hoeller
* @since 2.5.2
*/
| for |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/node/NullNode.java | {
"start": 368,
"end": 433
} | class ____ used to contain explicit JSON null
* value.
*/
public | is |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/impl/pkcs1/PrivateKeyParser.java | {
"start": 13599,
"end": 17311
} | class ____ {
protected final int type;
protected final int length;
protected final byte[] value;
protected final int tag;
/**
* Construct a ASN.1 TLV. The TLV could be either a
* constructed or primitive entity.
* <p/>
* <p/>The first byte in DER encoding is made of following fields,
* <pre>
* -------------------------------------------------
* |Bit 8|Bit 7|Bit 6|Bit 5|Bit 4|Bit 3|Bit 2|Bit 1|
* -------------------------------------------------
* | Class | CF | + Type |
* -------------------------------------------------
* </pre>
* <ul>
* <li>Class: Universal, Application, Context or Private
* <li>CF: Constructed flag. If 1, the field is constructed.
* <li>Type: This is actually called tag in ASN.1. It
* indicates data type (Integer, String) or a construct
* (sequence, choice, set).
* </ul>
*
* @param tag Tag or Identifier
* @param length Length of the field
* @param value Encoded octet string for the field.
*/
public Asn1Object(int tag, int length, byte[] value) {
this.tag = tag;
this.type = tag & 0x1F;
this.length = length;
this.value = value;
}
public int getType() {
return type;
}
public int getLength() {
return length;
}
public byte[] getValue() {
return value;
}
public boolean isConstructed() {
return (tag & DerParser.CONSTRUCTED) == DerParser.CONSTRUCTED;
}
/**
* For constructed field, return a parser for its content.
*
* @return A parser for the construct.
* @throws VertxException
*/
public DerParser getParser() throws VertxException {
if (!isConstructed()) {
throw new VertxException("Invalid DER: can't parse primitive entity");
}
return new DerParser(value);
}
/**
* Get the value as integer
*
* @return BigInteger
* @throws VertxException
*/
public BigInteger getInteger() throws VertxException {
if (type != DerParser.INTEGER) {
throw new VertxException("Invalid DER: object is not integer");
}
return new BigInteger(value);
}
public byte[] getObjectIdentifier() throws VertxException {
switch(type) {
case DerParser.OBJECT_IDENTIFIER:
return value;
default:
throw new VertxException("Invalid DER: object is not an Object Identifier");
}
}
/**
* Get value as string. Most strings are treated
* as Latin-1.
*
* @return Java string
* @throws VertxException
*/
public String getString() throws VertxException {
String encoding;
switch (type) {
// Not all are Latin-1 but it's the closest thing
case DerParser.NUMERIC_STRING:
case DerParser.PRINTABLE_STRING:
case DerParser.VIDEOTEX_STRING:
case DerParser.IA5_STRING:
case DerParser.GRAPHIC_STRING:
case DerParser.ISO646_STRING:
case DerParser.GENERAL_STRING:
encoding = "ISO-8859-1";
break;
case DerParser.BMP_STRING:
encoding = "UTF-16BE";
break;
case DerParser.UTF8_STRING:
encoding = "UTF-8";
break;
case DerParser.UNIVERSAL_STRING:
throw new VertxException("Invalid DER: can't handle UCS-4 string");
default:
throw new VertxException("Invalid DER: object is not a string");
}
try {
return new String(value, encoding);
} catch (UnsupportedEncodingException e) {
throw new VertxException(e);
}
}
}
}
| Asn1Object |
java | elastic__elasticsearch | x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java | {
"start": 1629,
"end": 3070
} | class ____ implements Writeable, ToXContentFragment {
private static ParseField ROLLUP_JOBS = new ParseField("rollup_jobs");
private static ParseField DOC_FIELD = new ParseField("_doc");
private static ParseField META_FIELD = new ParseField("_meta");
private static ParseField ROLLUP_FIELD = new ParseField(RollupField.ROLLUP_META);
// Note: we ignore unknown fields since there may be unrelated metadata
private static final ObjectParser<RollupIndexCaps, Void> METADATA_PARSER = new ObjectParser<>(
GetRollupCapsAction.NAME,
true,
RollupIndexCaps::new
);
static {
/*
Rollup index metadata layout is:
"_doc": {
"_meta" : {
"_rollup": {
"job-1": {
... job config, parsable by RollupJobConfig.PARSER ...
},
"job-2": {
... job config, parsable by RollupJobConfig.PARSER ...
}
},
"rollup-version": ""
}
}
*/
METADATA_PARSER.declareField(
(parser, rollupIndexCaps, aVoid) -> rollupIndexCaps.setJobs(DocParser.DOC_PARSER.apply(parser, aVoid).jobs),
DOC_FIELD,
ObjectParser.ValueType.OBJECT
);
}
/**
* Parser for `_doc` portion of mapping metadata
*/
private static | RollupIndexCaps |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/LeadLagAggFunction.java | {
"start": 6305,
"end": 6642
} | class ____ extends LeadLagAggFunction {
public DoubleLeadLagAggFunction(int operandCount) {
super(operandCount);
}
@Override
public DataType getResultType() {
return DataTypes.DOUBLE();
}
}
/** BooleanLeadLagAggFunction. */
public static | DoubleLeadLagAggFunction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/manytoone/ManyToOneBidirectionalEagerTest.java | {
"start": 4420,
"end": 5447
} | class ____ {
@Id
private Long id;
@Column( nullable = false )
private String description;
@OneToMany( mappedBy = "one", cascade = { ALL }, fetch = FetchType.EAGER )
private Set<ManyEntity> many = new HashSet<>();
public OneEntity(Long id, String description) {
this.id = id;
this.description = description;
}
public OneEntity() {
}
public Long getId() {
return id;
}
public String getDescription() {
return description;
}
public Set<ManyEntity> getMany() {
return many;
}
public void addMany(ManyEntity newMany) {
many.add( newMany );
newMany.setOne( this );
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
final OneEntity that = (OneEntity) o;
return Objects.equals( id, that.id ) && Objects.equals( description, that.description );
}
@Override
public int hashCode() {
return Objects.hash( id, description );
}
}
}
| OneEntity |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/error/clientexceptionmapper/DummyException.java | {
"start": 128,
"end": 377
} | class ____ extends RuntimeException {
static final AtomicInteger executionCount = new AtomicInteger(0);
public DummyException() {
executionCount.incrementAndGet();
setStackTrace(new StackTraceElement[0]);
}
}
| DummyException |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/DefaultPollingEndpoint.java | {
"start": 1084,
"end": 1552
} | class ____ extends ScheduledPollEndpoint {
protected DefaultPollingEndpoint() {
}
protected DefaultPollingEndpoint(String endpointUri, Component component) {
super(endpointUri, component);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
Consumer result = new DefaultScheduledPollConsumer(this, processor);
configureConsumer(result);
return result;
}
}
| DefaultPollingEndpoint |
java | grpc__grpc-java | core/src/test/java/io/grpc/internal/ForwardingClientStreamListenerTest.java | {
"start": 1140,
"end": 2296
} | class ____ {
private ClientStreamListener mock = mock(ClientStreamListener.class);
private ForwardingClientStreamListener forward = new ForwardingClientStreamListener() {
@Override
protected ClientStreamListener delegate() {
return mock;
}
};
@Test
public void allMethodsForwarded() throws Exception {
ForwardingTestUtil.testMethodsForwarded(
ClientStreamListener.class,
mock,
forward,
Collections.<Method>emptyList());
}
@Test
public void headersReadTest() {
Metadata headers = new Metadata();
forward.headersRead(headers);
verify(mock).headersRead(same(headers));
}
@Test
public void closedTest() {
Status status = Status.UNKNOWN;
Metadata trailers = new Metadata();
forward.closed(status, RpcProgress.PROCESSED, trailers);
verify(mock).closed(same(status), same(RpcProgress.PROCESSED), same(trailers));
}
@Test
public void messagesAvailableTest() {
MessageProducer producer = mock(MessageProducer.class);
forward.messagesAvailable(producer);
verify(mock).messagesAvailable(same(producer));
}
}
| ForwardingClientStreamListenerTest |
java | apache__flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java | {
"start": 3356,
"end": 9904
} | class ____ {
// *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
final CLI params = CLI.fromArgs(args);
// Create the execution environment. This is the main entrypoint
// to building a Flink application.
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// For async state, by default we will use the forst state backend.
if (params.isAsyncState()) {
Configuration config = Configuration.fromMap(env.getConfiguration().toMap());
if (!config.containsKey(StateBackendOptions.STATE_BACKEND.key())) {
config.set(StateBackendOptions.STATE_BACKEND, FORST_STATE_BACKEND_NAME);
env.configure(config);
}
}
// Apache Flink’s unified approach to stream and batch processing means that a DataStream
// application executed over bounded input will produce the same final results regardless
// of the configured execution mode. It is important to note what final means here: a job
// executing in STREAMING mode might produce incremental updates (think upserts in
// a database) while in BATCH mode, it would only produce one final result at the end. The
// final result will be the same if interpreted correctly, but getting there can be
// different.
//
// The “classic” execution behavior of the DataStream API is called STREAMING execution
// mode. Applications should use streaming execution for unbounded jobs that require
// continuous incremental processing and are expected to stay online indefinitely.
//
// By enabling BATCH execution, we allow Flink to apply additional optimizations that we
// can only do when we know that our input is bounded. For example, different
// join/aggregation strategies can be used, in addition to a different shuffle
// implementation that allows more efficient task scheduling and failure recovery behavior.
//
// By setting the runtime mode to AUTOMATIC, Flink will choose BATCH if all sources
// are bounded and otherwise STREAMING.
env.setRuntimeMode(params.getExecutionMode());
// This optional step makes the input parameters
// available in the Flink UI.
env.getConfig().setGlobalJobParameters(params);
DataStream<String> text;
if (params.getInputs().isPresent()) {
// Create a new file source that will read files from a given set of directories.
// Each file will be processed as plain text and split based on newlines.
FileSource.FileSourceBuilder<String> builder =
FileSource.forRecordStreamFormat(
new TextLineInputFormat(), params.getInputs().get());
// If a discovery interval is provided, the source will
// continuously watch the given directories for new files.
params.getDiscoveryInterval().ifPresent(builder::monitorContinuously);
text = env.fromSource(builder.build(), WatermarkStrategy.noWatermarks(), "file-input");
} else {
text = env.fromData(WordCountData.WORDS).name("in-memory-input");
}
KeyedStream<Tuple2<String, Integer>, String> keyedStream =
// The text lines read from the source are split into words
// using a user-defined function. The tokenizer, implemented below,
// will output each word as a (2-tuple) containing (word, 1)
text.flatMap(new Tokenizer())
.name("tokenizer")
// keyBy groups tuples based on the "0" field, the word.
// Using a keyBy allows performing aggregations and other
// stateful transformations over data on a per-key basis.
// This is similar to a GROUP BY clause in a SQL query.
.keyBy(value -> value.f0);
if (params.isAsyncState()) {
keyedStream.enableAsyncState();
}
DataStream<Tuple2<String, Integer>> counts =
keyedStream
// For each key, we perform a simple sum of the "1" field, the count.
// If the input data stream is bounded, sum will output a final count for
// each word. If it is unbounded, it will continuously output updates
// each time it sees a new instance of each word in the stream.
.sum(1)
.name("counter");
if (params.getOutput().isPresent()) {
// Given an output directory, Flink will write the results to a file
// using a simple string encoding. In a production environment, this might
// be something more structured like CSV, Avro, JSON, or Parquet.
counts.sinkTo(
FileSink.<Tuple2<String, Integer>>forRowFormat(
params.getOutput().get(), new SimpleStringEncoder<>())
.withRollingPolicy(
DefaultRollingPolicy.builder()
.withMaxPartSize(MemorySize.ofMebiBytes(1))
.withRolloverInterval(Duration.ofSeconds(10))
.build())
.build())
.name("file-sink");
} else {
counts.print().name("print-sink");
}
// Apache Flink applications are composed lazily. Calling execute
// submits the Job and begins processing.
env.execute("WordCount");
}
// *************************************************************************
// USER FUNCTIONS
// *************************************************************************
/**
* Implements the string tokenizer that splits sentences into words as a user-defined
* FlatMapFunction. The function takes a line (String) and splits it into multiple pairs in the
* form of "(word,1)" ({@code Tuple2<String, Integer>}).
*/
public static final | WordCount |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 82062,
"end": 82383
} | class ____<T extends ImmutableClass>
extends GenericWithImmutableParam<T> {}
""")
.doTest();
}
@Test
public void methodInvocation_violation() {
compilationHelper
.addSourceLines(
"MutableClass.java",
"""
| ChildGenericWithImmutableParam |
java | jhy__jsoup | src/main/java/org/jsoup/nodes/Element.java | {
"start": 71297,
"end": 71591
} | class ____ to add
@return this element
*/
public Element addClass(String className) {
Validate.notNull(className);
Set<String> classes = classNames();
classes.add(className);
classNames(classes);
return this;
}
/**
Remove a | name |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java | {
"start": 3172,
"end": 20226
} | interface ____ {
QueryResult<?> apply(
final Query<?> query,
final Position mergedPosition,
final PositionBound positionBound,
final QueryConfig config,
final StateStore store
);
}
@SuppressWarnings("rawtypes")
private final Map<Class, CacheQueryHandler> queryHandlers =
mkMap(
mkEntry(
KeyQuery.class,
(query, mergedPosition, positionBound, config, store) ->
runKeyQuery(query, mergedPosition, positionBound, config)
)
);
CachingKeyValueStore(final KeyValueStore<Bytes, byte[]> underlying, final boolean timestampedSchema) {
super(underlying);
position = Position.emptyPosition();
this.timestampedSchema = timestampedSchema;
}
@Override
public void init(final StateStoreContext stateStoreContext, final StateStore root) {
internalContext = asInternalProcessorContext(stateStoreContext);
cacheName = ThreadCache.nameSpaceFromTaskIdAndStore(internalContext.taskId().toString(), name());
internalContext.registerCacheFlushListener(cacheName, entries -> {
for (final ThreadCache.DirtyEntry entry : entries) {
putAndMaybeForward(entry, internalContext);
}
});
super.init(stateStoreContext, root);
// save the stream thread as we only ever want to trigger a flush
// when the stream thread is the current thread.
streamThread = Thread.currentThread();
}
@Override
public Position getPosition() {
// We return the merged position since the query uses the merged position as well
final Position mergedPosition = Position.emptyPosition();
final Position wrappedPosition = wrapped().getPosition();
synchronized (position) {
synchronized (wrappedPosition) {
mergedPosition.merge(position);
mergedPosition.merge(wrappedPosition);
}
}
return mergedPosition;
}
@SuppressWarnings("unchecked")
@Override
public <R> QueryResult<R> query(final Query<R> query,
final PositionBound positionBound,
final QueryConfig config) {
final long start = config.isCollectExecutionInfo() ? System.nanoTime() : -1L;
final QueryResult<R> result;
final CacheQueryHandler handler = queryHandlers.get(query.getClass());
if (handler == null) {
result = wrapped().query(query, positionBound, config);
} else {
final int partition = internalContext.taskId().partition();
final Lock lock = this.lock.readLock();
lock.lock();
try {
validateStoreOpen();
final Position mergedPosition = getPosition();
// We use the merged position since the cache and the store may be at different positions
if (!StoreQueryUtils.isPermitted(mergedPosition, positionBound, partition)) {
result = QueryResult.notUpToBound(mergedPosition, positionBound, partition);
} else {
result = (QueryResult<R>) handler.apply(
query,
mergedPosition,
positionBound,
config,
this
);
}
} finally {
lock.unlock();
}
}
if (config.isCollectExecutionInfo()) {
result.addExecutionInfo(
"Handled in " + getClass() + " in " + (System.nanoTime() - start) + "ns");
}
return result;
}
@SuppressWarnings("unchecked")
private <R> QueryResult<R> runKeyQuery(final Query<R> query,
final Position mergedPosition,
final PositionBound positionBound,
final QueryConfig config) {
QueryResult<R> result = null;
final KeyQuery<Bytes, byte[]> keyQuery = (KeyQuery<Bytes, byte[]>) query;
if (keyQuery.isSkipCache()) {
return wrapped().query(query, positionBound, config);
}
final Bytes key = keyQuery.getKey();
synchronized (mergedPosition) {
if (internalContext.cache() != null) {
final LRUCacheEntry lruCacheEntry = internalContext.cache().get(cacheName, key);
if (lruCacheEntry != null) {
final byte[] rawValue;
if (timestampedSchema && !WrappedStateStore.isTimestamped(wrapped()) && !StoreQueryUtils.isAdapter(wrapped())) {
rawValue = ValueAndTimestampDeserializer.rawValue(lruCacheEntry.value());
} else {
rawValue = lruCacheEntry.value();
}
result = (QueryResult<R>) QueryResult.forResult(rawValue);
}
}
// We don't need to check the position at the state store since we already performed the check on
// the merged position above
if (result == null) {
result = wrapped().query(query, PositionBound.unbounded(), config);
}
result.setPosition(mergedPosition.copy());
}
return result;
}
private void putAndMaybeForward(final ThreadCache.DirtyEntry entry,
final InternalProcessorContext<?, ?> context) {
if (flushListener != null) {
final byte[] rawNewValue = entry.newValue();
final byte[] rawOldValue = rawNewValue == null || sendOldValues ? wrapped().get(entry.key()) : null;
// this is an optimization: if this key did not exist in underlying store and also not in the cache,
// we can skip flushing to downstream as well as writing to underlying store
if (rawNewValue != null || rawOldValue != null) {
// we need to get the old values if needed, and then put to store, and then flush
final ProcessorRecordContext current = context.recordContext();
try {
context.setRecordContext(entry.entry().context());
wrapped().put(entry.key(), entry.newValue());
flushListener.apply(
new Record<>(
entry.key().get(),
new Change<>(rawNewValue, sendOldValues ? rawOldValue : null),
entry.entry().context().timestamp(),
entry.entry().context().headers()));
} finally {
context.setRecordContext(current);
}
}
} else {
final ProcessorRecordContext current = context.recordContext();
try {
context.setRecordContext(entry.entry().context());
wrapped().put(entry.key(), entry.newValue());
} finally {
context.setRecordContext(current);
}
}
}
@Override
public boolean setFlushListener(final CacheFlushListener<byte[], byte[]> flushListener,
final boolean sendOldValues) {
this.flushListener = flushListener;
this.sendOldValues = sendOldValues;
return true;
}
@Override
public void put(final Bytes key,
final byte[] value) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
// for null bytes, we still put it into cache indicating tombstones
putInternal(key, value);
} finally {
lock.writeLock().unlock();
}
}
private void putInternal(final Bytes key,
final byte[] value) {
synchronized (position) {
internalContext.cache().put(
cacheName,
key,
new LRUCacheEntry(
value,
internalContext.recordContext().headers(),
true,
internalContext.recordContext().offset(),
internalContext.recordContext().timestamp(),
internalContext.recordContext().partition(),
internalContext.recordContext().topic(),
internalContext.recordContext().sourceRawKey(),
internalContext.recordContext().sourceRawValue()
)
);
StoreQueryUtils.updatePosition(position, internalContext);
}
}
@Override
public byte[] putIfAbsent(final Bytes key,
final byte[] value) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
final byte[] v = getInternal(key);
if (v == null) {
putInternal(key, value);
}
return v;
} finally {
lock.writeLock().unlock();
}
}
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
for (final KeyValue<Bytes, byte[]> entry : entries) {
Objects.requireNonNull(entry.key, "key cannot be null");
put(entry.key, entry.value);
}
} finally {
lock.writeLock().unlock();
}
}
@Override
public byte[] delete(final Bytes key) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
return deleteInternal(key);
} finally {
lock.writeLock().unlock();
}
}
private byte[] deleteInternal(final Bytes key) {
final byte[] v = getInternal(key);
putInternal(key, null);
return v;
}
@Override
public byte[] get(final Bytes key) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
final Lock theLock;
if (Thread.currentThread().equals(streamThread)) {
theLock = lock.writeLock();
} else {
theLock = lock.readLock();
}
theLock.lock();
try {
validateStoreOpen();
return getInternal(key);
} finally {
theLock.unlock();
}
}
private byte[] getInternal(final Bytes key) {
LRUCacheEntry entry = null;
if (internalContext.cache() != null) {
entry = internalContext.cache().get(cacheName, key);
}
if (entry == null) {
final byte[] rawValue = wrapped().get(key);
if (rawValue == null) {
return null;
}
// only update the cache if this call is on the streamThread
// as we don't want other threads to trigger an eviction/flush
if (Thread.currentThread().equals(streamThread)) {
internalContext.cache().put(cacheName, key, new LRUCacheEntry(rawValue));
}
return rawValue;
} else {
return entry.value();
}
}
@Override
public KeyValueIterator<Bytes, byte[]> range(final Bytes from,
final Bytes to) {
if (Objects.nonNull(from) && Objects.nonNull(to) && from.compareTo(to) > 0) {
LOG.warn("Returning empty iterator for fetch with invalid key range: from > to. " +
"This may be due to range arguments set in the wrong order, " +
"or serdes that don't preserve ordering when lexicographically comparing the serialized bytes. " +
"Note that the built-in numerical serdes do not follow this for negative numbers");
return KeyValueIterators.emptyIterator();
}
validateStoreOpen();
final KeyValueIterator<Bytes, byte[]> storeIterator = wrapped().range(from, to);
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().range(cacheName, from, to);
return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true);
}
@Override
public KeyValueIterator<Bytes, byte[]> reverseRange(final Bytes from,
final Bytes to) {
if (Objects.nonNull(from) && Objects.nonNull(to) && from.compareTo(to) > 0) {
LOG.warn("Returning empty iterator for fetch with invalid key range: from > to. " +
"This may be due to range arguments set in the wrong order, " +
"or serdes that don't preserve ordering when lexicographically comparing the serialized bytes. " +
"Note that the built-in numerical serdes do not follow this for negative numbers");
return KeyValueIterators.emptyIterator();
}
validateStoreOpen();
final KeyValueIterator<Bytes, byte[]> storeIterator = wrapped().reverseRange(from, to);
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseRange(cacheName, from, to);
return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, false);
}
@Override
public KeyValueIterator<Bytes, byte[]> all() {
validateStoreOpen();
final KeyValueIterator<Bytes, byte[]> storeIterator = wrapped().all();
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().all(cacheName);
return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true);
}
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<Bytes, byte[]> prefixScan(final P prefix, final PS prefixKeySerializer) {
validateStoreOpen();
final KeyValueIterator<Bytes, byte[]> storeIterator = wrapped().prefixScan(prefix, prefixKeySerializer);
final Bytes from = Bytes.wrap(prefixKeySerializer.serialize(null, prefix));
final Bytes to = Bytes.increment(from);
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().range(cacheName, from, to, false);
return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true);
}
@Override
public KeyValueIterator<Bytes, byte[]> reverseAll() {
validateStoreOpen();
final KeyValueIterator<Bytes, byte[]> storeIterator = wrapped().reverseAll();
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName);
return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, false);
}
@Override
public long approximateNumEntries() {
validateStoreOpen();
lock.readLock().lock();
try {
validateStoreOpen();
return wrapped().approximateNumEntries();
} finally {
lock.readLock().unlock();
}
}
@Override
public void flush() {
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
internalContext.cache().flush(cacheName);
wrapped().flush();
} finally {
lock.writeLock().unlock();
}
}
@Override
public void flushCache() {
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
internalContext.cache().flush(cacheName);
} finally {
lock.writeLock().unlock();
}
}
@Override
public void clearCache() {
validateStoreOpen();
lock.writeLock().lock();
try {
validateStoreOpen();
internalContext.cache().clear(cacheName);
} finally {
lock.writeLock().unlock();
}
}
@Override
public void close() {
lock.writeLock().lock();
try {
final LinkedList<RuntimeException> suppressed = executeAll(
() -> internalContext.cache().flush(cacheName),
() -> internalContext.cache().close(cacheName),
wrapped()::close
);
if (!suppressed.isEmpty()) {
throwSuppressed("Caught an exception while closing caching key value store for store " + name(),
suppressed);
}
} finally {
lock.writeLock().unlock();
}
}
}
| CacheQueryHandler |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/BeanInfoFactory.java | {
"start": 792,
"end": 1989
} | interface ____ creating {@link BeanInfo} instances for Spring beans.
* Can be used to plug in custom bean property resolution strategies (for example, for other
* languages on the JVM) or more efficient {@link BeanInfo} retrieval algorithms.
*
* <p>BeanInfoFactories are instantiated by the {@link CachedIntrospectionResults},
* by using the {@link org.springframework.core.io.support.SpringFactoriesLoader}
* utility class.
*
* When a {@link BeanInfo} is to be created, the {@code CachedIntrospectionResults}
* will iterate through the discovered factories, calling {@link #getBeanInfo(Class)}
* on each one. If {@code null} is returned, the next factory will be queried.
* If none of the factories support the class, a standard {@link BeanInfo} will be
* created as a default.
*
* <p>Note that the {@link org.springframework.core.io.support.SpringFactoriesLoader}
* sorts the {@code BeanInfoFactory} instances by
* {@link org.springframework.core.annotation.Order @Order}, so that ones with a
* higher precedence come first.
*
* @author Arjen Poutsma
* @since 3.2
* @see CachedIntrospectionResults
* @see org.springframework.core.io.support.SpringFactoriesLoader
*/
public | for |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NonFinalStaticFieldTest.java | {
"start": 4794,
"end": 4961
} | class ____ {
private static int foo;
}
""")
.addOutputLines(
"Test.java",
"""
public | Test |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/utils/CatalogTableStatisticsConverterTest.java | {
"start": 1267,
"end": 2112
} | class ____ {
@Test
void testConvertToColumnStatsMapWithNullColumnStatisticsData() {
Map<String, CatalogColumnStatisticsDataBase> columnStatisticsDataBaseMap = new HashMap<>();
columnStatisticsDataBaseMap.put(
"first", new CatalogColumnStatisticsDataString(10L, 5.2, 3L, 100L));
columnStatisticsDataBaseMap.put("second", null);
Map<String, ColumnStats> columnStatsMap =
CatalogTableStatisticsConverter.convertToColumnStatsMap(
columnStatisticsDataBaseMap);
assertThat(columnStatsMap).isNotNull();
assertThat(columnStatsMap).hasSize(columnStatisticsDataBaseMap.size() - 1);
assertThat(columnStatsMap).containsKey("first");
assertThat(columnStatsMap).doesNotContainKey("second");
}
}
| CatalogTableStatisticsConverterTest |
java | apache__dubbo | dubbo-registry/dubbo-registry-nacos/src/main/java/org/apache/dubbo/registry/nacos/NacosNamingServiceWrapper.java | {
"start": 16617,
"end": 17106
} | class ____ {
private final Instance instance;
private final NamingService namingService;
public InstanceInfo(Instance instance, NamingService namingService) {
this.instance = instance;
this.namingService = namingService;
}
public Instance getInstance() {
return instance;
}
public NamingService getNamingService() {
return namingService;
}
}
private static | InstanceInfo |
java | elastic__elasticsearch | libs/simdvec/src/main21/java/org/elasticsearch/simdvec/internal/vectorization/MSInt4SymmetricESNextOSQVectorsScorer.java | {
"start": 1300,
"end": 21189
} | class ____ extends MemorySegmentESNextOSQVectorsScorer.MemorySegmentScorer {
MSInt4SymmetricESNextOSQVectorsScorer(IndexInput in, int dimensions, int dataLength, MemorySegment memorySegment) {
super(in, dimensions, dataLength, memorySegment);
}
@Override
public long quantizeScore(byte[] q) throws IOException {
assert q.length == length;
// 128 / 8 == 16
if (length >= 16 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 256) {
return quantizeScoreSymmetric256(q);
} else if (PanamaESVectorUtilSupport.VECTOR_BITSIZE == 128) {
return quantizeScoreSymmetric128(q);
}
}
return Long.MIN_VALUE;
}
private long quantizeScoreSymmetric128(byte[] q) throws IOException {
int stripe0 = (int) quantizeScore128(q);
int stripe1 = (int) quantizeScore128(q);
int stripe2 = (int) quantizeScore128(q);
int stripe3 = (int) quantizeScore128(q);
return stripe0 + ((long) stripe1 << 1) + ((long) stripe2 << 2) + ((long) stripe3 << 3);
}
private long quantizeScoreSymmetric256(byte[] q) throws IOException {
int stripe0 = (int) quantizeScore256(q);
int stripe1 = (int) quantizeScore256(q);
int stripe2 = (int) quantizeScore256(q);
int stripe3 = (int) quantizeScore256(q);
return stripe0 + ((long) stripe1 << 1) + ((long) stripe2 << 2) + ((long) stripe3 << 3);
}
private long quantizeScore256(byte[] q) throws IOException {
long subRet0 = 0;
long subRet1 = 0;
long subRet2 = 0;
long subRet3 = 0;
int i = 0;
long offset = in.getFilePointer();
int size = length / 4;
if (size >= ByteVector.SPECIES_256.vectorByteSize() * 2) {
int limit = ByteVector.SPECIES_256.loopBound(size);
var sum0 = LongVector.zero(LONG_SPECIES_256);
var sum1 = LongVector.zero(LONG_SPECIES_256);
var sum2 = LongVector.zero(LONG_SPECIES_256);
var sum3 = LongVector.zero(LONG_SPECIES_256);
for (; i < limit; i += ByteVector.SPECIES_256.length(), offset += LONG_SPECIES_256.vectorByteSize()) {
var vq0 = ByteVector.fromArray(BYTE_SPECIES_256, q, i).reinterpretAsLongs();
var vq1 = ByteVector.fromArray(BYTE_SPECIES_256, q, i + size).reinterpretAsLongs();
var vq2 = ByteVector.fromArray(BYTE_SPECIES_256, q, i + size * 2).reinterpretAsLongs();
var vq3 = ByteVector.fromArray(BYTE_SPECIES_256, q, i + size * 3).reinterpretAsLongs();
var vd = LongVector.fromMemorySegment(LONG_SPECIES_256, memorySegment, offset, ByteOrder.LITTLE_ENDIAN);
sum0 = sum0.add(vq0.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum1 = sum1.add(vq1.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum2 = sum2.add(vq2.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum3 = sum3.add(vq3.and(vd).lanewise(VectorOperators.BIT_COUNT));
}
subRet0 += sum0.reduceLanes(VectorOperators.ADD);
subRet1 += sum1.reduceLanes(VectorOperators.ADD);
subRet2 += sum2.reduceLanes(VectorOperators.ADD);
subRet3 += sum3.reduceLanes(VectorOperators.ADD);
}
if (size - i >= ByteVector.SPECIES_128.vectorByteSize()) {
var sum0 = LongVector.zero(LONG_SPECIES_128);
var sum1 = LongVector.zero(LONG_SPECIES_128);
var sum2 = LongVector.zero(LONG_SPECIES_128);
var sum3 = LongVector.zero(LONG_SPECIES_128);
int limit = ByteVector.SPECIES_128.loopBound(size);
for (; i < limit; i += ByteVector.SPECIES_128.length(), offset += LONG_SPECIES_128.vectorByteSize()) {
var vq0 = ByteVector.fromArray(BYTE_SPECIES_128, q, i).reinterpretAsLongs();
var vq1 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size).reinterpretAsLongs();
var vq2 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size * 2).reinterpretAsLongs();
var vq3 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size * 3).reinterpretAsLongs();
var vd = LongVector.fromMemorySegment(LONG_SPECIES_128, memorySegment, offset, ByteOrder.LITTLE_ENDIAN);
sum0 = sum0.add(vq0.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum1 = sum1.add(vq1.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum2 = sum2.add(vq2.and(vd).lanewise(VectorOperators.BIT_COUNT));
sum3 = sum3.add(vq3.and(vd).lanewise(VectorOperators.BIT_COUNT));
}
subRet0 += sum0.reduceLanes(VectorOperators.ADD);
subRet1 += sum1.reduceLanes(VectorOperators.ADD);
subRet2 += sum2.reduceLanes(VectorOperators.ADD);
subRet3 += sum3.reduceLanes(VectorOperators.ADD);
}
// process scalar tail
in.seek(offset);
for (final int upperBound = size & -Long.BYTES; i < upperBound; i += Long.BYTES) {
final long value = in.readLong();
subRet0 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i) & value);
subRet1 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + size) & value);
subRet2 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + 2 * size) & value);
subRet3 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + 3 * size) & value);
}
for (final int upperBound = size & -Integer.BYTES; i < upperBound; i += Integer.BYTES) {
final int value = in.readInt();
subRet0 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i) & value);
subRet1 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + size) & value);
subRet2 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + 2 * size) & value);
subRet3 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + 3 * size) & value);
}
for (; i < size; i++) {
int dValue = in.readByte() & 0xFF;
subRet0 += Integer.bitCount((q[i] & dValue) & 0xFF);
subRet1 += Integer.bitCount((q[i + size] & dValue) & 0xFF);
subRet2 += Integer.bitCount((q[i + 2 * size] & dValue) & 0xFF);
subRet3 += Integer.bitCount((q[i + 3 * size] & dValue) & 0xFF);
}
return subRet0 + (subRet1 << 1) + (subRet2 << 2) + (subRet3 << 3);
}
private long quantizeScore128(byte[] q) throws IOException {
long subRet0 = 0;
long subRet1 = 0;
long subRet2 = 0;
long subRet3 = 0;
int i = 0;
long offset = in.getFilePointer();
var sum0 = IntVector.zero(INT_SPECIES_128);
var sum1 = IntVector.zero(INT_SPECIES_128);
var sum2 = IntVector.zero(INT_SPECIES_128);
var sum3 = IntVector.zero(INT_SPECIES_128);
int size = length / 4;
int limit = ByteVector.SPECIES_128.loopBound(size);
for (; i < limit; i += ByteVector.SPECIES_128.length(), offset += INT_SPECIES_128.vectorByteSize()) {
var vd = IntVector.fromMemorySegment(INT_SPECIES_128, memorySegment, offset, ByteOrder.LITTLE_ENDIAN);
var vq0 = ByteVector.fromArray(BYTE_SPECIES_128, q, i).reinterpretAsInts();
var vq1 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size).reinterpretAsInts();
var vq2 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size * 2).reinterpretAsInts();
var vq3 = ByteVector.fromArray(BYTE_SPECIES_128, q, i + size * 3).reinterpretAsInts();
sum0 = sum0.add(vd.and(vq0).lanewise(VectorOperators.BIT_COUNT));
sum1 = sum1.add(vd.and(vq1).lanewise(VectorOperators.BIT_COUNT));
sum2 = sum2.add(vd.and(vq2).lanewise(VectorOperators.BIT_COUNT));
sum3 = sum3.add(vd.and(vq3).lanewise(VectorOperators.BIT_COUNT));
}
subRet0 += sum0.reduceLanes(VectorOperators.ADD);
subRet1 += sum1.reduceLanes(VectorOperators.ADD);
subRet2 += sum2.reduceLanes(VectorOperators.ADD);
subRet3 += sum3.reduceLanes(VectorOperators.ADD);
// process scalar tail
in.seek(offset);
for (final int upperBound = size & -Long.BYTES; i < upperBound; i += Long.BYTES) {
final long value = in.readLong();
subRet0 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i) & value);
subRet1 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + size) & value);
subRet2 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + 2 * size) & value);
subRet3 += Long.bitCount((long) BitUtil.VH_LE_LONG.get(q, i + 3 * size) & value);
}
for (final int upperBound = size & -Integer.BYTES; i < upperBound; i += Integer.BYTES) {
final int value = in.readInt();
subRet0 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i) & value);
subRet1 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + size) & value);
subRet2 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + 2 * size) & value);
subRet3 += Integer.bitCount((int) BitUtil.VH_LE_INT.get(q, i + 3 * size) & value);
}
for (; i < size; i++) {
int dValue = in.readByte() & 0xFF;
subRet0 += Integer.bitCount((q[i] & dValue) & 0xFF);
subRet1 += Integer.bitCount((q[i + size] & dValue) & 0xFF);
subRet2 += Integer.bitCount((q[i + 2 * size] & dValue) & 0xFF);
subRet3 += Integer.bitCount((q[i + 3 * size] & dValue) & 0xFF);
}
return subRet0 + (subRet1 << 1) + (subRet2 << 2) + (subRet3 << 3);
}
@Override
public boolean quantizeScoreBulk(byte[] q, int count, float[] scores) throws IOException {
assert q.length == length;
// 128 / 8 == 16
if (length >= 16 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 256) {
quantizeScore256Bulk(q, count, scores);
return true;
} else if (PanamaESVectorUtilSupport.VECTOR_BITSIZE == 128) {
quantizeScore128Bulk(q, count, scores);
return true;
}
}
return false;
}
private void quantizeScore128Bulk(byte[] q, int count, float[] scores) throws IOException {
for (int iter = 0; iter < count; iter++) {
scores[iter] = quantizeScoreSymmetric128(q);
}
}
private void quantizeScore256Bulk(byte[] q, int count, float[] scores) throws IOException {
for (int iter = 0; iter < count; iter++) {
scores[iter] = quantizeScoreSymmetric256(q);
}
}
@Override
public float scoreBulk(
byte[] q,
float queryLowerInterval,
float queryUpperInterval,
int queryComponentSum,
float queryAdditionalCorrection,
VectorSimilarityFunction similarityFunction,
float centroidDp,
float[] scores
) throws IOException {
assert q.length == length;
// 128 / 8 == 16
if (length >= 16 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 256) {
return score256Bulk(
q,
queryLowerInterval,
queryUpperInterval,
queryComponentSum,
queryAdditionalCorrection,
similarityFunction,
centroidDp,
scores
);
} else if (PanamaESVectorUtilSupport.VECTOR_BITSIZE == 128) {
return score128Bulk(
q,
queryLowerInterval,
queryUpperInterval,
queryComponentSum,
queryAdditionalCorrection,
similarityFunction,
centroidDp,
scores
);
}
}
return Float.NEGATIVE_INFINITY;
}
private float score128Bulk(
byte[] q,
float queryLowerInterval,
float queryUpperInterval,
int queryComponentSum,
float queryAdditionalCorrection,
VectorSimilarityFunction similarityFunction,
float centroidDp,
float[] scores
) throws IOException {
quantizeScore128Bulk(q, BULK_SIZE, scores);
int limit = FLOAT_SPECIES_128.loopBound(BULK_SIZE);
int i = 0;
long offset = in.getFilePointer();
float ay = queryLowerInterval;
float ly = (queryUpperInterval - ay) * FOUR_BIT_SCALE;
float y1 = queryComponentSum;
float maxScore = Float.NEGATIVE_INFINITY;
for (; i < limit; i += FLOAT_SPECIES_128.length()) {
var ax = FloatVector.fromMemorySegment(FLOAT_SPECIES_128, memorySegment, offset + i * Float.BYTES, ByteOrder.LITTLE_ENDIAN);
var lx = FloatVector.fromMemorySegment(
FLOAT_SPECIES_128,
memorySegment,
offset + 4 * BULK_SIZE + i * Float.BYTES,
ByteOrder.LITTLE_ENDIAN
).sub(ax).mul(FOUR_BIT_SCALE);
var targetComponentSums = ShortVector.fromMemorySegment(
SHORT_SPECIES_128,
memorySegment,
offset + 8 * BULK_SIZE + i * Short.BYTES,
ByteOrder.LITTLE_ENDIAN
).convert(VectorOperators.S2I, 0).reinterpretAsInts().and(0xffff).convert(VectorOperators.I2F, 0);
var additionalCorrections = FloatVector.fromMemorySegment(
FLOAT_SPECIES_128,
memorySegment,
offset + 10 * BULK_SIZE + i * Float.BYTES,
ByteOrder.LITTLE_ENDIAN
);
var qcDist = FloatVector.fromArray(FLOAT_SPECIES_128, scores, i);
// ax * ay * dimensions + ay * lx * (float) targetComponentSum + ax * ly * y1 + lx * ly *
// qcDist;
var res1 = ax.mul(ay).mul(dimensions);
var res2 = lx.mul(ay).mul(targetComponentSums);
var res3 = ax.mul(ly).mul(y1);
var res4 = lx.mul(ly).mul(qcDist);
var res = res1.add(res2).add(res3).add(res4);
// For euclidean, we need to invert the score and apply the additional correction, which is
// assumed to be the squared l2norm of the centroid centered vectors.
if (similarityFunction == EUCLIDEAN) {
res = res.mul(-2).add(additionalCorrections).add(queryAdditionalCorrection).add(1f);
res = FloatVector.broadcast(FLOAT_SPECIES_128, 1).div(res).max(0);
maxScore = Math.max(maxScore, res.reduceLanes(VectorOperators.MAX));
res.intoArray(scores, i);
} else {
// For cosine and max inner product, we need to apply the additional correction, which is
// assumed to be the non-centered dot-product between the vector and the centroid
res = res.add(queryAdditionalCorrection).add(additionalCorrections).sub(centroidDp);
if (similarityFunction == MAXIMUM_INNER_PRODUCT) {
res.intoArray(scores, i);
// not sure how to do it better
for (int j = 0; j < FLOAT_SPECIES_128.length(); j++) {
scores[i + j] = VectorUtil.scaleMaxInnerProductScore(scores[i + j]);
maxScore = Math.max(maxScore, scores[i + j]);
}
} else {
res = res.add(1f).mul(0.5f).max(0);
res.intoArray(scores, i);
maxScore = Math.max(maxScore, res.reduceLanes(VectorOperators.MAX));
}
}
}
in.seek(offset + 14L * BULK_SIZE);
return maxScore;
}
private float score256Bulk(
byte[] q,
float queryLowerInterval,
float queryUpperInterval,
int queryComponentSum,
float queryAdditionalCorrection,
VectorSimilarityFunction similarityFunction,
float centroidDp,
float[] scores
) throws IOException {
quantizeScore256Bulk(q, BULK_SIZE, scores);
int limit = FLOAT_SPECIES_256.loopBound(BULK_SIZE);
int i = 0;
long offset = in.getFilePointer();
float ay = queryLowerInterval;
float ly = (queryUpperInterval - ay) * FOUR_BIT_SCALE;
float y1 = queryComponentSum;
float maxScore = Float.NEGATIVE_INFINITY;
for (; i < limit; i += FLOAT_SPECIES_256.length()) {
var ax = FloatVector.fromMemorySegment(FLOAT_SPECIES_256, memorySegment, offset + i * Float.BYTES, ByteOrder.LITTLE_ENDIAN);
var lx = FloatVector.fromMemorySegment(
FLOAT_SPECIES_256,
memorySegment,
offset + 4 * BULK_SIZE + i * Float.BYTES,
ByteOrder.LITTLE_ENDIAN
).sub(ax).mul(FOUR_BIT_SCALE);
var targetComponentSums = ShortVector.fromMemorySegment(
SHORT_SPECIES_256,
memorySegment,
offset + 8 * BULK_SIZE + i * Short.BYTES,
ByteOrder.LITTLE_ENDIAN
).convert(VectorOperators.S2I, 0).reinterpretAsInts().and(0xffff).convert(VectorOperators.I2F, 0);
var additionalCorrections = FloatVector.fromMemorySegment(
FLOAT_SPECIES_256,
memorySegment,
offset + 10 * BULK_SIZE + i * Float.BYTES,
ByteOrder.LITTLE_ENDIAN
);
var qcDist = FloatVector.fromArray(FLOAT_SPECIES_256, scores, i);
// ax * ay * dimensions + ay * lx * (float) targetComponentSum + ax * ly * y1 + lx * ly *
// qcDist;
var res1 = ax.mul(ay).mul(dimensions);
var res2 = lx.mul(ay).mul(targetComponentSums);
var res3 = ax.mul(ly).mul(y1);
var res4 = lx.mul(ly).mul(qcDist);
var res = res1.add(res2).add(res3).add(res4);
// For euclidean, we need to invert the score and apply the additional correction, which is
// assumed to be the squared l2norm of the centroid centered vectors.
if (similarityFunction == EUCLIDEAN) {
res = res.mul(-2).add(additionalCorrections).add(queryAdditionalCorrection).add(1f);
res = FloatVector.broadcast(FLOAT_SPECIES_256, 1).div(res).max(0);
maxScore = Math.max(maxScore, res.reduceLanes(VectorOperators.MAX));
res.intoArray(scores, i);
} else {
// For cosine and max inner product, we need to apply the additional correction, which is
// assumed to be the non-centered dot-product between the vector and the centroid
res = res.add(queryAdditionalCorrection).add(additionalCorrections).sub(centroidDp);
if (similarityFunction == MAXIMUM_INNER_PRODUCT) {
res.intoArray(scores, i);
// not sure how to do it better
for (int j = 0; j < FLOAT_SPECIES_256.length(); j++) {
scores[i + j] = VectorUtil.scaleMaxInnerProductScore(scores[i + j]);
maxScore = Math.max(maxScore, scores[i + j]);
}
} else {
res = res.add(1f).mul(0.5f).max(0);
maxScore = Math.max(maxScore, res.reduceLanes(VectorOperators.MAX));
res.intoArray(scores, i);
}
}
}
in.seek(offset + 14L * BULK_SIZE);
return maxScore;
}
}
| MSInt4SymmetricESNextOSQVectorsScorer |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/test/java/io/github/resilience4j/service/test/BeanContextPropagator.java | {
"start": 208,
"end": 563
} | class ____ implements ContextPropagator {
@Override
public Supplier<Optional> retrieve() {
return () -> Optional.empty();
}
@Override
public Consumer<Optional> copy() {
return (t) -> {
};
}
@Override
public Consumer<Optional> clear() {
return (t) -> {
};
}
}
| BeanContextPropagator |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/beans/intro/AImplementationString.java | {
"start": 246,
"end": 574
} | class ____ implements AInterface<String, String> {
private String s1,s2;
@Override
public void set(String s, String s2) {
s1=s;
this.s2=s2;
}
@Override
public String get(String s, String s2) {
return s.getClass().getName() +","+s.getClass().getName();
}
}
| AImplementationString |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameActionTests.java | {
"start": 716,
"end": 1522
} | class ____ extends ESTestCase {
private RestUpdateConnectorIndexNameAction action;
@Override
public void setUp() throws Exception {
super.setUp();
action = new RestUpdateConnectorIndexNameAction();
}
public void testPrepareRequest_emptyPayload_badRequestError() {
RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT)
.withPath("/_connector/123/_index_name")
.build();
final ElasticsearchParseException e = expectThrows(
ElasticsearchParseException.class,
() -> action.prepareRequest(request, mock(NodeClient.class))
);
assertThat(e, hasToString(containsString("request body is required")));
}
}
| RestUpdateConnectorIndexNameActionTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java | {
"start": 430,
"end": 746
} | class ____ extends ActionType<InvalidateTokenResponse> {
public static final String NAME = "cluster:admin/xpack/security/token/invalidate";
public static final InvalidateTokenAction INSTANCE = new InvalidateTokenAction();
private InvalidateTokenAction() {
super(NAME);
}
}
| InvalidateTokenAction |
java | netty__netty | codec-socks/src/test/java/io/netty/handler/codec/socksx/v5/DefaultSocks5InitialRequestTest.java | {
"start": 781,
"end": 1058
} | class ____ {
@Test
public void testConstructorParamsAreNotEmpty() {
try {
new DefaultSocks5InitialRequest();
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
}
| DefaultSocks5InitialRequestTest |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-netty4/src/main/java/org/apache/dubbo/remoting/transport/netty4/ssl/SslContexts.java | {
"start": 1882,
"end": 7869
} | class ____ {
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(SslContexts.class);
public static SslContext buildServerSslContext(ProviderCert providerConnectionConfig) {
SslContextBuilder sslClientContextBuilder;
InputStream serverKeyCertChainPathStream = null;
InputStream serverPrivateKeyPathStream = null;
InputStream serverTrustCertStream = null;
try {
serverKeyCertChainPathStream = providerConnectionConfig.getKeyCertChainInputStream();
serverPrivateKeyPathStream = providerConnectionConfig.getPrivateKeyInputStream();
serverTrustCertStream = providerConnectionConfig.getTrustCertInputStream();
String password = providerConnectionConfig.getPassword();
if (password != null) {
sslClientContextBuilder =
SslContextBuilder.forServer(serverKeyCertChainPathStream, serverPrivateKeyPathStream, password);
} else {
sslClientContextBuilder =
SslContextBuilder.forServer(serverKeyCertChainPathStream, serverPrivateKeyPathStream);
}
if (serverTrustCertStream != null) {
sslClientContextBuilder.trustManager(serverTrustCertStream);
if (providerConnectionConfig.getAuthPolicy() == AuthPolicy.CLIENT_AUTH) {
sslClientContextBuilder.clientAuth(ClientAuth.REQUIRE);
} else {
sslClientContextBuilder.clientAuth(ClientAuth.OPTIONAL);
}
}
} catch (Exception e) {
throw new IllegalArgumentException("Could not find certificate file or the certificate is invalid.", e);
} finally {
safeCloseStream(serverTrustCertStream);
safeCloseStream(serverKeyCertChainPathStream);
safeCloseStream(serverPrivateKeyPathStream);
}
try {
return sslClientContextBuilder
.sslProvider(findSslProvider())
.ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE)
.applicationProtocolConfig(new ApplicationProtocolConfig(
ApplicationProtocolConfig.Protocol.ALPN,
ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE,
ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT,
ApplicationProtocolNames.HTTP_2,
ApplicationProtocolNames.HTTP_1_1))
.build();
} catch (SSLException e) {
throw new IllegalStateException("Build SslSession failed.", e);
}
}
public static SslContext buildClientSslContext(URL url) {
CertManager certManager =
url.getOrDefaultFrameworkModel().getBeanFactory().getBean(CertManager.class);
Cert consumerConnectionConfig = certManager.getConsumerConnectionConfig(url);
if (consumerConnectionConfig == null) {
return null;
}
SslContextBuilder builder = SslContextBuilder.forClient();
InputStream clientTrustCertCollectionPath = null;
InputStream clientCertChainFilePath = null;
InputStream clientPrivateKeyFilePath = null;
try {
clientTrustCertCollectionPath = consumerConnectionConfig.getTrustCertInputStream();
if (clientTrustCertCollectionPath != null) {
builder.trustManager(clientTrustCertCollectionPath);
}
clientCertChainFilePath = consumerConnectionConfig.getKeyCertChainInputStream();
clientPrivateKeyFilePath = consumerConnectionConfig.getPrivateKeyInputStream();
if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) {
String password = consumerConnectionConfig.getPassword();
if (password != null) {
builder.keyManager(clientCertChainFilePath, clientPrivateKeyFilePath, password);
} else {
builder.keyManager(clientCertChainFilePath, clientPrivateKeyFilePath);
}
}
} catch (Exception e) {
throw new IllegalArgumentException("Could not find certificate file or find invalid certificate.", e);
} finally {
safeCloseStream(clientTrustCertCollectionPath);
safeCloseStream(clientCertChainFilePath);
safeCloseStream(clientPrivateKeyFilePath);
}
try {
return builder.sslProvider(findSslProvider()).build();
} catch (SSLException e) {
throw new IllegalStateException("Build SslSession failed.", e);
}
}
/**
* Returns OpenSSL if available, otherwise returns the JDK provider.
*/
private static SslProvider findSslProvider() {
if (OpenSsl.isAvailable()) {
logger.debug("Using OPENSSL provider.");
return SslProvider.OPENSSL;
}
if (checkJdkProvider()) {
logger.debug("Using JDK provider.");
return SslProvider.JDK;
}
throw new IllegalStateException(
"Could not find any valid TLS provider, please check your dependency or deployment environment, "
+ "usually netty-tcnative, Conscrypt, or Jetty NPN/ALPN is needed.");
}
private static boolean checkJdkProvider() {
Provider[] jdkProviders = Security.getProviders("SSLContext.TLS");
return (jdkProviders != null && jdkProviders.length > 0);
}
private static void safeCloseStream(InputStream stream) {
if (stream == null) {
return;
}
try {
stream.close();
} catch (IOException e) {
logger.warn(TRANSPORT_FAILED_CLOSE_STREAM, "", "", "Failed to close a stream.", e);
}
}
}
| SslContexts |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/AbstractScheduledServiceTest.java | {
"start": 3290,
"end": 10590
} | class ____ extends AbstractScheduledService {
@Override
protected void runOneIteration() throws Exception {}
@Override
protected Scheduler scheduler() {
return configuration;
}
@Override
protected ScheduledExecutorService executor() {
return executor;
}
}
public void testFailOnExceptionFromRun() throws Exception {
TestService service = new TestService();
service.runException = new Exception();
service.startAsync().awaitRunning();
service.runFirstBarrier.await();
service.runSecondBarrier.await();
assertThrows(CancellationException.class, () -> future.get());
// An execution exception holds a runtime exception (from throwables.propagate) that holds our
// original exception.
assertEquals(service.runException, service.failureCause());
assertEquals(Service.State.FAILED, service.state());
}
public void testFailOnExceptionFromStartUp() {
TestService service = new TestService();
service.startUpException = new Exception();
IllegalStateException e =
assertThrows(IllegalStateException.class, () -> service.startAsync().awaitRunning());
assertThat(e).hasCauseThat().isEqualTo(service.startUpException);
assertEquals(0, service.numberOfTimesRunCalled.get());
assertEquals(Service.State.FAILED, service.state());
}
public void testFailOnErrorFromStartUpListener() throws InterruptedException {
Error error = new Error();
CountDownLatch latch = new CountDownLatch(1);
TestService service = new TestService();
service.addListener(
new Service.Listener() {
@Override
public void running() {
throw error;
}
@Override
public void failed(State from, Throwable failure) {
assertEquals(State.RUNNING, from);
assertEquals(error, failure);
latch.countDown();
}
},
directExecutor());
service.startAsync();
latch.await();
assertEquals(0, service.numberOfTimesRunCalled.get());
assertEquals(Service.State.FAILED, service.state());
}
public void testFailOnExceptionFromShutDown() throws Exception {
TestService service = new TestService();
service.shutDownException = new Exception();
service.startAsync().awaitRunning();
service.runFirstBarrier.await();
service.stopAsync();
service.runSecondBarrier.await();
IllegalStateException e =
assertThrows(IllegalStateException.class, () -> service.awaitTerminated());
assertThat(e).hasCauseThat().isEqualTo(service.shutDownException);
assertEquals(Service.State.FAILED, service.state());
}
public void testRunOneIterationCalledMultipleTimes() throws Exception {
TestService service = new TestService();
service.startAsync().awaitRunning();
for (int i = 1; i < 10; i++) {
service.runFirstBarrier.await();
assertEquals(i, service.numberOfTimesRunCalled.get());
service.runSecondBarrier.await();
}
service.runFirstBarrier.await();
service.stopAsync();
service.runSecondBarrier.await();
service.stopAsync().awaitTerminated();
}
public void testExecutorOnlyCalledOnce() throws Exception {
TestService service = new TestService();
service.startAsync().awaitRunning();
// It should be called once during startup.
assertEquals(1, service.numberOfTimesExecutorCalled.get());
for (int i = 1; i < 10; i++) {
service.runFirstBarrier.await();
assertEquals(i, service.numberOfTimesRunCalled.get());
service.runSecondBarrier.await();
}
service.runFirstBarrier.await();
service.stopAsync();
service.runSecondBarrier.await();
service.stopAsync().awaitTerminated();
// Only called once overall.
assertEquals(1, service.numberOfTimesExecutorCalled.get());
}
public void testDefaultExecutorIsShutdownWhenServiceIsStopped() throws Exception {
AtomicReference<ScheduledExecutorService> executor = Atomics.newReference();
AbstractScheduledService service =
new AbstractScheduledService() {
@Override
protected void runOneIteration() throws Exception {}
@Override
protected ScheduledExecutorService executor() {
executor.set(super.executor());
return executor.get();
}
@Override
protected Scheduler scheduler() {
return newFixedDelaySchedule(0, 1, MILLISECONDS);
}
};
service.startAsync();
assertFalse(service.executor().isShutdown());
service.awaitRunning();
service.stopAsync();
service.awaitTerminated();
assertTrue(executor.get().awaitTermination(100, MILLISECONDS));
}
public void testDefaultExecutorIsShutdownWhenServiceFails() throws Exception {
AtomicReference<ScheduledExecutorService> executor = Atomics.newReference();
AbstractScheduledService service =
new AbstractScheduledService() {
@Override
protected void startUp() throws Exception {
throw new Exception("Failed");
}
@Override
protected void runOneIteration() throws Exception {}
@Override
protected ScheduledExecutorService executor() {
executor.set(super.executor());
return executor.get();
}
@Override
protected Scheduler scheduler() {
return newFixedDelaySchedule(0, 1, MILLISECONDS);
}
};
assertThrows(IllegalStateException.class, () -> service.startAsync().awaitRunning());
assertTrue(executor.get().awaitTermination(100, MILLISECONDS));
}
public void testSchedulerOnlyCalledOnce() throws Exception {
TestService service = new TestService();
service.startAsync().awaitRunning();
// It should be called once during startup.
assertEquals(1, service.numberOfTimesSchedulerCalled.get());
for (int i = 1; i < 10; i++) {
service.runFirstBarrier.await();
assertEquals(i, service.numberOfTimesRunCalled.get());
service.runSecondBarrier.await();
}
service.runFirstBarrier.await();
service.stopAsync();
service.runSecondBarrier.await();
service.awaitTerminated();
// Only called once overall.
assertEquals(1, service.numberOfTimesSchedulerCalled.get());
}
public void testTimeout() {
// Create a service whose executor will never run its commands
Service service =
new AbstractScheduledService() {
@Override
protected Scheduler scheduler() {
return Scheduler.newFixedDelaySchedule(0, 1, NANOSECONDS);
}
@Override
protected ScheduledExecutorService executor() {
return TestingExecutors.noOpScheduledExecutor();
}
@Override
protected void runOneIteration() throws Exception {}
@Override
protected String serviceName() {
return "Foo";
}
};
TimeoutException e =
assertThrows(
TimeoutException.class, () -> service.startAsync().awaitRunning(1, MILLISECONDS));
assertThat(e)
.hasMessageThat()
.isEqualTo("Timed out waiting for Foo [STARTING] to reach the RUNNING state.");
}
private | NullService |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/ComparatorFactory_floatComparatorWithPrecision_Test.java | {
"start": 1261,
"end": 4089
} | class ____ {
private final ComparatorFactory INSTANCE = ComparatorFactory.INSTANCE;
@ParameterizedTest
@CsvSource({
"1.0, 1.1, 0.1",
"0.111, 0.110, 0.001",
"0.12345, 0.12346, 0.00001",
"0.7654321, 0.7654320, 0.0000001",
"1.2464, 1.2463, 0.0001" })
void should_evaluate_to_be_equal(Float float1, Float float2, Float precision) {
// GIVEN
Comparator<Float> comparator = INSTANCE.floatComparatorWithPrecision(precision);
// WHEN
int comparisonValue = comparator.compare(float1, float2);
int inverseComparisonValue = comparator.compare(float2, float1);
// THEN
then(comparisonValue).isZero();
then(inverseComparisonValue).isZero();
}
@ParameterizedTest
@CsvSource({
"1.1, 1.0, 0.05",
"0.111, 0.110, 0.00099",
"0.12346, 0.12345, 0.0000099",
"0.7654321, 0.7654320, 0.000000099",
"0.7654321, 0.7654320, 9e-8",
"1.2464, 1.2463, 0.000099" })
void should_evaluate_given_value_to_different(Float value, Float other, Float precision) {
// GIVEN
Comparator<Float> comparator = INSTANCE.floatComparatorWithPrecision(precision);
// WHEN
int comparisonValue = comparator.compare(value, other);
int inverseComparisonValue = comparator.compare(other, value);
// THEN
then(comparisonValue).isOne();
then(inverseComparisonValue).isEqualTo(-1);
}
@ParameterizedTest
@MethodSource
void should_follow_java_behavior_when_dealing_with_infinity_and_NaN(Float value1, Float value2) {
// GIVEN
Comparator<Float> comparator = INSTANCE.floatComparatorWithPrecision(1f);
// WHEN
int comparisonValue = comparator.compare(value1, value2);
int javaComparisonValue = value1.compareTo(value2);
// THEN
then(comparisonValue).isEqualTo(javaComparisonValue);
}
static Stream<Arguments> should_follow_java_behavior_when_dealing_with_infinity_and_NaN() {
return Stream.of(arguments(POSITIVE_INFINITY, NEGATIVE_INFINITY),
arguments(NEGATIVE_INFINITY, POSITIVE_INFINITY),
arguments(POSITIVE_INFINITY, POSITIVE_INFINITY),
arguments(NEGATIVE_INFINITY, NEGATIVE_INFINITY),
arguments(NaN, POSITIVE_INFINITY),
arguments(NaN, NEGATIVE_INFINITY),
arguments(NaN, NaN));
}
@ParameterizedTest
@MethodSource
void should_fail_for_invalid_precision(Float precision) {
// GIVEN
Comparator<Float> comparator = INSTANCE.floatComparatorWithPrecision(precision);
// WHEN/THEN
assertThatIllegalArgumentException().isThrownBy(() -> comparator.compare(1f, 2f));
}
static Stream<Float> should_fail_for_invalid_precision() {
return Stream.of(NaN, POSITIVE_INFINITY, NEGATIVE_INFINITY);
}
}
| ComparatorFactory_floatComparatorWithPrecision_Test |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/OrderedMethodTests.java | {
"start": 18123,
"end": 19185
} | class ____ {
@BeforeEach
void trackInvocations(TestInfo testInfo) {
callSequence.add(testInfo.getDisplayName());
threadNames.add(Thread.currentThread().getName());
}
@TestFactory
DynamicTest b() {
return dynamicTest("dynamic", () -> {
});
}
@DisplayName("$")
@Test
void $() {
}
@DisplayName("___")
@Test
void ___() {
}
@DisplayName("AAA")
@Test
void AAA() {
}
@DisplayName("ZZ_Top")
@Test
void ZZ_Top() {
}
@DisplayName("a1")
@Test
void a1() {
}
@DisplayName("a2")
@Test
void a2() {
}
@DisplayName("zzz")
@RepeatedTest(1)
void zzz() {
}
@Test
@DisplayName("⑦ϼ\uD83D\uDC69\u200D⚕\uD83E\uDDD8\u200D♂")
void special_characters() {
}
@Test
void no_display_name_attribute_1() {
}
@Test
void no_display_name_attribute_2() {
}
@Test
void No_display_name_attribute_1_caps() {
}
@Test
void No_display_name_attribute_2_caps() {
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@TestMethodOrder(OrderAnnotation.class)
static | DisplayNameTestCase |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java | {
"start": 9633,
"end": 13226
} | class ____ {
private Boolean enabled = null;
private DataStreamLifecycle.Builder lifecycleBuilder = null;
private Builder() {}
private Builder(Template template) {
if (template != null) {
enabled = template.enabled.get();
lifecycleBuilder = template.lifecycle.mapAndGet(l -> DataStreamLifecycle.builder(l));
}
}
private Builder(DataStreamFailureStore failureStore) {
if (failureStore != null) {
enabled = failureStore.enabled;
lifecycleBuilder = failureStore.lifecycle == null ? null : DataStreamLifecycle.builder(failureStore.lifecycle);
}
}
public Builder enabled(Boolean enabled) {
this.enabled = enabled;
return this;
}
/**
* Composes the provided enabled value with the current one. Because enabled is a resettable boolean, if it is defined
* it will overwrite the current value.
*/
public Builder enabled(ResettableValue<Boolean> enabled) {
if (enabled.isDefined()) {
this.enabled = enabled.get();
}
return this;
}
public Builder lifecycle(DataStreamLifecycle lifecycle) {
this.lifecycleBuilder = lifecycle == null ? null : DataStreamLifecycle.builder(lifecycle);
return this;
}
/**
* Composes the provided lifecycle value with the current one. Because lifecycle is a resettable template that can be merged,
* if it is defined it will delegate to {@link DataStreamLifecycle.Builder#composeTemplate(DataStreamLifecycle.Template)} to
* correctly compose the contents.
*/
public Builder lifecycle(ResettableValue<DataStreamLifecycle.Template> lifecycle) {
if (lifecycle.shouldReset()) {
this.lifecycleBuilder = null;
} else if (lifecycle.isDefined()) {
if (this.lifecycleBuilder == null) {
this.lifecycleBuilder = DataStreamLifecycle.builder(lifecycle.get());
} else {
this.lifecycleBuilder.composeTemplate(lifecycle.get());
}
}
return this;
}
/**
* Composes the provided failure store template with this builder.
*/
public Builder composeTemplate(DataStreamFailureStore.Template failureStore) {
this.enabled(failureStore.enabled());
this.lifecycle(failureStore.lifecycle());
return this;
}
/**
* Builds a valid DataStreamFailureStore configuration.
* @return the object or null if all the values were null.
*/
@Nullable
public DataStreamFailureStore build() {
if (enabled == null && lifecycleBuilder == null) {
return null;
}
return new DataStreamFailureStore(enabled, lifecycleBuilder == null ? null : lifecycleBuilder.build());
}
/**
* Builds a valid template for the DataStreamFailureStore configuration.
* @return the template or null if all the values were null.
*/
@Nullable
public DataStreamFailureStore.Template buildTemplate() {
if (enabled == null && lifecycleBuilder == null) {
return null;
}
return new Template(enabled, lifecycleBuilder == null ? null : lifecycleBuilder.buildTemplate());
}
}
}
| Builder |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEventTests.java | {
"start": 988,
"end": 2538
} | class ____ extends ESTestCase {
public void testToXContentBodyFiltering() throws Exception {
HttpResponse response = new HttpResponse(500);
String body = randomAlphaOfLength(20);
HttpRequest request = HttpRequest.builder("localhost", 1234).body(body).build();
IncidentEvent incidentEvent = new IncidentEvent("description", "eventtype", null, null, null, null, false, null, null);
SentEvent sentEvent = SentEvent.responded(incidentEvent, request, response);
try (XContentBuilder builder = jsonBuilder()) {
WatcherParams params = WatcherParams.builder().hideSecrets(false).build();
sentEvent.toXContent(builder, params);
assertThat(Strings.toString(builder), containsString(body));
try (
XContentParser parser = builder.contentType()
.xContent()
.createParser(XContentParserConfiguration.EMPTY, Strings.toString(builder))
) {
parser.map();
}
}
try (XContentBuilder builder = jsonBuilder()) {
sentEvent.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertThat(Strings.toString(builder), not(containsString(body)));
try (
XContentParser parser = builder.contentType()
.xContent()
.createParser(XContentParserConfiguration.EMPTY, Strings.toString(builder))
) {
parser.map();
}
}
}
}
| SentEventTests |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/util/MethodWalker.java | {
"start": 2627,
"end": 3694
} | class ____ {
private final String name;
private final Class<?>[] parameterTypes;
private Key(String name, Class<?>[] parameterTypes) {
this.name = name;
this.parameterTypes = parameterTypes;
}
private static Key of(Method method) {
return new Key(method.getName(), method.getParameterTypes());
}
@Override
@SuppressWarnings({"EqualsWhichDoesntCheckParameterClass", "EqualsDoesntCheckParameterClass"})
public boolean equals(Object obj) {
Key key = (Key) obj;
return name.equals(key.name) && Arrays.equals(parameterTypes, key.parameterTypes);
}
@Override
public int hashCode() {
int result = name.hashCode();
for (Class<?> type : parameterTypes) {
result = 31 * result + type.hashCode();
}
return result;
}
@Override
public String toString() {
return name + Arrays.toString(parameterTypes);
}
}
}
| Key |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryBoxedVariableTest.java | {
"start": 14362,
"end": 14865
} | interface ____ {
int f(Integer i);
}
Test() {
F f =
(Integer i) -> {
return i;
};
}
}
""")
.doTest();
}
@Test
public void positiveFactory() {
compilationTestHelper
.addSourceLines(
"Foo.java",
"""
import com.google.auto.value.AutoValue;
@AutoValue
abstract | F |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/TestData.java | {
"start": 8447,
"end": 11747
} | class ____
implements MutableObjectIterator<Tuple2<Integer, Integer>> {
private final int key;
private final int value;
private int numLeft;
public ConstantIntIntTuplesIterator(int key, int value, int count) {
this.key = key;
this.value = value;
this.numLeft = count;
}
@Override
public Tuple2<Integer, Integer> next(Tuple2<Integer, Integer> reuse) {
if (this.numLeft > 0) {
this.numLeft--;
reuse.setField(this.key, 0);
reuse.setField(this.value, 1);
return reuse;
} else {
return null;
}
}
@Override
public Tuple2<Integer, Integer> next() {
return next(new Tuple2<>(0, 0));
}
}
// ----Tuple2<Integer, String>
private static final TupleTypeInfo<Tuple2<Integer, String>> typeInfoIntString =
TupleTypeInfo.getBasicTupleTypeInfo(Integer.class, String.class);
private static final TypeSerializerFactory<Tuple2<Integer, String>> serializerFactoryIntString =
new MockTupleSerializerFactory(typeInfoIntString);
public static TupleTypeInfo<Tuple2<Integer, String>> getIntStringTupleTypeInfo() {
return typeInfoIntString;
}
public static TypeSerializerFactory<Tuple2<Integer, String>>
getIntStringTupleSerializerFactory() {
return serializerFactoryIntString;
}
public static TypeSerializer<Tuple2<Integer, String>> getIntStringTupleSerializer() {
return serializerFactoryIntString.getSerializer();
}
public static TypeComparator<Tuple2<Integer, String>> getIntStringTupleComparator() {
return getIntStringTupleTypeInfo()
.createComparator(new int[] {0}, new boolean[] {true}, 0, new ExecutionConfig());
}
public static MockTuple2Reader<Tuple2<Integer, String>> getIntStringTupleReader() {
return new MockTuple2Reader<Tuple2<Integer, String>>();
}
// ----Tuple2<Integer, Integer>
private static final TupleTypeInfo<Tuple2<Integer, Integer>> typeInfoIntInt =
TupleTypeInfo.getBasicTupleTypeInfo(Integer.class, Integer.class);
private static final TypeSerializerFactory<Tuple2<Integer, Integer>> serializerFactoryIntInt =
new MockTupleSerializerFactory(typeInfoIntInt);
public static TupleTypeInfo<Tuple2<Integer, Integer>> getIntIntTupleTypeInfo() {
return typeInfoIntInt;
}
public static TypeSerializerFactory<Tuple2<Integer, Integer>>
getIntIntTupleSerializerFactory() {
return serializerFactoryIntInt;
}
public static TypeSerializer<Tuple2<Integer, Integer>> getIntIntTupleSerializer() {
return getIntIntTupleSerializerFactory().getSerializer();
}
public static TypeComparator<Tuple2<Integer, Integer>> getIntIntTupleComparator() {
return getIntIntTupleTypeInfo()
.createComparator(new int[] {0}, new boolean[] {true}, 0, new ExecutionConfig());
}
public static MockTuple2Reader<Tuple2<Integer, Integer>> getIntIntTupleReader() {
return new MockTuple2Reader<>();
}
// ----Tuple2<?, ?>
private static | ConstantIntIntTuplesIterator |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/extension/MutableExtensionRegistry.java | {
"start": 10131,
"end": 10332
} | interface ____ {
static Entry of(Extension extension) {
Optional<Extension> value = Optional.of(extension);
return () -> value;
}
Optional<Extension> getExtension();
}
private static | Entry |
java | quarkusio__quarkus | extensions/security-webauthn/runtime/src/main/java/io/quarkus/security/webauthn/WebAuthnRunTimeConfig.java | {
"start": 3811,
"end": 3994
} | enum ____: " + this);
}
}
}
/**
* AuthenticatorAttachment
* https://www.w3.org/TR/webauthn/#enumdef-authenticatorattachment
*/
public | value |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/tool/schema/spi/SqlScriptCommandExtractor.java | {
"start": 1084,
"end": 1332
} | interface ____ extends Service {
/**
* Read the commands from the SQL script represented by the incoming reader, returning
* those commands as an array
*/
List<String> extractCommands(Reader reader, Dialect dialect);
}
| SqlScriptCommandExtractor |
java | apache__camel | test-infra/camel-test-infra-arangodb/src/main/java/org/apache/camel/test/infra/arangodb/services/ArangoDBRemoteInfraService.java | {
"start": 938,
"end": 1626
} | class ____ implements ArangoDBInfraService {
@Override
public int getPort() {
return port();
}
@Override
public int port() {
return Integer.valueOf(System.getProperty(ArangoDBProperties.ARANGODB_PORT));
}
@Override
public String host() {
return System.getProperty(ArangoDBProperties.ARANGODB_HOST);
}
@Override
public String getHost() {
return host();
}
@Override
public void registerProperties() {
// NO-OP
}
@Override
public void initialize() {
registerProperties();
}
@Override
public void shutdown() {
// NO-OP
}
}
| ArangoDBRemoteInfraService |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/functions/Predicate.java | {
"start": 853,
"end": 1146
} | interface ____<@NonNull T> {
/**
* Test the given input value and return a boolean.
* @param t the value
* @return the boolean result
* @throws Throwable if the implementation wishes to throw any type of exception
*/
boolean test(T t) throws Throwable;
}
| Predicate |
java | apache__camel | components/camel-solr/src/main/java/org/apache/camel/component/solr/SolrUtils.java | {
"start": 1250,
"end": 4506
} | class ____ {
public static Map<String, Object> parseAsMap(SolrResponse solrResponse) {
return solrResponse == null || solrResponse.getResponse() == null
? Map.of()
: solrResponse.getResponse().asShallowMap(true);
}
public static Map<String, Object> parseAsFlatMap(SolrResponse solrResponse) {
return parseAsFlatMap(parseAsMap(solrResponse));
}
public static Map<String, Object> parseAsFlatMap(Map<String, Object> map1) {
return parseAsFlatMap(map1, null, null);
}
public static Map<String, Object> parseAsFlatMap(Map<String, Object> map1, String startsWith, String endsWith) {
return map1.entrySet().stream()
.flatMap(SolrUtils::flatten)
.filter(e -> e != null && e.getKey() != null && e.getValue() != null)
.filter(e -> startsWith == null || e.getKey().startsWith(startsWith))
.filter(e -> endsWith == null || e.getKey().endsWith(endsWith))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
@SuppressWarnings("unchecked")
public static Stream<Map.Entry<String, Object>> flatten(Map.Entry<String, Object> entry) {
Map<String, Object> nestedMap = null;
if (entry.getValue() instanceof final SimpleOrderedMap<?> nested) {
nestedMap = (Map<String, Object>) nested.asShallowMap(true);
} else if (entry.getValue() instanceof final Map<?, ?> nested) {
nestedMap = (Map<String, Object>) nested;
}
if (nestedMap != null) {
return nestedMap.entrySet().stream()
.map(e -> new AbstractMap.SimpleEntry<>(entry.getKey() + "." + e.getKey(), e.getValue()))
.flatMap(SolrUtils::flatten);
}
return Stream.of(entry);
}
public static void addHeadersForCommit(ModifiableSolrParams solrParams) {
getHeadersForCommit("commit", null)
.forEach((k, v) -> solrParams.add(k, String.valueOf(v)));
}
public static void addHeadersForCommit(Exchange exchange) {
addHeadersForCommit(exchange, "commit");
}
public static void addHeadersForCommit(Exchange exchange, String commitParam) {
exchange.getMessage().getHeaders().putAll(getHeadersForCommit(commitParam));
}
public static Map<String, Object> getHeadersForCommit() {
return getHeadersForCommit("commit");
}
public static Map<String, Object> getHeadersForCommit(String commitParam) {
return getHeadersForCommit(commitParam, SolrConstants.HEADER_PARAM_PREFIX);
}
public static Map<String, Object> getHeadersForCommit(String commitParam, String prefix) {
String finalPrefix = prefix == null ? "" : prefix;
return Map.of(finalPrefix + commitParam, "true");
}
public static boolean isCollectionOfType(Object body, Class<?> clazz) {
return body instanceof Collection<?> collection && collection.stream().allMatch(clazz::isInstance);
}
public static <T> List<T> convertToList(Collection<T> collection) {
return collection instanceof List
? (List<T>) collection
: new ArrayList<>(collection);
}
}
| SolrUtils |
java | elastic__elasticsearch | libs/geo/src/main/java/org/elasticsearch/geometry/utils/Geohash.java | {
"start": 1047,
"end": 15448
} | class ____ {
private static final char[] BASE_32 = {
'0',
'1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'j',
'k',
'm',
'n',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z' };
private static final String BASE_32_STRING = new String(BASE_32);
/** maximum precision for geohash strings */
public static final int PRECISION = 12;
/** number of bits used for quantizing latitude and longitude values */
private static final short BITS = 32;
private static final double LAT_SCALE = (0x1L << (BITS - 1)) / 180.0D;
private static final double LAT_DECODE = 180.0D / (0x1L << BITS);
private static final double LON_SCALE = (0x1L << (BITS - 1)) / 360.0D;
private static final double LON_DECODE = 360.0D / (0x1L << BITS);
private static final short MORTON_OFFSET = (BITS << 1) - (PRECISION * 5);
/** Bit encoded representation of the latitude of north pole */
private static final long MAX_LAT_BITS = (0x1L << (PRECISION * 5 / 2)) - 1;
// Below code is adapted from the spatial4j library (GeohashUtils.java) Apache 2.0 Licensed
private static final double[] precisionToLatHeight, precisionToLonWidth;
static {
precisionToLatHeight = new double[PRECISION + 1];
precisionToLonWidth = new double[PRECISION + 1];
precisionToLatHeight[0] = 90 * 2;
precisionToLonWidth[0] = 180 * 2;
boolean even = false;
for (int i = 1; i <= PRECISION; i++) {
precisionToLatHeight[i] = precisionToLatHeight[i - 1] / (even ? 8 : 4);
precisionToLonWidth[i] = precisionToLonWidth[i - 1] / (even ? 4 : 8);
even = even == false;
}
}
// no instance:
private Geohash() {}
/** Returns a {@link Point} instance from a geohash string */
public static Point toPoint(final String geohash) throws IllegalArgumentException {
final long hash = mortonEncode(geohash);
return new Point(decodeLongitude(hash), decodeLatitude(hash));
}
/**
* Computes the bounding box coordinates from a given geohash
*
* @param geohash Geohash of the defined cell
* @return GeoRect rectangle defining the bounding box
*/
public static Rectangle toBoundingBox(final String geohash) {
// bottom left is the coordinate
Point bottomLeft = toPoint(geohash);
int len = Math.min(12, geohash.length());
long ghLong = longEncode(geohash, len);
// shift away the level
ghLong >>>= 4;
// deinterleave
long lon = BitUtil.deinterleave(ghLong >>> 1);
long lat = BitUtil.deinterleave(ghLong);
final int shift = (12 - len) * 5 + 2;
if (lat < MAX_LAT_BITS) {
// add 1 to lat and lon to get topRight
ghLong = BitUtil.interleave((int) (lat + 1), (int) (lon + 1)) << 4 | len;
final long mortonHash = BitUtil.flipFlop((ghLong >>> 4) << shift);
Point topRight = new Point(decodeLongitude(mortonHash), decodeLatitude(mortonHash));
return new Rectangle(bottomLeft.getX(), topRight.getX(), topRight.getY(), bottomLeft.getY());
} else {
// We cannot go north of north pole, so just using 90 degrees instead of calculating it using
// add 1 to lon to get lon of topRight, we are going to use 90 for lat
ghLong = BitUtil.interleave((int) lat, (int) (lon + 1)) << 4 | len;
final long mortonHash = BitUtil.flipFlop((ghLong >>> 4) << shift);
Point topRight = new Point(decodeLongitude(mortonHash), decodeLatitude(mortonHash));
return new Rectangle(bottomLeft.getX(), topRight.getX(), 90D, bottomLeft.getY());
}
}
/** Array of geohashes one level below the baseGeohash. Sorted. */
public static String[] getSubGeohashes(String baseGeohash) {
String[] hashes = new String[BASE_32.length];
for (int i = 0; i < BASE_32.length; i++) {// note: already sorted
char c = BASE_32[i];
hashes[i] = baseGeohash + c;
}
return hashes;
}
/**
* Calculate all neighbors of a given geohash cell.
*
* @param geohash Geohash of the defined cell
* @return geohashes of all neighbor cells
*/
public static Collection<? extends CharSequence> getNeighbors(String geohash) {
return addNeighborsAtLevel(geohash, geohash.length(), new ArrayList<CharSequence>(8));
}
/**
* Add all geohashes of the cells next to a given geohash to a list.
*
* @param geohash Geohash of a specified cell
* @param neighbors list to add the neighbors to
* @return the given list
*/
public static final <E extends Collection<? super String>> E addNeighbors(String geohash, E neighbors) {
return addNeighborsAtLevel(geohash, geohash.length(), neighbors);
}
/**
* Add all geohashes of the cells next to a given geohash to a list.
*
* @param geohash Geohash of a specified cell
* @param level level of the given geohash
* @param neighbors list to add the neighbors to
* @return the given list
*/
public static final <E extends Collection<? super String>> E addNeighborsAtLevel(String geohash, int level, E neighbors) {
String south = getNeighbor(geohash, level, 0, -1);
String north = getNeighbor(geohash, level, 0, +1);
if (north != null) {
neighbors.add(getNeighbor(north, level, -1, 0));
neighbors.add(north);
neighbors.add(getNeighbor(north, level, +1, 0));
}
neighbors.add(getNeighbor(geohash, level, -1, 0));
neighbors.add(getNeighbor(geohash, level, +1, 0));
if (south != null) {
neighbors.add(getNeighbor(south, level, -1, 0));
neighbors.add(south);
neighbors.add(getNeighbor(south, level, +1, 0));
}
return neighbors;
}
/**
* Calculate the geohash of a neighbor of a geohash
*
* @param geohash the geohash of a cell
* @param level level of the geohash
* @param dx delta of the first grid coordinate (must be -1, 0 or +1)
* @param dy delta of the second grid coordinate (must be -1, 0 or +1)
* @return geohash of the defined cell
*/
public static final String getNeighbor(String geohash, int level, int dx, int dy) {
int cell = BASE_32_STRING.indexOf(geohash.charAt(level - 1));
// Decoding the Geohash bit pattern to determine grid coordinates
int x0 = cell & 1; // first bit of x
int y0 = cell & 2; // first bit of y
int x1 = cell & 4; // second bit of x
int y1 = cell & 8; // second bit of y
int x2 = cell & 16; // third bit of x
// combine the bitpattern to grid coordinates.
// note that the semantics of x and y are swapping
// on each level
int x = x0 + (x1 / 2) + (x2 / 4);
int y = (y0 / 2) + (y1 / 4);
if (level == 1) {
// Root cells at north (namely "bcfguvyz") or at
// south (namely "0145hjnp") do not have neighbors
// in north/south direction
if ((dy < 0 && y == 0) || (dy > 0 && y == 3)) {
return null;
} else {
return Character.toString(encodeBase32(x + dx, y + dy));
}
} else {
// define grid coordinates for next level
final int nx = ((level % 2) == 1) ? (x + dx) : (x + dy);
final int ny = ((level % 2) == 1) ? (y + dy) : (y + dx);
// if the defined neighbor has the same parent a the current cell
// encode the cell directly. Otherwise find the cell next to this
// cell recursively. Since encoding wraps around within a cell
// it can be encoded here.
// xLimit and YLimit must always be respectively 7 and 3
// since x and y semantics are swapping on each level.
if (nx >= 0 && nx <= 7 && ny >= 0 && ny <= 3) {
return geohash.substring(0, level - 1) + encodeBase32(nx, ny);
} else {
String neighbor = getNeighbor(geohash, level - 1, dx, dy);
return (neighbor != null) ? neighbor + encodeBase32(nx, ny) : neighbor;
}
}
}
/**
* Encode a string geohash to the geohash based long format (lon/lat interleaved, 4 least significant bits = level)
*/
public static final long longEncode(String hash) {
return longEncode(hash, hash.length());
}
/**
* Encode lon/lat to the geohash based long format (lon/lat interleaved, 4 least significant bits = level)
*/
public static final long longEncode(final double lon, final double lat, final int level) {
// shift to appropriate level
final short msf = (short) (((12 - level) * 5) + (MORTON_OFFSET - 2));
return ((encodeLatLon(lat, lon) >>> msf) << 4) | level;
}
/**
* Encode to a geohash string from full resolution longitude, latitude)
*/
public static final String stringEncode(final double lon, final double lat) {
return stringEncode(lon, lat, 12);
}
/**
* Encode to a level specific geohash string from full resolution longitude, latitude
*/
public static final String stringEncode(final double lon, final double lat, final int level) {
// convert to geohashlong
long interleaved = encodeLatLon(lat, lon);
interleaved >>>= (((PRECISION - level) * 5) + (MORTON_OFFSET - 2));
final long geohash = (interleaved << 4) | level;
return stringEncode(geohash);
}
/**
* Encode to a geohash string from the geohash based long format
*/
public static final String stringEncode(long geoHashLong) {
int level = (int) geoHashLong & 15;
geoHashLong >>>= 4;
char[] chars = new char[level];
do {
chars[--level] = BASE_32[(int) (geoHashLong & 31L)];
geoHashLong >>>= 5;
} while (level > 0);
return new String(chars);
}
/** base32 encode at the given grid coordinate */
private static char encodeBase32(int x, int y) {
return BASE_32[((x & 1) + ((y & 1) * 2) + ((x & 2) * 2) + ((y & 2) * 4) + ((x & 4) * 4)) % 32];
}
/**
* Encode from geohash string to the geohash based long format (lon/lat interleaved, 4 least significant bits = level)
*/
private static long longEncode(final String hash, int length) {
int level = length - 1;
long b;
long l = 0L;
for (char c : hash.toCharArray()) {
b = (long) (BASE_32_STRING.indexOf(c));
l |= (b << (level-- * 5));
if (level < 0) {
// We cannot handle more than 12 levels
break;
}
}
return (l << 4) | length;
}
/**
* Encode to a morton long value from a given geohash string
*/
public static long mortonEncode(final String hash) {
if (hash.isEmpty()) {
throw new IllegalArgumentException("empty geohash");
}
int level = 11;
long b;
long l = 0L;
for (char c : hash.toCharArray()) {
b = (long) (BASE_32_STRING.indexOf(c));
if (b < 0) {
throw new IllegalArgumentException("unsupported symbol [" + c + "] in geohash [" + hash + "]");
}
l |= (b << ((level-- * 5) + (MORTON_OFFSET - 2)));
if (level < 0) {
// We cannot handle more than 12 levels
break;
}
}
return BitUtil.flipFlop(l);
}
/** approximate width of geohash tile for a specific precision in degrees */
public static double lonWidthInDegrees(int precision) {
return precisionToLonWidth[precision];
}
/** approximate height of geohash tile for a specific precision in degrees */
public static double latHeightInDegrees(int precision) {
return precisionToLatHeight[precision];
}
private static long encodeLatLon(final double lat, final double lon) {
// encode lat/lon flipping the sign bit so negative ints sort before positive ints
final int latEnc = encodeLatitude(lat) ^ 0x80000000;
final int lonEnc = encodeLongitude(lon) ^ 0x80000000;
return BitUtil.interleave(latEnc, lonEnc) >>> 2;
}
/** encode latitude to integer */
public static int encodeLatitude(double latitude) {
// the maximum possible value cannot be encoded without overflow
if (latitude == 90.0D) {
latitude = Math.nextDown(latitude);
}
return (int) Math.floor(latitude / LAT_DECODE);
}
/** encode longitude to integer */
public static int encodeLongitude(double longitude) {
// the maximum possible value cannot be encoded without overflow
if (longitude == 180.0D) {
longitude = Math.nextDown(longitude);
}
return (int) Math.floor(longitude / LON_DECODE);
}
/** returns the latitude value from the string based geohash */
public static final double decodeLatitude(final String geohash) {
return decodeLatitude(Geohash.mortonEncode(geohash));
}
/** returns the latitude value from the string based geohash */
public static final double decodeLongitude(final String geohash) {
return decodeLongitude(Geohash.mortonEncode(geohash));
}
/** decode longitude value from morton encoded geo point */
public static double decodeLongitude(final long hash) {
return unscaleLon(BitUtil.deinterleave(hash));
}
/** decode latitude value from morton encoded geo point */
public static double decodeLatitude(final long hash) {
return unscaleLat(BitUtil.deinterleave(hash >>> 1));
}
private static double unscaleLon(final long val) {
return (val / LON_SCALE) - 180;
}
private static double unscaleLat(final long val) {
return (val / LAT_SCALE) - 90;
}
}
| Geohash |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/classloading/ComponentClassLoaderTest.java | {
"start": 9688,
"end": 11900
} | class ____ extends URLClassLoader {
private final String nameToCheck;
private final Class<?> classToReturn;
private final URL resourceToReturn;
public TestUrlClassLoader() {
this(null, null, null);
}
public TestUrlClassLoader(String resourceNameToCheck, URL resourceToReturn) {
this(checkNotNull(resourceNameToCheck), null, checkNotNull(resourceToReturn));
}
public TestUrlClassLoader(String classNameToCheck, Class<?> classToReturn) {
this(checkNotNull(classNameToCheck), checkNotNull(classToReturn), null);
}
public TestUrlClassLoader(
String classNameToCheck, Class<?> classToReturn, URL resourceToReturn) {
super(new URL[0], null);
this.nameToCheck = classNameToCheck;
this.classToReturn = classToReturn;
this.resourceToReturn = resourceToReturn;
}
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
if (nameToCheck == null) {
throw new ClassNotFoundException();
}
if (nameToCheck.equals(name)) {
return classToReturn;
}
return super.loadClass(name);
}
@Override
public URL getResource(String name) {
if (nameToCheck == null) {
return null;
}
if (nameToCheck.equals(name)) {
return resourceToReturn;
}
return super.getResource(name);
}
@Override
public Enumeration<URL> getResources(String name) throws IOException {
if (nameToCheck != null && nameToCheck.equals(name)) {
return new ComponentClassLoader.IteratorBackedEnumeration<>(
Collections.singleton(resourceToReturn).iterator());
}
return super.getResources(name);
}
}
private static URL createURL() {
try {
return Paths.get("").toUri().toURL();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
}
| TestUrlClassLoader |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/sql/oracle/demo/Demo3.java | {
"start": 4908,
"end": 8500
} | class ____ extends MySqlASTVisitorAdapter {
private int varIndex;
private List<SQLExpr> variantList = new ArrayList<SQLExpr>();
private List<SQLExprTableSource> tableSourceList = new ArrayList<SQLExprTableSource>();
private Map<String, String> tableAlias = new HashMap<String, String>();
private String defaultTableName;
public boolean visit(SQLVariantRefExpr x) {
x.getAttributes().put("varIndex", varIndex++);
return true;
}
public boolean visit(SQLBinaryOpExpr x) {
if (isUserId(x.getLeft())) {
if (x.getRight() instanceof SQLVariantRefExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) x.getLeft();
String ident = identExpr.getName();
if (ident.equals("uid")) {
variantList.add(x.getRight());
}
} else if (x.getRight() instanceof SQLNumericLiteralExpr) {
variantList.add(x.getRight());
}
}
return true;
}
private boolean isUserId(SQLExpr x) {
if (x instanceof SQLIdentifierExpr) {
if ("user".equals(defaultTableName) && "uid".equals(((SQLIdentifierExpr) x).getName())) {
return true;
}
return false;
}
if (x instanceof SQLPropertyExpr) {
SQLPropertyExpr propExpr = (SQLPropertyExpr) x;
String columnName = propExpr.getName();
if (!"uid".equals(columnName)) {
return false;
}
if (propExpr.getOwner() instanceof SQLIdentifierExpr) {
String ownerName = ((SQLIdentifierExpr) propExpr.getOwner()).getName();
if ("user".equals(ownerName) || "user".equals(tableAlias.get(ownerName))) {
return true;
}
}
}
return false;
}
public boolean visit(SQLExprTableSource x) {
recordTableSource(x);
return true;
}
private String recordTableSource(SQLExprTableSource x) {
if (x.getExpr() instanceof SQLIdentifierExpr) {
String tableName = ((SQLIdentifierExpr) x.getExpr()).getName();
if (x.getAlias() != null) {
tableAlias.put(x.getAlias(), tableName);
}
if ("user".equals(tableName)) {
if (!tableSourceList.contains(x)) {
tableSourceList.add(x);
}
}
return tableName;
}
return null;
}
public boolean visit(SQLSelectQueryBlock queryBlock) {
if (queryBlock.getFrom() instanceof SQLExprTableSource) {
defaultTableName = recordTableSource((SQLExprTableSource) queryBlock.getFrom());
}
return true;
}
public boolean visit(MySqlSelectQueryBlock queryBlock) {
if (queryBlock.getFrom() instanceof SQLExprTableSource) {
defaultTableName = recordTableSource((SQLExprTableSource) queryBlock.getFrom());
}
return true;
}
public List<SQLExpr> getVariantList() {
return variantList;
}
public List<SQLExprTableSource> getTableSourceList() {
return tableSourceList;
}
}
}
| MyVisitor |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/JSR166TestCase.java | {
"start": 22041,
"end": 26287
} | class ____ extends Policy {
Permissions perms = new Permissions();
AdjustablePolicy(Permission... permissions) {
for (Permission permission : permissions) perms.add(permission);
}
void addPermission(Permission perm) {
perms.add(perm);
}
void clearPermissions() {
perms = new Permissions();
}
@Override
public PermissionCollection getPermissions(CodeSource cs) {
return perms;
}
@Override
public PermissionCollection getPermissions(ProtectionDomain pd) {
return perms;
}
@Override
public boolean implies(ProtectionDomain pd, Permission p) {
return perms.implies(p);
}
@Override
public void refresh() {}
}
/** Returns a policy containing all the permissions we ever need. */
public static Policy permissivePolicy() {
return new AdjustablePolicy
// Permissions j.u.c. needs directly
(
new RuntimePermission("modifyThread"),
new RuntimePermission("getClassLoader"),
new RuntimePermission("setContextClassLoader"),
// Permissions needed to change permissions!
new SecurityPermission("getPolicy"),
new SecurityPermission("setPolicy"),
new RuntimePermission("setSecurityManager"),
// Permissions needed by the junit test harness
new RuntimePermission("accessDeclaredMembers"),
new PropertyPermission("*", "read"),
new FilePermission("<<ALL FILES>>", "read"));
}
/** Sleeps until the given time has elapsed. Throws AssertionFailedError if interrupted. */
void sleep(long millis) {
try {
delay(millis);
} catch (InterruptedException ie) {
AssertionFailedError afe = new AssertionFailedError("Unexpected InterruptedException");
afe.initCause(ie);
throw afe;
}
}
/**
* Spin-waits up to the specified number of milliseconds for the given thread to enter a wait
* state: BLOCKED, WAITING, or TIMED_WAITING.
*/
@SuppressWarnings("ThreadPriorityCheck") // TODO: b/175898629 - Consider onSpinWait.
void waitForThreadToEnterWaitState(Thread thread, long timeoutMillis) {
long startTime = System.nanoTime();
for (; ; ) {
Thread.State s = thread.getState();
if (s == Thread.State.BLOCKED || s == Thread.State.WAITING || s == Thread.State.TIMED_WAITING)
return;
else if (s == Thread.State.TERMINATED) fail("Unexpected thread termination");
else if (millisElapsedSince(startTime) > timeoutMillis) {
threadAssertTrue(thread.isAlive());
return;
}
Thread.yield();
}
}
/**
* Waits up to LONG_DELAY_MS for the given thread to enter a wait state: BLOCKED, WAITING, or
* TIMED_WAITING.
*/
void waitForThreadToEnterWaitState(Thread thread) {
waitForThreadToEnterWaitState(thread, LONG_DELAY_MS);
}
/**
* Returns the number of milliseconds since time given by startNanoTime, which must have been
* previously returned from a call to {@link System#nanoTime()}.
*/
long millisElapsedSince(long startNanoTime) {
return NANOSECONDS.toMillis(System.nanoTime() - startNanoTime);
}
/** Returns a new started daemon Thread running the given runnable. */
Thread newStartedThread(Runnable runnable) {
Thread t = new Thread(runnable);
t.setDaemon(true);
t.start();
return t;
}
/**
* Waits for the specified time (in milliseconds) for the thread to terminate (using {@link
* Thread#join(long)}), else interrupts the thread (in the hope that it may terminate later) and
* fails.
*/
void awaitTermination(Thread t, long timeoutMillis) {
try {
t.join(timeoutMillis);
} catch (InterruptedException ie) {
threadUnexpectedException(ie);
} finally {
if (t.getState() != Thread.State.TERMINATED) {
t.interrupt();
fail("Test timed out");
}
}
}
/**
* Waits for LONG_DELAY_MS milliseconds for the thread to terminate (using {@link
* Thread#join(long)}), else interrupts the thread (in the hope that it may terminate later) and
* fails.
*/
void awaitTermination(Thread t) {
awaitTermination(t, LONG_DELAY_MS);
}
// Some convenient Runnable classes
public abstract | AdjustablePolicy |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java | {
"start": 21601,
"end": 22430
} | class ____ extends OneInputStreamTask<Object, Object> {
public TestStreamTask(Environment env) throws Exception {
super(env);
}
@Override
public void init() {}
@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
triggerCheckpointOnBarrier(
new CheckpointMetaData(11L, System.currentTimeMillis()),
CheckpointOptions.forCheckpointWithDefaultLocation(),
new CheckpointMetricsBuilder());
while (isRunning()) {
Thread.sleep(1L);
}
controller.suspendDefaultAction();
mailboxProcessor.suspend();
}
@Override
protected void cleanUpInternal() {}
}
}
| TestStreamTask |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java | {
"start": 2194,
"end": 12290
} | class ____ extends ESIntegTestCase {
@Override
public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING.getKey(), false)
.build();
}
// see #2896
public void testStopOneNodePreferenceWithRedState() throws IOException {
assertAcked(prepareCreate("test").setSettings(indexSettings(cluster().numDataNodes() + 2, 0)));
ensureGreen();
for (int i = 0; i < 10; i++) {
prepareIndex("test").setId("" + i).setSource("field1", "value1").get();
}
refresh();
internalCluster().stopRandomDataNode();
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForStatus(ClusterHealthStatus.RED).get();
String[] preferences = new String[] {
"_local",
"_prefer_nodes:somenode",
"_prefer_nodes:server2",
"_prefer_nodes:somenode,server2" };
for (String pref : preferences) {
logger.info("--> Testing out preference={}", pref);
assertResponses(response -> {
assertThat(RestStatus.OK, equalTo(response.status()));
assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0));
}, prepareSearch().setSize(0).setPreference(pref), prepareSearch().setPreference(pref));
}
// _only_local is a stricter preference, we need to send the request to a data node
assertResponses(response -> {
assertThat(RestStatus.OK, equalTo(response.status()));
assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0));
},
dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"),
dataNodeClient().prepareSearch().setPreference("_only_local")
);
}
public void testNoPreferenceRandom() {
assertAcked(
prepareCreate("test").setSettings(
// this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
)
);
ensureGreen();
prepareIndex("test").setSource("field1", "value1").get();
refresh();
final Client client = internalCluster().smartClient();
assertResponse(
client.prepareSearch("test").setQuery(matchAllQuery()),
fist -> assertResponse(
client.prepareSearch("test").setQuery(matchAllQuery()),
second -> assertThat(
fist.getHits().getAt(0).getShard().getNodeId(),
not(equalTo(second.getHits().getAt(0).getShard().getNodeId()))
)
)
);
}
public void testSimplePreference() {
indicesAdmin().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", XContentType.JSON).get();
ensureGreen();
prepareIndex("test").setSource("field1", "value1").get();
refresh();
assertResponses(
response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)),
prepareSearch().setQuery(matchAllQuery()),
prepareSearch().setQuery(matchAllQuery()).setPreference("_local"),
prepareSearch().setQuery(matchAllQuery()).setPreference("1234")
);
}
public void testThatSpecifyingNonExistingNodesReturnsUsefulError() {
createIndex("test");
ensureGreen();
try {
prepareSearch().setQuery(matchAllQuery()).setPreference("_only_nodes:DOES-NOT-EXIST").get();
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e, hasToString(containsString("no data nodes with criteria [DOES-NOT-EXIST] found for shard: [test][")));
}
}
public void testNodesOnlyRandom() {
assertAcked(
prepareCreate("test").setSettings(
// this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
)
);
ensureGreen();
prepareIndex("test").setSource("field1", "value1").get();
refresh();
final Client client = internalCluster().smartClient();
// multiple wildchar to cover multi-param usecase
SearchRequestBuilder request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*,nodes*");
assertSearchOnRandomNodes(request);
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*");
assertSearchOnRandomNodes(request);
ArrayList<String> allNodeIds = new ArrayList<>();
ArrayList<String> allNodeNames = new ArrayList<>();
ArrayList<String> allNodeHosts = new ArrayList<>();
NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get();
for (NodeStats node : nodeStats.getNodes()) {
allNodeIds.add(node.getNode().getId());
allNodeNames.add(node.getNode().getName());
allNodeHosts.add(node.getHostname());
}
String node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeIds.toArray());
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
assertSearchOnRandomNodes(request);
node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeNames.toArray());
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
assertSearchOnRandomNodes(request);
node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
assertSearchOnRandomNodes(request);
node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
assertSearchOnRandomNodes(request);
// Mix of valid and invalid nodes
node_expr = "_only_nodes:*,invalidnode";
request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
assertSearchOnRandomNodes(request);
}
private void assertSearchOnRandomNodes(SearchRequestBuilder request) {
Set<String> hitNodes = new HashSet<>();
for (int i = 0; i < 2; i++) {
assertResponse(request, response -> {
assertThat(response.getHits().getHits().length, greaterThan(0));
hitNodes.add(response.getHits().getAt(0).getShard().getNodeId());
});
}
assertThat(hitNodes.size(), greaterThan(1));
}
public void testCustomPreferenceUnaffectedByOtherShardMovements() {
/*
* Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data
* is rarely touched and can be dropped from the filesystem cache. This works best if the set of shards searched doesn't change
* unnecessarily, so this test verifies a consistent routing even as other shards are created/relocated/removed.
*/
assertAcked(
prepareCreate("test").setSettings(
Settings.builder()
.put(indexSettings())
.put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
)
);
ensureGreen();
prepareIndex("test").setSource("field1", "value1").get();
refresh();
final String customPreference = randomAlphaOfLength(10);
final String nodeId;
var response = prepareSearch("test").setQuery(matchAllQuery()).setPreference(customPreference).get();
try {
nodeId = response.getHits().getAt(0).getShard().getNodeId();
} finally {
response.decRef();
}
assertSearchesSpecificNode("test", customPreference, nodeId);
final int replicasInNewIndex = between(1, maximumNumberOfReplicas());
assertAcked(
prepareCreate("test2").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex))
);
ensureGreen();
assertSearchesSpecificNode("test", customPreference, nodeId);
setReplicaCount(replicasInNewIndex - 1, "test2");
assertSearchesSpecificNode("test", customPreference, nodeId);
updateIndexSettings(
Settings.builder()
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(
IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name",
internalCluster().getNodeNameThat(DiscoveryNode::canContainData)
),
"test2"
);
ensureGreen();
assertSearchesSpecificNode("test", customPreference, nodeId);
assertAcked(indicesAdmin().prepareDelete("test2"));
assertSearchesSpecificNode("test", customPreference, nodeId);
}
private static void assertSearchesSpecificNode(String index, String customPreference, String nodeId) {
assertResponse(prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference), response -> {
assertThat(response.getHits().getHits().length, equalTo(1));
assertThat(response.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId));
});
}
}
| SearchPreferenceIT |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/RedisAdvancedClusterReactiveCommandsImpl.java | {
"start": 3517,
"end": 37255
} | class ____<K, V> extends AbstractRedisReactiveCommands<K, V>
implements RedisAdvancedClusterReactiveCommands<K, V> {
private static final InternalLogger logger = InternalLoggerFactory
.getInstance(RedisAdvancedClusterReactiveCommandsImpl.class);
private static final Predicate<RedisClusterNode> ALL_NODES = node -> true;
private final RedisCodec<K, V> codec;
/**
* Initialize a new connection.
*
* @param connection the stateful connection.
* @param codec Codec used to encode/decode keys and values.
* @param parser the implementation of the {@link JsonParser} to use
* @deprecated since 5.2, use
* {@link #RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection, RedisCodec, Supplier)}.
*/
@Deprecated
public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnectionImpl<K, V> connection, RedisCodec<K, V> codec,
Supplier<JsonParser> parser) {
super(connection, codec, parser);
this.codec = codec;
}
/**
* Initialize a new connection.
*
* @param connection the stateful connection.
* @param codec Codec used to encode/decode keys and values.
* @deprecated since 5.2, use
* {@link #RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection, RedisCodec, Supplier)}.
*/
@Deprecated
public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnectionImpl<K, V> connection,
RedisCodec<K, V> codec) {
super(connection, codec);
this.codec = codec;
}
/**
* Initialize a new connection.
*
* @param connection the stateful connection.
* @param codec Codec used to encode/decode keys and values.
* @param parser the implementation of the {@link JsonParser} to use
*/
public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection<K, V> connection, RedisCodec<K, V> codec,
Supplier<JsonParser> parser) {
super(connection, codec, parser);
this.codec = codec;
}
/**
* Initialize a new connection.
*
* @param connection the stateful connection.
* @param codec Codec used to encode/decode keys and values.
*/
public RedisAdvancedClusterReactiveCommandsImpl(StatefulRedisClusterConnection<K, V> connection, RedisCodec<K, V> codec) {
super(connection, codec);
this.codec = codec;
}
@Override
public Mono<String> clientSetname(K name) {
List<Publisher<String>> publishers = new ArrayList<>();
publishers.add(super.clientSetname(name));
for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {
Mono<StatefulRedisConnection<K, V>> byNodeId = getStatefulConnection(redisClusterNode.getNodeId());
publishers.add(byNodeId.flatMap(conn -> {
if (conn.isOpen()) {
return conn.reactive().clientSetname(name);
}
return Mono.empty();
}));
Mono<StatefulRedisConnection<K, V>> byHost = getStatefulConnection(redisClusterNode.getUri().getHost(),
redisClusterNode.getUri().getPort());
publishers.add(byHost.flatMap(conn -> {
if (conn.isOpen()) {
return conn.reactive().clientSetname(name);
}
return Mono.empty();
}));
}
return Flux.merge(publishers).last();
}
@Override
public Mono<Long> clusterCountKeysInSlot(int slot) {
Mono<RedisClusterReactiveCommands<K, V>> connectionBySlot = findConnectionBySlotReactive(slot);
return connectionBySlot.flatMap(cmd -> cmd.clusterCountKeysInSlot(slot));
}
@Override
public Flux<K> clusterGetKeysInSlot(int slot, int count) {
Mono<RedisClusterReactiveCommands<K, V>> connectionBySlot = findConnectionBySlotReactive(slot);
return connectionBySlot.flatMapMany(conn -> conn.clusterGetKeysInSlot(slot, count));
}
@Override
public Mono<Long> dbsize() {
Map<String, Publisher<Long>> publishers = executeOnUpstream(RedisServerReactiveCommands::dbsize);
return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
}
@Override
public Mono<Long> del(K... keys) {
return del(Arrays.asList(keys));
}
@Override
public Mono<Long> del(Iterable<K> keys) {
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);
if (partitioned.size() < 2) {
return super.del(keys);
}
List<Publisher<Long>> publishers = new ArrayList<>();
for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
publishers.add(super.del(entry.getValue()));
}
return Flux.merge(publishers).reduce((accu, next) -> accu + next);
}
@Override
public Mono<Long> exists(K... keys) {
return exists(Arrays.asList(keys));
}
public Mono<Long> exists(Iterable<K> keys) {
List<K> keyList = LettuceLists.newList(keys);
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);
if (partitioned.size() < 2) {
return super.exists(keyList);
}
List<Publisher<Long>> publishers = new ArrayList<>();
for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
publishers.add(super.exists(entry.getValue()));
}
return Flux.merge(publishers).reduce((accu, next) -> accu + next);
}
@Override
public Mono<String> flushall() {
Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushall);
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<String> flushall(FlushMode flushMode) {
Map<String, Publisher<String>> publishers = executeOnUpstream(it -> it.flushall(flushMode));
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<String> flushallAsync() {
Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushallAsync);
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<String> flushdb() {
Map<String, Publisher<String>> publishers = executeOnUpstream(RedisServerReactiveCommands::flushdb);
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<String> flushdb(FlushMode flushMode) {
Map<String, Publisher<String>> publishers = executeOnUpstream(it -> it.flushdb(flushMode));
return Flux.merge(publishers.values()).last();
}
@Override
public Flux<K> keys(String pattern) {
Map<String, Publisher<K>> publishers = executeOnUpstream(commands -> commands.keys(pattern));
return Flux.merge(publishers.values());
}
/**
* Find all keys matching the given pattern (legacy overload).
*
* @param pattern the pattern type: patternkey (pattern).
* @return K array-reply list of keys matching {@code pattern}.
* @deprecated Use {@link #keys(String)} instead. This legacy overload will be removed in a later version.
*/
@Deprecated
@Override
public Flux<K> keysLegacy(K pattern) {
Map<String, Publisher<K>> publishers = executeOnUpstream(commands -> commands.keysLegacy(pattern));
return Flux.merge(publishers.values());
}
@Override
public Mono<Long> keys(KeyStreamingChannel<K> channel, String pattern) {
Map<String, Publisher<Long>> publishers = executeOnUpstream(commands -> commands.keys(channel, pattern));
return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
}
/**
* Find all keys matching the given pattern (legacy overload).
*
* @param channel the channel.
* @param pattern the pattern.
* @return Long array-reply list of keys matching {@code pattern}.
* @deprecated Use {@link #keys(String)} instead. This legacy overload will be removed in a later version.
*/
@Deprecated
@Override
public Mono<Long> keysLegacy(KeyStreamingChannel<K> channel, K pattern) {
Map<String, Publisher<Long>> publishers = executeOnUpstream(commands -> commands.keysLegacy(channel, pattern));
return Flux.merge(publishers.values()).reduce((accu, next) -> accu + next);
}
@Override
public Flux<KeyValue<K, V>> mget(K... keys) {
return mget(Arrays.asList(keys));
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public Flux<KeyValue<K, V>> mget(Iterable<K> keys) {
List<K> keyList = LettuceLists.newList(keys);
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);
if (partitioned.size() < 2) {
return super.mget(keyList);
}
List<Publisher<KeyValue<K, V>>> publishers = partitioned.values().stream().map(super::mget)
.collect(Collectors.toList());
return Flux.mergeSequential(publishers).collectList().map(results -> {
KeyValue<K, V>[] values = new KeyValue[keyList.size()];
int offset = 0;
for (List<K> partitionKeys : partitioned.values()) {
for (int i = 0; i < keyList.size(); i++) {
int index = partitionKeys.indexOf(keyList.get(i));
if (index != -1) {
values[i] = results.get(offset + index);
}
}
offset += partitionKeys.size();
}
return Arrays.asList(values);
}).flatMapMany(Flux::fromIterable);
}
@Override
public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, K... keys) {
return mget(channel, Arrays.asList(keys));
}
@Override
public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, Iterable<K> keys) {
List<K> keyList = LettuceLists.newList(keys);
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);
if (partitioned.size() < 2) {
return super.mget(channel, keyList);
}
List<Publisher<Long>> publishers = new ArrayList<>();
for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
publishers.add(super.mget(channel, entry.getValue()));
}
return Flux.merge(publishers).reduce(Long::sum);
}
@Override
public Mono<Boolean> msetnx(Map<K, V> map) {
return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.msetnx(kvMap).flux(),
booleanFlux -> booleanFlux).reduce((accu, next) -> accu && next);
}
@Override
public Mono<String> mset(Map<K, V> map) {
return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.mset(kvMap).flux(),
booleanFlux -> booleanFlux).last();
}
@Override
public Mono<Boolean> msetex(Map<K, V> map, MSetExArgs args) {
return pipeliningWithMap(map, kvMap -> RedisAdvancedClusterReactiveCommandsImpl.super.msetex(kvMap, args).flux(),
booleanFlux -> booleanFlux).reduce((accu, next) -> accu && next);
}
@Override
public Mono<K> randomkey() {
Partitions partitions = getStatefulConnection().getPartitions();
if (partitions.isEmpty()) {
return super.randomkey();
}
int index = ThreadLocalRandom.current().nextInt(partitions.size());
Mono<RedisClusterReactiveCommands<K, V>> connection = getConnectionReactive(partitions.getPartition(index).getNodeId());
return connection.flatMap(RedisKeyReactiveCommands::randomkey);
}
@Override
public Mono<String> scriptFlush() {
Map<String, Publisher<String>> publishers = executeOnNodes(RedisScriptingReactiveCommands::scriptFlush, ALL_NODES);
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<String> scriptKill() {
Map<String, Publisher<String>> publishers = executeOnNodes(RedisScriptingReactiveCommands::scriptKill, ALL_NODES);
return Flux.merge(publishers.values()).onErrorReturn("OK").last();
}
@Override
public Mono<String> scriptLoad(byte[] script) {
Map<String, Publisher<String>> publishers = executeOnNodes((commands) -> commands.scriptLoad(script), ALL_NODES);
return Flux.merge(publishers.values()).last();
}
@Override
public Mono<Void> shutdown(boolean save) {
Map<String, Publisher<Void>> publishers = executeOnNodes(commands -> commands.shutdown(save), ALL_NODES);
return Flux.merge(publishers.values()).then();
}
@Override
public Mono<Long> touch(K... keys) {
return touch(Arrays.asList(keys));
}
public Mono<Long> touch(Iterable<K> keys) {
List<K> keyList = LettuceLists.newList(keys);
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keyList);
if (partitioned.size() < 2) {
return super.touch(keyList);
}
List<Publisher<Long>> publishers = new ArrayList<>();
for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
publishers.add(super.touch(entry.getValue()));
}
return Flux.merge(publishers).reduce((accu, next) -> accu + next);
}
@Override
public Mono<Long> unlink(K... keys) {
return unlink(Arrays.asList(keys));
}
@Override
public Mono<Long> unlink(Iterable<K> keys) {
Map<Integer, List<K>> partitioned = SlotHash.partition(codec, keys);
if (partitioned.size() < 2) {
return super.unlink(keys);
}
List<Publisher<Long>> publishers = new ArrayList<>();
for (Map.Entry<Integer, List<K>> entry : partitioned.entrySet()) {
publishers.add(super.unlink(entry.getValue()));
}
return Flux.merge(publishers).reduce((accu, next) -> accu + next);
}
@Override
public RedisClusterReactiveCommands<K, V> getConnection(String nodeId) {
return getStatefulConnection().getConnection(nodeId).reactive();
}
private Mono<StatefulRedisConnection<K, V>> getStatefulConnection(String nodeId) {
return getMono(getConnectionProvider().getConnectionAsync(ConnectionIntent.WRITE, nodeId));
}
private Mono<RedisClusterReactiveCommands<K, V>> getConnectionReactive(String nodeId) {
return getMono(getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, nodeId))
.map(StatefulRedisConnection::reactive);
}
@Override
public RedisClusterReactiveCommands<K, V> getConnection(String host, int port) {
return getStatefulConnection().getConnection(host, port).reactive();
}
private Mono<RedisClusterReactiveCommands<K, V>> getConnectionReactive(String host, int port) {
return getMono(getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, host, port))
.map(StatefulRedisConnection::reactive);
}
private Mono<StatefulRedisConnection<K, V>> getStatefulConnection(String host, int port) {
return getMono(getConnectionProvider().<K, V> getConnectionAsync(ConnectionIntent.WRITE, host, port));
}
@Override
public StatefulRedisClusterConnection<K, V> getStatefulConnection() {
return (StatefulRedisClusterConnection<K, V>) super.getConnection();
}
/**
 * Obtain a node-scoped connection for the given intent (READ/WRITE). Selection honors the current ReadFrom policy via the
 * cluster connection provider, so READ intent may land on a replica depending on configuration.
 */
private Mono<StatefulRedisConnection<K, V>> getStatefulConnection(ConnectionIntent intent) {
    return getMono(getConnectionProvider().getRandomConnectionAsync(intent));
}
@Override
public Mono<KeyScanCursor<K>> scan() {
    // Start a cluster-wide key scan without SCAN arguments.
    return clusterScan(ScanCursor.INITIAL, (commands, continuation) -> commands.scan(),
            reactiveClusterKeyScanCursorMapper());
}
@Override
public Mono<KeyScanCursor<K>> scan(ScanArgs scanArgs) {
    // Start a cluster-wide key scan applying the given MATCH/COUNT arguments.
    return clusterScan(ScanCursor.INITIAL, (commands, continuation) -> commands.scan(scanArgs),
            reactiveClusterKeyScanCursorMapper());
}
@Override
public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor, ScanArgs scanArgs) {
    // Continue a cluster-wide key scan from a prior cursor, applying the given arguments.
    return clusterScan(scanCursor, (commands, continuation) -> commands.scan(continuation, scanArgs),
            reactiveClusterKeyScanCursorMapper());
}
@Override
public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor) {
    // Continue a cluster-wide key scan from a prior cursor.
    return clusterScan(scanCursor, (commands, continuation) -> commands.scan(continuation),
            reactiveClusterKeyScanCursorMapper());
}
@Override
public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel) {
    // Start a cluster-wide streaming key scan, emitting keys to the channel.
    return clusterScan(ScanCursor.INITIAL, (commands, continuation) -> commands.scan(channel),
            reactiveClusterStreamScanCursorMapper());
}
@Override
public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanArgs scanArgs) {
    // Start a cluster-wide streaming key scan with MATCH/COUNT arguments.
    return clusterScan(ScanCursor.INITIAL, (commands, continuation) -> commands.scan(channel, scanArgs),
            reactiveClusterStreamScanCursorMapper());
}
@Override
public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor, ScanArgs scanArgs) {
    // Continue a cluster-wide streaming key scan from a prior cursor, applying the given arguments.
    return clusterScan(scanCursor, (commands, continuation) -> commands.scan(channel, continuation, scanArgs),
            reactiveClusterStreamScanCursorMapper());
}
@Override
public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor) {
    // Continue a cluster-wide streaming key scan from a prior cursor.
    return clusterScan(scanCursor, (commands, continuation) -> commands.scan(channel, continuation),
            reactiveClusterStreamScanCursorMapper());
}
@Override
public Mono<AggregationReply<K, V>> ftAggregate(String index, V query, AggregateArgs<K, V> args) {
    // FT.AGGREGATE is keyless: route to a single node and record which node served the request,
    // because an open cursor (WITHCURSOR) can only be continued/deleted on that same node.
    return routeKeyless(() -> super.ftAggregate(index, query, args),
            (nodeId, conn) -> conn.ftAggregate(index, query, args).mapNotNull(reply -> {
                if (reply != null) {
                    // Stamp the serving node id onto live cursors (id > 0) so FT.CURSOR READ/DEL can route back.
                    reply.getCursor().filter(c -> c.getCursorId() > 0).ifPresent(c -> c.setNodeId(nodeId));
                }
                return reply;
            }), CommandType.FT_AGGREGATE);
}
@Override
public Mono<AggregationReply<K, V>> ftAggregate(String index, V query) {
    // Delegates with null args. NOTE(review): other convenience overloads here (e.g. ftSearch) pass an
    // empty args object instead of null — confirm the downstream command builder accepts a null AggregateArgs.
    return ftAggregate(index, query, null);
}
@Override
public Mono<SearchReply<K, V>> ftSearch(String index, V query, SearchArgs<K, V> args) {
    // Keyless command: route with cluster-aware node selection, falling back to default routing on failure.
    return routeKeyless(() -> super.ftSearch(index, query, args),
            commands -> commands.ftSearch(index, query, args), CommandType.FT_SEARCH);
}
@Override
public Mono<SearchReply<K, V>> ftSearch(String index, V query) {
    // Use an empty argument set when the caller supplies none.
    SearchArgs<K, V> defaultArgs = SearchArgs.<K, V> builder().build();
    return ftSearch(index, query, defaultArgs);
}
@Override
public Mono<String> ftExplain(String index, V query) {
    // Keyless command: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftExplain(index, query), commands -> commands.ftExplain(index, query),
            CommandType.FT_EXPLAIN);
}
@Override
public Mono<String> ftExplain(String index, V query, ExplainArgs<K, V> args) {
    // Keyless command with explain options: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftExplain(index, query, args),
            commands -> commands.ftExplain(index, query, args), CommandType.FT_EXPLAIN);
}
@Override
public Flux<V> ftTagvals(String index, String fieldName) {
    // Keyless streaming command: route with cluster-aware node selection.
    return routeKeylessMany(() -> super.ftTagvals(index, fieldName),
            commands -> commands.ftTagvals(index, fieldName), CommandType.FT_TAGVALS);
}
@Override
public Mono<SpellCheckResult<V>> ftSpellcheck(String index, V query) {
    // Keyless command: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftSpellcheck(index, query),
            commands -> commands.ftSpellcheck(index, query), CommandType.FT_SPELLCHECK);
}
@Override
public Mono<SpellCheckResult<V>> ftSpellcheck(String index, V query, SpellCheckArgs<K, V> args) {
    // Keyless command with spellcheck options: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftSpellcheck(index, query, args),
            commands -> commands.ftSpellcheck(index, query, args), CommandType.FT_SPELLCHECK);
}
@Override
public Mono<Long> ftDictadd(String dict, V... terms) {
    // Keyless dictionary mutation: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftDictadd(dict, terms), commands -> commands.ftDictadd(dict, terms),
            CommandType.FT_DICTADD);
}
@Override
public Mono<Long> ftDictdel(String dict, V... terms) {
    // Keyless dictionary mutation: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftDictdel(dict, terms), commands -> commands.ftDictdel(dict, terms),
            CommandType.FT_DICTDEL);
}
@Override
public Flux<V> ftDictdump(String dict) {
    // Keyless streaming command: route with cluster-aware node selection.
    return routeKeylessMany(() -> super.ftDictdump(dict), commands -> commands.ftDictdump(dict),
            CommandType.FT_DICTDUMP);
}
@Override
public Mono<String> ftAliasadd(String alias, String index) {
    // Keyless alias management: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftAliasadd(alias, index), commands -> commands.ftAliasadd(alias, index),
            CommandType.FT_ALIASADD);
}
@Override
public Mono<String> ftAliasupdate(String alias, String index) {
    // Keyless alias management: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftAliasupdate(alias, index),
            commands -> commands.ftAliasupdate(alias, index), CommandType.FT_ALIASUPDATE);
}
@Override
public Mono<String> ftAliasdel(String alias) {
    // Keyless alias management: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftAliasdel(alias), commands -> commands.ftAliasdel(alias),
            CommandType.FT_ALIASDEL);
}
@Override
public Mono<String> ftCreate(String index, List<FieldArgs<K>> fieldArgs) {
    // Keyless index creation: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftCreate(index, fieldArgs), commands -> commands.ftCreate(index, fieldArgs),
            CommandType.FT_CREATE);
}
@Override
public Mono<String> ftCreate(String index, CreateArgs<K, V> arguments, List<FieldArgs<K>> fieldArgs) {
    // Keyless index creation with options: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftCreate(index, arguments, fieldArgs),
            commands -> commands.ftCreate(index, arguments, fieldArgs), CommandType.FT_CREATE);
}
@Override
public Mono<String> ftAlter(String index, boolean skipInitialScan, List<FieldArgs<K>> fieldArgs) {
    // Keyless index alteration: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftAlter(index, skipInitialScan, fieldArgs),
            commands -> commands.ftAlter(index, skipInitialScan, fieldArgs), CommandType.FT_ALTER);
}
@Override
public Mono<String> ftAlter(String index, List<FieldArgs<K>> fieldArgs) {
    // Keyless index alteration: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftAlter(index, fieldArgs), commands -> commands.ftAlter(index, fieldArgs),
            CommandType.FT_ALTER);
}
@Override
public Mono<String> ftDropindex(String index, boolean deleteDocumentKeys) {
    // Keyless index removal (optionally deleting indexed documents): route with cluster-aware selection.
    return routeKeyless(() -> super.ftDropindex(index, deleteDocumentKeys),
            commands -> commands.ftDropindex(index, deleteDocumentKeys), CommandType.FT_DROPINDEX);
}
@Override
public Mono<String> ftDropindex(String index) {
    // Keyless index removal: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftDropindex(index), commands -> commands.ftDropindex(index),
            CommandType.FT_DROPINDEX);
}
@Override
public Mono<Map<V, List<V>>> ftSyndump(String index) {
    // Keyless synonym dump: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftSyndump(index), commands -> commands.ftSyndump(index),
            CommandType.FT_SYNDUMP);
}
@Override
public Mono<String> ftSynupdate(String index, V synonymGroupId, V... terms) {
    // Keyless synonym update: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftSynupdate(index, synonymGroupId, terms),
            commands -> commands.ftSynupdate(index, synonymGroupId, terms), CommandType.FT_SYNUPDATE);
}
@Override
public Mono<String> ftSynupdate(String index, V synonymGroupId, SynUpdateArgs<K, V> args, V... terms) {
    // Keyless synonym update with options: route with cluster-aware node selection.
    return routeKeyless(() -> super.ftSynupdate(index, synonymGroupId, args, terms),
            commands -> commands.ftSynupdate(index, synonymGroupId, args, terms), CommandType.FT_SYNUPDATE);
}
@Override
public Flux<V> ftList() {
    // List all indexes; keyless, so route with cluster-aware node selection.
    return routeKeylessMany(() -> super.ftList(), commands -> commands.ftList(), CommandType.FT_LIST);
}
@Override
public Mono<AggregationReply<K, V>> ftCursorread(String index, Cursor cursor, int count) {
    if (cursor == null) {
        return Mono.error(new IllegalArgumentException("cursor must not be null"));
    }
    long cursorId = cursor.getCursorId();
    // A non-positive id means the cursor is exhausted: short-circuit with an empty reply, no round-trip.
    if (cursorId <= 0) {
        return Mono.just(new AggregationReply<>());
    }
    Optional<String> nodeIdOpt = cursor.getNodeId();
    if (!nodeIdOpt.isPresent()) {
        return Mono.error(new IllegalArgumentException("Cursor missing nodeId; cannot route cursor READ in cluster mode"));
    }
    String nodeId = nodeIdOpt.get();
    // Cursor state lives on the node that created it (stamped in ftAggregate), so route to that exact node.
    StatefulRedisConnection<K, V> byNode = getStatefulConnection().getConnection(nodeId, ConnectionIntent.WRITE);
    return byNode.reactive().ftCursorread(index, cursor, count).map(reply -> {
        if (reply != null) {
            // Re-stamp the node id so subsequent READ/DEL calls keep routing to the same node.
            reply.getCursor().ifPresent(c -> c.setNodeId(nodeId));
        }
        return reply;
    });
}
@Override
public Mono<AggregationReply<K, V>> ftCursorread(String index, Cursor cursor) {
    // -1 appears to signal "no explicit COUNT" (use the server default) — TODO confirm downstream handling.
    return ftCursorread(index, cursor, -1);
}
@Override
public Mono<String> ftCursordel(String index, Cursor cursor) {
    if (cursor == null) {
        return Mono.error(new IllegalArgumentException("cursor must not be null"));
    }
    long cursorId = cursor.getCursorId();
    // A non-positive id means the cursor is already exhausted/closed: treat deletion as a no-op success.
    if (cursorId <= 0) {
        return Mono.just("OK");
    }
    Optional<String> nodeIdOpt = cursor.getNodeId();
    if (!nodeIdOpt.isPresent()) {
        return Mono.error(new IllegalArgumentException("Cursor missing nodeId; cannot route cursor DEL in cluster mode"));
    }
    String nodeId = nodeIdOpt.get();
    // Cursor state lives on the node that created it, so DEL must be routed to that exact node.
    StatefulRedisConnection<K, V> byNode = getStatefulConnection().getConnection(nodeId, ConnectionIntent.WRITE);
    return byNode.reactive().ftCursordel(index, cursor);
}
/**
 * Route a keyless RediSearch command using cluster-aware connection selection. Honors the current ReadFrom policy and the
 * READ/WRITE intent derived from {@code commandType}. Falls back to {@code superCall} on failure to preserve existing
 * behavior.
 *
 * @param superCall fallback invocation using the default (non-routed) command path
 * @param routedCall command invocation against the selected node's reactive API
 * @param commandType command keyword used to derive the READ/WRITE intent
 * @param <R> result type
 * @return the command result from the routed node, or from the fallback path on routing failure
 */
<R> Mono<R> routeKeyless(Supplier<Mono<R>> superCall, Function<RedisClusterReactiveCommands<K, V>, Mono<R>> routedCall,
        ProtocolKeyword commandType) {
    ConnectionIntent intent = getConnectionIntent(commandType);
    // NOTE(review): the fallback re-executes the command; a routed attempt that failed after reaching the
    // server could be applied twice for non-idempotent commands.
    return getStatefulConnection(intent).map(StatefulRedisConnection::reactive).flatMap(routedCall).onErrorResume(err -> {
        logger.error("Cluster routing failed for {} - falling back to superCall", commandType, err);
        return superCall.get();
    });
}
/**
 * Route a keyless RediSearch command producing a stream (Flux) using cluster-aware selection. Honors the current ReadFrom
 * policy and the READ/WRITE intent derived from {@code commandType}. Falls back to {@code superCall} on failure to preserve
 * existing behavior.
 *
 * @param superCall fallback invocation using the default (non-routed) command path
 * @param routedCall command invocation against the selected node's reactive API
 * @param commandType command keyword used to derive the READ/WRITE intent
 * @param <R> element type of the resulting stream
 * @return the streamed command result from the routed node, or from the fallback path on routing failure
 */
<R> Flux<R> routeKeylessMany(Supplier<Flux<R>> superCall, Function<RedisClusterReactiveCommands<K, V>, Flux<R>> routedCall,
        ProtocolKeyword commandType) {
    ConnectionIntent intent = getConnectionIntent(commandType);
    // NOTE(review): onErrorResume also fires on mid-stream failures, so elements already emitted by the routed
    // call may be followed by the full fallback stream.
    return getStatefulConnection(intent).map(StatefulRedisConnection::reactive).flatMapMany(routedCall)
            .onErrorResume(err -> {
                logger.error("Cluster routing failed for {} - falling back to superCall", commandType, err);
                return superCall.get();
            });
}
/**
 * Route a keyless RediSearch command with node context. Obtains the executing node id via CLUSTER MYID and passes it to
 * {@code routedCall} so replies can be stamped (e.g., cursor.nodeId). Honors ReadFrom and READ/WRITE intent. Falls back to
 * {@code superCall} on failure.
 *
 * @param superCall fallback invocation using the default (non-routed) command path
 * @param routedCall command invocation receiving the executing node id and the node's reactive API
 * @param commandType command keyword used to derive the READ/WRITE intent
 * @param <R> result type
 * @return the command result from the routed node, or from the fallback path on routing failure
 */
<R> Mono<R> routeKeyless(Supplier<Mono<R>> superCall,
        BiFunction<String, RedisClusterReactiveCommands<K, V>, Mono<R>> routedCall, ProtocolKeyword commandType) {
    ConnectionIntent intent = getConnectionIntent(commandType);
    // CLUSTER MYID costs one extra round-trip, but tells us which node executes the subsequent command on this
    // same connection so the reply can carry that node id.
    return getStatefulConnection(intent).map(StatefulRedisConnection::reactive)
            .flatMap(conn -> conn.clusterMyId().flatMap(nodeId -> routedCall.apply(nodeId, conn))).onErrorResume(err -> {
                logger.error("Cluster routing failed for {} - falling back to superCall", commandType, err);
                return superCall.get();
            });
}
/** Determine READ vs WRITE intent for routing by probing command read-only status. */
private ConnectionIntent getConnectionIntent(ProtocolKeyword commandType) {
    try {
        // NOTE(review): raw RedisCommand/Command types here; the probe carries a null output and is only used
        // for the read-only lookup — it is never executed. Consider typed generics to silence raw-type warnings.
        RedisCommand probe = new Command(commandType, null);
        boolean isReadOnly = getStatefulConnection().getOptions().getReadOnlyCommands().isReadOnly(probe);
        return isReadOnly ? ConnectionIntent.READ : ConnectionIntent.WRITE;
    } catch (Exception e) {
        // Defensive default: fall back to WRITE intent if the lookup fails for any reason.
        logger.error("Error while determining connection intent for " + commandType, e);
        return ConnectionIntent.WRITE;
    }
}
// Bridge instance-level scan calls to the static cluster-scan helper.
@SuppressWarnings("unchecked")
private <T extends ScanCursor> Mono<T> clusterScan(ScanCursor cursor,
        BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
        ClusterScanSupport.ScanCursorMapper<Mono<T>> resultMapper) {
    // The raw-typed mapper cast bridges this method's T to the static helper's own type parameters.
    return clusterScan(getStatefulConnection(), getConnectionProvider(), cursor, scanFunction,
            (ClusterScanSupport.ScanCursorMapper) resultMapper);
}
/**
 * Execute a map-based command slot-aware: the input map is split by the slot of each key and {@code function} is applied
 * once per slot partition; the merged per-partition results are post-processed by {@code resultFunction}.
 */
private <T> Flux<T> pipeliningWithMap(Map<K, V> map, Function<Map<K, V>, Flux<T>> function,
        Function<Flux<T>, Flux<T>> resultFunction) {
    Map<Integer, List<K>> partitioned = SlotHash.partition(codec, map.keySet());
    // Zero or one slot involved: no fan-out needed. Note resultFunction is skipped on this fast path.
    if (partitioned.size() < 2) {
        return function.apply(map);
    }
    List<Flux<T>> publishers = partitioned.values().stream().map(ks -> {
        // Build the per-slot sub-map, preserving each key's original value.
        Map<K, V> op = new HashMap<>();
        ks.forEach(k -> op.put(k, map.get(k)));
        return function.apply(op);
    }).collect(Collectors.toList());
    return resultFunction.apply(Flux.merge(publishers));
}
/**
 * Run a command on every node currently flagged as upstream (master).
 *
 * @param function function producing the command
 * @param <T> result type
 * @return map of node id to the command's publisher for that node.
 */
protected <T> Map<String, Publisher<T>> executeOnUpstream(
        Function<RedisClusterReactiveCommands<K, V>, ? extends Publisher<T>> function) {
    return executeOnNodes(function, node -> node.is(UPSTREAM));
}
/**
 * Run a command on all available nodes that match {@code filter}.
 *
 * @param function function producing the command
 * @param filter filter function for the node selection
 * @param <T> result type
 * @return map of node id to the command's publisher for that node.
 */
protected <T> Map<String, Publisher<T>> executeOnNodes(
        Function<RedisClusterReactiveCommands<K, V>, ? extends Publisher<T>> function, Predicate<RedisClusterNode> filter) {
    Map<String, Publisher<T>> executions = new HashMap<>();
    for (RedisClusterNode redisClusterNode : getStatefulConnection().getPartitions()) {
        if (!filter.test(redisClusterNode)) {
            continue;
        }
        RedisURI uri = redisClusterNode.getUri();
        // The command is deferred: it only runs when the returned publisher is subscribed.
        Mono<RedisClusterReactiveCommands<K, V>> connection = getConnectionReactive(uri.getHost(), uri.getPort());
        executions.put(redisClusterNode.getNodeId(), connection.flatMapMany(function::apply));
    }
    return executions;
}
// Resolve the reactive command API for the node that owns the given hash slot.
private Mono<RedisClusterReactiveCommands<K, V>> findConnectionBySlotReactive(int slot) {
    RedisClusterNode owner = getStatefulConnection().getPartitions().getPartitionBySlot(slot);
    // Guard clause: no partition covering the slot means the topology view has no owner for it.
    if (owner == null) {
        return Mono.error(new RedisException("No partition for slot " + slot));
    }
    RedisURI uri = owner.getUri();
    return getConnectionReactive(uri.getHost(), uri.getPort());
}
// Unwrap the async-capable connection provider installed by the cluster channel writer.
private AsyncClusterConnectionProvider getConnectionProvider() {
    ClusterDistributionChannelWriter channelWriter = (ClusterDistributionChannelWriter) getStatefulConnection()
            .getChannelWriter();
    return (AsyncClusterConnectionProvider) channelWriter.getClusterConnectionProvider();
}
/**
 * Perform a SCAN in the cluster.
 * <p>
 * Cluster-wide scanning iterates the keyspace node by node: the cursor carries the remaining node ids plus a per-node
 * continuation cursor, and {@code mapper} folds that node bookkeeping back into the returned cursor.
 */
static <T extends ScanCursor, K, V> Mono<T> clusterScan(StatefulRedisClusterConnection<K, V> connection,
        AsyncClusterConnectionProvider connectionProvider, ScanCursor cursor,
        BiFunction<RedisKeyReactiveCommands<K, V>, ScanCursor, Mono<T>> scanFunction,
        ClusterScanSupport.ScanCursorMapper<Mono<T>> mapper) {
    // Determine which nodes still need scanning and which node the supplied cursor currently points at.
    List<String> nodeIds = ClusterScanSupport.getNodeIds(connection, cursor);
    String currentNodeId = ClusterScanSupport.getCurrentNodeId(cursor, nodeIds);
    ScanCursor continuationCursor = ClusterScanSupport.getContinuationCursor(cursor);
    // Run one SCAN step on the current node with its continuation cursor.
    Mono<T> scanCursor = getMono(connectionProvider.<K, V> getConnectionAsync(ConnectionIntent.WRITE, currentNodeId))
            .flatMap(conn -> scanFunction.apply(conn.reactive(), continuationCursor));
    return mapper.map(nodeIds, currentNodeId, scanCursor);
}
// Adapt a CompletableFuture into a Mono without blocking.
private static <T> Mono<T> getMono(CompletableFuture<T> future) {
    return Mono.fromCompletionStage(future);
}
}
| RedisAdvancedClusterReactiveCommandsImpl |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/QualifiedBeanType.java | {
"start": 1130,
"end": 1245
} | interface ____ a {@link BeanType} that allows qualifiers.
*
* @param <T> The bean type
* @since 4.0.0
*/
public | for |
java | quarkusio__quarkus | devtools/gradle/gradle-model/src/main/java/io/quarkus/gradle/tooling/dependency/ArtifactExtensionDependency.java | {
"start": 280,
"end": 1188
} | class ____ extends ExtensionDependency<ArtifactCoords> {
public ArtifactExtensionDependency(ModuleVersionIdentifier extensionId,
ArtifactCoords deploymentModule,
List<Dependency> conditionalDependencies,
List<Dependency> conditionalDevDeps,
List<ArtifactKey> dependencyConditions) {
super(extensionId, deploymentModule, conditionalDependencies, conditionalDevDeps, dependencyConditions);
}
@Override
public String getDeploymentGroup() {
return getDeploymentModule().getGroupId();
}
@Override
public String getDeploymentName() {
return getDeploymentModule().getArtifactId();
}
@Override
public String getDeploymentVersion() {
return getDeploymentModule().getVersion();
}
@Override
public boolean isProjectDependency() {
return false;
}
}
| ArtifactExtensionDependency |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenIT0135EjbLifecycleTest.java | {
"start": 946,
"end": 2064
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test default binding of goals for "ejb" lifecycle.
*
* @throws Exception in case of failure
*/
@Test
public void testit0135() throws Exception {
File testDir = extractResources("/it0135");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.deleteDirectory("target");
verifier.setAutoclean(false);
verifier.addCliArgument("deploy");
verifier.execute();
verifier.verifyFilePresent("target/resources-resources.txt");
verifier.verifyFilePresent("target/compiler-compile.txt");
verifier.verifyFilePresent("target/resources-test-resources.txt");
verifier.verifyFilePresent("target/compiler-test-compile.txt");
verifier.verifyFilePresent("target/surefire-test.txt");
verifier.verifyFilePresent("target/ejb-ejb.txt");
verifier.verifyFilePresent("target/install-install.txt");
verifier.verifyFilePresent("target/deploy-deploy.txt");
verifier.verifyErrorFreeLog();
}
}
| MavenIT0135EjbLifecycleTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.