language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/support/ShortcutConfigurableNonRestrictiveTests.java
|
{
"start": 1496,
"end": 2988
}
|
class ____ {
@Autowired
BeanFactory beanFactory;
@Autowired
ConfigurableEnvironment env;
private SpelExpressionParser parser;
@Test
public void testNormalizeDefaultTypeWithSpelAndPropertyReferenceEnabled() {
    // A SpEL bean-property reference must be evaluated during normalization,
    // while plain values pass through untouched.
    parser = new SpelExpressionParser();
    ShortcutConfigurable configurable = new ShortcutConfigurable() {
        @Override
        public List<String> shortcutFieldOrder() {
            return Arrays.asList("bean", "arg1");
        }
    };
    Map<String, String> rawArgs = new HashMap<>();
    rawArgs.put("barproperty", "#{@bar.property}");
    rawArgs.put("arg1", "val1");
    Map<String, Object> normalized = ShortcutType.DEFAULT.normalize(rawArgs, configurable, parser, this.beanFactory);
    assertThat(normalized).isNotNull().containsEntry("barproperty", 42).containsEntry("arg1", "val1");
}
@Test
public void testNormalizeDefaultTypeWithSpelAndMethodReferenceEnabled() {
    // A SpEL bean-method reference must be invoked during normalization,
    // while plain values pass through untouched.
    parser = new SpelExpressionParser();
    ShortcutConfigurable configurable = new ShortcutConfigurable() {
        @Override
        public List<String> shortcutFieldOrder() {
            return Arrays.asList("bean", "arg1");
        }
    };
    Map<String, String> rawArgs = new HashMap<>();
    rawArgs.put("barmethod", "#{@bar.myMethod}");
    rawArgs.put("arg1", "val1");
    Map<String, Object> normalized = ShortcutType.DEFAULT.normalize(rawArgs, configurable, parser, this.beanFactory);
    assertThat(normalized).isNotNull().containsEntry("barmethod", 42).containsEntry("arg1", "val1");
}
@SpringBootConfiguration
protected static
|
ShortcutConfigurableNonRestrictiveTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/discovery/AoclOutputBasedDiscoveryStrategy.java
|
{
"start": 1488,
"end": 2031
}
|
class ____
    implements FPGADiscoveryStrategy {

  /** Vendor plugin that performs the actual device probing. */
  private final AbstractFpgaVendorPlugin plugin;

  public AoclOutputBasedDiscoveryStrategy(AbstractFpgaVendorPlugin fpgaPlugin) {
    this.plugin = fpgaPlugin;
  }

  /**
   * Discovers FPGA devices by delegating to the vendor plugin.
   *
   * @return a non-empty list of detected devices
   * @throws ResourceHandlerException if the plugin reports no devices
   */
  @Override
  public List<FpgaDevice> discover() throws ResourceHandlerException {
    List<FpgaDevice> devices =
        plugin.discover(FpgaDiscoverer.MAX_EXEC_TIMEOUT_MS);
    if (devices.isEmpty()) {
      throw new ResourceHandlerException("No FPGA devices detected!");
    }
    return devices;
  }
}
|
AoclOutputBasedDiscoveryStrategy
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/time/JodaPlusMinusLong.java
|
{
"start": 2266,
"end": 4486
}
|
// Error Prone check that flags Joda-Time plus(long)/minus(long) calls, which are
// easy to misread as plus(Duration); suggests wrapping the long in a Duration or,
// when the argument is someDuration.getMillis(), passing the Duration directly.
class ____ extends BugChecker implements MethodInvocationTreeMatcher {
// Joda types whose plus/minus(long) overloads are flagged.
private static final ImmutableSet<String> TYPES =
ImmutableSet.of("DateMidnight", "DateTime", "Duration", "Instant");
private static final ImmutableSet<String> METHODS = ImmutableSet.of("plus", "minus");
private static final Matcher<ExpressionTree> MATCHER =
Matchers.allOf(
buildMatcher(),
// Allow usage by JodaTime itself
Matchers.not(Matchers.packageStartsWith("org.joda.time")));
// Recognizes someReadableDuration.getMillis(), so the fix can unwrap it.
private static final Matcher<ExpressionTree> DURATION_GET_MILLIS_MATCHER =
MethodMatchers.instanceMethod()
.onDescendantOf("org.joda.time.ReadableDuration")
.named("getMillis");
// Builds the cross product of TYPES x METHODS restricted to the (long) overload.
private static Matcher<ExpressionTree> buildMatcher() {
List<Matcher<ExpressionTree>> matchers = new ArrayList<>();
for (String type : TYPES) {
for (String method : METHODS) {
matchers.add(
Matchers.instanceMethod()
.onExactClass("org.joda.time." + type)
.named(method)
.withParameters("long"));
}
}
return Matchers.anyOf(matchers);
}
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!MATCHER.matches(tree, state)) {
return Description.NO_MATCH;
}
SuggestedFix.Builder builder = SuggestedFix.builder();
// plus/minus(long) takes exactly one argument, so only the first is inspected.
ExpressionTree firstArgumentTree = tree.getArguments().getFirst();
String firstArgumentReplacement;
if (DURATION_GET_MILLIS_MATCHER.matches(firstArgumentTree, state)) {
// This is passing {@code someDuration.getMillis()} as the parameter. we can replace this
// with {@code someDuration}.
firstArgumentReplacement = state.getSourceForNode(ASTHelpers.getReceiver(firstArgumentTree));
} else {
// Wrap the long as a Duration.
firstArgumentReplacement =
SuggestedFixes.qualifyType(state, builder, "org.joda.time.Duration")
+ ".millis("
+ state.getSourceForNode(firstArgumentTree)
+ ")";
}
builder.replace(firstArgumentTree, firstArgumentReplacement);
return describeMatch(tree, builder.build());
}
}
|
JodaPlusMinusLong
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
|
{
"start": 2879,
"end": 3027
}
|
class ____ not thread-safe.
* </p>
*
* @see DurationUtils#of(FailableRunnable)
* @see DurationUtils#of(FailableConsumer)
* @since 2.0
*/
public
|
is
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/inheritance/BaseMapper.java
|
{
"start": 707,
"end": 767
}
|
/**
 * Generic mapper contract shared by the inheritance tests.
 *
 * @param <T> entity type returned by lookups
 */
interface ____<T> {

    /**
     * Loads a single entity by its identifier.
     *
     * @param id the primary key
     * @return the matching entity
     */
    T retrieveById(Integer id);
}
|
BaseMapper
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/util/ClassLoaderUtil.java
|
{
"start": 5121,
"end": 5245
}
|
class ____.
*
* @param cnfe The ClassNotFoundException that defines the name of the class.
* @param cl The
|
loader
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java
|
{
"start": 32485,
"end": 32885
}
|
class ____ provided as part of the connector properties
1. if the version is not provided,
- if the converter is packaged with the connector then, the packaged version is used
- if the converter is not packaged with the connector, the latest version is used
2. if the version is provided, the provided version is used
B. If the converter
|
is
|
java
|
resilience4j__resilience4j
|
resilience4j-spring-boot2/src/test/java/io/github/resilience4j/circuitbreaker/autoconfigure/CircuitBreakerConfigurationOnMissingBeanTest.java
|
{
"start": 1709,
"end": 2951
}
|
class ____ {
@Autowired
private ConfigWithOverrides configWithOverrides;
@Autowired
private CircuitBreakerRegistry circuitBreakerRegistry;
@Autowired
private CircuitBreakerAspect circuitBreakerAspect;
@Autowired
private EventConsumerRegistry<CircuitBreakerEvent> circuitEventConsumerBreakerRegistry;
@Test
public void testAllBeansFromCircuitBreakerConfigurationHasOnMissingBean()
    throws NoSuchMethodException {
    // Every bean method of the base configuration must have a matching,
    // identically-annotated counterpart in the OnMissingBean configuration.
    TestUtils.assertAnnotations(CircuitBreakerConfiguration.class,
        CircuitBreakerConfigurationOnMissingBean.class);
}
@Test
public void testAllCircuitBreakerConfigurationBeansOverridden() {
    // User-supplied beans must win over the auto-configured defaults.
    ConfigWithOverrides overrides = configWithOverrides;
    assertEquals(circuitBreakerRegistry, overrides.circuitBreakerRegistry);
    assertEquals(circuitBreakerAspect, overrides.circuitBreakerAspect);
    assertEquals(circuitEventConsumerBreakerRegistry, overrides.circuitEventConsumerBreakerRegistry);
}
@Configuration
public static
|
CircuitBreakerConfigurationOnMissingBeanTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForInt.java
|
{
"start": 4617,
"end": 5424
}
|
class ____ extends KeyExtractorForInt {
    private final IntBlock block;

    MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) {
        super(encoder, nul, nonNul);
        this.block = block;
    }

    /**
     * Writes the largest int among the position's values, or the null
     * marker when the position holds no values.
     */
    @Override
    public int writeKey(BreakingBytesRefBuilder key, int position) {
        int count = block.getValueCount(position);
        if (count == 0) {
            return nul(key);
        }
        int first = block.getFirstValueIndex(position);
        int best = block.getInt(first);
        for (int i = first + 1; i < first + count; i++) {
            best = Math.max(best, block.getInt(i));
        }
        return nonNul(key, best);
    }
}
}
|
MaxFromUnorderedBlock
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/deser/BasicDeserializerFactory.java
|
{
"start": 79754,
"end": 80033
}
|
class ____ contain default mappings for abstract JDK {@link java.util.Collection}
* and {@link java.util.Map} types. Separated out here to defer cost of creating lookups
* until mappings are actually needed.
*/
@SuppressWarnings("rawtypes")
protected static
|
to
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/monitor/collector/NamingSubAndPubMetricsCollector.java
|
{
"start": 1471,
"end": 3873
}
|
// Periodically (every DELAY_SECONDS) counts publishers and subscribers across the
// v1 (ephemeral + persistent ip-port) and v2 (connection-based) client managers
// and publishes the totals to MetricsMonitor gauges.
class ____ {
private static final long DELAY_SECONDS = 5;
// Single daemon thread so the collector never blocks JVM shutdown.
// NOTE(review): the executor is static and never shut down — presumably this
// bean lives for the whole server lifetime; confirm.
private static ScheduledExecutorService executorService = ExecutorFactory.newSingleScheduledExecutorService(r -> {
Thread thread = new Thread(r, "nacos.naming.monitor.NamingSubAndPubMetricsCollector");
thread.setDaemon(true);
return thread;
});
@Autowired
public NamingSubAndPubMetricsCollector(ConnectionBasedClientManager connectionBasedClientManager,
EphemeralIpPortClientManager ephemeralIpPortClientManager, PersistentIpPortClientManager persistentIpPortClientManager) {
executorService.scheduleWithFixedDelay(() -> {
// v1 totals: ephemeral and persistent ip-port clients combined.
int v1SubscriberCount = 0;
int v1PublisherCount = 0;
for (String clientId : ephemeralIpPortClientManager.allClientId()) {
Client client = ephemeralIpPortClientManager.getClient(clientId);
if (null != client) {
v1PublisherCount += client.getAllPublishedService().size();
v1SubscriberCount += client.getAllSubscribeService().size();
}
}
for (String clientId : persistentIpPortClientManager.allClientId()) {
Client client = persistentIpPortClientManager.getClient(clientId);
if (null != client) {
v1PublisherCount += client.getAllPublishedService().size();
v1SubscriberCount += client.getAllSubscribeService().size();
}
}
MetricsMonitor.getNamingSubscriber("v1").set(v1SubscriberCount);
MetricsMonitor.getNamingPublisher("v1").set(v1PublisherCount);
// v2 totals: gRPC connection-based clients.
int v2SubscriberCount = 0;
int v2PublisherCount = 0;
for (String clientId : connectionBasedClientManager.allClientId()) {
Client client = connectionBasedClientManager.getClient(clientId);
if (null != client) {
v2PublisherCount += client.getAllPublishedService().size();
v2SubscriberCount += client.getAllSubscribeService().size();
}
}
MetricsMonitor.getNamingSubscriber("v2").set(v2SubscriberCount);
MetricsMonitor.getNamingPublisher("v2").set(v2PublisherCount);
}, DELAY_SECONDS, DELAY_SECONDS, TimeUnit.SECONDS);
}
}
|
NamingSubAndPubMetricsCollector
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/InstanceOfAssertFactoriesTest.java
|
{
"start": 55576,
"end": 56187
}
|
class ____ {

    // A freshly constructed AtomicBoolean starts out false.
    private final Object actual = new AtomicBoolean();

    @Test
    void createAssert() {
        // WHEN
        AtomicBooleanAssert assertion = ATOMIC_BOOLEAN.createAssert(actual);
        // THEN
        assertion.isFalse();
    }

    @Test
    void createAssert_with_ValueProvider() {
        // GIVEN
        ValueProvider<?> provider = mockThatDelegatesTo(type -> actual);
        // WHEN
        AtomicBooleanAssert assertion = ATOMIC_BOOLEAN.createAssert(provider);
        // THEN
        assertion.isFalse();
        verify(provider).apply(AtomicBoolean.class);
    }
}
@Nested
|
AtomicBoolean_Factory
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/Interpolator.java
|
{
"start": 4891,
"end": 9699
}
|
class ____ not available because this JRE does not support JNDI."
+ " JNDI string lookups will not be available, continuing configuration. Ignoring "
+ t);
break;
case LOOKUP_KEY_WEB:
LOGGER.info("Log4j appears to be running in a Servlet environment, but there's no log4j-web module "
+ "available. If you want better web container support, please add the log4j-web JAR to your "
+ "web archive or server lib directory.");
break;
case LOOKUP_KEY_DOCKER:
case LOOKUP_KEY_SPRING:
break;
case LOOKUP_KEY_KUBERNETES:
if (t instanceof NoClassDefFoundError) {
LOGGER.warn("Unable to create Kubernetes lookup due to missing dependency: {}", t.getMessage());
}
break;
default:
LOGGER.error("Unable to create Lookup for {}", lookupKey, t);
}
}
/**
* Resolves the specified variable. This implementation will try to extract
* a variable prefix from the given variable name (the first colon (':') is
* used as prefix separator). It then passes the name of the variable with
* the prefix stripped to the lookup object registered for this prefix. If
* no prefix can be found or if the associated lookup object cannot resolve
* this variable, the default lookup object will be used.
*
* @param event The current LogEvent or null.
* @param var the name of the variable whose value is to be looked up
* @return the value of this variable or <b>null</b> if it cannot be
* resolved
*/
@Override
public String lookup(final LogEvent event, String var) {
    // Thin wrapper over evaluate(): unwrap the result, mapping "no result" to null.
    final LookupResult resolved = evaluate(event, var);
    if (resolved == null) {
        return null;
    }
    return resolved.value();
}
/**
* Resolves the specified variable. This implementation will try to extract
* a variable prefix from the given variable name (the first colon (':') is
* used as prefix separator). It then passes the name of the variable with
* the prefix stripped to the lookup object registered for this prefix. If
* no prefix can be found or if the associated lookup object cannot resolve
* this variable, the default lookup object will be used.
*
* @param event The current LogEvent or null.
* @param var the name of the variable whose value is to be looked up
* @return the value of this variable or <b>null</b> if it cannot be
* resolved
*/
@Override
public LookupResult evaluate(final LogEvent event, String var) {
if (var == null) {
return null;
}
// Split "prefix:name" at the first separator and try the prefixed lookup first.
final int prefixPos = var.indexOf(PREFIX_SEPARATOR);
if (prefixPos >= 0) {
final String prefix = toRootLowerCase(var.substring(0, prefixPos));
final String name = var.substring(prefixPos + 1);
final StrLookup lookup = strLookupMap.get(prefix);
LookupResult value = null;
if (lookup != null) {
value = event == null ? lookup.evaluate(name) : lookup.evaluate(event, name);
}
if (value != null) {
return value;
}
// Prefixed lookup failed: fall through to the default lookup with the
// prefix stripped off.
var = var.substring(prefixPos + 1);
}
if (defaultLookup != null) {
return event == null ? defaultLookup.evaluate(var) : defaultLookup.evaluate(event, var);
}
return null;
}
/** Stores the configuration and propagates it to every configuration-aware delegate. */
@Override
public void setConfiguration(final Configuration configuration) {
    super.setConfiguration(configuration);
    // Propagate
    strLookupMap.values().forEach(lookup -> {
        if (lookup instanceof ConfigurationAware) {
            ((ConfigurationAware) lookup).setConfiguration(configuration);
        }
    });
}
/** Weakly holds the context and propagates it to every context-aware delegate. */
@Override
public void setLoggerContext(final LoggerContext loggerContext) {
    this.loggerContext = new WeakReference<>(loggerContext);
    // Propagate
    strLookupMap.values().forEach(lookup -> {
        if (lookup instanceof LoggerContextAware) {
            ((LoggerContextAware) lookup).setLoggerContext(loggerContext);
        }
    });
}
/**
 * Lists the registered lookup prefixes, e.g. {@code {date, env, java}}.
 * Returns the empty string (no braces) when no lookups are registered,
 * matching the historical StringBuilder-based output exactly.
 */
@Override
public String toString() {
    // setEmptyValue("") reproduces the old behavior for an empty map.
    final java.util.StringJoiner joiner = new java.util.StringJoiner(", ", "{", "}").setEmptyValue("");
    for (final String name : strLookupMap.keySet()) {
        joiner.add(name);
    }
    return joiner.toString();
}
}
|
is
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/internal/matchers/apachecommons/EqualsBuilderTest.java
|
{
"start": 819,
"end": 1558
}
|
/** Simple value holder used to exercise EqualsBuilder. */
class ____ {
    private int a;

    public TestObject() {}

    public TestObject(int a) {
        this.a = a;
    }

    public boolean equals(Object o) {
        if (o == null) {
            return false;
        }
        if (o == this) {
            return true;
        }
        if (o.getClass() != getClass()) {
            return false;
        }
        TestObject rhs = (TestObject) o;
        return (a == rhs.a);
    }

    // Fixed: previously returned super.hashCode() (identity hash), which broke
    // the equals/hashCode contract — two equal instances could report different
    // hashes. Hash on the same field equals() compares.
    public int hashCode() {
        return a;
    }

    public void setA(int a) {
        this.a = a;
    }

    public int getA() {
        return a;
    }
}
static
|
TestObject
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
|
{
"start": 42758,
"end": 43630
}
|
class ____ implements FileSystemAccess.FileSystemExecutor<Map> {

  /** File whose ACLs are fetched; immutable once constructed. */
  private final Path path;

  /**
   * Creates an executor for getting the ACLs for a file.
   *
   * @param path the path to retrieve the ACLs.
   */
  public FSAclStatus(String path) {
    this.path = new Path(path);
  }

  /**
   * Executes the filesystem operation: fetches the ACL status of the
   * configured path and renders it as a JSON-friendly map.
   *
   * @param fs filesystem instance to use.
   *
   * @return a Map object (JSON friendly) with the file status.
   *
   * @throws IOException thrown if an IO error occurred.
   */
  @Override
  public Map execute(FileSystem fs) throws IOException {
    return aclStatusToJSON(fs.getAclStatus(path));
  }
}
/**
* Executor that performs a set-replication FileSystemAccess files system operation.
*/
@InterfaceAudience.Private
public static
|
FSAclStatus
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/collection/spi/AbstractPersistentCollection.java
|
{
"start": 31590,
"end": 31766
}
|
// A DelayedOperation whose queued value can be substituted in place.
// NOTE(review): presumably replace() swaps original-entity references for their
// merged copies via copyCache — confirm against the concrete implementations.
interface ____<E> extends DelayedOperation<E> {
void replace(CollectionPersister collectionPersister, Map<Object,Object> copyCache);
}
protected abstract
|
ValueDelayedOperation
|
java
|
apache__dubbo
|
dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/script/config/AppScriptStateRouter.java
|
{
"start": 2497,
"end": 7277
}
|
/**
 * State router that routes invocations through a per-application script rule
 * fetched from the dynamic rule repository (key: {@code <app>.script-router}).
 * Rule changes rebuild the delegate {@link ScriptStateRouter}.
 */
class ____<T> extends AbstractStateRouter<T> implements ConfigurationListener {

    private static final ErrorTypeAwareLogger logger =
            LoggerFactory.getErrorTypeAwareLogger(AppScriptStateRouter.class);

    private static final String RULE_SUFFIX = ".script-router";

    /** Parsed rule; null until a rule arrives or after a DELETED event. */
    private ScriptRule scriptRule;

    /** Delegate built from {@link #scriptRule}; kept in sync with it. */
    private ScriptStateRouter<T> scriptRouter;

    /** Provider application whose rule key this router is subscribed to. */
    private String application;

    public AppScriptStateRouter(URL url) {
        super(url);
    }

    /**
     * Routes through the script router when a valid, enabled rule is present;
     * otherwise returns the invokers unchanged.
     */
    @Override
    protected BitList<Invoker<T>> doRoute(
            BitList<Invoker<T>> invokers,
            URL url,
            Invocation invocation,
            boolean needToPrintMessage,
            Holder<RouterSnapshotNode<T>> routerSnapshotNodeHolder,
            Holder<String> messageHolder)
            throws RpcException {
        // Fix: also guard against scriptRule == null. After a DELETED config
        // event (or setScriptRule(null)) scriptRouter could still be non-null
        // while scriptRule was null, and scriptRule.isValid() threw an NPE.
        if (scriptRouter == null || scriptRule == null || !scriptRule.isValid() || !scriptRule.isEnabled()) {
            if (needToPrintMessage) {
                messageHolder.set(
                        "Directly return from script router. Reason: Invokers from previous router is empty or script is not enabled. Script rule is: "
                                + (scriptRule == null ? "null" : scriptRule.getRawRule()));
            }
            return invokers;
        }
        invokers = scriptRouter.route(invokers, url, invocation, needToPrintMessage, routerSnapshotNodeHolder);
        if (needToPrintMessage) {
            // Keeps whatever message the delegate recorded in the holder.
            messageHolder.set(messageHolder.get());
        }
        return invokers;
    }

    /**
     * Applies a script-rule configuration change: clears the rule (and the
     * delegate router) on DELETED, otherwise parses the new rule and rebuilds
     * the delegate from it.
     */
    @Override
    public synchronized void process(ConfigChangedEvent event) {
        if (logger.isDebugEnabled()) {
            logger.debug("Notification of script rule change, type is: " + event.getChangeType() + ", raw rule is:\n "
                    + event.getContent());
        }
        try {
            if (event.getChangeType().equals(ConfigChangeType.DELETED)) {
                // Drop both halves of the pair so doRoute() falls through cleanly.
                this.scriptRule = null;
                this.scriptRouter = null;
            } else {
                this.scriptRule = ScriptRule.parse(event.getContent());
                URL scriptUrl = getUrl().addParameter(
                                TYPE_KEY,
                                isEmpty(scriptRule.getType()) ? DEFAULT_SCRIPT_TYPE_KEY : scriptRule.getType())
                        .addParameterAndEncoded(RULE_KEY, scriptRule.getScript())
                        .addParameter(FORCE_KEY, scriptRule.isForce())
                        .addParameter(RUNTIME_KEY, scriptRule.isRuntime());
                scriptRouter = new ScriptStateRouter<>(scriptUrl);
            }
        } catch (Exception e) {
            // Message corrected: this is the script router, not the tag router.
            logger.error(
                    CLUSTER_TAG_ROUTE_INVALID,
                    "Failed to parse the raw script router rule",
                    "",
                    "Failed to parse the raw script router rule and it will not take effect, please check if the "
                            + "rule matches with the template, the raw rule is:\n ",
                    e);
        }
    }

    /**
     * On address change, (re)subscribes to the rule key of the providers'
     * application and applies any rule already present in the repository.
     */
    @Override
    public void notify(BitList<Invoker<T>> invokers) {
        if (CollectionUtils.isEmpty(invokers)) {
            return;
        }
        Invoker<T> invoker = invokers.get(0);
        URL url = invoker.getUrl();
        String providerApplication = url.getRemoteApplication();
        if (isEmpty(providerApplication)) {
            // Message corrected: this is the script router, not the tag router.
            logger.error(
                    CLUSTER_TAG_ROUTE_EMPTY,
                    "script router get providerApplication is empty",
                    "",
                    "AppScriptStateRouter must getConfig from or subscribe to a specific application, but the application "
                            + "in this router is not specified.");
            return;
        }
        synchronized (this) {
            if (!providerApplication.equals(application)) {
                // Move the listener registration over to the new application.
                if (StringUtils.isNotEmpty(application)) {
                    this.getRuleRepository().removeListener(application + RULE_SUFFIX, this);
                }
                String key = providerApplication + RULE_SUFFIX;
                this.getRuleRepository().addListener(key, this);
                application = providerApplication;
                String rawRule = this.getRuleRepository().getRule(key, DynamicConfiguration.DEFAULT_GROUP);
                if (StringUtils.isNotEmpty(rawRule)) {
                    this.process(new ConfigChangedEvent(key, DynamicConfiguration.DEFAULT_GROUP, rawRule));
                }
            }
        }
    }

    /** Unsubscribes from the rule repository on shutdown. */
    @Override
    public void stop() {
        if (StringUtils.isNotEmpty(application)) {
            this.getRuleRepository().removeListener(application + RULE_SUFFIX, this);
        }
    }

    // for testing purpose
    public void setScriptRule(ScriptRule scriptRule) {
        this.scriptRule = scriptRule;
    }
}
|
AppScriptStateRouter
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/language/simple/SimpleInheritanceIssueTest.java
|
{
"start": 2871,
"end": 3013
}
|
class ____ {
    /**
     * Decodes the raw bytes as UTF-8 text.
     *
     * <p>The original used {@code new String(byte[])}, which decodes with the
     * platform default charset and makes results depend on the JVM's
     * {@code file.encoding}; UTF-8 is pinned for determinism.
     *
     * @param input raw bytes to decode
     * @return the decoded string
     */
    public String parse(byte[] input) {
        return new String(input, java.nio.charset.StandardCharsets.UTF_8);
    }
}
public static
|
MySingleParser
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/naming/Described.java
|
{
"start": 689,
"end": 798
}
|
interface ____ types that are described by a description.
*
* @author graemerocher
* @since 1.0
*/
public
|
for
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/json/jackson/DatabindCodec.java
|
{
"start": 1128,
"end": 5623
}
|
// Jackson-databind-backed JSON codec: a single shared ObjectMapper (with the
// Vert.x module and C-style comments enabled) handles all (de)serialization.
class ____ extends JacksonCodec {
private static final ObjectMapper mapper = new ObjectMapper(JacksonCodec.factory);
static {
initialize();
}
// One-time mapper configuration, run from the static initializer.
private static void initialize() {
// Non-standard JSON but we allow C style comments in our JSON
mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
VertxModule module = new VertxModule();
mapper.registerModule(module);
}
/**
* @return the {@link ObjectMapper} used for data binding.
*/
public static ObjectMapper mapper() {
return mapper;
}
// Converts an already-parsed value tree to clazz; when the caller asked for
// plain Object, adapt() wraps List/Map results as JsonArray/JsonObject.
@Override
public <T> T fromValue(Object json, Class<T> clazz) {
T value = DatabindCodec.mapper.convertValue(json, clazz);
if (clazz == Object.class) {
value = (T) adapt(value);
}
return value;
}
// TypeReference overload of fromValue(); same Object-adaptation rule applies.
public <T> T fromValue(Object json, TypeReference<T> type) {
T value = DatabindCodec.mapper.convertValue(json, type);
if (type.getType() == Object.class) {
value = (T) adapt(value);
}
return value;
}
@Override
public <T> T fromString(String str, Class<T> clazz) throws DecodeException {
return fromParser(createParser(str), clazz);
}
public <T> T fromString(String str, TypeReference<T> typeRef) throws DecodeException {
return fromParser(createParser(str), typeRef);
}
@Override
public <T> T fromBuffer(Buffer buf, Class<T> clazz) throws DecodeException {
return fromParser(createParser(buf), clazz);
}
public <T> T fromBuffer(Buffer buf, TypeReference<T> typeRef) throws DecodeException {
return fromParser(createParser(buf), typeRef);
}
// Streams directly off the buffer's underlying ByteBuf — no intermediate copy.
public static JsonParser createParser(BufferInternal buf) {
try {
return DatabindCodec.mapper.getFactory().createParser((InputStream) new ByteBufInputStream(buf.getByteBuf()));
} catch (IOException e) {
throw new DecodeException("Failed to decode:" + e.getMessage(), e);
}
}
public static JsonParser createParser(String str) {
try {
return DatabindCodec.mapper.getFactory().createParser(str);
} catch (IOException e) {
throw new DecodeException("Failed to decode:" + e.getMessage(), e);
}
}
// Reads one value and rejects any trailing token, so "{}garbage" fails;
// the parser is always closed, even on error.
public static <T> T fromParser(JsonParser parser, Class<T> type) throws DecodeException {
T value;
JsonToken remaining;
try {
value = DatabindCodec.mapper.readValue(parser, type);
remaining = parser.nextToken();
} catch (Exception e) {
throw new DecodeException("Failed to decode:" + e.getMessage(), e);
} finally {
close(parser);
}
if (remaining != null) {
throw new DecodeException("Unexpected trailing token");
}
if (type == Object.class) {
value = (T) adapt(value);
}
return value;
}
// NOTE(review): unlike the Class<T> overload above, this one does not check
// for trailing tokens — confirm whether that asymmetry is intentional.
private static <T> T fromParser(JsonParser parser, TypeReference<T> type) throws DecodeException {
T value;
try {
value = DatabindCodec.mapper.readValue(parser, type);
} catch (Exception e) {
throw new DecodeException("Failed to decode:" + e.getMessage(), e);
} finally {
close(parser);
}
if (type.getType() == Object.class) {
value = (T) adapt(value);
}
return value;
}
// Serializes to a JSON string, optionally pretty-printed.
// NOTE(review): the wrapped exception drops the cause (message only) —
// presumably deliberate; confirm before changing.
@Override
public String toString(Object object, boolean pretty) throws EncodeException {
try {
String result;
if (pretty) {
result = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(object);
} else {
result = mapper.writeValueAsString(object);
}
return result;
} catch (Exception e) {
throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
}
}
// Serializes to a Buffer of JSON bytes, optionally pretty-printed.
@Override
public Buffer toBuffer(Object object, boolean pretty) throws EncodeException {
try {
byte[] result;
if (pretty) {
result = mapper.writerWithDefaultPrettyPrinter().writeValueAsBytes(object);
} else {
result = mapper.writeValueAsBytes(object);
}
return Buffer.buffer(result);
} catch (Exception e) {
throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
}
}
// Wraps raw Jackson containers in Vert.x types: List -> JsonArray,
// Map -> JsonObject; everything else passes through unchanged.
private static Object adapt(Object o) {
try {
if (o instanceof List) {
List list = (List) o;
return new JsonArray(list);
} else if (o instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) o;
return new JsonObject(map);
}
return o;
} catch (Exception e) {
throw new DecodeException("Failed to decode: " + e.getMessage());
}
}
}
|
DatabindCodec
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/config/WebFluxViewResolutionIntegrationTests.java
|
{
"start": 6943,
"end": 7134
}
|
class ____ implements WebFluxConfigurer {

    /** Controller serving the UTF-8 index view used by these tests. */
    @Bean
    public SampleController sampleController() {
        SampleController controller = new SampleController("index_UTF-8");
        return controller;
    }
}
@Controller
static
|
AbstractWebFluxConfig
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
|
{
"start": 6440,
"end": 37231
}
|
/**
 * Holds the launch context / resource set pair for a container
 * re-initialization, plus the previous pair so the change can be rolled back.
 */
class ____ {
    private final ContainerLaunchContext newLaunchContext;
    private final ResourceSet newResourceSet;

    // Rollback state
    private final ContainerLaunchContext oldLaunchContext;
    private final ResourceSet oldResourceSet;

    private boolean isRollback = false;

    private ReInitializationContext(ContainerLaunchContext newLaunchContext,
        ResourceSet newResourceSet,
        ContainerLaunchContext oldLaunchContext,
        ResourceSet oldResourceSet) {
        this.newLaunchContext = newLaunchContext;
        this.newResourceSet = newResourceSet;
        this.oldLaunchContext = oldLaunchContext;
        this.oldResourceSet = oldResourceSet;
    }

    /** A rollback is only possible when the previous launch context was kept. */
    private boolean canRollback() {
        return (oldLaunchContext != null);
    }

    /** Combines the current resource set with the new one, except on rollback/restart. */
    private ResourceSet mergedResourceSet(ResourceSet current) {
        if (isRollback) {
            // No merging should be done for rollback
            return newResourceSet;
        }
        if (current == newResourceSet) {
            // This happens during a restart
            return current;
        }
        return ResourceSet.merge(current, newResourceSet);
    }

    /** Builds the context that restores the old pair; it carries no further rollback state. */
    private ReInitializationContext createContextForRollback() {
        ReInitializationContext rollbackContext = new ReInitializationContext(
            oldLaunchContext, oldResourceSet, null, null);
        rollbackContext.isRollback = true;
        return rollbackContext;
    }
}
// NOTE(review): SimpleDateFormat is not thread-safe — presumably all uses are
// under this container's locks; confirm.
private final SimpleDateFormat dateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
// Read/write halves of a single ReentrantReadWriteLock guarding container state.
private final Lock readLock;
private final Lock writeLock;
private final Dispatcher dispatcher;
private final NMStateStoreService stateStore;
private final Credentials credentials;
private final NodeManagerMetrics metrics;
private final long[] localizationCounts =
new long[LocalizationCounter.values().length];
private volatile ContainerLaunchContext launchContext;
private volatile ContainerTokenIdentifier containerTokenIdentifier;
private final ContainerId containerId;
private final String user;
private int version;
private int exitCode = ContainerExitStatus.INVALID;
// Accumulated diagnostics text, capped at diagnosticsMaxSize.
private final StringBuilder diagnostics;
private final int diagnosticsMaxSize;
private boolean wasLaunched;
private boolean wasPaused;
private long containerLocalizationStartTime;
private long containerLaunchStartTime;
private ContainerMetrics containerMetrics;
private static Clock clock = SystemClock.getInstance();
// Retry configuration and sliding-window retry bookkeeping for relaunches.
private ContainerRetryContext containerRetryContext;
private SlidingWindowRetryPolicy.RetryContext windowRetryContext;
private SlidingWindowRetryPolicy retryPolicy;
private String csiVolumesRootDir;
private String workDir;
private String logDir;
private String host;
private String ips;
private String exposedPorts;
// Pending re-initialization data; volatile because it is read outside the locks.
private volatile ReInitializationContext reInitContext;
private volatile boolean isReInitializing = false;
private volatile boolean isMarkeForKilling = false;
private Object containerRuntimeData;
/** The NM-wide configuration - not specific to this container */
private final Configuration daemonConf;
private final long startTime;
private static final Logger LOG =
LoggerFactory.getLogger(ContainerImpl.class);
// whether container has been recovered after a restart
private RecoveredContainerStatus recoveredStatus =
RecoveredContainerStatus.REQUESTED;
// whether container was marked as killed after recovery
private boolean recoveredAsKilled = false;
private Context context;
private ResourceSet resourceSet;
private ResourceMappings resourceMappings;
// Convenience constructor: delegates to the full constructor using the
// current system time as the container start timestamp.
public ContainerImpl(Configuration conf, Dispatcher dispatcher,
ContainerLaunchContext launchContext, Credentials creds,
NodeManagerMetrics metrics,
ContainerTokenIdentifier containerTokenIdentifier, Context context) {
this(conf, dispatcher, launchContext, creds, metrics,
containerTokenIdentifier, context, SystemClock.getInstance().getTime());
}
/**
 * Creates a container with an explicit start timestamp: wires state/locks,
 * optionally registers per-container metrics, configures the retry policy,
 * and builds the container state machine starting in NEW.
 */
public ContainerImpl(Configuration conf, Dispatcher dispatcher,
    ContainerLaunchContext launchContext, Credentials creds,
    NodeManagerMetrics metrics,
    ContainerTokenIdentifier containerTokenIdentifier, Context context,
    long startTs) {
  this.startTime = startTs;
  this.daemonConf = conf;
  this.dispatcher = dispatcher;
  this.stateStore = context.getNMStateStore();
  this.version = containerTokenIdentifier.getVersion();
  this.launchContext = launchContext;
  this.diagnosticsMaxSize = conf.getInt(
      YarnConfiguration.NM_CONTAINER_DIAGNOSTICS_MAXIMUM_SIZE,
      YarnConfiguration.DEFAULT_NM_CONTAINER_DIAGNOSTICS_MAXIMUM_SIZE);
  this.containerTokenIdentifier = containerTokenIdentifier;
  this.containerId = containerTokenIdentifier.getContainerID();
  this.diagnostics = new StringBuilder();
  this.credentials = creds;
  this.metrics = metrics;
  user = containerTokenIdentifier.getApplicationSubmitter();
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  this.readLock = readWriteLock.readLock();
  this.writeLock = readWriteLock.writeLock();
  this.context = context;
  boolean containerMetricsEnabled =
      conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
  if (containerMetricsEnabled) {
    long flushPeriod =
        conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
            YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);
    long unregisterDelay = conf.getLong(
        YarnConfiguration.NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS,
        YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS);
    containerMetrics = ContainerMetrics
        .forContainer(containerId, flushPeriod, unregisterDelay);
    containerMetrics.recordStartTime(clock.getTime());
  }
  // Configure the Retry Context
  this.containerRetryContext = configureRetryContext(
      conf, launchContext, this.containerId);
  this.windowRetryContext = new SlidingWindowRetryPolicy
      .RetryContext(containerRetryContext);
  this.retryPolicy = new SlidingWindowRetryPolicy(clock);
  stateMachine = stateMachineFactory.make(this, ContainerState.NEW,
      context.getContainerStateTransitionListener());
  // Note: a redundant second "this.context = context;" that used to sit here
  // (context was already assigned above) has been removed.
  this.resourceSet = new ResourceSet();
  this.resourceMappings = new ResourceMappings();
}
// Picks the container's retry context (from the launch context when present,
// otherwise NEVER_RETRY) and enforces the NM's configured minimum restart
// interval on any retrying policy.
private static ContainerRetryContext configureRetryContext(
Configuration conf, ContainerLaunchContext launchContext,
ContainerId containerId) {
ContainerRetryContext context;
if (launchContext != null
&& launchContext.getContainerRetryContext() != null) {
context = launchContext.getContainerRetryContext();
} else {
context = ContainerRetryContext.NEVER_RETRY_CONTEXT;
}
int minimumRestartInterval = conf.getInt(
YarnConfiguration.NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS);
// Clamp too-small retry intervals up to the NM minimum; this mutates the
// retry context supplied by the application.
if (context.getRetryPolicy() != ContainerRetryPolicy.NEVER_RETRY
&& context.getRetryInterval() < minimumRestartInterval) {
LOG.info("Set restart interval to minimum value " + minimumRestartInterval
+ "ms for container " + containerId);
context.setRetryInterval(minimumRestartInterval);
}
return context;
}
// constructor for a recovered container
public ContainerImpl(Configuration conf, Dispatcher dispatcher,
ContainerLaunchContext launchContext, Credentials creds,
NodeManagerMetrics metrics,
ContainerTokenIdentifier containerTokenIdentifier, Context context,
RecoveredContainerState rcs) {
this(conf, dispatcher, launchContext, creds, metrics,
containerTokenIdentifier, context, rcs.getStartTime());
this.recoveredStatus = rcs.getStatus();
this.exitCode = rcs.getExitCode();
this.recoveredAsKilled = rcs.getKilled();
this.diagnostics.append(rcs.getDiagnostics());
this.version = rcs.getVersion();
this.windowRetryContext.setRemainingRetries(
rcs.getRemainingRetryAttempts());
this.windowRetryContext.setRestartTimes(rcs.getRestartTimes());
this.workDir = rcs.getWorkDir();
this.logDir = rcs.getLogDir();
this.resourceMappings = rcs.getResourceMappings();
}
private static final ContainerDiagnosticsUpdateTransition UPDATE_DIAGNOSTICS_TRANSITION =
new ContainerDiagnosticsUpdateTransition();
// State Machine for each container.
private static StateMachineFactory
<ContainerImpl, ContainerState, ContainerEventType, ContainerEvent>
stateMachineFactory =
new StateMachineFactory<ContainerImpl, ContainerState, ContainerEventType, ContainerEvent>(ContainerState.NEW)
// From NEW State
.addTransition(ContainerState.NEW,
EnumSet.of(ContainerState.LOCALIZING,
ContainerState.SCHEDULED,
ContainerState.LOCALIZATION_FAILED,
ContainerState.DONE),
ContainerEventType.INIT_CONTAINER, new RequestResourcesTransition())
.addTransition(ContainerState.NEW, ContainerState.NEW,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.NEW, ContainerState.DONE,
ContainerEventType.KILL_CONTAINER, new KillOnNewTransition())
.addTransition(ContainerState.NEW, ContainerState.NEW,
ContainerEventType.UPDATE_CONTAINER_TOKEN, new UpdateTransition())
// From LOCALIZING State
.addTransition(ContainerState.LOCALIZING,
EnumSet.of(ContainerState.LOCALIZING, ContainerState.SCHEDULED),
ContainerEventType.RESOURCE_LOCALIZED, new LocalizedTransition())
.addTransition(ContainerState.LOCALIZING,
ContainerState.LOCALIZATION_FAILED,
ContainerEventType.RESOURCE_FAILED,
new ResourceFailedTransition())
.addTransition(ContainerState.LOCALIZING, ContainerState.LOCALIZING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.LOCALIZING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER,
new KillBeforeRunningTransition())
.addTransition(ContainerState.LOCALIZING, ContainerState.LOCALIZING,
ContainerEventType.UPDATE_CONTAINER_TOKEN, new UpdateTransition())
// From LOCALIZATION_FAILED State
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
new LocalizationFailedToDoneTransition())
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.LOCALIZATION_FAILED,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
// container not launched so kill is a no-op
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.LOCALIZATION_FAILED,
EnumSet.of(ContainerEventType.KILL_CONTAINER,
ContainerEventType.PAUSE_CONTAINER))
// container cleanup triggers a release of all resources
// regardless of whether they were localized or not
// LocalizedResource handles release event in all states
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.LOCALIZATION_FAILED,
ContainerEventType.RESOURCE_LOCALIZED)
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.LOCALIZATION_FAILED,
ContainerEventType.RESOURCE_FAILED)
.addTransition(ContainerState.LOCALIZATION_FAILED,
ContainerState.LOCALIZATION_FAILED,
ContainerEventType.UPDATE_CONTAINER_TOKEN, new UpdateTransition())
// From SCHEDULED State
.addTransition(ContainerState.SCHEDULED, ContainerState.RUNNING,
ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition())
.addTransition(ContainerState.SCHEDULED, ContainerState.PAUSED,
ContainerEventType.RECOVER_PAUSED_CONTAINER,
new RecoveredContainerTransition())
.addTransition(ContainerState.SCHEDULED, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.SCHEDULED, ContainerState.SCHEDULED,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.SCHEDULED, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER,
new KillTransition())
.addTransition(ContainerState.SCHEDULED, ContainerState.SCHEDULED,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From RUNNING State
.addTransition(ContainerState.RUNNING,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.RUNNING,
EnumSet.of(ContainerState.RELAUNCHING,
ContainerState.SCHEDULED,
ContainerState.EXITED_WITH_FAILURE),
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new RetryFailureTransition())
.addTransition(ContainerState.RUNNING,
EnumSet.of(ContainerState.RUNNING,
ContainerState.REINITIALIZING,
ContainerState.REINITIALIZING_AWAITING_KILL),
ContainerEventType.REINITIALIZE_CONTAINER,
new ReInitializeContainerTransition())
.addTransition(ContainerState.RUNNING,
EnumSet.of(ContainerState.RUNNING,
ContainerState.REINITIALIZING,
ContainerState.REINITIALIZING_AWAITING_KILL),
ContainerEventType.ROLLBACK_REINIT,
new RollbackContainerTransition())
.addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileRunningTransition())
.addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
ContainerEventType.RESOURCE_FAILED,
new ResourceLocalizationFailedWhileRunningTransition())
.addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.RUNNING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.RUNNING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new KilledExternallyTransition())
.addTransition(ContainerState.RUNNING, ContainerState.PAUSING,
ContainerEventType.PAUSE_CONTAINER, new PauseContainerTransition())
.addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From PAUSING State
.addTransition(ContainerState.PAUSING, ContainerState.PAUSING,
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileRunningTransition())
.addTransition(ContainerState.PAUSING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.PAUSING, ContainerState.PAUSING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.PAUSING, ContainerState.PAUSED,
ContainerEventType.CONTAINER_PAUSED, new PausedContainerTransition())
// In case something goes wrong then container will exit from the
// PAUSING state
.addTransition(ContainerState.PAUSING,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS)
.addTransition(ContainerState.PAUSING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.PAUSING, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new KilledExternallyTransition())
.addTransition(ContainerState.PAUSING, ContainerState.PAUSING,
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileRunningTransition())
.addTransition(ContainerState.PAUSING, ContainerState.PAUSING,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From PAUSED State
.addTransition(ContainerState.PAUSED, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
ContainerEventType.PAUSE_CONTAINER)
// This can happen during re-initialization.
.addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileRunningTransition())
.addTransition(ContainerState.PAUSED, ContainerState.RESUMING,
ContainerEventType.RESUME_CONTAINER, new ResumeContainerTransition())
// In case something goes wrong then container will exit from the
// PAUSED state
.addTransition(ContainerState.PAUSED,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.PAUSED, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new KilledExternallyTransition())
.addTransition(ContainerState.PAUSED,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.PAUSED, ContainerState.PAUSED,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From RESUMING State
.addTransition(ContainerState.RESUMING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.RESUMING, ContainerState.RUNNING,
ContainerEventType.CONTAINER_RESUMED)
.addTransition(ContainerState.RESUMING, ContainerState.RESUMING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
// This can happen during re-initialization
.addTransition(ContainerState.RESUMING, ContainerState.RESUMING,
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileRunningTransition())
// In case something goes wrong then container will exit from the
// RESUMING state
.addTransition(ContainerState.RESUMING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.RESUMING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new KilledExternallyTransition())
.addTransition(ContainerState.RESUMING,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.RESUMING, ContainerState.RESUMING,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// NOTE - We cannot get a PAUSE_CONTAINER while in RESUMING state.
// From REINITIALIZING State
.addTransition(ContainerState.REINITIALIZING,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.REINITIALIZING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.REINITIALIZING,
EnumSet.of(ContainerState.REINITIALIZING,
ContainerState.REINITIALIZING_AWAITING_KILL),
ContainerEventType.RESOURCE_LOCALIZED,
new ResourceLocalizedWhileReInitTransition())
.addTransition(ContainerState.REINITIALIZING, ContainerState.RUNNING,
ContainerEventType.RESOURCE_FAILED,
new ResourceLocalizationFailedWhileReInitTransition())
.addTransition(ContainerState.REINITIALIZING,
ContainerState.REINITIALIZING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.REINITIALIZING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.REINITIALIZING, ContainerState.PAUSING,
ContainerEventType.PAUSE_CONTAINER, new PauseContainerTransition())
.addTransition(ContainerState.REINITIALIZING,
ContainerState.REINITIALIZING,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// from REINITIALIZING_AWAITING_KILL
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(true))
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.SCHEDULED, ContainerEventType.PAUSE_CONTAINER)
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.SCHEDULED,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new KilledForReInitializationTransition())
.addTransition(ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerState.REINITIALIZING_AWAITING_KILL,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From RELAUNCHING State
.addTransition(ContainerState.RELAUNCHING, ContainerState.RUNNING,
ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition())
.addTransition(ContainerState.RELAUNCHING,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(true))
.addTransition(ContainerState.RELAUNCHING, ContainerState.RELAUNCHING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.RELAUNCHING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER, new KillTransition())
.addTransition(ContainerState.RELAUNCHING, ContainerState.KILLING,
ContainerEventType.PAUSE_CONTAINER, new KillOnPauseTransition())
.addTransition(ContainerState.RELAUNCHING, ContainerState.RELAUNCHING,
ContainerEventType.UPDATE_CONTAINER_TOKEN,
new NotifyContainerSchedulerOfUpdateTransition())
// From CONTAINER_EXITED_WITH_SUCCESS State
.addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
new ExitedWithSuccessToDoneTransition())
.addTransition(ContainerState.EXITED_WITH_SUCCESS,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.EXITED_WITH_SUCCESS,
ContainerState.EXITED_WITH_SUCCESS,
EnumSet.of(ContainerEventType.KILL_CONTAINER,
ContainerEventType.PAUSE_CONTAINER))
// No transition - assuming container is on its way to completion
.addTransition(ContainerState.EXITED_WITH_SUCCESS,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.UPDATE_CONTAINER_TOKEN)
.addTransition(ContainerState.EXITED_WITH_SUCCESS,
ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
// From EXITED_WITH_FAILURE State
.addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
new ExitedWithFailureToDoneTransition())
.addTransition(ContainerState.EXITED_WITH_FAILURE,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.EXITED_WITH_FAILURE,
ContainerState.EXITED_WITH_FAILURE,
EnumSet.of(ContainerEventType.KILL_CONTAINER,
ContainerEventType.PAUSE_CONTAINER))
// No transition - assuming container is on its way to completion
.addTransition(ContainerState.EXITED_WITH_FAILURE,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.UPDATE_CONTAINER_TOKEN)
.addTransition(ContainerState.EXITED_WITH_FAILURE,
ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
// From KILLING State.
.addTransition(ContainerState.KILLING,
ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
new ContainerKilledTransition())
.addTransition(ContainerState.KILLING,
ContainerState.KILLING,
ContainerEventType.RESOURCE_LOCALIZED,
new LocalizedResourceDuringKillTransition())
.addTransition(ContainerState.KILLING,
ContainerState.KILLING,
ContainerEventType.RESOURCE_FAILED)
.addTransition(ContainerState.KILLING, ContainerState.KILLING,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.KILLING, ContainerState.KILLING,
ContainerEventType.KILL_CONTAINER)
.addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
new ExitedWithSuccessTransition(false))
.addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_FAILURE,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
new ExitedWithFailureTransition(false))
.addTransition(ContainerState.KILLING,
ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
new KillingToDoneTransition())
// Handle a launched container during killing stage is a no-op
// as cleanup container is always handled after launch container event
// in the container launcher
.addTransition(ContainerState.KILLING,
ContainerState.KILLING,
EnumSet.of(ContainerEventType.CONTAINER_LAUNCHED,
ContainerEventType.PAUSE_CONTAINER))
// No transition - assuming container is on its way to completion
.addTransition(ContainerState.KILLING, ContainerState.KILLING,
ContainerEventType.UPDATE_CONTAINER_TOKEN)
// From CONTAINER_CLEANEDUP_AFTER_KILL State.
.addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerState.DONE,
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
new ContainerCleanedupAfterKillToDoneTransition())
.addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
.addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
EnumSet.of(ContainerEventType.KILL_CONTAINER,
ContainerEventType.RESOURCE_FAILED,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
ContainerEventType.PAUSE_CONTAINER))
// No transition - assuming container is on its way to completion
.addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerEventType.UPDATE_CONTAINER_TOKEN)
.addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
// From DONE
.addTransition(ContainerState.DONE, ContainerState.DONE,
EnumSet.of(ContainerEventType.KILL_CONTAINER,
ContainerEventType.PAUSE_CONTAINER))
.addTransition(ContainerState.DONE, ContainerState.DONE,
ContainerEventType.INIT_CONTAINER)
.addTransition(ContainerState.DONE, ContainerState.DONE,
ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
UPDATE_DIAGNOSTICS_TRANSITION)
// This transition may result when
// we notify container of failed localization if localizer thread (for
// that container) fails for some reason
.addTransition(ContainerState.DONE, ContainerState.DONE,
EnumSet.of(ContainerEventType.RESOURCE_FAILED,
ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE))
// No transition - assuming container is on its way to completion
.addTransition(ContainerState.DONE, ContainerState.DONE,
ContainerEventType.UPDATE_CONTAINER_TOKEN)
.addTransition(ContainerState.DONE, ContainerState.DONE,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
// create the topology tables
.installTopology();
private final StateMachine<ContainerState, ContainerEventType, ContainerEvent>
stateMachine;
public org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() {
switch (stateMachine.getCurrentState()) {
case NEW:
case LOCALIZING:
case LOCALIZATION_FAILED:
case SCHEDULED:
case PAUSED:
case RESUMING:
case RUNNING:
case RELAUNCHING:
case REINITIALIZING:
case REINITIALIZING_AWAITING_KILL:
case EXITED_WITH_SUCCESS:
case EXITED_WITH_FAILURE:
case KILLING:
case CONTAINER_CLEANEDUP_AFTER_KILL:
case CONTAINER_RESOURCES_CLEANINGUP:
case PAUSING:
return org.apache.hadoop.yarn.api.records.ContainerState.RUNNING;
case DONE:
default:
return org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
}
}
// NOTE: Please update the doc in the ContainerSubState
|
ReInitializationContext
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/engine/MergeMemoryEstimator.java
|
{
"start": 1552,
"end": 5501
}
|
class ____ {
// Determined empirically by using Accountable.ramBytesUsed() during merges on Lucene using an instrumented build of Lucene.
// Didn't adapted the ramBytesUsed() code for this as it depends on graph levels and size for non-zero levels, which are difficult
// to estimate without actually building the graph.
public static final long HNSW_PER_DOC_ESTIMATION = 348L;
/**
* Estimates the memory, in bytes, needed to merge the segments of the given merge.
*/
public static long estimateMergeMemory(MergePolicy.OneMerge merge, IndexReader indexReader) {
assert merge.segments.isEmpty() == false;
long memoryNeeded = 0;
Map<String, SegmentCommitInfo> segments = merge.segments.stream().collect(Collectors.toMap(s -> s.info.name, s -> s));
List<LeafReaderContext> leaves = indexReader.leaves();
SegmentReader segmentReader = null;
for (LeafReaderContext leafReaderContext : leaves) {
segmentReader = Lucene.segmentReader(leafReaderContext.reader());
String segmentName = segmentReader.getSegmentName();
SegmentCommitInfo segmentCommitInfo = segments.get(segmentName);
if (segmentCommitInfo != null) {
memoryNeeded += estimateMergeMemory(segmentCommitInfo, segmentReader);
segments.remove(segmentName);
if (segments.isEmpty()) {
break;
}
}
}
// Estimate segments without readers - the searcher may not have been refreshed yet, so estimate them with the field info from
// the last segment reader
if (segmentReader != null) {
for (SegmentCommitInfo segmentCommitInfo : segments.values()) {
memoryNeeded += estimateMergeMemory(segmentCommitInfo, segmentReader);
}
}
return memoryNeeded;
}
private static long estimateMergeMemory(SegmentCommitInfo segmentCommitInfo, SegmentReader reader) {
long maxMem = 0;
for (FieldInfo fieldInfo : reader.getFieldInfos()) {
maxMem = Math.max(maxMem, estimateFieldMemory(fieldInfo, segmentCommitInfo, reader));
}
return maxMem;
}
private static long estimateFieldMemory(FieldInfo fieldInfo, SegmentCommitInfo segmentCommitInfo, SegmentReader segmentReader) {
long maxMem = 0;
if (fieldInfo.hasVectorValues()) {
maxMem = Math.max(maxMem, estimateVectorFieldMemory(fieldInfo, segmentCommitInfo, segmentReader));
}
// TODO Work on estimations on other field infos when / if needed
return maxMem;
}
private static long estimateVectorFieldMemory(FieldInfo fieldInfo, SegmentCommitInfo segmentCommitInfo, SegmentReader segmentReader) {
KnnVectorsReader vectorsReader = segmentReader.getVectorReader();
if (vectorsReader instanceof PerFieldKnnVectorsFormat.FieldsReader perFieldKnnVectorsFormat) {
vectorsReader = perFieldKnnVectorsFormat.getFieldReader(fieldInfo.getName());
}
return getVectorFieldEstimation(fieldInfo, segmentCommitInfo, vectorsReader);
}
private static long getVectorFieldEstimation(FieldInfo fieldInfo, SegmentCommitInfo segmentCommitInfo, KnnVectorsReader vectorsReader) {
int numDocs = segmentCommitInfo.info.maxDoc() - segmentCommitInfo.getDelCount();
if (vectorsReader instanceof Lucene99HnswVectorsReader) {
return numDocs * HNSW_PER_DOC_ESTIMATION;
} else {
// Dominated by the heap byte buffer size used to write each vector
if (fieldInfo.getVectorEncoding() == VectorEncoding.FLOAT32) {
return fieldInfo.getVectorDimension() * VectorEncoding.FLOAT32.byteSize;
}
// Byte does not use buffering for writing but the IndexOutput directly
return 0;
}
}
}
|
MergeMemoryEstimator
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/FluxBufferPredicate.java
|
{
"start": 2405,
"end": 3529
}
|
enum ____ {
UNTIL, UNTIL_CUT_BEFORE, WHILE
}
final Predicate<? super T> predicate;
final Supplier<C> bufferSupplier;
final Mode mode;
FluxBufferPredicate(Flux<? extends T> source, Predicate<? super T> predicate,
Supplier<C> bufferSupplier, Mode mode) {
super(source);
this.predicate = Objects.requireNonNull(predicate, "predicate");
this.bufferSupplier = Objects.requireNonNull(bufferSupplier, "bufferSupplier");
this.mode = mode;
}
@Override
public int getPrefetch() {
return 1; //this operator changes the downstream request to 1 in the source
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super C> actual) {
C initialBuffer = Objects.requireNonNull(bufferSupplier.get(),
"The bufferSupplier returned a null initial buffer");
BufferPredicateSubscriber<T, C> parent = new BufferPredicateSubscriber<>(actual,
initialBuffer, bufferSupplier, predicate, mode);
return parent;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
static final
|
Mode
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/core/AbstractClassGenerator.java
|
{
"start": 6727,
"end": 7104
}
|
class ____ the same properties. Default is <code>true</code>.
*/
public void setUseCache(boolean useCache) {
this.useCache = useCache;
}
/**
* @see #setUseCache
*/
public boolean getUseCache() {
return useCache;
}
/**
* If set, CGLIB will attempt to load classes from the specified
* <code>ClassLoader</code> before generating them. Because generated
*
|
with
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/broadcast/Lo.java
|
{
"start": 388,
"end": 648
}
|
class ____ {
@Inject
WebSocketConnection connection;
@OnOpen(broadcast = true)
Uni<String> open() {
assertTrue(Context.isOnEventLoopThread());
return Uni.createFrom().item(connection.pathParam("client").toLowerCase());
}
}
|
Lo
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SplitCompressionInputStream.java
|
{
"start": 1307,
"end": 2217
}
|
class ____
extends CompressionInputStream {
private long start;
private long end;
public SplitCompressionInputStream(InputStream in, long start, long end)
throws IOException {
super(in);
this.start = start;
this.end = end;
}
protected void setStart(long start) {
this.start = start;
}
protected void setEnd(long end) {
this.end = end;
}
/**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of start.
* @return The changed value of start
*/
public long getAdjustedStart() {
return start;
}
/**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of end.
* @return The changed value of end
*/
public long getAdjustedEnd() {
return end;
}
}
|
SplitCompressionInputStream
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/config/AbstractInterceptorDrivenBeanDefinitionDecorator.java
|
{
"start": 1782,
"end": 2574
}
|
class ____ the creation of the {@link ProxyFactoryBean} bean definition
* and wraps the original as an inner-bean definition for the {@code target} property
* of {@link ProxyFactoryBean}.
*
* <p>Chaining is correctly handled, ensuring that only one {@link ProxyFactoryBean} definition
* is created. If a previous {@link org.springframework.beans.factory.xml.BeanDefinitionDecorator}
* already created the {@link org.springframework.aop.framework.ProxyFactoryBean} then the
* interceptor is simply added to the existing definition.
*
* <p>Subclasses have only to create the {@code BeanDefinition} to the interceptor that
* they wish to add.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @since 2.0
* @see org.aopalliance.intercept.MethodInterceptor
*/
public abstract
|
controls
|
java
|
playframework__playframework
|
core/play/src/main/java/play/libs/typedmap/TypedKey.java
|
{
"start": 460,
"end": 1976
}
|
class ____<A> {
private final play.api.libs.typedmap.TypedKey<A> underlying;
public TypedKey(play.api.libs.typedmap.TypedKey<A> underlying) {
this.underlying = underlying;
}
/** @return the underlying Scala TypedKey which this instance wraps. */
public play.api.libs.typedmap.TypedKey<A> asScala() {
return underlying;
}
/**
* Bind this key to a value.
*
* @param value The value to bind this key to.
* @return A bound value.
*/
public TypedEntry<A> bindValue(A value) {
return new TypedEntry<>(this, value);
}
/**
* Creates a TypedKey without a name.
*
* @param <A> The type of value this key is associated with.
* @return A fresh key.
*/
public static <A> TypedKey<A> create() {
return new TypedKey<>(TypedKey$.MODULE$.apply());
}
/**
* Creates a TypedKey with the given name.
*
* @param displayName The name to display when printing this key.
* @param <A> The type of value this key is associated with.
* @return A fresh key.
*/
public static <A> TypedKey<A> create(String displayName) {
return new TypedKey<>(TypedKey$.MODULE$.apply(displayName));
}
@Override
public String toString() {
return underlying.toString();
}
@Override
public int hashCode() {
return underlying.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof TypedKey) {
return this.underlying.equals(((TypedKey) obj).underlying);
} else {
return false;
}
}
}
|
TypedKey
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PgReplicationSlotEndpointBuilderFactory.java
|
{
"start": 1635,
"end": 21180
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedPgReplicationSlotEndpointBuilder advanced() {
return (AdvancedPgReplicationSlotEndpointBuilder) this;
}
/**
* Postgres password.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param password the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Postgres user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: postgres
* Group: common
*
* @param user the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder user(String user) {
doSetProperty("user", user);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder sendEmptyMessageWhenIdle(boolean sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder sendEmptyMessageWhenIdle(String sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffErrorThreshold(int backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffErrorThreshold(String backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffIdleThreshold(int backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffIdleThreshold(String backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffMultiplier(int backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder backoffMultiplier(String backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option is a: <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder greedy(boolean greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder greedy(String greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder repeatCount(long repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder repeatCount(String repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option is a: <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder runLoggingLevel(org.apache.camel.LoggingLevel runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder runLoggingLevel(String runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option is a:
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option will be converted to a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder scheduledExecutorService(String scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option is a: <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder scheduler(Object scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option will be converted to a <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder scheduler(String scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder schedulerProperties(String key, Object value) {
doSetMultiValueProperty("schedulerProperties", "scheduler." + key, value);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param values the values
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder schedulerProperties(Map values) {
doSetMultiValueProperties("schedulerProperties", "scheduler.", values);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder startScheduler(boolean startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder startScheduler(String startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option is a: <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder timeUnit(TimeUnit timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option will be converted to a
* <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder timeUnit(String timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder useFixedDelay(boolean useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default PgReplicationSlotEndpointBuilder useFixedDelay(String useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
}
/**
* Advanced builder for endpoint for the PostgresSQL Replication Slot component.
*/
public
|
PgReplicationSlotEndpointBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivilegesMapTests.java
|
{
"start": 535,
"end": 4841
}
|
class ____ extends ESTestCase {
public void testBuilder() {
ResourcePrivilegesMap instance = ResourcePrivilegesMap.builder()
.addResourcePrivilege("*", Map.of("read", true, "write", true))
.build();
assertThat(instance.getResourceToResourcePrivileges().size(), is(1));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("read"), is(true));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("write"), is(true));
instance = ResourcePrivilegesMap.builder().addResourcePrivilege("*", Map.of("read", true, "write", false)).build();
assertThat(instance.getResourceToResourcePrivileges().size(), is(1));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("read"), is(true));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("write"), is(false));
instance = ResourcePrivilegesMap.builder()
.addResourcePrivilege("some-other", Map.of("index", true, "write", true))
.addResourcePrivilegesMap(instance)
.build();
assertThat(instance.getResourceToResourcePrivileges().size(), is(2));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("read"), is(true));
assertThat(instance.getResourceToResourcePrivileges().get("*").isAllowed("write"), is(false));
assertThat(instance.getResourceToResourcePrivileges().get("some-other").isAllowed("index"), is(true));
assertThat(instance.getResourceToResourcePrivileges().get("some-other").isAllowed("write"), is(true));
}
public void testIntersection() {
ResourcePrivilegesMap.Builder builder = ResourcePrivilegesMap.builder();
ResourcePrivilegesMap instance = ResourcePrivilegesMap.builder()
.addResourcePrivilege("*", Map.of("read", true, "write", true))
.addResourcePrivilege("index-*", Map.of("read", true, "write", true))
.build();
ResourcePrivilegesMap otherInstance = ResourcePrivilegesMap.builder()
.addResourcePrivilege("*", Map.of("read", true, "write", false))
.addResourcePrivilege("index-*", Map.of("read", false, "write", true))
.build();
ResourcePrivilegesMap result = builder.addResourcePrivilegesMap(instance).addResourcePrivilegesMap(otherInstance).build();
assertThat(result.getResourceToResourcePrivileges().size(), is(2));
assertThat(result.getResourceToResourcePrivileges().get("*").isAllowed("read"), is(true));
assertThat(result.getResourceToResourcePrivileges().get("*").isAllowed("write"), is(false));
assertThat(result.getResourceToResourcePrivileges().get("index-*").isAllowed("read"), is(false));
assertThat(result.getResourceToResourcePrivileges().get("index-*").isAllowed("write"), is(true));
assertThat(result.getResourceToResourcePrivileges().get("index-uncommon"), is(nullValue()));
}
public void testEqualsHashCode() {
ResourcePrivilegesMap instance = ResourcePrivilegesMap.builder()
.addResourcePrivilege("*", Map.of("read", true, "write", true))
.build();
EqualsHashCodeTestUtils.checkEqualsAndHashCode(instance, (original) -> {
return ResourcePrivilegesMap.builder().addResourcePrivilegesMap(original).build();
});
EqualsHashCodeTestUtils.checkEqualsAndHashCode(instance, (original) -> {
return ResourcePrivilegesMap.builder().addResourcePrivilegesMap(original).build();
}, ResourcePrivilegesMapTests::mutateTestItem);
}
private static ResourcePrivilegesMap mutateTestItem(ResourcePrivilegesMap original) {
return switch (randomIntBetween(0, 1)) {
case 0 -> ResourcePrivilegesMap.builder()
.addResourcePrivilege(randomAlphaOfLength(6), Map.of("read", true, "write", true))
.build();
case 1 -> ResourcePrivilegesMap.builder().addResourcePrivilege("*", Map.of("read", false, "write", false)).build();
default -> ResourcePrivilegesMap.builder()
.addResourcePrivilege(randomAlphaOfLength(6), Map.of("read", true, "write", true))
.build();
};
}
}
|
ResourcePrivilegesMapTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceUtils.java
|
{
"start": 9404,
"end": 9521
}
|
interface ____<E extends Enum<E>> {
E apply(String name) throws IllegalArgumentException;
}
}
|
EnumConstructor
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/util/OutputTag.java
|
{
"start": 1688,
"end": 4355
}
|
class ____<T> implements Serializable {
private static final long serialVersionUID = 2L;
private final String id;
private final TypeInformation<T> typeInfo;
/**
* Creates a new named {@code OutputTag} with the given id.
*
* @param id The id of the created {@code OutputTag}.
*/
public OutputTag(String id) {
Preconditions.checkNotNull(id, "OutputTag id cannot be null.");
Preconditions.checkArgument(!id.isEmpty(), "OutputTag id must not be empty.");
this.id = id;
try {
this.typeInfo = TypeExtractor.createTypeInfo(this, OutputTag.class, getClass(), 0);
} catch (InvalidTypesException e) {
throw new InvalidTypesException(
"Could not determine TypeInformation for the OutputTag type. "
+ "The most common reason is forgetting to make the OutputTag an anonymous inner class. "
+ "It is also not possible to use generic type variables with OutputTags, such as 'Tuple2<A, B>'.",
e);
}
}
/**
* Creates a new named {@code OutputTag} with the given id and output {@link TypeInformation}.
*
* @param id The id of the created {@code OutputTag}.
* @param typeInfo The {@code TypeInformation} for the side output.
*/
public OutputTag(String id, TypeInformation<T> typeInfo) {
Preconditions.checkNotNull(id, "OutputTag id cannot be null.");
Preconditions.checkArgument(!id.isEmpty(), "OutputTag id must not be empty.");
this.id = id;
this.typeInfo = Preconditions.checkNotNull(typeInfo, "TypeInformation cannot be null.");
}
public static boolean isResponsibleFor(
@Nullable OutputTag<?> owner, @Nonnull OutputTag<?> other) {
return other.equals(owner);
}
// ------------------------------------------------------------------------
public String getId() {
return id;
}
public TypeInformation<T> getTypeInfo() {
return typeInfo;
}
// ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || !(obj instanceof OutputTag)) {
return false;
}
OutputTag other = (OutputTag) obj;
return Objects.equals(this.id, other.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public String toString() {
return "OutputTag(" + getTypeInfo() + ", " + id + ")";
}
}
|
OutputTag
|
java
|
qos-ch__slf4j
|
slf4j-api/src/main/java/org/slf4j/spi/MDCAdapter.java
|
{
"start": 1286,
"end": 1424
}
|
interface ____ the service offered by various MDC
* implementations.
*
* @author Ceki Gülcü
* @since 1.4.1
*/
public
|
abstracts
|
java
|
apache__flink
|
flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkWriterFactoryTest.java
|
{
"start": 1489,
"end": 2287
}
|
class ____ {
@Test
void testNotOverrideInMemoryManager(@TempDir java.nio.file.Path tmpDir) throws IOException {
TestMemoryManager memoryManager = new TestMemoryManager();
OrcBulkWriterFactory<Record> factory =
new TestOrcBulkWriterFactory<>(
new RecordVectorizer("struct<_col0:string,_col1:int>"), memoryManager);
factory.create(new LocalDataOutputStream(tmpDir.resolve("file1").toFile()));
factory.create(new LocalDataOutputStream(tmpDir.resolve("file2").toFile()));
List<Path> addedWriterPath = memoryManager.getAddedWriterPath();
assertThat(addedWriterPath).hasSize(2);
assertThat(addedWriterPath.get(1)).isNotEqualTo(addedWriterPath.get(0));
}
private static
|
OrcBulkWriterFactoryTest
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest4.java
|
{
"start": 978,
"end": 2634
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "SELECT LPAD(' ',2*(LEVEL-1)) || last_name org_chart, " +
"employee_id, manager_id, job_id " +
" FROM employees" +
" START WITH job_id = 'AD_PRES' " +
" CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 2;";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
assertEquals(1, visitor.getTables().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("employees")));
assertEquals(4, visitor.getColumns().size());
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "job_id")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "last_name")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "employee_id")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "manager_id")));
}
}
|
OracleSelectTest4
|
java
|
grpc__grpc-java
|
xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java
|
{
"start": 6074,
"end": 6197
}
|
class ____ not
// necessary. Still keep it for future version usage. Remove if too much trouble to maintain.
public abstract
|
is
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/mvdedupe/BatchEncoder.java
|
{
"start": 12435,
"end": 13118
}
|
class ____ extends DirectEncoder {
DirectInts(IntBlock block) {
super(block);
}
@Override
protected int readValueAtBlockIndex(int valueIndex, BytesRefBuilder dst) {
int before = dst.length();
int after = before + Integer.BYTES;
dst.grow(after);
int v = ((IntBlock) block).getInt(valueIndex);
intHandle.set(dst.bytes(), before, v);
dst.setLength(after);
return Integer.BYTES;
}
}
private static final VarHandle longHandle = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.nativeOrder());
protected abstract static
|
DirectInts
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/TestContextAnnotationUtils.java
|
{
"start": 18357,
"end": 18654
}
|
class ____ an inner class?
if (searchEnclosingClass(clazz)) {
descriptor = findAnnotationDescriptorForTypes(clazz.getEnclosingClass(), annotationTypes, visited);
if (descriptor != null) {
return descriptor;
}
}
return null;
}
/**
* Determine if annotations on the enclosing
|
of
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/OptionalBindingTest.java
|
{
"start": 3415,
"end": 4121
}
|
interface ____");
});
}
// Note: This is a regression test for an issue we ran into in CL/644086367, where an optional
// binding owned by a parent component is also requested by a child component which declares an
// additional @BindsOptionalOf declaration. In this case, we just want to make sure that the setup
// builds successfully.
@Test
public void cachedInParent_succeeds() {
Source parent =
CompilerTests.javaSource(
"test.Parent",
"package test;",
"",
"import dagger.Component;",
"import java.util.Optional;",
"",
"@Component(modules = ParentModule.class)",
"
|
Parent
|
java
|
apache__camel
|
components/camel-as2/camel-as2-component/src/test/java/org/apache/camel/component/as2/AS2ServerSecEncryptedIT.java
|
{
"start": 1885,
"end": 4505
}
|
class ____ extends AS2ServerSecTestBase {
// verify message types that fail with insufficient security due to lack of encryption
@ParameterizedTest
@EnumSource(value = AS2MessageStructure.class,
names = { "PLAIN", "SIGNED", "PLAIN_COMPRESSED", "COMPRESSED_SIGNED", "SIGNED_COMPRESSED" })
public void insufficientEncryptionFailureTest(AS2MessageStructure messageStructure) throws Exception {
HttpCoreContext context = send(messageStructure);
verifyOkResponse(context);
verifyMdnErrorDisposition(context, AS2DispositionModifier.ERROR_INSUFFICIENT_MESSAGE_SECURITY);
}
// verify message types that are successfully decrypted
@ParameterizedTest
@EnumSource(value = AS2MessageStructure.class,
names = {
"ENCRYPTED", "SIGNED_ENCRYPTED", "ENCRYPTED_COMPRESSED", "ENCRYPTED_COMPRESSED_SIGNED",
"ENCRYPTED_SIGNED_COMPRESSED" })
public void successfullyProcessedTest(AS2MessageStructure messageStructure) throws Exception {
HttpCoreContext context = send(messageStructure);
verifyOkResponse(context);
verifyMdnSuccessDisposition(context);
}
// verify message types that fail decryption when encrypted with an invalid cert
@ParameterizedTest
@EnumSource(value = AS2MessageStructure.class,
names = {
"ENCRYPTED", "SIGNED_ENCRYPTED", "ENCRYPTED_COMPRESSED", "ENCRYPTED_COMPRESSED_SIGNED",
"ENCRYPTED_SIGNED_COMPRESSED" })
public void invalidEncryptionFailureTest(AS2MessageStructure messageStructure) throws Exception {
HttpCoreContext context = sendWithInvalidEncryption(messageStructure);
verifyOkResponse(context);
verifyMdnErrorDisposition(context, AS2DispositionModifier.ERROR_DECRYPTION_FAILED);
}
// utility method to reproduce the MIC and compare against the MIC received in MDN.
@Override
protected MicUtils.ReceivedContentMic createReceivedContentMic(HttpRequest request) throws HttpException {
return MicUtils.createReceivedContentMic((ClassicHttpRequest) request, null, signingKP.getPrivate());
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
AS2Component as2Component = (AS2Component) context.getComponent("as2");
AS2Configuration configuration = as2Component.getConfiguration();
configuration.setDecryptingPrivateKey(decryptingKP.getPrivate());
return context;
}
}
|
AS2ServerSecEncryptedIT
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Timestream2EndpointBuilderFactory.java
|
{
"start": 15093,
"end": 20208
}
|
interface ____
extends
EndpointProducerBuilder {
default Timestream2EndpointBuilder basic() {
return (Timestream2EndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* To use an existing configured AwsTimestreamQueryClient client.
*
* The option is a:
* <code>software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient</code> type.
*
* Group: advanced
*
* @param awsTimestreamQueryClient the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder awsTimestreamQueryClient(software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient awsTimestreamQueryClient) {
doSetProperty("awsTimestreamQueryClient", awsTimestreamQueryClient);
return this;
}
/**
* To use an existing configured AwsTimestreamQueryClient client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient</code> type.
*
* Group: advanced
*
* @param awsTimestreamQueryClient the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder awsTimestreamQueryClient(String awsTimestreamQueryClient) {
doSetProperty("awsTimestreamQueryClient", awsTimestreamQueryClient);
return this;
}
/**
* To use an existing configured AwsTimestreamWriteClient client.
*
* The option is a:
* <code>software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient</code> type.
*
* Group: advanced
*
* @param awsTimestreamWriteClient the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder awsTimestreamWriteClient(software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient awsTimestreamWriteClient) {
doSetProperty("awsTimestreamWriteClient", awsTimestreamWriteClient);
return this;
}
/**
* To use an existing configured AwsTimestreamWriteClient client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient</code> type.
*
* Group: advanced
*
* @param awsTimestreamWriteClient the value to set
* @return the dsl builder
*/
default AdvancedTimestream2EndpointBuilder awsTimestreamWriteClient(String awsTimestreamWriteClient) {
doSetProperty("awsTimestreamWriteClient", awsTimestreamWriteClient);
return this;
}
}
public
|
AdvancedTimestream2EndpointBuilder
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
|
{
"start": 2369,
"end": 3293
}
|
class ____<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
protected boolean acceptDataLoss;
Builder(ProjectId projectId) {
super(projectId);
}
public void setAcceptDataLoss(boolean acceptDataLoss) {
this.acceptDataLoss = acceptDataLoss;
}
}
@Override
protected void extraXContent(XContentBuilder builder) throws IOException {
builder.field(ACCEPT_DATA_LOSS_FIELD, acceptDataLoss);
}
@Override
public boolean equals(Object obj) {
if (false == super.equals(obj)) {
return false;
}
BasePrimaryAllocationCommand other = (BasePrimaryAllocationCommand) obj;
return acceptDataLoss == other.acceptDataLoss;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
}
}
|
Builder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServicesWithSSL.java
|
{
"start": 5100,
"end": 5666
}
|
class ____ extends TimelineClientImpl {
private Response resp;
@Override
protected TimelineWriter createTimelineWriter(Configuration conf,
UserGroupInformation authUgi, Client client, URI resURI, RetryPolicy<Object> retryPolicy) {
return new DirectTimelineWriter(authUgi, client, resURI, retryPolicy) {
@Override
public Response doPostingObject(Object obj, String path) throws JsonProcessingException {
resp = super.doPostingObject(obj, path);
return resp;
}
};
}
}
}
|
TestTimelineClient
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/ThreadsDefaultTest.java
|
{
"start": 973,
"end": 1657
}
|
class ____ extends ContextTestSupport {
@Test
public void testThreadsDefault() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
// will use a cached thread pool which can grown/shrink
.threads().to("log:foo").to("mock:result");
}
};
}
}
|
ThreadsDefaultTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ProtoBuilderReturnValueIgnoredTest.java
|
{
"start": 3016,
"end": 3352
}
|
class ____ {
private void singleField(Duration.Builder proto) {}
}
""")
.doTest();
}
@Test
public void refactoringSecondFix() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.protobuf.Duration;
final
|
Test
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
|
{
"start": 1243,
"end": 3784
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractService.class);
private ServiceOperations() {
}
/**
* Stop a service.
* <p>Do nothing if the service is null or not
* in a state in which it can be/needs to be stopped.
* <p>
* The service state is checked <i>before</i> the operation begins.
* This process is <i>not</i> thread safe.
* @param service a service or null
*/
public static void stop(Service service) {
if (service != null) {
service.stop();
}
}
/**
* Stop a service; if it is null do nothing. Exceptions are caught and
* logged at warn level. (but not Throwables). This operation is intended to
* be used in cleanup operations
*
* @param service a service; may be null
* @return any exception that was caught; null if none was.
*/
public static Exception stopQuietly(Service service) {
return stopQuietly(LOG, service);
}
/**
* Stop a service; if it is null do nothing. Exceptions are caught and
* logged at warn level. (but not Throwables). This operation is intended to
* be used in cleanup operations
*
* @param log the log to warn at
* @param service a service; may be null
* @return any exception that was caught; null if none was.
* @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
*/
@Deprecated
public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
try {
stop(service);
} catch (Exception e) {
log.warn("When stopping the service " + service.getName(), e);
return e;
}
return null;
}
/**
* Stop a service; if it is null do nothing. Exceptions are caught and
* logged at warn level. (but not Throwables). This operation is intended to
* be used in cleanup operations
*
* @param log the log to warn at
* @param service a service; may be null
* @return any exception that was caught; null if none was.
* @see ServiceOperations#stopQuietly(Service)
*/
public static Exception stopQuietly(Logger log, Service service) {
try {
stop(service);
} catch (Exception e) {
log.warn("When stopping the service {}", service.getName(), e);
return e;
}
return null;
}
/**
* Class to manage a list of {@link ServiceStateChangeListener} instances,
* including a notification loop that is robust against changes to the list
* during the notification process.
*/
public static
|
ServiceOperations
|
java
|
alibaba__nacos
|
api/src/main/java/com/alibaba/nacos/api/naming/listener/FuzzyWatchLoadWatcher.java
|
{
"start": 770,
"end": 1036
}
|
interface ____ {
/**
* triggered when server pattern count over limit.
*/
void onPatternOverLimit();
/**
* triggered when pattern match service count over limit.
*/
void onServiceReachUpLimit();
}
|
FuzzyWatchLoadWatcher
|
java
|
apache__camel
|
components/camel-ai/camel-pinecone/src/generated/java/org/apache/camel/component/pinecone/PineconeVectorDbComponentConfigurer.java
|
{
"start": 735,
"end": 6793
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
private org.apache.camel.component.pinecone.PineconeVectorDbConfiguration getOrCreateConfiguration(PineconeVectorDbComponent target) {
if (target.getConfiguration() == null) {
target.setConfiguration(new org.apache.camel.component.pinecone.PineconeVectorDbConfiguration());
}
return target.getConfiguration();
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
PineconeVectorDbComponent target = (PineconeVectorDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": getOrCreateConfiguration(target).setAction(property(camelContext, org.apache.camel.component.pinecone.PineconeVectorDbAction.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "cloud": getOrCreateConfiguration(target).setCloud(property(camelContext, java.lang.String.class, value)); return true;
case "cloudregion":
case "cloudRegion": getOrCreateConfiguration(target).setCloudRegion(property(camelContext, java.lang.String.class, value)); return true;
case "collectiondimension":
case "collectionDimension": getOrCreateConfiguration(target).setCollectionDimension(property(camelContext, java.lang.Integer.class, value)); return true;
case "collectionsimilaritymetric":
case "collectionSimilarityMetric": getOrCreateConfiguration(target).setCollectionSimilarityMetric(property(camelContext, java.lang.String.class, value)); return true;
case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.pinecone.PineconeVectorDbConfiguration.class, value)); return true;
case "host": getOrCreateConfiguration(target).setHost(property(camelContext, java.lang.String.class, value)); return true;
case "indexname":
case "indexName": getOrCreateConfiguration(target).setIndexName(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "proxyhost":
case "proxyHost": getOrCreateConfiguration(target).setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": getOrCreateConfiguration(target).setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "tls": getOrCreateConfiguration(target).setTls(property(camelContext, boolean.class, value)); return true;
case "token": getOrCreateConfiguration(target).setToken(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": return org.apache.camel.component.pinecone.PineconeVectorDbAction.class;
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "cloud": return java.lang.String.class;
case "cloudregion":
case "cloudRegion": return java.lang.String.class;
case "collectiondimension":
case "collectionDimension": return java.lang.Integer.class;
case "collectionsimilaritymetric":
case "collectionSimilarityMetric": return java.lang.String.class;
case "configuration": return org.apache.camel.component.pinecone.PineconeVectorDbConfiguration.class;
case "host": return java.lang.String.class;
case "indexname":
case "indexName": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "tls": return boolean.class;
case "token": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
PineconeVectorDbComponent target = (PineconeVectorDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": return getOrCreateConfiguration(target).getAction();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "cloud": return getOrCreateConfiguration(target).getCloud();
case "cloudregion":
case "cloudRegion": return getOrCreateConfiguration(target).getCloudRegion();
case "collectiondimension":
case "collectionDimension": return getOrCreateConfiguration(target).getCollectionDimension();
case "collectionsimilaritymetric":
case "collectionSimilarityMetric": return getOrCreateConfiguration(target).getCollectionSimilarityMetric();
case "configuration": return target.getConfiguration();
case "host": return getOrCreateConfiguration(target).getHost();
case "indexname":
case "indexName": return getOrCreateConfiguration(target).getIndexName();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "proxyhost":
case "proxyHost": return getOrCreateConfiguration(target).getProxyHost();
case "proxyport":
case "proxyPort": return getOrCreateConfiguration(target).getProxyPort();
case "tls": return getOrCreateConfiguration(target).isTls();
case "token": return getOrCreateConfiguration(target).getToken();
default: return null;
}
}
}
|
PineconeVectorDbComponentConfigurer
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/ExpressionFactoryAware.java
|
{
"start": 952,
"end": 1096
}
|
interface ____ {
/**
* Gets the {@link ExpressionFactory}.
*/
ExpressionFactory getExpressionFactory();
}
|
ExpressionFactoryAware
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtLongEvaluator.java
|
{
"start": 1170,
"end": 4177
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(SqrtLongEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator val;
private final DriverContext driverContext;
private Warnings warnings;
public SqrtLongEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
DriverContext driverContext) {
this.source = source;
this.val = val;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock valBlock = (LongBlock) val.eval(page)) {
LongVector valVector = valBlock.asVector();
if (valVector == null) {
return eval(page.getPositionCount(), valBlock);
}
return eval(page.getPositionCount(), valVector);
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += val.baseRamBytesUsed();
return baseRamBytesUsed;
}
public DoubleBlock eval(int positionCount, LongBlock valBlock) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (valBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long val = valBlock.getLong(valBlock.getFirstValueIndex(p));
try {
result.appendDouble(Sqrt.process(val));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
public DoubleBlock eval(int positionCount, LongVector valVector) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
long val = valVector.getLong(p);
try {
result.appendDouble(Sqrt.process(val));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "SqrtLongEvaluator[" + "val=" + val + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(val);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
SqrtLongEvaluator
|
java
|
apache__dubbo
|
dubbo-cluster/src/test/java/org/apache/dubbo/rpc/cluster/support/DemoServiceAMock.java
|
{
"start": 905,
"end": 1087
}
|
class ____ implements DemoServiceA {
public static final String MOCK_VALUE = "mockA";
@Override
public String methodA() {
return MOCK_VALUE;
}
}
|
DemoServiceAMock
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/main/java/io/quarkus/resteasy/reactive/server/deployment/QuarkusMultipartReturnTypeHandler.java
|
{
"start": 2111,
"end": 3288
}
|
class ____ and have done all we need
return canHandle;
}
canHandle = false;
if (FormDataOutputMapperGenerator.isReturnTypeCompatible(multipartClassInfo, index)) {
additionalWriters.add(MultipartMessageBodyWriter.class.getName(), MediaType.MULTIPART_FORM_DATA, className);
String mapperClassName = FormDataOutputMapperGenerator.generate(multipartClassInfo,
new GeneratedClassGizmoAdaptor(generatedClassBuildItemBuildProducer,
applicationClassPredicate.test(className)),
index);
reflectiveClassProducer.produce(
ReflectiveClassBuildItem.builder(MultipartMessageBodyWriter.class.getName()).methods()
.build());
reflectiveClassProducer.produce(ReflectiveClassBuildItem.builder(className).build());
reflectiveClassProducer
.produce(ReflectiveClassBuildItem.builder(mapperClassName).methods().build());
canHandle = true;
}
multipartOutputGeneratedPopulators.put(className, canHandle);
return canHandle;
}
}
|
before
|
java
|
google__guava
|
android/guava-testlib/test/com/google/common/testing/anotherpackage/ForwardingWrapperTesterTest.java
|
{
"start": 1686,
"end": 8636
}
|
class ____ extends TestCase {
private final ForwardingWrapperTester tester = new ForwardingWrapperTester();
public void testGoodForwarder() {
tester.testForwarding(
Arithmetic.class,
new Function<Arithmetic, Arithmetic>() {
@Override
public Arithmetic apply(Arithmetic arithmetic) {
return new ForwardingArithmetic(arithmetic);
}
});
tester.testForwarding(
ParameterTypesDifferent.class,
new Function<ParameterTypesDifferent, ParameterTypesDifferent>() {
@Override
public ParameterTypesDifferent apply(ParameterTypesDifferent delegate) {
return new ParameterTypesDifferentForwarder(delegate);
}
});
}
public void testVoidMethodForwarding() {
tester.testForwarding(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable);
}
});
}
public void testToStringForwarding() {
tester.testForwarding(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@Override
public String toString() {
return runnable.toString();
}
};
}
});
}
public void testFailsToForwardToString() {
assertFailure(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@Override
public String toString() {
return "";
}
};
}
},
"toString()");
}
public void testFailsToForwardHashCode() {
tester.includingEquals();
assertFailure(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@SuppressWarnings("EqualsHashCode")
@Override
public boolean equals(@Nullable Object o) {
if (o instanceof ForwardingRunnable) {
ForwardingRunnable that = (ForwardingRunnable) o;
return runnable.equals(that.runnable);
}
return false;
}
};
}
},
"Runnable");
}
public void testEqualsAndHashCodeForwarded() {
tester.includingEquals();
tester.testForwarding(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@Override
public boolean equals(@Nullable Object o) {
if (o instanceof ForwardingRunnable) {
ForwardingRunnable that = (ForwardingRunnable) o;
return runnable.equals(that.runnable);
}
return false;
}
@Override
public int hashCode() {
return runnable.hashCode();
}
};
}
});
}
public void testFailsToForwardEquals() {
tester.includingEquals();
assertFailure(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@Override
public int hashCode() {
return runnable.hashCode();
}
};
}
},
"Runnable");
}
public void testFailsToForward() {
assertFailure(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new ForwardingRunnable(runnable) {
@Override
public void run() {}
};
}
},
"run()",
"Failed to forward");
}
public void testRedundantForwarding() {
assertFailure(
Runnable.class,
new Function<Runnable, Runnable>() {
@Override
public Runnable apply(Runnable runnable) {
return new Runnable() {
@Override
public void run() {
runnable.run();
runnable.run();
}
};
}
},
"run()",
"invoked more than once");
}
public void testFailsToForwardParameters() {
assertFailure(
Adder.class,
new Function<Adder, Adder>() {
@Override
public Adder apply(Adder adder) {
return new FailsToForwardParameters(adder);
}
},
"add(",
"Parameter #0");
}
public void testForwardsToTheWrongMethod() {
assertFailure(
Arithmetic.class,
new Function<Arithmetic, Arithmetic>() {
@Override
public Arithmetic apply(Arithmetic adder) {
return new ForwardsToTheWrongMethod(adder);
}
},
"minus");
}
public void testFailsToForwardReturnValue() {
assertFailure(
Adder.class,
new Function<Adder, Adder>() {
@Override
public Adder apply(Adder adder) {
return new FailsToForwardReturnValue(adder);
}
},
"add(",
"Return value");
}
public void testFailsToPropagateException() {
assertFailure(
Adder.class,
new Function<Adder, Adder>() {
@Override
public Adder apply(Adder adder) {
return new FailsToPropagateException(adder);
}
},
"add(",
"exception");
}
public void testNotInterfaceType() {
assertThrows(
IllegalArgumentException.class,
() ->
new ForwardingWrapperTester()
.testForwarding(String.class, Functions.<String>identity()));
}
public void testNulls() {
new NullPointerTester()
.setDefault(Class.class, Runnable.class)
.testAllPublicInstanceMethods(new ForwardingWrapperTester());
}
private <T> void assertFailure(
Class<T> interfaceType,
Function<T, ? extends T> wrapperFunction,
String... expectedMessages) {
try {
tester.testForwarding(interfaceType, wrapperFunction);
} catch (AssertionFailedError expected) {
for (String message : expectedMessages) {
assertThat(expected).hasMessageThat().contains(message);
}
return;
}
fail("expected failure not reported");
}
private
|
ForwardingWrapperTesterTest
|
java
|
hibernate__hibernate-orm
|
hibernate-spatial/src/main/java/org/hibernate/spatial/dialect/oracle/OracleSDOFunctionDescriptors.java
|
{
"start": 842,
"end": 6941
}
|
class ____ implements KeyedSqmFunctionDescriptors {
private final Map<FunctionKey, SqmFunctionDescriptor> map = new HashMap<>();
private final BasicTypeRegistry typeRegistry;
public OracleSDOFunctionDescriptors(FunctionContributions functionContributions) {
typeRegistry = functionContributions.getTypeConfiguration().getBasicTypeRegistry();
registerSDOFunctions();
}
@Override
public Map<FunctionKey, SqmFunctionDescriptor> asMap() {
return Collections.unmodifiableMap( map );
}
private void registerSDOFunctions() {
map.put( CommonSpatialFunction.ST_ASTEXT.getKey(), new NamedSqmFunctionDescriptor(
"SDO_UTIL.TO_WKTGEOMETRY",
false,
StandardArgumentsValidators.exactly(
1 ),
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve(
StandardBasicTypes.STRING ) )
) );
map.put( CommonSpatialFunction.ST_GEOMETRYTYPE.getKey(), new SDOGetGeometryType( typeRegistry ) );
map.put( CommonSpatialFunction.ST_DIMENSION.getKey(), new SDOMethodDescriptor(
"Get_Dims",
StandardArgumentsValidators.exactly(
1 ),
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve(
StandardBasicTypes.INTEGER ) )
) );
map.put(
CommonSpatialFunction.ST_ENVELOPE.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_MBR",
true,
StandardArgumentsValidators.exactly( 1 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_SRID.getKey(),
new SDOMethodDescriptor(
"SDO_SRID",
false,
StandardArgumentsValidators.exactly( 1 ),
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve(
StandardBasicTypes.INTEGER
)
)
)
);
map.put(
CommonSpatialFunction.ST_ASBINARY.getKey(),
new SDOMethodDescriptor(
"Get_WKB",
true,
StandardArgumentsValidators.exactly( 1 ),
StandardFunctionReturnTypeResolvers.invariant( typeRegistry.resolve( StandardBasicTypes.BINARY ) )
)
);
map.put(
CommonSpatialFunction.ST_ISSIMPLE.getKey(),
new OracleSpatialSQLMMFunction(
"ST_ISSIMPLE",
"ST_ISSIMPLE",
1,
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve( StandardBasicTypes.BOOLEAN )
),
false
)
);
map.put(
CommonSpatialFunction.ST_ISEMPTY.getKey(),
new OracleSpatialSQLMMFunction(
"ST_ISEMPTY",
"ST_ISEMPTY",
1,
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve( StandardBasicTypes.BOOLEAN )
),
false
)
);
map.put(
CommonSpatialFunction.ST_BOUNDARY.getKey(),
new OracleSpatialSQLMMFunction(
"ST_BOUNDARY",
"ST_BOUNDARY",
1,
StandardFunctionReturnTypeResolvers.useFirstNonNull(),
true
)
);
map.put(
CommonSpatialFunction.ST_OVERLAPS.getKey(),
new SDORelateFunction( List.of( "OVERLAPBDYDISJOINT", "OVERLAPBDYINTERSECT" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_CROSSES.getKey(),
new OracleSpatialSQLMMFunction(
"ST_CROSSES",
"ST_CROSSES",
2,
StandardFunctionReturnTypeResolvers.invariant(
typeRegistry.resolve( StandardBasicTypes.BOOLEAN )
),
false
)
);
map.put(
CommonSpatialFunction.ST_INTERSECTS.getKey(),
new SDORelateFunction( List.of( "ANYINTERACT" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_CONTAINS.getKey(),
new SDORelateFunction( List.of( "CONTAINS", "COVERS" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_DISJOINT.getKey(),
new SDORelateFunction( List.of( "DISJOINT" ), typeRegistry )
);
map.put( CommonSpatialFunction.ST_RELATE.getKey(), new STRelateFunction( typeRegistry ) );
map.put(
CommonSpatialFunction.ST_TOUCHES.getKey(),
new SDORelateFunction( List.of( "TOUCH" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_WITHIN.getKey(),
new SDORelateFunction( List.of( "INSIDE", "COVEREDBY" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_EQUALS.getKey(),
new SDORelateFunction( List.of( "EQUAL" ), typeRegistry )
);
map.put(
CommonSpatialFunction.ST_DISTANCE.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_DISTANCE",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.invariant( typeRegistry.resolve( StandardBasicTypes.DOUBLE ) )
)
);
map.put(
CommonSpatialFunction.ST_BUFFER.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_BUFFER",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_CONVEXHULL.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_CONVEXHULL",
true,
StandardArgumentsValidators.exactly( 1 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_DIFFERENCE.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_DIFFERENCE",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_INTERSECTION.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_INTERSECTION",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_SYMDIFFERENCE.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_XOR",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
map.put(
CommonSpatialFunction.ST_UNION.getKey(),
new NamedSqmFunctionDescriptor(
"SDO_GEOM.SDO_UNION",
true,
StandardArgumentsValidators.exactly( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull()
)
);
}
}
|
OracleSDOFunctionDescriptors
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/utils/Protocol.java
|
{
"start": 775,
"end": 1388
}
|
class ____ {
/**
* fix the version number like 2.0.4(fix the version template like major.minor.bug-fix)
*
* @param version version
* @return version.
*/
public static int getVersionNumber(String version) {
if (version == null) {
return -1;
}
String[] vs = version.split("\\.");
int sum = 0;
for (int i = 0; i < vs.length; i++) {
try {
sum = sum * 10 + Integer.parseInt(vs[i]);
} catch (Exception e) {
// ignore
}
}
return sum;
}
}
|
Protocol
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/common/state/ListStateDeclarationTest.java
|
{
"start": 1055,
"end": 2841
}
|
class ____ {
@Test
void testListStateDeclarationName() {
ListStateDeclaration<Integer> listStateDeclaration =
StateDeclarations.listStateBuilder("listState", TypeDescriptors.INT).build();
assertThat(listStateDeclaration.getName()).isEqualTo("listState");
}
@Test
void testListStateDeclarationDistribution() {
ListStateDeclaration<Integer> listStateDefault =
StateDeclarations.listStateBuilder("listState", TypeDescriptors.INT).build();
assertThat(listStateDefault.getRedistributionStrategy())
.isEqualTo(ListStateDeclaration.RedistributionStrategy.SPLIT);
assertThat(listStateDefault.getRedistributionMode())
.isEqualTo(StateDeclaration.RedistributionMode.NONE);
ListStateDeclaration<Integer> listStateCustomized =
StateDeclarations.listStateBuilder("listState", TypeDescriptors.INT)
.redistributeBy(ListStateDeclaration.RedistributionStrategy.UNION)
.redistributeWithMode(StateDeclaration.RedistributionMode.REDISTRIBUTABLE)
.build();
assertThat(listStateCustomized.getRedistributionStrategy())
.isEqualTo(ListStateDeclaration.RedistributionStrategy.UNION);
assertThat(listStateCustomized.getRedistributionMode())
.isEqualTo(StateDeclaration.RedistributionMode.REDISTRIBUTABLE);
}
@Test
void testListStateDeclarationType() {
ListStateDeclaration<Integer> listStateDeclaration =
StateDeclarations.listStateBuilder("listState", TypeDescriptors.INT).build();
assertThat(listStateDeclaration.getTypeDescriptor()).isEqualTo(TypeDescriptors.INT);
}
}
|
ListStateDeclarationTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java
|
{
"start": 2002,
"end": 3526
}
|
enum ____ {
BEST_SPEED(1, BEST_SPEED_BLOCK_SIZE, 128),
BEST_COMPRESSION(3, BEST_COMPRESSION_BLOCK_SIZE, 2048);
final int level, blockSizeInBytes, blockDocCount;
final Zstd814StoredFieldsFormat format;
Mode(int level, int blockSizeInBytes, int blockDocCount) {
this.level = level;
this.blockSizeInBytes = blockSizeInBytes;
this.blockDocCount = blockDocCount;
this.format = new Zstd814StoredFieldsFormat(this);
}
public Zstd814StoredFieldsFormat getFormat() {
return format;
}
}
private final Mode mode;
private Zstd814StoredFieldsFormat(Mode mode) {
super("ZstdStoredFields814", new ZstdCompressionMode(mode.level), mode.blockSizeInBytes, mode.blockDocCount, 10);
this.mode = mode;
}
@Override
public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException {
// Both modes are compatible, we only put an attribute for debug purposes.
String previous = si.putAttribute(MODE_KEY, mode.name());
if (previous != null && previous.equals(mode.name()) == false) {
throw new IllegalStateException(
"found existing value for " + MODE_KEY + " for segment: " + si.name + "old=" + previous + ", new=" + mode.name()
);
}
return super.fieldsWriter(directory, si, context);
}
public Mode getMode() {
return mode;
}
}
|
Mode
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/engine/support/hierarchical/ParallelExecutionIntegrationTests.java
|
{
"start": 15273,
"end": 15729
}
|
class ____ {
static AtomicInteger sharedResource;
static CountDownLatch countDownLatch;
@BeforeAll
static void initialize() {
sharedResource = new AtomicInteger();
countDownLatch = new CountDownLatch(6);
}
@Test
void a() throws Exception {
storeAndBlockAndCheck(sharedResource, countDownLatch);
}
@Test
void b() throws Exception {
storeAndBlockAndCheck(sharedResource, countDownLatch);
}
@Nested
|
NestedIsolatedTestCase
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/LookupAnnotationTests.java
|
{
"start": 7862,
"end": 7920
}
|
class ____<T extends Number> {
}
public static
|
NumberStore
|
java
|
quarkusio__quarkus
|
independent-projects/qute/core/src/main/java/io/quarkus/qute/ValueResolvers.java
|
{
"start": 9039,
"end": 10101
}
|
class ____ implements ValueResolver {
public boolean appliesTo(EvalContext context) {
if (hasNoParams(context) && matchClass(context, Mapper.class)
&& context.getBase() instanceof Mapper mapper) {
return mapper.appliesTo(context.getName());
}
return false;
}
@Override
public int getPriority() {
// mapper is used in loops so we use a higher priority to jump the queue
return 15;
}
@Override
public CompletionStage<Object> resolve(EvalContext context) {
return ((Mapper) context.getBase()).getAsync(context.getName());
}
}
/**
* Performs conditional AND on the base object and the first parameter. It's a
* short-circuiting operation - the parameter is only evaluated if needed.
*
* @see Booleans#isFalsy(Object)
*/
public static ValueResolver logicalAndResolver() {
return new LogicalAndResolver();
}
public static final
|
MapperResolver
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/security/ssl/SslFactory.java
|
{
"start": 19033,
"end": 25780
}
|
class ____ {
private static final ByteBuffer EMPTY_BUF = ByteBuffer.allocate(0);
private final SSLEngine sslEngine;
private SSLEngineResult handshakeResult;
private ByteBuffer appBuffer;
private ByteBuffer netBuffer;
static void validate(SslEngineFactory oldEngineBuilder,
SslEngineFactory newEngineBuilder) throws SSLException {
validate(createSslEngineForValidation(oldEngineBuilder, ConnectionMode.SERVER),
createSslEngineForValidation(newEngineBuilder, ConnectionMode.CLIENT));
validate(createSslEngineForValidation(newEngineBuilder, ConnectionMode.SERVER),
createSslEngineForValidation(oldEngineBuilder, ConnectionMode.CLIENT));
}
private static SSLEngine createSslEngineForValidation(SslEngineFactory sslEngineFactory, ConnectionMode connectionMode) {
// Use empty hostname, disable hostname verification
if (connectionMode == ConnectionMode.SERVER) {
return sslEngineFactory.createServerSslEngine("", 0);
} else {
return sslEngineFactory.createClientSslEngine("", 0, "");
}
}
static void validate(SSLEngine clientEngine, SSLEngine serverEngine) throws SSLException {
SslEngineValidator clientValidator = new SslEngineValidator(clientEngine);
SslEngineValidator serverValidator = new SslEngineValidator(serverEngine);
try {
clientValidator.beginHandshake();
serverValidator.beginHandshake();
while (!serverValidator.complete() || !clientValidator.complete()) {
clientValidator.handshake(serverValidator);
serverValidator.handshake(clientValidator);
}
} finally {
clientValidator.close();
serverValidator.close();
}
}
private SslEngineValidator(SSLEngine engine) {
this.sslEngine = engine;
appBuffer = ByteBuffer.allocate(sslEngine.getSession().getApplicationBufferSize());
netBuffer = ByteBuffer.allocate(sslEngine.getSession().getPacketBufferSize());
}
void beginHandshake() throws SSLException {
sslEngine.beginHandshake();
}
void handshake(SslEngineValidator peerValidator) throws SSLException {
SSLEngineResult.HandshakeStatus handshakeStatus = sslEngine.getHandshakeStatus();
while (true) {
switch (handshakeStatus) {
case NEED_WRAP:
handshakeResult = sslEngine.wrap(EMPTY_BUF, netBuffer);
switch (handshakeResult.getStatus()) {
case OK: break;
case BUFFER_OVERFLOW:
if (netBuffer.position() != 0) // Wait for peer to consume previously wrapped data
return;
netBuffer.compact();
netBuffer = Utils.ensureCapacity(netBuffer, sslEngine.getSession().getPacketBufferSize());
netBuffer.flip();
break;
case BUFFER_UNDERFLOW:
case CLOSED:
default:
throw new SSLException("Unexpected handshake status: " + handshakeResult.getStatus());
}
return;
case NEED_UNWRAP:
handshakeStatus = unwrap(peerValidator, true);
if (handshakeStatus == null) return;
break;
case NEED_TASK:
sslEngine.getDelegatedTask().run();
handshakeStatus = sslEngine.getHandshakeStatus();
break;
case FINISHED:
return;
case NOT_HANDSHAKING:
if (handshakeResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED)
throw new SSLException("Did not finish handshake, handshake status: " + handshakeResult.getHandshakeStatus());
else if (peerValidator.netBuffer.position() != 0) {
unwrap(peerValidator, false);
}
return;
default:
throw new IllegalStateException("Unexpected handshake status: " + handshakeStatus);
}
}
}
private SSLEngineResult.HandshakeStatus unwrap(SslEngineValidator peerValidator, boolean updateHandshakeResult) throws SSLException {
// Unwrap regardless of whether there is data in the buffer to ensure that
// handshake status is updated if required.
peerValidator.netBuffer.flip(); // unwrap the data from peer
SSLEngineResult sslEngineResult = sslEngine.unwrap(peerValidator.netBuffer, appBuffer);
if (updateHandshakeResult) {
handshakeResult = sslEngineResult;
}
peerValidator.netBuffer.compact();
SSLEngineResult.HandshakeStatus handshakeStatus = sslEngineResult.getHandshakeStatus();
switch (sslEngineResult.getStatus()) {
case OK: break;
case BUFFER_OVERFLOW:
appBuffer = Utils.ensureCapacity(appBuffer, sslEngine.getSession().getApplicationBufferSize());
break;
case BUFFER_UNDERFLOW:
netBuffer = Utils.ensureCapacity(netBuffer, sslEngine.getSession().getPacketBufferSize());
// BUFFER_UNDERFLOW typically indicates that we need more data from peer,
// so return to process peer.
return null;
case CLOSED:
default:
throw new SSLException("Unexpected handshake status: " + sslEngineResult.getStatus());
}
return handshakeStatus;
}
boolean complete() {
return sslEngine.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.FINISHED ||
sslEngine.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING;
}
void close() {
sslEngine.closeOutbound();
try {
sslEngine.closeInbound();
} catch (Exception e) {
// ignore
}
}
}
}
|
SslEngineValidator
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/orm/jpa/domain/PersonListener.java
|
{
"start": 1092,
"end": 1993
}
|
class ____ {
public static final List<String> methodsInvoked = new ArrayList<>();
@PostLoad
public void postLoad(Person person) {
methodsInvoked.add("@PostLoad: " + person.getName());
}
@PrePersist
public void prePersist(Person person) {
methodsInvoked.add("@PrePersist: " + person.getName());
}
@PostPersist
public void postPersist(Person person) {
methodsInvoked.add("@PostPersist: " + person.getName());
}
@PreUpdate
public void preUpdate(Person person) {
methodsInvoked.add("@PreUpdate: " + person.getName());
}
@PostUpdate
public void postUpdate(Person person) {
methodsInvoked.add("@PostUpdate: " + person.getName());
}
@PreRemove
public void preRemove(Person person) {
methodsInvoked.add("@PreRemove: " + person.getName());
}
@PostRemove
public void postRemove(Person person) {
methodsInvoked.add("@PostRemove: " + person.getName());
}
}
|
PersonListener
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/unionsubclass/secondarytables/UnionSubclassWithSecondaryTableTests.java
|
{
"start": 633,
"end": 980
}
|
class ____ {
@Test
@FailureExpected
@Jira("https://hibernate.atlassian.net/browse/HHH-12676")
void testIt(SessionFactoryScope sessions) {
sessions.inTransaction( (session) -> {
session.createQuery( "from CardPayment" ).getResultList();
session.createQuery( "from Payment" ).getResultList();
} );
}
}
|
UnionSubclassWithSecondaryTableTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/schemafilter/SchemaFilterTest.java
|
{
"start": 6420,
"end": 6671
}
|
class ____ {
@Id
private long id;
public long getId() {
return id;
}
public void setId( long id ) {
this.id = id;
}
}
@Entity
@jakarta.persistence.Table(name = "the_entity_4", schema = "the_schema_2")
public static
|
Schema2Entity3
|
java
|
elastic__elasticsearch
|
modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java
|
{
"start": 40371,
"end": 41925
}
|
class ____ extends Plugin implements IngestPlugin {
/*
* This processor has a single field, randomField. Its sole purpose is to hold a random value to make the processor unique from
* other prorcessors that are otherwise identical. The only reason for this is so that the pipeline the processor belongs to is
* unique. And the only reason for that is so that adding the pipeline is not treated as a no-op.
*/
public static final String NON_GEO_PROCESSOR_TYPE = "test";
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
Map<String, Processor.Factory> procMap = new HashMap<>();
procMap.put(NON_GEO_PROCESSOR_TYPE, (factories, tag, description, config, projectId) -> {
readStringProperty(NON_GEO_PROCESSOR_TYPE, tag, config, "randomField");
return new AbstractProcessor(tag, description) {
@Override
public void execute(IngestDocument ingestDocument, BiConsumer<IngestDocument, Exception> handler) {
config.remove("randomField");
}
@Override
public String getType() {
return NON_GEO_PROCESSOR_TYPE;
}
};
});
return procMap;
}
}
/**
* A simple plugin that provides the {@link GeoIpIndexSettingProvider}.
*/
public static final
|
NonGeoProcessorsPlugin
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/issues/StopRouteShouldNotStopContextScopedErrorHandlerIssueTest.java
|
{
"start": 981,
"end": 1976
}
|
class ____ extends ContextTestSupport {
@Test
public void testIssue() throws Exception {
getMockEndpoint("mock:error").expectedMessageCount(1);
// stopping foo route, which should not stop context scoped error
// handler
context.getRouteController().stopRoute("foo");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// use context scoped
errorHandler(deadLetterChannel("mock:error").maximumRedeliveries(0));
from("direct:start").routeId("start").to("log:start").throwException(new IllegalArgumentException("Forced"));
from("direct:foo").routeId("foo").to("log:foo").to("mock:foo");
}
};
}
}
|
StopRouteShouldNotStopContextScopedErrorHandlerIssueTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/cli/MultiCommandTests.java
|
{
"start": 990,
"end": 1471
}
|
class ____ extends MultiCommand {
final AtomicBoolean closed = new AtomicBoolean();
DummyMultiCommand() {
super("A dummy multi command");
}
@Override
public void close() throws IOException {
super.close();
if (this.closed.compareAndSet(false, true) == false) {
throw new IllegalStateException("DummyMultiCommand already closed");
}
}
}
static
|
DummyMultiCommand
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/bytecode/enhancement/extension/engine/BytecodeEnhancedEngineDescriptor.java
|
{
"start": 393,
"end": 866
}
|
class ____ extends JupiterEngineDescriptor {
public BytecodeEnhancedEngineDescriptor(UniqueId uniqueId, JupiterConfiguration configuration) {
super( uniqueId, configuration );
}
public BytecodeEnhancedEngineDescriptor(JupiterEngineDescriptor engineDescriptor) {
super( engineDescriptor.getUniqueId(), engineDescriptor.getConfiguration() );
for ( TestDescriptor child : engineDescriptor.getChildren() ) {
addChild( child );
}
}
}
|
BytecodeEnhancedEngineDescriptor
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletableFromObservable.java
|
{
"start": 1178,
"end": 1801
}
|
class ____<T> implements Observer<T> {
final CompletableObserver co;
CompletableFromObservableObserver(CompletableObserver co) {
this.co = co;
}
@Override
public void onSubscribe(Disposable d) {
co.onSubscribe(d);
}
@Override
public void onNext(T value) {
// Deliberately ignored.
}
@Override
public void onError(Throwable e) {
co.onError(e);
}
@Override
public void onComplete() {
co.onComplete();
}
}
}
|
CompletableFromObservableObserver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/RestartTimeInfo.java
|
{
"start": 355,
"end": 1144
}
|
class ____ {
private final Long latestFinalBucketTimeMs;
private final Long latestRecordTimeMs;
private final boolean haveSeenDataPreviously;
public RestartTimeInfo(@Nullable Long latestFinalBucketTimeMs, @Nullable Long latestRecordTimeMs, boolean haveSeenDataPreviously) {
this.latestFinalBucketTimeMs = latestFinalBucketTimeMs;
this.latestRecordTimeMs = latestRecordTimeMs;
this.haveSeenDataPreviously = haveSeenDataPreviously;
}
@Nullable
public Long getLatestFinalBucketTimeMs() {
return latestFinalBucketTimeMs;
}
@Nullable
public Long getLatestRecordTimeMs() {
return latestRecordTimeMs;
}
public boolean haveSeenDataPreviously() {
return haveSeenDataPreviously;
}
}
|
RestartTimeInfo
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/BeanDescription.java
|
{
"start": 12845,
"end": 13457
}
|
class ____ extends SupplierBase
{
protected final BeanDescription _beanDesc;
public EagerSupplier(MapperConfig<?> config, BeanDescription beanDesc) {
super(config, beanDesc.getType());
_beanDesc = beanDesc;
}
@Override
public BeanDescription get() { return _beanDesc; }
@Override
public AnnotatedClass getClassInfo() {
return _beanDesc.getClassInfo();
}
@Override
public Annotations getClassAnnotations() {
return _beanDesc.getClassAnnotations();
}
}
}
|
EagerSupplier
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/FluxFilterFuseable.java
|
{
"start": 6088,
"end": 10250
}
|
class ____<T>
implements InnerOperator<T, T>, ConditionalSubscriber<T>,
QueueSubscription<T> {
final ConditionalSubscriber<? super T> actual;
final Context ctx;
final Predicate<? super T> predicate;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
QueueSubscription<T> s;
boolean done;
int sourceMode;
FilterFuseableConditionalSubscriber(ConditionalSubscriber<? super T> actual,
Predicate<? super T> predicate) {
this.actual = actual;
this.ctx = actual.currentContext();
this.predicate = predicate;
}
@SuppressWarnings("unchecked")
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = (QueueSubscription<T>) s;
actual.onSubscribe(this);
}
}
@SuppressWarnings("DataFlowIssue") // fusion passes nulls via onNext
@Override
public void onNext(T t) {
if (sourceMode == ASYNC) {
actual.onNext(null);
}
else {
if (done) {
Operators.onNextDropped(t, this.ctx);
return;
}
boolean b;
try {
b = predicate.test(t);
}
catch (Throwable e) {
Throwable e_ = Operators.onNextError(t, e, this.ctx, s);
if (e_ != null) {
onError(e_);
}
else {
s.request(1);
}
Operators.onDiscard(t, this.ctx);
return;
}
if (b) {
actual.onNext(t);
}
else {
s.request(1);
Operators.onDiscard(t, this.ctx);
}
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
Operators.onNextDropped(t, this.ctx);
return false;
}
boolean b;
try {
b = predicate.test(t);
}
catch (Throwable e) {
Throwable e_ = Operators.onNextError(t, e, this.ctx, s);
if (e_ != null) {
onError(e_);
}
Operators.onDiscard(t, this.ctx);
return false;
}
if (b) {
return actual.tryOnNext(t);
}
else {
Operators.onDiscard(t, this.ctx);
return false;
}
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, this.ctx);
return;
}
done = true;
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
actual.onComplete();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return s;
if (key == Attr.TERMINATED) return done;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
@Override
public void request(long n) {
s.request(n);
}
@Override
public void cancel() {
s.cancel();
}
@Override
public @Nullable T poll() {
if (sourceMode == ASYNC) {
long dropped = 0;
for (; ; ) {
T v = s.poll();
try {
if (v == null || predicate.test(v)) {
if (dropped != 0) {
request(dropped);
}
return v;
}
Operators.onDiscard(v, this.ctx);
dropped++;
}
catch (Throwable e) {
RuntimeException e_ = Operators.onNextPollError(v, e, this.ctx);
Operators.onDiscard(v, this.ctx);
if (e_ != null) {
throw e_;
}
// else continue
}
}
}
else {
for (; ; ) {
T v = s.poll();
try {
if (v == null || predicate.test(v)) {
return v;
}
Operators.onDiscard(v, this.ctx);
}
catch (Throwable e) {
RuntimeException e_ = Operators.onNextPollError(v, e, this.ctx);
Operators.onDiscard(v, this.ctx);
if (e_ != null) {
throw e_;
}
// else continue
}
}
}
}
@Override
public boolean isEmpty() {
return s.isEmpty();
}
@Override
public void clear() {
s.clear();
}
@Override
public int size() {
return s.size();
}
@Override
public int requestFusion(int requestedMode) {
int m;
if ((requestedMode & Fuseable.THREAD_BARRIER) != 0) {
return Fuseable.NONE;
}
else {
m = s.requestFusion(requestedMode);
}
sourceMode = m;
return m;
}
}
}
|
FilterFuseableConditionalSubscriber
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/FieldExtractorTestCase.java
|
{
"start": 1531,
"end": 1746
}
|
class ____ parameters/settings that can be used in the mapping of an index
* and which can affect the outcome of _source extraction and parsing when retrieving
* values from Elasticsearch.
*/
public abstract
|
covering
|
java
|
apache__avro
|
lang/java/mapred/src/main/java/org/apache/avro/mapred/HadoopReducer.java
|
{
"start": 1150,
"end": 1491
}
|
class ____<K, V, OUT> extends HadoopReducerBase<K, V, OUT, AvroWrapper<OUT>, NullWritable> {
@Override
@SuppressWarnings("unchecked")
protected AvroReducer<K, V, OUT> getReducer(JobConf conf) {
return ReflectionUtils.newInstance(conf.getClass(AvroJob.REDUCER, AvroReducer.class, AvroReducer.class), conf);
}
private
|
HadoopReducer
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsTests.java
|
{
"start": 47679,
"end": 47797
}
|
class ____ extends ClassWithNestedClasses implements Interface45 {
}
}
@Nested
|
ClassExtendingClassWithNestedClasses
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/RandomCastTest.java
|
{
"start": 855,
"end": 1168
}
|
class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(RandomCast.class, getClass());
@Test
public void positive() {
testHelper
.addSourceLines(
"Test.java",
"""
import java.util.Random;
|
RandomCastTest
|
java
|
quarkusio__quarkus
|
integration-tests/kafka-devservices/src/test/java/io/quarkus/it/kafka/continuoustesting/DevServicesKafkaContinuousTestingTest.java
|
{
"start": 1326,
"end": 11023
}
|
class ____ extends BaseDevServiceTest {
static final String DEVSERVICES_DISABLED_PROPERTIES = ContinuousTestingTestUtils.appProperties(
"quarkus.devservices.enabled=false");
static final String UPDATED_FIXED_PORT_PROPERTIES = "\nquarkus.kafka.devservices.port=6342\n";
@RegisterExtension
public static QuarkusDevModeTest test = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClass(BundledEndpoint.class)
.addClass(KafkaAdminManager.class)
.deleteClass(KafkaEndpoint.class)
.addAsResource(
new StringAsset(ContinuousTestingTestUtils
.appProperties("quarkus.kafka.devservices.provider=kafka-native",
"quarkus.kafka.devservices.topic-partitions.test=2",
"quarkus.kafka.devservices.topic-partitions.test-consumer=3",
"quarkus.kafka.health.enabled=true")),
"application.properties"))
.setTestArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class).addClass(KafkaAdminTest.class));
@AfterAll
static void afterAll() {
stopAllContainers();
}
@Test
public void testContinuousTestingDisablesDevServicesWhenPropertiesChange() {
ContinuousTestingTestUtils utils = new ContinuousTestingTestUtils();
var result = utils.waitForNextCompletion();
assertEquals(2, result.getTotalTestsPassed());
assertEquals(0, result.getTotalTestsFailed());
// Now let's disable dev services globally ... BOOOOOM! Splat!
test.modifyResourceFile("application.properties", s -> DEVSERVICES_DISABLED_PROPERTIES);
result = utils.waitForNextCompletion();
assertEquals(0, result.getTotalTestsPassed());
assertEquals(1, result.getTotalTestsFailed());
ping500();
List<Container> kafkaContainers = getKafkaContainers();
assertTrue(kafkaContainers.isEmpty(),
"Expected no containers, but got: " + prettyPrintContainerList(kafkaContainers));
}
// This tests behaviour in dev mode proper when combined with continuous testing. This creates a possibility of port conflicts, false sharing of state, and all sorts of race conditions.
@Test
public void testDevModeCoexistingWithContinuousTestingServiceUpdatesContainersOnConfigChange() {
// Note that driving continuous testing concurrently can sometimes cause 500s caused by containers not yet being available on slow machines
ContinuousTestingTestUtils continuousTestingTestUtils = new ContinuousTestingTestUtils();
ContinuousTestingTestUtils.TestStatus result = continuousTestingTestUtils.waitForNextCompletion();
assertEquals(2, result.getTotalTestsPassed());
assertEquals(0, result.getTotalTestsFailed());
// Interacting with the app will force a refresh
ping();
List<Container> started = getKafkaContainers();
assertFalse(started.isEmpty());
Container container = started.get(0);
assertTrue(Arrays.stream(container.getPorts()).noneMatch(p -> p.getPublicPort() == 6377),
"Expected random port, but got: " + Arrays.toString(container.getPorts()));
int newPort = 6388;
int testPort = newPort + 1;
// Continuous tests and dev mode should *not* share containers, even if the port is fixed
// Specify that the fixed port is for dev mode, or one launch will fail with port conflicts
test.modifyResourceFile("application.properties",
s -> s + "\n%dev.quarkus.kafka.devservices.port=" + newPort
+ "\n%test.quarkus.kafka.devservices.port=" + testPort);
test.modifyTestSourceFile(KafkaAdminTest.class, s -> s.replaceAll("test\\(\\) ", "someTest()"));
// Force another refresh
result = continuousTestingTestUtils.waitForNextCompletion();
assertEquals(2, result.getTotalTestsPassed());
assertEquals(0, result.getTotalTestsFailed());
ping();
List<Container> newContainers = getKafkaContainersExcludingExisting(started);
// We expect 2 new containers, since test was also refreshed
assertEquals(2, newContainers.size(),
"New containers: "
+ prettyPrintContainerList(newContainers)
+ "\n Old containers: " + prettyPrintContainerList(started) + "\n All containers: "
+ prettyPrintContainerList(getKafkaContainers())); // this can be wrong
// We need to inspect the dev-mode container; we don't have a non-brittle way of distinguishing them, so just look in them all
boolean hasRightPort = newContainers.stream()
.anyMatch(newContainer -> hasPublicPort(newContainer, newPort));
assertTrue(hasRightPort,
"Expected port " + newPort + ", but got: "
+ newContainers.stream().map(c -> Arrays.toString(c.getPorts())).collect(Collectors.joining(", ")));
boolean hasRightTestPort = newContainers.stream()
.anyMatch(newContainer -> hasPublicPort(newContainer, testPort));
assertTrue(hasRightTestPort,
"Expected port " + testPort + ", but got: "
+ newContainers.stream().map(c -> Arrays.toString(c.getPorts())).collect(Collectors.joining(", ")));
}
void ping() {
when().get("/kafka/partitions/test").then()
.statusCode(200)
.body(is("2"));
}
void ping500() {
when().get("/kafka/partitions/test").then()
.statusCode(500);
}
@Test
public void testContinuousTestingReusesInstanceWhenPropertiesAreNotChanged() {
ContinuousTestingTestUtils utils = new ContinuousTestingTestUtils();
var result = utils.waitForNextCompletion();
assertEquals(2, result.getTotalTestsPassed());
assertEquals(0, result.getTotalTestsFailed());
List<Container> kafkaContainers = getKafkaContainers();
// Make a change that shouldn't affect dev services
test.modifyTestSourceFile(KafkaAdminTest.class, s -> s.replaceAll("test\\(\\)", "myTest()"));
result = utils.waitForNextCompletion();
assertEquals(2, result.getTestsPassed());
assertEquals(0, result.getTestsFailed());
// Some containers could have disappeared, because ryuk cleaned them up, but no new containers should have appeared
List<Container> newContainers = getKafkaContainersExcludingExisting(kafkaContainers);
assertEquals(0, newContainers.size(),
"New containers: " + newContainers + "\n Old containers: " + kafkaContainers + "\n All containers: "
+ getKafkaContainers());
}
@Test
public void testContinuousTestingCreatesANewInstanceWhenPropertiesAreChanged() {
ContinuousTestingTestUtils utils = new ContinuousTestingTestUtils();
var result = utils.waitForNextCompletion();
assertEquals(2, result.getTotalTestsPassed());
assertEquals(0, result.getTotalTestsFailed());
List<Container> existingContainers = new ArrayList<>(getKafkaContainers());
test.modifyResourceFile("application.properties", s -> s.replaceAll("kafka-native", "Redpanda"));
result = utils.waitForNextCompletion();
assertEquals(2, result.getTestsPassed());
assertEquals(0, result.getTestsFailed());
// A new container should have appeared
{
List<Container> newContainers = getKafkaContainersExcludingExisting(existingContainers);
assertEquals(1, newContainers.size(),
"New containers: " + newContainers + "\n Old containers: " + existingContainers + "\n All containers: "
+ getKafkaContainers());
List<Integer> existingPorts = Arrays.stream(existingContainers.get(0).getPorts())
.map(ContainerPort::getPublicPort)
.toList();
// The new container should be on the new port
List<Integer> ports = Arrays.stream(newContainers.get(0).getPorts())
.map(ContainerPort::getPublicPort)
.toList();
// Oh good, it's one port, so it should be the expected one
assertFalse(ports.containsAll(existingPorts), "Container ports: " + ports);
existingContainers.addAll(newContainers);
}
test.modifyResourceFile("application.properties", s -> s + UPDATED_FIXED_PORT_PROPERTIES);
result = utils.waitForNextCompletion();
assertEquals(2, result.getTestsPassed());
assertEquals(0, result.getTestsFailed());
// Another new container should have appeared
{
List<Container> newContainers = getKafkaContainersExcludingExisting(existingContainers);
assertEquals(1, newContainers.size(),
"New containers: " + newContainers + "\n Old containers: " + existingContainers + "\n All containers: "
+ getKafkaContainers());
// The new container should be on the new port
List<Integer> ports = Arrays.stream(newContainers.get(0).getPorts())
.map(ContainerPort::getPublicPort)
.toList();
assertTrue(ports.contains(6342), "Container ports: " + ports);
}
}
}
|
DevServicesKafkaContinuousTestingTest
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableRefCount.java
|
{
"start": 1355,
"end": 4305
}
|
class ____<T> extends Flowable<T> {
final ConnectableFlowable<T> source;
final int n;
final long timeout;
final TimeUnit unit;
final Scheduler scheduler;
RefConnection connection;
public FlowableRefCount(ConnectableFlowable<T> source) {
this(source, 1, 0L, TimeUnit.NANOSECONDS, null);
}
public FlowableRefCount(ConnectableFlowable<T> source, int n, long timeout, TimeUnit unit,
Scheduler scheduler) {
this.source = source;
this.n = n;
this.timeout = timeout;
this.unit = unit;
this.scheduler = scheduler;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
RefConnection conn;
boolean connect = false;
synchronized (this) {
conn = connection;
if (conn == null) {
conn = new RefConnection(this);
connection = conn;
}
long c = conn.subscriberCount;
if (c == 0L && conn.timer != null) {
conn.timer.dispose();
}
conn.subscriberCount = c + 1;
if (!conn.connected && c + 1 == n) {
connect = true;
conn.connected = true;
}
}
source.subscribe(new RefCountSubscriber<>(s, this, conn));
if (connect) {
source.connect(conn);
}
}
void cancel(RefConnection rc) {
SequentialDisposable sd;
synchronized (this) {
if (connection == null || connection != rc) {
return;
}
long c = rc.subscriberCount - 1;
rc.subscriberCount = c;
if (c != 0L || !rc.connected) {
return;
}
if (timeout == 0L) {
timeout(rc);
return;
}
sd = new SequentialDisposable();
rc.timer = sd;
}
sd.replace(scheduler.scheduleDirect(rc, timeout, unit));
}
void terminated(RefConnection rc) {
synchronized (this) {
if (connection == rc) {
if (rc.timer != null) {
rc.timer.dispose();
rc.timer = null;
}
if (--rc.subscriberCount == 0) {
connection = null;
source.reset();
}
}
}
}
void timeout(RefConnection rc) {
synchronized (this) {
if (rc.subscriberCount == 0 && rc == connection) {
connection = null;
Disposable connectionObject = rc.get();
DisposableHelper.dispose(rc);
if (connectionObject == null) {
rc.disconnectedEarly = true;
} else {
source.reset();
}
}
}
}
static final
|
FlowableRefCount
|
java
|
alibaba__nacos
|
client/src/main/java/com/alibaba/nacos/client/naming/cache/NamingFuzzyWatchServiceListHolder.java
|
{
"start": 2728,
"end": 17166
}
|
class ____ extends SmartSubscriber implements Closeable {
private static final Logger LOGGER = LogUtils.logger(NamingFuzzyWatchServiceListHolder.class);
private String notifierEventScope;
private NamingGrpcClientProxy namingGrpcClientProxy;
/**
* fuzzyListenExecuteBell.
*/
private final BlockingQueue<Object> fuzzyWatchExecuteBell = new ArrayBlockingQueue<>(1);
private final Object bellItem = new Object();
private final AtomicLong fuzzyWatchLastAllSyncTime = new AtomicLong(System.currentTimeMillis());
private static final long FUZZY_LISTEN_ALL_SYNC_INTERNAL = 3 * 60 * 1000;
ScheduledExecutorService executorService;
/**
* The contents of {@code patternMatchMap} are Map{pattern -> Set[matched services]}.
*/
private Map<String, NamingFuzzyWatchContext> fuzzyMatchContextMap = new ConcurrentHashMap<>();
public NamingFuzzyWatchServiceListHolder(String notifierEventScope) {
this.notifierEventScope = notifierEventScope;
NotifyCenter.registerSubscriber(this);
}
/**
* shut down.
*/
@Override
public void shutdown() {
// deregister subscriber which registered in constructor
NotifyCenter.deregisterSubscriber(this);
if (executorService != null && !executorService.isShutdown()) {
executorService.shutdown();
}
}
/**
* start.
*/
@SuppressWarnings("PMD.ThreadPoolCreationRule")
public void start() {
executorService = Executors.newSingleThreadScheduledExecutor(
new NameThreadFactory("com.alibaba.nacos.client.naming.fuzzy.watch.Worker"));
executorService.submit(() -> {
while (!executorService.isShutdown() && !executorService.isTerminated()) {
try {
fuzzyWatchExecuteBell.poll(5L, TimeUnit.SECONDS);
if (executorService.isShutdown() || executorService.isTerminated()) {
continue;
}
executeNamingFuzzyWatch();
} catch (Throwable e) {
LOGGER.error("[rpc-fuzzy-watch-execute] rpc fuzzy watch exception", e);
try {
Thread.sleep(50L);
} catch (InterruptedException interruptedException) {
//ignore
}
notifyFuzzyWatchSync();
}
}
});
}
public void registerNamingGrpcClientProxy(NamingGrpcClientProxy namingGrpcClientProxy) {
this.namingGrpcClientProxy = namingGrpcClientProxy;
}
public NamingFuzzyWatchContext getFuzzyWatchContext(String groupKeyPattern) {
return fuzzyMatchContextMap.get(groupKeyPattern);
}
/**
* Add a watcher to the context.
*
* @param watcher watcher to be added
*/
public NamingFuzzyWatchContext registerFuzzyWatcher(String groupKeyPattern, FuzzyWatchEventWatcher watcher) {
if (!namingGrpcClientProxy.isAbilitySupportedByServer(AbilityKey.SERVER_FUZZY_WATCH)) {
throw new NacosRuntimeException(NacosException.SERVER_NOT_IMPLEMENTED,
"Request Nacos server version is too low, not support fuzzy watch feature.");
}
NamingFuzzyWatchContext namingFuzzyWatchContext = initFuzzyWatchContextIfNeed(groupKeyPattern);
namingFuzzyWatchContext.setDiscard(false);
synchronized (namingFuzzyWatchContext) {
FuzzyWatchEventWatcherWrapper fuzzyWatchEventWatcherWrapper = new FuzzyWatchEventWatcherWrapper(watcher);
if (namingFuzzyWatchContext.getFuzzyWatchEventWatcherWrappers().add(fuzzyWatchEventWatcherWrapper)) {
LOGGER.info(" [add-watcher-ok] groupKeyPattern={}, watcher={},uuid={} ", groupKeyPattern, watcher,
fuzzyWatchEventWatcherWrapper.getUuid());
Set<String> receivedServiceKeys = namingFuzzyWatchContext.getReceivedServiceKeys();
if (CollectionUtils.isNotEmpty(receivedServiceKeys)) {
for (String serviceKey : receivedServiceKeys) {
NamingFuzzyWatchNotifyEvent namingFuzzyWatchNotifyEvent = NamingFuzzyWatchNotifyEvent.build(
notifierEventScope, groupKeyPattern, serviceKey, ADD_SERVICE, FUZZY_WATCH_INIT_NOTIFY,
fuzzyWatchEventWatcherWrapper.getUuid());
NotifyCenter.publishEvent(namingFuzzyWatchNotifyEvent);
}
}
}
}
return namingFuzzyWatchContext;
}
/**
* init fuzzy watch context.
*
* @param groupKeyPattern groupKeyPattern.
* @return fuzzy context.
*/
public NamingFuzzyWatchContext initFuzzyWatchContextIfNeed(String groupKeyPattern) {
if (!fuzzyMatchContextMap.containsKey(groupKeyPattern)) {
synchronized (fuzzyMatchContextMap) {
if (fuzzyMatchContextMap.containsKey(groupKeyPattern)) {
return fuzzyMatchContextMap.get(groupKeyPattern);
}
LOGGER.info("[fuzzy-watch] init fuzzy watch context for pattern {}", groupKeyPattern);
fuzzyMatchContextMap.putIfAbsent(groupKeyPattern,
new NamingFuzzyWatchContext(notifierEventScope, groupKeyPattern));
notifyFuzzyWatchSync();
}
}
return fuzzyMatchContextMap.get(groupKeyPattern);
}
/**
* remove fuzzy watch context for pattern.
*
* @param groupKeyPattern group key pattern.
*/
public synchronized void removePatternMatchCache(String groupKeyPattern) {
NamingFuzzyWatchContext namingFuzzyWatchContext = fuzzyMatchContextMap.get(groupKeyPattern);
if (namingFuzzyWatchContext == null) {
return;
}
if (namingFuzzyWatchContext.isDiscard() && namingFuzzyWatchContext.getFuzzyWatchEventWatcherWrappers()
.isEmpty()) {
LOGGER.info("[fuzzy-watch] remove fuzzy watch context for pattern {}", groupKeyPattern);
fuzzyMatchContextMap.remove(groupKeyPattern);
}
}
    /**
     * Notify the background executor to sync fuzzy watch state with the server.
     *
     * <p>Simply drops a signal item into {@code fuzzyWatchExecuteBell}; the consuming thread performs
     * the actual synchronization.
     */
    void notifyFuzzyWatchSync() {
        fuzzyWatchExecuteBell.offer(bellItem);
    }
    /**
     * Execute fuzzy watch synchronization for naming services.
     *
     * <p>Iterates all fuzzy watch contexts and collects those that need to be (re)synced with the server:
     * contexts that are not consistent with the server are always synced, while consistent contexts are
     * only re-synced when the periodic full-sync interval has elapsed. Consistent contexts additionally
     * get their local watchers notified via {@code syncFuzzyWatchers()}.
     *
     * @throws NacosException If an error occurs during the execution of fuzzy watch synchronization.
     */
    public void executeNamingFuzzyWatch() throws NacosException {
        // Obtain the current timestamp
        long now = System.currentTimeMillis();
        // Determine whether the periodic full synchronization is due
        boolean needAllSync = now - fuzzyWatchLastAllSyncTime.get() >= FUZZY_LISTEN_ALL_SYNC_INTERNAL;
        List<NamingFuzzyWatchContext> needSyncContexts = new ArrayList<>();
        // Iterate through all fuzzy watch contexts
        for (NamingFuzzyWatchContext context : fuzzyMatchContextMap.values()) {
            // Check if the context is consistent with the server
            if (context.isConsistentWithServer()) {
                context.syncFuzzyWatchers();
                // Skip consistent contexts unless a full synchronization is due
                if (!needAllSync) {
                    continue;
                }
            }
            needSyncContexts.add(context);
        }
        // Send the fuzzy watch requests for the collected contexts
        doExecuteNamingFuzzyWatch(needSyncContexts);
        // Update last all sync time if a full synchronization was performed
        if (needAllSync) {
            fuzzyWatchLastAllSyncTime.set(now);
        }
    }
public void resetConsistenceStatus() {
fuzzyMatchContextMap.values()
.forEach(fuzzyWatcherContext -> fuzzyWatcherContext.setConsistentWithServer(false));
}
/**
* Execute fuzzy listen configuration changes for a specific map of contexts.
*
* <p>This method submits tasks to execute fuzzy listen operations asynchronously for the provided contexts. It
* waits for all tasks to complete and logs any errors that occur.
*
* @param contextLists The map of contexts to execute fuzzy listen operations for.
* @throws NacosException If an error occurs during the execution of fuzzy listen configuration changes.
*/
private void doExecuteNamingFuzzyWatch(List<NamingFuzzyWatchContext> contextLists) throws NacosException {
// Return if the context map is null or empty
if (CollectionUtils.isEmpty(contextLists)) {
return;
}
// Iterate through the context map and submit tasks for execution
for (NamingFuzzyWatchContext entry : contextLists) {
// Submit task for execution
NamingFuzzyWatchRequest configFuzzyWatchRequest = buildFuzzyWatchNamingRequest(entry);
try {
// Execute the fuzzy listen operation
NamingFuzzyWatchResponse listenResponse = namingGrpcClientProxy.fuzzyWatchRequest(
configFuzzyWatchRequest);
if (listenResponse != null && listenResponse.isSuccess()) {
if (configFuzzyWatchRequest.getWatchType().equals(WATCH_TYPE_CANCEL_WATCH)) {
removePatternMatchCache(entry.getGroupKeyPattern());
} else {
entry.setConsistentWithServer(true);
}
entry.clearOverLimitTs();
}
} catch (NacosException e) {
// Log error and retry after a short delay
if (FUZZY_WATCH_PATTERN_OVER_LIMIT.getCode() == e.getErrCode()
|| FUZZY_WATCH_PATTERN_MATCH_COUNT_OVER_LIMIT.getCode() == e.getErrCode()) {
LOGGER.error(" fuzzy watch pattern over limit,pattern ->{} ,fuzzy watch will be suppressed,msg={}",
entry.getGroupKeyPattern(), e.getErrMsg());
NamingFuzzyWatchLoadEvent namingFuzzyWatchLoadEvent = NamingFuzzyWatchLoadEvent.buildEvent(
e.getErrCode(), entry.getGroupKeyPattern(), notifierEventScope);
NotifyCenter.publishEvent(namingFuzzyWatchLoadEvent);
} else {
LOGGER.error(" fuzzy watch request fail.", e);
try {
Thread.sleep(1000L);
} catch (InterruptedException interruptedException) {
// Ignore interruption
}
// Retry notification
notifyFuzzyWatchSync();
}
}
}
}
private NamingFuzzyWatchRequest buildFuzzyWatchNamingRequest(NamingFuzzyWatchContext namingFuzzyWatchContext) {
NamingFuzzyWatchRequest namingFuzzyWatchRequest = new NamingFuzzyWatchRequest();
namingFuzzyWatchRequest.setInitializing(namingFuzzyWatchContext.isInitializing());
namingFuzzyWatchRequest.setNamespace(namingGrpcClientProxy.getNamespaceId());
namingFuzzyWatchRequest.setReceivedGroupKeys(namingFuzzyWatchContext.getReceivedServiceKeys());
namingFuzzyWatchRequest.setGroupKeyPattern(namingFuzzyWatchContext.getGroupKeyPattern());
if (namingFuzzyWatchContext.isDiscard() && namingFuzzyWatchContext.getFuzzyWatchEventWatcherWrappers()
.isEmpty()) {
namingFuzzyWatchRequest.setWatchType(WATCH_TYPE_CANCEL_WATCH);
} else {
namingFuzzyWatchRequest.setWatchType(WATCH_TYPE_WATCH);
}
return namingFuzzyWatchRequest;
}
    /**
     * Return the map of group key pattern to fuzzy watch context.
     *
     * <p>NOTE(review): this exposes the live internal map rather than a copy, so callers can mutate it
     * directly — confirm whether that is intended.
     */
    public Map<String, NamingFuzzyWatchContext> getFuzzyMatchContextMap() {
        return fuzzyMatchContextMap;
    }
@Override
public void onEvent(Event event) {
if (event instanceof NamingFuzzyWatchNotifyEvent) {
if (!event.scope().equals(notifierEventScope)) {
return;
}
NamingFuzzyWatchNotifyEvent watchNotifyEvent = (NamingFuzzyWatchNotifyEvent) event;
String changedType = watchNotifyEvent.getChangedType();
String syncType = watchNotifyEvent.getSyncType();
String serviceKey = watchNotifyEvent.getServiceKey();
String pattern = watchNotifyEvent.getPattern();
String watchUuid = watchNotifyEvent.getWatcherUuid();
NamingFuzzyWatchContext namingFuzzyWatchContext = fuzzyMatchContextMap.get(pattern);
if (namingFuzzyWatchContext == null) {
return;
}
namingFuzzyWatchContext.notifyFuzzyWatchers(serviceKey, changedType, syncType, watchUuid);
}
if (event instanceof NamingFuzzyWatchLoadEvent) {
if (!event.scope().equals(notifierEventScope)) {
return;
}
NamingFuzzyWatchLoadEvent overLimitEvent = (NamingFuzzyWatchLoadEvent) event;
NamingFuzzyWatchContext namingFuzzyWatchContext = fuzzyMatchContextMap.get(
overLimitEvent.getGroupKeyPattern());
if (namingFuzzyWatchContext == null) {
return;
}
namingFuzzyWatchContext.notifyOverLimitWatchers(overLimitEvent.getCode());
}
}
@Override
public List<Class<? extends Event>> subscribeTypes() {
List<Class<? extends Event>> result = new LinkedList<>();
result.add(NamingFuzzyWatchNotifyEvent.class);
result.add(NamingFuzzyWatchLoadEvent.class);
return result;
}
    /**
     * Return the event scope used to filter notify/load events handled by this holder.
     */
    public String getNotifierEventScope() {
        return notifierEventScope;
    }
}
|
NamingFuzzyWatchServiceListHolder
|
java
|
greenrobot__greendao
|
tests/DaoTest/src/androidTest/java/org/greenrobot/greendao/daotest2/entity/KeepEntityTest.java
|
{
"start": 1052,
"end": 1575
}
|
class ____ extends AbstractDaoTestLongPk<KeepEntityDao, KeepEntity> {
public KeepEntityTest() {
super(KeepEntityDao.class);
}
@Override
protected KeepEntity createEntity(Long key) {
KeepEntity entity = new KeepEntity();
entity.setId(key);
return entity;
}
public void testKeepSectionAvailable() {
KeepEntity keepEntity = new KeepEntity(42l);
assertEquals("KeepEntity ID=42 (extra="+Build.VERSION.SDK+")", keepEntity.toString());
}
}
|
KeepEntityTest
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/jackson/UnmodifiableMapTests.java
|
{
"start": 867,
"end": 1718
}
|
class ____ extends AbstractMixinTests {
// @formatter:off
private static final String DEFAULT_MAP_JSON = "{"
+ "\"@class\": \"java.util.Collections$UnmodifiableMap\","
+ "\"Key\": \"Value\""
+ "}";
// @formatter:on
@Test
void shouldSerialize() throws Exception {
String mapJson = mapper
.writeValueAsString(Collections.unmodifiableMap(Collections.singletonMap("Key", "Value")));
JSONAssert.assertEquals(DEFAULT_MAP_JSON, mapJson, true);
}
@Test
void shouldDeserialize() throws Exception {
Map<String, String> map = mapper.readValue(DEFAULT_MAP_JSON,
Collections.unmodifiableMap(Collections.emptyMap()).getClass());
assertThat(map).isNotNull()
.isInstanceOf(Collections.unmodifiableMap(Collections.emptyMap()).getClass())
.containsAllEntriesOf(Collections.singletonMap("Key", "Value"));
}
}
|
UnmodifiableMapTests
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/DynamicRunner.java
|
{
"start": 565,
"end": 1039
}
|
class ____ extends ConfigurableRunner implements Filterable {
public DynamicRunner(Class<?> testClass) {
super(testClass);
}
@Override
public Description getDescription() {
return Description.createSuiteDescription(testClass);
}
@Override
public void filter(Filter filter) throws NoTestsRemainException {
filteredChildren.removeIf(each -> !filter.shouldRun(each));
if (filteredChildren.isEmpty()) {
throw new NoTestsRemainException();
}
}
}
|
DynamicRunner
|
java
|
apache__camel
|
components/camel-nats/src/test/java/org/apache/camel/component/nats/integration/NatsAuthITSupport.java
|
{
"start": 1225,
"end": 1690
}
|
class ____ extends CamelTestSupport {
@RegisterExtension
static NatsLocalContainerService service = new NatsLocalContainerAuthService();
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
NatsComponent nats = context.getComponent("nats", NatsComponent.class);
nats.setServers(service.getServiceAddress());
return context;
}
}
|
NatsAuthITSupport
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/autoconfigure/actuate/web/mappings/WebFluxMappingsAutoConfigurationTests.java
|
{
"start": 1213,
"end": 2337
}
|
class ____ {
private final ReactiveWebApplicationContextRunner contextRunner = new ReactiveWebApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(WebFluxMappingsAutoConfiguration.class));
@Test
void whenEndpointIsUnavailableThenDescriptionProviderIsNotCreated() {
this.contextRunner.withBean(DispatcherHandler.class)
.run((context) -> assertThat(context).doesNotHaveBean(DispatcherHandlersMappingDescriptionProvider.class));
}
@Test
void whenEndpointIsAvailableButThereIsNoDispatcherHandlerThenDescriptionProviderIsNotCreated() {
this.contextRunner.withPropertyValues("management.endpoints.web.exposure.include=mappings")
.run((context) -> assertThat(context).doesNotHaveBean(DispatcherHandlersMappingDescriptionProvider.class));
}
@Test
void whenEndpointIsAvailableThenDescriptionProviderIsCreated() {
this.contextRunner.withBean(DispatcherHandler.class)
.withPropertyValues("management.endpoints.web.exposure.include=mappings")
.run((context) -> assertThat(context).hasSingleBean(DispatcherHandlersMappingDescriptionProvider.class));
}
}
|
WebFluxMappingsAutoConfigurationTests
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/introspect/SetterConflictTest.java
|
{
"start": 793,
"end": 1222
}
|
class ____ {
Object value;
public void setBloop(Boolean bloop) {
throw new Error("Wrong setter!");
}
@JsonSetter
public void setBloop(Object bloop) {
value = bloop;
}
}
// [databind#3125]: As per existing (2.7+) logic we SHOULD tie-break
// in favor of `String` but code up until 2.12 short-circuited early fail
static
|
DuplicateSetterBean2979
|
java
|
alibaba__nacos
|
naming/src/test/java/com/alibaba/nacos/naming/core/v2/client/manager/impl/EphemeralIpPortClientManagerTest.java
|
{
"start": 1905,
"end": 4629
}
|
class ____ {
private final String ephemeralIpPortId = "127.0.0.1:80#true";
private final String syncedClientId = "127.0.0.1:8080#true";
EphemeralIpPortClientManager ephemeralIpPortClientManager;
@Mock
private IpPortBasedClient client;
@Mock
private DistroMapper distroMapper;
@Mock
private SwitchDomain switchDomain;
@Mock
private ClientAttributes attributes;
@BeforeAll
static void setUpBeforeClass() {
EnvUtil.setEnvironment(new MockEnvironment());
}
@BeforeEach
void setUp() throws Exception {
ephemeralIpPortClientManager = new EphemeralIpPortClientManager(distroMapper, switchDomain);
when(client.getClientId()).thenReturn(ephemeralIpPortId);
when(client.getRevision()).thenReturn(1320L);
ephemeralIpPortClientManager.clientConnected(client);
when(attributes.getClientAttribute(ClientConstants.REVISION, 0)).thenReturn(5120);
ephemeralIpPortClientManager.syncClientConnected(syncedClientId, attributes);
}
@Test
void testGetClient() {
Client fetchedClient = ephemeralIpPortClientManager.getClient(ephemeralIpPortId);
assertEquals(fetchedClient, client);
}
@Test
void testAllClientId() {
Collection<String> allClientIds = ephemeralIpPortClientManager.allClientId();
assertEquals(2, allClientIds.size());
assertTrue(allClientIds.contains(ephemeralIpPortId));
assertTrue(allClientIds.contains(syncedClientId));
}
@Test
void testContainsEphemeralIpPortId() {
assertTrue(ephemeralIpPortClientManager.contains(ephemeralIpPortId));
assertTrue(ephemeralIpPortClientManager.contains(syncedClientId));
String unUsedClientId = "127.0.0.1:8888#true";
assertFalse(ephemeralIpPortClientManager.contains(unUsedClientId));
}
@Test
void testVerifyClient0() {
assertTrue(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(ephemeralIpPortId, 0)));
assertTrue(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(syncedClientId, 0)));
}
@Test
void testVerifyClient() {
assertFalse(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(ephemeralIpPortId, 1)));
assertTrue(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(ephemeralIpPortId, 1320)));
assertFalse(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(syncedClientId, 1)));
assertTrue(ephemeralIpPortClientManager.verifyClient(new DistroClientVerifyInfo(syncedClientId, 5120)));
}
}
|
EphemeralIpPortClientManagerTest
|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/specific/TestSpecificDatumReader.java
|
{
"start": 1437,
"end": 4332
}
|
class ____ {
public static byte[] serializeRecord(FooBarSpecificRecord fooBarSpecificRecord) throws IOException {
SpecificDatumWriter<FooBarSpecificRecord> datumWriter = new SpecificDatumWriter<>(FooBarSpecificRecord.SCHEMA$);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
Encoder encoder = EncoderFactory.get().binaryEncoder(byteArrayOutputStream, null);
datumWriter.write(fooBarSpecificRecord, encoder);
encoder.flush();
return byteArrayOutputStream.toByteArray();
}
public static byte[] serializeRecord(StringablesRecord stringablesRecord) throws IOException {
SpecificDatumWriter<StringablesRecord> datumWriter = new SpecificDatumWriter<>(StringablesRecord.SCHEMA$);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
Encoder encoder = EncoderFactory.get().binaryEncoder(byteArrayOutputStream, null);
datumWriter.write(stringablesRecord, encoder);
encoder.flush();
return byteArrayOutputStream.toByteArray();
}
@Test
void read() throws IOException {
Builder newBuilder = FooBarSpecificRecord.newBuilder();
newBuilder.setId(42);
newBuilder.setName("foo");
newBuilder.setNicknames(Collections.singletonList("bar"));
newBuilder.setRelatedids(Arrays.asList(1, 2, 3));
FooBarSpecificRecord specificRecord = newBuilder.build();
byte[] recordBytes = serializeRecord(specificRecord);
Decoder decoder = DecoderFactory.get().binaryDecoder(recordBytes, null);
SpecificDatumReader<FooBarSpecificRecord> specificDatumReader = new SpecificDatumReader<>(
FooBarSpecificRecord.SCHEMA$);
FooBarSpecificRecord deserialized = new FooBarSpecificRecord();
specificDatumReader.read(deserialized, decoder);
assertEquals(specificRecord, deserialized);
}
@Test
void stringables() throws IOException {
StringablesRecord.Builder newBuilder = StringablesRecord.newBuilder();
newBuilder.setValue(new BigDecimal("42.11"));
HashMap<String, BigDecimal> mapWithBigDecimalElements = new HashMap<>();
mapWithBigDecimalElements.put("test", new BigDecimal("11.11"));
newBuilder.setMapWithBigDecimalElements(mapWithBigDecimalElements);
HashMap<BigInteger, String> mapWithBigIntKeys = new HashMap<>();
mapWithBigIntKeys.put(BigInteger.ONE, "test");
newBuilder.setMapWithBigIntKeys(mapWithBigIntKeys);
StringablesRecord stringablesRecord = newBuilder.build();
byte[] recordBytes = serializeRecord(stringablesRecord);
Decoder decoder = DecoderFactory.get().binaryDecoder(recordBytes, null);
SpecificDatumReader<StringablesRecord> specificDatumReader = new SpecificDatumReader<>(StringablesRecord.SCHEMA$);
StringablesRecord deserialized = new StringablesRecord();
specificDatumReader.read(deserialized, decoder);
assertEquals(stringablesRecord, deserialized);
}
}
|
TestSpecificDatumReader
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/connection/ConnectionIdleTimeoutTest.java
|
{
"start": 2303,
"end": 2709
}
|
class ____ {
static final CountDownLatch CLOSED = new CountDownLatch(1);
static final AtomicBoolean MESSAGE = new AtomicBoolean();
@OnTextMessage
void onText(String message) {
MESSAGE.set(true);
}
@OnClose
void close() {
CLOSED.countDown();
}
}
@WebSocketClient(path = "/end")
public static
|
ServerEndpoint
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/socket/adapter/AbstractListenerWebSocketSession.java
|
{
"start": 10035,
"end": 11393
}
|
class ____ extends AbstractListenerWriteProcessor<WebSocketMessage> {
private volatile boolean isReady = true;
WebSocketSendProcessor() {
super(receivePublisher.getLogPrefix());
}
@Override
protected boolean write(WebSocketMessage message) throws IOException {
if (logger.isTraceEnabled()) {
logger.trace(getLogPrefix() + "Sending " + message);
}
else if (rsWriteLogger.isTraceEnabled()) {
rsWriteLogger.trace(getLogPrefix() + "Sending " + message);
}
// In case of IOException, onError handling should call discardData(WebSocketMessage)..
return sendMessage(message);
}
@Override
protected boolean isDataEmpty(WebSocketMessage message) {
return (message.getPayload().readableByteCount() == 0);
}
@Override
protected boolean isWritePossible() {
return (this.isReady);
}
/**
* Subclasses can invoke this before sending a message (false) and
* after receiving the async send callback (true) effective translating
* async completion callback into simple flow control.
*/
public void setReadyToSend(boolean ready) {
if (ready && rsWriteLogger.isTraceEnabled()) {
rsWriteLogger.trace(getLogPrefix() + "Ready to send");
}
this.isReady = ready;
}
@Override
protected void discardData(WebSocketMessage message) {
message.release();
}
}
}
|
WebSocketSendProcessor
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/CsrfConfigurerTests.java
|
{
"start": 38176,
"end": 38487
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.csrf((csrf) -> csrf
.sessionAuthenticationStrategy(null));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static
|
NullAuthenticationStrategy
|
java
|
elastic__elasticsearch
|
x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckResponse.java
|
{
"start": 661,
"end": 2067
}
|
class ____ extends BaseNodesResponse<NodesDeprecationCheckAction.NodeResponse> {
public NodesDeprecationCheckResponse(StreamInput in) throws IOException {
super(in);
}
public NodesDeprecationCheckResponse(
ClusterName clusterName,
List<NodesDeprecationCheckAction.NodeResponse> nodes,
List<FailedNodeException> failures
) {
super(clusterName, nodes, failures);
}
@Override
protected List<NodesDeprecationCheckAction.NodeResponse> readNodesFrom(StreamInput in) throws IOException {
return in.readCollectionAsList(NodesDeprecationCheckAction.NodeResponse::new);
}
@Override
protected void writeNodesTo(StreamOutput out, List<NodesDeprecationCheckAction.NodeResponse> nodes) throws IOException {
out.writeCollection(nodes);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NodesDeprecationCheckResponse that = (NodesDeprecationCheckResponse) o;
return Objects.equals(getClusterName(), that.getClusterName())
&& Objects.equals(getNodes(), that.getNodes())
&& Objects.equals(failures(), that.failures());
}
@Override
public int hashCode() {
return Objects.hash(getClusterName(), getNodes(), failures());
}
}
|
NodesDeprecationCheckResponse
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/longarray/AtomicLongArrayAssert_hasSizeLessThanOrEqualTo_Test.java
|
{
"start": 819,
"end": 1197
}
|
class ____ extends AtomicLongArrayAssertBaseTest {
@Override
protected AtomicLongArrayAssert invoke_api_method() {
return assertions.hasSizeLessThanOrEqualTo(6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeLessThanOrEqualTo(getInfo(assertions), internalArray(), 6);
}
}
|
AtomicLongArrayAssert_hasSizeLessThanOrEqualTo_Test
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterServiceInterceptor.java
|
{
"start": 2492,
"end": 2679
}
|
class ____ whether {@link ApplicationMasterServiceProcessor}s
* work fine, e.g. allocation is invoked on preprocessor and the next processor
* in the chain is also invoked.
*/
public
|
tests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java
|
{
"start": 1767,
"end": 1790
}
|
class ____ {}
abstract
|
C
|
java
|
apache__flink
|
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
|
{
"start": 1165,
"end": 2426
}
|
class ____<IN> {
/** Container which is used to store the written sequence of values of a column. */
private final ValueVector valueVector;
/** The current count of elements written. */
private int count = 0;
public ArrowFieldWriter(ValueVector valueVector) {
this.valueVector = Preconditions.checkNotNull(valueVector);
}
/** Returns the underlying container which stores the sequence of values of a column. */
public ValueVector getValueVector() {
return valueVector;
}
/** Returns the current count of elements written. */
public int getCount() {
return count;
}
/** Sets the field value as the field at the specified ordinal of the specified row. */
public abstract void doWrite(IN row, int ordinal);
/** Writes the specified ordinal of the specified row. */
public void write(IN row, int ordinal) {
doWrite(row, ordinal);
count += 1;
}
/** Finishes the writing of the current row batch. */
public void finish() {
valueVector.setValueCount(count);
}
/** Resets the state of the writer to write the next batch of fields. */
public void reset() {
valueVector.reset();
count = 0;
}
}
|
ArrowFieldWriter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/exception/DataException.java
|
{
"start": 425,
"end": 887
}
|
class ____ extends JDBCException {
/**
* Constructor for DataException.
*
* @param root The underlying exception.
*/
public DataException(String message, SQLException root) {
super( message, root );
}
/**
* Constructor for DataException.
*
* @param message Optional message.
* @param root The underlying exception.
*/
public DataException(String message, SQLException root, String sql) {
super( message, root, sql );
}
}
|
DataException
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java
|
{
"start": 1755,
"end": 11062
}
|
class ____ {
/**
* Autodetect API native program name - always loaded from the same directory as the controller process
*/
public static final String AUTODETECT = "autodetect";
static final String AUTODETECT_PATH = "./" + AUTODETECT;
/*
* Arguments used by both autodetect and normalize
*/
public static final String DELETE_STATE_FILES_ARG = "--deleteStateFiles";
public static final String LENGTH_ENCODED_INPUT_ARG = "--lengthEncodedInput";
public static final String MODEL_CONFIG_ARG = "--modelconfig=";
public static final String QUANTILES_STATE_PATH_ARG = "--quantilesState=";
public static final String LICENSE_KEY_VALIDATED_ARG = "--validElasticLicenseKeyConfirmed=";
private static final String JSON_EXTENSION = ".json";
private static final String CONFIG_ARG = "--config=";
private static final String EVENTS_CONFIG_ARG = "--eventsconfig=";
private static final String FILTERS_CONFIG_ARG = "--filtersconfig=";
/**
* Name of the config setting containing the path to the logs directory
*/
private static final int DEFAULT_MAX_NUM_RECORDS = 500;
/**
* The maximum number of anomaly records that will be written each bucket
*/
// Though this setting is dynamic, it is only set when a new job is opened. So, already running jobs will not get the updated value.
public static final Setting<Integer> MAX_ANOMALY_RECORDS_SETTING_DYNAMIC = Setting.intSetting(
"xpack.ml.max_anomaly_records",
DEFAULT_MAX_NUM_RECORDS,
Setting.Property.NodeScope,
Setting.Property.Dynamic
);
/**
* Persisted quantiles are written to disk so they can be read by
* the autodetect program. All quantiles files have this extension.
*/
private static final String QUANTILES_FILE_EXTENSION = ".json";
private final Job job;
private final List<Path> filesToDelete;
private final Logger logger;
private final Environment env;
private final Settings settings;
private final NativeController controller;
private final ProcessPipes processPipes;
private Set<MlFilter> referencedFilters;
private List<ScheduledEvent> scheduledEvents;
private Quantiles quantiles;
/**
* Constructs an autodetect process builder
*
* @param job The job configuration
* @param filesToDelete This method will append File objects that need to be
* deleted when the process completes
* @param logger The job's logger
*/
public AutodetectBuilder(
Job job,
List<Path> filesToDelete,
Logger logger,
Environment env,
Settings settings,
NativeController controller,
ProcessPipes processPipes
) {
this.env = env;
this.settings = settings;
this.controller = controller;
this.processPipes = processPipes;
this.job = Objects.requireNonNull(job);
this.filesToDelete = Objects.requireNonNull(filesToDelete);
this.logger = Objects.requireNonNull(logger);
referencedFilters = new HashSet<>();
scheduledEvents = Collections.emptyList();
}
public AutodetectBuilder referencedFilters(Set<MlFilter> filters) {
referencedFilters = filters;
return this;
}
/**
* Set quantiles to restore the normalizer state if any.
*
* @param quantiles the quantiles
*/
public AutodetectBuilder quantiles(Quantiles quantiles) {
this.quantiles = quantiles;
return this;
}
public AutodetectBuilder scheduledEvents(List<ScheduledEvent> scheduledEvents) {
this.scheduledEvents = scheduledEvents;
return this;
}
/**
* Requests that the controller daemon start an autodetect process.
*/
public void build() throws IOException, InterruptedException {
List<String> command = buildAutodetectCommand();
buildFiltersConfig(command);
buildScheduledEventsConfig(command);
buildJobConfig(command);
buildQuantiles(command);
processPipes.addArgs(command);
controller.startProcess(command);
}
/**
* Visible for testing
*/
List<String> buildAutodetectCommand() {
List<String> command = new ArrayList<>();
command.add(AUTODETECT_PATH);
// Input is always length encoded
command.add(LENGTH_ENCODED_INPUT_ARG);
// Limit the number of output records
command.add(maxAnomalyRecordsArg(settings));
if (ProcessBuilderUtils.modelConfigFilePresent(env)) {
String modelConfigFile = XPackPlugin.resolveConfigFile(env, ProcessBuilderUtils.ML_MODEL_CONF).toString();
command.add(MODEL_CONFIG_ARG + modelConfigFile);
}
// License has been created by the open job action
command.add(LICENSE_KEY_VALIDATED_ARG + true);
return command;
}
static String maxAnomalyRecordsArg(Settings settings) {
return "--maxAnomalyRecords=" + MAX_ANOMALY_RECORDS_SETTING_DYNAMIC.get(settings);
}
private void buildQuantiles(List<String> command) throws IOException {
if (quantiles != null && quantiles.getQuantileState().isEmpty() == false) {
logger.info("Restoring quantiles for job '" + job.getId() + "'");
Path normalizersStateFilePath = writeNormalizerInitState(job.getId(), quantiles.getQuantileState(), env);
String quantilesStateFileArg = QUANTILES_STATE_PATH_ARG + normalizersStateFilePath;
command.add(quantilesStateFileArg);
command.add(DELETE_STATE_FILES_ARG);
}
}
/**
* Write the normalizer init state to file.
*/
public static Path writeNormalizerInitState(String jobId, String state, Environment env) throws IOException {
// createTempFile has a race condition where it may return the same
// temporary file name to different threads if called simultaneously
// from multiple threads, hence add the thread ID to avoid this
FileUtils.recreateTempDirectoryIfNeeded(env.tmpDir());
Path stateFile = Files.createTempFile(
env.tmpDir(),
jobId + "_quantiles_" + Thread.currentThread().getId(),
QUANTILES_FILE_EXTENSION
);
try (BufferedWriter osw = Files.newBufferedWriter(stateFile, StandardCharsets.UTF_8)) {
osw.write(state);
}
return stateFile;
}
private void buildScheduledEventsConfig(List<String> command) throws IOException {
if (scheduledEvents.isEmpty()) {
return;
}
FileUtils.recreateTempDirectoryIfNeeded(env.tmpDir());
Path eventsConfigFile = Files.createTempFile(env.tmpDir(), "eventsConfig", JSON_EXTENSION);
filesToDelete.add(eventsConfigFile);
List<ScheduledEventToRuleWriter> scheduledEventToRuleWriters = scheduledEvents.stream()
.map(x -> new ScheduledEventToRuleWriter(x.getDescription(), x.toDetectionRule(job.getAnalysisConfig().getBucketSpan())))
.collect(Collectors.toList());
try (
OutputStreamWriter osw = new OutputStreamWriter(Files.newOutputStream(eventsConfigFile), StandardCharsets.UTF_8);
XContentBuilder jsonBuilder = JsonXContent.contentBuilder()
) {
osw.write(
Strings.toString(
jsonBuilder.startObject()
.field(ScheduledEvent.RESULTS_FIELD.getPreferredName(), scheduledEventToRuleWriters)
.endObject()
)
);
}
command.add(EVENTS_CONFIG_ARG + eventsConfigFile.toString());
}
private void buildJobConfig(List<String> command) throws IOException {
FileUtils.recreateTempDirectoryIfNeeded(env.tmpDir());
Path configFile = Files.createTempFile(env.tmpDir(), "config", JSON_EXTENSION);
filesToDelete.add(configFile);
try (
OutputStreamWriter osw = new OutputStreamWriter(Files.newOutputStream(configFile), StandardCharsets.UTF_8);
XContentBuilder jsonBuilder = JsonXContent.contentBuilder()
) {
job.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS);
osw.write(Strings.toString(jsonBuilder));
}
command.add(CONFIG_ARG + configFile.toString());
}
private void buildFiltersConfig(List<String> command) throws IOException {
if (referencedFilters.isEmpty()) {
return;
}
FileUtils.recreateTempDirectoryIfNeeded(env.tmpDir());
Path filtersConfigFile = Files.createTempFile(env.tmpDir(), "filtersConfig", JSON_EXTENSION);
filesToDelete.add(filtersConfigFile);
try (
OutputStreamWriter osw = new OutputStreamWriter(Files.newOutputStream(filtersConfigFile), StandardCharsets.UTF_8);
XContentBuilder jsonBuilder = JsonXContent.contentBuilder()
) {
osw.write(
Strings.toString(jsonBuilder.startObject().field(MlFilter.RESULTS_FIELD.getPreferredName(), referencedFilters).endObject())
);
}
command.add(FILTERS_CONFIG_ARG + filtersConfigFile.toString());
}
}
|
AutodetectBuilder
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/mapping/MappedStatement.java
|
{
"start": 1262,
"end": 2187
}
|
class ____ {
private String resource;
private Configuration configuration;
private String id;
private Integer fetchSize;
private Integer timeout;
private StatementType statementType;
private ResultSetType resultSetType;
private SqlSource sqlSource;
private Cache cache;
private ParameterMap parameterMap;
private List<ResultMap> resultMaps;
private boolean flushCacheRequired;
private boolean useCache;
private boolean resultOrdered;
private SqlCommandType sqlCommandType;
private KeyGenerator keyGenerator;
private String[] keyProperties;
private String[] keyColumns;
private boolean hasNestedResultMaps;
private String databaseId;
private Log statementLog;
private LanguageDriver lang;
private String[] resultSets;
private ParamNameResolver paramNameResolver;
private boolean dirtySelect;
MappedStatement() {
// constructor disabled
}
public static
|
MappedStatement
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/StringFunction.java
|
{
"start": 2966,
"end": 3118
}
|
enum ____ passed to 'string()' function: " + argumentType );
}
}
@Override
public String getArgumentListSignature() {
return "(ENUM arg)";
}
}
|
type
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.