language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/aggregator/SplitAggregateParallelProcessingStackOverflowIssueTest.java | {
"start": 1223,
"end": 2558
} | class ____ extends ContextTestSupport {
@Test
public void testStackoverflow() throws Exception {
int size = 50000;
MockEndpoint result = getMockEndpoint("mock:result");
result.expectedMessageCount(size / 10);
StringBuilder sb = new StringBuilder();
for (int i = 0; i < size; i++) {
sb.append("Line #").append(i);
sb.append("\n");
}
template.sendBody("direct:start", sb);
MockEndpoint.assertIsSatisfied(60, SECONDS, result);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.split().tokenize("\n").streaming().parallelProcessing()
.aggregate(constant("foo"), new GroupedBodyAggregationStrategy())
.completeAllOnStop()
.eagerCheckCompletion()
.completionSize(10)
.completionTimeout(SECONDS.toMillis(5))
.completionPredicate(exchangeProperty(SPLIT_COMPLETE))
.to("log:result?groupSize=100", "mock:result");
}
};
}
}
| SplitAggregateParallelProcessingStackOverflowIssueTest |
java | apache__camel | test-infra/camel-test-infra-consul/src/main/java/org/apache/camel/test/infra/consul/services/ConsulInfraService.java | {
"start": 980,
"end": 1108
} | interface ____ extends InfrastructureService {
String getConsulUrl();
String host();
int port();
}
| ConsulInfraService |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/bind/support/WebArgumentResolver.java | {
"start": 1121,
"end": 1658
} | class ____ implements WebArgumentResolver {
*
* public Object resolveArgument(MethodParameter methodParameter, NativeWebRequest webRequest) {
* if (methodParameter.getParameterType().equals(MySpecialArg.class)) {
* return new MySpecialArg("myValue");
* }
* return UNRESOLVED;
* }
* }</pre>
*
* @author Juergen Hoeller
* @since 2.5.2
* @see org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter#setCustomArgumentResolvers
*/
@FunctionalInterface
public | MySpecialArgumentResolver |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvtVO/alipay/PlatformDepartmentVO.java | {
"start": 150,
"end": 6311
} | class ____ {
@JSONField(ordinal=1)
private String id ;
@JSONField(ordinal=2)
private String label ;
@JSONField(ordinal=3)
private String value;
@JSONField(ordinal=4)
private String parentId;
@JSONField(ordinal=5)
private String parentLabel;
@JSONField(ordinal=6)
private String companyId;
@JSONField(ordinal=7)
private String departCode;
@JSONField(ordinal=8)
private String memo;
@JSONField(ordinal=9)
private String departOrgCode;
@JSONField(ordinal=10)
private String contact;
@JSONField(ordinal=11)
private String mobile;
@JSONField(ordinal=12)
private String departType;
@JSONField(serialize=false)
private String ipId;
@JSONField(serialize=false)
private String ipRoleId;
@JSONField(serialize=false)
private PlatformDepartmentVO parent;
@JSONField(ordinal=6,name="ChildNodes")
private List<PlatformDepartmentVO> childNodes =new ArrayList<PlatformDepartmentVO>();
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public String getParentId() {
return parentId;
}
public void setParentId(String parentId) {
this.parentId = parentId;
}
public String getCompanyId() {
return companyId;
}
public void setCompanyId(String companyId) {
this.companyId = companyId;
}
public String getDepartCode() {
return departCode;
}
public void setDepartCode(String departCode) {
this.departCode = departCode;
}
public String getMemo() {
return memo;
}
public void setMemo(String memo) {
this.memo = memo;
}
public PlatformDepartmentVO getParent() {
return parent;
}
public void setParent(PlatformDepartmentVO parent) {
this.parent = parent;
}
public List<PlatformDepartmentVO> getChildNodes() {
return childNodes;
}
public void setChildNodes(List<PlatformDepartmentVO> childNodes) {
this.childNodes = childNodes;
}
/**
* Getter method for property <tt>departType</tt>.
*
* @return property value of departType
*/
public String getDepartType() {
return departType;
}
/**
* Setter method for property <tt>departType</tt>.
*
* @param departType value to be assigned to property departType
*/
public void setDepartType(String departType) {
this.departType = departType;
}
/**
* Getter method for property <tt>parentLabel</tt>.
*
* @return property value of parentLabel
*/
public String getParentLabel() {
return parentLabel;
}
/**
* Setter method for property <tt>parentLabel</tt>.
*
* @param parentLabel value to be assigned to property parentLabel
*/
public void setParentLabel(String parentLabel) {
this.parentLabel = parentLabel;
}
/**
* Getter method for property <tt>departOrgCode</tt>.
*
* @return property value of departOrgCode
*/
public String getDepartOrgCode() {
return departOrgCode;
}
/**
* Setter method for property <tt>departOrgCode</tt>.
*
* @param departOrgCode value to be assigned to property departOrgCode
*/
public void setDepartOrgCode(String departOrgCode) {
this.departOrgCode = departOrgCode;
}
/**
* Getter method for property <tt>contact</tt>.
*
* @return property value of contact
*/
public String getContact() {
return contact;
}
/**
* Setter method for property <tt>contact</tt>.
*
* @param contact value to be assigned to property contact
*/
public void setContact(String contact) {
this.contact = contact;
}
/**
* Getter method for property <tt>mobile</tt>.
*
* @return property value of mobile
*/
public String getMobile() {
return mobile;
}
/**
* Setter method for property <tt>mobile</tt>.
*
* @param mobile value to be assigned to property mobile
*/
public void setMobile(String mobile) {
this.mobile = mobile;
}
/**
* Getter method for property <tt>ipRoleId</tt>.
*
* @return property value of ipRoleId
*/
public String getIpRoleId() {
return ipRoleId;
}
/**
* Setter method for property <tt>ipRoleId</tt>.
*
* @param ipRoleId value to be assigned to property ipRoleId
*/
public void setIpRoleId(String ipRoleId) {
this.ipRoleId = ipRoleId;
}
/**
* Getter method for property <tt>ipId</tt>.
*
* @return property value of ipId
*/
public String getIpId() {
return ipId;
}
/**
* Setter method for property <tt>ipId</tt>.
*
* @param ipId value to be assigned to property ipId
*/
public void setIpId(String ipId) {
this.ipId = ipId;
}
public PlatformDepartmentVO() {
}
// public PlatformDepartmentVO(String id, String label, String value, String parentId,
// String companyId) {
// this.id = id;
// this.label = label;
// this.value = value;
// this.parentId = parentId;
// this.companyId = companyId;
// }
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
if(null==this.getId()){
return false;
}
final PlatformDepartmentVO other = (PlatformDepartmentVO) obj;
if(!this.getId().equals(other.getId())) {
return false;
}
return true;
}
} | PlatformDepartmentVO |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/mapper_extend/ParentMapper.java | {
"start": 755,
"end": 901
} | interface ____ extends GrandpaMapper {
User getUserXML();
@Select("select * from users where id = 1")
User getUserAnnotated();
}
| ParentMapper |
java | apache__avro | lang/java/perf/src/main/java/org/apache/avro/perf/test/reflect/ReflectIntArrayTest.java | {
"start": 2401,
"end": 3341
} | class ____ extends BasicArrayState {
private final Schema schema;
private int[][] testData;
private Encoder encoder;
private ReflectDatumWriter<int[]> datumWriter;
public TestStateEncode() {
super(ARRAY_SIZE);
final String jsonText = ReflectData.get().getSchema(int[].class).toString();
this.schema = new Schema.Parser().parse(jsonText);
}
/**
* Setup the trial data.
*
* @throws IOException Could not setup test data
*/
@Setup(Level.Trial)
public void doSetupTrial() throws Exception {
this.encoder = super.newEncoder(false, getNullOutputStream());
this.datumWriter = new ReflectDatumWriter<>(schema);
this.testData = new int[getBatchSize()][];
for (int i = 0; i < testData.length; i++) {
this.testData[i] = populateDoubleArray(getRandom(), getArraySize());
}
}
}
@State(Scope.Thread)
public static | TestStateEncode |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/support/ComposablePointcutTests.java | {
"start": 3110,
"end": 6238
} | class
____ pc = new ComposablePointcut(ClassFilter.TRUE, GET_AGE_METHOD_MATCHER);
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isFalse();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isFalse();
pc.union(GETTER_METHOD_MATCHER);
// Should now match all getter methods
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isFalse();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isTrue();
pc.union(ABSQUATULATE_METHOD_MATCHER);
// Should now match absquatulate() as well
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isTrue();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isTrue();
// But it doesn't match everything
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_SET_AGE, TestBean.class)).isFalse();
}
@Test
void testIntersectionMethodMatcher() {
ComposablePointcut pc = new ComposablePointcut();
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isTrue();
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isTrue();
pc.intersection(GETTER_METHOD_MATCHER);
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isFalse();
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(pc.getMethodMatcher().matches(PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isTrue();
pc.intersection(GET_AGE_METHOD_MATCHER);
// Use the Pointcuts matches method
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_ABSQUATULATE, TestBean.class)).isFalse();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_AGE, TestBean.class)).isTrue();
assertThat(Pointcuts.matches(pc, PointcutsTests.TEST_BEAN_GET_NAME, TestBean.class)).isFalse();
}
@Test
void testEqualsAndHashCode() {
ComposablePointcut pc1 = new ComposablePointcut();
ComposablePointcut pc2 = new ComposablePointcut();
assertThat(pc2).isEqualTo(pc1);
assertThat(pc2.hashCode()).isEqualTo(pc1.hashCode());
pc1.intersection(GETTER_METHOD_MATCHER);
assertThat(pc1).isNotEqualTo(pc2);
assertThat(pc1.hashCode()).isNotEqualTo(pc2.hashCode());
pc2.intersection(GETTER_METHOD_MATCHER);
assertThat(pc2).isEqualTo(pc1);
assertThat(pc2.hashCode()).isEqualTo(pc1.hashCode());
pc1.union(GET_AGE_METHOD_MATCHER);
pc2.union(GET_AGE_METHOD_MATCHER);
assertThat(pc2).isEqualTo(pc1);
assertThat(pc2.hashCode()).isEqualTo(pc1.hashCode());
}
}
| ComposablePointcut |
java | dropwizard__dropwizard | dropwizard-jetty/src/test/java/io/dropwizard/jetty/ContextRoutingHandlerTest.java | {
"start": 660,
"end": 2659
} | class ____ {
@Mock
private Request request;
@Mock
private Response response;
@Mock
private Callback callback;
@Mock
private Handler handler1;
@Mock
private Handler handler2;
private ContextRoutingHandler handler;
@BeforeEach
void setUp() {
this.handler = new ContextRoutingHandler(Map.of(
"/", handler1,
"/admin", handler2
));
}
@Test
void routesToTheBestPrefixMatch() throws Exception {
HttpURI httpURI = mock(HttpURI.class);
when(httpURI.getPath()).thenReturn("/hello-world");
when(request.getHttpURI()).thenReturn(httpURI);
handler.handle(request, response, callback);
verify(handler1).handle(request, response, callback);
verify(handler2, never()).handle(request, response, callback);
}
@Test
void routesToTheLongestPrefixMatch() throws Exception {
HttpURI httpURI = mock(HttpURI.class);
when(httpURI.getPath()).thenReturn("/admin/woo");
when(request.getHttpURI()).thenReturn(httpURI);
handler.handle(request, response, callback);
verify(handler1, never()).handle(request, response, callback);
verify(handler2).handle(request, response, callback);
}
@Test
void passesHandlingNonMatchingRequests() throws Exception {
HttpURI httpURI = mock(HttpURI.class);
when(httpURI.getPath()).thenReturn("WAT");
when(request.getHttpURI()).thenReturn(httpURI);
handler.handle(request, response, callback);
verify(handler1, never()).handle(request, response, callback);
verify(handler2, never()).handle(request, response, callback);
}
@Test
void startsAndStopsAllHandlers() throws Exception {
handler.start();
verify(handler1).start();
verify(handler2).start();
handler.stop();
verify(handler1).stop();
verify(handler2).stop();
}
}
| ContextRoutingHandlerTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java | {
"start": 1849,
"end": 4733
} | class ____<E extends Schedulable>
extends AbstractQueue<E> implements BlockingQueue<E> {
public static final Logger LOG =
LoggerFactory.getLogger(CallQueueManager.class);
// Number of checkpoints for empty queue.
private static final int CHECKPOINT_NUM = 20;
// Interval to check empty queue.
private static final long CHECKPOINT_INTERVAL_MS = 10;
@SuppressWarnings("unchecked")
static <E> Class<? extends BlockingQueue<E>> convertQueueClass(
Class<?> queueClass, Class<E> elementClass) {
return (Class<? extends BlockingQueue<E>>)queueClass;
}
@SuppressWarnings("unchecked")
static Class<? extends RpcScheduler> convertSchedulerClass(
Class<?> schedulerClass) {
return (Class<? extends RpcScheduler>)schedulerClass;
}
private volatile boolean clientBackOffEnabled;
private volatile boolean serverFailOverEnabled;
// Atomic refs point to active callQueue
// We have two so we can better control swapping
private final AtomicReference<BlockingQueue<E>> putRef;
private final AtomicReference<BlockingQueue<E>> takeRef;
private RpcScheduler scheduler;
public CallQueueManager(Class<? extends BlockingQueue<E>> backingClass,
Class<? extends RpcScheduler> schedulerClass,
boolean clientBackOffEnabled, int maxQueueSize, String namespace,
Configuration conf) {
int priorityLevels = parseNumLevels(namespace, conf);
this.scheduler = createScheduler(schedulerClass, priorityLevels,
namespace, conf);
int[] capacityWeights = parseCapacityWeights(priorityLevels,
namespace, conf);
this.serverFailOverEnabled = getServerFailOverEnable(namespace, conf);
BlockingQueue<E> bq = createCallQueueInstance(backingClass,
priorityLevels, maxQueueSize, namespace, capacityWeights, conf);
this.clientBackOffEnabled = clientBackOffEnabled;
this.putRef = new AtomicReference<BlockingQueue<E>>(bq);
this.takeRef = new AtomicReference<BlockingQueue<E>>(bq);
LOG.info("Using callQueue: {}, queueCapacity: {}, " +
"scheduler: {}, ipcBackoff: {}, ipcFailOver: {}.",
backingClass, maxQueueSize, schedulerClass, clientBackOffEnabled, serverFailOverEnabled);
}
@VisibleForTesting // only!
CallQueueManager(BlockingQueue<E> queue, RpcScheduler scheduler,
boolean clientBackOffEnabled, boolean serverFailOverEnabled) {
this.putRef = new AtomicReference<BlockingQueue<E>>(queue);
this.takeRef = new AtomicReference<BlockingQueue<E>>(queue);
this.scheduler = scheduler;
this.clientBackOffEnabled = clientBackOffEnabled;
this.serverFailOverEnabled = serverFailOverEnabled;
}
/**
* Return boolean value configured by property 'ipc.<port>.callqueue.overflow.trigger.failover'
* if it is present. If the config is not present, default config
* (without port) is used to derive | CallQueueManager |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java | {
"start": 1580,
"end": 2261
} | class ____ extends TermBasedFieldType {
public FakeFieldType(String name) {
super(name, IndexType.terms(true, false), false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap());
}
@Override
public String typeName() {
return "faketype";
}
@Override
public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
throw new UnsupportedOperationException();
}
}
@Override
protected String contentType() {
return null;
}
@Override
protected void parseCreateField(DocumentParserContext context) {}
public static | FakeFieldType |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/TestInputFile.java | {
"start": 1879,
"end": 2715
} | class ____ {
private static final int DefaultMinNum = 1;
private static final int DefaultMaxNum = 64;
public int minBytesNum;
public int maxBytesNum;
public KVSizeScope() {
this.minBytesNum = DefaultMinNum;
this.maxBytesNum = DefaultMaxNum;
}
public KVSizeScope(int min, int max) {
this.minBytesNum = min;
this.maxBytesNum = max;
}
}
private static HashMap<String, KVSizeScope> map = new HashMap<String, KVSizeScope>();
private byte[] databuf = null;
private final String keyClsName, valueClsName;
private int filesize = 0;
private int keyMaxBytesNum, keyMinBytesNum;
private int valueMaxBytesNum, valueMinBytesNum;
private SequenceFile.Writer writer = null;
Random r = new Random();
public static final int DATABUFSIZE = 1 << 22; // 4M
private | KVSizeScope |
java | elastic__elasticsearch | plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2KeystoreCredentialsIT.java | {
"start": 976,
"end": 2519
} | class ____ extends DiscoveryEc2ClusterFormationTestCase {
private static final String PREFIX = getIdentifierPrefix("DiscoveryEc2KeystoreCredentialsIT");
private static final String ACCESS_KEY = PREFIX + "-access-key";
private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
private static final AwsEc2HttpFixture ec2ApiFixture = new AwsEc2HttpFixture(
fixedAccessKey(ACCESS_KEY, regionSupplier, "ec2"),
DiscoveryEc2KeystoreCredentialsIT::getAvailableTransportEndpoints
);
private static final ElasticsearchCluster cluster = ElasticsearchCluster.local()
.nodes(2)
.plugin("discovery-ec2")
.setting(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), Ec2DiscoveryPlugin.EC2_SEED_HOSTS_PROVIDER_NAME)
.setting("logger." + AwsEc2SeedHostsProvider.class.getCanonicalName(), "DEBUG")
.setting(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), ec2ApiFixture::getAddress)
.environment("AWS_REGION", regionSupplier)
.keystore("discovery.ec2.access_key", ACCESS_KEY)
.keystore("discovery.ec2.secret_key", ESTestCase::randomSecretKey)
.build();
private static List<String> getAvailableTransportEndpoints() {
return cluster.getAvailableTransportEndpoints();
}
@ClassRule
public static TestRule ruleChain = RuleChain.outerRule(ec2ApiFixture).around(cluster);
@Override
protected ElasticsearchCluster getCluster() {
return cluster;
}
}
| DiscoveryEc2KeystoreCredentialsIT |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java | {
"start": 1027,
"end": 1501
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(GuardedByChecker.class, getClass());
@Test
public void locked() {
compilationHelper
.addSourceLines(
"threadsafety/Test.java",
"""
package threadsafety;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import java.util.concurrent.locks.Lock;
| GuardedByCheckerTest |
java | quarkusio__quarkus | extensions/oidc-common/runtime/src/main/java/io/quarkus/oidc/common/runtime/config/OidcClientCommonConfig.java | {
"start": 3063,
"end": 5116
} | enum ____ {
/**
* `client_secret_basic` (default): The client id and secret are submitted with the HTTP Authorization Basic
* scheme.
*/
BASIC,
/**
* `client_secret_post`: The client id and secret are submitted as the `client_id` and `client_secret`
* form parameters.
*/
POST,
/**
* `client_secret_jwt`: The client id and generated JWT secret are submitted as the `client_id` and
* `client_secret`
* form parameters.
*/
POST_JWT,
/**
* client id and secret are submitted as HTTP query parameters. This option is only supported by the OIDC
* extension.
*/
QUERY
}
/**
* The client secret value. This value is ignored if `credentials.secret` is set.
* Must be set unless a secret is set in {@link #clientSecret} or {@link #jwt} client authentication is required.
*/
Optional<String> value();
/**
* The Secret CredentialsProvider.
*/
Provider provider();
/**
* The authentication method.
* If the `clientSecret.value` secret is set, this method is `basic` by default.
*/
Optional<Method> method();
}
/**
* Supports the client authentication `client_secret_jwt` and `private_key_jwt` methods, which involves sending a JWT
* token assertion signed with a client secret or private key.
* JWT Bearer client authentication is also supported.
*
* @see <a href=
* "https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication">https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication</a>
*/
| Method |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/jmx/export/CustomDateEditorRegistrar.java | {
"start": 951,
"end": 1217
} | class ____ implements PropertyEditorRegistrar {
@Override
public void registerCustomEditors(PropertyEditorRegistry registry) {
registry.registerCustomEditor(Date.class, new CustomDateEditor(new SimpleDateFormat("yyyy/MM/dd"), true));
}
}
| CustomDateEditorRegistrar |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/GenericTypeResolverTests.java | {
"start": 10564,
"end": 10641
} | class ____ implements MyInterfaceType<String> {
}
public | MySimpleInterfaceType |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/proxy/Mixin.java | {
"start": 7629,
"end": 8701
} | class ____
{
private Class[] classes;
private int[] route;
Route(Object[] delegates) {
Map map = new HashMap();
ArrayList collect = new ArrayList();
for (int i = 0; i < delegates.length; i++) {
Class delegate = delegates[i].getClass();
collect.clear();
ReflectUtils.addAllInterfaces(delegate, collect);
for (Iterator it = collect.iterator(); it.hasNext();) {
Class iface = (Class)it.next();
if (!map.containsKey(iface)) {
map.put(iface, i);
}
}
}
classes = new Class[map.size()];
route = new int[map.size()];
int index = 0;
for (Iterator it = map.keySet().iterator(); it.hasNext();) {
Class key = (Class)it.next();
classes[index] = key;
route[index] = ((Integer)map.get(key));
index++;
}
}
}
}
| Route |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/onexception/OnExceptionGlobalAndDLCErrorHandlerIssueTest.java | {
"start": 985,
"end": 2620
} | class ____ extends ContextTestSupport {
@Test
public void testNoOnGlobalException() throws Exception {
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:dead").expectedMessageCount(0);
getMockEndpoint("mock:global").expectedMessageCount(1);
getMockEndpoint("mock:local").expectedMessageCount(0);
template.sendBody("direct:foo", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testOnRouteException() throws Exception {
getMockEndpoint("mock:bar").expectedMessageCount(1);
getMockEndpoint("mock:dead").expectedMessageCount(0);
getMockEndpoint("mock:global").expectedMessageCount(0);
getMockEndpoint("mock:local").expectedMessageCount(1);
template.sendBody("direct:bar", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:dead"));
onException(Exception.class).handled(true).to("mock:global");
from("direct:bar").routeId("bar").onException(IllegalArgumentException.class).handled(true).to("mock:local")
.end().to("mock:bar")
.throwException(new IllegalArgumentException("Damn"));
from("direct:foo").routeId("foo").to("mock:foo").throwException(new IllegalArgumentException("Damn"));
}
};
}
}
| OnExceptionGlobalAndDLCErrorHandlerIssueTest |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/RequestHandlerJandexUtilTest.java | {
"start": 23379,
"end": 23550
} | class ____ implements NestedDefaultInterface {
// Uses the nested default method
}
// Collection test handler classes
public static | NestedInterfaceHandler |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/StringUtf8Utils.java | {
"start": 1437,
"end": 13211
} | class ____ {
private static final int MAX_BYTES_PER_CHAR = 3;
/** This method must have the same result with JDK's String.getBytes. */
public static byte[] encodeUTF8(String str) {
byte[] bytes = allocateReuseBytes(str.length() * MAX_BYTES_PER_CHAR);
int len = encodeUTF8(str, bytes);
return Arrays.copyOf(bytes, len);
}
public static int encodeUTF8(String str, byte[] bytes) {
int offset = 0;
int len = str.length();
int sl = offset + len;
int dp = 0;
int dlASCII = dp + Math.min(len, bytes.length);
// ASCII only optimized loop
while (dp < dlASCII && str.charAt(offset) < '\u0080') {
bytes[dp++] = (byte) str.charAt(offset++);
}
while (offset < sl) {
char c = str.charAt(offset++);
if (c < 0x80) {
// Have at most seven bits
bytes[dp++] = (byte) c;
} else if (c < 0x800) {
// 2 bytes, 11 bits
bytes[dp++] = (byte) (0xc0 | (c >> 6));
bytes[dp++] = (byte) (0x80 | (c & 0x3f));
} else if (Character.isSurrogate(c)) {
final int uc;
int ip = offset - 1;
if (Character.isHighSurrogate(c)) {
if (sl - ip < 2) {
uc = -1;
} else {
char d = str.charAt(ip + 1);
if (Character.isLowSurrogate(d)) {
uc = Character.toCodePoint(c, d);
} else {
// for some illegal character
// the jdk will ignore the origin character and cast it to '?'
// this acts the same with jdk
return defaultEncodeUTF8(str, bytes);
}
}
} else {
if (Character.isLowSurrogate(c)) {
// for some illegal character
// the jdk will ignore the origin character and cast it to '?'
// this acts the same with jdk
return defaultEncodeUTF8(str, bytes);
} else {
uc = c;
}
}
if (uc < 0) {
bytes[dp++] = (byte) '?';
} else {
bytes[dp++] = (byte) (0xf0 | ((uc >> 18)));
bytes[dp++] = (byte) (0x80 | ((uc >> 12) & 0x3f));
bytes[dp++] = (byte) (0x80 | ((uc >> 6) & 0x3f));
bytes[dp++] = (byte) (0x80 | (uc & 0x3f));
offset++; // 2 chars
}
} else {
// 3 bytes, 16 bits
bytes[dp++] = (byte) (0xe0 | ((c >> 12)));
bytes[dp++] = (byte) (0x80 | ((c >> 6) & 0x3f));
bytes[dp++] = (byte) (0x80 | (c & 0x3f));
}
}
return dp;
}
public static int defaultEncodeUTF8(String str, byte[] bytes) {
try {
byte[] buffer = str.getBytes("UTF-8");
System.arraycopy(buffer, 0, bytes, 0, buffer.length);
return buffer.length;
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("encodeUTF8 error", e);
}
}
public static String decodeUTF8(byte[] input, int offset, int byteLen) {
char[] chars = allocateReuseChars(byteLen);
int len = decodeUTF8Strict(input, offset, byteLen, chars);
if (len < 0) {
return defaultDecodeUTF8(input, offset, byteLen);
}
return new String(chars, 0, len);
}
public static int decodeUTF8Strict(byte[] sa, int sp, int len, char[] da) {
final int sl = sp + len;
int dp = 0;
int dlASCII = Math.min(len, da.length);
// ASCII only optimized loop
while (dp < dlASCII && sa[sp] >= 0) {
da[dp++] = (char) sa[sp++];
}
while (sp < sl) {
int b1 = sa[sp++];
if (b1 >= 0) {
// 1 byte, 7 bits: 0xxxxxxx
da[dp++] = (char) b1;
} else if ((b1 >> 5) == -2 && (b1 & 0x1e) != 0) {
// 2 bytes, 11 bits: 110xxxxx 10xxxxxx
if (sp < sl) {
int b2 = sa[sp++];
if ((b2 & 0xc0) != 0x80) { // isNotContinuation(b2)
return -1;
} else {
da[dp++] = (char) (((b1 << 6) ^ b2) ^ (((byte) 0xC0 << 6) ^ ((byte) 0x80)));
}
continue;
}
return -1;
} else if ((b1 >> 4) == -2) {
// 3 bytes, 16 bits: 1110xxxx 10xxxxxx 10xxxxxx
if (sp + 1 < sl) {
int b2 = sa[sp++];
int b3 = sa[sp++];
if ((b1 == (byte) 0xe0 && (b2 & 0xe0) == 0x80)
|| (b2 & 0xc0) != 0x80
|| (b3 & 0xc0) != 0x80) { // isMalformed3(b1, b2, b3)
return -1;
} else {
char c =
(char)
((b1 << 12)
^ (b2 << 6)
^ (b3
^ (((byte) 0xE0 << 12)
^ ((byte) 0x80 << 6)
^ ((byte) 0x80))));
if (Character.isSurrogate(c)) {
return -1;
} else {
da[dp++] = c;
}
}
continue;
}
return -1;
} else if ((b1 >> 3) == -2) {
// 4 bytes, 21 bits: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
if (sp + 2 < sl) {
int b2 = sa[sp++];
int b3 = sa[sp++];
int b4 = sa[sp++];
int uc =
((b1 << 18)
^ (b2 << 12)
^ (b3 << 6)
^ (b4
^ (((byte) 0xF0 << 18)
^ ((byte) 0x80 << 12)
^ ((byte) 0x80 << 6)
^ ((byte) 0x80))));
// isMalformed4 and shortest form check
if (((b2 & 0xc0) != 0x80 || (b3 & 0xc0) != 0x80 || (b4 & 0xc0) != 0x80)
|| !Character.isSupplementaryCodePoint(uc)) {
return -1;
} else {
da[dp++] = Character.highSurrogate(uc);
da[dp++] = Character.lowSurrogate(uc);
}
continue;
}
return -1;
} else {
return -1;
}
}
return dp;
}
public static String decodeUTF8(MemorySegment input, int offset, int byteLen) {
char[] chars = allocateReuseChars(byteLen);
int len = decodeUTF8Strict(input, offset, byteLen, chars);
if (len < 0) {
byte[] bytes = allocateReuseBytes(byteLen);
input.get(offset, bytes, 0, byteLen);
return defaultDecodeUTF8(bytes, 0, byteLen);
}
return new String(chars, 0, len);
}
public static int decodeUTF8Strict(MemorySegment segment, int sp, int len, char[] da) {
final int sl = sp + len;
int dp = 0;
int dlASCII = Math.min(len, da.length);
// ASCII only optimized loop
while (dp < dlASCII && segment.get(sp) >= 0) {
da[dp++] = (char) segment.get(sp++);
}
while (sp < sl) {
int b1 = segment.get(sp++);
if (b1 >= 0) {
// 1 byte, 7 bits: 0xxxxxxx
da[dp++] = (char) b1;
} else if ((b1 >> 5) == -2 && (b1 & 0x1e) != 0) {
// 2 bytes, 11 bits: 110xxxxx 10xxxxxx
if (sp < sl) {
int b2 = segment.get(sp++);
if ((b2 & 0xc0) != 0x80) { // isNotContinuation(b2)
return -1;
} else {
da[dp++] = (char) (((b1 << 6) ^ b2) ^ (((byte) 0xC0 << 6) ^ ((byte) 0x80)));
}
continue;
}
return -1;
} else if ((b1 >> 4) == -2) {
// 3 bytes, 16 bits: 1110xxxx 10xxxxxx 10xxxxxx
if (sp + 1 < sl) {
int b2 = segment.get(sp++);
int b3 = segment.get(sp++);
if ((b1 == (byte) 0xe0 && (b2 & 0xe0) == 0x80)
|| (b2 & 0xc0) != 0x80
|| (b3 & 0xc0) != 0x80) { // isMalformed3(b1, b2, b3)
return -1;
} else {
char c =
(char)
((b1 << 12)
^ (b2 << 6)
^ (b3
^ (((byte) 0xE0 << 12)
^ ((byte) 0x80 << 6)
^ ((byte) 0x80))));
if (Character.isSurrogate(c)) {
return -1;
} else {
da[dp++] = c;
}
}
continue;
}
return -1;
} else if ((b1 >> 3) == -2) {
// 4 bytes, 21 bits: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
if (sp + 2 < sl) {
int b2 = segment.get(sp++);
int b3 = segment.get(sp++);
int b4 = segment.get(sp++);
int uc =
((b1 << 18)
^ (b2 << 12)
^ (b3 << 6)
^ (b4
^ (((byte) 0xF0 << 18)
^ ((byte) 0x80 << 12)
^ ((byte) 0x80 << 6)
^ ((byte) 0x80))));
// isMalformed4 and shortest form check
if (((b2 & 0xc0) != 0x80 || (b3 & 0xc0) != 0x80 || (b4 & 0xc0) != 0x80)
|| !Character.isSupplementaryCodePoint(uc)) {
return -1;
} else {
da[dp++] = Character.highSurrogate(uc);
da[dp++] = Character.lowSurrogate(uc);
}
continue;
}
return -1;
} else {
return -1;
}
}
return dp;
}
public static String defaultDecodeUTF8(byte[] bytes, int offset, int len) {
return new String(bytes, offset, len, StandardCharsets.UTF_8);
}
}
| StringUtf8Utils |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/resource/beans/spi/ManagedBeanRegistryInitiator.java | {
"start": 3504,
"end": 4424
} | class ____");
}
return CdiBeanContainerBuilder.fromBeanManagerReference( beanManager, serviceRegistry );
}
else {
if ( isCdiAvailable ) {
BEANS_MSG_LOGGER.noBeanManagerButCdiAvailable();
}
return null;
}
}
private static Object getConfiguredBeanManager(ServiceRegistry serviceRegistry) {
final Map<String, Object> settings = serviceRegistry.requireService( ConfigurationService.class ).getSettings();
final Object beanManager = settings.get( JAKARTA_CDI_BEAN_MANAGER );
return beanManager != null ? beanManager : settings.get( CDI_BEAN_MANAGER );
}
private BeanContainer interpretExplicitBeanContainer(Object explicitSetting, ServiceRegistry serviceRegistry) {
if ( explicitSetting == null ) {
return null;
}
else if ( explicitSetting instanceof BeanContainer beanContainer ) {
return beanContainer;
}
else {
// otherwise we ultimately need to resolve this to a | loader |
java | apache__camel | components/camel-debezium/camel-debezium-common/camel-debezium-common-component/src/main/java/org/apache/camel/component/debezium/DebeziumComponent.java | {
"start": 1325,
"end": 3512
} | class ____<C extends EmbeddedDebeziumConfiguration> extends DefaultComponent {
protected DebeziumComponent() {
}
protected DebeziumComponent(CamelContext context) {
super(context);
}
@Override
protected DebeziumEndpoint<C> createEndpoint(String uri, String remaining, Map<String, Object> parameters)
throws Exception {
// use a copy on the endpoint
final C configuration = (C) getConfiguration().copy();
if (ObjectHelper.isEmpty(remaining) && ObjectHelper.isEmpty(configuration.getName())) {
throw new IllegalArgumentException(
String.format("Connector name must be configured on endpoint using syntax debezium-%s:name",
configuration.getConnectorDatabaseType()));
}
// if we have name in path, we override the name in the configuration
if (!ObjectHelper.isEmpty(remaining)) {
configuration.setName(remaining);
}
DebeziumEndpoint<C> endpoint = initializeDebeziumEndpoint(uri, configuration);
setProperties(endpoint, parameters);
// extract the additional properties map
if (PropertiesHelper.hasProperties(parameters, "additionalProperties.")) {
final Map<String, Object> additionalProperties = endpoint.getConfiguration().getAdditionalProperties();
// add and overwrite additional properties from endpoint to pre-configured properties
additionalProperties.putAll(PropertiesHelper.extractProperties(parameters, "additionalProperties."));
}
// validate configurations
final ConfigurationValidation configurationValidation = configuration.validateConfiguration();
if (!configurationValidation.isValid()) {
throw new IllegalArgumentException(configurationValidation.getReason());
}
return endpoint;
}
protected abstract DebeziumEndpoint<C> initializeDebeziumEndpoint(String uri, C configuration);
public abstract C getConfiguration();
@Metadata(description = "Component configuration")
public abstract void setConfiguration(C configuration);
}
| DebeziumComponent |
java | reactor__reactor-core | reactor-test/src/main/java/reactor/test/subscriber/DefaultConditionalTestSubscriber.java | {
"start": 858,
"end": 2130
} | class ____<T> extends DefaultTestSubscriber<T>
implements ConditionalTestSubscriber<T> {
final Predicate<? super T> tryOnNextPredicate;
DefaultConditionalTestSubscriber(TestSubscriberBuilder options,
Predicate<? super T> tryOnNextPredicate) {
super(options);
this.tryOnNextPredicate = tryOnNextPredicate;
}
@Override
public boolean tryOnNext(T t) {
int previousState = markOnNextStart();
boolean wasTerminated = isMarkedTerminated(previousState);
boolean wasOnNext = isMarkedOnNext(previousState);
if (wasTerminated || wasOnNext) {
//at this point, we know we haven't switched the markedOnNext bit. if it is set, let the other onNext unset it
this.protocolErrors.add(Signal.next(t));
return false;
}
try {
if (tryOnNextPredicate.test(t)) {
this.receivedOnNext.add(t);
if (cancelled.get()) {
this.receivedPostCancellation.add(t);
}
checkTerminatedAfterOnNext();
return true;
}
else {
Operators.onDiscard(t, currentContext());
checkTerminatedAfterOnNext();
return false;
}
}
catch (Throwable predicateError) {
markOnNextDone();
internalCancel();
onError(predicateError);
return false; //this is consistent with eg. Flux.filter
}
}
}
| DefaultConditionalTestSubscriber |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/autoconfigure/HttpClientProperties.java | {
"start": 1465,
"end": 2190
} | interface ____ group. By default, set to empty
* {@link Map}.
*/
private Map<String, List<String>> defaultHeader = new LinkedHashMap<>();
/**
* API version properties.
*/
@NestedConfigurationProperty
private final ApiversionProperties apiversion = new ApiversionProperties();
public @Nullable String getBaseUrl() {
return this.baseUrl;
}
public void setBaseUrl(@Nullable String baseUrl) {
this.baseUrl = baseUrl;
}
public Map<String, List<String>> getDefaultHeader() {
return this.defaultHeader;
}
public void setDefaultHeader(Map<String, List<String>> defaultHeaders) {
this.defaultHeader = defaultHeaders;
}
public ApiversionProperties getApiversion() {
return this.apiversion;
}
}
| client |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/misc/HttpClientManager.java | {
"start": 5526,
"end": 5999
} | class ____ extends AbstractHttpClientFactory {
@Override
protected HttpClientConfig buildHttpClientConfig() {
return HttpClientConfig.builder().setConTimeOutMillis(CON_TIME_OUT_MILLIS)
.setReadTimeOutMillis(TIME_OUT_MILLIS).setMaxRedirects(0).build();
}
@Override
protected Logger assignLogger() {
return SRV_LOG;
}
}
private static | SyncHttpClientFactory |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/support/jsse/BaseSSLContextParameters.java | {
"start": 37290,
"end": 37453
} | class ____ wraps an {@code SSLContext} as if
* it were an {@code SSLContextSpi}, allowing us to achieve decoration.
*/
protected static final | effectively |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultExchangeFactoryManager.java | {
"start": 1247,
"end": 3589
} | class ____ extends ServiceSupport implements ExchangeFactoryManager, CamelContextAware {
private final Map<Consumer, ExchangeFactory> factories = new ConcurrentHashMap<>();
private final UtilizationStatistics statistics = new UtilizationStatistics();
private CamelContext camelContext;
private int capacity;
private boolean statisticsEnabled;
public CamelContext getCamelContext() {
return camelContext;
}
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public void addExchangeFactory(ExchangeFactory exchangeFactory) {
factories.put(exchangeFactory.getConsumer(), exchangeFactory);
// same for all factories
capacity = exchangeFactory.getCapacity();
statisticsEnabled = exchangeFactory.isStatisticsEnabled();
}
@Override
public void removeExchangeFactory(ExchangeFactory exchangeFactory) {
factories.remove(exchangeFactory.getConsumer());
}
@Override
public Collection<ExchangeFactory> getExchangeFactories() {
return Collections.unmodifiableCollection(factories.values());
}
@Override
public int getConsumerCounter() {
return factories.size();
}
@Override
public int getCapacity() {
return capacity;
}
@Override
public int getPooledCounter() {
int counter = 0;
for (ExchangeFactory ef : factories.values()) {
counter += ef.getSize();
}
return counter;
}
@Override
public boolean isStatisticsEnabled() {
return statisticsEnabled;
}
@Override
public void setStatisticsEnabled(boolean statisticsEnabled) {
this.statisticsEnabled = statisticsEnabled;
for (ExchangeFactory ef : factories.values()) {
ef.setStatisticsEnabled(statisticsEnabled);
}
}
@Override
public void resetStatistics() {
factories.values().forEach(ExchangeFactory::resetStatistics);
}
@Override
public void purge() {
factories.values().forEach(ExchangeFactory::purge);
}
@Override
public ExchangeFactory.Statistics getStatistics() {
return statistics;
}
/**
* Represents utilization statistics
*/
final | DefaultExchangeFactoryManager |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socksx/v4/Socks4ClientDecoder.java | {
"start": 1464,
"end": 1553
} | class ____ extends ReplayingDecoder<State> {
@UnstableApi
public | Socks4ClientDecoder |
java | elastic__elasticsearch | x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionEncoder.java | {
"start": 2019,
"end": 10289
} | class ____ {
public static final byte NUMERIC_MARKER_BYTE = (byte) 0x01;
public static final byte PRERELEASE_SEPARATOR_BYTE = (byte) 0x02;
public static final byte NO_PRERELEASE_SEPARATOR_BYTE = (byte) 0x03;
private static final char PRERELEASE_SEPARATOR = '-';
private static final char DOT_SEPARATOR = '.';
private static final char BUILD_SEPARATOR = '+';
private static final String ENCODED_EMPTY_STRING = new String(new byte[] { NO_PRERELEASE_SEPARATOR_BYTE }, Charsets.UTF_8);
// Regex to test relaxed Semver Main Version validity. Allows for more or less than three main version parts
private static Pattern LEGAL_MAIN_VERSION_SEMVER = Pattern.compile("(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*");
private static Pattern LEGAL_PRERELEASE_VERSION_SEMVER = Pattern.compile(
"(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))"
);
private static Pattern LEGAL_BUILDSUFFIX_SEMVER = Pattern.compile("(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?");
/**
* Encodes a version string.
*/
public static EncodedVersion encodeVersion(String versionString) {
VersionParts versionParts = VersionParts.ofVersion(versionString);
// don't treat non-legal versions further, just mark them as illegal and return
if (legalVersionString(versionParts) == false) {
if (versionString.length() == 0) {
// special case, we want empty string to sort after valid strings, which all start with 0x01, add a higher char that
// we are sure to remove when decoding
versionString = ENCODED_EMPTY_STRING;
}
return new EncodedVersion(new BytesRef(versionString), false, true, null, null, null);
}
BytesRefBuilder encodedBytes = new BytesRefBuilder();
Integer[] mainVersionParts = prefixDigitGroupsWithLength(versionParts.mainVersion, encodedBytes);
if (versionParts.preRelease != null) {
encodedBytes.append(PRERELEASE_SEPARATOR_BYTE); // versions with pre-release part sort before ones without
encodedBytes.append((byte) PRERELEASE_SEPARATOR);
String[] preReleaseParts = versionParts.preRelease.substring(1).split("\\.");
boolean first = true;
for (String preReleasePart : preReleaseParts) {
if (first == false) {
encodedBytes.append((byte) DOT_SEPARATOR);
}
boolean isNumeric = preReleasePart.chars().allMatch(x -> Character.isDigit(x));
if (isNumeric) {
prefixDigitGroupsWithLength(preReleasePart, encodedBytes);
} else {
encodedBytes.append(new BytesRef(preReleasePart));
}
first = false;
}
} else {
encodedBytes.append(NO_PRERELEASE_SEPARATOR_BYTE);
}
if (versionParts.buildSuffix != null) {
encodedBytes.append(new BytesRef(versionParts.buildSuffix));
}
return new EncodedVersion(
encodedBytes.toBytesRef(),
true,
versionParts.preRelease != null,
mainVersionParts[0],
mainVersionParts[1],
mainVersionParts[2]
);
}
private static Integer[] prefixDigitGroupsWithLength(String input, BytesRefBuilder result) {
int pos = 0;
int mainVersionCounter = 0;
Integer[] mainVersionComponents = new Integer[3];
while (pos < input.length()) {
if (Character.isDigit(input.charAt(pos))) {
// found beginning of number block, so get its length
int start = pos;
BytesRefBuilder number = new BytesRefBuilder();
while (pos < input.length() && Character.isDigit(input.charAt(pos))) {
number.append((byte) input.charAt(pos));
pos++;
}
int length = pos - start;
if (length >= 128) {
throw new IllegalArgumentException("Groups of digits cannot be longer than 127, but found: " + length);
}
result.append(NUMERIC_MARKER_BYTE); // ensure length byte does cause higher sort order comparing to other byte[]
result.append((byte) (length | 0x80)); // add upper bit to mark as length
result.append(number);
// if present, parse out three leftmost version parts
if (mainVersionCounter < 3) {
mainVersionComponents[mainVersionCounter] = Integer.valueOf(number.toBytesRef().utf8ToString());
mainVersionCounter++;
}
} else {
result.append((byte) input.charAt(pos));
pos++;
}
}
return mainVersionComponents;
}
public static BytesRef decodeVersion(BytesRef version) {
int inputPos = version.offset;
int resultPos = 0;
byte[] result = new byte[version.length];
while (inputPos < version.offset + version.length) {
byte inputByte = version.bytes[inputPos];
if (inputByte == NUMERIC_MARKER_BYTE) {
// need to skip this byte
inputPos++;
// this should always be a length encoding, which is skipped by increasing inputPos at the end of the loop
assert version.bytes[inputPos] < 0;
} else if (inputByte != PRERELEASE_SEPARATOR_BYTE && inputByte != NO_PRERELEASE_SEPARATOR_BYTE) {
result[resultPos] = inputByte;
resultPos++;
}
inputPos++;
}
return new BytesRef(result, 0, resultPos);
}
static boolean legalVersionString(VersionParts versionParts) {
boolean legalMainVersion = LEGAL_MAIN_VERSION_SEMVER.matcher(versionParts.mainVersion).matches();
boolean legalPreRelease = true;
if (versionParts.preRelease != null) {
legalPreRelease = LEGAL_PRERELEASE_VERSION_SEMVER.matcher(versionParts.preRelease).matches();
}
boolean legalBuildSuffix = true;
if (versionParts.buildSuffix != null) {
legalBuildSuffix = LEGAL_BUILDSUFFIX_SEMVER.matcher(versionParts.buildSuffix).matches();
}
return legalMainVersion && legalPreRelease && legalBuildSuffix;
}
static CompiledAutomaton prefixAutomaton(String versionPrefix, boolean caseInsensitive) {
Automaton a = new Automaton();
Automaton.Builder builder = new Automaton.Builder();
int lastState = builder.createState();
if (versionPrefix.isEmpty() == false) {
if (caseInsensitive) {
versionPrefix = versionPrefix.toLowerCase(Locale.ROOT);
}
BytesRef bytesRef = new BytesRef(versionPrefix);
byte[] prefixBytes = bytesRef.bytes;
for (int i = 0; i < bytesRef.length; i++) {
// add self-loop transition for possibility of marker bytes after each original byte
builder.addTransition(lastState, lastState, NO_PRERELEASE_SEPARATOR_BYTE);
builder.addTransition(lastState, lastState, PRERELEASE_SEPARATOR_BYTE);
// for numeric marker we need to be able to skip two bytes
int intermediateState = builder.createState();
builder.addTransition(lastState, intermediateState, NUMERIC_MARKER_BYTE);
builder.addTransition(intermediateState, lastState, 0, 255);
int state = builder.createState();
byte b = prefixBytes[i];
int label = b & 0xff;
builder.addTransition(lastState, state, label);
lastState = state;
}
}
builder.setAccept(lastState, true);
a = builder.finish();
assert Operations.hasDeadStates(a) == false;
a = Operations.concatenate(a, Automata.makeAnyBinary());
assert a.isDeterministic();
a = Operations.determinize(a, 0);
return new CompiledAutomaton(a, false, true, true);
}
static | VersionEncoder |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ActiveMQEndpointBuilderFactory.java | {
"start": 25054,
"end": 26005
} | class ____ is
* good enough as subscription name). Only makes sense when listening to
* a topic (pub-sub domain), therefore this method switches the
* pubSubDomain flag as well.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param subscriptionDurable the value to set
* @return the dsl builder
*/
default ActiveMQEndpointConsumerBuilder subscriptionDurable(String subscriptionDurable) {
doSetProperty("subscriptionDurable", subscriptionDurable);
return this;
}
/**
* Set the name of a subscription to create. To be applied in case of a
* topic (pub-sub domain) with a shared or durable subscription. The
* subscription name needs to be unique within this client's JMS client
* id. Default is the | name |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/RedisHandshakeUnitTests.java | {
"start": 14159,
"end": 14600
} | class ____ implements RedisCredentialsProvider {
private final Sinks.One<RedisCredentials> credentialsSink = Sinks.one();
@Override
public Mono<RedisCredentials> resolveCredentials() {
return credentialsSink.asMono();
}
public void completeCredentials(RedisCredentials credentials) {
credentialsSink.tryEmitValue(credentials);
}
}
}
| DelayedRedisCredentialsProvider |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/CronTriggeringPolicy.java | {
"start": 6798,
"end": 6927
} | class ____ implements Runnable {
@Override
public void run() {
rollover();
}
}
}
| CronTrigger |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/benchmark/basic/ShortBenchmark_arrayMappinng_obj.java | {
"start": 1602,
"end": 1749
} | class ____ {
public Short v1;
public Short v2;
public Short v3;
public Short v4;
public Short v5;
}
}
| Model |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/autoconfigure/JdbcClientAutoConfigurationTests.java | {
"start": 1233,
"end": 2628
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withPropertyValues("spring.datasource.generate-unique-name=true")
.withConfiguration(AutoConfigurations.of(DataSourceAutoConfiguration.class, JdbcTemplateAutoConfiguration.class,
JdbcClientAutoConfiguration.class));
@Test
void jdbcClientWhenNoAvailableJdbcTemplateIsNotCreated() {
new ApplicationContextRunner()
.withConfiguration(
AutoConfigurations.of(DataSourceAutoConfiguration.class, JdbcClientAutoConfiguration.class))
.run((context) -> assertThat(context).doesNotHaveBean(JdbcClient.class));
}
@Test
void jdbcClientWhenExistingJdbcTemplateIsCreated() {
this.contextRunner.run((context) -> {
assertThat(context).hasSingleBean(JdbcClient.class);
NamedParameterJdbcTemplate namedParameterJdbcTemplate = context.getBean(NamedParameterJdbcTemplate.class);
assertThat(namedParameterJdbcTemplate.getJdbcOperations()).isEqualTo(context.getBean(JdbcOperations.class));
});
}
@Test
void jdbcClientWithCustomJdbcClientIsNotCreated() {
this.contextRunner.withBean("customJdbcClient", JdbcClient.class, () -> mock(JdbcClient.class))
.run((context) -> {
assertThat(context).hasSingleBean(JdbcClient.class);
assertThat(context.getBean(JdbcClient.class)).isEqualTo(context.getBean("customJdbcClient"));
});
}
}
| JdbcClientAutoConfigurationTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/rpc/RpcKvStateRegistryListener.java | {
"start": 1354,
"end": 2684
} | class ____ implements KvStateRegistryListener {
private final KvStateRegistryGateway kvStateRegistryGateway;
private final InetSocketAddress kvStateServerAddress;
public RpcKvStateRegistryListener(
KvStateRegistryGateway kvStateRegistryGateway, InetSocketAddress kvStateServerAddress) {
this.kvStateRegistryGateway = Preconditions.checkNotNull(kvStateRegistryGateway);
this.kvStateServerAddress = Preconditions.checkNotNull(kvStateServerAddress);
}
@Override
public void notifyKvStateRegistered(
JobID jobId,
JobVertexID jobVertexId,
KeyGroupRange keyGroupRange,
String registrationName,
KvStateID kvStateId) {
kvStateRegistryGateway.notifyKvStateRegistered(
jobId,
jobVertexId,
keyGroupRange,
registrationName,
kvStateId,
kvStateServerAddress);
}
@Override
public void notifyKvStateUnregistered(
JobID jobId,
JobVertexID jobVertexId,
KeyGroupRange keyGroupRange,
String registrationName) {
kvStateRegistryGateway.notifyKvStateUnregistered(
jobId, jobVertexId, keyGroupRange, registrationName);
}
}
| RpcKvStateRegistryListener |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/custom/CustomGenericJpaRepositoryFactory.java | {
"start": 1308,
"end": 2027
} | class ____ extends JpaRepositoryFactory {
public CustomGenericJpaRepositoryFactory(EntityManager entityManager) {
super(entityManager);
}
@Override
@SuppressWarnings("unchecked")
protected SimpleJpaRepository<?, ?> getTargetRepository(RepositoryInformation information, EntityManager em) {
JpaEntityInformation<Object, Serializable> entityMetadata = mock(JpaEntityInformation.class);
when(entityMetadata.getJavaType()).thenReturn((Class<Object>) information.getDomainType());
return new CustomGenericJpaRepository<>(entityMetadata, em);
}
@Override
protected Class<?> getRepositoryBaseClass(RepositoryMetadata metadata) {
return CustomGenericJpaRepository.class;
}
}
| CustomGenericJpaRepositoryFactory |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/converter/ParamConverterProviderTest.java | {
"start": 3307,
"end": 4278
} | interface ____ {
@Path("/sub")
SubClient sub();
@GET
@Path("/param/{param}")
String get(@MyAnnotation("myValue") @PathParam("param") Param param);
@GET
@Path("/param/{param}")
String get(@BeanParam Bean beanParam);
@GET
@Path("/query")
String getWithQuery(@MyAnnotation("myValue") @QueryParam("param") Param param);
@GET
@Path("/query")
String getWithQuery(@BeanParam Bean beanParam);
@GET
@Path("/header")
String getWithHeader(@MyAnnotation("myValue") @HeaderParam("param") Param param);
@GET
@Path("/header")
String getWithHeader(@BeanParam Bean beanParam);
@GET
@Path("/cookie")
String getWithCookie(@MyAnnotation("myValue") @CookieParam("cookie-param") Param param);
@GET
@Path("/cookie")
String getWithCookie(@BeanParam Bean beanParam);
}
| Client |
java | micronaut-projects__micronaut-core | http-server/src/main/java/io/micronaut/http/server/cors/CorsFilter.java | {
"start": 3187,
"end": 22707
} | class ____ implements Ordered, ConditionalFilter {
public static final int CORS_FILTER_ORDER = ServerFilterPhase.METRICS.after();
private static final Logger LOG = LoggerFactory.getLogger(CorsFilter.class);
private static final ArgumentConversionContext<HttpMethod> CONVERSION_CONTEXT_HTTP_METHOD = ImmutableArgumentConversionContext.of(HttpMethod.class);
protected final HttpServerConfiguration.CorsConfiguration corsConfiguration;
@Nullable
private final HttpHostResolver httpHostResolver;
private final Router router;
/**
* @param corsConfiguration The {@link CorsOriginConfiguration} instance
* @param httpHostResolver HTTP Host resolver
* @deprecated use {@link CorsFilter(HttpServerConfiguration.CorsConfiguration, HttpHostResolver, Router)} instead.
*/
@Deprecated(since = "4.7", forRemoval = true)
public CorsFilter(HttpServerConfiguration.CorsConfiguration corsConfiguration,
@Nullable HttpHostResolver httpHostResolver) {
this.corsConfiguration = corsConfiguration;
this.httpHostResolver = httpHostResolver;
this.router = null;
}
/**
* @param corsConfiguration The {@link CorsOriginConfiguration} instance
* @param httpHostResolver HTTP Host resolver
* @param router Router
*/
@Inject
public CorsFilter(HttpServerConfiguration.CorsConfiguration corsConfiguration,
@Nullable HttpHostResolver httpHostResolver,
Router router) {
this.corsConfiguration = corsConfiguration;
this.httpHostResolver = httpHostResolver;
this.router = router;
}
@Override
public boolean isEnabled(HttpRequest<?> request) {
String origin = request.getOrigin().orElse(null);
if (origin == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Http Header " + HttpHeaders.ORIGIN + " not present. Proceeding with the request.");
}
return false;
}
return true;
}
@PreMatching
@RequestFilter
@Nullable
@Internal
public final HttpResponse<?> filterPreFlightRequest(HttpRequest<?> request) {
if (isEnabled(request) && CorsUtil.isPreflightRequest(request)) {
CorsOriginConfiguration corsOriginConfiguration = getAnyConfiguration(request).orElse(null);
if (corsOriginConfiguration != null) {
return handlePreflightRequest(request, corsOriginConfiguration);
}
}
return null; // proceed
}
@RequestFilter
@Nullable
@Internal
public final HttpResponse<?> filterRequest(HttpRequest<?> request) {
String origin = request.getOrigin().orElse(null);
if (origin == null) {
LOG.trace("Http Header {} not present. Proceeding with the request.", HttpHeaders.ORIGIN);
return null; // proceed
}
CorsOriginConfiguration corsOriginConfiguration = getConfiguration(request).orElse(null);
if (corsOriginConfiguration != null) {
// These validation might be configured on the actual route
if (validateMethodToMatch(request, corsOriginConfiguration).isEmpty()) {
return forbidden();
}
if (shouldDenyToPreventDriveByLocalhostAttack(corsOriginConfiguration, request)) {
LOG.trace("The resolved configuration allows any origin. To prevent drive-by-localhost attacks the request is forbidden");
return forbidden();
}
return null; // proceed
} else if (shouldDenyToPreventDriveByLocalhostAttack(origin, request)) {
LOG.trace("The request specifies an origin different than localhost. To prevent drive-by-localhost attacks the request is forbidden");
return forbidden();
}
LOG.trace("CORS configuration not found for {} origin", origin);
return null; // proceed
}
@ResponseFilter
@Internal
public final void filterResponse(HttpRequest<?> request, MutableHttpResponse<?> response) {
CorsOriginConfiguration corsOriginConfiguration = getConfiguration(request).orElse(null);
if (corsOriginConfiguration != null) {
if (CorsUtil.isPreflightRequest(request)) {
decorateResponseWithHeadersForPreflightRequest(request, response, corsOriginConfiguration);
}
decorateResponseWithHeaders(request, response, corsOriginConfiguration);
}
}
/**
* @param corsOriginConfiguration CORS Origin configuration for request's HTTP Header origin.
* @param request HTTP Request
* @return {@literal true} if the resolved host is localhost or 127.0.0.1 address and the CORS configuration has any for allowed origins.
*/
protected boolean shouldDenyToPreventDriveByLocalhostAttack(@NonNull CorsOriginConfiguration corsOriginConfiguration,
@NonNull HttpRequest<?> request) {
if (corsConfiguration.isLocalhostPassThrough()) {
return false;
}
if (httpHostResolver == null) {
return false;
}
String origin = request.getOrigin().orElse(null);
if (origin == null) {
return false;
}
if (isOriginLocal(origin)) {
return false;
}
String host = httpHostResolver.resolve(request);
return (
corsOriginConfiguration.getAllowedOriginsRegex().isEmpty() && isAny(corsOriginConfiguration.getAllowedOrigins())
) && isHostLocal(host);
}
/**
* @param origin HTTP Header {@link HttpHeaders#ORIGIN} value.
* @param request HTTP Request
* @return {@literal true} if the resolved host is localhost or 127.0.0.1 and origin is not one of these then deny it.
*/
protected boolean shouldDenyToPreventDriveByLocalhostAttack(@NonNull String origin,
@NonNull HttpRequest<?> request) {
if (corsConfiguration.isLocalhostPassThrough()) {
return false;
}
if (httpHostResolver == null) {
return false;
}
String host = httpHostResolver.resolve(request);
return !isOriginLocal(origin) && isHostLocal(host);
}
/*
* We only need to check host for starting with "localhost" "127." (as there are multiple loopback addresses on linux)
*
* This is fine for host, as the request had to get here.
*
* We check the first character as a performance optimization prior to calling startsWith.
*/
private boolean isHostLocal(@NonNull String hostString) {
if (hostString.isEmpty()) {
return false;
}
char initialChar = hostString.charAt(0);
if (initialChar != 'h' && initialChar != 'w') {
return false;
}
return hostString.startsWith("http://localhost")
|| hostString.startsWith("https://localhost")
|| hostString.startsWith("http://127.")
|| hostString.startsWith("https://127.")
|| hostString.startsWith("ws://localhost")
|| hostString.startsWith("wss://localhost")
|| hostString.startsWith("ws://127.")
|| hostString.startsWith("wss://127.");
}
/*
* For Origin, we need to be more strict as otherwise an address like 127.malicious.com would be allowed.
*/
private boolean isOriginLocal(@NonNull String hostString) {
try {
URI uri = URI.create(hostString);
String host = uri.getHost();
return SocketUtils.LOCALHOST.equals(host) || "127.0.0.1".equals(host);
} catch (IllegalArgumentException e) {
return false;
}
}
@Override
public int getOrder() {
return CORS_FILTER_ORDER;
}
@NonNull
private Optional<HttpMethod> validateMethodToMatch(@NonNull HttpRequest<?> request,
@NonNull CorsOriginConfiguration config) {
HttpMethod methodToMatch = methodToMatch(request);
if (!methodAllowed(config, methodToMatch)) {
return Optional.empty();
}
return Optional.of(methodToMatch);
}
/**
* @param config The {@link CorsOriginConfiguration} instance
* @param response The {@link MutableHttpResponse} object
*/
protected void setAllowCredentials(CorsOriginConfiguration config, MutableHttpResponse<?> response) {
if (config.isAllowCredentials()) {
response.header(ACCESS_CONTROL_ALLOW_CREDENTIALS, StringUtils.TRUE);
}
}
/**
* Sets the HTTP Header {@value HttpHeaders#ACCESS_CONTROL_ALLOW_PRIVATE_NETWORK} in the response to {@code true}, if the {@link CorsOriginConfiguration#isAllowPrivateNetwork()} is {@code true}.
*
* @param config The {@link CorsOriginConfiguration} instance
* @param response The {@link MutableHttpResponse} object
*/
protected void setAllowPrivateNetwork(CorsOriginConfiguration config, MutableHttpResponse<?> response) {
if (config.isAllowPrivateNetwork()) {
response.header(ACCESS_CONTROL_ALLOW_PRIVATE_NETWORK, StringUtils.TRUE);
}
}
/**
* @param exposedHeaders A list of the exposed headers
* @param response The {@link MutableHttpResponse} object
*/
protected void setExposeHeaders(List<String> exposedHeaders, MutableHttpResponse<?> response) {
if (corsConfiguration.isSingleHeader()) {
String headerValue = String.join(",", exposedHeaders);
if (StringUtils.isNotEmpty(headerValue)) {
response.header(ACCESS_CONTROL_EXPOSE_HEADERS, headerValue);
}
} else {
exposedHeaders.forEach(header -> response.header(ACCESS_CONTROL_EXPOSE_HEADERS, header));
}
}
/**
* @param response The {@link MutableHttpResponse} object
*/
protected void setVary(MutableHttpResponse<?> response) {
response.header(VARY, ORIGIN);
}
/**
* @param origin The origin
* @param response The {@link MutableHttpResponse} object
*/
protected void setOrigin(@Nullable String origin, @NonNull MutableHttpResponse<?> response) {
if (origin != null) {
response.header(ACCESS_CONTROL_ALLOW_ORIGIN, origin);
}
}
/**
* @param method The {@link HttpMethod} object
* @param response The {@link MutableHttpResponse} object
*/
protected void setAllowMethods(HttpMethod method, MutableHttpResponse<?> response) {
response.header(ACCESS_CONTROL_ALLOW_METHODS, method);
}
/**
* @param optionalAllowHeaders A list with optional allow headers
* @param response The {@link MutableHttpResponse} object
*/
protected void setAllowHeaders(List<?> optionalAllowHeaders, MutableHttpResponse<?> response) {
List<String> allowHeaders = optionalAllowHeaders.stream().map(Object::toString).collect(Collectors.toList());
if (corsConfiguration.isSingleHeader()) {
String headerValue = String.join(",", allowHeaders);
if (StringUtils.isNotEmpty(headerValue)) {
response.header(ACCESS_CONTROL_ALLOW_HEADERS, headerValue);
}
} else {
allowHeaders
.stream()
.map(StringUtils::trimLeadingWhitespace)
.forEach(header -> response.header(ACCESS_CONTROL_ALLOW_HEADERS, header));
}
}
/**
* @param maxAge The max age
* @param response The {@link MutableHttpResponse} object
*/
protected void setMaxAge(long maxAge, MutableHttpResponse<?> response) {
if (maxAge > -1) {
response.header(ACCESS_CONTROL_MAX_AGE, Long.toString(maxAge));
}
}
@NonNull
// Resolves the CORS configuration for a matched request. Order matters:
// 1) no Origin header -> empty; 2) route-level @CrossOrigin config that matches the
// origin wins; 3) otherwise fall back to global config, but only when CORS is enabled.
private Optional<CorsOriginConfiguration> getConfiguration(@NonNull HttpRequest<?> request) {
String requestOrigin = request.getOrigin().orElse(null);
if (requestOrigin == null) {
return Optional.empty();
}
Optional<CorsOriginConfiguration> originConfiguration = CrossOriginUtil.getCorsOriginConfigurationForRequest(request);
if (originConfiguration.isPresent() && matchesOrigin(originConfiguration.get(), requestOrigin)) {
return originConfiguration;
}
if (!corsConfiguration.isEnabled()) {
return Optional.empty();
}
return corsConfiguration.getConfigurations().values().stream()
.filter(config -> matchesOrigin(config, requestOrigin))
.findFirst();
}
@NonNull
// Same resolution as getConfiguration, but scans every route the router could match
// (used for preflight, where the actual route is not yet selected). Falls back to the
// global configurations only when CORS is enabled.
private Optional<CorsOriginConfiguration> getAnyConfiguration(@NonNull HttpRequest<?> request) {
String requestOrigin = request.getOrigin().orElse(null);
if (requestOrigin == null) {
return Optional.empty();
}
for (UriRouteMatch<Object, Object> routeMatch : router.findAny(request)) {
Optional<CorsOriginConfiguration> corsOriginConfiguration = CrossOriginUtil.getCorsOriginConfiguration(routeMatch);
if (corsOriginConfiguration.isPresent() && matchesOrigin(corsOriginConfiguration.get(), requestOrigin)) {
return corsOriginConfiguration;
}
}
if (!corsConfiguration.isEnabled()) {
return Optional.empty();
}
return corsConfiguration.getConfigurations().values().stream()
.filter(config -> matchesOrigin(config, requestOrigin))
.findFirst();
}
// True when the request origin matches the configuration: the regex (if configured)
// is checked first; otherwise the explicit origin list. The wildcard sentinel only
// applies when no regex is configured.
private static boolean matchesOrigin(@NonNull CorsOriginConfiguration config, String requestOrigin) {
if (config.getAllowedOriginsRegex().map(regex -> matchesOrigin(regex, requestOrigin)).orElse(false)) {
return true;
}
List<String> allowedOrigins = config.getAllowedOrigins();
return !allowedOrigins.isEmpty() && (
(config.getAllowedOriginsRegex().isEmpty() && isAny(allowedOrigins)) ||
allowedOrigins.stream().anyMatch(origin -> origin.equals(requestOrigin))
);
}
// Full-string regex match of the origin. Note: the Pattern is compiled on every call;
// callers pass per-configuration regexes, so there is no static cache here.
private static boolean matchesOrigin(@NonNull String originRegex, @NonNull String requestOrigin) {
Pattern p = Pattern.compile(originRegex);
Matcher m = p.matcher(requestOrigin);
return m.matches();
}
// Deliberate reference-identity check: only the shared ANY sentinel list means
// "any origin"; an ordinary list with the same contents does not.
private static boolean isAny(List<String> values) {
return values == CorsOriginConfiguration.ANY;
}
// Reference-identity check against the ANY_METHOD sentinel, mirroring isAny.
private static boolean isAnyMethod(List<HttpMethod> allowedMethods) {
return allowedMethods == CorsOriginConfiguration.ANY_METHOD;
}
// True when the configuration permits the given HTTP method (wildcard or explicit match).
private boolean methodAllowed(@NonNull CorsOriginConfiguration config,
@NonNull HttpMethod methodToMatch) {
List<HttpMethod> allowedMethods = config.getAllowedMethods();
return isAnyMethod(allowedMethods) || allowedMethods.stream().anyMatch(method -> method.equals(methodToMatch));
}
@NonNull
// The method to validate: for preflight requests, the method announced in
// Access-Control-Request-Method (falling back to the actual request method);
// otherwise the request method itself.
private HttpMethod methodToMatch(@NonNull HttpRequest<?> request) {
HttpMethod requestMethod = request.getMethod();
return CorsUtil.isPreflightRequest(request) ? request.getHeaders().getFirst(ACCESS_CONTROL_REQUEST_METHOD, CONVERSION_CONTEXT_HTTP_METHOD).orElse(requestMethod) : requestMethod;
}
// True when every header listed in Access-Control-Request-Headers is allowed by the
// configuration (case-insensitive, values trimmed), or the configuration allows any header.
private boolean hasAllowedHeaders(@NonNull HttpRequest<?> request, @NonNull CorsOriginConfiguration config) {
Optional<List<String>> accessControlHeaders = request.getHeaders().get(ACCESS_CONTROL_REQUEST_HEADERS, ConversionContext.LIST_OF_STRING);
List<String> allowedHeaders = config.getAllowedHeaders();
return isAny(allowedHeaders) || (
accessControlHeaders.isPresent() &&
accessControlHeaders.get().stream().allMatch(header -> allowedHeaders.stream().anyMatch(allowedHeader -> allowedHeader.equalsIgnoreCase(header.trim())))
);
}
@NonNull
// Fresh 403 response used to reject disallowed cross-origin requests.
private static MutableHttpResponse<Object> forbidden() {
return HttpResponse.status(HttpStatus.FORBIDDEN);
}
// Adds the preflight-only response headers (allow-methods, allow-headers,
// private-network, max-age), each mirrored from the corresponding request header.
private void decorateResponseWithHeadersForPreflightRequest(@NonNull HttpRequest<?> request,
@NonNull MutableHttpResponse<?> response,
@NonNull CorsOriginConfiguration config) {
HttpHeaders headers = request.getHeaders();
headers.getFirst(ACCESS_CONTROL_REQUEST_METHOD, CONVERSION_CONTEXT_HTTP_METHOD)
.ifPresent(methods -> setAllowMethods(methods, response));
headers.get(ACCESS_CONTROL_REQUEST_HEADERS, ConversionContext.LIST_OF_STRING)
.ifPresent(val -> setAllowHeaders(val, response));
headers.getFirst(ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK, ConversionContext.BOOLEAN)
.ifPresent(value -> setAllowPrivateNetwork(config, response));
setMaxAge(config.getMaxAge(), response);
}
// Adds the headers common to every allowed CORS response:
// allow-origin (echoed), Vary, expose-headers and allow-credentials.
private void decorateResponseWithHeaders(@NonNull HttpRequest<?> request,
@NonNull MutableHttpResponse<?> response,
@NonNull CorsOriginConfiguration config) {
setOrigin(request.getOrigin().orElse(null), response);
setVary(response);
setExposeHeaders(config.getExposedHeaders(), response);
setAllowCredentials(config, response);
}
@NonNull
// Builds the response to an OPTIONS preflight: 403 when validation fails,
// otherwise 200 decorated with both the preflight-specific and common CORS headers.
private MutableHttpResponse<?> handlePreflightRequest(@NonNull HttpRequest<?> request,
@NonNull CorsOriginConfiguration corsOriginConfiguration) {
boolean isValid = validatePreflightRequest(request, corsOriginConfiguration);
if (!isValid) {
return HttpResponse.status(HttpStatus.FORBIDDEN);
}
MutableHttpResponse<?> resp = HttpResponse.status(HttpStatus.OK);
decorateResponseWithHeadersForPreflightRequest(request, resp, corsOriginConfiguration);
decorateResponseWithHeaders(request, resp, corsOriginConfiguration);
return resp;
}
/**
 * Validates a CORS preflight request against the resolved origin configuration.
 * <p>
 * Rejects (returns {@code false}) when: the announced method is not permitted by the
 * configuration, the request is not actually a preflight, no route exposes the announced
 * method, a requested header is not allowed, or a private-network request is made against
 * a configuration that forbids it.
 * <p>
 * Fix: the previous {@code @Nullable} annotation was removed — it is meaningless (and
 * misleading) on a method returning a primitive {@code boolean}, which can never be null.
 *
 * @param request the incoming preflight request
 * @param config the CORS configuration resolved for the request origin
 * @return whether the preflight request is acceptable
 */
private boolean validatePreflightRequest(@NonNull HttpRequest<?> request,
                                         @NonNull CorsOriginConfiguration config) {
    Optional<HttpMethod> methodToMatchOptional = validateMethodToMatch(request, config);
    if (methodToMatchOptional.isEmpty()) {
        return false;
    }
    HttpMethod methodToMatch = methodToMatchOptional.get();
    if (!CorsUtil.isPreflightRequest(request)) {
        return false;
    }
    // the announced method must be served by at least one matching route
    List<HttpMethod> availableHttpMethods = router.findAny(request).stream().map(UriRouteMatch::getHttpMethod).toList();
    if (availableHttpMethods.stream().noneMatch(method -> method.equals(methodToMatch))) {
        return false;
    }
    if (!hasAllowedHeaders(request, config)) {
        return false;
    }
    if (request.getHeaders().contains(ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK)) {
        // header absent or false -> no private-network constraint applies
        boolean accessControlRequestPrivateNetwork = request.getHeaders().get(ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK, Boolean.class, Boolean.FALSE);
        if (accessControlRequestPrivateNetwork && !config.isAllowPrivateNetwork()) {
            return false;
        }
    }
    return true;
}
}
| CorsFilter |
java | apache__camel | components/camel-spring-parent/camel-spring-batch/src/test/java/org/apache/camel/component/spring/batch/support/CamelJobExecutionListenerTest.java | {
"start": 1270,
"end": 2963
} | class ____ extends CamelTestSupport {
// Fixtures
@Mock
JobExecution jobExecution;
// Listener under test; forwards Spring Batch job events to a Camel endpoint.
CamelJobExecutionListener jobExecutionListener;
// Camel fixtures
@Override
protected void doPostSetup() throws Exception {
// events are sent to an in-memory seda queue so tests can consume them back
jobExecutionListener = new CamelJobExecutionListener(template(), "seda:eventQueue");
}
// Tests
@Test
// beforeJob must forward the JobExecution itself as the message body.
public void shouldSendBeforeJobEvent() throws Exception {
// When
jobExecutionListener.beforeJob(jobExecution);
// Then
assertEquals(jobExecution, consumer().receiveBody("seda:eventQueue"));
}
@Test
// beforeJob must tag the message with the BEFORE event-type header.
public void shouldSetBeforeJobEventHeader() throws Exception {
// When
jobExecutionListener.beforeJob(jobExecution);
// Then
Exchange beforeJobEvent = consumer().receive("seda:eventQueue");
assertEquals(CamelJobExecutionListener.EventType.BEFORE.name(),
beforeJobEvent.getIn().getHeader(CamelJobExecutionListener.EventType.HEADER_KEY));
}
@Test
// afterJob must forward the JobExecution itself as the message body.
public void shouldSendAfterJobEvent() throws Exception {
// When
jobExecutionListener.afterJob(jobExecution);
// Then
assertEquals(jobExecution, consumer().receiveBody("seda:eventQueue"));
}
@Test
// afterJob must tag the message with the AFTER event-type header.
public void shouldSetAfterJobEventHeader() throws Exception {
// When
jobExecutionListener.afterJob(jobExecution);
// Then
Exchange beforeJobEvent = consumer().receive("seda:eventQueue");
assertEquals(CamelJobExecutionListener.EventType.AFTER.name(),
beforeJobEvent.getIn().getHeader(CamelJobExecutionListener.EventType.HEADER_KEY));
}
}
| CamelJobExecutionListenerTest |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/junit4/BaseNonConfigCoreFunctionalTestCase.java | {
"start": 16354,
"end": 18477
} | class ____ implements Work {
@Override
// JDBC Work unit that simply rolls back the current connection's transaction.
public void execute(Connection connection) throws SQLException {
connection.rollback();
}
}
/**
 * Hook for subclasses to clean up data created during the test; default does nothing.
 */
protected void cleanupTest() throws Exception {
}

/**
 * Verifies the test left no entities in the database.
 * <p>
 * Skipped when no schema was created, or when the {@code VALIDATE_DATA_CLEANUP} system
 * property is not enabled. Otherwise queries for every persistent object, prints each
 * leftover, and fails with a per-entity-name count of what remained.
 */
@AllowSysOut
protected void assertAllDataRemoved() {
	if ( !createSchema() ) {
		return; // no tables were created...
	}
	if ( !Boolean.getBoolean( VALIDATE_DATA_CLEANUP ) ) {
		return;
	}
	try ( Session tmpSession = sessionFactory.openSession() ) {
		final List<Object> list =
				tmpSession.createSelectionQuery( "select o from java.lang.Object o", Object.class )
						.getResultList();
		final Map<String, Integer> items = new HashMap<>();
		if ( !list.isEmpty() ) {
			for ( Object element : list ) {
				// Map.merge replaces the previous manual null-check-and-increment,
				// and calls getEntityName only once per element
				items.merge( tmpSession.getEntityName( element ), 1, Integer::sum );
				System.out.println( "Data left: " + element );
			}
			fail( "Data is left in the database: " + items );
		}
	}
}
/**
 * Runs the action with a Session opened from this test's SessionFactory (no transaction).
 */
public void inSession(Consumer<SessionImplementor> action) {
	log.trace( "#inSession(action)" );
	TransactionUtil2.inSession( sessionFactory(), action );
}

/**
 * Runs the action with a StatelessSession (no transaction).
 */
public void inStatelessSession(Consumer<StatelessSession> action) {
	// fixed copy-paste: previously logged "#inSession(action)"
	log.trace( "#inStatelessSession(action)" );
	TransactionUtil2.inStatelessSession( sessionFactory(), action );
}

/**
 * Runs the function with a Session and returns its result (no transaction).
 */
public <R> R fromSession(Function<SessionImplementor,R> action) {
	// fixed copy-paste: previously logged "#inSession(action)"
	log.trace( "#fromSession(action)" );
	return TransactionUtil2.fromSession( sessionFactory(), action );
}

/**
 * Runs the action with a Session inside a transaction.
 */
public void inTransaction(Consumer<SessionImplementor> action) {
	log.trace( "#inTransaction(action)" );
	TransactionUtil2.inTransaction( sessionFactory(), action );
}

/**
 * Runs the action with a StatelessSession inside a transaction.
 */
public void inStatelessTransaction(Consumer<StatelessSession> action) {
	// fixed copy-paste: previously logged "#inTransaction(action)"
	log.trace( "#inStatelessTransaction(action)" );
	TransactionUtil2.inStatelessTransaction( sessionFactory(), action );
}

/**
 * Runs the function with a Session inside a transaction and returns its result.
 */
public <R> R fromTransaction(Function<SessionImplementor,R> action) {
	// fixed copy-paste: previously logged "#inTransaction(action)"
	log.trace( "#fromTransaction(action)" );
	return TransactionUtil2.fromTransaction( sessionFactory(), action );
}
}
| RollbackWork |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openshiftai/completion/OpenShiftAiChatCompletionModelTests.java | {
"start": 653,
"end": 3189
} | class ____ extends ESTestCase {
// Fixture values shared by every test in this class.
private static final String MODEL_VALUE = "model_name";
private static final String API_KEY_VALUE = "test_api_key";
private static final String URL_VALUE = "http://www.abc.com";
private static final String ALTERNATE_MODEL_VALUE = "different_model";
// Convenience factory: model with COMPLETION task type.
public static OpenShiftAiChatCompletionModel createCompletionModel(String url, String apiKey, String modelName) {
return createModelWithTaskType(url, apiKey, modelName, TaskType.COMPLETION);
}
// Convenience factory: model with CHAT_COMPLETION task type.
public static OpenShiftAiChatCompletionModel createChatCompletionModel(String url, String apiKey, String modelName) {
return createModelWithTaskType(url, apiKey, modelName, TaskType.CHAT_COMPLETION);
}
// Builds a model with fixed entity id / service name and the given settings.
public static OpenShiftAiChatCompletionModel createModelWithTaskType(String url, String apiKey, String modelName, TaskType taskType) {
return new OpenShiftAiChatCompletionModel(
"inferenceEntityId",
taskType,
"service",
new OpenShiftAiChatCompletionServiceSettings(modelName, url, null),
new DefaultSecretSettings(new SecureString(apiKey.toCharArray()))
);
}
// Overriding with the identical model id must return the same instance (no copy).
public void testOverrideWith_UnifiedCompletionRequest_KeepsSameModelId() {
var model = createCompletionModel(URL_VALUE, API_KEY_VALUE, MODEL_VALUE);
var overriddenModel = OpenShiftAiChatCompletionModel.of(model, MODEL_VALUE);
assertThat(overriddenModel, is(sameInstance(model)));
}
// A different model id must produce a model carrying the new id.
public void testOverrideWith_UnifiedCompletionRequest_OverridesExistingModelId() {
var model = createCompletionModel(URL_VALUE, API_KEY_VALUE, MODEL_VALUE);
var overriddenModel = OpenShiftAiChatCompletionModel.of(model, ALTERNATE_MODEL_VALUE);
assertThat(overriddenModel.getServiceSettings().modelId(), is(ALTERNATE_MODEL_VALUE));
}
// A null override keeps the original model instance untouched.
public void testOverrideWith_UnifiedCompletionRequest_UsesModelFields_WhenRequestDoesNotOverride() {
var model = createCompletionModel(URL_VALUE, API_KEY_VALUE, MODEL_VALUE);
var overriddenModel = OpenShiftAiChatCompletionModel.of(model, null);
assertThat(overriddenModel, is(sameInstance(model)));
}
// Null model id plus null override also keeps the original instance.
public void testOverrideWith_UnifiedCompletionRequest_KeepsNullIfNoModelIdProvided() {
var model = createCompletionModel(URL_VALUE, API_KEY_VALUE, null);
var overriddenModel = OpenShiftAiChatCompletionModel.of(model, null);
assertThat(overriddenModel, is(sameInstance(model)));
}
}
| OpenShiftAiChatCompletionModelTests |
java | apache__camel | core/camel-management/src/main/java/org/apache/camel/management/mbean/ManagedThreadPool.java | {
"start": 1206,
"end": 5101
} | class ____ implements ManagedThreadPoolMBean {
// Owning Camel context, used for the management id/name attributes.
private final CamelContext camelContext;
// The JDK thread pool this MBean exposes; all attributes delegate to it.
private final ThreadPoolExecutor threadPool;
private final String id;
private final String sourceId;
private final String routeId;
private final String threadPoolProfileId;
// Wraps the given executor for JMX exposure; id/sourceId/routeId/profileId identify
// where in the Camel context the pool was created.
public ManagedThreadPool(CamelContext camelContext, ThreadPoolExecutor threadPool, String id,
String sourceId, String routeId, String threadPoolProfileId) {
this.camelContext = camelContext;
this.threadPool = threadPool;
this.sourceId = sourceId;
this.id = id;
this.routeId = routeId;
this.threadPoolProfileId = threadPoolProfileId;
}
// No initialization needed for this MBean.
public void init(ManagementStrategy strategy) {
// do nothing
}
public CamelContext getContext() {
return camelContext;
}
public ThreadPoolExecutor getThreadPool() {
return threadPool;
}
@Override
public String getCamelId() {
return camelContext.getName();
}
@Override
public String getCamelManagementName() {
return camelContext.getManagementName();
}
@Override
public String getId() {
return id;
}
@Override
public String getSourceId() {
return sourceId;
}
@Override
public String getRouteId() {
return routeId;
}
@Override
public String getThreadPoolProfileId() {
return threadPoolProfileId;
}
// The following attributes delegate straight to the wrapped ThreadPoolExecutor;
// setters take effect immediately on the live pool.
@Override
public int getCorePoolSize() {
return threadPool.getCorePoolSize();
}
@Override
public void setCorePoolSize(int corePoolSize) {
threadPool.setCorePoolSize(corePoolSize);
}
@Override
public int getPoolSize() {
return threadPool.getPoolSize();
}
@Override
public int getMaximumPoolSize() {
return threadPool.getMaximumPoolSize();
}
@Override
public void setMaximumPoolSize(int maximumPoolSize) {
threadPool.setMaximumPoolSize(maximumPoolSize);
}
@Override
public int getLargestPoolSize() {
return threadPool.getLargestPoolSize();
}
@Override
public int getActiveCount() {
return threadPool.getActiveCount();
}
@Override
public long getTaskCount() {
return threadPool.getTaskCount();
}
@Override
public long getCompletedTaskCount() {
return threadPool.getCompletedTaskCount();
}
/**
 * Number of tasks currently waiting in the pool's work queue; 0 when there is no queue.
 */
@Override
public long getTaskQueueSize() {
    return threadPool.getQueue() != null ? threadPool.getQueue().size() : 0;
}

/**
 * Whether the work queue is empty; a missing queue counts as empty.
 */
@Override
public boolean isTaskQueueEmpty() {
    return threadPool.getQueue() == null || threadPool.getQueue().isEmpty();
}

/**
 * Keep-alive time for idle threads, reported in seconds.
 */
@Override
public long getKeepAliveTime() {
    return threadPool.getKeepAliveTime(TimeUnit.SECONDS);
}

/**
 * Sets the keep-alive time for idle threads, in seconds.
 */
@Override
public void setKeepAliveTime(long keepAliveTimeInSeconds) {
    threadPool.setKeepAliveTime(keepAliveTimeInSeconds, TimeUnit.SECONDS);
}

@Override
public boolean isAllowCoreThreadTimeout() {
    return threadPool.allowsCoreThreadTimeOut();
}

@Override
public void setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) {
    threadPool.allowCoreThreadTimeOut(allowCoreThreadTimeout);
}

@Override
public boolean isShutdown() {
    return threadPool.isShutdown();
}

/**
 * Removes cancelled tasks from the work queue (delegates to ThreadPoolExecutor.purge).
 */
@Override
public void purge() {
    threadPool.purge();
}

/**
 * Remaining capacity of the work queue; 0 when there is no queue.
 */
@Override
public int getTaskQueueRemainingCapacity() {
    return threadPool.getQueue() != null ? threadPool.getQueue().remainingCapacity() : 0;
}
}
| ManagedThreadPool |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/search/arguments/GeoFieldArgsTest.java | {
"start": 515,
"end": 8981
} | class ____ {
@Test
// A bare builder produces a GEO field with every option off and no alias.
void testDefaultGeoFieldArgs() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("location").build();
assertThat(field.getName()).isEqualTo("location");
assertThat(field.getFieldType()).isEqualTo("GEO");
assertThat(field.getAs()).isEmpty();
assertThat(field.isSortable()).isFalse();
assertThat(field.isUnNormalizedForm()).isFalse();
assertThat(field.isNoIndex()).isFalse();
assertThat(field.isIndexEmpty()).isFalse();
assertThat(field.isIndexMissing()).isFalse();
}
@Test
// as() records the alias while name keeps the raw attribute.
void testGeoFieldArgsWithAlias() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("coordinates").as("location").build();
assertThat(field.getName()).isEqualTo("coordinates");
assertThat(field.getAs()).hasValue("location");
assertThat(field.getFieldType()).isEqualTo("GEO");
}
@Test
// sortable() alone must not imply the un-normalized (UNF) form.
void testGeoFieldArgsWithSortable() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("position").sortable().build();
assertThat(field.getName()).isEqualTo("position");
assertThat(field.isSortable()).isTrue();
assertThat(field.isUnNormalizedForm()).isFalse();
}
@Test
// sortable + unNormalizedForm can be combined.
void testGeoFieldArgsWithSortableAndUnnormalized() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("geo_point").sortable().unNormalizedForm().build();
assertThat(field.getName()).isEqualTo("geo_point");
assertThat(field.isSortable()).isTrue();
assertThat(field.isUnNormalizedForm()).isTrue();
}
@Test
void testGeoFieldArgsWithNoIndex() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("internal_location").noIndex().build();
assertThat(field.getName()).isEqualTo("internal_location");
assertThat(field.isNoIndex()).isTrue();
}
@Test
void testGeoFieldArgsWithIndexEmpty() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("optional_location").indexEmpty().build();
assertThat(field.getName()).isEqualTo("optional_location");
assertThat(field.isIndexEmpty()).isTrue();
}
@Test
void testGeoFieldArgsWithIndexMissing() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("nullable_location").indexMissing().build();
assertThat(field.getName()).isEqualTo("nullable_location");
assertThat(field.isIndexMissing()).isTrue();
}
@Test
// Every builder option set at once must be reflected by the built field.
void testGeoFieldArgsWithAllOptions() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("comprehensive_geo").as("geo").sortable()
.unNormalizedForm().noIndex().indexEmpty().indexMissing().build();
assertThat(field.getName()).isEqualTo("comprehensive_geo");
assertThat(field.getAs()).hasValue("geo");
assertThat(field.isSortable()).isTrue();
assertThat(field.isUnNormalizedForm()).isTrue();
assertThat(field.isNoIndex()).isTrue();
assertThat(field.isIndexEmpty()).isTrue();
assertThat(field.isIndexMissing()).isTrue();
}
@Test
// build(CommandArgs) must emit the RediSearch keywords for every enabled option.
void testGeoFieldArgsBuild() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("store_location").as("location").sortable()
.unNormalizedForm().indexEmpty().indexMissing().build();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
assertThat(argsString).contains("store_location");
assertThat(argsString).contains("AS");
assertThat(argsString).contains("location");
assertThat(argsString).contains("GEO");
assertThat(argsString).contains("SORTABLE");
assertThat(argsString).contains("UNF");
assertThat(argsString).contains("INDEXEMPTY");
assertThat(argsString).contains("INDEXMISSING");
}
@Test
// A minimal field emits only the name and GEO type, no optional keywords.
void testGeoFieldArgsMinimalBuild() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("simple_geo").build();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
assertThat(argsString).contains("simple_geo");
assertThat(argsString).contains("GEO");
assertThat(argsString).doesNotContain("AS");
assertThat(argsString).doesNotContain("SORTABLE");
assertThat(argsString).doesNotContain("UNF");
assertThat(argsString).doesNotContain("NOINDEX");
assertThat(argsString).doesNotContain("INDEXEMPTY");
assertThat(argsString).doesNotContain("INDEXMISSING");
}
@Test
// SORTABLE without unNormalizedForm must not emit UNF.
void testGeoFieldArgsSortableWithoutUnnormalized() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("sortable_geo").sortable().build();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
assertThat(argsString).contains("SORTABLE");
assertThat(argsString).doesNotContain("UNF"); // UNF should only appear with SORTABLE when explicitly set
}
@Test
// NOINDEX alone must not drag in any other optional keyword.
void testGeoFieldArgsWithNoIndexOnly() {
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("no_index_geo").noIndex().build();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
assertThat(argsString).contains("NOINDEX");
assertThat(argsString).doesNotContain("SORTABLE");
assertThat(argsString).doesNotContain("INDEXEMPTY");
assertThat(argsString).doesNotContain("INDEXMISSING");
}
@Test
void testBuilderMethodChaining() {
// Test that builder methods return the correct type for method chaining
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("chained_geo").as("chained_alias").sortable()
.unNormalizedForm().noIndex().indexEmpty().indexMissing().build();
assertThat(field.getName()).isEqualTo("chained_geo");
assertThat(field.getAs()).hasValue("chained_alias");
assertThat(field.isSortable()).isTrue();
assertThat(field.isUnNormalizedForm()).isTrue();
assertThat(field.isNoIndex()).isTrue();
assertThat(field.isIndexEmpty()).isTrue();
assertThat(field.isIndexMissing()).isTrue();
}
@Test
void testGeoFieldArgsTypeSpecificBehavior() {
// Test that geo fields don't have type-specific arguments beyond common ones
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("geo_field").build();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
// Should only contain field name and type, no geo-specific arguments
assertThat(argsString).contains("geo_field");
assertThat(argsString).contains("GEO");
// Should not contain any text-specific, tag-specific, or numeric-specific arguments
assertThat(argsString).doesNotContain("WEIGHT");
assertThat(argsString).doesNotContain("NOSTEM");
assertThat(argsString).doesNotContain("PHONETIC");
assertThat(argsString).doesNotContain("SEPARATOR");
assertThat(argsString).doesNotContain("CASESENSITIVE");
assertThat(argsString).doesNotContain("WITHSUFFIXTRIE");
}
@Test
void testGeoFieldArgsInheritedMethods() {
// Test that inherited methods from FieldArgs work correctly
GeoFieldArgs<String> field = GeoFieldArgs.<String> builder().name("inherited_geo").noIndex().indexEmpty().indexMissing()
.build();
assertThat(field.isNoIndex()).isTrue();
assertThat(field.isIndexEmpty()).isTrue();
assertThat(field.isIndexMissing()).isTrue();
CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
field.build(commandArgs);
String argsString = commandArgs.toString();
assertThat(argsString).contains("NOINDEX");
assertThat(argsString).contains("INDEXEMPTY");
assertThat(argsString).contains("INDEXMISSING");
}
}
| GeoFieldArgsTest |
java | apache__camel | components/camel-influxdb2/src/main/java/org/apache/camel/component/influxdb2/InfluxDb2Endpoint.java | {
"start": 2034,
"end": 2328
} | class ____ extends DefaultEndpoint {
private static final Logger LOG = LoggerFactory.getLogger(InfluxDb2Endpoint.class);
private InfluxDBClient influxDBClient;
@UriPath
@Metadata(required = true,
description = "Connection to the Influx database, of | InfluxDb2Endpoint |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3849/DeduplicateByTargetMapper.java | {
"start": 802,
"end": 2316
} | class ____ {
// Lifecycle methods declared in the context class: same (source, target) signatures as
// the interface defaults below, distinguished only by the recorded marker string.
@BeforeMapping
void deduplicateByTargetForBefore(Parent source, @MappingTarget ParentDto target) {
INVOKED_METHODS.add( "beforeMappingParentTargetInOtherClass" );
}
@BeforeMapping
void deduplicateByTargetForBefore(Parent source, @MappingTarget ChildDto target) {
INVOKED_METHODS.add( "beforeMappingChildTargetInOtherClass" );
}
@AfterMapping
void deduplicateByTarget(Parent source, @MappingTarget ParentDto target) {
INVOKED_METHODS.add( "afterMappingParentTargetInOtherClass" );
}
@AfterMapping
void deduplicateByTarget(Parent source, @MappingTarget ChildDto target) {
INVOKED_METHODS.add( "afterMappingChildTargetInOtherClass" );
}
}
// Lifecycle methods declared directly on the mapper interface; each overload records
// its invocation so tests can assert which callbacks MapStruct selected per target type.
@BeforeMapping
default void deduplicateByTargetForBefore(Parent source, @MappingTarget ParentDto target) {
INVOKED_METHODS.add( "beforeMappingParentTarget" );
}
@BeforeMapping
default void deduplicateByTargetForBefore(Parent source, @MappingTarget ChildDto target) {
INVOKED_METHODS.add( "beforeMappingChildTarget" );
}
@AfterMapping
default void deduplicateByTarget(Parent source, @MappingTarget ParentDto target) {
INVOKED_METHODS.add( "afterMappingParentTarget" );
}
@AfterMapping
default void deduplicateByTarget(Parent source, @MappingTarget ChildDto target) {
INVOKED_METHODS.add( "afterMappingChildTarget" );
}
| MappingContext |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/CommitterTestBinding.java | {
"start": 2069,
"end": 4783
} | class ____ implements
IOStatisticsSource {
/**
 * IOStatistics counter for progress events.
 */
public static final String PROGRESS_EVENTS = "progress_events";
/**
 * IOStatistics to update with progress events.
 */
private final IOStatisticsStore iostatistics;
/**
 * Job attempt ID:.
 */
private final String jobAttemptId;
/**
 * Job ID.
 */
private final JobID jobId;
/**
 * Task Attempt ID, under the job attempt.
 */
private final TaskAttemptID taskAttemptId;
/**
 * Task ID.
 */
private final TaskID taskId;
/**
 * Task attempt context for the given task Attempt.
 */
private final TaskAttemptContext taskAttemptContext;
/**
 * Construct.
 * @param conf job/task config. This is patched with the app attempt.
 * @param appAttempt application attempt.
 * @param taskNumber task number
 * @param taskAttemptNumber which attempt on this task is it
 */
CommitterTestBinding(
Configuration conf,
int appAttempt, int taskNumber, int taskAttemptNumber) {
iostatistics = createIOStatisticsStore()
.withCounters(PROGRESS_EVENTS)
.build();
// this is the job ID, with no attempt info.
jobId = JobID.forName(randomJobId());
// NOTE(review): "_ " contains a trailing space — looks like a typo for "_"; confirm
// the expected job-attempt-id format before relying on this string.
jobAttemptId = jobId.toString() + "_ " + appAttempt;
taskId = new TaskID(jobId, TaskType.MAP, taskNumber);
taskAttemptId = new TaskAttemptID(taskId,
taskAttemptNumber);
// make the attempt number visible to the committer via the job config
conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttempt);
taskAttemptContext = new TaskAttemptContextImpl(conf, taskAttemptId);
}
/**
 * Create a committer config for the given output path.
 * @param outputPath output path in destFS.
 * @return a committer config for the active task.
 */
ManifestCommitterConfig createCommitterConfig(
Path outputPath) {
return new ManifestCommitterConfig(outputPath,
TASK_COMMITTER,
taskAttemptContext,
iostatistics,
null);
}
/**
 * Create a stage config from the committer config.
 * All stats go to the local IOStatisticsStore;
 * there's a progress callback also set to increment
 * the counter {@link #PROGRESS_EVENTS}
 * @return a stage config
 */
StageConfig createStageConfig(Path outputPath) {
return createCommitterConfig(outputPath)
.createStageConfig()
.withProgressable(new ProgressCallback());
}
@Override
public IOStatisticsStore getIOStatistics() {
return iostatistics;
}
/**
* Whenever this progress callback is invoked, the progress_events
* counter is incremented. This allows for tests to verify that
* callbacks have occurred by asserting on the event counter.
*/
private final | CommitterTestBinding |
java | elastic__elasticsearch | x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/NodeDeprecationChecksTests.java | {
"start": 1755,
"end": 42238
} | class ____ extends ESTestCase {
// A removed setting that is configured nowhere must produce no deprecation issue.
public void testRemovedSettingNotSet() {
final Settings clusterSettings = Settings.EMPTY;
final Settings nodeSettings = Settings.EMPTY;
final Setting<?> removedSetting = Setting.simpleString("node.removed_setting");
final DeprecationIssue issue = NodeDeprecationChecks.checkRemovedSetting(
clusterSettings,
nodeSettings,
removedSetting,
"http://removed-setting.example.com",
"Some detail.",
DeprecationIssue.Level.CRITICAL
);
assertThat(issue, nullValue());
}
// A removed setting present in the node settings must yield a CRITICAL issue whose
// message, details and URL reference the offending setting.
public void testRemovedSetting() {
final Settings clusterSettings = Settings.EMPTY;
final Settings nodeSettings = Settings.builder().put("node.removed_setting", "value").build();
final Setting<?> removedSetting = Setting.simpleString("node.removed_setting");
final DeprecationIssue issue = NodeDeprecationChecks.checkRemovedSetting(
clusterSettings,
nodeSettings,
removedSetting,
"https://removed-setting.example.com",
"Some detail.",
DeprecationIssue.Level.CRITICAL
);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.CRITICAL));
assertThat(issue.getMessage(), equalTo("Setting [node.removed_setting] is deprecated"));
assertThat(issue.getDetails(), equalTo("Remove the [node.removed_setting] setting. Some detail."));
assertThat(issue.getUrl(), equalTo("https://removed-setting.example.com"));
}
// Affix settings (node.removed_setting.<ns>.value) must be reported with a wildcard
// in the message/details.
public void testRemovedAffixSetting() {
final Settings clusterSettings = Settings.EMPTY;
final Settings nodeSettings = Settings.builder().put("node.removed_setting.a.value", "value").build();
final Setting<?> removedSetting = Setting.affixKeySetting(
"node.removed_setting.",
"value",
key -> Setting.simpleString(key, Setting.Property.NodeScope)
);
final DeprecationIssue issue = NodeDeprecationChecks.checkRemovedSetting(
clusterSettings,
nodeSettings,
removedSetting,
"https://removed-setting.example.com",
"Some detail.",
DeprecationIssue.Level.CRITICAL
);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.CRITICAL));
assertThat(issue.getMessage(), equalTo("Setting [node.removed_setting.*.value] is deprecated"));
assertThat(issue.getDetails(), equalTo("Remove the [node.removed_setting.*.value] setting. Some detail."));
assertThat(issue.getUrl(), equalTo("https://removed-setting.example.com"));
}
// Group settings are reported using their prefix form (trailing dot).
public void testRemovedGroupSetting() {
final Settings clusterSettings = Settings.EMPTY;
final Settings nodeSettings = Settings.builder().put("node.removed_setting.v", "value").build();
final Setting<?> removedSetting = Setting.groupSetting("node.removed_setting.", Setting.Property.NodeScope);
final DeprecationIssue issue = NodeDeprecationChecks.checkRemovedSetting(
clusterSettings,
nodeSettings,
removedSetting,
"https://removed-setting.example.com",
"Some detail.",
DeprecationIssue.Level.CRITICAL
);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.CRITICAL));
assertThat(issue.getMessage(), equalTo("Setting [node.removed_setting.] is deprecated"));
assertThat(issue.getDetails(), equalTo("Remove the [node.removed_setting.] setting. Some detail."));
assertThat(issue.getUrl(), equalTo("https://removed-setting.example.com"));
}
// Several removed settings set at once must collapse into one issue listing them all;
// the input order is shuffled to prove the output listing is deterministic.
public void testMultipleRemovedSettings() {
final Settings clusterSettings = Settings.EMPTY;
final Settings nodeSettings = Settings.builder()
.put("node.removed_setting1", "value")
.put("node.removed_setting2", "value")
.build();
final Setting<?> removedSetting1 = Setting.simpleString("node.removed_setting1");
final Setting<?> removedSetting2 = Setting.simpleString("node.removed_setting2");
final DeprecationIssue issue = NodeDeprecationChecks.checkMultipleRemovedSettings(
clusterSettings,
nodeSettings,
shuffledList(List.of(removedSetting1, removedSetting2)),
"https://removed-setting.example.com",
"Some detail.",
DeprecationIssue.Level.CRITICAL
);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.CRITICAL));
assertThat(issue.getMessage(), equalTo("Settings [node.removed_setting1, node.removed_setting2] are deprecated"));
assertThat(issue.getDetails(), equalTo("Remove each setting in [node.removed_setting1, node.removed_setting2]. Some detail."));
assertThat(issue.getUrl(), equalTo("https://removed-setting.example.com"));
}
// A list-valued path.data must be flagged as a WARNING with the documented URL.
public void testMultipleDataPaths() {
final Settings settings = Settings.builder().putList("path.data", Arrays.asList("d1", "d2")).build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
final DeprecationIssue issue = NodeDeprecationChecks.checkMultipleDataPaths(settings, null, null, licenseState);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.WARNING));
assertThat(issue.getMessage(), equalTo("Specifying multiple data paths is deprecated"));
assertThat(
issue.getDetails(),
equalTo(
"The [path.data] setting contains a list of paths. Specify a single path as a string. Use RAID or other system level "
+ "features to utilize multiple disks. If multiple data paths are configured, the node will fail to start in 8.0."
)
);
String url = "https://ela.st/es-deprecation-7-multiple-paths";
assertThat(issue.getUrl(), equalTo(url));
}
// A single string-valued path.data is fine and must produce no issue.
public void testNoMultipleDataPaths() {
Settings settings = Settings.builder().put("path.data", "data").build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
final DeprecationIssue issue = NodeDeprecationChecks.checkMultipleDataPaths(settings, null, null, licenseState);
assertThat(issue, nullValue());
}
public void testDataPathsList() {
final Settings settings = Settings.builder().putList("path.data", "d1").build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
final DeprecationIssue issue = NodeDeprecationChecks.checkDataPathsList(settings, null, null, licenseState);
assertThat(issue, not(nullValue()));
assertThat(issue.getLevel(), equalTo(DeprecationIssue.Level.WARNING));
assertThat(issue.getMessage(), equalTo("Multiple data paths are not supported"));
assertThat(
issue.getDetails(),
equalTo(
"The [path.data] setting contains a list of paths. Specify a single path as a string. Use RAID or other system level "
+ "features to utilize multiple disks. If multiple data paths are configured, the node will fail to start in 8.0."
)
);
String url = "https://ela.st/es-deprecation-7-multiple-paths";
assertThat(issue.getUrl(), equalTo(url));
}
public void testNoDataPathsListDefault() {
final Settings settings = Settings.builder().build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
final DeprecationIssue issue = NodeDeprecationChecks.checkDataPathsList(settings, null, null, licenseState);
assertThat(issue, nullValue());
}
public void testSharedDataPathSetting() {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir())
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://ela.st/es-deprecation-7-shared-data-path";
assertThat(
issues,
contains(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"setting [path.shared_data] is deprecated and will be removed in a future version",
expectedUrl,
"Found shared data path configured. Discontinue use of this setting.",
false,
null
)
)
);
}
public void testCheckReservedPrefixedRealmNames() {
final Settings.Builder builder = Settings.builder();
final boolean invalidFileRealmName = randomBoolean();
final boolean invalidNativeRealmName = randomBoolean();
final boolean invalidOtherRealmName = (false == invalidFileRealmName && false == invalidNativeRealmName) || randomBoolean();
final List<String> invalidRealmNames = new ArrayList<>();
final String fileRealmName = randomAlphaOfLengthBetween(4, 12);
if (invalidFileRealmName) {
builder.put("xpack.security.authc.realms.file." + "_" + fileRealmName + ".order", -20);
invalidRealmNames.add("xpack.security.authc.realms.file." + "_" + fileRealmName);
} else {
builder.put("xpack.security.authc.realms.file." + fileRealmName + ".order", -20);
}
final String nativeRealmName = randomAlphaOfLengthBetween(4, 12);
if (invalidNativeRealmName) {
builder.put("xpack.security.authc.realms.native." + "_" + nativeRealmName + ".order", -10);
invalidRealmNames.add("xpack.security.authc.realms.native." + "_" + nativeRealmName);
} else {
builder.put("xpack.security.authc.realms.native." + nativeRealmName + ".order", -10);
}
final int otherRealmId = randomIntBetween(0, 9);
final String otherRealmName = randomAlphaOfLengthBetween(4, 12);
if (invalidOtherRealmName) {
builder.put("xpack.security.authc.realms.type_" + otherRealmId + "." + "_" + otherRealmName + ".order", 0);
invalidRealmNames.add("xpack.security.authc.realms.type_" + otherRealmId + "." + "_" + otherRealmName);
} else {
builder.put("xpack.security.authc.realms.type_" + otherRealmId + "." + otherRealmName + ".order", 0);
}
final Settings settings = builder.build();
final List<DeprecationIssue> deprecationIssues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertEquals(1, deprecationIssues.size());
final DeprecationIssue deprecationIssue = deprecationIssues.get(0);
assertEquals("Realm that start with [_] will not be permitted in a future major release.", deprecationIssue.getMessage());
assertEquals("https://ela.st/es-deprecation-7-realm-prefix", deprecationIssue.getUrl());
assertEquals(
"Found realm "
+ (invalidRealmNames.size() == 1 ? "name" : "names")
+ " with reserved prefix [_]: ["
+ Strings.collectionToDelimitedString(invalidRealmNames.stream().sorted().collect(Collectors.toList()), "; ")
+ "]. "
+ "In a future major release, node will fail to start if any realm names start with reserved prefix.",
deprecationIssue.getDetails()
);
}
void monitoringSetting(String settingKey, String value) {
Settings settings = Settings.builder().put(settingKey, value).build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-settings";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"setting [" + settingKey + "] is deprecated and will be removed after 8.0",
expectedUrl,
"the setting [" + settingKey + "] is currently set to [" + value + "], remove this setting",
false,
null
)
)
);
}
void monitoringExporterSetting(String suffix, String value) {
String settingKey = "xpack.monitoring.exporters.test." + suffix;
Settings settings = Settings.builder().put(settingKey, value).build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, licenseState)
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-settings";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [" + settingKey + "] settings are deprecated and will be removed after 8.0",
expectedUrl,
"Remove the following settings: [" + settingKey + "]",
false,
null
)
)
);
}
void monitoringExporterGroupedSetting(String suffix, String value) {
String settingKey = "xpack.monitoring.exporters.test." + suffix;
String subSettingKey = settingKey + ".subsetting";
Settings settings = Settings.builder().put(subSettingKey, value).build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, licenseState)
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-settings";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [" + settingKey + ".*] settings are deprecated and will be removed after 8.0",
expectedUrl,
"Remove the following settings: [" + subSettingKey + "]",
false,
null
)
)
);
}
void monitoringExporterSecureSetting(String suffix, String value) {
String settingKey = "xpack.monitoring.exporters.test." + suffix;
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString(settingKey, value);
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, licenseState)
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-settings";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [" + settingKey + "] settings are deprecated and will be removed after 8.0",
expectedUrl,
"Remove the following settings from the keystore: [" + settingKey + "]",
false,
null
)
)
);
}
public void testCheckMonitoringSettingHistoryDuration() {
monitoringSetting("xpack.monitoring.history.duration", "7d");
}
public void testCheckMonitoringSettingCollectIndexRecovery() {
monitoringSetting("xpack.monitoring.collection.index.recovery.active_only", "true");
}
public void testCheckMonitoringSettingCollectIndices() {
monitoringSetting("xpack.monitoring.collection.indices", "[test1,test2]");
}
public void testCheckMonitoringSettingCollectCcrTimeout() {
monitoringSetting("xpack.monitoring.collection.ccr.stats.timeout", "10s");
}
public void testCheckMonitoringSettingCollectEnrichStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.enrich.stats.timeout", "10s");
}
public void testCheckMonitoringSettingCollectIndexRecoveryStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.index.recovery.timeout", "10s");
}
public void testCheckMonitoringSettingCollectIndexStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.index.stats.timeout", "10s");
}
public void testCheckMonitoringSettingCollectMlJobStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.ml.job.stats.timeout", "10s");
}
public void testCheckMonitoringSettingCollectNodeStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.node.stats.timeout", "10s");
}
public void testCheckMonitoringSettingCollectClusterStatsTimeout() {
monitoringSetting("xpack.monitoring.collection.cluster.stats.timeout", "10s");
}
public void testCheckMonitoringSettingExportersHost() {
monitoringExporterSetting("host", "abcdef");
}
public void testCheckMonitoringSettingExportersBulkTimeout() {
monitoringExporterSetting("bulk.timeout", "10s");
}
public void testCheckMonitoringSettingExportersConnectionTimeout() {
monitoringExporterSetting("connection.timeout", "10s");
}
public void testCheckMonitoringSettingExportersConnectionReadTimeout() {
monitoringExporterSetting("connection.read_timeout", "10s");
}
public void testCheckMonitoringSettingExportersAuthUsername() {
monitoringExporterSetting("auth.username", "abcdef");
}
public void testCheckMonitoringSettingExportersAuthPass() {
monitoringExporterSecureSetting("auth.secure_password", "abcdef");
}
public void testCheckMonitoringSettingExportersSSL() {
monitoringExporterGroupedSetting("ssl", "abcdef");
}
public void testCheckMonitoringSettingExportersProxyBase() {
monitoringExporterSetting("proxy.base_path", "abcdef");
}
public void testCheckMonitoringSettingExportersSniffEnabled() {
monitoringExporterSetting("sniff.enabled", "true");
}
public void testCheckMonitoringSettingExportersHeaders() {
monitoringExporterGroupedSetting("headers", "abcdef");
}
public void testCheckMonitoringSettingExportersTemplateTimeout() {
monitoringExporterSetting("index.template.master_timeout", "10s");
}
public void testCheckMonitoringSettingExportersMasterTimeout() {
monitoringExporterSetting("wait_master.timeout", "10s");
}
public void testCheckMonitoringSettingExportersEnabled() {
monitoringExporterSetting("enabled", "true");
}
public void testCheckMonitoringSettingExportersType() {
monitoringExporterSetting("type", "local");
}
public void testCheckMonitoringSettingExportersAlertsEnabled() {
monitoringExporterSetting("cluster_alerts.management.enabled", "true");
}
public void testCheckMonitoringSettingExportersAlertsBlacklist() {
monitoringExporterSetting("cluster_alerts.management.blacklist", "[abcdef,ghijkl]");
}
public void testCheckMonitoringSettingExportersIndexNameTimeFormat() {
monitoringExporterSetting("index.name.time_format", "yyyy-mm-dd");
}
public void testCheckMonitoringSettingDecomissionAlerts() {
monitoringSetting("xpack.monitoring.migration.decommission_alerts", "true");
}
public void testCheckMonitoringSettingEsCollectionEnabled() {
monitoringSetting("xpack.monitoring.elasticsearch.collection.enabled", "true");
}
public void testCheckMonitoringSettingCollectionEnabled() {
monitoringSetting("xpack.monitoring.collection.enabled", "true");
}
public void testCheckMonitoringSettingCollectionInterval() {
monitoringSetting("xpack.monitoring.collection.interval", "10s");
}
public void testExporterUseIngestPipelineSettings() {
Settings settings = Settings.builder().put("xpack.monitoring.exporters.test.use_ingest", true).build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-exporter-use-ingest-setting";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [xpack.monitoring.exporters.test.use_ingest] settings are deprecated and will be removed after 8.0",
expectedUrl,
"Remove the following settings: [xpack.monitoring.exporters.test.use_ingest]",
false,
null
)
)
);
}
public void testExporterPipelineMasterTimeoutSetting() {
Settings settings = Settings.builder()
.put("xpack.monitoring.exporters.test.index.pipeline.master_timeout", TimeValue.timeValueSeconds(10))
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-exporter-pipeline-timeout-setting";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [xpack.monitoring.exporters.test.index.pipeline.master_timeout] "
+ "settings are deprecated and will be removed after 8.0",
expectedUrl,
"Remove the following settings: [xpack.monitoring.exporters.test.index.pipeline.master_timeout]",
false,
null
)
)
);
}
public void testExporterCreateLegacyTemplateSetting() {
Settings settings = Settings.builder().put("xpack.monitoring.exporters.test.index.template.create_legacy_templates", true).build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://ela.st/es-deprecation-7-monitoring-exporter-create-legacy-template-setting";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [xpack.monitoring.exporters.test.index.template.create_legacy_templates] settings are deprecated and will be "
+ "removed after 8.0",
expectedUrl,
"Remove the following settings: " + "[xpack.monitoring.exporters.test.index.template.create_legacy_templates]",
false,
null
)
)
);
}
public void testScriptContextCacheSetting() {
Settings settings = Settings.builder()
.put(ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING.getKey(), "use-context")
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
ScriptService.USE_CONTEXT_RATE_KEY_DEPRECATION_MESSAGE,
"https://ela.st/es-deprecation-7-script-context-cache",
"found deprecated script context caches in use, change setting to compilation rate or remove "
+ "setting to use the default",
false,
null
)
)
);
}
public void testScriptContextCompilationsRateLimitSetting() {
List<String> contexts = List.of("field", "score");
Settings settings = Settings.builder()
.put(ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING.getKey(), "use-context")
.put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING.getConcreteSettingForNamespace(contexts.get(0)).getKey(), "123/5m")
.put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING.getConcreteSettingForNamespace(contexts.get(1)).getKey(), "456/7m")
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting context-specific rate limits"
+ " [script.context.field.max_compilations_rate,script.context.score.max_compilations_rate] is deprecated."
+ " Use [script.max_compilations_rate] to rate limit the compilation of user scripts."
+ " Context-specific caches are no longer needed to prevent system scripts from triggering rate limits.",
"https://ela.st/es-deprecation-7-script-context-cache",
"[script.context.field.max_compilations_rate,script.context.score.max_compilations_rate] is deprecated and"
+ " will be removed in a future release",
false,
null
)
)
);
assertWarnings(
"[script.context.field.max_compilations_rate] setting was deprecated in Elasticsearch and will be"
+ " removed in a future release. See the deprecation documentation for the next major version.",
"[script.context.score.max_compilations_rate] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version."
);
}
public void testImplicitScriptContextCacheSetting() {
List<String> contexts = List.of("update", "filter");
Settings settings = Settings.builder()
.put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING.getConcreteSettingForNamespace(contexts.get(0)).getKey(), "123/5m")
.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getConcreteSettingForNamespace(contexts.get(1)).getKey(), "2453")
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Implicitly using the script context cache is deprecated, remove settings "
+ "[script.context.filter.cache_max_size, script.context.update.max_compilations_rate] "
+ "to use the script general cache.",
"https://ela.st/es-deprecation-7-script-context-cache",
"Remove the context-specific cache settings and set [script.max_compilations_rate] to configure the rate limit for "
+ "the general cache. If no limit is set, the rate defaults to 150 compilations per five minutes: 150/5m. "
+ "Context-specific caches are no longer needed to prevent system scripts from triggering rate limits.",
false,
null
)
)
);
assertWarnings(
"[script.context.update.max_compilations_rate] setting was deprecated in Elasticsearch and will be"
+ " removed in a future release. See the deprecation documentation for the next major version.",
"[script.context.filter.cache_max_size] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version."
);
}
public void testScriptContextCacheSizeSetting() {
List<String> contexts = List.of("filter", "update");
Settings settings = Settings.builder()
.put(ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING.getKey(), "use-context")
.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getConcreteSettingForNamespace(contexts.get(0)).getKey(), 80)
.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getConcreteSettingForNamespace(contexts.get(1)).getKey(), 200)
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting a context-specific cache size"
+ " [script.context.filter.cache_max_size,script.context.update.cache_max_size] is deprecated."
+ " Use [script.cache.max_size] to configure the size of the general cache for scripts."
+ " Context-specific caches are no longer needed to prevent system scripts from triggering rate limits.",
"https://ela.st/es-deprecation-7-script-context-cache",
"[script.context.filter.cache_max_size,script.context.update.cache_max_size] is deprecated and will be"
+ " removed in a future release",
false,
null
)
)
);
assertWarnings(
"[script.context.update.cache_max_size] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version.",
"[script.context.filter.cache_max_size] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version."
);
}
public void testScriptContextCacheExpirationSetting() {
List<String> contexts = List.of("interval", "moving-function");
Settings settings = Settings.builder()
.put(ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING.getKey(), "use-context")
.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getConcreteSettingForNamespace(contexts.get(0)).getKey(), "100m")
.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getConcreteSettingForNamespace(contexts.get(1)).getKey(), "2d")
.build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting a context-specific cache expiration"
+ " [script.context.interval.cache_expire,script.context.moving-function.cache_expire] is deprecated."
+ " Use [script.cache.expire] to configure the expiration of the general cache."
+ " Context-specific caches are no longer needed to prevent system scripts from triggering rate limits.",
"https://ela.st/es-deprecation-7-script-context-cache",
"[script.context.interval.cache_expire,script.context.moving-function.cache_expire] is deprecated and will be"
+ " removed in a future release",
false,
null
)
)
);
assertWarnings(
"[script.context.interval.cache_expire] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version.",
"[script.context.moving-function.cache_expire] setting was deprecated in Elasticsearch and will be removed in a future release."
+ " See the deprecation documentation for the next major version."
);
}
public void testEnforceDefaultTierPreferenceSetting() {
Settings settings = Settings.builder().put(DataTier.ENFORCE_DEFAULT_TIER_PREFERENCE_SETTING.getKey(), randomBoolean()).build();
List<DeprecationIssue> issues = filterChecks(
SINGLE_NODE_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
final String expectedUrl = "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.CRITICAL,
"setting [cluster.routing.allocation.enforce_default_tier_preference] is deprecated and"
+ " will not be available in a future version",
expectedUrl,
"found [cluster.routing.allocation.enforce_default_tier_preference] configured." + " Discontinue use of this setting.",
false,
null
)
)
);
}
private List<DeprecationIssue> getDeprecationIssues(Settings settings, PluginsAndModules pluginsAndModules) {
final List<DeprecationIssue> issues = filterChecks(
NodeDeprecationChecks.SINGLE_NODE_CHECKS,
c -> c.apply(settings, pluginsAndModules, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);
return issues;
}
public void testLifecyleStepMasterTimeoutSetting() {
Settings settings = Settings.builder()
.put(LifecycleSettings.LIFECYCLE_STEP_MASTER_TIMEOUT_SETTING.getKey(), randomTimeValue())
.build();
final PluginsAndModules pluginsAndModules = new PluginsAndModules(Collections.emptyList(), Collections.emptyList());
final List<DeprecationIssue> issues = getDeprecationIssues(settings, pluginsAndModules);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting [indices.lifecycle.step.master_timeout] is deprecated",
"https://ela.st/es-deprecation-8-lifecycle-master-timeout-setting",
"Remove the [indices.lifecycle.step.master_timeout] setting. As of 7.16 the timeout is always infinite.",
false,
null
);
assertThat(issues, hasItem(expected));
assertWarnings(
true,
new DeprecationWarning(
Level.WARN,
"[indices.lifecycle.step.master_timeout] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version."
)
);
}
public void testEqlEnabledSetting() {
Settings settings = Settings.builder().put("xpack.eql.enabled", randomBoolean()).build();
final PluginsAndModules pluginsAndModules = new PluginsAndModules(Collections.emptyList(), Collections.emptyList());
final List<DeprecationIssue> issues = getDeprecationIssues(settings, pluginsAndModules);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting [xpack.eql.enabled] is deprecated",
"https://ela.st/es-deprecation-7-eql-enabled-setting",
"Remove the [xpack.eql.enabled] setting. As of 7.9.2 basic license level features are always enabled.",
false,
null
);
assertThat(issues, hasItem(expected));
assertWarnings(
true,
new DeprecationWarning(
Level.WARN,
"[xpack.eql.enabled] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version."
)
);
}
public void testDynamicSettings() {
String concreteSettingKey = "xpack.monitoring.exporters." + randomAlphaOfLength(10) + ".use_ingest";
Settings clusterSettings = Settings.builder().put(concreteSettingKey, randomBoolean()).build();
Settings nodettings = Settings.builder().build();
final PluginsAndModules pluginsAndModules = new PluginsAndModules(Collections.emptyList(), Collections.emptyList());
final XPackLicenseState licenseState = new XPackLicenseState(() -> 0);
Metadata.Builder metadataBuilder = Metadata.builder();
if (randomBoolean()) {
metadataBuilder.persistentSettings(clusterSettings);
} else {
metadataBuilder.transientSettings(clusterSettings);
}
Metadata metadata = metadataBuilder.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build();
final List<DeprecationIssue> issues = filterChecks(
NodeDeprecationChecks.SINGLE_NODE_CHECKS,
c -> c.apply(nodettings, pluginsAndModules, clusterState, licenseState)
);
Map<String, Object> meta = null;
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"The [" + concreteSettingKey + "] settings are deprecated and will be removed after 8.0",
"https://ela.st/es-deprecation-7-monitoring-exporter-use-ingest-setting",
"Remove the following settings: [" + concreteSettingKey + "]",
false,
meta
);
assertThat(issues, hasItem(expected));
}
public void testCheckNodeAttrData() {
Settings settings = Settings.builder().put("node.attr.data", randomAlphaOfLength(randomIntBetween(4, 20))).build();
final PluginsAndModules pluginsAndModules = new PluginsAndModules(Collections.emptyList(), Collections.emptyList());
final List<DeprecationIssue> issues = getDeprecationIssues(settings, pluginsAndModules);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Setting node.attributes.data is not recommended",
"https://ela.st/es-deprecation-7-node-attr-data-setting",
"One or more of your nodes is configured with node.attributes.data settings. This is typically used to create a "
+ "hot/warm or tiered architecture, based on legacy guidelines. Data tiers are a recommended replacement for tiered "
+ "architecture clusters.",
false,
null
);
assertThat(issues, hasItem(expected));
}
static <T> List<DeprecationIssue> filterChecks(List<T> checks, Function<T, DeprecationIssue> mapper) {
return checks.stream().map(mapper).filter(Objects::nonNull).toList();
}
}
| NodeDeprecationChecksTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/asm/TestASM_boolean.java | {
"start": 149,
"end": 446
} | class ____ extends TestCase {
public void test_asm() throws Exception {
V0 v = new V0();
String text = JSON.toJSONString(v);
V0 v1 = JSON.parseObject(text, V0.class);
Assert.assertEquals(v.isValue(), v1.isValue());
}
public static | TestASM_boolean |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamBranch.java | {
"start": 1171,
"end": 1670
} | class ____<K, V> implements ProcessorSupplier<K, V, K, V> {
private final List<Predicate<? super K, ? super V>> predicates;
private final List<String> childNodes;
KStreamBranch(final List<Predicate<? super K, ? super V>> predicates,
final List<String> childNodes) {
this.predicates = predicates;
this.childNodes = childNodes;
}
@Override
public Processor<K, V, K, V> get() {
return new KStreamBranchProcessor();
}
private | KStreamBranch |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/visitor/MySqlParameterizedOutputVisitorTest.java | {
"start": 855,
"end": 8274
} | class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SELECT appsheetserialno FROM app_trans WHERE nodeid = _gbk '619' "
+ " AND alino = _gbk '2013110900031031001700thfund00163619'"
+ " AND apserialno = _gbk '201405120002300002170013205458'";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "SELECT p.AppId, p.PlayerId, p.Nickname, p.CreateTime FROM acc_playeruser AS pu INNER JOIN acc_player AS p ON (pu.AppId = p.AppId AND pu.PlayerId = p.PlayerId) WHERE pu.UserId=x'881A58F6204D4E048F66E41596A66A57';";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "SET @now := NOW(); INSERT characters (uid, name, race, career, sex, creation) VALUES (4068548, '', 58, 0, 0, @now); SET @id := LAST_INSERT_ID(); INSERT characters_data (character_id, data, creation) VALUES (@id, '\0\0\0\0\0\0\0\04068548\0\0\0:\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0<\0\0\0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0d\0\0\0__\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', @now); SELECT @id; ";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "INSERT INTO qrtz_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP, DESCRIPTION, JOB_CLASS_NAME, IS_DURABLE, IS_NONCONCURRENT, IS_UPDATE_DATA, REQUESTS_RECOVERY, JOB_DATA) VALUES('DefaultQuartzScheduler', 'taobao.item.recommend.delete_368815794_2_35391685928', 'tasks', null, 'cn.agooo.job.TasksJob', 0, 0, 0, 0, x'ACED0005737200156F72672E71756172747A2E4A6F62446174614D61709FB083E8BFA9B0CB020000787200266F72672E71756172747A2E7574696C732E537472696E674B65794469727479466C61674D61708208E8C3FBC55D280200015A0013616C6C6F77735472616E7369656E74446174617872001D6F72672E71756172747A2E7574696C732E4469727479466C61674D617013E62EAD28760ACE0200025A000564697274794C00036D617074000F4C6A6176612F7574696C2F4D61703B787001737200116A6176612E7574696C2E486173684D61700507DAC1C31660D103000246000A6C6F6164466163746F724900097468726573686F6C6478703F4000000000000C770800000010000000017400077461736B7349647372000E6A6176612E6C616E672E4C6F6E673B8BE490CC8F23DF0200014A000576616C7565787200106A6176612E6C616E672E4E756D62657286AC951D0B94E08B020000787000000000002063227800')";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "SELECT * FROM sync_contactsdata WHERE __id=x'2EEE5AE7CB0E4AF697D966AE8BF046B8'";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "select TID,POSTFEE,RECEIVERADDRESS from tradegroup003 where (0 = 650954981082695 or tid=650954981082695) And SELLERNICK='______________' And BUYERNICK='yingge7' and CREATED > date_sub(now(), interval 48 hour) and STATUS = 'WAIT_BUYER_PAY' and func_isNotFollowgroup003(tradegroup003.tid,'______________') = 0";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
sql = "SHOW COLUMNS FROM `pms_purchase_ops`/*20140512152820##%2Fbase.php%3Fshopid%3D%26module%3Dpms%26action%3DqualityItem%26op_id%3D756%26params%3Dcase%253Aupdate%252Ctable%253Aquality%252Corder_id%253A201405090006DL%252Creceive_no%253AQCI01201405090006DL%252Creceive_treat%253AALL_DONE%260.5652460628381133*/;";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, DbType.mysql));
System.out.println("-----------------------");
}
public void test_1() throws Exception {
String sql = "create or replace view vtmp_log_node_204180 as select `twf_log_group_user`.`uni_id` as `user_id`, `twf_log_group_user`.`control_group_type` from `twf_log_group_user` where `twf_log_group_user`.`subjob_id` = 204180";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, JdbcConstants.MYSQL));
System.out.println("-----------------------");
sql = " create view V_CustomerGoodsNf as select a.*,b.GoodsNO,b.GoodsName,b.SpecName,c.CustomerName,c.NickName,c.Tel,c.Email, StatusExt = (CASE a.curStatus WHEN (0) THEN ('待通知') WHEN (1) THEN ('已通知') WHEN (2) THEN ('已过期') WHEN (3) THEN ('被取消')END) from G_Customer_GoodsNotify a left outer join V_GoodsListBySpec b on a.GoodsID=b.GoodsID and a.SpecID=b.SpecID left outer join G_Customer_CustomerList c on a.CustomerID=c.CustomerID";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, JdbcConstants.SQL_SERVER));
System.out.println("-----------------------");
sql = "CREATE view V_API_GoodsMatch as SELECT a.SpecName, a.GoodsNO, a.GoodsName, bMatch = CONVERT(bit, (CASE isnull(b.Numiid, '') WHEN ('') THEN (0) ELSE (1) END)), issys1 = (CASE isnull(b.issys, '') WHEN ('') THEN ('无') WHEN (0) THEN ('未同步') WHEN (1) THEN ('待同步') WHEN (2) THEN ('同步失败') WHEN (3) THEN ('同步成功') END), bFixNum = isnull(b.bFixNum, 0), bVirNum = isnull(b.bVirNum, 0), issys = isnull(b.issys, 0), FixNum = isnull(b.FixNum, 0), VirNumBase = isnull(b.VirNumBase, 0), VirNumInc = isnull(b.VirNumInc, 0), b.TBName, b.TBSku, b.UpdateTime, b.syscount, b.sysLog, c.ShopName, b.TBOuterID, b.SKUOuterID, d .FlagName, b.GoodsID, b.SpecID, b.Numiid, b.Skuid, b.BTBGoods, b.sysGoodsType, b.ID, bstop = CONVERT(bit, isnull(b.bstop, 0)), b.bSingletb, b.SingleNumPer, b.VirNumTop, isnull(b.goodstype, '0') AS goodstype, CASE b.SpecID WHEN - 2 THEN 1 ELSE 0 END AS pcbs FROM V_GoodsSpec a LEFT OUTER JOIN G_API_SysMatch b ON a.GoodsID = b.GoodsID AND CASE b.SpecID WHEN - 2 THEN 1 ELSE a.specid END = CASE b.SpecID WHEN - 2 THEN 1 ELSE b.SpecID END AND b.GoodsType = 0 LEFT OUTER JOIN G_Cfg_ShopList c ON b.ShopID = c.ShopID LEFT OUTER JOIN dbo.G_Cfg_RecordFlag d ON a.FlagID = d .FlagID UNION ALL SELECT '' AS SpecName, a.GoodsNO, a.GoodsName, bMatch = CONVERT(bit, (CASE isnull(b.Numiid, '') WHEN ('') THEN (0) ELSE (1) END)), issys1 = (CASE isnull(b.issys, '') WHEN ('') THEN ('无') WHEN (0) THEN ('未同步') WHEN (1) THEN ('待同步') WHEN (2) THEN ('同步失败') WHEN (3) THEN ('同步成功') END), bFixNum = isnull(b.bFixNum, 0), bVirNum = isnull(b.bVirNum, 0), issys = isnull(b.issys, 0), FixNum = isnull(b.FixNum, 0), VirNumBase = isnull(b.VirNumBase, 0), VirNumInc = isnull(b.VirNumInc, 0), b.TBName, b.TBSku, b.UpdateTime, b.syscount, b.sysLog, c.ShopName, b.TBOuterID, b.SKUOuterID, '' AS FlagName, b.GoodsID, b.SpecID, b.Numiid, b.Skuid, b.BTBGoods, b.sysGoodsType, b.ID, bstop = CONVERT(bit, isnull(b.bstop, 0)), b.bSingletb, b.SingleNumPer, b.VirNumTop, b.goodstype, 0 AS pcbs FROM g_goods_goodslistfit a LEFT 
OUTER JOIN g_api_sysma";
System.out.println(ParameterizedOutputVisitorUtils.parameterize(sql, JdbcConstants.SQL_SERVER));
System.out.println("-----------------------");
}
}
| MySqlParameterizedOutputVisitorTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsAction.java | {
"start": 4671,
"end": 6830
} | class ____ extends AbstractGetResourcesRequest {
public static final ParseField INCLUDE = new ParseField("include");
public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match");
public static final ParseField TAGS = new ParseField("tags");
private final Includes includes;
private final List<String> tags;
public Request(String id) {
this(id, null, null);
}
public Request(String id, List<String> tags, Set<String> includes) {
setResourceId(id);
setAllowNoResources(true);
this.tags = tags == null ? Collections.emptyList() : tags;
this.includes = new Includes(includes);
}
public Request(StreamInput in) throws IOException {
super(in);
this.includes = new Includes(in);
this.tags = in.readStringCollectionAsList();
}
@Override
public String getResourceIdField() {
return TrainedModelConfig.MODEL_ID.getPreferredName();
}
public List<String> getTags() {
return tags;
}
public Includes getIncludes() {
return includes;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
this.includes.writeTo(out);
out.writeStringCollection(tags);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), includes, tags);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return super.equals(obj) && this.includes.equals(other.includes) && Objects.equals(tags, other.tags);
}
@Override
public String getCancelableTaskDescription() {
return format("get_trained_models[%s]", getResourceId());
}
}
public static | Request |
java | quarkusio__quarkus | extensions/mailer/deployment/src/test/java/io/quarkus/mailer/MailTemplateValidationTest.java | {
"start": 1185,
"end": 1407
} | class ____ {
@Inject
MailTemplate doesNotExist;
Uni<Void> send() {
return doesNotExist.to("quarkus@quarkus.io").subject("Test").data("name", "Foo").send();
}
}
}
| MailTemplates |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/builder/DiffBuilder.java | {
"start": 6709,
"end": 14151
} | interface ____<T> extends Supplier<T>, Serializable {
// empty
}
static final String TO_STRING_FORMAT = "%s differs from %s";
/**
* Constructs a new {@link Builder}.
*
* @param <T> type of the left and right object.
* @return a new {@link Builder}.
* @since 3.15.0
*/
public static <T> Builder<T> builder() {
return new Builder<>();
}
private final List<Diff<?>> diffs;
private final boolean equals;
private final T left;
private final T right;
private final ToStringStyle style;
private final String toStringFormat;
/**
* Constructs a builder for the specified objects with the specified style.
*
* <p>
* If {@code lhs == rhs} or {@code lhs.equals(rhs)} then the builder will not evaluate any calls to {@code append(...)} and will return an empty
* {@link DiffResult} when {@link #build()} is executed.
* </p>
*
* <p>
* This delegates to {@link #DiffBuilder(Object, Object, ToStringStyle, boolean)} with the testTriviallyEqual flag enabled.
* </p>
*
* @param left {@code this} object.
* @param right the object to diff against.
* @param style the style to use when outputting the objects, {@code null} uses the default.
* @throws NullPointerException if {@code lhs} or {@code rhs} is {@code null}.
* @deprecated Use {@link Builder}.
*/
@Deprecated
public DiffBuilder(final T left, final T right, final ToStringStyle style) {
this(left, right, style, true);
}
/**
* Constructs a builder for the specified objects with the specified style.
*
* <p>
* If {@code lhs == rhs} or {@code lhs.equals(rhs)} then the builder will not evaluate any calls to {@code append(...)} and will return an empty
* {@link DiffResult} when {@link #build()} is executed.
* </p>
*
* @param left {@code this} object.
* @param right the object to diff against.
* @param style the style to use when outputting the objects, {@code null} uses the default.
* @param testObjectsEquals If true, this will test if lhs and rhs are the same or equal. All of the append(fieldName, lhs, rhs) methods will abort without
* creating a field {@link Diff} if the trivially equal test is enabled and returns true. The result of this test is never changed
* throughout the life of this {@link DiffBuilder}.
* @throws NullPointerException if {@code lhs} or {@code rhs} is {@code null}.
* @since 3.4
* @deprecated Use {@link Builder}.
*/
@Deprecated
public DiffBuilder(final T left, final T right, final ToStringStyle style, final boolean testObjectsEquals) {
this(left, right, style, testObjectsEquals, TO_STRING_FORMAT);
}
private DiffBuilder(final T left, final T right, final ToStringStyle style, final boolean testObjectsEquals, final String toStringFormat) {
this.left = Objects.requireNonNull(left, "left");
this.right = Objects.requireNonNull(right, "right");
this.diffs = new ArrayList<>();
this.toStringFormat = toStringFormat;
this.style = style != null ? style : ToStringStyle.DEFAULT_STYLE;
// Don't compare any fields if objects equal
this.equals = testObjectsEquals && Objects.equals(left, right);
}
private <F> DiffBuilder<T> add(final String fieldName, final SerializableSupplier<F> left, final SerializableSupplier<F> right, final Class<F> type) {
diffs.add(new SDiff<>(fieldName, left, right, type));
return this;
}
/**
* Tests if two {@code boolean}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code boolean}.
* @param rhs the right-hand side {@code boolean}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final boolean lhs, final boolean rhs) {
return equals || lhs == rhs ? this : add(fieldName, () -> Boolean.valueOf(lhs), () -> Boolean.valueOf(rhs), Boolean.class);
}
/**
* Tests if two {@code boolean[]}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code boolean[]}.
* @param rhs the right-hand side {@code boolean[]}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final boolean[] lhs, final boolean[] rhs) {
return equals || Arrays.equals(lhs, rhs) ? this : add(fieldName, () -> ArrayUtils.toObject(lhs), () -> ArrayUtils.toObject(rhs), Boolean[].class);
}
/**
* Tests if two {@code byte}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code byte}.
* @param rhs the right-hand side {@code byte}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final byte lhs, final byte rhs) {
return equals || lhs == rhs ? this : add(fieldName, () -> Byte.valueOf(lhs), () -> Byte.valueOf(rhs), Byte.class);
}
/**
* Tests if two {@code byte[]}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code byte[]}.
* @param rhs the right-hand side {@code byte[]}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final byte[] lhs, final byte[] rhs) {
return equals || Arrays.equals(lhs, rhs) ? this : add(fieldName, () -> ArrayUtils.toObject(lhs), () -> ArrayUtils.toObject(rhs), Byte[].class);
}
/**
* Tests if two {@code char}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code char}.
* @param rhs the right-hand side {@code char}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final char lhs, final char rhs) {
return equals || lhs == rhs ? this : add(fieldName, () -> Character.valueOf(lhs), () -> Character.valueOf(rhs), Character.class);
}
/**
* Tests if two {@code char[]}s are equal.
*
* @param fieldName the field name.
* @param lhs the left-hand side {@code char[]}.
* @param rhs the right-hand side {@code char[]}.
* @return {@code this} instance.
* @throws NullPointerException if field name is {@code null}.
*/
public DiffBuilder<T> append(final String fieldName, final char[] lhs, final char[] rhs) {
return equals || Arrays.equals(lhs, rhs) ? this : add(fieldName, () -> ArrayUtils.toObject(lhs), () -> ArrayUtils.toObject(rhs), Character[].class);
}
/**
* Appends diffs from another {@link DiffResult}.
*
* <p>
* Useful this method to compare properties which are themselves Diffable and would like to know which specific part of it is different.
* </p>
*
* <pre>{@code
* public | SerializableSupplier |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 558805,
"end": 561873
} | class ____ extends YamlDeserializerBase<LangChain4jParagraphTokenizerDefinition> {
public LangChain4jParagraphTokenizerDefinitionDeserializer() {
super(LangChain4jParagraphTokenizerDefinition.class);
}
@Override
protected LangChain4jParagraphTokenizerDefinition newInstance() {
return new LangChain4jParagraphTokenizerDefinition();
}
@Override
protected boolean setProperty(LangChain4jParagraphTokenizerDefinition target,
String propertyKey, String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "maxOverlap": {
String val = asText(node);
target.setMaxOverlap(val);
break;
}
case "maxTokens": {
String val = asText(node);
target.setMaxTokens(val);
break;
}
case "modelName": {
String val = asText(node);
target.setModelName(val);
break;
}
case "tokenizerType": {
String val = asText(node);
target.setTokenizerType(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "langChain4jSentenceTokenizer",
types = org.apache.camel.model.tokenizer.LangChain4jSentenceTokenizerDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "LangChain4J Tokenizer with sentence splitter",
description = "Camel AI: Tokenizer for splitting by sentences.",
deprecated = false,
properties = {
@YamlProperty(name = "id", type = "string", description = "The id of this node", displayName = "Id"),
@YamlProperty(name = "maxOverlap", type = "number", required = true, description = "Sets the maximum number of tokens that can overlap in each segment", displayName = "Max Overlap"),
@YamlProperty(name = "maxTokens", type = "number", required = true, description = "Sets the maximum number of tokens on each segment", displayName = "Max Tokens"),
@YamlProperty(name = "modelName", type = "string", description = "Sets the model name", displayName = "Model Name"),
@YamlProperty(name = "tokenizerType", type = "enum:OPEN_AI,AZURE,QWEN", description = "Sets the tokenizer type", displayName = "Tokenizer Type")
}
)
public static | LangChain4jParagraphTokenizerDefinitionDeserializer |
java | resilience4j__resilience4j | resilience4j-micrometer/src/main/java/io/github/resilience4j/micrometer/TimerConfig.java | {
"start": 4038,
"end": 4537
} | class ____.
* @return the TimerConfig.Builder
*/
public Builder onFailureTagResolver(@Nullable Function<Throwable, String> onFailureTagResolver) {
this.onFailureTagResolver = onFailureTagResolver;
return this;
}
/**
* Builds a TimerConfig
*
* @return the TimerConfig
*/
public TimerConfig build() {
return new TimerConfig(metricNames, onFailureTagResolver);
}
}
}
| name |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/SmallRyeGraphQLContextTestCase.java | {
"start": 4102,
"end": 7828
} | class ____ {
@Inject
Instance<Context> context;
@Inject
ManagedExecutor managedExecutor;
@Query
public CompletableFuture<Dummy> testAsyncQuery() {
final String executionId = context.get().getExecutionId();
assertNotNull(executionId);
this.executionId = context.get().getExecutionId();
assertEquals("testAsyncQuery", context.get().getFieldName());
Dummy result = new Dummy();
result.setStringField("OK");
return CompletableFuture.completedFuture(result);
}
@Query
public Dummy testManualPropagation() throws ExecutionException, InterruptedException {
final String executionId = context.get().getExecutionId();
assertNotNull(executionId);
CompletableFuture<Integer> numberFuture = CompletableFuture.supplyAsync(() -> {
assertNotNull(context);
assertEquals(executionId, context.get().getExecutionId());
assertEquals("testManualPropagation", context.get().getFieldName());
return 42;
}, managedExecutor);
CompletableFuture<String> stringFuture = CompletableFuture.supplyAsync(() -> {
assertNotNull(context, "Context must to be available inside an async task");
assertEquals(executionId, context.get().getExecutionId(), "Execution ID must be the same inside an async task");
assertEquals("testManualPropagation", context.get().getFieldName());
return "OK";
}, managedExecutor);
Dummy result = new Dummy();
result.setNumberField(numberFuture.get());
result.setStringField(stringFuture.get());
return result;
}
private volatile String executionId;
@Query
public Dummy testSourceMethods() {
this.executionId = context.get().getExecutionId();
assertEquals("testSourceMethods", context.get().getFieldName());
return new Dummy();
}
@Name("stringBatchSource")
public List<String> stringBatchSource(@Source List<Dummy> source) {
assertEquals("stringBatchSource", context.get().getFieldName());
assertEquals(this.executionId, context.get().getExecutionId(), "Wrong execution ID propagated from the root query");
List<String> result = new ArrayList<>();
for (Dummy dummy : source) {
result.add("hello");
}
return result;
}
@Name("stringBatchSourceAsync")
public CompletionStage<List<String>> stringBatchSourceAsync(@Source List<Dummy> source) {
assertEquals("stringBatchSourceAsync", context.get().getFieldName());
assertEquals(this.executionId, context.get().getExecutionId(), "Wrong execution ID propagated from the root query");
List<String> result = new ArrayList<>();
for (Dummy dummy : source) {
result.add("hello");
}
return CompletableFuture.completedFuture(result);
}
@Name("numberBatchSource")
public List<Integer> numberBatchSource(@Source List<Dummy> source) {
assertEquals("numberBatchSource", context.get().getFieldName());
assertEquals(this.executionId, context.get().getExecutionId(), "Wrong execution ID propagated from the root query");
List<Integer> result = new ArrayList<>();
for (Dummy dummy : source) {
result.add(123);
}
return result;
}
}
public static | ContextPropagationResource |
java | google__guava | android/guava-tests/test/com/google/common/collect/MapsTransformValuesTest.java | {
"start": 1052,
"end": 1559
} | class ____ extends AbstractMapsTransformValuesTest {
@Override
protected Map<String, String> makeEmptyMap() {
return transformValues(new HashMap<String, String>(), Functions.<String>identity());
}
@Override
protected Map<String, String> makePopulatedMap() {
Map<String, Integer> underlying = new HashMap<>();
underlying.put("a", 1);
underlying.put("b", 2);
underlying.put("c", 3);
return transformValues(underlying, Functions.toStringFunction());
}
}
| MapsTransformValuesTest |
java | alibaba__nacos | sys/src/main/java/com/alibaba/nacos/sys/utils/DiskUtils.java | {
"start": 22393,
"end": 23418
} | class ____ implements AutoCloseable {
private final org.apache.commons.io.LineIterator target;
/**
* Constructs an iterator of the lines for a <code>Reader</code>.
*
* @param target {@link org.apache.commons.io.LineIterator}
*/
LineIterator(org.apache.commons.io.LineIterator target) {
this.target = target;
}
public boolean hasNext() {
return target.hasNext();
}
public String next() {
return target.next();
}
public String nextLine() {
return target.nextLine();
}
@Override
public void close() throws IOException {
target.close();
}
public void remove() {
target.remove();
}
public void forEachRemaining(Consumer<? super String> action) {
target.forEachRemaining(action);
}
}
}
| LineIterator |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/RangeNotSatisfiableEOFException.java | {
"start": 1150,
"end": 1353
} | class ____ extends EOFException {
public RangeNotSatisfiableEOFException(
String operation,
Exception cause) {
super(operation);
initCause(cause);
}
}
| RangeNotSatisfiableEOFException |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/sink/TestSinkV2.java | {
"start": 19662,
"end": 20317
} | class ____<CommT> implements Committer<CommT>, Serializable {
private boolean isClosed;
public DefaultCommitter() {
this.isClosed = false;
}
@Override
public void commit(Collection<CommitRequest<CommT>> committables) {}
public void close() throws Exception {
isClosed = true;
}
public boolean isClosed() {
return isClosed;
}
public void init() {
// context is not used for this implementation
}
}
/** A {@link Committer} that always re-commits the committables data it received. */
static | DefaultCommitter |
java | quarkusio__quarkus | extensions/scheduler/deployment/src/test/java/io/quarkus/scheduler/test/SuccessfulExecutionTest.java | {
"start": 452,
"end": 1150
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Jobs.class));
static final CountDownLatch SUCCESS_LATCH = new CountDownLatch(2);
static SuccessfulExecution successfulExecution;
@Test
public void testTriggerErrorStatus() throws InterruptedException {
assertTrue(SUCCESS_LATCH.await(5, TimeUnit.SECONDS));
}
void observeFailedExecution(@Observes SuccessfulExecution successfulExecution) {
SuccessfulExecutionTest.successfulExecution = successfulExecution;
SUCCESS_LATCH.countDown();
}
static | SuccessfulExecutionTest |
java | apache__camel | components/camel-platform-http-vertx/src/main/java/org/apache/camel/component/platform/http/vertx/VertxPlatformHttpServer.java | {
"start": 2382,
"end": 2614
} | class ____ a basic Vert.x Web based server that can be used by the {@link VertxPlatformHttpEngine} on
* platforms that do not provide Vert.x based http services.
*/
@ManagedResource(description = "Vert.x HTTP Server")
public | implement |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/flogger/FloggerRequiredModifiers.java | {
"start": 7538,
"end": 8015
} | class ____ a final backstop.
TreePath owner =
state.findPathToEnclosing(ClassTree.class, MethodTree.class, VariableTree.class);
Tree parent = owner.getLeaf();
Tree grandparent = owner.getParentPath().getLeaf();
boolean isLoggerField =
parent instanceof VariableTree
&& grandparent instanceof ClassTree
&& ASTHelpers.isSameType(loggerType, ASTHelpers.getType(parent), state);
if (isLoggerField) {
// Declared as a | as |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scripting/support/ResourceScriptSourceTests.java | {
"start": 1067,
"end": 3930
} | class ____ {
@Test
void doesNotPropagateFatalExceptionOnResourceThatCannotBeResolvedToAFile() throws Exception {
Resource resource = mock();
given(resource.lastModified()).willThrow(new IOException());
ResourceScriptSource scriptSource = new ResourceScriptSource(resource);
long lastModified = scriptSource.retrieveLastModifiedTime();
assertThat(lastModified).isEqualTo(0);
}
@Test
void beginsInModifiedState() {
Resource resource = mock();
ResourceScriptSource scriptSource = new ResourceScriptSource(resource);
assertThat(scriptSource.isModified()).isTrue();
}
@Test
void lastModifiedWorksWithResourceThatDoesNotSupportFileBasedReading() throws Exception {
Resource resource = mock();
// underlying File is asked for so that the last modified time can be checked...
// And then mock the file changing; i.e. the File says it has been modified
given(resource.lastModified()).willReturn(100L, 100L, 200L);
// does not support File-based reading; delegates to InputStream-style reading...
//resource.getFile();
//mock.setThrowable(new FileNotFoundException());
given(resource.getInputStream()).willReturn(InputStream.nullInputStream());
ResourceScriptSource scriptSource = new ResourceScriptSource(resource);
assertThat(scriptSource.isModified()).as("ResourceScriptSource must start off in the 'isModified' state (it obviously isn't).").isTrue();
scriptSource.getScriptAsString();
assertThat(scriptSource.isModified()).as("ResourceScriptSource must not report back as being modified if the underlying File resource is not reporting a changed lastModified time.").isFalse();
// Must now report back as having been modified
assertThat(scriptSource.isModified()).as("ResourceScriptSource must report back as being modified if the underlying File resource is reporting a changed lastModified time.").isTrue();
}
@Test
void lastModifiedWorksWithResourceThatDoesNotSupportFileBasedAccessAtAll() throws Exception {
Resource resource = new ByteArrayResource(new byte[0]);
ResourceScriptSource scriptSource = new ResourceScriptSource(resource);
assertThat(scriptSource.isModified()).as("ResourceScriptSource must start off in the 'isModified' state (it obviously isn't).").isTrue();
scriptSource.getScriptAsString();
assertThat(scriptSource.isModified()).as("ResourceScriptSource must not report back as being modified if the underlying File resource is not reporting a changed lastModified time.").isFalse();
// Must now continue to report back as not having been modified 'cos the Resource does not support access as a File (and so the lastModified date cannot be determined).
assertThat(scriptSource.isModified()).as("ResourceScriptSource must not report back as being modified if the underlying File resource is not reporting a changed lastModified time.").isFalse();
}
}
| ResourceScriptSourceTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/execution/librarycache/TestingLibraryCacheManager.java | {
"start": 1010,
"end": 1950
} | class ____ implements LibraryCacheManager {
private final Function<JobID, LibraryCacheManager.ClassLoaderLease>
registerOrRetainClassLoaderFunction;
private final Runnable shutdownRunnable;
private TestingLibraryCacheManager(
Function<JobID, LibraryCacheManager.ClassLoaderLease>
registerOrRetainClassLoaderFunction,
Runnable shutdownRunnable) {
this.registerOrRetainClassLoaderFunction = registerOrRetainClassLoaderFunction;
this.shutdownRunnable = shutdownRunnable;
}
@Override
public LibraryCacheManager.ClassLoaderLease registerClassLoaderLease(JobID jobId) {
return registerOrRetainClassLoaderFunction.apply(jobId);
}
@Override
public void shutdown() {
shutdownRunnable.run();
}
public static Builder newBuilder() {
return new Builder();
}
public static final | TestingLibraryCacheManager |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallUpdateTest1.java | {
"start": 784,
"end": 1271
} | class ____ extends TestCase {
private String sql = "UPDATE T_USER SET FNAME = ? WHERE FID = ?";
private WallConfig config = new WallConfig();
protected void setUp() throws Exception {
config.setUpdateAllow(false);
}
public void testMySql() throws Exception {
assertFalse(WallUtils.isValidateMySql(sql, config));
}
public void testORACLE() throws Exception {
assertFalse(WallUtils.isValidateOracle(sql, config));
}
}
| WallUpdateTest1 |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java | {
"start": 3331,
"end": 33574
} | class ____ extends ESAllocationTestCase {
private static final String MIN_SUPPORTED_LUCENE_VERSION = IndexVersions.MINIMUM_COMPATIBLE.luceneVersion().toString();
private final ShardId shardId = new ShardId("test", "_na_", 0);
private final DiscoveryNode node1 = newNode("node1");
private final DiscoveryNode node2 = newNode("node2");
private final DiscoveryNode node3 = newNode("node3");
private TestAllocator testAllocator;
@Before
public void buildTestAllocator() {
this.testAllocator = new TestAllocator();
}
private void allocateAllUnassigned(final RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator();
while (iterator.hasNext()) {
testAllocator.allocateUnassigned(iterator.next(), allocation, iterator);
}
}
/**
* Verifies that when we are still fetching data in an async manner, the replica shard moves to ignore unassigned.
*/
public void testNoAsyncFetchData() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
testAllocator.clean();
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
/**
* Verifies that on index creation, we don't go and fetch data, but keep the replica shard unassigned to let
* the shard allocator to allocate it. There isn't a copy around to find anyhow.
*/
public void testNoAsyncFetchOnIndexCreation() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(
yesAllocationDeciders(),
Settings.EMPTY,
UnassignedInfo.Reason.INDEX_CREATED
);
testAllocator.clean();
allocateAllUnassigned(allocation);
assertThat(testAllocator.getFetchDataCalledAndClean(), equalTo(false));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(1));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
}
/**
* Verifies that for anything but index creation, fetch data ends up being called, since we need to go and try
* and find a better copy for the shard.
*/
public void testAsyncFetchOnAnythingButIndexCreation() {
UnassignedInfo.Reason reason = RandomPicks.randomFrom(
random(),
EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED))
);
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason);
testAllocator.clean();
allocateAllUnassigned(allocation);
assertThat("failed with reason " + reason, testAllocator.getFetchDataCalledAndClean(), equalTo(true));
}
/**
* Verifies that when there is a full match (syncId and files) we allocate it to matching node.
*/
public void testSimpleFullMatchAllocation() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
allocateAllUnassigned(allocation);
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(nodeToMatch.getId())
);
}
/**
* Verifies that when there is no sync id match but files match, we allocate it to matching node.
*/
public void testFileChecksumMatch() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(nodeToMatch, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
allocateAllUnassigned(allocation);
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(nodeToMatch.getId())
);
}
public void testPreferCopyWithHighestMatchingOperations() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
long retainingSeqNoOnPrimary = randomLongBetween(1, Integer.MAX_VALUE);
long retainingSeqNoForNode2 = randomLongBetween(0, retainingSeqNoOnPrimary - 1);
// Rarely use a seqNo above retainingSeqNoOnPrimary, which could in theory happen when primary fails and comes back quickly.
long retainingSeqNoForNode3 = randomLongBetween(retainingSeqNoForNode2 + 1, retainingSeqNoOnPrimary + 100);
List<RetentionLease> retentionLeases = Arrays.asList(
newRetentionLease(node1, retainingSeqNoOnPrimary),
newRetentionLease(node2, retainingSeqNoForNode2),
newRetentionLease(node3, retainingSeqNoForNode3)
);
testAllocator.addData(
node1,
retentionLeases,
"MATCH",
new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
);
testAllocator.addData(node2, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
allocateAllUnassigned(allocation);
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node3.getId())
);
}
/**
 * Verifies that an ongoing recovery is NOT cancelled when the shard is reported as irrelevant
 * by the relevance predicate, even though a better copy (with a no-op retention lease) exists.
 */
public void testNotCancellingRecoveryOnIrrelevantShard() {
    // re-using the setup from testCancelRecoveryIfFoundCopyWithNoopRetentionLease to be sure that the only difference is the relevance
    runNoopRetentionLeaseTest(false);
}
/**
 * Verifies that an ongoing recovery IS cancelled (the shard moves back to unassigned) when a copy
 * covered by a retention lease is found on another node and the shard is relevant.
 */
public void testCancelRecoveryIfFoundCopyWithNoopRetentionLease() {
    runNoopRetentionLeaseTest(true);
}
/**
 * Shared driver for the no-op retention lease cancellation tests.
 * <p>
 * Sets up a replica recovering on node2 while node3 holds a copy that is covered by a retention
 * lease on the primary. If {@code isRelevantShard} is true, processing existing recoveries must
 * cancel the ongoing recovery and move the shard back to unassigned (with failure counters reset);
 * otherwise nothing may change.
 *
 * @param isRelevantShard whether the relevance predicate passed to
 *                        {@code processExistingRecoveries} reports the shard as relevant
 */
private void runNoopRetentionLeaseTest(boolean isRelevantShard) {
    final UnassignedInfo unassignedInfo;
    final Set<String> failedNodeIds;
    // randomly start either from a clean CLUSTER_RECOVERED state or from a prior
    // ALLOCATION_FAILED state carrying some failed-node history
    if (randomBoolean()) {
        failedNodeIds = Collections.emptySet();
        unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null);
    } else {
        failedNodeIds = new HashSet<>(randomSubsetOf(Set.of("node-4", "node-5", "node-6", "node-7")));
        unassignedInfo = new UnassignedInfo(
            UnassignedInfo.Reason.ALLOCATION_FAILED,
            null,
            null,
            randomIntBetween(1, 10),
            System.nanoTime(),
            System.currentTimeMillis(),
            false,
            UnassignedInfo.AllocationStatus.NO_ATTEMPT,
            failedNodeIds,
            null
        );
    }
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo);
    long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE);
    // primary holds a lease for itself and for node3 — node3 is therefore the "better" copy
    testAllocator.addData(
        node1,
        Arrays.asList(newRetentionLease(node1, retainingSeqNo), newRetentionLease(node3, retainingSeqNo)),
        "MATCH",
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    );
    testAllocator.addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> isRelevantShard);
    if (isRelevantShard) {
        // recovery must be cancelled: the shard goes back to unassigned with reset failure state
        assertThat(allocation.routingNodesChanged(), equalTo(true));
        List<ShardRouting> unassignedShards = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED);
        assertThat(unassignedShards, hasSize(1));
        assertThat(unassignedShards.get(0).shardId(), equalTo(shardId));
        assertThat(unassignedShards.get(0).unassignedInfo().failedAllocations(), equalTo(0));
        assertThat(unassignedShards.get(0).unassignedInfo().failedNodeIds(), equalTo(failedNodeIds));
    } else {
        // irrelevant shard: nothing may change
        assertThat(allocation.routingNodesChanged(), equalTo(false));
        assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0));
    }
}
/**
 * Verifies that an ongoing recovery is not cancelled when the currently-recovering node (node2)
 * already has its own peer-recovery retention lease on the primary — there is no benefit in
 * switching to another copy.
 */
public void testNotCancellingRecoveryIfCurrentRecoveryHasRetentionLease() {
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
    List<RetentionLease> peerRecoveryRetentionLeasesOnPrimary = new ArrayList<>();
    long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE);
    peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node1, retainingSeqNo));
    // the recovering node (node2) is covered by a lease
    peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node2, randomLongBetween(1, retainingSeqNo)));
    // optionally node3 also has a lease — must not matter either way
    if (randomBoolean()) {
        peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo)));
    }
    testAllocator.addData(
        node1,
        peerRecoveryRetentionLeasesOnPrimary,
        "MATCH",
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    );
    testAllocator.addData(node2, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> true);
    // no cancellation: routing nodes unchanged, nothing moved back to unassigned
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0));
}
/**
 * Verifies that an ongoing recovery is not cancelled when the primary does not hold a valid
 * retention lease for itself (here it only has one for node3): without a valid primary lease the
 * lease information cannot be trusted as a basis for cancellation.
 */
public void testNotCancelIfPrimaryDoesNotHaveValidRetentionLease() {
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
    testAllocator.addData(
        node1,
        Collections.singletonList(newRetentionLease(node3, randomNonNegativeLong())),
        "MATCH",
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    );
    testAllocator.addData(node2, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.addData(node3, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> true);
    // no cancellation expected
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0));
}
/**
 * Verifies that a retention lease is ignored when the corresponding node's store is empty:
 * node2 has a lease on the primary but reports no store data, so the allocator must pick node3
 * (which actually has files) instead.
 */
public void testIgnoreRetentionLeaseIfCopyIsEmpty() {
    RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
    long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE);
    List<RetentionLease> retentionLeases = new ArrayList<>();
    retentionLeases.add(newRetentionLease(node1, retainingSeqNo));
    retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNo)));
    // node3 may or may not have a lease — it must win either way because its store is non-empty
    if (randomBoolean()) {
        retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo)));
    }
    testAllocator.addData(
        node1,
        retentionLeases,
        randomSyncId(),
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    );
    testAllocator.addData(node2, null); // has retention lease but store is empty
    testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
    assertThat(
        shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
        equalTo(node3.getId())
    );
}
/**
 * When no primary data can be found but replica data exists, the shard stays unassigned and
 * remains eligible for later allocation. This is today's behavior: a corrupted primary is only
 * identified once a replica is added and its recovery surfaces the corruption
 * (see the corrupt-file tests).
 */
public void testNoPrimaryData() {
    final RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
    // only the replica-side node reports store data; the primary node reports nothing
    testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    final List<ShardRouting> unassigned = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED);
    assertThat(unassigned.size(), equalTo(1));
    assertThat(unassigned.get(0).shardId(), equalTo(shardId));
}
/**
 * When the primary has data but no other node reports any data at all, the replica stays
 * unassigned so it can be allocated later.
 */
public void testNoDataForReplicaOnAnyNode() {
    final RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
    // only the primary node reports store data
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    final List<ShardRouting> unassigned = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED);
    assertThat(unassigned.size(), equalTo(1));
    assertThat(unassigned.get(0).shardId(), equalTo(shardId));
}
/**
 * When the primary has data but no other node holds a matching copy (different file checksums),
 * the replica stays unassigned so it can be allocated later.
 */
public void testNoMatchingFilesForReplicaOnAnyNode() {
    final RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
    // node2 reports a store whose checksum does not match the primary's
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
        .addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    final List<ShardRouting> unassigned = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED);
    assertThat(unassigned.size(), equalTo(1));
    assertThat(unassigned.get(0).shardId(), equalTo(shardId));
}
/**
 * When there is no decision or throttle decision across all nodes for the shard, make sure the shard
 * moves to the ignore unassigned list.
 */
public void testNoOrThrottleDecidersRemainsInUnassigned() {
    // randomly use NO or THROTTLE deciders — both must land the shard in the ignored list
    RoutingAllocation allocation = onePrimaryOnNode1And1Replica(
        randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders()
    );
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
        .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
    assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
/**
 * Tests when the node to allocate to due to matching is being throttled, we move the shard to ignored
 * to wait till throttling on it is done.
 */
public void testThrottleWhenAllocatingToMatchingNode() {
    RoutingAllocation allocation = onePrimaryOnNode1And1Replica(
        new AllocationDeciders(
            Arrays.asList(
                new TestAllocateDecision(Decision.YES),
                new SameShardAllocationDecider(createBuiltInClusterSettings()),
                // throttle allocation only on node2, the node that would otherwise be chosen
                new AllocationDecider() {
                    @Override
                    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                        if (node.node().equals(node2)) {
                            return Decision.THROTTLE;
                        }
                        return Decision.YES;
                    }
                }
            )
        )
    );
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
        .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    // the shard waits in the ignored list until the throttle clears
    assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
    assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
/**
 * Verifies delayed allocation after a node left the cluster (index.unassigned.node_left.delayed_timeout):
 * while the delay is active and no other copy exists, the shard stays in the ignored list; but if a
 * matching copy is found on another node, allocation proceeds immediately despite the delay.
 */
public void testDelayedAllocation() {
    RoutingAllocation allocation = onePrimaryOnNode1And1Replica(
        yesAllocationDeciders(),
        Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(),
        UnassignedInfo.Reason.NODE_LEFT
    );
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    if (randomBoolean()) {
        // we sometime return empty list of files, make sure we test this as well
        testAllocator.addData(node2, null);
    }
    allocateAllUnassigned(allocation);
    // no copy found elsewhere: the delay keeps the shard in the ignored list, nothing changes
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
    assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
    allocation = onePrimaryOnNode1And1Replica(
        yesAllocationDeciders(),
        Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(),
        UnassignedInfo.Reason.NODE_LEFT
    );
    // now a matching copy exists on node2: allocation must go ahead despite the delay
    testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    allocateAllUnassigned(allocation);
    assertThat(allocation.routingNodesChanged(), equalTo(true));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
    assertThat(
        shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
        equalTo(node2.getId())
    );
}
/**
 * Verifies that an ongoing recovery is not cancelled when the currently-recovering node (node2)
 * has a matching sync-id with the primary, regardless of which retention leases exist and of the
 * shard's previous unassigned history.
 */
public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() {
    final UnassignedInfo unassignedInfo;
    // start either from a clean CLUSTER_RECOVERED state or from a prior allocation failure
    if (randomBoolean()) {
        unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null);
    } else {
        unassignedInfo = new UnassignedInfo(
            UnassignedInfo.Reason.ALLOCATION_FAILED,
            null,
            null,
            randomIntBetween(1, 10),
            System.nanoTime(),
            System.currentTimeMillis(),
            false,
            UnassignedInfo.AllocationStatus.NO_ATTEMPT,
            Set.of("node-4"),
            null
        );
    }
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo);
    // retention leases are randomized (possibly absent) — they must not trigger a cancellation here
    List<RetentionLease> retentionLeases = new ArrayList<>();
    if (randomBoolean()) {
        long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE);
        retentionLeases.add(newRetentionLease(node1, retainingSeqNoOnPrimary));
        if (randomBoolean()) {
            retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNoOnPrimary)));
        }
        if (randomBoolean()) {
            retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNoOnPrimary)));
        }
    }
    testAllocator.addData(
        node1,
        retentionLeases,
        "MATCH",
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    );
    // node2 (the recovering node) has a matching sync-id with the primary
    testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> true);
    // no cancellation: routing unchanged, nothing back in unassigned
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0));
}
/**
 * Verifies that an ongoing recovery is not cancelled when both nodes report matching copies —
 * there is no better target to switch to.
 */
public void testNotCancellingRecovery() {
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
    testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
        .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> true);
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0));
}
/**
 * Verifies that an ongoing recovery is not cancelled in favor of a node on which the shard has
 * previously failed (a "broken" node, here node3), even though that node holds a retention lease
 * retaining the same operations as the primary.
 */
public void testDoNotCancelForBrokenNode() {
    // node3 is recorded as a failed node for this shard; optionally an unrelated node4 as well
    Set<String> failedNodes = new HashSet<>();
    failedNodes.add(node3.getId());
    if (randomBoolean()) {
        failedNodes.add("node4");
    }
    UnassignedInfo unassignedInfo = new UnassignedInfo(
        UnassignedInfo.Reason.ALLOCATION_FAILED,
        null,
        null,
        randomIntBetween(failedNodes.size(), 10),
        System.nanoTime(),
        System.currentTimeMillis(),
        false,
        UnassignedInfo.AllocationStatus.NO_ATTEMPT,
        failedNodes,
        null
    );
    RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo);
    long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE);
    // node3's lease retains as much as the primary's — it would normally be the preferred target
    List<RetentionLease> retentionLeases = Arrays.asList(
        newRetentionLease(node1, retainingSeqNoOnPrimary),
        newRetentionLease(node3, retainingSeqNoOnPrimary)
    );
    testAllocator.addData(
        node1,
        retentionLeases,
        "MATCH",
        new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)
    )
        .addData(node2, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
        .addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
    testAllocator.processExistingRecoveries(allocation, shardRouting -> true);
    // the recovery on node2 must continue; nothing is moved back to unassigned
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED), empty());
}
/**
 * Convenience overload of {@link #onePrimaryOnNode1And1Replica(AllocationDeciders, Settings, UnassignedInfo.Reason)}
 * using empty settings and the CLUSTER_RECOVERED unassigned reason.
 */
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) {
    return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.CLUSTER_RECOVERED);
}
/**
 * Builds a {@link RoutingAllocation} for a one-shard/one-replica index where the primary is
 * started on node1 and the replica is unassigned with the given reason.
 *
 * @param deciders allocation deciders to apply
 * @param settings extra index settings (e.g. delayed-allocation timeout)
 * @param reason   the unassigned reason for the replica; NODE_LEFT with a configured delay marks
 *                 the replica as delayed, ALLOCATION_FAILED records one failed allocation
 * @return a routing allocation over nodes 1-3 ready for {@code allocateAllUnassigned}
 */
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) {
    ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED);
    IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardId.getIndexName())
        .settings(settings(IndexVersion.current()).put(settings))
        .numberOfShards(1)
        .numberOfReplicas(1)
        .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId()));
    Metadata metadata = Metadata.builder().put(indexMetadata).build();
    // mark shard as delayed if reason is NODE_LEFT
    boolean delayed = reason == UnassignedInfo.Reason.NODE_LEFT
        && UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings).nanos() > 0;
    int failedAllocations = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ? 1 : 0;
    // sometimes record a last-allocated node id to exercise that code path as well
    final String lastAllocatedNodeId = reason == UnassignedInfo.Reason.NODE_RESTARTING || randomBoolean()
        ? randomAlphaOfLength(10)
        : null;
    RoutingTable routingTable = RoutingTable.builder()
        .add(
            IndexRoutingTable.builder(shardId.getIndex())
                .addIndexShard(
                    new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard)
                        .addShard(
                            ShardRouting.newUnassigned(
                                shardId,
                                false,
                                RecoverySource.PeerRecoverySource.INSTANCE,
                                new UnassignedInfo(
                                    reason,
                                    null,
                                    null,
                                    failedAllocations,
                                    System.nanoTime(),
                                    System.currentTimeMillis(),
                                    delayed,
                                    UnassignedInfo.AllocationStatus.NO_ATTEMPT,
                                    Collections.emptySet(),
                                    lastAllocatedNodeId
                                ),
                                ShardRouting.Role.DEFAULT
                            )
                        )
                )
        )
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(routingTable)
        .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3))
        .build();
    return new RoutingAllocation(
        deciders,
        state.mutableRoutingNodes(),
        state,
        ClusterInfo.EMPTY,
        SnapshotShardSizeInfo.EMPTY,
        System.nanoTime()
    );
}
/**
 * Builds a {@link RoutingAllocation} for a one-shard/one-replica index where the primary is
 * started on node1 and the replica is currently INITIALIZING (recovering) on node2, carrying the
 * given unassigned info. Used by the recovery-cancellation tests.
 *
 * @param deciders       allocation deciders to apply
 * @param unassignedInfo the unassigned info attached to the recovering replica
 * @return a routing allocation over nodes 1-3 ready for {@code processExistingRecoveries}
 */
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders, UnassignedInfo unassignedInfo) {
    ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED);
    Metadata metadata = Metadata.builder()
        .put(
            IndexMetadata.builder(shardId.getIndexName())
                .settings(settings(IndexVersion.current()))
                .numberOfShards(1)
                .numberOfReplicas(1)
                .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId()))
        )
        .build();
    RoutingTable routingTable = RoutingTable.builder()
        .add(
            IndexRoutingTable.builder(shardId.getIndex())
                .addIndexShard(
                    new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard)
                        .addShard(
                            // the replica is mid-recovery on node2
                            shardRoutingBuilder(shardId, node2.getId(), false, ShardRoutingState.INITIALIZING).withUnassignedInfo(
                                unassignedInfo
                            ).build()
                        )
                )
        )
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(routingTable)
        .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3))
        .build();
    return new RoutingAllocation(
        deciders,
        state.mutableRoutingNodes(),
        state,
        ClusterInfo.EMPTY,
        SnapshotShardSizeInfo.EMPTY,
        System.nanoTime()
    );
}
/**
 * Convenience overload of {@link #onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders, UnassignedInfo)}
 * using a fresh CLUSTER_RECOVERED unassigned info.
 */
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
    return onePrimaryOnNode1And1ReplicaRecovering(deciders, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));
}
/**
 * Creates a peer-recovery retention lease for the given node, retaining operations from
 * {@code retainingSeqNo} onward. The lease id is derived from the node id and the timestamp is
 * randomized.
 */
static RetentionLease newRetentionLease(DiscoveryNode node, long retainingSeqNo) {
    return new RetentionLease(
        ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()),
        retainingSeqNo,
        randomNonNegativeLong(),
        ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE
    );
}
/**
 * Picks a random sync-id for test data: matching, non-matching, or absent (null).
 */
static String randomSyncId() {
    return randomFrom("MATCH", "NOT_MATCH", null);
}
| ReplicaShardAllocatorTests |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ClassUtils.java | {
"start": 8033,
"end": 8669
} | class ____ resource loading (but not necessarily for
* {@code Class.forName}, which accepts a {@code null} ClassLoader
* reference as well).
* @return the default ClassLoader (only {@code null} if even the system
* ClassLoader isn't accessible)
* @see Thread#getContextClassLoader()
* @see ClassLoader#getSystemClassLoader()
*/
public static @Nullable ClassLoader getDefaultClassLoader() {
ClassLoader cl = null;
try {
cl = Thread.currentThread().getContextClassLoader();
}
catch (Throwable ex) {
// Cannot access thread context ClassLoader - falling back...
}
if (cl == null) {
// No thread context | path |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/TSSLTransportFactory.java | {
"start": 10184,
"end": 15755
} | class ____ {
protected String protocol = "TLS";
protected String keyStore;
protected InputStream keyStoreStream;
protected String keyPass;
protected String keyManagerType = KeyManagerFactory.getDefaultAlgorithm();
protected String keyStoreType = "JKS";
protected String trustStore;
protected InputStream trustStoreStream;
protected String trustPass;
protected String trustManagerType = TrustManagerFactory.getDefaultAlgorithm();
protected String trustStoreType = "JKS";
protected String[] cipherSuites;
protected boolean clientAuth = false;
protected boolean isKeyStoreSet = false;
protected boolean isTrustStoreSet = false;
public TSSLTransportParameters() {}
/**
* Create parameters specifying the protocol and cipher suites
*
* @param protocol The specific protocol (TLS/SSL) can be specified with versions
* @param cipherSuites
*/
public TSSLTransportParameters(String protocol, String[] cipherSuites) {
this(protocol, cipherSuites, false);
}
/**
* Create parameters specifying the protocol, cipher suites and if client authentication is
* required
*
* @param protocol The specific protocol (TLS/SSL) can be specified with versions
* @param cipherSuites
* @param clientAuth
*/
public TSSLTransportParameters(String protocol, String[] cipherSuites, boolean clientAuth) {
if (protocol != null) {
this.protocol = protocol;
}
this.cipherSuites =
cipherSuites != null ? Arrays.copyOf(cipherSuites, cipherSuites.length) : null;
this.clientAuth = clientAuth;
}
/**
* Set the keystore, password, certificate type and the store type
*
* @param keyStore Location of the Keystore on disk
* @param keyPass Keystore password
* @param keyManagerType The default is X509
* @param keyStoreType The default is JKS
*/
public void setKeyStore(
String keyStore, String keyPass, String keyManagerType, String keyStoreType) {
this.keyStore = keyStore;
this.keyPass = keyPass;
if (keyManagerType != null) {
this.keyManagerType = keyManagerType;
}
if (keyStoreType != null) {
this.keyStoreType = keyStoreType;
}
isKeyStoreSet = true;
}
/**
* Set the keystore, password, certificate type and the store type
*
* @param keyStoreStream Keystore content input stream
* @param keyPass Keystore password
* @param keyManagerType The default is X509
* @param keyStoreType The default is JKS
*/
public void setKeyStore(
InputStream keyStoreStream, String keyPass, String keyManagerType, String keyStoreType) {
this.keyStoreStream = keyStoreStream;
setKeyStore("", keyPass, keyManagerType, keyStoreType);
}
/**
* Set the keystore and password
*
* @param keyStore Location of the Keystore on disk
* @param keyPass Keystore password
*/
public void setKeyStore(String keyStore, String keyPass) {
setKeyStore(keyStore, keyPass, null, null);
}
/**
* Set the keystore and password
*
* @param keyStoreStream Keystore content input stream
* @param keyPass Keystore password
*/
public void setKeyStore(InputStream keyStoreStream, String keyPass) {
setKeyStore(keyStoreStream, keyPass, null, null);
}
/**
* Set the truststore, password, certificate type and the store type
*
* @param trustStore Location of the Truststore on disk
* @param trustPass Truststore password
* @param trustManagerType The default is X509
* @param trustStoreType The default is JKS
*/
public void setTrustStore(
String trustStore, String trustPass, String trustManagerType, String trustStoreType) {
this.trustStore = trustStore;
this.trustPass = trustPass;
if (trustManagerType != null) {
this.trustManagerType = trustManagerType;
}
if (trustStoreType != null) {
this.trustStoreType = trustStoreType;
}
isTrustStoreSet = true;
}
/**
* Set the truststore, password, certificate type and the store type
*
* @param trustStoreStream Truststore content input stream
* @param trustPass Truststore password
* @param trustManagerType The default is X509
* @param trustStoreType The default is JKS
*/
public void setTrustStore(
InputStream trustStoreStream,
String trustPass,
String trustManagerType,
String trustStoreType) {
this.trustStoreStream = trustStoreStream;
setTrustStore("", trustPass, trustManagerType, trustStoreType);
}
/**
* Set the truststore and password
*
* @param trustStore Location of the Truststore on disk
* @param trustPass Truststore password
*/
public void setTrustStore(String trustStore, String trustPass) {
setTrustStore(trustStore, trustPass, null, null);
}
/**
* Set the truststore and password
*
* @param trustStoreStream Truststore content input stream
* @param trustPass Truststore password
*/
public void setTrustStore(InputStream trustStoreStream, String trustPass) {
setTrustStore(trustStoreStream, trustPass, null, null);
}
/**
* Set if client authentication is required
*
* @param clientAuth
*/
public void requireClientAuth(boolean clientAuth) {
this.clientAuth = clientAuth;
}
}
}
| TSSLTransportParameters |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java | {
"start": 5273,
"end": 5855
} | class ____ the called method
* </pre>
*/
long getClientProtocolVersion();
}
/**
* Protobuf type {@code hadoop.common.RequestHeaderProto}
*
* <pre>
**
* This message is the header for the Protobuf Rpc Engine
* when sending a RPC request from RPC client to the RPC server.
* The actual request (serialized as protobuf) follows this request.
*
* No special header is needed for the Rpc Response for Protobuf Rpc Engine.
* The normal RPC response header (see RpcHeader.proto) are sufficient.
* </pre>
*/
public static final | declaring |
java | spring-projects__spring-framework | spring-context-indexer/src/test/java/org/springframework/context/index/sample/SampleNonStaticEmbedded.java | {
"start": 860,
"end": 913
} | class ____ {
@Component
public | SampleNonStaticEmbedded |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFileReader.java | {
"start": 16901,
"end": 17814
} | class ____ {
private final long offset;
private final long size;
private final int repeatCount;
/**
* Constructs a BufferPositionDescriptor with specified offset, size, and repeat count.
*
* @param offset the offset of the buffer
* @param size the size of the buffer
* @param repeatCount the repeat count for the buffer
*/
BufferPositionDescriptor(long offset, long size, int repeatCount) {
this.offset = offset;
this.size = size;
this.repeatCount = repeatCount;
}
@VisibleForTesting
long getOffset() {
return offset;
}
@VisibleForTesting
long getSize() {
return size;
}
@VisibleForTesting
int getRepeatCount() {
return repeatCount;
}
}
}
| BufferPositionDescriptor |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/annotation/AnnotationValue.java | {
"start": 2266,
"end": 2617
} | class ____ similar to how
* a implementation of {@link Annotation} behaves.</p>
*
* NOTE: During the mapping or remapping, nullable stereotypes value means that
* the stereotypes will be filled from the annotation definition, when empty collection will skip it.
*
* @param <A> The annotation type
* @author Graeme Rocher
* @since 1.0
*/
public | is |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java | {
"start": 16582,
"end": 21766
} | class ____ {
private String host;
private int port = -1;
private Scheme scheme;
private HttpMethod method;
private String path;
private Map<String, String> params = new HashMap<>();
private Map<String, String> headers = new HashMap<>();
private BasicAuth auth;
private String body;
private TimeValue connectionTimeout;
private TimeValue readTimeout;
private HttpProxy proxy;
private Builder(String host, int port) {
this.host = host;
this.port = port;
}
private Builder(HttpRequest original) {
this.host = original.host;
this.port = original.port;
this.scheme = original.scheme;
this.method = original.method;
this.path = original.path;
this.params = new HashMap<>(original.params);
this.headers = new HashMap<>(original.headers);
this.auth = original.auth;
this.body = original.body;
this.connectionTimeout = original.connectionTimeout;
this.readTimeout = original.readTimeout;
this.proxy = original.proxy;
}
private Builder() {}
public Builder scheme(Scheme scheme) {
this.scheme = scheme;
return this;
}
public Builder method(HttpMethod method) {
this.method = method;
return this;
}
public Builder path(String path) {
this.path = path;
return this;
}
public Builder setParams(Map<String, String> params) {
if (this.params == null) {
throw new IllegalStateException("Request has already been built!");
}
this.params.putAll(params);
return this;
}
public Builder setParam(String key, String value) {
if (params == null) {
throw new IllegalStateException("Request has already been built!");
}
this.params.put(key, value);
return this;
}
public Builder setHeaders(Map<String, String> headers) {
if (this.headers == null) {
throw new IllegalStateException("Request has already been built!");
}
this.headers.putAll(headers);
return this;
}
public Builder setHeader(String key, String value) {
if (headers == null) {
throw new IllegalStateException("Request has already been built!");
}
this.headers.put(key, value);
return this;
}
public Builder auth(BasicAuth auth) {
this.auth = auth;
return this;
}
public Builder body(String body) {
this.body = body;
return this;
}
public Builder jsonBody(ToXContent xContent) {
return body(Strings.toString(xContent)).setHeader("Content-Type", XContentType.JSON.mediaType());
}
public Builder connectionTimeout(TimeValue timeout) {
this.connectionTimeout = timeout;
return this;
}
public Builder readTimeout(TimeValue timeout) {
this.readTimeout = timeout;
return this;
}
public Builder proxy(HttpProxy proxy) {
this.proxy = proxy;
return this;
}
public HttpRequest build() {
HttpRequest request = new HttpRequest(
host,
port,
scheme,
method,
path,
unmodifiableMap(params),
unmodifiableMap(headers),
auth,
body,
connectionTimeout,
readTimeout,
proxy
);
params = null;
headers = null;
return request;
}
public Builder fromUrl(String supposedUrl) {
if (Strings.hasLength(supposedUrl) == false) {
throw new ElasticsearchParseException("Configured URL is empty, please configure a valid URL");
}
try {
URI uri = new URI(supposedUrl);
if (Strings.hasLength(uri.getScheme()) == false) {
throw new ElasticsearchParseException("URL [{}] does not contain a scheme", uri);
}
scheme = Scheme.parse(uri.getScheme());
port = uri.getPort() > 0 ? uri.getPort() : scheme.defaultPort();
host = uri.getHost();
if (Strings.hasLength(uri.getRawPath())) {
path = uri.getRawPath();
}
String rawQuery = uri.getRawQuery();
if (Strings.hasLength(rawQuery)) {
RestUtils.decodeQueryString(rawQuery, 0, params);
}
} catch (URISyntaxException e) {
throw new ElasticsearchParseException("Malformed URL [{}]", supposedUrl);
}
return this;
}
}
public | Builder |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/RedisURI.java | {
"start": 6514,
"end": 42507
} | class ____ implements Serializable, ConnectionPoint {
public static final String URI_SCHEME_REDIS_SENTINEL = "redis-sentinel";
public static final String URI_SCHEME_REDIS_SENTINEL_SECURE = "rediss-sentinel";
public static final String URI_SCHEME_REDIS = "redis";
public static final String URI_SCHEME_REDIS_SECURE = "rediss";
public static final String URI_SCHEME_REDIS_SECURE_ALT = "redis+ssl";
public static final String URI_SCHEME_REDIS_TLS_ALT = "redis+tls";
public static final String URI_SCHEME_REDIS_SOCKET = "redis-socket";
public static final String URI_SCHEME_REDIS_SOCKET_ALT = "redis+socket";
public static final String PARAMETER_NAME_TIMEOUT = "timeout";
public static final String PARAMETER_NAME_DATABASE = "database";
public static final String PARAMETER_NAME_DATABASE_ALT = "db";
public static final String PARAMETER_NAME_SENTINEL_MASTER_ID = "sentinelMasterId";
public static final String PARAMETER_NAME_CLIENT_NAME = "clientName";
public static final String PARAMETER_NAME_LIBRARY_NAME = "libraryName";
public static final String PARAMETER_NAME_LIBRARY_VERSION = "libraryVersion";
public static final String PARAMETER_NAME_VERIFY_PEER = "verifyPeer";
public static final Map<String, LongFunction<Duration>> CONVERTER_MAP;
static {
Map<String, LongFunction<Duration>> unitMap = new HashMap<>();
unitMap.put("ns", Duration::ofNanos);
unitMap.put("us", us -> Duration.ofNanos(us * 1000));
unitMap.put("ms", Duration::ofMillis);
unitMap.put("s", Duration::ofSeconds);
unitMap.put("m", Duration::ofMinutes);
unitMap.put("h", Duration::ofHours);
unitMap.put("d", Duration::ofDays);
CONVERTER_MAP = Collections.unmodifiableMap(unitMap);
}
/**
* The default sentinel port.
*/
public static final int DEFAULT_SENTINEL_PORT = 26379;
/**
* The default redis port.
*/
public static final int DEFAULT_REDIS_PORT = 6379;
/**
* Default timeout: 60 sec
*/
public static final long DEFAULT_TIMEOUT = 60;
public static final Duration DEFAULT_TIMEOUT_DURATION = Duration.ofSeconds(DEFAULT_TIMEOUT);
private String host;
private String socket;
private String sentinelMasterId;
private int port;
private int database;
private String clientName;
private String libraryName = LettuceVersion.getName();
private String libraryVersion = LettuceVersion.getVersion();
private RedisCredentialsProvider credentialsProvider = new StaticCredentialsProvider(null, null);;
private boolean ssl = false;
private SslVerifyMode verifyMode = SslVerifyMode.FULL;
private boolean startTls = false;
private Duration timeout = DEFAULT_TIMEOUT_DURATION;
private final List<RedisURI> sentinels = new ArrayList<>();
/**
* Default empty constructor.
*/
public RedisURI() {
}
/**
* Constructor with host/port and timeout.
*
* @param host the host
* @param port the port
* @param timeout timeout value
*/
public RedisURI(String host, int port, Duration timeout) {
LettuceAssert.notEmpty(host, "Host must not be empty");
LettuceAssert.notNull(timeout, "Timeout duration must not be null");
LettuceAssert.isTrue(!timeout.isNegative(), "Timeout duration must be greater or equal to zero");
setHost(host);
setPort(port);
setTimeout(timeout);
}
/**
* Return a new {@link RedisURI.Builder} to construct a {@link RedisURI}.
*
* @return a new {@link RedisURI.Builder} to construct a {@link RedisURI}.
*/
public static RedisURI.Builder builder() {
return new Builder();
}
/**
* Create a Redis URI from host and port.
*
* @param host the host
* @param port the port
* @return An instance of {@link RedisURI} containing details from the {@code host} and {@code port}.
*/
public static RedisURI create(String host, int port) {
return Builder.redis(host, port).build();
}
/**
* Create a Redis URI from an URI string.
*
* The uri must follow conventions of {@link java.net.URI}
*
* @param uri The URI string.
* @return An instance of {@link RedisURI} containing details from the URI.
*/
public static RedisURI create(String uri) {
LettuceAssert.notEmpty(uri, "URI must not be empty");
return create(URI.create(uri));
}
/**
* Create a Redis URI from an URI string:
*
* The uri must follow conventions of {@link java.net.URI}
*
* @param uri The URI.
* @return An instance of {@link RedisURI} containing details from the URI.
*/
public static RedisURI create(URI uri) {
return buildRedisUriFromUri(uri);
}
/**
* Create a new {@link RedisURI.Builder} that is initialized from a plain {@link RedisURI}.
*
* @param source the initialization source, must not be {@code null}.
* @return the initialized builder.
* @since 6.0
*/
public static Builder builder(RedisURI source) {
LettuceAssert.notNull(source, "Source RedisURI must not be null");
Builder builder = builder();
builder.withSsl(source).withAuthentication(source).withTimeout(source.getTimeout()).withDatabase(source.getDatabase());
if (source.getClientName() != null) {
builder.withClientName(source.getClientName());
}
if (source.getLibraryName() != null) {
builder.withLibraryName(source.getLibraryName());
}
if (source.getLibraryVersion() != null) {
builder.withLibraryVersion(source.getLibraryVersion());
}
if (source.socket != null) {
builder.socket = source.getSocket();
} else {
if (source.getHost() != null) {
builder.withHost(source.getHost());
builder.withPort(source.getPort());
}
}
return builder;
}
/**
* Returns the host.
*
* @return the host.
*/
public String getHost() {
return host;
}
/**
* Sets the Redis host.
*
* @param host the host
*/
public void setHost(String host) {
this.host = host;
}
/**
* Returns the Sentinel Master Id.
*
* @return the Sentinel Master Id.
*/
public String getSentinelMasterId() {
return sentinelMasterId;
}
/**
* Sets the Sentinel Master Id.
*
* @param sentinelMasterId the Sentinel Master Id.
*/
public void setSentinelMasterId(String sentinelMasterId) {
this.sentinelMasterId = sentinelMasterId;
}
/**
* Returns the Redis port.
*
* @return the Redis port
*/
public int getPort() {
return port;
}
/**
* Sets the Redis port. Defaults to {@link #DEFAULT_REDIS_PORT}.
*
* @param port the Redis port
*/
public void setPort(int port) {
this.port = port;
}
/**
* Returns the Unix Domain Socket path.
*
* @return the Unix Domain Socket path.
*/
public String getSocket() {
return socket;
}
/**
* Sets the Unix Domain Socket path.
*
* @param socket the Unix Domain Socket path.
*/
public void setSocket(String socket) {
this.socket = socket;
}
/**
* Apply authentication from another {@link RedisURI}. The authentication settings of the {@code source} URI will be applied
* to this URI. That is in particular the {@link RedisCredentialsProvider}.
*
* @param source must not be {@code null}.
* @since 6.0
*/
public void applyAuthentication(RedisURI source) {
LettuceAssert.notNull(source, "Source RedisURI must not be null");
if (source.credentialsProvider != null) {
setCredentialsProvider(source.getCredentialsProvider());
}
}
/**
* Sets the password to use to authenticate Redis connections.
* <p>
* This method effectively overwrites any existing {@link RedisCredentialsProvider} with a new one, containing an empty
* username and the provided password.
*
* @param password the password to use to authenticate Redis connections.
* @see #setCredentialsProvider(RedisCredentialsProvider)
* @since 7.0
*/
public void setAuthentication(CharSequence password) {
LettuceAssert.notNull(password, "Password must not be null");
this.setAuthentication(null, password);
}
/**
* Sets the password to use to authenticate Redis connections.
* <p>
* This method effectively overwrites any existing {@link RedisCredentialsProvider} with a new one, containing an empty
* username and the provided password.
*
* @param password the password to use to authenticate Redis connections.
* @see #setCredentialsProvider(RedisCredentialsProvider)
* @since 7.0
*/
public void setAuthentication(char[] password) {
LettuceAssert.notNull(password, "Password must not be null");
this.setAuthentication(null, password);
}
/**
* Sets the username and password to use to authenticate Redis connections.
* <p>
* This method effectively overwrites any existing {@link RedisCredentialsProvider} with a new one, containing the provided
* username and password.
*
* @param username the username to use to authenticate Redis connections.
* @param password the password to use to authenticate Redis connections.
* @see #setCredentialsProvider(RedisCredentialsProvider)
* @since 7.0
*/
public void setAuthentication(String username, char[] password) {
LettuceAssert.notNull(password, "Password must not be null");
this.setCredentialsProvider(() -> Mono.just(RedisCredentials.just(username, password)));
}
/**
* Sets the username and password to use to authenticate Redis connections.
* <p>
* This method effectively overwrites any existing {@link RedisCredentialsProvider} with a new one, containing the provided
* username and password.
*
* @param username the username to use to authenticate Redis connections.
* @param password the password to use to authenticate Redis connections.
* @see #setCredentialsProvider(RedisCredentialsProvider)
* @since 7.0
*/
public void setAuthentication(String username, CharSequence password) {
LettuceAssert.notNull(password, "Password must not be null");
this.setCredentialsProvider(() -> Mono.just(RedisCredentials.just(username, password)));
}
/**
* Returns the {@link RedisCredentialsProvider} to use to authenticate Redis connections. Returns a static credentials
* provider no explicit {@link RedisCredentialsProvider} was configured.
*
* @return the {@link RedisCredentialsProvider} to use to authenticate Redis connections
* @since 6.2
*/
public RedisCredentialsProvider getCredentialsProvider() {
return this.credentialsProvider;
}
/**
* Sets the {@link RedisCredentialsProvider}. Configuring a credentials provider resets the configured static
* username/password.
*
* @param credentialsProvider the credentials provider to use when authenticating a Redis connection.
* @since 6.2
*/
public void setCredentialsProvider(RedisCredentialsProvider credentialsProvider) {
LettuceAssert.notNull(credentialsProvider, "RedisCredentialsProvider must not be null");
this.credentialsProvider = credentialsProvider;
}
/**
* Returns the command timeout for synchronous command execution.
*
* @return the Timeout
* @since 5.0
*/
public Duration getTimeout() {
return timeout;
}
/**
* Sets the command timeout for synchronous command execution. A zero timeout value indicates to not time out.
*
* @param timeout the command timeout for synchronous command execution.
* @since 5.0
*/
public void setTimeout(Duration timeout) {
LettuceAssert.notNull(timeout, "Timeout must not be null");
LettuceAssert.isTrue(!timeout.isNegative(), "Timeout must be greater or equal 0");
this.timeout = timeout;
}
/**
* Returns the Redis database number. Databases are only available for Redis Standalone and Redis Master/Slave.
*
* @return the Redis database number
*/
public int getDatabase() {
return database;
}
/**
* Sets the Redis database number. Databases are only available for Redis Standalone and Redis Master/Slave.
*
* @param database the Redis database number.
*/
public void setDatabase(int database) {
LettuceAssert.isTrue(database >= 0, "Invalid database number: " + database);
this.database = database;
}
/**
* Returns the client name.
*
* @return the client name.
* @since 4.4
*/
public String getClientName() {
return clientName;
}
/**
* Sets the client name to be applied on Redis connections.
*
* @param clientName the client name.
* @since 4.4
*/
public void setClientName(String clientName) {
this.clientName = clientName;
}
/**
* Returns the library name.
*
* @return the library name.
* @since 6.3
*/
public String getLibraryName() {
return libraryName;
}
/**
* Sets the library name to be applied on Redis connections.
*
* @param libraryName the library name.
* @since 6.3
*/
public void setLibraryName(String libraryName) {
if (libraryName != null && libraryName.indexOf(' ') != -1) {
throw new IllegalArgumentException("Library name must not contain spaces");
}
this.libraryName = libraryName;
}
/**
* Returns the library version.
*
* @return the library version.
* @since 6.3
*/
public String getLibraryVersion() {
return libraryVersion;
}
/**
* Sets the library version to be applied on Redis connections.
*
* @param libraryVersion the library version.
* @since 6.3
*/
public void setLibraryVersion(String libraryVersion) {
if (libraryVersion != null && libraryVersion.indexOf(' ') != -1) {
throw new IllegalArgumentException("Library version must not contain spaces");
}
this.libraryVersion = libraryVersion;
}
/**
* Apply authentication from another {@link RedisURI}. The SSL settings of the {@code source} URI will be applied to this
* URI. That is in particular SSL usage, peer verification and StartTLS.
*
* @param source must not be {@code null}.
* @since 6.0
*/
public void applySsl(RedisURI source) {
LettuceAssert.notNull(source, "Source RedisURI must not be null");
setSsl(source.isSsl());
setVerifyPeer(source.getVerifyMode());
setStartTls(source.isStartTls());
}
/**
* Returns {@code true} if SSL mode is enabled.
*
* @return {@code true} if SSL mode is enabled.
*/
public boolean isSsl() {
return ssl;
}
/**
* Sets whether to use SSL. Sets SSL also for already configured Redis Sentinel nodes.
*
* @param ssl
*/
public void setSsl(boolean ssl) {
this.ssl = ssl;
this.sentinels.forEach(it -> it.setSsl(ssl));
}
/**
* Returns whether to verify peers when using {@link #isSsl() SSL}.
*
* @return {@code true} to verify peers when using {@link #isSsl() SSL}.
*/
public boolean isVerifyPeer() {
return verifyMode != SslVerifyMode.NONE;
}
/**
* Returns the mode to verify peers when using {@link #isSsl() SSL}.
*
* @return the verification mode
* @since 6.1
*/
public SslVerifyMode getVerifyMode() {
return verifyMode;
}
/**
* Sets whether to verify peers when using {@link #isSsl() SSL}. Sets peer verification also for already configured Redis
* Sentinel nodes.
*
* @param verifyPeer {@code true} to verify peers when using {@link #isSsl() SSL}.
*/
public void setVerifyPeer(boolean verifyPeer) {
setVerifyPeer(verifyPeer ? SslVerifyMode.FULL : SslVerifyMode.NONE);
}
/**
* Sets how to verify peers when using {@link #isSsl() SSL}. Sets peer verification also for already configured Redis
* Sentinel nodes.
*
* @param verifyMode verification mode to use when using {@link #isSsl() SSL}.
* @since 6.1
*/
public void setVerifyPeer(SslVerifyMode verifyMode) {
LettuceAssert.notNull(verifyMode, "VerifyMode must not be null");
this.verifyMode = verifyMode;
this.sentinels.forEach(it -> it.setVerifyPeer(this.verifyMode));
}
/**
* Returns {@code true} if StartTLS is enabled.
*
* @return {@code true} if StartTLS is enabled.
*/
public boolean isStartTls() {
return startTls;
}
/**
* Returns whether StartTLS is enabled. Sets StartTLS also for already configured Redis Sentinel nodes.
*
* @param startTls {@code true} if StartTLS is enabled.
*/
public void setStartTls(boolean startTls) {
this.startTls = startTls;
this.sentinels.forEach(it -> it.setStartTls(startTls));
}
/**
*
* @return the list of {@link RedisURI Redis Sentinel URIs}.
*/
public List<RedisURI> getSentinels() {
return sentinels;
}
/**
* Creates an URI based on the RedisURI if possible.
* <p>
* An URI an represent a Standalone address using host and port or socket addressing or a Redis Sentinel address using
* host/port. A Redis Sentinel URI with multiple nodes using Unix Domain Sockets cannot be rendered to a {@link URI}.
*
* @return URI based on the RedisURI.
* @throws IllegalStateException if the URI cannot be rendered.
*/
public URI toURI() {
try {
return URI.create(createUriString(false));
} catch (Exception e) {
throw new IllegalStateException("Cannot render URI for " + toString(), e);
}
}
private String createUriString(boolean maskCredentials) {
String scheme = getScheme();
String authority = getAuthority(scheme, maskCredentials);
String queryString = getQueryString();
String uri = scheme + "://" + authority;
if (!queryString.isEmpty()) {
uri += "?" + queryString;
}
return uri;
}
private static RedisURI buildRedisUriFromUri(URI uri) {
LettuceAssert.notNull(uri, "URI must not be null");
LettuceAssert.notNull(uri.getScheme(), "URI scheme must not be null");
Builder builder;
if (isSentinel(uri.getScheme())) {
builder = configureSentinel(uri);
} else {
builder = configureStandalone(uri);
}
String userInfo = uri.getUserInfo();
if (isEmpty(userInfo) && isNotEmpty(uri.getAuthority()) && uri.getAuthority().lastIndexOf('@') > 0) {
userInfo = uri.getAuthority().substring(0, uri.getAuthority().lastIndexOf('@'));
}
if (isNotEmpty(userInfo)) {
String password = userInfo;
String username = null;
if (password.startsWith(":")) {
password = password.substring(1);
} else {
int index = password.indexOf(':');
if (index > 0) {
username = password.substring(0, index);
password = password.substring(index + 1);
}
}
if (LettuceStrings.isNotEmpty(password)) {
if (username == null) {
builder.withPassword(password);
} else {
builder.withAuthentication(username, password);
}
}
}
if (isNotEmpty(uri.getPath()) && builder.socket == null) {
String pathSuffix = uri.getPath().substring(1);
if (isNotEmpty(pathSuffix)) {
builder.withDatabase(Integer.parseInt(pathSuffix));
}
}
if (isNotEmpty(uri.getQuery())) {
StringTokenizer st = new StringTokenizer(uri.getQuery(), "&;");
while (st.hasMoreTokens()) {
String queryParam = st.nextToken();
String forStartWith = queryParam.toLowerCase();
if (forStartWith.startsWith(PARAMETER_NAME_TIMEOUT + "=")) {
parseTimeout(builder, queryParam.toLowerCase());
}
if (forStartWith.startsWith(PARAMETER_NAME_DATABASE + "=")
|| queryParam.startsWith(PARAMETER_NAME_DATABASE_ALT + "=")) {
parseDatabase(builder, queryParam);
}
if (forStartWith.startsWith(PARAMETER_NAME_CLIENT_NAME.toLowerCase() + "=")) {
parseClientName(builder, queryParam);
}
if (forStartWith.startsWith(PARAMETER_NAME_LIBRARY_NAME.toLowerCase() + "=")) {
parseLibraryName(builder, queryParam);
}
if (forStartWith.startsWith(PARAMETER_NAME_LIBRARY_VERSION.toLowerCase() + "=")) {
parseLibraryVersion(builder, queryParam);
}
if (forStartWith.startsWith(PARAMETER_NAME_VERIFY_PEER.toLowerCase() + "=")) {
parseVerifyPeer(builder, queryParam);
}
if (forStartWith.startsWith(PARAMETER_NAME_SENTINEL_MASTER_ID.toLowerCase() + "=")) {
parseSentinelMasterId(builder, queryParam);
}
}
}
if (isSentinel(uri.getScheme())) {
LettuceAssert.notEmpty(builder.sentinelMasterId, "URI must contain the sentinelMasterId");
}
return builder.build();
}
private String getAuthority(String scheme, boolean maskCredentials) {
String authority = null;
if (host != null) {
if (host.contains(",")) {
authority = host;
} else {
authority = urlEncode(host) + getPortPart(port, scheme);
}
}
if (!sentinels.isEmpty()) {
authority = sentinels.stream().map(redisURI -> {
if (LettuceStrings.isNotEmpty(redisURI.getSocket())) {
return String.format("[Socket %s]", redisURI.getSocket());
}
return urlEncode(redisURI.getHost()) + getPortPart(redisURI.getPort(), scheme);
}).collect(Collectors.joining(","));
}
if (socket != null) {
authority = urlEncode(socket);
} else {
if (database != 0) {
authority += "/" + database;
}
}
if (credentialsProvider != null) {
if (!maskCredentials) {
authority = urlEncode("**credentialsProvider**") + "@" + authority;
} else {
// compatibility with versions before 7.0 - in previous versions of the Lettuce driver there was an option to
// have a username and password pair as part of the RedisURI; in these cases when we were masking credentials we
// would get asterix for each character of the password.
RedisCredentials creds = credentialsProvider.resolveCredentials().block();
if (creds != null) {
String credentials = "";
if (creds.hasUsername() && !creds.getUsername().isEmpty()) {
credentials = urlEncode(creds.getUsername()) + ":";
}
if (creds.hasPassword()) {
credentials += IntStream.range(0, creds.getPassword().length).mapToObj(ignore -> "*")
.collect(Collectors.joining());
}
if (!credentials.isEmpty()) {
authority = credentials + "@" + authority;
}
}
}
}
return authority;
}
private String getQueryString() {
List<String> queryPairs = new ArrayList<>();
if (database != 0 && LettuceStrings.isNotEmpty(socket)) {
queryPairs.add(PARAMETER_NAME_DATABASE + "=" + database);
}
if (clientName != null) {
queryPairs.add(PARAMETER_NAME_CLIENT_NAME + "=" + urlEncode(clientName));
}
if (libraryName != null && !libraryName.equals(LettuceVersion.getName())) {
queryPairs.add(PARAMETER_NAME_LIBRARY_NAME + "=" + urlEncode(libraryName));
}
if (libraryVersion != null && !libraryVersion.equals(LettuceVersion.getVersion())) {
queryPairs.add(PARAMETER_NAME_LIBRARY_VERSION + "=" + urlEncode(libraryVersion));
}
if (isSsl() && getVerifyMode() != SslVerifyMode.FULL) {
queryPairs.add(PARAMETER_NAME_VERIFY_PEER + "=" + verifyMode.name());
}
if (sentinelMasterId != null) {
queryPairs.add(PARAMETER_NAME_SENTINEL_MASTER_ID + "=" + urlEncode(sentinelMasterId));
}
if (timeout.getSeconds() != DEFAULT_TIMEOUT) {
if (timeout.getNano() == 0) {
queryPairs.add(PARAMETER_NAME_TIMEOUT + "=" + timeout.getSeconds() + "s");
} else {
queryPairs.add(PARAMETER_NAME_TIMEOUT + "=" + timeout.toNanos() + "ns");
}
}
return String.join("&", queryPairs);
}
private String getPortPart(int port, String scheme) {
if (isSentinel(scheme) && port == DEFAULT_SENTINEL_PORT) {
return "";
}
if (URI_SCHEME_REDIS.equals(scheme) && port == DEFAULT_REDIS_PORT) {
return "";
}
return ":" + port;
}
private String getScheme() {
String scheme = URI_SCHEME_REDIS;
if (isSsl()) {
if (isStartTls()) {
scheme = URI_SCHEME_REDIS_TLS_ALT;
} else {
scheme = URI_SCHEME_REDIS_SECURE;
}
}
if (socket != null) {
scheme = URI_SCHEME_REDIS_SOCKET;
}
if (host == null && !sentinels.isEmpty()) {
if (isSsl()) {
scheme = URI_SCHEME_REDIS_SENTINEL_SECURE;
} else {
scheme = URI_SCHEME_REDIS_SENTINEL;
}
}
return scheme;
}
/**
* URL encode the {@code str} without slash escaping {@code %2F}.
*
* @param str the string to encode.
* @return the URL-encoded string
*/
private static String urlEncode(String str) {
try {
return URLEncoder.encode(str, StandardCharsets.UTF_8.name()).replaceAll("%2F", "/");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException(e);
}
}
/**
* @return the RedisURL in a URI-like form.
*/
@Override
public String toString() {
return createUriString(true);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof RedisURI))
return false;
RedisURI redisURI = (RedisURI) o;
if (port != redisURI.port)
return false;
if (database != redisURI.database)
return false;
if (host != null ? !host.equals(redisURI.host) : redisURI.host != null)
return false;
if (socket != null ? !socket.equals(redisURI.socket) : redisURI.socket != null)
return false;
if (sentinelMasterId != null ? !sentinelMasterId.equals(redisURI.sentinelMasterId) : redisURI.sentinelMasterId != null)
return false;
return !(sentinels != null ? !sentinels.equals(redisURI.sentinels) : redisURI.sentinels != null);
}
@Override
public int hashCode() {
int result = host != null ? host.hashCode() : 0;
result = 31 * result + (socket != null ? socket.hashCode() : 0);
result = 31 * result + (sentinelMasterId != null ? sentinelMasterId.hashCode() : 0);
result = 31 * result + port;
result = 31 * result + database;
result = 31 * result + (sentinels != null ? sentinels.hashCode() : 0);
return result;
}
private static void parseTimeout(Builder builder, String queryParam) {
int index = queryParam.indexOf('=');
if (index < 0) {
return;
}
String timeoutString = queryParam.substring(index + 1);
int numbersEnd = 0;
while (numbersEnd < timeoutString.length() && Character.isDigit(timeoutString.charAt(numbersEnd))) {
numbersEnd++;
}
if (numbersEnd == 0) {
if (timeoutString.startsWith("-")) {
builder.withTimeout(Duration.ZERO);
} else {
// no-op, leave defaults
}
} else {
String timeoutValueString = timeoutString.substring(0, numbersEnd);
long timeoutValue = Long.parseLong(timeoutValueString);
builder.withTimeout(Duration.ofMillis(timeoutValue));
String suffix = timeoutString.substring(numbersEnd);
LongFunction<Duration> converter = CONVERTER_MAP.get(suffix);
if (converter == null) {
converter = Duration::ofMillis;
}
builder.withTimeout(converter.apply(timeoutValue));
}
}
private static void parseDatabase(Builder builder, String queryParam) {
int index = queryParam.indexOf('=');
if (index < 0) {
return;
}
String databaseString = queryParam.substring(index + 1);
int numbersEnd = 0;
while (numbersEnd < databaseString.length() && Character.isDigit(databaseString.charAt(numbersEnd))) {
numbersEnd++;
}
if (numbersEnd != 0) {
String databaseValueString = databaseString.substring(0, numbersEnd);
int value = Integer.parseInt(databaseValueString);
builder.withDatabase(value);
}
}
private static void parseClientName(Builder builder, String queryParam) {
String clientName = getValuePart(queryParam);
if (isNotEmpty(clientName)) {
builder.withClientName(clientName);
}
}
private static void parseLibraryName(Builder builder, String queryParam) {
String libraryName = getValuePart(queryParam);
if (isNotEmpty(libraryName)) {
builder.withLibraryName(libraryName);
}
}
private static void parseLibraryVersion(Builder builder, String queryParam) {
String libraryVersion = getValuePart(queryParam);
if (isNotEmpty(libraryVersion)) {
builder.withLibraryVersion(libraryVersion);
}
}
private static void parseVerifyPeer(Builder builder, String queryParam) {
String verifyPeer = getValuePart(queryParam);
if (isNotEmpty(verifyPeer)) {
builder.withVerifyPeer(SslVerifyMode.valueOf(verifyPeer.toUpperCase()));
}
}
private static void parseSentinelMasterId(Builder builder, String queryParam) {
String masterIdString = getValuePart(queryParam);
if (isNotEmpty(masterIdString)) {
builder.withSentinelMasterId(masterIdString);
}
}
private static String getValuePart(String queryParam) {
int index = queryParam.indexOf('=');
if (index < 0) {
return null;
}
return queryParam.substring(index + 1);
}
private static Builder configureStandalone(URI uri) {
Builder builder = null;
Set<String> allowedSchemes = LettuceSets.unmodifiableSet(URI_SCHEME_REDIS, URI_SCHEME_REDIS_SECURE,
URI_SCHEME_REDIS_SOCKET, URI_SCHEME_REDIS_SOCKET_ALT, URI_SCHEME_REDIS_SECURE_ALT, URI_SCHEME_REDIS_TLS_ALT);
if (!allowedSchemes.contains(uri.getScheme())) {
throw new IllegalArgumentException("Scheme " + uri.getScheme() + " not supported");
}
if (URI_SCHEME_REDIS_SOCKET.equals(uri.getScheme()) || URI_SCHEME_REDIS_SOCKET_ALT.equals(uri.getScheme())) {
builder = Builder.socket(uri.getPath());
} else {
if (isNotEmpty(uri.getHost())) {
if (uri.getPort() > 0) {
builder = Builder.redis(uri.getHost(), uri.getPort());
} else {
builder = Builder.redis(uri.getHost());
}
} else {
if (isNotEmpty(uri.getAuthority())) {
String authority = uri.getAuthority();
if (authority.indexOf('@') > -1) {
authority = authority.substring(authority.lastIndexOf('@') + 1);
}
builder = Builder.redis(authority);
}
}
}
LettuceAssert.notNull(builder, "Invalid URI, cannot get host or socket part");
if (URI_SCHEME_REDIS_SECURE.equals(uri.getScheme()) || URI_SCHEME_REDIS_SECURE_ALT.equals(uri.getScheme())) {
builder.withSsl(true);
}
if (URI_SCHEME_REDIS_TLS_ALT.equals(uri.getScheme())) {
builder.withSsl(true);
builder.withStartTls(true);
}
return builder;
}
private static RedisURI.Builder configureSentinel(URI uri) {
String masterId = uri.getFragment();
RedisURI.Builder builder = null;
if (isNotEmpty(uri.getHost())) {
if (uri.getPort() != -1) {
builder = RedisURI.Builder.sentinel(uri.getHost(), uri.getPort());
} else {
builder = RedisURI.Builder.sentinel(uri.getHost());
}
}
if (builder == null && isNotEmpty(uri.getAuthority())) {
String authority = uri.getAuthority();
if (authority.indexOf('@') > -1) {
authority = authority.substring(authority.lastIndexOf('@') + 1);
}
String[] hosts = authority.split(",");
for (String host : hosts) {
HostAndPort hostAndPort = HostAndPort.parse(host);
if (builder == null) {
if (hostAndPort.hasPort()) {
builder = RedisURI.Builder.sentinel(hostAndPort.getHostText(), hostAndPort.getPort());
} else {
builder = RedisURI.Builder.sentinel(hostAndPort.getHostText());
}
} else {
if (hostAndPort.hasPort()) {
builder.withSentinel(hostAndPort.getHostText(), hostAndPort.getPort());
} else {
builder.withSentinel(hostAndPort.getHostText());
}
}
}
}
LettuceAssert.notNull(builder, "Invalid URI, cannot get host part");
if (isNotEmpty(masterId)) {
builder.withSentinelMasterId(masterId);
}
if (uri.getScheme().equals(URI_SCHEME_REDIS_SENTINEL_SECURE)) {
builder.withSsl(true);
}
return builder;
}
private static boolean isSentinel(String scheme) {
return URI_SCHEME_REDIS_SENTINEL.equals(scheme) || URI_SCHEME_REDIS_SENTINEL_SECURE.equals(scheme);
}
/**
* Builder for Redis URI.
*/
public static | RedisURI |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/ResourceLoaderAware.java | {
"start": 3055,
"end": 3912
} | interface ____ extends Aware {
/**
* Set the ResourceLoader that this object runs in.
* <p>This might be a ResourcePatternResolver, which can be checked
* through {@code instanceof ResourcePatternResolver}. See also the
* {@code ResourcePatternUtils.getResourcePatternResolver} method.
* <p>Invoked after population of normal bean properties but before an init callback
* like InitializingBean's {@code afterPropertiesSet} or a custom init-method.
* Invoked before ApplicationContextAware's {@code setApplicationContext}.
* @param resourceLoader the ResourceLoader object to be used by this object
* @see org.springframework.core.io.support.ResourcePatternResolver
* @see org.springframework.core.io.support.ResourcePatternUtils#getResourcePatternResolver
*/
void setResourceLoader(ResourceLoader resourceLoader);
}
| ResourceLoaderAware |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/runtime/src/main/java/io/quarkus/resteasy/reactive/server/EndpointDisabled.java | {
"start": 618,
"end": 719
} | class ____ to be disabled
*/
String stringValue();
/**
* Determines if the Resource | is |
java | google__dagger | javatests/dagger/internal/codegen/KeywordValidatorTest.java | {
"start": 2568,
"end": 3655
} | interface ____ {",
" fun int(): Integer", // Keyword parameter name "int"
"}");
CompilerTests.daggerCompiler(componentSrc)
.compile(
subject -> {
switch (CompilerTests.backend(subject)) {
case KSP:
subject
.hasErrorContaining("The name 'int' cannot be used because")
.onSource(componentSrc)
.onLineContaining("fun int(): Integer");
break;
case JAVAC:
subject
.hasErrorCount(1)
.hasErrorContaining("is not abstract and does not override abstract method");
break;
}
});
}
@Test
public void javaKeywordAsParameterName_doesNotFail() {
Source componentSrc =
CompilerTests.kotlinSource(
"test/TestComponent.kt",
"package test",
"",
"import dagger.Component",
"",
"@Component",
" | TestComponent |
java | apache__camel | components/camel-http/src/test/java/org/apache/camel/component/http/HeaderFilteringTest.java | {
"start": 1965,
"end": 4467
} | class ____ {
private static final String BODY = "{\"example\":\"json\"}";
private int port;
private HttpServer server;
@Test
public void shouldFilterIncomingHttpHeadersInProducer() throws Exception {
final DefaultCamelContext context = new DefaultCamelContext();
context.start();
final HttpComponent http = context.getComponent("http", HttpComponent.class);
final Producer producer = http.createProducer(context, "http://localhost:" + port, POST.name(), "/test", null, null,
APPLICATION_JSON.getMimeType(), APPLICATION_JSON.getMimeType(), new RestConfiguration(),
Collections.emptyMap());
final DefaultExchange exchange = new DefaultExchange(context);
final DefaultMessage in = new DefaultMessage(context);
in.setHeader(HOST, "www.not-localhost.io");
in.setBody(BODY);
exchange.setIn(in);
producer.start();
try {
producer.process(exchange);
} catch (final HttpOperationFailedException e) {
fail(e.getMessage() + "\n%s", e.getResponseBody());
}
producer.stop();
context.stop();
}
@BeforeEach
public void startHttpServer() throws IOException {
server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
server.createContext("/test", this::handleTest);
server.start();
port = server.getAddress().getPort();
}
@AfterEach
public void stopHttpServer() {
server.stop(0);
}
private void handleTest(final HttpExchange exchange) throws IOException {
try (final OutputStream responseBody = exchange.getResponseBody()) {
try {
assertThat(exchange.getRequestBody())
.hasSameContentAs(new ByteArrayInputStream(BODY.getBytes(StandardCharsets.UTF_8)));
assertThat(exchange.getRequestHeaders()).containsEntry(HOST,
Collections.singletonList("localhost:" + port));
exchange.sendResponseHeaders(200, 0);
} catch (final AssertionError error) {
final String failure = ExceptionHelper.stackTraceToString(error);
final byte[] failureBytes = failure.getBytes(StandardCharsets.UTF_8);
exchange.sendResponseHeaders(500, failureBytes.length);
responseBody.write(failureBytes);
}
}
}
}
| HeaderFilteringTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java | {
"start": 1942,
"end": 4751
} | class ____ extends BaseRouterPoliciesTest {
@BeforeEach
public void setUp() throws Exception {
setPolicy(new PriorityRouterPolicy());
setPolicyInfo(new WeightedPolicyInfo());
Map<SubClusterIdInfo, Float> routerWeights = new HashMap<>();
Map<SubClusterIdInfo, Float> amrmWeights = new HashMap<>();
// simulate 20 subclusters with a 5% chance of being inactive
for (int i = 0; i < 20; i++) {
SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i);
// with 5% omit a subcluster
if (getRand().nextFloat() < 0.95f || i == 5) {
long now = Time.now();
SubClusterInfo federationSubClusterInfo = SubClusterInfo.newInstance(
sc.toId(), "dns1:80", "dns1:81", "dns1:82", "dns1:83",
now - 1000, SubClusterState.SC_RUNNING, now - 2000, generateClusterMetricsInfo(i));
getActiveSubclusters().put(sc.toId(), federationSubClusterInfo);
}
float weight = getRand().nextFloat();
if (i == 5) {
weight = 1.1f; // guaranteed to be the largest.
}
// 5% chance we omit one of the weights
if (i <= 5 || getRand().nextFloat() > 0.05f) {
routerWeights.put(sc, weight);
amrmWeights.put(sc, weight);
}
}
getPolicyInfo().setRouterPolicyWeights(routerWeights);
getPolicyInfo().setAMRMPolicyWeights(amrmWeights);
setupContext();
}
@Test
public void testPickLowestWeight() throws YarnException {
SubClusterId chosen = ((FederationRouterPolicy) getPolicy())
.getHomeSubcluster(getApplicationSubmissionContext(), null);
assertEquals("sc5", chosen.getId());
}
@Test
public void testZeroSubClustersWithPositiveWeight() throws Exception {
Map<SubClusterIdInfo, Float> routerWeights = new HashMap<>();
Map<SubClusterIdInfo, Float> amrmWeights = new HashMap<>();
// Set negative value to all subclusters
for (int i = 0; i < 5; i++) {
SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i);
SubClusterInfo sci = SubClusterInfo.newInstance(
sc.toId(), "dns1:80", "dns1:81", "dns1:82", "dns1:83", SubClusterState.SC_RUNNING,
System.currentTimeMillis(), "something");
getActiveSubclusters().put(sc.toId(), sci);
routerWeights.put(sc, 0.0f);
amrmWeights.put(sc, -1.0f);
}
getPolicyInfo().setRouterPolicyWeights(routerWeights);
getPolicyInfo().setAMRMPolicyWeights(amrmWeights);
FederationPoliciesTestUtil.initializePolicyContext(getPolicy(),
getPolicyInfo(), getActiveSubclusters());
intercept(FederationPolicyException.class,
"No Active Subcluster with weight vector greater than zero.",
() -> ((FederationRouterPolicy) getPolicy())
.getHomeSubcluster(getApplicationSubmissionContext(), null));
}
}
| TestPriorityRouterPolicy |
java | google__guava | android/guava/src/com/google/common/util/concurrent/TimeoutFuture.java | {
"start": 3974,
"end": 5043
} | class ____<V extends @Nullable Object> implements Runnable {
@LazyInit @Nullable TimeoutFuture<V> timeoutFutureRef;
Fire(TimeoutFuture<V> timeoutFuture) {
this.timeoutFutureRef = timeoutFuture;
}
@Override
// TODO: b/227335009 - Maybe change interruption behavior, but it requires thought.
@SuppressWarnings("Interruption")
public void run() {
// If either of these reads return null then we must be after a successful cancel or another
// call to this method.
TimeoutFuture<V> timeoutFuture = timeoutFutureRef;
if (timeoutFuture == null) {
return;
}
@RetainedLocalRef ListenableFuture<V> delegate = timeoutFuture.delegateRef;
if (delegate == null) {
return;
}
/*
* If we're about to complete the TimeoutFuture, we want to release our reference to it.
* Otherwise, we'll pin it (and its result) in memory until the timeout task is GCed. (The
* need to clear our reference to the TimeoutFuture is the reason we use a *static* nested
* | Fire |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 63768,
"end": 64491
} | class ____ {
public final <A> void f1(A transform) {}
public <@ImmutableTypeParameter B, C> C f2(Function<B, C> fn) {
return null;
}
public final <D, E> void f3(Function<D, E> fn) {
// BUG: Diagnostic contains: instantiation of 'B' is mutable
// 'D' is a mutable type variable
f1(f2(fn));
}
}
""")
.doTest();
}
@Test
public void containerOf_extendsThreadSafe() {
compilationHelper
.addSourceLines(
"X.java",
"""
import com.google.errorprone.annotations.Immutable;
@Immutable
| Test |
java | jhy__jsoup | src/test/java/org/jsoup/integration/Benchmark.java | {
"start": 186,
"end": 1067
} | class ____ {
public static void run(Runnable a, Runnable b, int count) {
long aMillis;
long bMillis;
print("Running test A (x%d)", count);
aMillis = time(a, count);
print("Running test B");
bMillis = time(b, count);
print("\nResults:");
print("A: %.2fs", aMillis / 1000f);
print("B: %.2fs", bMillis / 1000f);
print("\nB ran in %.2f %% time of A\n", (bMillis *1f / aMillis * 1f) * 100f);
}
private static long time(Runnable test, int count) {
Date start = new Date();
for (int i = 0; i < count; i++) {
test.run();
}
Date end = new Date();
return end.getTime() - start.getTime();
}
private static void print(String msgFormat, Object... msgParams) {
System.out.println(String.format(msgFormat, msgParams));
}
}
| Benchmark |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/multipleiterates/User.java | {
"start": 712,
"end": 1357
} | class ____ {
private Integer id;
private String name;
private String[] firstAttr;
private String[] secondAttr;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String[] getFirstAttr() {
return firstAttr;
}
public void setFirstAttr(String[] firstAttr) {
this.firstAttr = firstAttr;
}
public String[] getSecondAttr() {
return secondAttr;
}
public void setSecondAttr(String[] secondAttr) {
this.secondAttr = secondAttr;
}
}
| User |
java | apache__flink | flink-dstl/flink-dstl-dfs/src/test/java/org/apache/flink/changelog/fs/ChangelogStreamHandleReaderWithCacheTest.java | {
"start": 1832,
"end": 4671
} | class ____ {
@TempDir java.nio.file.Path tempFolder;
@Test
void testCloseStreamTwice() throws Exception {
String tempFolderPath = tempFolder.toUri().getPath();
registerFileSystem(
new LocalFileSystem() {
@Override
public boolean isDistributedFS() {
return true;
}
},
tempFolder.toUri().getScheme());
byte[] data = {0x00}; // not compressed, empty data
Path handlePath = new Path(tempFolderPath, UUID.randomUUID().toString());
FileStateHandle stateHandle = prepareFileStateHandle(handlePath, data);
Configuration configuration = new Configuration();
configuration.set(CACHE_IDLE_TIMEOUT, Duration.ofDays(365)); // cache forever
configuration.set(CoreOptions.TMP_DIRS, tempFolderPath);
try (ChangelogStreamHandleReaderWithCache reader =
new ChangelogStreamHandleReaderWithCache(configuration)) {
DataInputStream inputStream = reader.openAndSeek(stateHandle, 0L);
inputStream.close();
inputStream.close(); // close twice
reader.openAndSeek(stateHandle, 0L); // should not throw FileNotFoundException
}
}
private FileStateHandle prepareFileStateHandle(Path path, byte[] data) throws IOException {
try (InputStream inputStream = new ByteArrayInputStream(data);
OutputStream outputStream =
path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE)) {
IOUtils.copyBytes(inputStream, outputStream);
}
return new FileStateHandle(path, data.length);
}
private static void registerFileSystem(FileSystem fs, String scheme) {
FileSystem.initialize(
new Configuration(),
new TestingPluginManager(
singletonMap(
FileSystemFactory.class,
Collections.singleton(
new FileSystemFactory() {
@Override
public FileSystem create(URI fsUri) {
return fs;
}
@Override
public String getScheme() {
return scheme;
}
})
.iterator())));
}
}
| ChangelogStreamHandleReaderWithCacheTest |
java | apache__camel | components/camel-nitrite/src/main/java/org/apache/camel/component/nitrite/NitriteConsumer.java | {
"start": 1171,
"end": 1997
} | class ____ extends DefaultConsumer {
private final NitriteEndpoint endpoint;
private NitriteChangeListener changeListener;
public NitriteConsumer(NitriteEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.endpoint = endpoint;
}
@Override
protected void doInit() throws Exception {
super.doInit();
changeListener = new NitriteChangeListener();
}
@Override
protected void doStart() throws Exception {
super.doStart();
endpoint.getNitriteCollection().register(changeListener);
}
@Override
protected void doStop() throws Exception {
if (changeListener != null) {
endpoint.getNitriteCollection().deregister(changeListener);
}
super.doStop();
}
private | NitriteConsumer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java | {
"start": 1771,
"end": 6235
} | class ____ extends ValuesSourceAggregationBuilder<CountedTermsAggregationBuilder> {
public static final String NAME = "counted_terms";
static final ValuesSourceRegistry.RegistryKey<CountedTermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
NAME,
CountedTermsAggregatorSupplier.class
);
public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
public static final ObjectParser<CountedTermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
NAME,
CountedTermsAggregationBuilder::new
);
static {
ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
PARSER.declareInt(CountedTermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME);
}
// see TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS
private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(1, 0, 10, -1);
public CountedTermsAggregationBuilder(String name) {
super(name);
}
public CountedTermsAggregationBuilder(
ValuesSourceAggregationBuilder<CountedTermsAggregationBuilder> clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metadata
) {
super(clone, factoriesBuilder, metadata);
}
public CountedTermsAggregationBuilder(StreamInput in) throws IOException {
super(in);
bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in);
}
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
CountedTermsAggregatorFactory.registerAggregators(builder);
}
@Override
public boolean supportsSampling() {
return true;
}
public CountedTermsAggregationBuilder size(int size) {
if (size <= 0) {
throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
}
bucketCountThresholds.setRequiredSize(size);
return this;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_12_0;
}
@Override
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
return new CountedTermsAggregationBuilder(this, factoriesBuilder, metadata);
}
@Override
public BucketCardinality bucketCardinality() {
return BucketCardinality.MANY;
}
@Override
public String getType() {
return NAME;
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
bucketCountThresholds.writeTo(out);
}
@Override
protected ValuesSourceType defaultValueSourceType() {
return CoreValuesSourceType.KEYWORD;
}
@Override
protected ValuesSourceAggregatorFactory innerBuild(
AggregationContext context,
ValuesSourceConfig config,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder
) throws IOException {
CountedTermsAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
return new CountedTermsAggregatorFactory(
name,
config,
bucketCountThresholds,
context,
parent,
subFactoriesBuilder,
metadata,
aggregatorSupplier
);
}
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
// expose only size in XContent as only size can be set externally
builder.field(REQUIRED_SIZE_FIELD_NAME.getPreferredName(), bucketCountThresholds.getRequiredSize());
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (super.equals(o) == false) {
return false;
}
CountedTermsAggregationBuilder that = (CountedTermsAggregationBuilder) o;
return Objects.equals(bucketCountThresholds, that.bucketCountThresholds);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), bucketCountThresholds);
}
}
| CountedTermsAggregationBuilder |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/integer/AtomicIntegerAssert_overridingErrorMessage_Test.java | {
"start": 958,
"end": 2691
} | class ____ {
@Test
void should_honor_custom_error_message_set_with_withFailMessage() {
// GIVEN
String error = "ssss";
// WHEN
ThrowingCallable code = () -> assertThat(new AtomicInteger(0)).withFailMessage(error)
.hasValueLessThan(-1);
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessageContaining(error);
}
@Test
void should_honor_custom_error_message_set_with_overridingErrorMessage() {
// GIVEN
String error = "ssss";
// WHEN
ThrowingCallable code = () -> assertThat(new AtomicInteger(0)).overridingErrorMessage(error)
.hasValueLessThan(-1);
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessageContaining(error);
}
@Test
void should_honor_custom_error_message_set_with_withFailMessage_using_supplier() {
// GIVEN
String error = "ssss";
// WHEN
ThrowingCallable code = () -> assertThat(new AtomicInteger(0)).withFailMessage(() -> error)
.hasValueLessThan(-1);
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessageContaining(error);
}
@Test
void should_honor_custom_error_message_set_with_overridingErrorMessage_using_supplier() {
// GIVEN
String error = "ssss";
// WHEN
ThrowingCallable code = () -> assertThat(new AtomicInteger(0)).overridingErrorMessage(() -> error)
.hasValueLessThan(-1);
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessageContaining(error);
}
}
| AtomicIntegerAssert_overridingErrorMessage_Test |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/shareddata/Shareable.java | {
"start": 780,
"end": 1051
} | interface ____ then you
* will be able to add it to {@link io.vertx.core.shareddata.LocalMap} instances.
* <p>
* Mutable object that you want to store in a {@link io.vertx.core.shareddata.LocalMap}
* should override {@link Shareable#copy()} method.
* <p>
* Use this | and |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/util/InstantFormatter.java | {
"start": 6107,
"end": 6419
} | class ____ implements FormatterFactory {
@Override
public Formatter createIfSupported(final String pattern, final Locale locale, final TimeZone timeZone) {
return new JavaDateTimeFormatter(pattern, locale, timeZone);
}
}
private static final | JavaDateTimeFormatterFactory |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/system/JavaVersion.java | {
"start": 1249,
"end": 3501
} | enum ____ {
/**
* Java 17.
* @since 2.5.3
*/
SEVENTEEN("17", Console.class, "charset"),
/**
* Java 18.
* @since 2.5.11
*/
EIGHTEEN("18", Duration.class, "isPositive"),
/**
* Java 19.
* @since 2.6.12
*/
NINETEEN("19", Future.class, "state"),
/**
* Java 20.
* @since 2.7.13
*/
TWENTY("20", Class.class, "accessFlags"),
/**
* Java 21.
* @since 2.7.16
*/
TWENTY_ONE("21", SortedSet.class, "getFirst"),
/**
* Java 22.
* @since 3.2.4
*/
TWENTY_TWO("22", Console.class, "isTerminal"),
/**
* Java 23.
* @since 3.2.9
*/
TWENTY_THREE("23", NumberFormat.class, "isStrict"),
/**
* Java 24.
* @since 3.4.3
*/
TWENTY_FOUR("24", Reader.class, "of", CharSequence.class),
/**
* Java 25.
* @since 3.5.7
*/
TWENTY_FIVE("25", Reader.class, "readAllLines");
private final String name;
private final boolean available;
private final Class<?> versionSpecificClass;
JavaVersion(String name, Class<?> versionSpecificClass, String versionSpecificMethod, Class<?>... paramTypes) {
this.name = name;
this.versionSpecificClass = versionSpecificClass;
this.available = ClassUtils.hasMethod(versionSpecificClass, versionSpecificMethod, paramTypes);
}
@Override
public String toString() {
return this.name;
}
/**
* Returns the {@link JavaVersion} of the current runtime.
* @return the {@link JavaVersion}
*/
public static JavaVersion getJavaVersion() {
List<JavaVersion> candidates = Arrays.asList(JavaVersion.values());
Collections.reverse(candidates);
for (JavaVersion candidate : candidates) {
if (candidate.available) {
return candidate;
}
}
return SEVENTEEN;
}
/**
* Return if this version is equal to or newer than a given version.
* @param version the version to compare
* @return {@code true} if this version is equal to or newer than {@code version}
*/
public boolean isEqualOrNewerThan(JavaVersion version) {
return compareTo(version) >= 0;
}
/**
* Return if this version is older than a given version.
* @param version the version to compare
* @return {@code true} if this version is older than {@code version}
*/
public boolean isOlderThan(JavaVersion version) {
return compareTo(version) < 0;
}
static | JavaVersion |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/tsdb/TSDBSyntheticIdFieldsProducer.java | {
"start": 17243,
"end": 17483
} | class ____ extends BaseTermsEnum {
/**
* Holds all doc values that composed the synthetic _id
*/
private final DocValuesHolder docValues;
/**
* Current synthetic term the | SyntheticIdTermsEnum |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/CustomNonBlockingReturnTypeTest.java | {
"start": 4471,
"end": 4578
} | interface ____ {
String getMessage();
}
@Provider
@Dependent
public static | HasMessage |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/main/java/io/quarkus/websockets/next/deployment/devui/WebSocketServerDevUIProcessor.java | {
"start": 1091,
"end": 5956
} | class ____ {
@BuildStep(onlyIf = IsLocalDevelopment.class)
public void pages(List<WebSocketEndpointBuildItem> endpoints, List<GeneratedEndpointBuildItem> generatedEndpoints,
BuildProducer<CardPageBuildItem> cardPages) {
CardPageBuildItem pageBuildItem = new CardPageBuildItem();
pageBuildItem.addBuildTimeData("endpoints", createEndpointsJson(endpoints, generatedEndpoints));
pageBuildItem.addPage(Page.webComponentPageBuilder()
.title("Server Endpoints")
.icon("font-awesome-solid:plug")
.componentLink("qwc-wsn-endpoints.js")
.staticLabel(String.valueOf(endpoints.stream().filter(WebSocketEndpointBuildItem::isServer).count())));
cardPages.produce(pageBuildItem);
}
@BuildStep(onlyIf = IsLocalDevelopment.class)
JsonRPCProvidersBuildItem rpcProvider() {
return new JsonRPCProvidersBuildItem(WebSocketNextJsonRPCService.class);
}
private List<Map<String, Object>> createEndpointsJson(List<WebSocketEndpointBuildItem> endpoints,
List<GeneratedEndpointBuildItem> generatedEndpoints) {
List<Map<String, Object>> json = new ArrayList<>();
for (WebSocketEndpointBuildItem endpoint : endpoints.stream().filter(WebSocketEndpointBuildItem::isServer)
.sorted(Comparator.comparing(e -> e.path))
.collect(Collectors.toList())) {
Map<String, Object> endpointJson = new HashMap<>();
String clazz = endpoint.bean.getImplClazz().name().toString();
endpointJson.put("clazz", clazz);
endpointJson.put("generatedClazz",
generatedEndpoints.stream().filter(ge -> ge.endpointClassName.equals(clazz)).findFirst()
.orElseThrow().generatedClassName);
endpointJson.put("path", WebSocketProcessor.getOriginalPath(endpoint.path));
endpointJson.put("executionMode", endpoint.inboundProcessingMode.toString());
List<Map<String, Object>> callbacks = new ArrayList<>();
addCallback(endpoint.onOpen, callbacks);
addCallback(endpoint.onBinaryMessage, callbacks);
addCallback(endpoint.onTextMessage, callbacks);
addCallback(endpoint.onPingMessage, callbacks);
addCallback(endpoint.onPongMessage, callbacks);
addCallback(endpoint.onClose, callbacks);
for (Callback c : endpoint.onErrors) {
addCallback(c, callbacks);
}
endpointJson.put("callbacks", callbacks);
json.add(endpointJson);
}
return json;
}
private void addCallback(Callback callback, List<Map<String, Object>> callbacks) {
if (callback != null) {
callbacks.add(Map.of("annotation", callback.annotation.toString(), "method", methodToString(callback.method)));
}
}
private String methodToString(MethodInfo method) {
StringBuilder builder = new StringBuilder();
builder.append(typeToString(method.returnType())).append(' ').append(method.name()).append('(');
for (Iterator<MethodParameterInfo> it = method.parameters().iterator(); it.hasNext();) {
MethodParameterInfo p = it.next();
builder.append(typeToString(p.type()));
builder.append(' ');
builder.append(p.name() != null ? p.name() : "arg" + p.position());
if (it.hasNext()) {
builder.append(", ");
}
}
builder.append(')');
if (!method.exceptions().isEmpty()) {
builder.append(" throws ");
for (Iterator<Type> it = method.exceptions().iterator(); it.hasNext();) {
builder.append(typeToString(it.next()));
if (it.hasNext()) {
builder.append(", ");
}
}
}
return builder.toString();
}
private String typeToString(Type type) {
if (type.kind() == Kind.PARAMETERIZED_TYPE) {
ParameterizedType parameterizedType = type.asParameterizedType();
StringBuilder builder = new StringBuilder();
builder.append(parameterizedType.name().withoutPackagePrefix());
if (!parameterizedType.arguments().isEmpty()) {
builder.append('<');
for (Iterator<Type> it = parameterizedType.arguments().iterator(); it.hasNext();) {
builder.append(typeToString(it.next()));
if (it.hasNext()) {
builder.append(", ");
}
}
builder.append('>');
}
return builder.toString();
} else {
return type.name().withoutPackagePrefix();
}
}
}
| WebSocketServerDevUIProcessor |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java | {
"start": 1154,
"end": 3793
} | class ____ {
public static final String TOKEN_GROUPS = "tokenGroups";
public static String convertToString(byte[] bytes) {
/*
* The binary data structure, from http://msdn.microsoft.com/en-us/library/cc230371(PROT.10).aspx:
* byte[0] - Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of
* the SID structure. This value MUST be set to 0x01.
* byte[1] - SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of
* elements in the SubAuthority array. The maximum number of elements allowed is 15.
* byte[2-7] - IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that contains
* information, which indicates the authority under which the SID was created. It describes the
* entity that created the SID and manages the account.
* Six element arrays of 8-bit unsigned integers that specify the top-level authority
* big-endian!
* and then - SubAuthority (variable): A variable length array of unsigned 32-bit integers that
* uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined
* by SubAuthorityCount. little-endian!
*/
if ((bytes == null) || (bytes.length < 8)) {
throw new IllegalArgumentException("Invalid SID");
}
char[] hex = Hex.encodeHex(bytes);
StringBuilder sb = new StringBuilder();
// start with 'S'
sb.append('S');
// revision
int revision = Integer.parseInt(new String(hex, 0, 2), 16);
sb.append('-');
sb.append(revision);
// get count
int count = Integer.parseInt(new String(hex, 2, 2), 16);
// check length
if (bytes.length != (8 + count * 4)) {
throw new IllegalArgumentException("Invalid SID");
}
// get authority, big-endian
long authority = Long.parseLong(new String(hex, 4, 12), 16);
sb.append('-');
sb.append(authority);
// sub-authorities, little-endian
for (int i = 0; i < count; i++) {
StringBuilder rid = new StringBuilder();
for (int k = 3; k >= 0; k--) {
rid.append(hex[16 + (i * 8) + (k * 2)]);
rid.append(hex[16 + (i * 8) + (k * 2) + 1]);
}
long subAuthority = Long.parseLong(rid.toString(), 16);
sb.append('-');
sb.append(subAuthority);
}
return sb.toString();
}
}
| ActiveDirectorySIDUtil |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/XChangeEndpointBuilderFactory.java | {
"start": 1579,
"end": 5399
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedXChangeEndpointBuilder advanced() {
return (AdvancedXChangeEndpointBuilder) this;
}
/**
* The currency.
*
* The option is a: <code>org.knowm.xchange.currency.Currency</code>
* type.
*
* Group: producer
*
* @param currency the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder currency(org.knowm.xchange.currency.Currency currency) {
doSetProperty("currency", currency);
return this;
}
/**
* The currency.
*
* The option will be converted to a
* <code>org.knowm.xchange.currency.Currency</code> type.
*
* Group: producer
*
* @param currency the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder currency(String currency) {
doSetProperty("currency", currency);
return this;
}
/**
* The currency pair.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param currencyPair the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder currencyPair(String currencyPair) {
doSetProperty("currencyPair", currencyPair);
return this;
}
/**
* The method to execute.
*
* The option is a:
* <code>org.apache.camel.component.xchange.XChangeConfiguration.XChangeMethod</code> type.
*
* Required: true
* Group: producer
*
* @param method the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder method(org.apache.camel.component.xchange.XChangeConfiguration.XChangeMethod method) {
doSetProperty("method", method);
return this;
}
/**
* The method to execute.
*
* The option will be converted to a
* <code>org.apache.camel.component.xchange.XChangeConfiguration.XChangeMethod</code> type.
*
* Required: true
* Group: producer
*
* @param method the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder method(String method) {
doSetProperty("method", method);
return this;
}
/**
* The service to call.
*
* The option is a:
* <code>org.apache.camel.component.xchange.XChangeConfiguration.XChangeService</code> type.
*
* Required: true
* Group: producer
*
* @param service the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder service(org.apache.camel.component.xchange.XChangeConfiguration.XChangeService service) {
doSetProperty("service", service);
return this;
}
/**
* The service to call.
*
* The option will be converted to a
* <code>org.apache.camel.component.xchange.XChangeConfiguration.XChangeService</code> type.
*
* Required: true
* Group: producer
*
* @param service the value to set
* @return the dsl builder
*/
default XChangeEndpointBuilder service(String service) {
doSetProperty("service", service);
return this;
}
}
/**
* Advanced builder for endpoint for the XChange component.
*/
public | XChangeEndpointBuilder |
java | square__retrofit | samples/src/main/java/com/example/retrofit/DeserializeErrorBody.java | {
"start": 1125,
"end": 1176
} | class ____ {
// normal fields...
}
static | User |
java | apache__flink | flink-datastream/src/main/java/org/apache/flink/datastream/impl/stream/AbstractDataStream.java | {
"start": 1316,
"end": 1456
} | class ____ all streams.
*
* <p>Note: This is only used for internal implementation. It must not leak to user face api.
*/
public abstract | for |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/types/ClassLogicalTypeConverter.java | {
"start": 1092,
"end": 1307
} | class ____ {
@Deprecated
public static Class getDefaultExternalClassForType(LogicalType type) {
return TypeConversions.fromLogicalToDataType(type).getConversionClass();
}
}
| ClassLogicalTypeConverter |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/methodgenerics/wildcards/SourceWildCardExtendsMapper.java | {
"start": 579,
"end": 998
} | class ____ {
private final Wrapper<TypeC> propB;
private final Wrapper<TypeC> propC;
public Source(Wrapper<TypeC> propB, Wrapper<TypeC> propC) {
this.propB = propB;
this.propC = propC;
}
public Wrapper<TypeC> getPropB() {
return propB;
}
public Wrapper<TypeC> getPropC() {
return propC;
}
}
| Source |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.