language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/TestBoundedTwoInputOperator.java | {
"start": 1215,
"end": 2511
} | class ____ extends AbstractStreamOperator<String>
implements TwoInputStreamOperator<String, String, String>, BoundedMultiInput {
private static final long serialVersionUID = 1L;
private final String name;
public TestBoundedTwoInputOperator(String name) {
this.name = name;
}
@Override
public void processElement1(StreamRecord<String> element) {
output.collect(element.replace("[" + name + "-1]: " + element.getValue()));
}
@Override
public void processElement2(StreamRecord<String> element) {
output.collect(element.replace("[" + name + "-2]: " + element.getValue()));
}
@Override
public void endInput(int inputId) {
output("[" + name + "-" + inputId + "]: End of input");
}
@Override
public void finish() throws Exception {
ProcessingTimeService timeService = getProcessingTimeService();
timeService.registerTimer(
timeService.getCurrentProcessingTime(),
t -> output("[" + name + "]: Timer registered in close"));
output.collect(new StreamRecord<>("[" + name + "]: Finish"));
super.finish();
}
private void output(String record) {
output.collect(new StreamRecord<>(record));
}
}
| TestBoundedTwoInputOperator |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/broker/GetBrokerConfigCommand.java | {
"start": 1779,
"end": 5414
} | class ____ implements SubCommand {
@Override
public String commandName() {
return "getBrokerConfig";
}
@Override
public String commandDesc() {
return "Get broker config by cluster or special broker.";
}
@Override
public Options buildCommandlineOptions(final Options options) {
OptionGroup group = new OptionGroup();
group.addOption(new Option("b", "brokerAddr", true, "get which broker"));
group.addOption(new Option("c", "clusterName", true, "get which cluster"));
group.setRequired(true);
options.addOptionGroup(group);
return options;
}
@Override
public void execute(final CommandLine commandLine, final Options options,
final RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
if (commandLine.hasOption('b')) {
String brokerAddr = commandLine.getOptionValue('b').trim();
defaultMQAdminExt.start();
getAndPrint(defaultMQAdminExt,
String.format("============%s============\n", brokerAddr),
brokerAddr);
} else if (commandLine.hasOption('c')) {
String clusterName = commandLine.getOptionValue('c').trim();
defaultMQAdminExt.start();
Map<String, List<String>> masterAndSlaveMap
= CommandUtil.fetchMasterAndSlaveDistinguish(defaultMQAdminExt, clusterName);
for (String masterAddr : masterAndSlaveMap.keySet()) {
if (masterAddr == null) {
continue;
}
getAndPrint(
defaultMQAdminExt,
String.format("============Master: %s============\n", masterAddr),
masterAddr
);
for (String slaveAddr : masterAndSlaveMap.get(masterAddr)) {
if (slaveAddr == null) {
continue;
}
getAndPrint(
defaultMQAdminExt,
String.format("============My Master: %s=====Slave: %s============\n", masterAddr, slaveAddr),
slaveAddr
);
}
}
}
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
protected void getAndPrint(final MQAdminExt defaultMQAdminExt, final String printPrefix, final String addr)
throws InterruptedException, RemotingConnectException,
UnsupportedEncodingException, RemotingTimeoutException,
MQBrokerException, RemotingSendRequestException {
System.out.print(printPrefix);
if (addr.equals(CommandUtil.NO_MASTER_PLACEHOLDER)) {
return;
}
Properties properties = defaultMQAdminExt.getBrokerConfig(addr);
if (properties == null) {
System.out.printf("Broker[%s] has no config property!\n", addr);
return;
}
for (Entry<Object, Object> entry : properties.entrySet()) {
System.out.printf("%-50s= %s\n", entry.getKey(), entry.getValue());
}
System.out.printf("%n");
}
}
| GetBrokerConfigCommand |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/UnnecessarilyUsedValueTest.java | {
"start": 858,
"end": 1223
} | class ____ {
private final BugCheckerRefactoringTestHelper helper =
BugCheckerRefactoringTestHelper.newInstance(UnnecessarilyUsedValue.class, getClass());
@Test
public void methods() {
helper
.addInputLines(
"Client.java",
"""
package com.google.frobber;
public final | UnnecessarilyUsedValueTest |
java | netty__netty | codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspDecoderTest.java | {
"start": 1172,
"end": 2836
} | class ____ {
/**
* There was a problem when an ANNOUNCE request was issued by the server,
* i.e. entered through the response decoder. First the decoder failed to
* parse the ANNOUNCE request, then it stopped receiving any more
* responses. This test verifies that the issue is solved.
*/
@Test
public void testReceiveAnnounce() {
byte[] data1 = ("ANNOUNCE rtsp://172.20.184.218:554/d3abaaa7-65f2-"
+ "42b4-8d6b-379f492fcf0f RTSP/1.0\r\n"
+ "CSeq: 2\r\n"
+ "Session: 2777476816092819869\r\n"
+ "x-notice: 5402 \"Session Terminated by Server\" "
+ "event-date=20150514T075303Z\r\n"
+ "Range: npt=0\r\n\r\n").getBytes();
byte[] data2 = ("RTSP/1.0 200 OK\r\n" +
"Server: Orbit2x\r\n" +
"CSeq: 172\r\n" +
"Session: 2547019973447939919\r\n" +
"\r\n").getBytes();
EmbeddedChannel ch = new EmbeddedChannel(new RtspDecoder(),
new HttpObjectAggregator(1048576));
ch.writeInbound(Unpooled.wrappedBuffer(data1),
Unpooled.wrappedBuffer(data2));
HttpObject res1 = ch.readInbound();
assertNotNull(res1);
assertTrue(res1 instanceof FullHttpRequest);
((FullHttpRequest) res1).release();
HttpObject res2 = ch.readInbound();
assertNotNull(res2);
assertTrue(res2 instanceof FullHttpResponse);
((FullHttpResponse) res2).release();
}
}
| RtspDecoderTest |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/ValueObjectBinder.java | {
"start": 9833,
"end": 12004
} | class ____<T> extends ValueObject<T> {
private static final Annotation[] ANNOTATION_ARRAY = new Annotation[0];
private final List<ConstructorParameter> constructorParameters;
private KotlinValueObject(Constructor<T> primaryConstructor, KFunction<T> kotlinConstructor,
ResolvableType type) {
super(primaryConstructor);
this.constructorParameters = parseConstructorParameters(kotlinConstructor, type);
}
private List<ConstructorParameter> parseConstructorParameters(KFunction<T> kotlinConstructor,
ResolvableType type) {
List<KParameter> parameters = kotlinConstructor.getParameters();
List<ConstructorParameter> result = new ArrayList<>(parameters.size());
for (KParameter parameter : parameters) {
String name = getParameterName(parameter);
ResolvableType parameterType = ResolvableType
.forType(ReflectJvmMapping.getJavaType(parameter.getType()), type);
Annotation[] annotations = parameter.getAnnotations().toArray(ANNOTATION_ARRAY);
Assert.state(name != null, "'name' must not be null");
result.add(new ConstructorParameter(name, parameterType, annotations));
}
return Collections.unmodifiableList(result);
}
private @Nullable String getParameterName(KParameter parameter) {
return MergedAnnotations.from(parameter, parameter.getAnnotations().toArray(ANNOTATION_ARRAY))
.get(Name.class)
.getValue(MergedAnnotation.VALUE, String.class)
.orElseGet(parameter::getName);
}
@Override
List<ConstructorParameter> getConstructorParameters() {
return this.constructorParameters;
}
static <T> @Nullable ValueObject<T> get(Constructor<T> bindConstructor, ResolvableType type,
ParameterNameDiscoverer parameterNameDiscoverer) {
KFunction<T> kotlinConstructor = ReflectJvmMapping.getKotlinFunction(bindConstructor);
if (kotlinConstructor != null) {
return new KotlinValueObject<>(bindConstructor, kotlinConstructor, type);
}
return DefaultValueObject.get(bindConstructor, type, parameterNameDiscoverer);
}
}
/**
* A default {@link ValueObject} implementation that uses only standard Java
* reflection calls.
*/
private static final | KotlinValueObject |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2KinesisComponentBuilderFactory.java | {
"start": 1396,
"end": 1910
} | interface ____ {
/**
* AWS Kinesis (camel-aws2-kinesis)
* Consume and produce records from and to AWS Kinesis Streams.
*
* Category: cloud,messaging
* Since: 3.2
* Maven coordinates: org.apache.camel:camel-aws2-kinesis
*
* @return the dsl builder
*/
static Aws2KinesisComponentBuilder aws2Kinesis() {
return new Aws2KinesisComponentBuilderImpl();
}
/**
* Builder for the AWS Kinesis component.
*/
| Aws2KinesisComponentBuilderFactory |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/scope/custom/BeanA.java | {
"start": 74,
"end": 111
} | class ____ extends AbstractBean {
}
| BeanA |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SumOverTime.java | {
"start": 1492,
"end": 3873
} | class ____ extends TimeSeriesAggregateFunction implements OptionalArgument {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"SumOverTime",
SumOverTime::new
);
@FunctionInfo(
returnType = { "double", "long" },
description = "Calculates the sum over time value of a field.",
type = FunctionType.TIME_SERIES_AGGREGATE,
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW, version = "9.2.0") },
preview = true,
examples = { @Example(file = "k8s-timeseries", tag = "sum_over_time") }
)
public SumOverTime(
Source source,
@Param(name = "field", type = { "aggregate_metric_double", "double", "integer", "long" }) Expression field,
@Param(
name = "window",
type = { "time_duration" },
description = "the time window over which to compute the standard deviation",
optional = true
) Expression window
) {
this(source, field, Literal.TRUE, Objects.requireNonNullElse(window, NO_WINDOW));
}
public SumOverTime(Source source, Expression field, Expression filter, Expression window) {
super(source, field, filter, window, emptyList());
}
private SumOverTime(StreamInput in) throws IOException {
super(in);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public SumOverTime withFilter(Expression filter) {
return new SumOverTime(source(), field(), filter, window());
}
@Override
protected NodeInfo<SumOverTime> info() {
return NodeInfo.create(this, SumOverTime::new, field(), filter(), window());
}
@Override
public SumOverTime replaceChildren(List<Expression> newChildren) {
return new SumOverTime(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2));
}
@Override
protected TypeResolution resolveType() {
return perTimeSeriesAggregation().resolveType();
}
@Override
public DataType dataType() {
return perTimeSeriesAggregation().dataType();
}
@Override
public Sum perTimeSeriesAggregation() {
return new Sum(source(), field(), filter(), window(), SummationMode.LOSSY_LITERAL);
}
}
| SumOverTime |
java | apache__rocketmq | client/src/test/java/org/apache/rocketmq/acl/common/AclUtilsTest.java | {
"start": 1423,
"end": 9708
} | class ____ {
@Test
public void testGetAddresses() {
String address = "1.1.1.{1,2,3,4}";
String[] addressArray = AclUtils.getAddresses(address, "{1,2,3,4}");
List<String> newAddressList = new ArrayList<>(Arrays.asList(addressArray));
List<String> addressList = new ArrayList<>();
addressList.add("1.1.1.1");
addressList.add("1.1.1.2");
addressList.add("1.1.1.3");
addressList.add("1.1.1.4");
Assert.assertEquals(newAddressList, addressList);
// IPv6 test
String ipv6Address = "1:ac41:9987::bb22:666:{1,2,3,4}";
String[] ipv6AddressArray = AclUtils.getAddresses(ipv6Address, "{1,2,3,4}");
List<String> newIPv6AddressList = new ArrayList<>();
Collections.addAll(newIPv6AddressList, ipv6AddressArray);
List<String> ipv6AddressList = new ArrayList<>();
ipv6AddressList.add("1:ac41:9987::bb22:666:1");
ipv6AddressList.add("1:ac41:9987::bb22:666:2");
ipv6AddressList.add("1:ac41:9987::bb22:666:3");
ipv6AddressList.add("1:ac41:9987::bb22:666:4");
Assert.assertEquals(newIPv6AddressList, ipv6AddressList);
}
@Test
public void testIsScope_StringArray() {
String address = "12";
for (int i = 0; i < 6; i++) {
boolean isScope = AclUtils.isScope(address, 4);
if (i == 3) {
Assert.assertTrue(isScope);
} else {
Assert.assertFalse(isScope);
}
address = address + ".12";
}
}
@Test
public void testIsScope_Array() {
String[] address = StringUtils.split("12.12.12.12", ".");
boolean isScope = AclUtils.isScope(address, 4);
Assert.assertTrue(isScope);
isScope = AclUtils.isScope(address, 3);
Assert.assertTrue(isScope);
address = StringUtils.split("12.12.1222.1222", ".");
isScope = AclUtils.isScope(address, 4);
Assert.assertFalse(isScope);
isScope = AclUtils.isScope(address, 3);
Assert.assertFalse(isScope);
// IPv6 test
address = StringUtils.split("1050:0000:0000:0000:0005:0600:300c:326b", ":");
isScope = AclUtils.isIPv6Scope(address, 8);
Assert.assertTrue(isScope);
isScope = AclUtils.isIPv6Scope(address, 4);
Assert.assertTrue(isScope);
address = StringUtils.split("1050:9876:0000:0000:0005:akkg:300c:326b", ":");
isScope = AclUtils.isIPv6Scope(address, 8);
Assert.assertFalse(isScope);
isScope = AclUtils.isIPv6Scope(address, 4);
Assert.assertTrue(isScope);
address = StringUtils.split(AclUtils.expandIP("1050::0005:akkg:300c:326b", 8), ":");
isScope = AclUtils.isIPv6Scope(address, 8);
Assert.assertFalse(isScope);
isScope = AclUtils.isIPv6Scope(address, 4);
Assert.assertTrue(isScope);
}
@Test
public void testIsScope_String() {
for (int i = 0; i < 256; i++) {
boolean isScope = AclUtils.isScope(i + "");
Assert.assertTrue(isScope);
}
boolean isScope = AclUtils.isScope("-1");
Assert.assertFalse(isScope);
isScope = AclUtils.isScope("256");
Assert.assertFalse(isScope);
}
@Test
public void testIsScope_Integral() {
for (int i = 0; i < 256; i++) {
boolean isScope = AclUtils.isScope(i);
Assert.assertTrue(isScope);
}
boolean isScope = AclUtils.isScope(-1);
Assert.assertFalse(isScope);
isScope = AclUtils.isScope(256);
Assert.assertFalse(isScope);
// IPv6 test
int min = Integer.parseInt("0", 16);
int max = Integer.parseInt("ffff", 16);
for (int i = min; i < max + 1; i++) {
isScope = AclUtils.isIPv6Scope(i);
Assert.assertTrue(isScope);
}
isScope = AclUtils.isIPv6Scope(-1);
Assert.assertFalse(isScope);
isScope = AclUtils.isIPv6Scope(max + 1);
Assert.assertFalse(isScope);
}
@Test
public void testIsAsterisk() {
boolean isAsterisk = AclUtils.isAsterisk("*");
Assert.assertTrue(isAsterisk);
isAsterisk = AclUtils.isAsterisk(",");
Assert.assertFalse(isAsterisk);
}
@Test
public void testIsComma() {
boolean isColon = AclUtils.isComma(",");
Assert.assertTrue(isColon);
isColon = AclUtils.isComma("-");
Assert.assertFalse(isColon);
}
@Test
public void testIsMinus() {
boolean isMinus = AclUtils.isMinus("-");
Assert.assertTrue(isMinus);
isMinus = AclUtils.isMinus("*");
Assert.assertFalse(isMinus);
}
@Test
public void testV6ipProcess() {
String remoteAddr = "5::7:6:1-200:*";
Assert.assertEquals(AclUtils.v6ipProcess(remoteAddr), "0005:0000:0000:0000:0007:0006");
remoteAddr = "5::7:6:1-200";
Assert.assertEquals(AclUtils.v6ipProcess(remoteAddr), "0005:0000:0000:0000:0000:0007:0006");
remoteAddr = "5::7:6:*";
Assert.assertEquals(AclUtils.v6ipProcess(remoteAddr), "0005:0000:0000:0000:0000:0007:0006");
remoteAddr = "5:7:6:*";
Assert.assertEquals(AclUtils.v6ipProcess(remoteAddr), "0005:0007:0006");
}
@Test
public void testExpandIP() {
Assert.assertEquals(AclUtils.expandIP("::", 8), "0000:0000:0000:0000:0000:0000:0000:0000");
Assert.assertEquals(AclUtils.expandIP("::1", 8), "0000:0000:0000:0000:0000:0000:0000:0001");
Assert.assertEquals(AclUtils.expandIP("3::", 8), "0003:0000:0000:0000:0000:0000:0000:0000");
Assert.assertEquals(AclUtils.expandIP("2::2", 8), "0002:0000:0000:0000:0000:0000:0000:0002");
Assert.assertEquals(AclUtils.expandIP("4::aac4:92", 8), "0004:0000:0000:0000:0000:0000:AAC4:0092");
Assert.assertEquals(AclUtils.expandIP("ab23:56:901a::cc6:765:bb:9011", 8), "AB23:0056:901A:0000:0CC6:0765:00BB:9011");
Assert.assertEquals(AclUtils.expandIP("ab23:56:901a:1:cc6:765:bb:9011", 8), "AB23:0056:901A:0001:0CC6:0765:00BB:9011");
Assert.assertEquals(AclUtils.expandIP("5::7:6", 6), "0005:0000:0000:0000:0007:0006");
}
private static String randomTmpFile() {
String tmpFileName = System.getProperty("java.io.tmpdir");
// https://rationalpi.wordpress.com/2007/01/26/javaiotmpdir-inconsitency/
if (!tmpFileName.endsWith(File.separator)) {
tmpFileName += File.separator;
}
tmpFileName += UUID.randomUUID() + ".yml";
return tmpFileName;
}
@Test
public void getYamlDataIgnoreFileNotFoundExceptionTest() {
JSONObject yamlDataObject = AclUtils.getYamlDataObject("plain_acl.yml", JSONObject.class);
Assert.assertNull(yamlDataObject);
}
@Test
public void getAclRPCHookTest() throws IOException {
try (InputStream is = AclUtilsTest.class.getClassLoader().getResourceAsStream("conf/plain_acl_incomplete.yml")) {
RPCHook incompleteContRPCHook = AclUtils.getAclRPCHook(is);
Assert.assertNull(incompleteContRPCHook);
}
}
@Test
public void testGetAclRPCHookByFileName() {
// Skip this test if running in Bazel, as the resource path is a path inside the JAR.
Assume.assumeTrue(System.getProperty("build.bazel") == null);
RPCHook actual = AclUtils.getAclRPCHook(Objects.requireNonNull(AclUtilsTest.class.getResource("/acl_hook/plain_acl.yml")).getPath());
assertNotNull(actual);
assertTrue(actual instanceof AclClientRPCHook);
assertAclClientRPCHook((AclClientRPCHook) actual);
}
@Test
public void testGetAclRPCHookByInputStream() {
RPCHook actual = AclUtils.getAclRPCHook(Objects.requireNonNull(AclUtilsTest.class.getResourceAsStream("/acl_hook/plain_acl.yml")));
assertNotNull(actual);
assertTrue(actual instanceof AclClientRPCHook);
assertAclClientRPCHook((AclClientRPCHook) actual);
}
private void assertAclClientRPCHook(final AclClientRPCHook actual) {
assertEquals("rocketmq2", actual.getSessionCredentials().getAccessKey());
assertEquals("12345678", actual.getSessionCredentials().getSecretKey());
}
}
| AclUtilsTest |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/CombinePermissionCheckerWithPossessedPermissionTest.java | {
"start": 1006,
"end": 7317
} | class ____ {
private static final AuthData USER_WITH_AUGMENTORS = new AuthData(USER, true);
private static final AuthData ADMIN_WITH_AUGMENTORS = new AuthData(ADMIN, true);
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(IdentityMock.class, AuthData.class, SecurityTestUtils.class));
@Inject
AdminOnlyMethodArgSecuredBean securedBean;
@Test
public void testAccessGrantedByPossessedPermissionAndChecker_allOf() {
var adminWithSecuredPerm = new AuthData(ADMIN, true, new StringPermission("read", "secured"));
var adminWithSecured2Perm = new AuthData(ADMIN, true, new StringPermission("read", "secured2"));
assertSuccess(() -> securedBean.noSecurity("1", "2", 3, 4, 5), "noSecurity", USER_WITH_AUGMENTORS);
assertSuccess(() -> securedBean.noSecurity("1", "2", 3, 4, 5), "noSecurity", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured_allOf(1, 2, 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured2_allOf("1", "2", 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured_allOf(1, 2, 3, 4, 5), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured2_allOf("1", "2", 3, 4, 5), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
assertSuccess(() -> securedBean.secured_allOf(1, 2, 3, 4, 5), "secured", adminWithSecuredPerm);
assertSuccess(() -> securedBean.secured2_allOf("1", "2", 3, 4, 5), "secured2", adminWithSecured2Perm);
// wrong value of the param 'one'
assertFailureFor(() -> securedBean.secured2_allOf("9", "2", 3, 4, 5), ForbiddenException.class,
adminWithSecured2Perm);
// wrong value of the param 'five'
assertFailureFor(() -> securedBean.secured2_allOf("1", "2", 3, 4, 6), ForbiddenException.class,
adminWithSecured2Perm);
// missing string permission "read:secured"
assertFailureFor(() -> securedBean.secured_allOf(1, 2, 3, 4, 5), ForbiddenException.class,
adminWithSecured2Perm);
// missing string permission "read:secured2"
assertFailureFor(() -> securedBean.secured2_allOf("1", "2", 3, 4, 5), ForbiddenException.class,
adminWithSecuredPerm);
}
@Test
public void testAccessGrantedByPossessedPermissionAndChecker_inclusiveAllOf() {
var adminWithSecuredPerm = new AuthData(ADMIN, true, new StringPermission("read", "secured"));
var adminWithSecured2Perm = new AuthData(ADMIN, true, new StringPermission("read", "secured2"));
assertFailureFor(() -> securedBean.secured_inclusiveAllOf(1, 2, 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured2_inclusiveAllOf("1", "2", 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured_inclusiveAllOf(1, 2, 3, 4, 5), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured2_inclusiveAllOf("1", "2", 3, 4, 5), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
assertSuccess(() -> securedBean.secured_inclusiveAllOf(1, 2, 3, 4, 5), "secured", adminWithSecuredPerm);
assertSuccess(() -> securedBean.secured2_inclusiveAllOf("1", "2", 3, 4, 5), "secured2",
adminWithSecured2Perm);
// wrong value of the param 'one'
assertFailureFor(() -> securedBean.secured2_inclusiveAllOf("9", "2", 3, 4, 5), ForbiddenException.class,
adminWithSecured2Perm);
// wrong value of the param 'five'
assertFailureFor(() -> securedBean.secured2_inclusiveAllOf("1", "2", 3, 4, 6), ForbiddenException.class,
adminWithSecured2Perm);
// missing string permission "read:secured"
assertFailureFor(() -> securedBean.secured_inclusiveAllOf(1, 2, 3, 4, 5), ForbiddenException.class,
adminWithSecured2Perm);
// missing string permission "read:secured2"
assertFailureFor(() -> securedBean.secured2_inclusiveAllOf("1", "2", 3, 4, 5), ForbiddenException.class,
adminWithSecuredPerm);
}
@Test
public void testAccessGrantedByPossessedPermissionAndChecker_oneOf() {
var adminWithSecuredPerm = new AuthData(ADMIN, true, new StringPermission("read", "secured"));
var adminWithSecured2Perm = new AuthData(ADMIN, true, new StringPermission("read", "secured2"));
assertFailureFor(() -> securedBean.secured_oneOf(1, 2, 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.secured2_oneOf("1", "2", 3, 4, 5), ForbiddenException.class,
USER_WITH_AUGMENTORS);
assertSuccess(() -> securedBean.secured_oneOf(1, 2, 3, 4, 5), "secured", adminWithSecuredPerm);
assertSuccess(() -> securedBean.secured2_oneOf("1", "2", 3, 4, 5), "secured2", adminWithSecured2Perm);
// wrong value of the param 'one', but has 'read:secured2'
assertSuccess(() -> securedBean.secured2_oneOf("9", "2", 3, 4, 5), "secured2", adminWithSecured2Perm);
// wrong value of the param 'five', but has 'read:secured2'
assertSuccess(() -> securedBean.secured2_oneOf("1", "2", 3, 4, 6), "secured2", adminWithSecured2Perm);
// wrong value of the param 'five' and no 'read:secured2'
assertFailureFor(() -> securedBean.secured2_oneOf("1", "2", 3, 4, 16), ForbiddenException.class,
adminWithSecuredPerm);
// missing string permission "read:secured" and wrong param 'two'
assertFailureFor(() -> securedBean.secured_oneOf(1, 4, 3, 4, 5), ForbiddenException.class,
adminWithSecured2Perm);
// has 'read:secured' but param '3' is wrong
assertSuccess(() -> securedBean.secured_oneOf(1, 4, 6, 4, 5), "secured", adminWithSecuredPerm);
}
@ApplicationScoped
public static | CombinePermissionCheckerWithPossessedPermissionTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/Encoder.java | {
"start": 898,
"end": 1151
} | interface ____ how to convert an object to some binary representation and
* write the result to a {@code ByteBuffer}, ideally without creating temporary objects.
*
* @param <T> the type of objects that the Encoder can encode
* @since 2.6
*/
public | know |
java | google__guice | core/test/com/google/inject/spi/ModuleAnnotatedMethodScannerTest.java | {
"start": 20735,
"end": 22658
} | class ____ extends ModuleAnnotatedMethodScanner {
private final Class<?> classToIgnore;
private int ignoredCounter = 0;
IgnoringScanner(Class<?> classToIgnore) {
this.classToIgnore = classToIgnore;
}
@Override
public Set<? extends Class<? extends Annotation>> annotationClasses() {
return ImmutableSet.of(TestProvides.class);
}
@Override
public <T> Key<T> prepareMethod(
Binder binder, Annotation annotation, Key<T> key, InjectionPoint injectionPoint) {
Method method = (Method) injectionPoint.getMember();
if (method.getDeclaringClass().equals(classToIgnore)) {
ignoredCounter++;
return null;
}
return key;
}
int ignoredCounter() {
return ignoredCounter;
}
}
@Test
public void ignoreMethodsScannedForOverridesSubclass() {
IgnoringScanner scanner = new IgnoringScanner(Subclass.class);
CreationException creationException =
assertThatInjectorCreationFails(ProviderMethodsModule.forModule(new Subclass(), scanner));
assertThat(creationException)
.hasMessageThat()
.contains(
"Overriding @ModuleAnnotatedMethodScannerTest.TestProvides methods is not allowed.");
assertThat(scanner.ignoredCounter()).isEqualTo(1); // checking that there was a method ignored.
}
@Test
public void ignoreMethodsScannedForOverridesSuperclass() {
IgnoringScanner scanner = new IgnoringScanner(Superclass.class);
CreationException creationException =
assertThatInjectorCreationFails(ProviderMethodsModule.forModule(new Subclass(), scanner));
assertThat(creationException)
.hasMessageThat()
.contains(
"Overriding @ModuleAnnotatedMethodScannerTest.TestProvides methods is not allowed.");
assertThat(scanner.ignoredCounter()).isEqualTo(1); // checking that there was a method ignored.
}
static | IgnoringScanner |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/ClassNamePatternConverter.java | {
"start": 2009,
"end": 2443
} | class ____ will be appended.
*/
@Override
public void format(final LogEvent event, final StringBuilder toAppendTo) {
final StackTraceElement element = event.getSource();
if (element == null) {
toAppendTo.append(NA);
} else {
abbreviate(element.getClassName(), toAppendTo);
}
}
@Override
public boolean requiresLocation() {
return true;
}
}
| name |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/vectors/AbstractMaxScoreKnnCollector.java | {
"start": 971,
"end": 1924
} | class ____ extends AbstractKnnCollector {
public static final long LEAST_COMPETITIVE = NeighborQueue.encodeRaw(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
protected AbstractMaxScoreKnnCollector(int k, long visitLimit, KnnSearchStrategy searchStrategy) {
super(k, visitLimit, searchStrategy);
}
/**
* Returns the minimum competitive document score.
* This is used to determine the global competitiveness of documents in the search.
* This may be a competitive score even if the collector hasn't collected k results yet.
*
* @return the minimum competitive document score
*/
public abstract long getMinCompetitiveDocScore();
/**
* Updates the minimum competitive document score.
*
* @param minCompetitiveDocScore the new minimum competitive document score to set
*/
abstract void updateMinCompetitiveDocScore(long minCompetitiveDocScore);
}
| AbstractMaxScoreKnnCollector |
java | apache__spark | core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java | {
"start": 6809,
"end": 7181
} | class ____ extends RadixSortSupport {
@Override public boolean sortDescending() { return true; }
@Override public boolean sortSigned() { return true; }
@Override public boolean nullsFirst() { return true; }
@Override
public int compare(long b, long a) {
return Long.compare(a, b);
}
}
public static final | SignedPrefixComparatorDescNullsFirst |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/KnativeDeployer.java | {
"start": 534,
"end": 1708
} | class ____ {
@BuildStep
public void checkEnvironment(Optional<SelectedKubernetesDeploymentTargetBuildItem> selectedDeploymentTarget,
List<GeneratedKubernetesResourceBuildItem> resources,
KubernetesClientBuildItem kubernetesClientBuilder,
BuildProducer<KubernetesDeploymentClusterBuildItem> deploymentCluster) {
selectedDeploymentTarget.ifPresent(target -> {
if (!KubernetesDeploy.INSTANCE.checkSilently(kubernetesClientBuilder)) {
return;
}
if (target.getEntry().getName().equals(KNATIVE)) {
try (KnativeClient client = kubernetesClientBuilder.buildClient().adapt(KnativeClient.class)) {
if (client.hasApiGroup("knative.dev", false)) {
deploymentCluster.produce(new KubernetesDeploymentClusterBuildItem(KNATIVE));
} else {
throw new IllegalStateException(
"Knative was requested as a deployment, but the target cluster is not a Knative cluster!");
}
}
}
});
}
}
| KnativeDeployer |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/exceptions/misusing/RedundantListenerException.java | {
"start": 592,
"end": 818
} | interface ____.
* <p>
* Indicates a user error - previous listener was not removed
* according to the API specification - see {@link org.mockito.MockitoFramework#addListener(MockitoListener)}.
*
* @since 2.5.2
*/
public | type |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/annotation/AnnotationValueBuilder.java | {
"start": 9566,
"end": 9860
} | enum ____.
*
* @param enumObj The enum
* @return This builder
*/
@NonNull
public AnnotationValueBuilder<T> value(@Nullable Enum<?> enumObj) {
return member(AnnotationMetadata.VALUE_MEMBER, enumObj);
}
/**
* Sets the value member to the given | object |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/container/TransactionMessageIT.java | {
"start": 2018,
"end": 12079
} | class ____ extends ContainerIntegrationTestBase {
private static final String MESSAGE_STRING = RandomStringUtils.random(1024);
private static byte[] messageBody;
static {
try {
messageBody = MESSAGE_STRING.getBytes(RemotingHelper.DEFAULT_CHARSET);
} catch (UnsupportedEncodingException ignored) {
}
}
private static final int MESSAGE_COUNT = 16;
public TransactionMessageIT() {
}
private static String generateGroup() {
return "GID-" + TransactionMessageIT.class.getSimpleName() + RandomStringUtils.randomNumeric(5);
}
@Test
public void consumeTransactionMsg() throws MQClientException {
final String topic = generateTopic();
createTopicTo(master1With3Replicas, topic, 1, 1);
final String group = generateGroup();
DefaultMQPushConsumer pushConsumer = createPushConsumer(group);
pushConsumer.subscribe(topic, "*");
AtomicInteger receivedMsgCount = new AtomicInteger(0);
pushConsumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
receivedMsgCount.addAndGet(msgs.size());
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
pushConsumer.start();
TransactionMQProducer producer = createTransactionProducer(group, new TransactionListenerImpl(false));
producer.start();
for (int i = 0; i < MESSAGE_COUNT; i++) {
Message msg = new Message(topic, messageBody);
TransactionSendResult result = producer.sendMessageInTransaction(msg, null);
assertThat(result.getLocalTransactionState()).isEqualTo(LocalTransactionState.COMMIT_MESSAGE);
}
System.out.printf("send message complete%n");
await().atMost(Duration.ofSeconds(MESSAGE_COUNT * 2)).until(() -> receivedMsgCount.get() >= MESSAGE_COUNT);
System.out.printf("consumer received %d msg%n", receivedMsgCount.get());
pushConsumer.shutdown();
producer.shutdown();
}
private static String generateTopic() {
return TransactionMessageIT.class.getSimpleName() + RandomStringUtils.randomNumeric(5);
}
@Test
public void consumeTransactionMsgLocalEscape() throws Exception {
final String topic = generateTopic();
createTopicTo(master1With3Replicas, topic, 1, 1);
final String group = generateGroup();
DefaultMQPushConsumer pushConsumer = createPushConsumer(group);
pushConsumer.subscribe(topic, "*");
AtomicInteger receivedMsgCount = new AtomicInteger(0);
Map<String, Message> msgSentMap = new HashMap<>();
pushConsumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
for (MessageExt msg : msgs) {
if (msgSentMap.containsKey(msg.getMsgId())) {
receivedMsgCount.incrementAndGet();
}
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
pushConsumer.start();
TransactionListenerImpl transactionCheckListener = new TransactionListenerImpl(true);
TransactionMQProducer producer = createTransactionProducer(group, transactionCheckListener);
producer.start();
for (int i = 0; i < MESSAGE_COUNT; i++) {
Message msg = new Message(topic, messageBody);
msg.setKeys(UUID.randomUUID().toString());
SendResult result = producer.sendMessageInTransaction(msg, null);
String msgId = result.getMsgId();
msgSentMap.put(msgId, msg);
}
isolateBroker(master1With3Replicas);
brokerContainer1.removeBroker(new BrokerIdentity(master1With3Replicas.getBrokerIdentity().getBrokerClusterName(),
master1With3Replicas.getBrokerIdentity().getBrokerName(),
master1With3Replicas.getBrokerIdentity().getBrokerId()));
System.out.printf("=========" + master1With3Replicas.getBrokerIdentity().getBrokerName() + "-"
+ master1With3Replicas.getBrokerIdentity().getBrokerId() + " removed%n");
createTopicTo(master2With3Replicas, topic, 1, 1);
transactionCheckListener.setShouldReturnUnknownState(false);
producer.getDefaultMQProducerImpl().getmQClientFactory().updateTopicRouteInfoFromNameServer(topic);
System.out.printf("Wait for consuming%n");
await().atMost(Duration.ofSeconds(300)).until(() -> receivedMsgCount.get() >= MESSAGE_COUNT);
System.out.printf("consumer received %d msg%n", receivedMsgCount.get());
pushConsumer.shutdown();
producer.shutdown();
master1With3Replicas = brokerContainer1.addBroker(buildConfigContext(master1With3Replicas.getBrokerConfig(), master1With3Replicas.getMessageStoreConfig()));
master1With3Replicas.start();
cancelIsolatedBroker(master1With3Replicas);
awaitUntilSlaveOK();
receivedMsgCount.set(0);
DefaultMQPushConsumer pushConsumer2 = createPushConsumer(group);
pushConsumer2.subscribe(topic, "*");
pushConsumer2.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
for (MessageExt msg : msgs) {
if (msgSentMap.containsKey(msg.getMsgId())) {
receivedMsgCount.incrementAndGet();
}
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
pushConsumer2.start();
System.out.printf("Wait for checking...%n");
Thread.sleep(10000L);
}
@Test
public void consumeTransactionMsgRemoteEscape() throws Exception {
final String topic = generateTopic();
createTopicTo(master1With3Replicas, topic, 1, 1);
final String group = generateGroup();
AtomicInteger receivedMsgCount = new AtomicInteger(0);
Map<String, Message> msgSentMap = new HashMap<>();
DefaultMQPushConsumer pushConsumer = createPushConsumer(group);
pushConsumer.subscribe(topic, "*");
pushConsumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
for (MessageExt msg : msgs) {
if (msgSentMap.containsKey(msg.getMsgId())) {
receivedMsgCount.incrementAndGet();
}
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
pushConsumer.start();
TransactionListenerImpl transactionCheckListener = new TransactionListenerImpl(true);
TransactionMQProducer producer = createTransactionProducer(group, transactionCheckListener);
producer.start();
for (int i = 0; i < MESSAGE_COUNT; i++) {
Message msg = new Message(topic, messageBody);
msg.setKeys(UUID.randomUUID().toString());
SendResult result = producer.sendMessageInTransaction(msg, null);
String msgId = result.getMsgId();
msgSentMap.put(msgId, msg);
}
isolateBroker(master1With3Replicas);
brokerContainer1.removeBroker(new BrokerIdentity(master1With3Replicas.getBrokerIdentity().getBrokerClusterName(),
master1With3Replicas.getBrokerIdentity().getBrokerName(),
master1With3Replicas.getBrokerIdentity().getBrokerId()));
System.out.printf("=========" + master1With3Replicas.getBrokerIdentity().getBrokerName() + "-"
+ master1With3Replicas.getBrokerIdentity().getBrokerId() + " removed%n");
createTopicTo(master2With3Replicas, topic, 1, 1);
createTopicTo(master3With3Replicas, topic, 1, 1);
//isolateBroker(master2With3Replicas);
brokerContainer2.removeBroker(new BrokerIdentity(master2With3Replicas.getBrokerIdentity().getBrokerClusterName(),
master2With3Replicas.getBrokerIdentity().getBrokerName(),
master2With3Replicas.getBrokerIdentity().getBrokerId()));
System.out.printf("=========" + master2With3Replicas.getBrokerIdentity().getBrokerClusterName() + "-"
+ master2With3Replicas.getBrokerIdentity().getBrokerName()
+ "-" + master2With3Replicas.getBrokerIdentity().getBrokerId() + " removed%n");
pushConsumer.getDefaultMQPushConsumerImpl().getRebalanceImpl().doRebalance(false);
transactionCheckListener.setShouldReturnUnknownState(false);
producer.getDefaultMQProducerImpl().getmQClientFactory().updateTopicRouteInfoFromNameServer(topic);
System.out.printf("Wait for consuming%n");
await().atMost(Duration.ofSeconds(180)).until(() -> receivedMsgCount.get() >= MESSAGE_COUNT);
System.out.printf("consumer received %d msg%n", receivedMsgCount.get());
pushConsumer.shutdown();
producer.shutdown();
master1With3Replicas = brokerContainer1.addBroker(buildConfigContext(master1With3Replicas.getBrokerConfig(), master1With3Replicas.getMessageStoreConfig()));
master1With3Replicas.start();
cancelIsolatedBroker(master1With3Replicas);
master2With3Replicas = brokerContainer2.addBroker(buildConfigContext(master2With3Replicas.getBrokerConfig(),
master2With3Replicas.getMessageStoreConfig()));
master2With3Replicas.start();
cancelIsolatedBroker(master2With3Replicas);
awaitUntilSlaveOK();
receivedMsgCount.set(0);
DefaultMQPushConsumer pushConsumer2 = createPushConsumer(group);
pushConsumer2.subscribe(topic, "*");
pushConsumer2.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
for (MessageExt msg : msgs) {
if (msgSentMap.containsKey(msg.getMsgId())) {
receivedMsgCount.incrementAndGet();
}
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
pushConsumer2.start();
System.out.printf("Wait for checking...%n");
Thread.sleep(10000L);
assertThat(receivedMsgCount.get()).isEqualTo(0);
pushConsumer2.shutdown();
}
}
| TransactionMessageIT |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3462/Issue3462Mapper.java | {
"start": 364,
"end": 505
} | interface ____ {
Issue3462Mapper INSTANCE = Mappers.getMapper( Issue3462Mapper.class );
Target map(Source source);
| Issue3462Mapper |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/SftpConsumerLocalWorkDirectoryIT.java | {
"start": 1786,
"end": 4303
} | class ____ extends SftpServerTestSupport {
@TempDir
Path testDirectory;
protected String getFtpUrl() {
return "sftp://localhost:{{ftp.server.port}}/{{ftp.root.dir}}/?password=admin"
+ "&localWorkDirectory=" + testDirectory.resolve("lwd")
+ "&noop=true";
}
@Override
public void doPostSetup() throws Exception {
prepareFtpServer();
}
private void prepareFtpServer() throws Exception {
// prepares the FTP Server by creating a file on the server that we want
// to unit test that we can pool
Endpoint endpoint = context.getEndpoint(getFtpUrl());
Exchange exchange = endpoint.createExchange();
exchange.getIn().setBody("Hello World");
exchange.getIn().setHeader(Exchange.FILE_NAME, "hello.txt");
Producer producer = endpoint.createProducer();
producer.start();
producer.process(exchange);
producer.stop();
}
@Test
public void testLocalWorkDirectory() throws Exception {
NotifyBuilder notify = new NotifyBuilder(context).whenDone(1).create();
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
mock.expectedMessageCount(1);
context.getRouteController().startRoute("myRoute");
MockEndpoint.assertIsSatisfied(context);
MockEndpoint.assertIsSatisfied(context);
assertTrue(notify.matchesWaitTime());
// and the out file should exists
assertFileExists(testDirectory.resolve("out/hello.txt"), "Hello World");
// now the lwd file should be deleted
assertFileNotExists(testDirectory.resolve("lwd/hello.txt"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getFtpUrl()).routeId("myRoute").noAutoStartup().process(new Processor() {
public void process(Exchange exchange) {
File body = exchange.getIn().getBody(File.class);
assertNotNull(body);
assertTrue(body.exists(), "Local work file should exists");
assertEquals(FileUtil.normalizePath(testDirectory.resolve("lwd/hello.txt").toString()), body.getPath());
}
}).to("mock:result", TestSupport.fileUri(testDirectory, "out"));
}
};
}
}
| SftpConsumerLocalWorkDirectoryIT |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/cluster/remote/ClusterRpcClientProxy.java | {
"start": 2427,
"end": 10192
} | class ____ extends MemberChangeListener {
private static final long DEFAULT_REQUEST_TIME_OUT = 3000L;
final ServerMemberManager serverMemberManager;
public ClusterRpcClientProxy(ServerMemberManager serverMemberManager) {
this.serverMemberManager = serverMemberManager;
}
/**
* init after constructor.
*/
@PostConstruct
public void init() {
try {
NotifyCenter.registerSubscriber(this);
List<Member> members = serverMemberManager.allMembersWithoutSelf();
refresh(members);
Loggers.CLUSTER.info(
"[ClusterRpcClientProxy] success to refresh cluster rpc client on start up,members ={} ", members);
} catch (NacosException e) {
Loggers.CLUSTER.warn("[ClusterRpcClientProxy] fail to refresh cluster rpc client,{} ", e.getMessage());
}
}
/**
* init cluster rpc clients.
*
* @param members cluster server list member list.
*/
private void refresh(List<Member> members) throws NacosException {
//ensure to create client of new members
for (Member member : members) {
createRpcClientAndStart(member, ConnectionType.GRPC);
}
//shutdown and remove old members.
Set<Map.Entry<String, RpcClient>> allClientEntrys = RpcClientFactory.getAllClientEntries();
Iterator<Map.Entry<String, RpcClient>> iterator = allClientEntrys.iterator();
List<String> newMemberKeys = members.stream().map(this::memberClientKey).collect(Collectors.toList());
while (iterator.hasNext()) {
Map.Entry<String, RpcClient> next1 = iterator.next();
if (next1.getKey().startsWith("Cluster-") && !newMemberKeys.contains(next1.getKey())) {
Loggers.CLUSTER.info("member leave,destroy client of member - > : {}", next1.getKey());
RpcClient client = RpcClientFactory.getClient(next1.getKey());
if (client != null) {
RpcClientFactory.getClient(next1.getKey()).shutdown();
}
iterator.remove();
}
}
}
private String memberClientKey(Member member) {
return "Cluster-" + member.getAddress();
}
private void createRpcClientAndStart(Member member, ConnectionType type) throws NacosException {
Map<String, String> labels = new HashMap<>(2);
labels.put(RemoteConstants.LABEL_SOURCE, RemoteConstants.LABEL_SOURCE_CLUSTER);
String memberClientKey = memberClientKey(member);
RpcClient client = buildRpcClient(type, labels, memberClientKey);
if (!client.getConnectionType().equals(type)) {
Loggers.CLUSTER.info("connection type changed, destroy client of member - > : {}", member);
RpcClientFactory.destroyClient(memberClientKey);
client = buildRpcClient(type, labels, memberClientKey);
}
if (client.isWaitInitiated()) {
Loggers.CLUSTER.info("start a new rpc client to member -> : {}", member);
//one fixed server
client.serverListFactory(new ServerListFactory() {
@Override
public String genNextServer() {
return member.getAddress();
}
@Override
public String getCurrentServer() {
return member.getAddress();
}
@Override
public List<String> getServerList() {
return CollectionUtils.list(member.getAddress());
}
});
client.start();
}
}
/**
* Using {@link EnvUtil#getAvailableProcessors(int)} to build cluster clients' grpc thread pool.
*/
private RpcClient buildRpcClient(ConnectionType type, Map<String, String> labels, String memberClientKey) {
Properties properties = EnvUtil.getProperties();
GrpcClientConfig clientConfig = DefaultGrpcClientConfig.newBuilder().buildClusterFromProperties(properties)
.setLabels(labels).setName(memberClientKey)
.setThreadPoolCoreSize(EnvUtil.getAvailableProcessors(2))
.setThreadPoolMaxSize(EnvUtil.getAvailableProcessors(8)).build();
return RpcClientFactory.createClusterClient(memberClientKey, type, clientConfig);
}
/**
* send request to member.
*
* @param member member of server.
* @param request request.
* @return Response response.
* @throws NacosException exception may throws.
*/
public Response sendRequest(Member member, Request request) throws NacosException {
return sendRequest(member, request, DEFAULT_REQUEST_TIME_OUT);
}
/**
* send request to member.
*
* @param member member of server.
* @param request request.
* @return Response response.
* @throws NacosException exception may throws.
*/
public Response sendRequest(Member member, Request request, long timeoutMills) throws NacosException {
RpcClient client = RpcClientFactory.getClient(memberClientKey(member));
if (client != null) {
injectorServerIdentity(request);
return client.request(request, timeoutMills);
} else {
throw new NacosException(CLIENT_INVALID_PARAM, "No rpc client related to member: " + member);
}
}
/**
* aync send request to member with callback.
*
* @param member member of server.
* @param request request.
* @param callBack RequestCallBack.
* @throws NacosException exception may throws.
*/
public void asyncRequest(Member member, Request request, RequestCallBack callBack) throws NacosException {
RpcClient client = RpcClientFactory.getClient(memberClientKey(member));
if (client != null) {
injectorServerIdentity(request);
client.asyncRequest(request, callBack);
} else {
throw new NacosException(CLIENT_INVALID_PARAM, "No rpc client related to member: " + member);
}
}
/**
* send request to member.
*
* @param request request.
* @throws NacosException exception may throw.
*/
public void sendRequestToAllMembers(Request request) throws NacosException {
List<Member> members = serverMemberManager.allMembersWithoutSelf();
for (Member member1 : members) {
sendRequest(member1, request);
}
}
@Override
public void onEvent(MembersChangeEvent event) {
try {
List<Member> members = serverMemberManager.allMembersWithoutSelf();
refresh(members);
} catch (NacosException e) {
Loggers.CLUSTER.warn("[serverlist] fail to refresh cluster rpc client, event:{}, msg: {} ", event,
e.getMessage());
}
}
/**
* Check whether client for member is ready.
*
* @param member member
* @return {@code true} if target client is connected, otherwise {@code false}
*/
public boolean isRunning(Member member) {
RpcClient client = RpcClientFactory.getClient(memberClientKey(member));
if (null == client) {
return false;
}
return client.isRunning();
}
private void injectorServerIdentity(Request request) {
AuthHeaderUtil.addIdentityToHeader(request, NacosAuthConfigHolder.getInstance()
.getNacosAuthConfigByScope(NacosServerAuthConfig.NACOS_SERVER_AUTH_SCOPE));
}
}
| ClusterRpcClientProxy |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/ConfigurationException.java | {
"start": 920,
"end": 1319
} | class ____ extends FlinkException {
private static final long serialVersionUID = 3971647332059381556L;
public ConfigurationException(String message) {
super(message);
}
public ConfigurationException(String message, Throwable cause) {
super(message, cause);
}
public ConfigurationException(Throwable cause) {
super(cause);
}
}
| ConfigurationException |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/propertyeditors/FileEditor.java | {
"start": 2154,
"end": 3864
} | class ____ extends PropertyEditorSupport {
private final ResourceEditor resourceEditor;
/**
* Create a new FileEditor, using a default ResourceEditor underneath.
*/
public FileEditor() {
this.resourceEditor = new ResourceEditor();
}
/**
* Create a new FileEditor, using the given ResourceEditor underneath.
* @param resourceEditor the ResourceEditor to use
*/
public FileEditor(ResourceEditor resourceEditor) {
Assert.notNull(resourceEditor, "ResourceEditor must not be null");
this.resourceEditor = resourceEditor;
}
@Override
public void setAsText(String text) throws IllegalArgumentException {
if (!StringUtils.hasText(text)) {
setValue(null);
return;
}
// Check whether we got an absolute file path without "file:" prefix.
// For backwards compatibility, we'll consider those as straight file path.
File file = null;
if (!ResourceUtils.isUrl(text)) {
file = new File(text);
if (file.isAbsolute()) {
setValue(file);
return;
}
}
// Proceed with standard resource location parsing.
this.resourceEditor.setAsText(text);
Resource resource = (Resource) this.resourceEditor.getValue();
// If it's a URL or a path pointing to an existing resource, use it as-is.
if (file == null || resource.exists()) {
try {
setValue(resource.getFile());
}
catch (IOException ex) {
throw new IllegalArgumentException(
"Could not retrieve file for " + resource + ": " + ex.getMessage());
}
}
else {
// Set a relative File reference and hope for the best.
setValue(file);
}
}
@Override
public String getAsText() {
File value = (File) getValue();
return (value != null ? value.getPath() : "");
}
}
| FileEditor |
java | spring-projects__spring-boot | module/spring-boot-pulsar/src/main/java/org/springframework/boot/pulsar/autoconfigure/PropertiesPulsarConnectionDetails.java | {
"start": 786,
"end": 1249
} | class ____ implements PulsarConnectionDetails {
private final PulsarProperties pulsarProperties;
PropertiesPulsarConnectionDetails(PulsarProperties pulsarProperties) {
this.pulsarProperties = pulsarProperties;
}
@Override
public String getBrokerUrl() {
return this.pulsarProperties.getClient().getServiceUrl();
}
@Override
public String getAdminUrl() {
return this.pulsarProperties.getAdmin().getServiceUrl();
}
}
| PropertiesPulsarConnectionDetails |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/inheritance/TypeLevelInheritanceTest.java | {
"start": 3185,
"end": 3310
} | class ____ {
public void ping() {
}
}
@Typed(SubBean.class)
@ApplicationScoped
static | BasicBean |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/io/RichOutputFormatTest.java | {
"start": 1363,
"end": 2223
} | class ____ {
@Test
void testCheckRuntimeContextAccess() {
final SerializedOutputFormat<Value> inputFormat = new SerializedOutputFormat<>();
final TaskInfo taskInfo = new TaskInfoImpl("test name", 3, 1, 3, 0);
inputFormat.setRuntimeContext(
new RuntimeUDFContext(
taskInfo,
getClass().getClassLoader(),
new ExecutionConfig(),
new HashMap<>(),
new HashMap<>(),
UnregisteredMetricsGroup.createOperatorMetricGroup()));
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()).isOne();
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getNumberOfParallelSubtasks())
.isEqualTo(3);
}
}
| RichOutputFormatTest |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/jls/JLS_15_12_2_5Test.java | {
"start": 11259,
"end": 11447
} | interface ____ {
String oneArg(Object arg);
String oneArg(String arg);
String varargs(Object... args);
String varargs(String... args);
}
}
| SingleOverload |
java | quarkusio__quarkus | independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/BuiltInBean.java | {
"start": 194,
"end": 661
} | class ____<T> implements InjectableBean<T> {
@Override
public String getIdentifier() {
return "builtin_bean_" + this.getClass().getSimpleName();
}
@Override
public T create(CreationalContext<T> creationalContext) {
return get(creationalContext);
}
@Override
public Kind getKind() {
return Kind.BUILTIN;
}
@Override
public String toString() {
return Beans.toString(this);
}
}
| BuiltInBean |
java | apache__camel | components/camel-jooq/src/test/java/org/apache/camel/component/jooq/beans/AuthorRecordBean.java | {
"start": 1133,
"end": 1396
} | class ____ {
private int id = 1;
public ResultQuery<AuthorRecord> select() {
return DSL.selectFrom(AUTHOR).where(AUTHOR.ID.eq(id));
}
public Query delete() {
return DSL.delete(AUTHOR).where(AUTHOR.ID.eq(id));
}
}
| AuthorRecordBean |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jdbc/AbstractWork.java | {
"start": 330,
"end": 461
} | class ____ intended to be used for work that does not return a value when
* executed.
*
* @author Gail Badner
*/
public abstract | is |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/util/RowDataUtil.java | {
"start": 985,
"end": 1714
} | class ____ {
/**
* Returns true if the message is either {@link RowKind#INSERT} or {@link RowKind#UPDATE_AFTER},
* which refers to an accumulate operation of aggregation.
*/
public static boolean isAccumulateMsg(RowData row) {
RowKind kind = row.getRowKind();
return kind == RowKind.INSERT || kind == RowKind.UPDATE_AFTER;
}
/**
* Returns true if the message is either {@link RowKind#DELETE} or {@link
* RowKind#UPDATE_BEFORE}, which refers to a retract operation of aggregation.
*/
public static boolean isRetractMsg(RowData row) {
RowKind kind = row.getRowKind();
return kind == RowKind.UPDATE_BEFORE || kind == RowKind.DELETE;
}
}
| RowDataUtil |
java | google__guice | core/src/com/google/inject/internal/InjectorImpl.java | {
"start": 19494,
"end": 46746
} | class ____<T> extends BindingImpl<T>
implements ConvertedConstantBinding<T> {
final T value;
final Provider<T> provider;
final Binding<String> originalBinding;
final TypeConverterBinding typeConverterBinding;
ConvertedConstantBindingImpl(
InjectorImpl injector,
Key<T> key,
T value,
Binding<String> originalBinding,
TypeConverterBinding typeConverterBinding) {
super(
injector,
key,
originalBinding.getSource(),
ConstantFactory.create(value, originalBinding.getSource()),
Scoping.UNSCOPED);
this.value = value;
provider = Providers.of(value);
this.originalBinding = originalBinding;
this.typeConverterBinding = typeConverterBinding;
}
@Override
public Provider<T> getProvider() {
return provider;
}
@Override
public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
return visitor.visit(this);
}
@Override
public T getValue() {
return value;
}
@Override
public TypeConverterBinding getTypeConverterBinding() {
return typeConverterBinding;
}
@Override
public Key<String> getSourceKey() {
return originalBinding.getKey();
}
@Override
public Set<Dependency<?>> getDependencies() {
return ImmutableSet.<Dependency<?>>of(Dependency.get(getSourceKey()));
}
@Override
public void applyTo(Binder binder) {
throw new UnsupportedOperationException("This element represents a synthetic binding.");
}
@Override
public String toString() {
return MoreObjects.toStringHelper(ConvertedConstantBinding.class)
.add("key", getKey())
.add("sourceKey", getSourceKey())
.add("value", value)
.toString();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ConvertedConstantBindingImpl) {
ConvertedConstantBindingImpl<?> o = (ConvertedConstantBindingImpl<?>) obj;
return getKey().equals(o.getKey())
&& getScoping().equals(o.getScoping())
&& Objects.equal(value, o.value);
} else {
return false;
}
}
@Override
public int hashCode() {
return Objects.hashCode(getKey(), getScoping(), value);
}
}
<T> void initializeBinding(BindingImpl<T> binding, Errors errors) throws ErrorsException {
if (binding instanceof DelayedInitialize) {
((DelayedInitialize) binding).initialize(this, errors);
}
}
/** For multibinding bindings that delegate to each other. */
<T> void initializeBindingIfDelayed(Binding<T> binding, Errors errors) throws ErrorsException {
if (binding instanceof InternalProviderInstanceBindingImpl
&& ((InternalProviderInstanceBindingImpl) binding).getInitializationTiming()
== InternalProviderInstanceBindingImpl.InitializationTiming.DELAYED) {
((DelayedInitialize) binding).initialize(this, errors);
}
}
private <T> void initializeJitBinding(BindingImpl<T> binding, Errors errors)
throws ErrorsException {
// Put the partially constructed binding in the map a little early. This enables us to handle
// circular dependencies. Example: FooImpl -> BarImpl -> FooImpl.
// Note: We don't need to synchronize on jitBindingData.lock() during injector creation.
if (binding instanceof DelayedInitialize) {
Key<T> key = binding.getKey();
jitBindingData.putJitBinding(key, binding);
boolean successful = false;
DelayedInitialize delayed = (DelayedInitialize) binding;
try {
delayed.initialize(this, errors);
successful = true;
} finally {
if (!successful) {
// We do not pass cb.getInternalConstructor as the second parameter
// so that cached exceptions while constructing it get stored.
// See TypeListenerTest#testTypeListenerThrows
removeFailedJitBinding(binding, null);
cleanup(binding, new HashSet<Key<?>>());
}
}
}
}
/**
* Iterates through the binding's dependencies to clean up any stray bindings that were leftover
* from a failed JIT binding. This is required because the bindings are eagerly and optimistically
* added to allow circular dependency support, so dependencies may pass where they should have
* failed.
*/
private boolean cleanup(BindingImpl<?> binding, Set<Key<?>> encountered) {
boolean bindingFailed = false;
Set<Dependency<?>> deps = getInternalDependencies(binding);
for (Dependency<?> dep : deps) {
Key<?> depKey = dep.getKey();
InjectionPoint ip = dep.getInjectionPoint();
if (encountered.add(depKey)) { // only check if we haven't looked at this key yet
var depBinding = jitBindingData.getJitBinding(depKey);
if (depBinding != null) { // if the binding still exists, validate
boolean failed = cleanup(depBinding, encountered); // if children fail, we fail
if (depBinding instanceof ConstructorBindingImpl) {
ConstructorBindingImpl<?> ctorBinding = (ConstructorBindingImpl<?>) depBinding;
ip = ctorBinding.getInternalConstructor();
if (!ctorBinding.isInitialized()) {
failed = true;
}
}
if (failed) {
removeFailedJitBinding(depBinding, ip);
bindingFailed = true;
}
} else if (bindingData.getExplicitBinding(depKey) == null) {
// ignore keys if they were explicitly bound, but if neither JIT
// nor explicit, it's also invalid & should let parent know.
bindingFailed = true;
}
}
}
return bindingFailed;
}
/** Cleans up any state that may have been cached when constructing the JIT binding. */
private void removeFailedJitBinding(Binding<?> binding, InjectionPoint ip) {
jitBindingData.addFailedJitBinding(binding.getKey());
// Be careful cleaning up constructors & jitBindings -- we can't remove
// from `jitBindings` if we're still in the process of loading this constructor,
// otherwise we can re-enter the constructor's cache and attempt to load it
// while already loading it. See issues:
// - https://github.com/google/guice/pull/1633
// - https://github.com/google/guice/issues/785
// - https://github.com/google/guice/pull/1389
// - https://github.com/google/guice/pull/1394
// (Note: there may be a better way to do this that avoids the need for the `isLoading`
// conditional, but due to the recursive nature of JIT loading and the way we allow partially
// initialized JIT bindings [to support circular dependencies], there's no other great way
// that I could figure out.)
if (ip == null || !constructors.isLoading(ip)) {
jitBindingData.removeJitBinding(binding.getKey());
}
if (ip != null && !constructors.isLoading(ip)) {
constructors.remove(ip);
}
membersInjectorStore.remove(binding.getKey().getTypeLiteral());
provisionListenerStore.remove(binding);
}
/** Safely gets the dependencies of possibly not initialized bindings. */
private Set<Dependency<?>> getInternalDependencies(BindingImpl<?> binding) {
if (binding instanceof ConstructorBindingImpl) {
return ((ConstructorBindingImpl<?>) binding).getInternalDependencies();
} else if (binding instanceof HasDependencies) {
return ((HasDependencies) binding).getDependencies();
} else {
return ImmutableSet.of();
}
}
/**
* Creates a binding for an injectable type with the given scope. Looks for a scope on the type if
* none is specified.
*/
<T> BindingImpl<T> createUninitializedBinding(
Key<T> key,
Scoping scoping,
Object source,
Errors errors,
boolean jitBinding,
Consumer<CreationListener> creationListenerCallback)
throws ErrorsException {
Class<?> rawType = key.getTypeLiteral().getRawType();
ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class);
// Don't try to inject arrays or enums annotated with @ImplementedBy.
if (rawType.isArray() || (rawType.isEnum() && implementedBy != null)) {
throw errors.missingImplementationWithHint(key, this).toException();
}
// Handle TypeLiteral<T> by binding the inner type
if (rawType == TypeLiteral.class) {
@SuppressWarnings("unchecked") // we have to fudge the inner type as Object
BindingImpl<T> binding =
(BindingImpl<T>) createTypeLiteralBinding((Key<TypeLiteral<Object>>) key, errors);
return binding;
}
// Handle @ImplementedBy
if (implementedBy != null) {
Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
return createImplementedByBinding(
key, scoping, implementedBy, errors, creationListenerCallback);
}
// Handle @ProvidedBy.
ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class);
if (providedBy != null) {
Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
return createProvidedByBinding(key, scoping, providedBy, errors);
}
return ConstructorBindingImpl.create(
this,
key,
null, /* use default constructor */
source,
scoping,
errors,
jitBinding && options.jitDisabled,
options.atInjectRequired);
}
/**
* Converts a binding for a {@code Key<TypeLiteral<T>>} to the value {@code TypeLiteral<T>}. It's
* a bit awkward because we have to pull out the inner type in the type literal.
*/
private <T> BindingImpl<TypeLiteral<T>> createTypeLiteralBinding(
Key<TypeLiteral<T>> key, Errors errors) throws ErrorsException {
Type typeLiteralType = key.getTypeLiteral().getType();
if (!(typeLiteralType instanceof ParameterizedType)) {
throw errors.cannotInjectRawTypeLiteral().toException();
}
ParameterizedType parameterizedType = (ParameterizedType) typeLiteralType;
Type innerType = parameterizedType.getActualTypeArguments()[0];
// this is unforunate. We don't support building TypeLiterals for type variable like 'T'. If
// this proves problematic, we can probably fix TypeLiteral to support type variables
if (!(innerType instanceof Class)
&& !(innerType instanceof GenericArrayType)
&& !(innerType instanceof ParameterizedType)) {
throw errors.cannotInjectTypeLiteralOf(innerType).toException();
}
@SuppressWarnings("unchecked") // by definition, innerType == T, so this is safe
TypeLiteral<T> value = (TypeLiteral<T>) TypeLiteral.get(innerType);
InternalFactory<TypeLiteral<T>> factory =
ConstantFactory.create(value, SourceProvider.UNKNOWN_SOURCE);
return new InstanceBindingImpl<TypeLiteral<T>>(
this,
key,
SourceProvider.UNKNOWN_SOURCE,
factory,
ImmutableSet.<InjectionPoint>of(),
value);
}
  /**
   * Creates a binding for a type annotated with {@code @ProvidedBy}.
   *
   * @param key the key being bound; its raw type carries the annotation
   * @param scoping explicit scoping from the caller; when unscoped, the raw type's own scope
   *     annotation (if any) is applied instead
   * @param providedBy the annotation naming the provider class
   * @throws ErrorsException if the provider type refers to itself or scope resolution fails
   */
  <T> BindingImpl<T> createProvidedByBinding(
      Key<T> key, Scoping scoping, ProvidedBy providedBy, Errors errors) throws ErrorsException {
    Class<?> rawType = key.getTypeLiteral().getRawType();
    Class<? extends jakarta.inject.Provider<?>> providerType = providedBy.value();
    // Make sure it's not the same type. TODO: Can we check for deeper loops?
    if (providerType == rawType) {
      throw errors.recursiveProviderType().toException();
    }
    // if no scope is specified, look for a scoping annotation on the raw type
    if (!scoping.isExplicitlyScoped()) {
      int numErrorsBefore = errors.size();
      Class<? extends Annotation> scopeAnnotation = findScopeAnnotation(errors, rawType);
      if (scopeAnnotation != null) {
        scoping =
            Scoping.makeInjectable(
                Scoping.forAnnotation(scopeAnnotation), this, errors.withSource(rawType));
      }
      errors.throwIfNewErrors(numErrorsBefore);
    }
    // Assume the provider provides an appropriate type. We double check at runtime.
    @SuppressWarnings("unchecked")
    Key<? extends Provider<T>> providerKey = (Key<? extends Provider<T>>) Key.get(providerType);
    ProvidedByInternalFactory<T> internalFactory =
        new ProvidedByInternalFactory<T>(
            rawType, providerType, providerKey, circularFactoryIdFactory.next());
    Object source = rawType;
    BindingImpl<T> binding =
        LinkedProviderBindingImpl.createWithInitializer(
            this,
            key,
            source,
            Scoping.<T>scope(key, this, internalFactory, source, scoping),
            scoping,
            providerKey,
            internalFactory);
    // Wire up provision listeners for the freshly created binding.
    internalFactory.setProvisionListenerCallback(provisionListenerStore.get(binding));
    return binding;
  }
  /**
   * Creates a binding for a type annotated with {@code @ImplementedBy}, linking the key to the
   * named implementation class.
   *
   * @param key the key being bound; its raw type carries the annotation
   * @param implementedBy the annotation naming the implementation class
   * @param creationListenerCallback notified about the new {@link CreationListener} (the factory
   *     proxy) so it can be initialized later
   * @throws ErrorsException if the implementation type is the annotated type itself or is not a
   *     subtype of it
   */
  private <T> BindingImpl<T> createImplementedByBinding(
      Key<T> key,
      Scoping scoping,
      ImplementedBy implementedBy,
      Errors errors,
      Consumer<CreationListener> creationListenerCallback)
      throws ErrorsException {
    Class<?> rawType = key.getTypeLiteral().getRawType();
    Class<?> implementationType = implementedBy.value();
    // Make sure it's not the same type. TODO: Can we check for deeper cycles?
    if (implementationType == rawType) {
      throw errors.recursiveImplementationType().toException();
    }
    // Make sure implementationType extends type.
    if (!rawType.isAssignableFrom(implementationType)) {
      throw errors.notASubtype(implementationType, rawType).toException();
    }
    @SuppressWarnings("unchecked") // After the preceding check, this cast is safe.
    Class<? extends T> subclass = (Class<? extends T>) implementationType;
    // Look up the target binding.
    final Key<? extends T> targetKey = Key.get(subclass);
    Object source = rawType;
    FactoryProxy<T> factory = new FactoryProxy<>(this, key, targetKey, source);
    // Notify any callbacks that we have a new CreationListener that needs to be notified.
    creationListenerCallback.accept(factory);
    return new LinkedBindingImpl<T>(
        this,
        key,
        source,
        Scoping.<T>scope(key, this, factory, source, scoping),
        scoping,
        targetKey);
  }
  /**
   * Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to
   * other ancestor injectors until this injector is tried.
   *
   * @throws ErrorsException if the key is banned at this level or the JIT binding cannot be created
   */
  private <T> BindingImpl<T> createJustInTimeBindingRecursive(
      Key<T> key, Errors errors, boolean jitDisabled, JitLimitation jitType)
      throws ErrorsException {
    // ask the parent to create the JIT binding
    if (parent != null) {
      if (jitType == JitLimitation.NEW_OR_EXISTING_JIT
          && jitDisabled
          && !parent.options.jitDisabled) {
        // If the binding would be forbidden here but allowed in a parent, report an error instead
        throw errors.jitDisabledInParent(key).toException();
      }
      try {
        // A fresh Errors is used so a parent failure does not pollute this injector's report.
        return parent.createJustInTimeBindingRecursive(
            key,
            new Errors(),
            jitDisabled,
            parent.options.jitDisabled ? JitLimitation.NO_JIT : jitType);
      } catch (ErrorsException ignored) {
        // If JIT binding creation failed in parent injector(s), create the JIT binding in this
        // injector instead.
      }
    }
    // Retrieve the sources before checking for banned key to guard against sources becoming null
    // due to a full GC happening after calling jitBindingData.isBanned and
    // state.getSourcesForBannedKey.
    // TODO(user): Consolidate these two APIs.
    Set<Object> sources = jitBindingData.getSourcesForBannedKey(key);
    if (jitBindingData.isBannedKey(key)) {
      throw errors.childBindingAlreadySet(key, sources).toException();
    }
    key = MoreTypes.canonicalizeKey(key); // before storing the key long-term, canonicalize it.
    BindingImpl<T> binding = createJustInTimeBinding(key, errors, jitDisabled, jitType);
    // Ban the key upward so a child and parent can never disagree on the same JIT binding.
    jitBindingData.banKeyInParent(key, bindingData, binding.getSource());
    jitBindingData.putJitBinding(key, binding);
    return binding;
  }
  /**
   * Returns a new just-in-time binding created by resolving {@code key}. The strategies used to
   * create just-in-time bindings are:
   *
   * <ol>
   *   <li>Internalizing Providers. If the requested binding is for {@code Provider<T>}, we delegate
   *       to the binding for {@code T}.
   *   <li>Converting constants.
   *   <li>ImplementedBy and ProvidedBy annotations. Only for unannotated keys.
   *   <li>The constructor of the raw type. Only for unannotated keys.
   * </ol>
   *
   * @param jitDisabled whether JIT bindings are disabled for this injector
   * @param jitType the kind of JIT access permitted for this lookup
   * @throws com.google.inject.internal.ErrorsException if the binding cannot be created.
   */
  private <T> BindingImpl<T> createJustInTimeBinding(
      Key<T> key, Errors errors, boolean jitDisabled, JitLimitation jitType)
      throws ErrorsException {
    int numErrorsBefore = errors.size();
    // Retrieve the sources before checking for a banned key to guard against sources becoming null
    // due to a full GC happening after calling jitBindingData.isBanned and
    // jitBindingData.getSourcesForBannedKey.
    // TODO(user): Consolidate these two APIs.
    Set<Object> sources = jitBindingData.getSourcesForBannedKey(key);
    if (jitBindingData.isBannedKey(key)) {
      throw errors.childBindingAlreadySet(key, sources).toException();
    }
    // Handle cases where T is a Provider<?>.
    if (isProvider(key)) {
      // These casts are safe. We know T extends Provider<X> and that given Key<Provider<X>>,
      // createSyntheticProviderBinding() will return BindingImpl<Provider<X>>.
      @SuppressWarnings("unchecked")
      BindingImpl<T> binding = (BindingImpl<T>) createSyntheticProviderBinding((Key) key, errors);
      return binding;
    }
    // Handle cases where T is a MembersInjector<?>
    if (isMembersInjector(key)) {
      // These casts are safe. T extends MembersInjector<X> and that given Key<MembersInjector<X>>,
      // createMembersInjectorBinding() will return BindingImpl<MembersInjector<X>>.
      @SuppressWarnings("unchecked")
      BindingImpl<T> binding = (BindingImpl<T>) createMembersInjectorBinding((Key) key, errors);
      return binding;
    }
    // Try to convert a constant string binding to the requested type.
    BindingImpl<T> convertedBinding = convertConstantStringBinding(key, errors);
    if (convertedBinding != null) {
      return convertedBinding;
    }
    if (!isTypeLiteral(key) && jitDisabled && jitType != JitLimitation.NEW_OR_EXISTING_JIT) {
      throw errors.jitDisabled(key).toException();
    }
    // If the key has an annotation...
    if (key.getAnnotationType() != null) {
      // Look for a binding without annotation attributes or return null.
      if (key.hasAttributes() && !options.exactBindingAnnotationsRequired) {
        try {
          Errors ignored = new Errors();
          return getBindingOrThrow(key.withoutAttributes(), ignored, JitLimitation.NO_JIT);
        } catch (ErrorsException ignored) {
          // throw with a more appropriate message below
        }
      }
      // Annotated keys never get constructor/ImplementedBy/ProvidedBy treatment.
      throw errors.missingImplementationWithHint(key, this).toException();
    }
    Object source = key.getTypeLiteral().getRawType();
    // Notify the creationListener right away, because we're going to recursively create JIT
    // bindings on-demand.
    BindingImpl<T> binding =
        createUninitializedBinding(
            key,
            Scoping.UNSCOPED,
            source,
            errors,
            true,
            creationListener -> creationListener.notify(errors));
    errors.throwIfNewErrors(numErrorsBefore);
    initializeJitBinding(binding, errors);
    return binding;
  }
<T> InternalFactory<? extends T> getInternalFactory(
Key<T> key, Errors errors, JitLimitation jitType) throws ErrorsException {
return getBindingOrThrow(key, errors, jitType).getInternalFactory();
}
@Override
public Map<Key<?>, Binding<?>> getBindings() {
return bindingData.getExplicitBindingsThisLevel();
}
@Override
public Map<Key<?>, Binding<?>> getAllBindings() {
synchronized (jitBindingData.lock()) {
return new ImmutableMap.Builder<Key<?>, Binding<?>>()
.putAll(bindingData.getExplicitBindingsThisLevel())
.putAll(jitBindingData.getJitBindings())
.buildOrThrow();
}
}
@Override
public Map<Class<? extends Annotation>, Scope> getScopeBindings() {
return ImmutableMap.copyOf(bindingData.getScopes());
}
@Override
public Set<TypeConverterBinding> getTypeConverterBindings() {
return ImmutableSet.copyOf(bindingData.getConvertersThisLevel());
}
@Override
public List<Element> getElements() {
ImmutableList.Builder<Element> elements = ImmutableList.builder();
elements.addAll(getAllBindings().values());
elements.addAll(bindingData.getProviderLookupsThisLevel());
elements.addAll(bindingData.getConvertersThisLevel());
elements.addAll(bindingData.getScopeBindingsThisLevel());
elements.addAll(bindingData.getTypeListenerBindingsThisLevel());
elements.addAll(bindingData.getProvisionListenerBindingsThisLevel());
elements.addAll(bindingData.getScannerBindingsThisLevel());
elements.addAll(bindingData.getStaticInjectionRequestsThisLevel());
elements.addAll(bindingData.getMembersInjectorLookupsThisLevel());
elements.addAll(bindingData.getInjectionRequestsThisLevel());
elements.addAll(bindingData.getInterceptorBindingsThisLevel());
return elements.build();
}
  /**
   * Returns, for each type the user explicitly requested a members injector for, the injection
   * points discovered for that type. Types only injected internally (never requested via
   * {@code getMembersInjector}) are filtered out.
   */
  @Override
  public Map<TypeLiteral<?>, List<InjectionPoint>> getAllMembersInjectorInjectionPoints() {
    // Note, this is a safe cast per the ListMultimap javadocs.
    // We could use Multimaps.asMap to avoid the cast, but unfortunately it's a @Beta method.
    @SuppressWarnings("unchecked")
    Map<TypeLiteral<?>, List<InjectionPoint>> res =
        (Map<TypeLiteral<?>, List<InjectionPoint>>)
            (Map<TypeLiteral<?>, ?>)
                ImmutableListMultimap.copyOf(
                        Multimaps.filterKeys(
                            membersInjectorStore.getAllInjectionPoints(),
                            userRequestedMembersInjectorTypes::contains))
                    .asMap();
    return res;
  }
/** Returns parameter injectors, or {@code null} if there are no parameters. */
SingleParameterInjector<?>[] getParametersInjectors(List<Dependency<?>> parameters, Errors errors)
throws ErrorsException {
if (parameters.isEmpty()) {
return null;
}
int numErrorsBefore = errors.size();
SingleParameterInjector<?>[] result = new SingleParameterInjector<?>[parameters.size()];
int i = 0;
for (Dependency<?> parameter : parameters) {
try {
result[i++] = createParameterInjector(parameter, errors.withSource(parameter));
} catch (ErrorsException rethrownBelow) {
// rethrown below
}
}
errors.throwIfNewErrors(numErrorsBefore);
return result;
}
<T> SingleParameterInjector<T> createParameterInjector(
final Dependency<T> dependency, final Errors errors) throws ErrorsException {
BindingImpl<? extends T> binding =
getBindingOrThrow(dependency.getKey(), errors, JitLimitation.NO_JIT);
return new SingleParameterInjector<T>(dependency, binding);
}
  /** Cached constructor injectors for each type. */
  final ConstructorInjectorStore constructors = new ConstructorInjectorStore(this);
  /** Cached field and method injectors for each type. Not initialized inline; assigned elsewhere. */
  MembersInjectorStore membersInjectorStore;
  /** Cached provision listener callbacks for each key. Not initialized inline; assigned elsewhere. */
  ProvisionListenerCallbackStore provisionListenerStore;
@Override
@SuppressWarnings({
"unchecked",
"rawtypes"
}) // the members injector type is consistent with instance's type
public void injectMembers(Object instance) {
MembersInjector membersInjector = getMembersInjector(instance.getClass());
membersInjector.injectMembers(instance);
}
@Override
public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
checkNotNull(typeLiteral, "typeLiteral");
userRequestedMembersInjectorTypes.add(typeLiteral);
Errors errors = new Errors(typeLiteral);
try {
return membersInjectorStore.get(typeLiteral, errors);
} catch (ErrorsException e) {
ConfigurationException exception =
new ConfigurationException(errors.merge(e.getErrors()).getMessages());
throw exception;
}
}
@Override
public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
return getMembersInjector(TypeLiteral.get(type));
}
@Override
public <T> Provider<T> getProvider(Class<T> type) {
return getProvider(Key.get(checkNotNull(type, "type")));
}
@Override
public <T> Provider<T> getProvider(final Key<T> key) {
checkNotNull(key, "key");
Errors errors = new Errors(key);
try {
// Access off the BindingImpl to leverage the cached provider.
Provider<T> result = getBindingOrThrow(key, errors, JitLimitation.NO_JIT).getProvider();
errors.throwIfNewErrors(0);
return result;
} catch (ErrorsException e) {
ConfigurationException exception =
new ConfigurationException(errors.merge(e.getErrors()).getMessages());
throw exception;
}
}
// A special implementation for BindingImpl to break a recursive dependency with getProvider so
// that getProvider can leverage the cache inside BindingImpl
<T> Provider<T> getProviderForBindingImpl(Key<T> key) {
Errors errors = new Errors(key);
try {
return getProviderOrThrow(Dependency.get(key), errors);
} catch (ErrorsException e) {
ConfigurationException exception =
new ConfigurationException(errors.merge(e.getErrors()).getMessages());
throw exception;
}
}
// Used by LookupProcessor to satisfy delegates.
<T> Provider<T> getProviderOrThrow(Dependency<T> dependency, Errors errors)
throws ErrorsException {
var key = dependency.getKey();
BindingImpl<T> binding = getBindingOrThrow(key, errors, JitLimitation.NO_JIT);
@SuppressWarnings("unchecked") // safe because Providers are covariant.
Provider<T> provider =
(Provider<T>) binding.getInternalFactory().makeProvider(this, dependency);
return provider;
}
@Override
public <T> T getInstance(Key<T> key) {
return getProvider(key).get();
}
@Override
public <T> T getInstance(Class<T> type) {
return getProvider(type).get();
}
/**
* Holds Object[] as a mutable wrapper, rather than InternalContext, since array operations are
* faster than ThreadLocal.set() / .get() operations.
*
* <p>Holds Object[] rather than InternalContext[], since localContext never gets cleaned up at
* any point. This could lead to problems when, for example, an OSGI application is reloaded, the
* InjectorImpl is destroyed, but the thread that the injector runs on is kept alive. In such a
* case, ThreadLocal itself would hold on to a reference to localContext, which would hold on to
   * the old InternalContext.
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/LogSegment.java | {
"start": 8232,
"end": 40860
} | class ____ not thread-safe so basically we assume that
// methods are called within UnifiedLog#lock.
// However, there's exceptional paths where this method can be called outside of the lock,
// so we need lock here to prevent multiple threads trying to modify maxTimestampAndOffsetSoFar
synchronized (maxTimestampAndOffsetLock) {
if (maxTimestampAndOffsetSoFar == TimestampOffset.UNKNOWN) {
maxTimestampAndOffsetSoFar = timeIndex().lastEntry();
}
}
}
return maxTimestampAndOffsetSoFar;
}
/**
* The maximum timestamp we see so far.
*
* Note that this may result in time index materialization.
*/
public long maxTimestampSoFar() throws IOException {
return readMaxTimestampAndOffsetSoFar().timestamp();
}
/**
* Note that this may result in time index materialization.
*/
private long shallowOffsetOfMaxTimestampSoFar() throws IOException {
return readMaxTimestampAndOffsetSoFar().offset();
}
/* Return the size in bytes of this log segment */
public int size() {
return log.sizeInBytes();
}
/**
* checks that the argument offset can be represented as an integer offset relative to the baseOffset.
*/
private boolean canConvertToRelativeOffset(long offset) throws IOException {
return offsetIndex().canAppendOffset(offset);
}
    /**
     * Append the given messages starting with the given offset. Add
     * an entry to the index if needed.
     *
     * It is assumed this method is being called from within a lock, it is not thread-safe otherwise.
     *
     * @param largestOffset The last offset in the message set
     * @param records The log entries to append.
     * @throws LogSegmentOffsetOverflowException if the largest offset causes index offset overflow
     */
    public void append(long largestOffset,
                       MemoryRecords records) throws IOException {
        if (records.sizeInBytes() > 0) {
            LOGGER.trace("Inserting {} bytes at end offset {} at position {}",
                records.sizeInBytes(), largestOffset, log.sizeInBytes());
            int physicalPosition = log.sizeInBytes();
            // Fail fast (before any bytes are written) if largestOffset cannot be stored as a
            // relative offset in the indexes.
            ensureOffsetInRange(largestOffset);
            // append the messages
            long appendedBytes = log.append(records);
            LOGGER.trace("Appended {} to {} at end offset {}", appendedBytes, log.file(), largestOffset);
            for (RecordBatch batch : records.batches()) {
                long batchMaxTimestamp = batch.maxTimestamp();
                long batchLastOffset = batch.lastOffset();
                // Track the max timestamp and its owning batch's last offset for the time index.
                if (batchMaxTimestamp > maxTimestampSoFar()) {
                    maxTimestampAndOffsetSoFar = new TimestampOffset(batchMaxTimestamp, batchLastOffset);
                }
                // Add sparse index entries roughly every indexIntervalBytes of appended data.
                if (bytesSinceLastIndexEntry > indexIntervalBytes) {
                    offsetIndex().append(batchLastOffset, physicalPosition);
                    timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar());
                    bytesSinceLastIndexEntry = 0;
                }
                var sizeInBytes = batch.sizeInBytes();
                physicalPosition += sizeInBytes;
                bytesSinceLastIndexEntry += sizeInBytes;
            }
        }
    }
private void ensureOffsetInRange(long offset) throws IOException {
if (!canConvertToRelativeOffset(offset))
throw new LogSegmentOffsetOverflowException(this, offset);
}
    /**
     * Reads a contiguous run of appendable batches from {@code records} starting at
     * {@code position}, copies them into a pooled buffer, and appends them to this segment.
     * A batch is appendable when its last offset is still convertible to a relative offset and it
     * fits in the read buffer together with the batches already selected.
     *
     * @return the number of bytes appended; 0 when the first candidate batch is not appendable
     */
    private int appendChunkFromFile(FileRecords records, int position, BufferSupplier bufferSupplier) throws IOException {
        int bytesToAppend = 0;
        long maxOffset = Long.MIN_VALUE;
        ByteBuffer readBuffer = bufferSupplier.get(1024 * 1024);
        // find all batches that are valid to be appended to the current log segment and
        // determine the maximum offset and timestamp
        Iterator<FileChannelRecordBatch> nextBatches = records.batchesFrom(position).iterator();
        FileChannelRecordBatch batch;
        while ((batch = nextAppendableBatch(nextBatches, readBuffer, bytesToAppend)) != null) {
            maxOffset = batch.lastOffset();
            bytesToAppend += batch.sizeInBytes();
        }
        if (bytesToAppend > 0) {
            // Grow buffer if needed to ensure we copy at least one batch
            if (readBuffer.capacity() < bytesToAppend)
                readBuffer = bufferSupplier.get(bytesToAppend);
            readBuffer.limit(bytesToAppend);
            // Bulk-copy the selected byte range and append it as a single MemoryRecords view.
            records.readInto(readBuffer, position);
            append(maxOffset, MemoryRecords.readableRecords(readBuffer));
        }
        bufferSupplier.release(readBuffer);
        return bytesToAppend;
    }
private FileChannelRecordBatch nextAppendableBatch(Iterator<FileChannelRecordBatch> recordBatches,
ByteBuffer readBuffer,
int bytesToAppend) throws IOException {
if (recordBatches.hasNext()) {
FileChannelRecordBatch batch = recordBatches.next();
if (canConvertToRelativeOffset(batch.lastOffset()) &&
(bytesToAppend == 0 || bytesToAppend + batch.sizeInBytes() < readBuffer.capacity()))
return batch;
}
return null;
}
/**
* Append records from a file beginning at the given position until either the end of the file
* is reached or an offset is found which is too large to convert to a relative offset for the indexes.
*
* @return the number of bytes appended to the log (may be less than the size of the input if an
* offset is encountered which would overflow this segment)
*/
public int appendFromFile(FileRecords records, int start) throws IOException {
int position = start;
BufferSupplier bufferSupplier = new BufferSupplier.GrowableBufferSupplier();
while (position < start + records.sizeInBytes()) {
int bytesAppended = appendChunkFromFile(records, position, bufferSupplier);
if (bytesAppended == 0)
return position - start;
position += bytesAppended;
}
return position - start;
}
/* not thread safe */
public void updateTxnIndex(CompletedTxn completedTxn, long lastStableOffset) throws IOException {
if (completedTxn.isAborted()) {
LOGGER.trace("Writing aborted transaction {} to transaction index, last stable offset is {}", completedTxn, lastStableOffset);
txnIndex.append(new AbortedTxn(completedTxn, lastStableOffset));
}
}
    /**
     * Replays {@code batch} into the producer state manager. If the batch carries a producer id,
     * the producer's append info is updated and, when the batch completes a transaction, any
     * aborted transaction is recorded in the txn index and the transaction is completed.
     * The state manager's map end offset is always advanced past the batch.
     */
    private void updateProducerState(ProducerStateManager producerStateManager, RecordBatch batch) throws IOException {
        if (batch.hasProducerId()) {
            long producerId = batch.producerId();
            ProducerAppendInfo appendInfo = producerStateManager.prepareUpdate(producerId, AppendOrigin.REPLICATION);
            Optional<CompletedTxn> maybeCompletedTxn = appendInfo.append(batch, Optional.empty());
            producerStateManager.update(appendInfo);
            if (maybeCompletedTxn.isPresent()) {
                CompletedTxn completedTxn = maybeCompletedTxn.get();
                long lastStableOffset = producerStateManager.lastStableOffset(completedTxn);
                // Record the (possibly aborted) transaction before marking it complete.
                updateTxnIndex(completedTxn, lastStableOffset);
                producerStateManager.completeTxn(completedTxn);
            }
        }
        producerStateManager.updateMapEndOffset(batch.lastOffset() + 1);
    }
/**
* Equivalent to {@code translateOffset(offset, 0)}.
*
* See {@link #translateOffset(long, int)} for details.
*/
public LogOffsetPosition translateOffset(long offset) throws IOException {
return translateOffset(offset, 0);
}
/**
* Find the physical file position for the message batch that contains the requested offset.
*
* The startingFilePosition argument is an optimization that can be used if we already know a valid starting position
* in the file higher than the greatest-lower-bound from the index.
*
* This method is thread-safe.
*
* @param offset The offset we want to translate
* @param startingFilePosition A lower bound on the file position from which to begin the search. This is purely an optimization and
* when omitted, the search will begin at the position in the offset index.
* @return The base offset, position in the log, and size of the message batch that contains the requested offset,
* or null if no such batch is found.
*/
LogOffsetPosition translateOffset(long offset, int startingFilePosition) throws IOException {
OffsetPosition mapping = offsetIndex().lookup(offset);
return log.searchForOffsetFromPosition(offset, Math.max(mapping.position(), startingFilePosition));
}
/**
* Equivalent to {@code read(startOffset, maxSize, size())}.
*
* See {@link #read(long, int, Optional, boolean)} for details.
*/
public FetchDataInfo read(long startOffset, int maxSize) throws IOException {
return read(startOffset, maxSize, size());
}
/**
* Equivalent to {@code read(startOffset, maxSize, maxPosition, false)}.
*
* See {@link #read(long, int, Optional, boolean)} for details.
*/
public FetchDataInfo read(long startOffset, int maxSize, long maxPosition) throws IOException {
return read(startOffset, maxSize, Optional.of(maxPosition), false);
}
    /**
     * Read a message set from this segment that contains startOffset. The message set will include
     * no more than maxSize bytes and will end before maxOffset if a maxOffset is specified.
     *
     * This method is thread-safe.
     *
     * @param startOffset The logical log offset we are trying to read
     * @param maxSize The maximum number of bytes to include in the message set we read
     * @param maxPositionOpt The maximum position in the log segment that should be exposed for read
     * @param minOneMessage If this is true, the first message will be returned even if it exceeds `maxSize` (if one exists)
     *
     * @return The fetched data and the base offset metadata of the message batch that contains startOffset,
     *         or null if the startOffset is larger than the largest offset in this log
     */
    public FetchDataInfo read(long startOffset, int maxSize, Optional<Long> maxPositionOpt, boolean minOneMessage) throws IOException {
        if (maxSize < 0)
            throw new IllegalArgumentException("Invalid max size " + maxSize + " for log read from segment " + log);
        LogOffsetPosition startOffsetAndSize = translateOffset(startOffset);
        // if the start position is already off the end of the log, return null
        if (startOffsetAndSize == null)
            return null;
        int startPosition = startOffsetAndSize.position;
        LogOffsetMetadata offsetMetadata = new LogOffsetMetadata(startOffsetAndSize.offset, this.baseOffset, startPosition);
        int adjustedMaxSize = maxSize;
        if (minOneMessage)
            // Guarantee progress: permit at least the first batch even if it exceeds maxSize.
            adjustedMaxSize = Math.max(maxSize, startOffsetAndSize.size);
        // return empty records in the fetch-data-info when:
        // 1. adjustedMaxSize is 0 (or)
        // 2. maxPosition to read is unavailable
        if (adjustedMaxSize == 0 || maxPositionOpt.isEmpty())
            return new FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY);
        // calculate the length of the message set to read based on whether or not they gave us a maxOffset
        int fetchSize = Math.min((int) (maxPositionOpt.get() - startPosition), adjustedMaxSize);
        // firstEntryIncomplete is true when even the adjusted size cannot hold the first batch.
        return new FetchDataInfo(offsetMetadata, log.slice(startPosition, fetchSize),
            adjustedMaxSize < startOffsetAndSize.size, Optional.empty());
    }
public OptionalLong fetchUpperBoundOffset(OffsetPosition startOffsetPosition, int fetchSize) throws IOException {
return offsetIndex().fetchUpperBoundOffset(startOffsetPosition, fetchSize)
.map(offsetPosition -> OptionalLong.of(offsetPosition.offset())).orElseGet(OptionalLong::empty);
}
    /**
     * Run recovery on the given segment. This will rebuild the index from the log file and lop off any invalid bytes
     * from the end of the log and index.
     *
     * This method is not thread-safe.
     *
     * @param producerStateManager Producer state corresponding to the segment's base offset. This is needed to recover
     *                             the transaction index.
     * @param leaderEpochCache a cache for updating the leader epoch during recovery.
     * @return The number of bytes truncated from the log
     * @throws LogSegmentOffsetOverflowException if the log segment contains an offset that causes the index offset to overflow
     */
    public int recover(ProducerStateManager producerStateManager, LeaderEpochFileCache leaderEpochCache) throws IOException {
        // Start from a clean slate: all indexes and the max-timestamp tracker are rebuilt below.
        offsetIndex().reset();
        timeIndex().reset();
        txnIndex.reset();
        int validBytes = 0;
        int lastIndexEntry = 0;
        maxTimestampAndOffsetSoFar = TimestampOffset.UNKNOWN;
        try {
            for (RecordBatch batch : log.batches()) {
                batch.ensureValid();
                ensureOffsetInRange(batch.lastOffset());
                // The max timestamp is exposed at the batch level, so no need to iterate the records
                if (batch.maxTimestamp() > maxTimestampSoFar()) {
                    maxTimestampAndOffsetSoFar = new TimestampOffset(batch.maxTimestamp(), batch.lastOffset());
                }
                // Build offset index
                if (validBytes - lastIndexEntry > indexIntervalBytes) {
                    offsetIndex().append(batch.lastOffset(), validBytes);
                    timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar());
                    lastIndexEntry = validBytes;
                }
                validBytes += batch.sizeInBytes();
                if (batch.magic() >= RecordBatch.MAGIC_VALUE_V2) {
                    // v2+ batches carry leader epochs and producer state; replay both.
                    if (batch.partitionLeaderEpoch() >= 0 &&
                        (leaderEpochCache.latestEpoch().isEmpty() || batch.partitionLeaderEpoch() > leaderEpochCache.latestEpoch().get()))
                        leaderEpochCache.assign(batch.partitionLeaderEpoch(), batch.baseOffset());
                    updateProducerState(producerStateManager, batch);
                }
            }
        } catch (CorruptRecordException | InvalidRecordException e) {
            // A corrupt batch ends the scan: bytes before it are kept, everything after is dropped.
            LOGGER.warn("Found invalid messages in log segment {} at byte offset {}.", log.file().getAbsolutePath(),
                validBytes, e);
        }
        int truncated = log.sizeInBytes() - validBytes;
        if (truncated > 0)
            LOGGER.debug("Truncated {} invalid bytes at the end of segment {} during recovery", truncated, log.file().getAbsolutePath());
        log.truncateTo(validBytes);
        offsetIndex().trimToValidSize();
        // A normally closed segment always appends the biggest timestamp ever seen into log segment, we do this as well.
        timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar(), true);
        timeIndex().trimToValidSize();
        return truncated;
    }
/**
* Check whether the last offset of the last batch in this segment overflows the indexes.
*/
public boolean hasOverflow() throws IOException {
long nextOffset = readNextOffset();
return nextOffset > baseOffset && !canConvertToRelativeOffset(nextOffset - 1);
}
public TxnIndexSearchResult collectAbortedTxns(long fetchOffset, long upperBoundOffset) {
return txnIndex.collectAbortedTxns(fetchOffset, upperBoundOffset);
}
@Override
public String toString() {
// We don't call `largestRecordTimestamp` below to avoid materializing the time index when `toString` is invoked
return "LogSegment(baseOffset=" + baseOffset +
", size=" + size() +
", lastModifiedTime=" + lastModified() +
", largestRecordTimestamp=" + maxTimestampAndOffsetSoFar.timestamp() +
")";
}
    /**
     * Truncate off all index and log entries with offsets >= the given offset.
     * If the given offset is larger than the largest message in this segment, do nothing.
     *
     * This method is not thread-safe.
     *
     * @param offset The offset to truncate to
     * @return The number of log bytes truncated
     */
    public int truncateTo(long offset) throws IOException {
        // Do offset translation before truncating the index to avoid needless scanning
        // in case we truncate the full index
        LogOffsetPosition mapping = translateOffset(offset);
        OffsetIndex offsetIndex = offsetIndex();
        TimeIndex timeIndex = timeIndex();
        offsetIndex.truncateTo(offset);
        timeIndex.truncateTo(offset);
        txnIndex.truncateTo(offset);
        // After truncation, reset and allocate more space for the (new currently active) index
        offsetIndex.resize(offsetIndex.maxIndexSize());
        timeIndex.resize(timeIndex.maxIndexSize());
        int bytesTruncated;
        if (mapping == null)
            bytesTruncated = 0; // offset is beyond the end of the segment: no log bytes to cut
        else
            bytesTruncated = log.truncateTo(mapping.position);
        // If the segment is now empty, treat it as freshly created for roll-time purposes.
        if (log.sizeInBytes() == 0) {
            created = time.milliseconds();
            rollingBasedTimestamp = OptionalLong.empty();
        }
        bytesSinceLastIndexEntry = 0;
        // The previous max timestamp may have been truncated away; recompute it.
        if (maxTimestampSoFar() >= 0)
            maxTimestampAndOffsetSoFar = readLargestTimestamp();
        return bytesTruncated;
    }
private TimestampOffset readLargestTimestamp() throws IOException {
// Get the last time index entry. If the time index is empty, it will return (-1, baseOffset)
TimestampOffset lastTimeIndexEntry = timeIndex().lastEntry();
OffsetPosition offsetPosition = offsetIndex().lookup(lastTimeIndexEntry.offset());
// Scan the rest of the messages to see if there is a larger timestamp after the last time index entry.
FileRecords.TimestampAndOffset maxTimestampOffsetAfterLastEntry = log.largestTimestampAfter(offsetPosition.position());
if (maxTimestampOffsetAfterLastEntry.timestamp > lastTimeIndexEntry.timestamp())
return new TimestampOffset(maxTimestampOffsetAfterLastEntry.timestamp, maxTimestampOffsetAfterLastEntry.offset);
return lastTimeIndexEntry;
}
/**
* Calculate the offset that would be used for the next message to be append to this segment.
* Note that this is expensive.
*
* This method is thread-safe.
*/
public long readNextOffset() throws IOException {
FetchDataInfo fetchData = read(offsetIndex().lastOffset(), log.sizeInBytes());
if (fetchData == null)
return baseOffset;
else
return fetchData.records.lastBatch()
.map(RecordBatch::nextOffset)
.orElse(baseOffset);
}
/**
* Flush this log segment to disk.
*
* This method is thread-safe.
*/
public void flush() throws IOException {
try {
LOG_FLUSH_TIMER.time(new Callable<Void>() {
// lambdas cannot declare a more specific exception type, so we use an anonymous inner class
@Override
public Void call() throws IOException {
log.flush();
offsetIndex().flush();
timeIndex().flush();
txnIndex.flush();
return null;
}
});
} catch (Exception e) {
if (e instanceof IOException)
throw (IOException) e;
else if (e instanceof RuntimeException)
throw (RuntimeException) e;
else
throw new IllegalStateException("Unexpected exception thrown: " + e, e);
}
}
    /**
     * Update the directory reference for the log and indices in this segment. This would typically
     * be called after a directory is renamed.
     */
    void updateParentDir(File dir) {
        // Propagate the new parent directory to the record file and each of the three indexes.
        log.updateParentDir(dir);
        lazyOffsetIndex.updateParentDir(dir);
        lazyTimeIndex.updateParentDir(dir);
        txnIndex.updateParentDir(dir);
    }
    /**
     * Change the suffix for the index and log files for this log segment.
     * IOException from this method should be handled by the caller.
     */
    public void changeFileSuffixes(String oldSuffix, String newSuffix) throws IOException {
        // The record file is renamed first, then each index file.
        log.renameTo(new File(Utils.replaceSuffix(log.file().getPath(), oldSuffix, newSuffix)));
        lazyOffsetIndex.renameTo(new File(Utils.replaceSuffix(offsetIndexFile().getPath(), oldSuffix, newSuffix)));
        lazyTimeIndex.renameTo(new File(Utils.replaceSuffix(timeIndexFile().getPath(), oldSuffix, newSuffix)));
        txnIndex.renameTo(new File(Utils.replaceSuffix(txnIndex.file().getPath(), oldSuffix, newSuffix)));
    }
public boolean hasSuffix(String suffix) {
return log.file().getName().endsWith(suffix) &&
offsetIndexFile().getName().endsWith(suffix) &&
timeIndexFile().getName().endsWith(suffix) &&
txnIndex.file().getName().endsWith(suffix);
}
    /**
     * Append the largest time index entry to the time index and trim the log and indexes.
     *
     * The time index entry appended will be used to decide when to delete the segment.
     */
    public void onBecomeInactiveSegment() throws IOException {
        // Record the final max-timestamp entry before the index is trimmed.
        timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar(), true);
        // Release unused preallocated space now that the segment will no longer grow.
        offsetIndex().trimToValidSize();
        timeIndex().trimToValidSize();
        log.trim();
    }
/**
* If not previously loaded,
* load the timestamp of the first message into memory.
*/
private void loadFirstBatchTimestamp() {
if (rollingBasedTimestamp.isEmpty()) {
Iterator<FileChannelRecordBatch> iter = log.batches().iterator();
if (iter.hasNext())
rollingBasedTimestamp = OptionalLong.of(iter.next().maxTimestamp());
}
}
/**
* The time this segment has waited to be rolled.
* If the first message batch has a timestamp we use its timestamp to determine when to roll a segment. A segment
* is rolled if the difference between the new batch's timestamp and the first batch's timestamp exceeds the
* segment rolling time.
* If the first batch does not have a timestamp, we use the wall clock time to determine when to roll a segment. A
* segment is rolled if the difference between the current wall clock time and the segment create time exceeds the
* segment rolling time.
*/
public long timeWaitedForRoll(long now, long messageTimestamp) {
// Load the timestamp of the first message into memory
loadFirstBatchTimestamp();
long ts = rollingBasedTimestamp.orElse(-1L);
if (ts >= 0)
return messageTimestamp - ts;
return now - created;
}
/**
* @return the first batch timestamp if the timestamp is available. Otherwise, return Long.MaxValue
*/
public long getFirstBatchTimestamp() {
loadFirstBatchTimestamp();
OptionalLong timestamp = rollingBasedTimestamp;
if (timestamp.isPresent() && timestamp.getAsLong() >= 0)
return timestamp.getAsLong();
return Long.MAX_VALUE;
}
    /**
     * Search the message offset based on timestamp and offset.
     *
     * This method returns an option of TimestampOffset. The returned value is determined using the following ordered list of rules:
     *
     * - If all the messages in the segment have smaller offsets, return Empty
     * - If all the messages in the segment have smaller timestamps, return Empty
     * - If all the messages in the segment have larger timestamps, or no message in the segment has a timestamp
     * the returned the offset will be max(the base offset of the segment, startingOffset) and the timestamp will be Message.NoTimestamp.
     * - Otherwise, return an option of TimestampOffset. The offset is the offset of the first message whose timestamp
     * is greater than or equals to the target timestamp and whose offset is greater than or equals to the startingOffset.
     *
     * This method only returns Empty when 1) all messages' offset < startingOffset or 2) the log is not empty, but we did not
     * see any message when scanning the log from the indexed position. The latter could happen if the log is truncated
     * after we get the indexed position but before we scan the log from there. In this case we simply return Empty and the
     * caller will need to check on the truncated log and maybe retry or even do the search on another log segment.
     *
     * @param timestampMs The timestamp to search for.
     * @param startingOffset The starting offset to search.
     * @return the timestamp and offset of the first message that meets the requirements. Empty will be returned if there is no such message.
     */
    public Optional<FileRecords.TimestampAndOffset> findOffsetByTimestamp(long timestampMs, long startingOffset) throws IOException {
        // Get the index entry with a timestamp less than or equal to the target timestamp
        TimestampOffset timestampOffset = timeIndex().lookup(timestampMs);
        // Start scanning from whichever is later: the indexed offset for that timestamp or the caller's startingOffset.
        int position = offsetIndex().lookup(Math.max(timestampOffset.offset(), startingOffset)).position();
        // Search the timestamp
        return Optional.ofNullable(log.searchForTimestamp(timestampMs, position, startingOffset));
    }
    /**
     * Close this log segment
     */
    @Override
    public void close() throws IOException {
        // Best-effort: persist the final max-timestamp entry before closing. Failures are logged at WARN
        // and swallowed so that the underlying resources below are always released.
        if (maxTimestampAndOffsetSoFar != TimestampOffset.UNKNOWN)
            Utils.swallow(LOGGER, Level.WARN, "maybeAppend", () -> timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar(), true));
        Utils.closeAll(lazyOffsetIndex, lazyTimeIndex, log, txnIndex);
    }
    /**
     * Close file handlers used by the log segment but don't write to disk. This is used when the disk may have failed
     */
    void closeHandlers() {
        // Each close is swallowed/quiet so that one failing handler does not prevent closing the others.
        Utils.swallow(LOGGER, Level.WARN, "offsetIndex", lazyOffsetIndex::closeHandler);
        Utils.swallow(LOGGER, Level.WARN, "timeIndex", lazyTimeIndex::closeHandler);
        Utils.swallow(LOGGER, Level.WARN, "log", log::closeHandlers);
        Utils.closeQuietly(txnIndex, "txnIndex", LOGGER);
    }
    /**
     * Delete this log segment from the filesystem.
     *
     * @throws IOException if deleting any of the backing files fails
     */
    public void deleteIfExists() throws IOException {
        try {
            // Delete the log, both lazy indexes and the transaction index.
            // NOTE(review): Utils.tryAll presumably attempts every action before surfacing a failure — confirm.
            Utils.tryAll(List.of(
                    () -> deleteTypeIfExists(log::deleteIfExists, "log", log.file(), true),
                    () -> deleteTypeIfExists(lazyOffsetIndex::deleteIfExists, "offset index", offsetIndexFile(), true),
                    () -> deleteTypeIfExists(lazyTimeIndex::deleteIfExists, "time index", timeIndexFile(), true),
                    () -> deleteTypeIfExists(txnIndex::deleteIfExists, "transaction index", txnIndex.file(), false)));
        } catch (Throwable t) {
            // Rethrow known categories unchanged; wrap anything else so callers never see a bare Throwable.
            if (t instanceof IOException)
                throw (IOException) t;
            if (t instanceof Error)
                throw (Error) t;
            if (t instanceof RuntimeException)
                throw (RuntimeException) t;
            throw new IllegalStateException("Unexpected exception: " + t.getMessage(), t);
        }
    }
    // Helper method for `deleteIfExists()`
    /**
     * Delete one of the segment's backing files if it exists, logging the outcome.
     *
     * @param delete action performing the deletion; returns true if a file was actually removed
     * @param fileType human-readable description used in log messages (e.g. "offset index")
     * @param file the file being deleted, also used for the future-dir fallback below
     * @param logIfMissing whether to log when the file did not exist
     * @throws IOException wrapped with the file path for context if the deletion fails
     */
    private Void deleteTypeIfExists(StorageAction<Boolean, IOException> delete, String fileType, File file, boolean logIfMissing) throws IOException {
        try {
            if (delete.execute())
                LOGGER.info("Deleted {} {}.", fileType, file.getAbsolutePath());
            else {
                if (logIfMissing) {
                    LOGGER.info("Failed to delete {} {} because it does not exist.", fileType, file.getAbsolutePath());
                }
                if (file.getParent() == null) {
                    // No parent directory to inspect, so no fallback is possible.
                    return null;
                }
                // During alter log dir, the log segment may be moved to a new directory, so async delete may fail.
                // Fallback to delete the file in the new directory to avoid orphan file.
                Matcher dirMatcher = FUTURE_DIR_PATTERN.matcher(file.getParent());
                if (dirMatcher.matches()) {
                    // Rebuild the topic-partition directory path from the matched "future" directory name.
                    String topicPartitionAbsolutePath = dirMatcher.group(1) + "-" + dirMatcher.group(2);
                    File fallbackFile = new File(topicPartitionAbsolutePath, file.getName());
                    // Only delete the fallback copy when it is explicitly marked with the deleted-file suffix.
                    if (fallbackFile.exists() && file.getName().endsWith(LogFileUtils.DELETED_FILE_SUFFIX) && fallbackFile.delete()) {
                        LOGGER.info("Fallback to delete {} {}.", fileType, fallbackFile.getAbsolutePath());
                    }
                }
            }
            return null;
        } catch (IOException e) {
            // Add the file path to the exception for easier diagnosis upstream.
            throw new IOException("Delete of " + fileType + " " + file.getAbsolutePath() + " failed.", e);
        }
    }
// Visible for testing
public boolean deleted() {
return !log.file().exists() && !offsetIndexFile().exists() && !timeIndexFile().exists() && !txnIndex.file().exists();
}
    /**
     * The last modified time of this log segment as a unix time stamp
     */
    public long lastModified() {
        // Delegates to the log file's filesystem mtime (milliseconds since epoch; 0 if the file does not exist).
        return log.file().lastModified();
    }
/**
* The largest timestamp this segment contains, if maxTimestampSoFar >= 0, otherwise Empty.
*/
public OptionalLong largestRecordTimestamp() throws IOException {
long maxTimestampSoFar = maxTimestampSoFar();
if (maxTimestampSoFar >= 0)
return OptionalLong.of(maxTimestampSoFar);
return OptionalLong.empty();
}
/**
* The largest timestamp this segment contains.
*/
public long largestTimestamp() throws IOException {
long maxTimestampSoFar = maxTimestampSoFar();
if (maxTimestampSoFar >= 0)
return maxTimestampSoFar;
return lastModified();
}
    /**
     * Change the last modified time for this log segment
     *
     * @param ms the new modification time, in milliseconds since the epoch
     * @throws IOException if updating any file's timestamp fails
     */
    public void setLastModified(long ms) throws IOException {
        FileTime fileTime = FileTime.fromMillis(ms);
        Files.setLastModifiedTime(log.file().toPath(), fileTime);
        Files.setLastModifiedTime(offsetIndexFile().toPath(), fileTime);
        Files.setLastModifiedTime(timeIndexFile().toPath(), fileTime);
        // NOTE(review): the transaction index file's mtime is not updated here — confirm whether that is intentional.
    }
    /**
     * Convenience overload of {@link #open(File, long, LogConfig, Time, boolean, int, boolean, String)}
     * for a segment whose file is not expected to already exist and whose file names carry no suffix.
     */
    public static LogSegment open(File dir, long baseOffset, LogConfig config, Time time, int initFileSize, boolean preallocate) throws IOException {
        return open(dir, baseOffset, config, time, false, initFileSize, preallocate, "");
    }
    /**
     * Open (or create) a log segment rooted at {@code dir} with the given base offset.
     *
     * @param dir the parent directory holding the segment's files
     * @param baseOffset the segment's base offset, used to derive all four file names
     * @param config log configuration supplying the max index size, index interval and roll jitter
     * @param time clock used by the segment
     * @param fileAlreadyExists whether the log file is expected to already exist on disk
     * @param initFileSize initial size of the log file when preallocating
     * @param preallocate whether to preallocate the log file to {@code initFileSize}
     * @param fileSuffix suffix appended to each file name (empty for live segments)
     * @throws IOException if opening or creating any of the backing files fails
     */
    public static LogSegment open(File dir, long baseOffset, LogConfig config, Time time, boolean fileAlreadyExists,
                                  int initFileSize, boolean preallocate, String fileSuffix) throws IOException {
        int maxIndexSize = config.maxIndexSize;
        // Offset and time indexes are opened lazily; the transaction index is created eagerly.
        return new LogSegment(
            FileRecords.open(LogFileUtils.logFile(dir, baseOffset, fileSuffix), fileAlreadyExists, initFileSize, preallocate),
            LazyIndex.forOffset(LogFileUtils.offsetIndexFile(dir, baseOffset, fileSuffix), baseOffset, maxIndexSize),
            LazyIndex.forTime(LogFileUtils.timeIndexFile(dir, baseOffset, fileSuffix), baseOffset, maxIndexSize),
            new TransactionIndex(baseOffset, LogFileUtils.transactionIndexFile(dir, baseOffset, fileSuffix)),
            baseOffset,
            config.indexInterval,
            config.randomSegmentJitter(),
            time);
    }
    /**
     * Delete the files backing a segment with the given base offset and suffix, if they exist.
     * Missing files are silently skipped.
     *
     * @param dir the directory containing the segment's files
     * @param baseOffset the segment's base offset, used to derive the file names
     * @param fileSuffix suffix carried by the file names (may be empty)
     * @throws IOException if an existing file cannot be deleted
     */
    public static void deleteIfExists(File dir, long baseOffset, String fileSuffix) throws IOException {
        // The three indexes are removed first, the log file last.
        deleteFileIfExists(LogFileUtils.offsetIndexFile(dir, baseOffset, fileSuffix));
        deleteFileIfExists(LogFileUtils.timeIndexFile(dir, baseOffset, fileSuffix));
        deleteFileIfExists(LogFileUtils.transactionIndexFile(dir, baseOffset, fileSuffix));
        deleteFileIfExists(LogFileUtils.logFile(dir, baseOffset, fileSuffix));
    }
    /**
     * Delete the given file if present.
     *
     * @return true if the file existed and was deleted, false if it did not exist
     * @throws IOException if the file exists but cannot be deleted
     */
    private static boolean deleteFileIfExists(File file) throws IOException {
        return Files.deleteIfExists(file.toPath());
    }
}
| is |
java | apache__camel | components/camel-google/camel-google-pubsub/src/test/java/org/apache/camel/component/google/pubsub/integration/SingleExchangeRoundtripIT.java | {
"start": 1574,
"end": 4741
} | class ____ extends PubsubTestSupport {
private static final String TOPIC_NAME = "singleSend";
private static final String SUBSCRIPTION_NAME = "singleReceive";
@EndpointInject("direct:from")
private Endpoint directIn;
@EndpointInject("google-pubsub:{{project.id}}:" + TOPIC_NAME)
private Endpoint pubsubTopic;
@EndpointInject("mock:sendResult")
private MockEndpoint sendResult;
@EndpointInject("google-pubsub:{{project.id}}:" + SUBSCRIPTION_NAME + "?synchronousPull=true")
private Endpoint pubsubSubscription;
@EndpointInject("mock:receiveResult")
private MockEndpoint receiveResult;
@Produce("direct:from")
private ProducerTemplate producer;
@Override
public void createTopicSubscription() {
createTopicSubscriptionPair(TOPIC_NAME, SUBSCRIPTION_NAME);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(directIn).routeId("Single_Send").to(pubsubTopic).to(sendResult);
from(pubsubSubscription).routeId("Single_Receive").to("direct:one");
from("direct:one").to(receiveResult);
}
};
}
@Test
public void testSingleMessageSend() throws Exception {
Exchange exchange = new DefaultExchange(context);
String attributeKey = "ATTRIBUTE-TEST-KEY";
String attributeValue = "ATTRIBUTE-TEST-VALUE";
Map<String, String> attributes = new HashMap<>();
attributes.put(attributeKey, attributeValue);
exchange.getIn().setBody("Single : " + exchange.getExchangeId());
exchange.getIn().setHeader(GooglePubsubConstants.ATTRIBUTES, attributes);
receiveResult.expectedMessageCount(1);
receiveResult.expectedBodiesReceivedInAnyOrder(exchange.getIn().getBody());
producer.send(exchange);
List<Exchange> sentExchanges = sendResult.getExchanges();
assertEquals(1, sentExchanges.size(), "Sent exchanges");
Exchange sentExchange = sentExchanges.get(0);
assertEquals(exchange.getIn().getHeader(GooglePubsubConstants.MESSAGE_ID),
sentExchange.getIn().getHeader(GooglePubsubConstants.MESSAGE_ID), "Sent ID");
receiveResult.assertIsSatisfied(5000);
List<Exchange> receivedExchanges = receiveResult.getExchanges();
assertNotNull(receivedExchanges, "Received exchanges");
Exchange receivedExchange = receivedExchanges.get(0);
assertNotNull(receivedExchange.getIn().getHeader(GooglePubsubConstants.MESSAGE_ID), "PUBSUB Message ID Property");
assertNotNull(receivedExchange.getIn().getHeader(GooglePubsubConstants.PUBLISH_TIME), "PUBSUB Published Time");
assertEquals(attributeValue,
((Map) receivedExchange.getIn().getHeader(GooglePubsubConstants.ATTRIBUTES)).get(attributeKey),
"PUBSUB Header Attribute");
assertEquals(sentExchange.getIn().getHeader(GooglePubsubConstants.MESSAGE_ID),
receivedExchange.getIn().getHeader(GooglePubsubConstants.MESSAGE_ID));
}
}
| SingleExchangeRoundtripIT |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/runtime/src/main/java/io/quarkus/smallrye/reactivemessaging/runtime/HealthCenterFilterConfig.java | {
"start": 553,
"end": 780
} | interface ____ {
/**
* Configuration for the health center filter.
*/
@ConfigDocMapKey("channel")
@ConfigDocSection
Map<String, HealthCenterConfig> health();
@ConfigGroup
| HealthCenterFilterConfig |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/alterTable/MySqlAlterTableTest19.java | {
"start": 911,
"end": 1903
} | class ____ extends TestCase {
public void test_alter_add_key() throws Exception {
String sql = "ALTER TABLE `test`.`tb1` ADD KEY `idx_parent_id` (`parent_id`)";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("ALTER TABLE `test`.`tb1`\n\tADD KEY `idx_parent_id` (`parent_id`)", output);
}
public void test_alter_add_index() throws Exception {
String sql = "ALTER TABLE `test`.`tb1` ADD INDEX `idx_parent_id` (`parent_id`)";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("ALTER TABLE `test`.`tb1`\n\tADD INDEX `idx_parent_id` (`parent_id`)", output);
}
}
| MySqlAlterTableTest19 |
java | spring-projects__spring-boot | module/spring-boot-health/src/test/java/org/springframework/boot/health/autoconfigure/contributor/CompositeHealthContributorConfigurationTests.java | {
"start": 1780,
"end": 2018
} | class ____
extends CompositeHealthContributorConfiguration<TestHealthIndicator, TestBean> {
TestCompositeHealthContributorConfiguration() {
super(TestHealthIndicator::new);
}
}
static | TestCompositeHealthContributorConfiguration |
java | apache__camel | components/camel-dhis2/camel-dhis2-component/src/generated/java/org/apache/camel/component/dhis2/Dhis2GetEndpointConfigurationConfigurer.java | {
"start": 729,
"end": 7288
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("ApiName", org.apache.camel.component.dhis2.internal.Dhis2ApiName.class);
map.put("ArrayName", java.lang.String.class);
map.put("BaseApiUrl", java.lang.String.class);
map.put("Client", org.hisp.dhis.integration.sdk.api.Dhis2Client.class);
map.put("Fields", java.lang.String.class);
map.put("Filter", java.util.List.class);
map.put("MethodName", java.lang.String.class);
map.put("Paging", java.lang.Boolean.class);
map.put("Password", java.lang.String.class);
map.put("Path", java.lang.String.class);
map.put("PersonalAccessToken", java.lang.String.class);
map.put("QueryParams", java.util.Map.class);
map.put("RootJunction", org.apache.camel.component.dhis2.api.RootJunctionEnum.class);
map.put("Username", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.dhis2.Dhis2GetEndpointConfiguration target = (org.apache.camel.component.dhis2.Dhis2GetEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.dhis2.internal.Dhis2ApiName.class, value)); return true;
case "arrayname":
case "arrayName": target.setArrayName(property(camelContext, java.lang.String.class, value)); return true;
case "baseapiurl":
case "baseApiUrl": target.setBaseApiUrl(property(camelContext, java.lang.String.class, value)); return true;
case "client": target.setClient(property(camelContext, org.hisp.dhis.integration.sdk.api.Dhis2Client.class, value)); return true;
case "fields": target.setFields(property(camelContext, java.lang.String.class, value)); return true;
case "filter": target.setFilter(property(camelContext, java.util.List.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "paging": target.setPaging(property(camelContext, java.lang.Boolean.class, value)); return true;
case "password": target.setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "path": target.setPath(property(camelContext, java.lang.String.class, value)); return true;
case "personalaccesstoken":
case "personalAccessToken": target.setPersonalAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "queryparams":
case "queryParams": target.setQueryParams(property(camelContext, java.util.Map.class, value)); return true;
case "rootjunction":
case "rootJunction": target.setRootJunction(property(camelContext, org.apache.camel.component.dhis2.api.RootJunctionEnum.class, value)); return true;
case "username": target.setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return org.apache.camel.component.dhis2.internal.Dhis2ApiName.class;
case "arrayname":
case "arrayName": return java.lang.String.class;
case "baseapiurl":
case "baseApiUrl": return java.lang.String.class;
case "client": return org.hisp.dhis.integration.sdk.api.Dhis2Client.class;
case "fields": return java.lang.String.class;
case "filter": return java.util.List.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "paging": return java.lang.Boolean.class;
case "password": return java.lang.String.class;
case "path": return java.lang.String.class;
case "personalaccesstoken":
case "personalAccessToken": return java.lang.String.class;
case "queryparams":
case "queryParams": return java.util.Map.class;
case "rootjunction":
case "rootJunction": return org.apache.camel.component.dhis2.api.RootJunctionEnum.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.dhis2.Dhis2GetEndpointConfiguration target = (org.apache.camel.component.dhis2.Dhis2GetEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return target.getApiName();
case "arrayname":
case "arrayName": return target.getArrayName();
case "baseapiurl":
case "baseApiUrl": return target.getBaseApiUrl();
case "client": return target.getClient();
case "fields": return target.getFields();
case "filter": return target.getFilter();
case "methodname":
case "methodName": return target.getMethodName();
case "paging": return target.getPaging();
case "password": return target.getPassword();
case "path": return target.getPath();
case "personalaccesstoken":
case "personalAccessToken": return target.getPersonalAccessToken();
case "queryparams":
case "queryParams": return target.getQueryParams();
case "rootjunction":
case "rootJunction": return target.getRootJunction();
case "username": return target.getUsername();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "filter": return java.lang.String.class;
case "queryparams":
case "queryParams": return java.lang.Object.class;
default: return null;
}
}
}
| Dhis2GetEndpointConfigurationConfigurer |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/presentation/StandardRepresentation_array_format_Test.java | {
"start": 1308,
"end": 15246
} | class ____ extends AbstractBaseRepresentationTest {
private static final StandardRepresentation STANDARD_REPRESENTATION = new StandardRepresentation();
@Test
void should_return_null_if_array_is_null() {
// GIVEN
final Object array = null;
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isNull();
}
@Test
void should_return_empty_brackets_if_array_is_empty() {
// GIVEN
final Object[] array = new Object[0];
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[]");
}
@ParameterizedTest(name = "{1} should be formatted as {2}")
@MethodSource("should_format_primitive_array_source")
void should_format_primitive_array(Object array, String expectedDescription) {
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo(expectedDescription);
}
private static Stream<Arguments> should_format_primitive_array_source() {
return Stream.of(Arguments.of(new boolean[] { true, false }, "[true, false]"),
Arguments.of(new char[] { 'a', 'b' }, "['a', 'b']"),
Arguments.of(new double[] { 6.8, 8.3 }, "[6.8, 8.3]"),
Arguments.of(new float[] { 6.1f, 8.6f }, "[6.1f, 8.6f]"),
Arguments.of(new int[] { 78, 66 }, "[78, 66]"),
Arguments.of(new long[] { 160L, 98L }, "[160L, 98L]"),
Arguments.of(new short[] { (short) 5, (short) 8 }, "[5, 8]"),
Arguments.of(new int[] { 78, 66 }, "[78, 66]"),
Arguments.of(new int[] { 78, 66 }, "[78, 66]"),
Arguments.of(new int[] { 78, 66 }, "[78, 66]"),
Arguments.of(new boolean[] { true, false }, "[true, false]"));
}
@Test
void should_format_byte_array_in_hex_representation() {
// GIVEN
Object array = new byte[] { (byte) 3, (byte) 8 };
// WHEN
String formatted = new HexadecimalRepresentation().toStringOf(array);
// THEN
then(formatted).isEqualTo("[0x03, 0x08]");
}
@Test
void should_format_String_array() {
// GIVEN
Object[] array = { "Hello", "World" };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", \"World\"]");
}
@Test
void should_format_Object_array() {
// GIVEN
Object[] array = { "Hello", new Person("Anakin") };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", 'Anakin']");
}
@Test
void should_format_Object_array_on_new_line_smart() {
// GIVEN
StandardRepresentation.setMaxLengthForSingleLineDescription(11);
Object[] array = { "Hello", new Person("Anakin") };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo(format("[\"Hello\",%n" +
" 'Anakin']"));
}
@Test
void should_format_Object_array_that_has_primitive_array_as_element() {
// GIVEN
boolean[] booleans = { true, false };
Object[] array = { "Hello", booleans };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", [true, false]]");
}
@Test
void should_format_Object_array_with_itself_as_element() {
// GIVEN
Object[] array = { "Hello", null };
array[1] = array;
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", (this array)]");
}
@Test
void should_format_self_referencing_Object_array() {
// GIVEN
Object[] array = { null, null };
array[0] = array;
array[1] = array;
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[(this array), (this array)]");
}
@Test
void should_format_Object_array_having_with_primitive_array() {
// GIVEN
Object[] array = { "Hello", new int[] {} };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", []]");
}
@Test
void should_format_Object_array_with_null_element() {
// GIVEN
Object[] array = { "Hello", null };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"Hello\", null]");
}
@Test
void should_format_big_primitive_array() {
// GIVEN
int[] array = new int[1 << 28];
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).contains("...");
then(StringUtils.countMatches(formatted, "0")).isEqualTo(Configuration.MAX_ELEMENTS_FOR_PRINTING);
}
@Test
void should_format_big_object_array() {
// GIVEN
Object[] array = new Object[1 << 28];
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).contains("...");
then(StringUtils.countMatches(formatted, "null")).isEqualTo(Configuration.MAX_ELEMENTS_FOR_PRINTING);
}
@Test
void should_format_array_up_to_the_maximum_allowed_elements() {
// GIVEN
StandardRepresentation.setMaxElementsForPrinting(3);
Object[] array = { "First", "Second", "Third", "Fourth", "Fifth", "Sixth", "Seventh" };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
then(formatted).isEqualTo("[\"First\", \"Second\", ... \"Seventh\"]");
}
@Test
void should_format_array_with_one_element_per_line() {
// GIVEN
StandardRepresentation.setMaxLengthForSingleLineDescription(25);
Object[] array = { "1234567890", "1234567890", "1234567890", "1234567890" };
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
String formattedAfterNewLine = " <" + formatted + ">";
then(formattedAfterNewLine).isEqualTo(format(" <[\"1234567890\",%n" +
" \"1234567890\",%n" +
" \"1234567890\",%n" +
" \"1234567890\"]>"));
}
@ParameterizedTest(name = "with printing {0} max, {1} should be formatted as {2}")
@MethodSource("should_format_array_source")
void should_format_array_honoring_display_configuration(int maxElementsForPrinting, Object[] array,
String expectedDescription) {
// GIVEN
StandardRepresentation.setMaxElementsForPrinting(maxElementsForPrinting);
StandardRepresentation.setMaxLengthForSingleLineDescription(15);
// WHEN
String formatted = STANDARD_REPRESENTATION.toStringOf(array);
// THEN
// formattedAfterNewLine is built to show we align values on the first element.
String formattedAfterNewLine = " <" + formatted + ">";
then(formattedAfterNewLine).isEqualTo(expectedDescription.formatted());
}
private static Stream<Arguments> should_format_array_source() {
return Stream.of(Arguments.of(12, array(1, 2, 3, 4, 5), " <[1, 2, 3, 4, 5]>"),
Arguments.of(12, array("First", 3, "foo", "bar"), " <[\"First\",%n" +
" 3,%n" +
" \"foo\",%n" +
" \"bar\"]>"),
Arguments.of(12, array("First", 3, 4, "foo", "bar", 5, "another", 6), " <[\"First\",%n" +
" 3,%n" +
" 4,%n" +
" \"foo\",%n" +
" \"bar\",%n" +
" 5,%n" +
" \"another\",%n" +
" 6]>"),
Arguments.of(12, array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), " <[1,%n" +
" 2,%n" +
" 3,%n" +
" 4,%n" +
" 5,%n" +
" 6,%n" +
" 7,%n" +
" 8,%n" +
" 9,%n" +
" 10]>"),
Arguments.of(12, array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), " <[1,%n" +
" 2,%n" +
" 3,%n" +
" 4,%n" +
" 5,%n" +
" 6,%n" +
" 7,%n" +
" 8,%n" +
" 9,%n" +
" 10,%n" +
" 11,%n" +
" 12]>"),
Arguments.of(11, array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), " <[1,%n" +
" 2,%n" +
" 3,%n" +
" 4,%n" +
" 5,%n" +
" 6,%n" +
" ...%n" +
" 15,%n" +
" 16,%n" +
" 17,%n" +
" 18,%n" +
" 19]>"),
Arguments.of(12, array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20), " <[1,%n" +
" 2,%n" +
" 3,%n" +
" 4,%n" +
" 5,%n" +
" 6,%n" +
" ...%n" +
" 15,%n" +
" 16,%n" +
" 17,%n" +
" 18,%n" +
" 19,%n" +
" 20]>"));
}
private record Person(String name) {
@Override
public String toString() {
return quote(name);
}
}
}
| StandardRepresentation_array_format_Test |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationCoverageTests.java | {
"start": 254815,
"end": 254936
} | class ____ {
public int index1 = 1;
public int index2 = 3;
public String word = "abcd";
}
public static | TestClass1 |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/ChangelogTaskLocalStateStore.java | {
"start": 2070,
"end": 9667
} | class ____ extends TaskLocalStateStoreImpl {
private static final Logger LOG = LoggerFactory.getLogger(ChangelogTaskLocalStateStore.class);
private static final String CHANGE_LOG_CHECKPOINT_PREFIX = "changelog_chk_";
/**
* The mapper of checkpointId and materializationId. (cp3, materializationId2) means cp3 refer
* to m1.
*/
private final Map<Long, Long> mapToMaterializationId;
/** Last checkpointId, to check whether checkpoint is out of order. */
private long lastCheckpointId = -1L;
public ChangelogTaskLocalStateStore(
@Nonnull JobID jobID,
@Nonnull AllocationID allocationID,
@Nonnull JobVertexID jobVertexID,
@Nonnegative int subtaskIndex,
@Nonnull LocalRecoveryConfig localRecoveryConfig,
@Nonnull Executor discardExecutor) {
super(jobID, allocationID, jobVertexID, subtaskIndex, localRecoveryConfig, discardExecutor);
this.mapToMaterializationId = new HashMap<>();
}
private void updateReference(long checkpointId, TaskStateSnapshot localState) {
if (localState == null) {
localState = NULL_DUMMY;
}
for (Map.Entry<OperatorID, OperatorSubtaskState> subtaskStateEntry :
localState.getSubtaskStateMappings()) {
for (KeyedStateHandle keyedStateHandle :
subtaskStateEntry.getValue().getManagedKeyedState()) {
if (keyedStateHandle instanceof ChangelogStateBackendHandle) {
ChangelogStateBackendHandle changelogStateBackendHandle =
(ChangelogStateBackendHandle) keyedStateHandle;
long materializationID = changelogStateBackendHandle.getMaterializationID();
if (mapToMaterializationId.containsKey(checkpointId)) {
checkState(
materializationID == mapToMaterializationId.get(checkpointId),
"one checkpoint contains at most one materializationID");
} else {
mapToMaterializationId.put(checkpointId, materializationID);
}
}
}
}
}
public static Path getLocalTaskOwnedDirectory(
LocalSnapshotDirectoryProvider provider, JobID jobID) {
File outDir =
provider.selectAllocationBaseDirectory(
(jobID.hashCode() & Integer.MAX_VALUE)
% provider.allocationBaseDirsCount());
if (!outDir.exists() && !outDir.mkdirs()) {
LOG.error(
"Local state base directory does not exist and could not be created: "
+ outDir);
}
return new Path(
String.format("%s/jid_%s", outDir.toURI(), jobID), CHECKPOINT_TASK_OWNED_STATE_DIR);
}
@Override
public void storeLocalState(long checkpointId, @Nullable TaskStateSnapshot localState) {
if (checkpointId < lastCheckpointId) {
LOG.info(
"Current checkpoint {} is out of order, smaller than last CheckpointId {}.",
lastCheckpointId,
checkpointId);
return;
} else {
lastCheckpointId = checkpointId;
}
synchronized (lock) {
updateReference(checkpointId, localState);
}
super.storeLocalState(checkpointId, localState);
}
@Override
protected File getCheckpointDirectory(long checkpointId) {
return new File(
getLocalRecoveryDirectoryProvider().subtaskBaseDirectory(checkpointId),
CHANGE_LOG_CHECKPOINT_PREFIX + checkpointId);
}
private void deleteMaterialization(LongPredicate pruningChecker) {
Set<Long> materializationToRemove;
synchronized (lock) {
Set<Long> checkpoints =
mapToMaterializationId.keySet().stream()
.filter(pruningChecker::test)
.collect(Collectors.toSet());
materializationToRemove =
checkpoints.stream()
.map(mapToMaterializationId::remove)
.collect(Collectors.toSet());
materializationToRemove.removeAll(mapToMaterializationId.values());
}
discardExecutor.execute(
() ->
syncDiscardFileForCollection(
materializationToRemove.stream()
.map(super::getCheckpointDirectory)
.collect(Collectors.toList())));
}
private void syncDiscardFileForCollection(Collection<File> toDiscard) {
for (File directory : toDiscard) {
if (directory.exists()) {
try {
// TODO: This is guaranteed by the wrapped backend only using this folder for
// its local state, the materialized handle should be discarded here too.
deleteDirectory(directory);
} catch (IOException ex) {
LOG.warn(
"Exception while deleting local state directory of {} in subtask ({} - {} - {}).",
directory,
jobID,
jobVertexID,
subtaskIndex,
ex);
}
}
}
}
@Override
public void pruneCheckpoints(LongPredicate pruningChecker, boolean breakOnceCheckerFalse) {
// Scenarios:
// c1,m1
// confirm c1, do nothing.
// c2,m1
// confirm c2, delete c1, don't delete m1
// c3,m2
// confirm c3, delete c2, delete m1
// delete changelog-chk
super.pruneCheckpoints(pruningChecker, false);
deleteMaterialization(pruningChecker);
}
@Override
public CompletableFuture<Void> dispose() {
deleteMaterialization(id -> true);
// delete all ChangelogStateHandle in taskowned directory.
discardExecutor.execute(
() ->
syncDiscardFileForCollection(
Collections.singleton(
new File(
getLocalTaskOwnedDirectory(
getLocalRecoveryDirectoryProvider(),
jobID)
.toUri()))));
synchronized (lock) {
mapToMaterializationId.clear();
}
return super.dispose();
}
@Override
public String toString() {
return "ChangelogTaskLocalStateStore{"
+ "jobID="
+ jobID
+ ", jobVertexID="
+ jobVertexID
+ ", allocationID="
+ allocationID.toHexString()
+ ", subtaskIndex="
+ subtaskIndex
+ ", localRecoveryConfig="
+ localRecoveryConfig
+ ", storedCheckpointIDs="
+ storedTaskStateByCheckpointID.keySet()
+ ", mapToMaterializationId="
+ mapToMaterializationId.entrySet()
+ '}';
}
}
| ChangelogTaskLocalStateStore |
java | apache__camel | components/camel-thrift/src/test/java/org/apache/camel/component/thrift/local/ThriftThreadPoolServerTest.java | {
"start": 1849,
"end": 4787
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(ThriftProducerSecurityTest.class);
private static final int THRIFT_TEST_PORT = AvailablePortFinder.getNextAvailable();
private static final int THRIFT_TEST_NUM1 = 12;
private static final int THRIFT_TEST_NUM2 = 13;
private static final String TRUST_STORE_PATH = "src/test/resources/certs/truststore.jks";
private static final String KEY_STORE_PATH = "src/test/resources/certs/keystore.jks";
private static final String SECURITY_STORE_PASSWORD = "camelinaction";
private static final int THRIFT_CLIENT_TIMEOUT = 2000;
private static TServerSocket serverTransport;
private static TTransport clientTransport;
private static TServer server;
private static TProtocol protocol;
@SuppressWarnings({ "rawtypes" })
private static Calculator.Processor processor;
@BeforeEach
@SuppressWarnings({ "unchecked", "rawtypes" })
public void startThriftServer() throws Exception {
processor = new Calculator.Processor(new CalculatorSyncServerImpl());
TSSLTransportFactory.TSSLTransportParameters sslParams = new TSSLTransportFactory.TSSLTransportParameters();
sslParams.setKeyStore(KEY_STORE_PATH, SECURITY_STORE_PASSWORD);
serverTransport = TSSLTransportFactory.getServerSocket(THRIFT_TEST_PORT, THRIFT_CLIENT_TIMEOUT,
InetAddress.getByName("localhost"), sslParams);
ThriftThreadPoolServer.Args args = new ThriftThreadPoolServer.Args(serverTransport);
args.processor(processor);
args.executorService(this.context().getExecutorServiceManager().newThreadPool(this, "test-server-invoker", 1, 10));
args.startThreadPool(this.context().getExecutorServiceManager().newSingleThreadExecutor(this, "test-start-thread"));
args.context(this.context());
server = new ThriftThreadPoolServer(args);
server.serve();
LOG.info("Thrift secured server started on port: {}", THRIFT_TEST_PORT);
}
@AfterEach
public void stopThriftServer() {
if (server != null) {
server.stop();
serverTransport.close();
LOG.info("Thrift secured server stoped");
}
}
@Test
public void clientConnectionTest() throws TException {
TSSLTransportFactory.TSSLTransportParameters sslParams = new TSSLTransportFactory.TSSLTransportParameters();
sslParams.setTrustStore(TRUST_STORE_PATH, SECURITY_STORE_PASSWORD);
clientTransport = TSSLTransportFactory.getClientSocket("localhost", THRIFT_TEST_PORT, 1000, sslParams);
protocol = new TBinaryProtocol(clientTransport);
Calculator.Client client = new Calculator.Client(protocol);
int addResult = client.add(THRIFT_TEST_NUM1, THRIFT_TEST_NUM2);
assertEquals(addResult, THRIFT_TEST_NUM1 + THRIFT_TEST_NUM2);
}
}
| ThriftThreadPoolServerTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java | {
"start": 4719,
"end": 28257
} | class ____ extends TransportMasterNodeAction<Request, Response> {
private static final ByteSizeValue MAX_NATIVE_DEFINITION_INDEX_SIZE = ByteSizeValue.ofGb(50);
private final TrainedModelProvider trainedModelProvider;
private final XPackLicenseState licenseState;
private final NamedXContentRegistry xContentRegistry;
private final OriginSettingClient client;
private final ProjectResolver projectResolver;
@Inject
public TransportPutTrainedModelAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
XPackLicenseState licenseState,
ActionFilters actionFilters,
Client client,
TrainedModelProvider trainedModelProvider,
NamedXContentRegistry xContentRegistry,
ProjectResolver projectResolver
) {
super(
PutTrainedModelAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
Request::new,
Response::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.licenseState = licenseState;
this.trainedModelProvider = trainedModelProvider;
this.xContentRegistry = xContentRegistry;
this.client = new OriginSettingClient(client, ML_ORIGIN);
this.projectResolver = projectResolver;
}
@Override
protected void masterOperation(
Task task,
PutTrainedModelAction.Request request,
ClusterState state,
ActionListener<Response> finalResponseListener
) {
TrainedModelConfig config = request.getTrainedModelConfig();
try {
if (request.isDeferDefinitionDecompression() == false) {
config.ensureParsedDefinition(xContentRegistry);
}
} catch (IOException ex) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException("Failed to parse definition for [{}]", ex, config.getModelId())
);
return;
}
// NOTE: hasModelDefinition is false if we don't parse it. But, if the fully parsed model was already provided, continue
boolean hasModelDefinition = config.getModelDefinition() != null;
if (hasModelDefinition) {
if (validateModelDefinition(config, state, licenseState, finalResponseListener) == false) {
return;
}
}
TrainedModelConfig.Builder trainedModelConfig = new TrainedModelConfig.Builder(config).setVersion(MlConfigVersion.CURRENT)
.setCreateTime(Instant.now())
.setCreatedBy("api_user")
.setLicenseLevel(License.OperationMode.PLATINUM.description());
AtomicReference<ModelPackageConfig> modelPackageConfigHolder = new AtomicReference<>();
if (hasModelDefinition) {
trainedModelConfig.setModelSize(config.getModelDefinition().ramBytesUsed())
.setEstimatedOperations(config.getModelDefinition().getTrainedModel().estimatedNumOperations());
} else {
// Set default location for the given model type.
trainedModelConfig.setLocation(
Optional.ofNullable(config.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE).getDefaultLocation(config.getModelId())
);
}
if (ModelAliasMetadata.fromState(state).getModelId(trainedModelConfig.getModelId()) != null) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException(
"requested model_id [{}] is the same as an existing model_alias. Model model_aliases and ids must be unique",
config.getModelId()
)
);
return;
}
if (TrainedModelAssignmentMetadata.fromState(state).hasDeployment(trainedModelConfig.getModelId())) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException(
"Cannot create model [{}] " + MODEL_ALREADY_EXISTS_ERROR_MESSAGE_FRAGMENT,
config.getModelId()
)
);
return;
}
var isPackageModel = config.isPackagedModel();
ActionListener<Void> checkStorageIndexSizeListener = finalResponseListener.<Boolean>delegateFailureAndWrap((delegate, bool) -> {
TrainedModelConfig configToReturn = trainedModelConfig.clearDefinition().build();
if (modelPackageConfigHolder.get() != null) {
triggerModelFetchIfNecessary(
configToReturn.getModelId(),
modelPackageConfigHolder.get(),
request.isWaitForCompletion(),
delegate.<TrainedModelConfig>delegateFailureAndWrap((l, cfg) -> l.onResponse(new Response(cfg)))
.<TrainedModelConfig>delegateFailureAndWrap(
(l, cfg) -> verifyMlNodesAndModelArchitectures(cfg, client, threadPool, l)
)
.delegateFailureAndWrap((l, downloadTriggered) -> l.onResponse(configToReturn))
);
} else {
delegate.onResponse(new PutTrainedModelAction.Response(configToReturn));
}
}).delegateFailureAndWrap((l, r) -> trainedModelProvider.storeTrainedModel(trainedModelConfig.build(), l, isPackageModel));
ActionListener<Void> tagsModelIdCheckListener = ActionListener.wrap(r -> {
if (TrainedModelType.PYTORCH.equals(trainedModelConfig.getModelType())) {
client.admin()
.indices()
.prepareStats(InferenceIndexConstants.nativeDefinitionStore())
.clear()
.setStore(true)
.execute(ActionListener.wrap(stats -> {
IndexStats indexStats = stats.getIndices().get(InferenceIndexConstants.nativeDefinitionStore());
if (indexStats != null
&& indexStats.getTotal().getStore().sizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) {
finalResponseListener.onFailure(
new ElasticsearchStatusException(
"Native model store has exceeded the maximum acceptable size of {}, "
+ "please delete older unused pytorch models",
RestStatus.CONFLICT,
MAX_NATIVE_DEFINITION_INDEX_SIZE.toString()
)
);
return;
}
checkStorageIndexSizeListener.onResponse(null);
}, e -> {
if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
checkStorageIndexSizeListener.onResponse(null);
return;
}
finalResponseListener.onFailure(
new ElasticsearchStatusException(
"Unable to calculate stats for definition storage index [{}], please try again later",
RestStatus.SERVICE_UNAVAILABLE,
e,
InferenceIndexConstants.nativeDefinitionStore()
)
);
}));
return;
}
checkStorageIndexSizeListener.onResponse(null);
}, finalResponseListener::onFailure);
ActionListener<Void> modelIdTagCheckListener = ActionListener.wrap(
r -> checkTagsAgainstModelIds(request.getTrainedModelConfig().getTags(), tagsModelIdCheckListener),
finalResponseListener::onFailure
);
ActionListener<Void> handlePackageAndTagsListener = ActionListener.wrap(r -> {
if (isPackageModel) {
resolvePackageConfig(trainedModelConfig.getModelId(), ActionListener.wrap(resolvedModelPackageConfig -> {
try {
TrainedModelValidator.validatePackage(trainedModelConfig, resolvedModelPackageConfig, state);
} catch (ValidationException e) {
finalResponseListener.onFailure(e);
return;
}
modelPackageConfigHolder.set(resolvedModelPackageConfig);
setTrainedModelConfigFieldsFromPackagedModel(trainedModelConfig, resolvedModelPackageConfig, xContentRegistry);
checkModelIdAgainstTags(trainedModelConfig.getModelId(), modelIdTagCheckListener);
}, finalResponseListener::onFailure));
} else {
checkModelIdAgainstTags(trainedModelConfig.getModelId(), modelIdTagCheckListener);
}
}, finalResponseListener::onFailure);
if (isPackageModel) {
checkForExistingModelDownloadTask(
client,
trainedModelConfig.getModelId(),
request.isWaitForCompletion(),
finalResponseListener,
() -> handlePackageAndTagsListener.onResponse(null),
request.ackTimeout()
);
} else {
handlePackageAndTagsListener.onResponse(null);
}
}
void verifyMlNodesAndModelArchitectures(
TrainedModelConfig configToReturn,
Client client,
ThreadPool threadPool,
ActionListener<TrainedModelConfig> configToReturnListener
) {
ActionListener<TrainedModelConfig> addWarningHeaderOnFailureListener = new ActionListener<>() {
@Override
public void onResponse(TrainedModelConfig config) {
assert Objects.equals(config, configToReturn);
configToReturnListener.onResponse(configToReturn);
}
@Override
public void onFailure(Exception e) {
HeaderWarning.addWarning(e.getMessage());
configToReturnListener.onResponse(configToReturn);
}
};
callVerifyMlNodesAndModelArchitectures(configToReturn, addWarningHeaderOnFailureListener, client, threadPool);
}
void callVerifyMlNodesAndModelArchitectures(
TrainedModelConfig configToReturn,
ActionListener<TrainedModelConfig> failureListener,
Client client,
ThreadPool threadPool
) {
MlPlatformArchitecturesUtil.verifyMlNodesAndModelArchitectures(
failureListener,
client,
threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME),
configToReturn
);
}
/**
* Check if the model is being downloaded.
* If the download is in progress then the response will be on
* the {@code isBeingDownloadedListener} otherwise {@code createModelAction}
* is called to trigger the next step in the model install.
* Should only be called for Elasticsearch hosted models.
*
* @param client Client
* @param modelId Model Id
* @param isWaitForCompletion Wait for the download to complete
* @param isBeingDownloadedListener The listener called if the download is in progress
* @param createModelAction If no download is in progress this is called to continue
* the model install process.
* @param timeout Model download timeout
*/
static void checkForExistingModelDownloadTask(
Client client,
String modelId,
boolean isWaitForCompletion,
ActionListener<Response> isBeingDownloadedListener,
Runnable createModelAction,
TimeValue timeout
) {
TaskRetriever.getDownloadTaskInfo(
client,
modelId,
isWaitForCompletion,
timeout,
() -> "Timed out waiting for model download to complete",
ActionListener.wrap(taskInfo -> {
if (taskInfo != null) {
getModelInformation(client, modelId, isBeingDownloadedListener);
} else {
// no task exists so proceed with creating the model
createModelAction.run();
}
}, isBeingDownloadedListener::onFailure)
);
}
private static void getModelInformation(Client client, String modelId, ActionListener<Response> listener) {
client.execute(GetTrainedModelsAction.INSTANCE, new GetTrainedModelsAction.Request(modelId), ActionListener.wrap(models -> {
if (models.getResources().results().isEmpty()) {
listener.onFailure(
new ElasticsearchStatusException(
"No model information found for a concurrent create model execution for model id [{}]",
RestStatus.INTERNAL_SERVER_ERROR,
modelId
)
);
} else {
listener.onResponse(new PutTrainedModelAction.Response(models.getResources().results().get(0)));
}
}, e -> {
listener.onFailure(
new ElasticsearchStatusException(
"Unable to retrieve model information for a concurrent create model execution for model id [{}]",
RestStatus.INTERNAL_SERVER_ERROR,
e,
modelId
)
);
}));
}
private void triggerModelFetchIfNecessary(
String modelId,
ModelPackageConfig modelPackageConfig,
boolean waitForCompletion,
ActionListener<Void> listener
) {
client.execute(
LoadTrainedModelPackageAction.INSTANCE,
new LoadTrainedModelPackageAction.Request(modelId, modelPackageConfig, waitForCompletion),
ActionListener.wrap(ack -> listener.onResponse(null), listener::onFailure)
);
}
private void resolvePackageConfig(String modelId, ActionListener<ModelPackageConfig> listener) {
client.execute(
GetTrainedModelPackageConfigAction.INSTANCE,
new GetTrainedModelPackageConfigAction.Request(modelId.substring(1)),
ActionListener.wrap(packageConfig -> listener.onResponse(packageConfig.getModelPackageConfig()), listener::onFailure)
);
}
private void checkModelIdAgainstTags(String modelId, ActionListener<Void> listener) {
QueryBuilder builder = QueryBuilders.constantScoreQuery(
QueryBuilders.boolQuery().filter(QueryBuilders.termQuery(TrainedModelConfig.TAGS.getPreferredName(), modelId))
);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(builder).size(0).trackTotalHitsUpTo(1);
SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN).source(sourceBuilder);
executeAsyncWithOrigin(
client.threadPool().getThreadContext(),
ML_ORIGIN,
searchRequest,
ActionListener.<SearchResponse>wrap(response -> {
if (response.getHits().getTotalHits().value() > 0) {
listener.onFailure(
ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INFERENCE_MODEL_ID_AND_TAGS_UNIQUE, modelId))
);
return;
}
listener.onResponse(null);
}, listener::onFailure),
client::search
);
}
private void checkTagsAgainstModelIds(List<String> tags, ActionListener<Void> listener) {
if (tags.isEmpty()) {
listener.onResponse(null);
return;
}
QueryBuilder builder = QueryBuilders.constantScoreQuery(
QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery(TrainedModelConfig.MODEL_ID.getPreferredName(), tags))
);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(builder).size(0).trackTotalHitsUpTo(1);
SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN).source(sourceBuilder);
executeAsyncWithOrigin(
client.threadPool().getThreadContext(),
ML_ORIGIN,
searchRequest,
ActionListener.<SearchResponse>wrap(response -> {
if (response.getHits().getTotalHits().value() > 0) {
listener.onFailure(
ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INFERENCE_TAGS_AND_MODEL_IDS_UNIQUE, tags))
);
return;
}
listener.onResponse(null);
}, listener::onFailure),
client::search
);
}
public static boolean validateModelDefinition(
TrainedModelConfig config,
ClusterState state,
XPackLicenseState licenseState,
ActionListener<Response> finalResponseListener
) {
try {
config.getModelDefinition().getTrainedModel().validate();
} catch (ElasticsearchException ex) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException("Definition for [{}] has validation failures.", ex, config.getModelId())
);
return false;
}
TrainedModelType trainedModelType = TrainedModelType.typeFromTrainedModel(config.getModelDefinition().getTrainedModel());
if (trainedModelType == null) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException(
"Unknown trained model definition class [{}]",
config.getModelDefinition().getTrainedModel().getName()
)
);
return false;
}
var configModelType = config.getModelType();
if (configModelType == null) {
// Set the model type from the definition
config = new TrainedModelConfig.Builder(config).setModelType(trainedModelType).build();
} else if (trainedModelType != configModelType) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException(
"{} [{}] does not match the model definition type [{}]",
TrainedModelConfig.MODEL_TYPE.getPreferredName(),
configModelType,
trainedModelType
)
);
return false;
}
var inferenceConfig = config.getInferenceConfig();
if (inferenceConfig.isTargetTypeSupported(config.getModelDefinition().getTrainedModel().targetType()) == false) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException(
"Model [{}] inference config type [{}] does not support definition target type [{}]",
config.getModelId(),
config.getInferenceConfig().getName(),
config.getModelDefinition().getTrainedModel().targetType()
)
);
return false;
}
var minLicenseSupported = inferenceConfig.getMinLicenseSupportedForAction(RestRequest.Method.PUT);
if (licenseState.isAllowedByLicense(minLicenseSupported) == false) {
finalResponseListener.onFailure(
new ElasticsearchSecurityException(
"Model of type [{}] requires [{}] license level",
RestStatus.FORBIDDEN,
config.getInferenceConfig().getName(),
minLicenseSupported
)
);
return false;
}
TransportVersion minCompatibilityVersion = config.getModelDefinition().getTrainedModel().getMinimalCompatibilityVersion();
if (state.getMinTransportVersion().before(minCompatibilityVersion)) {
finalResponseListener.onFailure(
ExceptionsHelper.badRequestException("Cannot create model [{}] while cluster upgrade is in progress.", config.getModelId())
);
return false;
}
return true;
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
if (MachineLearningField.ML_API_FEATURE.check(licenseState)) {
super.doExecute(task, request, listener);
} else {
listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING));
}
}
static void setTrainedModelConfigFieldsFromPackagedModel(
TrainedModelConfig.Builder trainedModelConfig,
ModelPackageConfig resolvedModelPackageConfig,
NamedXContentRegistry xContentRegistry
) throws IOException {
trainedModelConfig.setDescription(resolvedModelPackageConfig.getDescription());
trainedModelConfig.setModelType(TrainedModelType.fromString(resolvedModelPackageConfig.getModelType()));
trainedModelConfig.setPlatformArchitecture(resolvedModelPackageConfig.getPlatformArchitecture());
trainedModelConfig.setMetadata(resolvedModelPackageConfig.getMetadata());
trainedModelConfig.setInferenceConfig(
parseInferenceConfigFromModelPackage(resolvedModelPackageConfig.getInferenceConfigSource(), xContentRegistry)
);
trainedModelConfig.setTags(resolvedModelPackageConfig.getTags());
trainedModelConfig.setPrefixStrings(resolvedModelPackageConfig.getPrefixStrings());
trainedModelConfig.setModelPackageConfig(
new ModelPackageConfig.Builder(resolvedModelPackageConfig).resetPackageOnlyFields().build()
);
trainedModelConfig.setLocation(trainedModelConfig.getModelType().getDefaultLocation(trainedModelConfig.getModelId()));
}
static InferenceConfig parseInferenceConfigFromModelPackage(Map<String, Object> source, NamedXContentRegistry namedXContentRegistry)
throws IOException {
try (
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source);
XContentParser sourceParser = XContentHelper.createParserNotCompressed(
LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(namedXContentRegistry),
BytesReference.bytes(xContentBuilder),
XContentType.JSON
)
) {
XContentParser.Token token = sourceParser.nextToken();
assert token == XContentParser.Token.START_OBJECT;
token = sourceParser.nextToken();
assert token == XContentParser.Token.FIELD_NAME;
String currentName = sourceParser.currentName();
InferenceConfig inferenceConfig = sourceParser.namedObject(LenientlyParsedInferenceConfig.class, currentName, null);
// consume the end object token
token = sourceParser.nextToken();
assert token == XContentParser.Token.END_OBJECT;
return inferenceConfig;
}
}
}
| TransportPutTrainedModelAction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/tck2_2/mapkeycolumn/MapKeyColumnBiDiOneToManyFKTest.java | {
"start": 1144,
"end": 3320
} | class ____ {
@Test
@JiraKey( value = "HHH-12150" )
public void testReferenceToAlreadyMappedColumn(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
AddressCapable2 holder = new AddressCapable2( 1, "osd");
Address2 address = new Address2( 1, "123 Main St" );
session.persist( holder );
session.persist( address );
}
);
scope.inTransaction(
session -> {
AddressCapable2 holder = session.find( AddressCapable2.class, 1 );
Address2 address = session.find( Address2.class, 1 );
address.holder = holder;
address.type = "work";
holder.addresses.put( "work", address );
session.persist( holder );
}
);
scope.inTransaction(
session -> {
AddressCapable2 holder = session.find( AddressCapable2.class, 1 );
assertEquals( 1, holder.addresses.size() );
final Map.Entry<String,Address2> entry = holder.addresses.entrySet().iterator().next();
assertEquals( "work", entry.getKey() );
assertEquals( "work", entry.getValue().type );
session.remove( holder );
}
);
}
@Test
@JiraKey( value = "HHH-12150" )
public void testReferenceToNonMappedColumn(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
AddressCapable holder = new AddressCapable( 1, "osd");
Address address = new Address( 1, "123 Main St" );
session.persist( holder );
session.persist( address );
}
);
scope.inTransaction(
session -> {
AddressCapable holder = session.find( AddressCapable.class, 1 );
Address address = session.find( Address.class, 1 );
address.holder = holder;
holder.addresses.put( "work", address );
session.persist( holder );
}
);
scope.inTransaction(
session -> {
AddressCapable holder = session.find( AddressCapable.class, 1 );
assertEquals( 1, holder.addresses.size() );
final Map.Entry<String,Address> entry = holder.addresses.entrySet().iterator().next();
assertEquals( "work", entry.getKey() );
session.remove( holder );
}
);
}
@Entity( name = "AddressCapable" )
@Table( name = "address_capables" )
public static | MapKeyColumnBiDiOneToManyFKTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/startup/RuntimeInterceptorDeployment.java | {
"start": 1694,
"end": 10140
} | class ____ {
private static final LinkedHashMap<ResourceInterceptor, ReaderInterceptor> EMPTY_INTERCEPTOR_MAP = new LinkedHashMap<>();
private final Map<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> globalRequestInterceptorsMap;
private final Map<ResourceInterceptor<ContainerResponseFilter>, ContainerResponseFilter> globalResponseInterceptorsMap;
private final Map<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> nameRequestInterceptorsMap;
private final Map<ResourceInterceptor<ContainerResponseFilter>, ContainerResponseFilter> nameResponseInterceptorsMap;
private final Map<ResourceInterceptor<ReaderInterceptor>, ReaderInterceptor> globalReaderInterceptorsMap;
private final Map<ResourceInterceptor<WriterInterceptor>, WriterInterceptor> globalWriterInterceptorsMap;
private final Map<ResourceInterceptor<ReaderInterceptor>, ReaderInterceptor> nameReaderInterceptorsMap;
private final Map<ResourceInterceptor<WriterInterceptor>, WriterInterceptor> nameWriterInterceptorsMap;
private final Map<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> preMatchContainerRequestFilters;
private final List<ResourceRequestFilterHandler> globalRequestInterceptorHandlers;
private final List<ResourceResponseFilterHandler> globalResponseInterceptorHandlers;
private final InterceptorHandler globalInterceptorHandler;
private final DeploymentInfo info;
private final Consumer<Closeable> closeTaskHandler;
private final ConfigurationImpl configurationImpl;
public RuntimeInterceptorDeployment(DeploymentInfo info, ConfigurationImpl configurationImpl,
Consumer<Closeable> closeTaskHandler) {
this.info = info;
this.configurationImpl = configurationImpl;
this.closeTaskHandler = closeTaskHandler;
ResourceInterceptors interceptors = info.getInterceptors();
globalRequestInterceptorsMap = createInterceptorInstances(
interceptors.getContainerRequestFilters().getGlobalResourceInterceptors(), closeTaskHandler);
globalResponseInterceptorsMap = createInterceptorInstances(
interceptors.getContainerResponseFilters().getGlobalResourceInterceptors(), closeTaskHandler);
nameRequestInterceptorsMap = createInterceptorInstances(
interceptors.getContainerRequestFilters().getNameResourceInterceptors(), closeTaskHandler);
nameResponseInterceptorsMap = createInterceptorInstances(
interceptors.getContainerResponseFilters().getNameResourceInterceptors(), closeTaskHandler);
globalReaderInterceptorsMap = createInterceptorInstances(
interceptors.getReaderInterceptors().getGlobalResourceInterceptors(), closeTaskHandler);
globalWriterInterceptorsMap = createInterceptorInstances(
interceptors.getWriterInterceptors().getGlobalResourceInterceptors(), closeTaskHandler);
nameReaderInterceptorsMap = createInterceptorInstances(
interceptors.getReaderInterceptors().getNameResourceInterceptors(), closeTaskHandler);
nameWriterInterceptorsMap = createInterceptorInstances(
interceptors.getWriterInterceptors().getNameResourceInterceptors(), closeTaskHandler);
preMatchContainerRequestFilters = createInterceptorInstances(
interceptors.getContainerRequestFilters().getPreMatchInterceptors(), closeTaskHandler);
Collection<ContainerResponseFilter> responseFilters = globalResponseInterceptorsMap.values();
globalResponseInterceptorHandlers = new ArrayList<>(responseFilters.size());
for (ContainerResponseFilter responseFilter : responseFilters) {
globalResponseInterceptorHandlers.add(new ResourceResponseFilterHandler(responseFilter));
}
globalRequestInterceptorHandlers = new ArrayList<>(globalRequestInterceptorsMap.size());
for (Map.Entry<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> entry : globalRequestInterceptorsMap
.entrySet()) {
globalRequestInterceptorHandlers
.add(new ResourceRequestFilterHandler(entry.getValue(), false, entry.getKey().isNonBlockingRequired(),
entry.getKey().isWithFormRead()));
}
InterceptorHandler globalInterceptorHandler = null;
if (!globalReaderInterceptorsMap.isEmpty() ||
!globalWriterInterceptorsMap.isEmpty()) {
WriterInterceptor[] writers = null;
ReaderInterceptor[] readers = null;
if (!globalReaderInterceptorsMap.isEmpty()) {
readers = new ReaderInterceptor[globalReaderInterceptorsMap.size()];
int idx = 0;
for (ReaderInterceptor i : globalReaderInterceptorsMap.values()) {
readers[idx++] = i;
}
}
if (!globalWriterInterceptorsMap.isEmpty()) {
writers = new WriterInterceptor[globalWriterInterceptorsMap.size()];
int idx = 0;
for (WriterInterceptor i : globalWriterInterceptorsMap.values()) {
writers[idx++] = i;
}
}
globalInterceptorHandler = new InterceptorHandler(writers, readers);
}
this.globalInterceptorHandler = globalInterceptorHandler;
}
public InterceptorHandler getGlobalInterceptorHandler() {
return globalInterceptorHandler;
}
public List<ResourceRequestFilterHandler> getGlobalRequestInterceptorHandlers() {
return globalRequestInterceptorHandlers;
}
public List<ResourceResponseFilterHandler> getGlobalResponseInterceptorHandlers() {
return globalResponseInterceptorHandlers;
}
public Map<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> getPreMatchContainerRequestFilters() {
return preMatchContainerRequestFilters;
}
private <T> LinkedHashMap<ResourceInterceptor<T>, T> createInterceptorInstances(
List<ResourceInterceptor<T>> interceptors, Consumer<Closeable> closeTaskHandler) {
if (interceptors.isEmpty()) {
return (LinkedHashMap) EMPTY_INTERCEPTOR_MAP;
}
LinkedHashMap<ResourceInterceptor<T>, T> result = new LinkedHashMap<>();
List<BeanFactory.BeanInstance<T>> responseBeanInstances = new ArrayList<>(interceptors.size());
Collections.sort(interceptors);
for (ResourceInterceptor<T> interceptor : interceptors) {
if (RuntimeType.CLIENT.equals(interceptor.getRuntimeType())) {
continue;
}
BeanFactory.BeanInstance<T> beanInstance = interceptor.getFactory().createInstance();
responseBeanInstances.add(beanInstance);
T containerResponseFilter = beanInstance.getInstance();
result.put(interceptor, containerResponseFilter);
}
closeTaskHandler.accept(new BeanFactory.BeanInstance.ClosingTask<>(responseBeanInstances));
return result;
}
public MethodInterceptorContext forMethod(ResourceMethod method, ResteasyReactiveResourceInfo lazyMethod) {
return new MethodInterceptorContext(method, lazyMethod);
}
<T> TreeMap<ResourceInterceptor<T>, T> buildInterceptorMap(
Map<ResourceInterceptor<T>, T> globalInterceptorsMap,
Map<ResourceInterceptor<T>, T> nameInterceptorsMap,
Map<ResourceInterceptor<T>, T> methodSpecificInterceptorsMap, ResourceMethod method, boolean reversed) {
TreeMap<ResourceInterceptor<T>, T> interceptorsToUse = new TreeMap<>(
reversed ? HasPriority.TreeMapComparator.REVERSED : HasPriority.TreeMapComparator.INSTANCE);
interceptorsToUse.putAll(globalInterceptorsMap);
interceptorsToUse.putAll(methodSpecificInterceptorsMap);
for (ResourceInterceptor<T> nameInterceptor : nameInterceptorsMap.keySet()) {
// in order to the interceptor to be used, the method needs to have all the "qualifiers" that the interceptor has
if (method.getNameBindingNames().containsAll(nameInterceptor.getNameBindingNames())) {
interceptorsToUse.put(nameInterceptor, nameInterceptorsMap.get(nameInterceptor));
}
}
return interceptorsToUse;
}
public | RuntimeInterceptorDeployment |
java | netty__netty | common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java | {
"start": 1162,
"end": 7055
} | class ____ {
@SuppressWarnings("unused")
private static volatile int sink;
@Test
@Timeout(value = 60000, unit = TimeUnit.MILLISECONDS)
public void testConcurrentUsage() throws Throwable {
final AtomicBoolean finished = new AtomicBoolean();
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
// With 50 threads issue #6087 is reproducible on every run.
Thread[] threads = new Thread[50];
final CyclicBarrier barrier = new CyclicBarrier(threads.length);
for (int i = 0; i < threads.length; i++) {
Thread t = new Thread(new Runnable() {
final Queue<LeakAwareResource> resources = new ArrayDeque<LeakAwareResource>(100);
@Override
public void run() {
try {
barrier.await();
// Run 10000 times or until the test is marked as finished.
for (int b = 0; b < 1000 && !finished.get(); b++) {
// Allocate 100 LeakAwareResource per run and close them after it.
for (int a = 0; a < 100; a++) {
DefaultResource resource = new DefaultResource();
ResourceLeakTracker<Resource> leak = DefaultResource.detector.track(resource);
LeakAwareResource leakAwareResource = new LeakAwareResource(resource, leak);
resources.add(leakAwareResource);
}
if (closeResources(true)) {
finished.set(true);
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Throwable e) {
error.compareAndSet(null, e);
} finally {
// Just close all resource now without assert it to eliminate more reports.
closeResources(false);
}
}
private boolean closeResources(boolean checkClosed) {
for (;;) {
LeakAwareResource r = resources.poll();
if (r == null) {
return false;
}
boolean closed = r.close();
if (checkClosed && !closed) {
error.compareAndSet(null,
new AssertionError("ResourceLeak.close() returned 'false' but expected 'true'"));
return true;
}
}
}
});
threads[i] = t;
t.start();
}
// Just wait until all threads are done.
for (Thread t: threads) {
t.join();
}
// Check if we had any leak reports in the ResourceLeakDetector itself
DefaultResource.detector.assertNoErrors();
assertNoErrors(error);
}
@Timeout(10)
@Test
public void testLeakSetupHints() throws Throwable {
DefaultResource.detectorWithSetupHint.initialise();
leakResource();
do {
// Trigger GC.
System.gc();
// Track another resource to trigger refqueue visiting.
Resource resource2 = new DefaultResource();
DefaultResource.detectorWithSetupHint.track(resource2).close(resource2);
// Give the GC something to work on.
for (int i = 0; i < 1000; i++) {
sink = System.identityHashCode(new byte[10000]);
}
} while (DefaultResource.detectorWithSetupHint.getLeaksFound() < 1 && !Thread.interrupted());
assertThat(DefaultResource.detectorWithSetupHint.getLeaksFound()).isOne();
DefaultResource.detectorWithSetupHint.assertNoErrors();
}
@Timeout(10)
@Test
public void testLeakBrokenHint() throws Throwable {
DefaultResource.detectorWithSetupHint.initialise();
DefaultResource.detectorWithSetupHint.failOnUntraced = false;
DefaultResource.detectorWithSetupHint.initialHint = new ResourceLeakHint() {
@Override
public String toHintString() {
throw new RuntimeException("expected failure");
}
};
try {
leakResource();
fail("expected failure");
} catch (RuntimeException e) {
assertThat(e.getMessage()).isEqualTo("expected failure");
}
DefaultResource.detectorWithSetupHint.initialHint = DefaultResource.detectorWithSetupHint.canaryString;
do {
// Trigger GC.
System.gc();
// Track another resource to trigger refqueue visiting.
Resource resource2 = new DefaultResource();
DefaultResource.detectorWithSetupHint.track(resource2).close(resource2);
// Give the GC something to work on.
for (int i = 0; i < 1000; i++) {
sink = System.identityHashCode(new byte[10000]);
}
} while (DefaultResource.detectorWithSetupHint.getLeaksFound() < 1 && !Thread.interrupted());
assertThat(DefaultResource.detectorWithSetupHint.getLeaksFound()).isOne();
DefaultResource.detectorWithSetupHint.assertNoErrors();
}
private static void leakResource() {
Resource resource = new DefaultResource();
// We'll never close this ResourceLeakTracker.
DefaultResource.detectorWithSetupHint.track(resource);
}
// Mimic the way how we implement our classes that should help with leak detection
private static final | ResourceLeakDetectorTest |
java | quarkusio__quarkus | independent-projects/arc/processor/src/test/java/io/quarkus/arc/processor/AnnotationLiteralProcessorTest.java | {
"start": 2382,
"end": 9755
} | class ____ extends AnnotationLiteral<ComplexAnnotation> implements ComplexAnnotation {
private final boolean bool;
private final byte b;
private final short s;
private final int i;
private final long l;
private final float f;
private final double d;
private final char ch;
private final String str;
private final SimpleEnum en;
private final Class<?> cls;
private final SimpleAnnotation nested;
private final boolean[] boolArray;
private final byte[] bArray;
private final short[] sArray;
private final int[] iArray;
private final long[] lArray;
private final float[] fArray;
private final double[] dArray;
private final char[] chArray;
private final String[] strArray;
private final SimpleEnum[] enArray;
private final Class<?>[] clsArray;
private final SimpleAnnotation[] nestedArray;
public Literal(boolean bool, byte b, short s, int i, long l, float f, double d, char ch, String str, SimpleEnum en,
Class<?> cls, SimpleAnnotation nested, boolean[] boolArray, byte[] bArray, short[] sArray, int[] iArray,
long[] lArray, float[] fArray, double[] dArray, char[] chArray, String[] strArray, SimpleEnum[] enArray,
Class<?>[] clsArray, SimpleAnnotation[] nestedArray) {
this.bool = bool;
this.b = b;
this.s = s;
this.i = i;
this.l = l;
this.f = f;
this.d = d;
this.ch = ch;
this.str = str;
this.en = en;
this.cls = cls;
this.nested = nested;
this.boolArray = boolArray;
this.bArray = bArray;
this.sArray = sArray;
this.iArray = iArray;
this.lArray = lArray;
this.fArray = fArray;
this.dArray = dArray;
this.chArray = chArray;
this.strArray = strArray;
this.enArray = enArray;
this.clsArray = clsArray;
this.nestedArray = nestedArray;
}
@Override
public boolean bool() {
return bool;
}
@Override
public byte b() {
return b;
}
@Override
public short s() {
return s;
}
@Override
public int i() {
return i;
}
@Override
public long l() {
return l;
}
@Override
public float f() {
return f;
}
@Override
public double d() {
return d;
}
@Override
public char ch() {
return ch;
}
@Override
public String str() {
return str;
}
@Override
public SimpleEnum en() {
return en;
}
@Override
public Class<?> cls() {
return cls;
}
@Override
public SimpleAnnotation nested() {
return nested;
}
@Override
public boolean[] boolArray() {
return boolArray;
}
@Override
public byte[] bArray() {
return bArray;
}
@Override
public short[] sArray() {
return sArray;
}
@Override
public int[] iArray() {
return iArray;
}
@Override
public long[] lArray() {
return lArray;
}
@Override
public float[] fArray() {
return fArray;
}
@Override
public double[] dArray() {
return dArray;
}
@Override
public char[] chArray() {
return chArray;
}
@Override
public String[] strArray() {
return strArray;
}
@Override
public SimpleEnum[] enArray() {
return enArray;
}
@Override
public Class<?>[] clsArray() {
return clsArray;
}
@Override
public SimpleAnnotation[] nestedArray() {
return nestedArray;
}
}
}
private final String generatedClass = "io.quarkus.arc.processor.test.GeneratedClass";
private final IndexView index;
public AnnotationLiteralProcessorTest() throws IOException {
index = Index.of(SimpleEnum.class, MemberlessAnnotation.class, SimpleAnnotation.class, ComplexAnnotation.class);
}
@Test
public void test() throws ReflectiveOperationException {
AnnotationLiteralProcessor literals = new AnnotationLiteralProcessor(index, ignored -> true);
TestClassMaker tcm = new TestClassMaker();
Gizmo gizmo = Gizmo.create(tcm);
ClassDesc desc = gizmo.class_(generatedClass, cc -> {
cc.staticMethod("get", mc -> {
mc.public_();
mc.returning(Object.class); // always `ComplexAnnotation`
mc.body(bc -> {
bc.return_(literals.create(bc, index.getClassByName(ComplexAnnotation.class), complexAnnotationJandex()));
});
});
});
Collection<ResourceOutput.Resource> resources = new AnnotationLiteralGenerator(false)
.generate(literals.getCache(), Collections.emptySet());
for (ResourceOutput.Resource resource : resources) {
if (resource.getType() == ResourceOutput.Resource.Type.JAVA_CLASS) {
tcm.write(ClassDesc.of(resource.getName().replace('/', '.')), resource.getData());
} else {
throw new IllegalStateException("Unexpected " + resource.getType() + " " + resource.getName());
}
}
ComplexAnnotation annotation = (ComplexAnnotation) tcm.forClass(desc).staticMethod("get", Supplier.class).get();
verify(annotation);
assertInstanceOf(AbstractAnnotationLiteral.class, annotation);
AbstractAnnotationLiteral annotationLiteral = (AbstractAnnotationLiteral) annotation;
assertEquals(annotation.annotationType(), annotationLiteral.annotationType());
// verify both ways, to ensure our generated classes interop correctly with `AnnotationLiteral`
assertEquals(complexAnnotationRuntime(), annotation);
assertEquals(annotation, complexAnnotationRuntime());
assertEquals(complexAnnotationRuntime().hashCode(), annotation.hashCode());
assertEquals(
"@io.quarkus.arc.processor.AnnotationLiteralProcessorTest$ComplexAnnotation(bool=true, b=1, s=2, i=3, l=4, f=5.0, d=6.0, ch=a, str=bc, en=FOO, cls= | Literal |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmDerivedRoot.java | {
"start": 870,
"end": 3420
} | class ____<T> extends SqmRoot<T> implements JpaDerivedRoot<T> {
private final SqmSubQuery<T> subQuery;
public SqmDerivedRoot(
SqmSubQuery<T> subQuery,
@Nullable String alias) {
this(
SqmCreationHelper.buildRootNavigablePath( "<<derived>>", alias ),
subQuery,
new AnonymousTupleType<>( subQuery ),
alias
);
}
protected SqmDerivedRoot(
NavigablePath navigablePath,
SqmSubQuery<T> subQuery,
SqmPathSource<T> pathSource,
@Nullable String alias) {
super(
navigablePath,
pathSource,
alias,
true,
subQuery.nodeBuilder()
);
this.subQuery = subQuery;
}
@Override
public SqmDerivedRoot<T> copy(SqmCopyContext context) {
final SqmDerivedRoot<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmDerivedRoot<T> path = context.registerCopy(
this,
new SqmDerivedRoot<>(
getNavigablePath(),
getQueryPart().copy( context ),
getReferencedPathSource(),
getExplicitAlias()
)
);
copyTo( path, context );
return path;
}
@Override
public SqmSubQuery<T> getQueryPart() {
return subQuery;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitRootDerived( this );
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// JPA
@Override
public SqmEntityDomainType<T> getModel() {
// Or should we throw an exception instead?
throw new UnsupportedOperationException( "Derived root does not have an entity type. Use getReferencedPathSource() instead." );
}
@Override
public String getEntityName() {
throw new UnsupportedOperationException( "Derived root does not have an entity type. Use getReferencedPathSource() instead." );
}
@Override
public SqmPathSource<T> getResolvedModel() {
return getReferencedPathSource();
}
@Override
public SqmCorrelatedRoot<T> createCorrelation() {
return new SqmCorrelatedDerivedRoot<>( this );
}
@Override
public <S extends T> SqmTreatedFrom<T, T, S> treatAs(EntityDomainType<S> treatTarget, @Nullable String alias, boolean fetch) {
throw new UnsupportedOperationException( "Derived roots can not be treated" );
}
@Override
public boolean deepEquals(SqmFrom<?, ?> object) {
return super.deepEquals( object )
&& subQuery.equals( ((SqmDerivedRoot<?>) object).subQuery );
}
@Override
public boolean isDeepCompatible(SqmFrom<?, ?> object) {
return super.isDeepCompatible( object )
&& subQuery.isCompatible( ((SqmDerivedRoot<?>) object).subQuery );
}
}
| SqmDerivedRoot |
java | apache__rocketmq | controller/src/main/java/org/apache/rocketmq/controller/impl/event/ApplyBrokerIdEvent.java | {
"start": 962,
"end": 2434
} | class ____ implements EventMessage {
private final String clusterName;
private final String brokerName;
private final String brokerAddress;
private final String registerCheckCode;
private final long newBrokerId;
public ApplyBrokerIdEvent(String clusterName, String brokerName, String brokerAddress, long newBrokerId,
String registerCheckCode) {
this.clusterName = clusterName;
this.brokerName = brokerName;
this.brokerAddress = brokerAddress;
this.newBrokerId = newBrokerId;
this.registerCheckCode = registerCheckCode;
}
@Override
public EventType getEventType() {
return EventType.APPLY_BROKER_ID_EVENT;
}
public String getBrokerName() {
return brokerName;
}
public String getBrokerAddress() {
return brokerAddress;
}
public long getNewBrokerId() {
return newBrokerId;
}
public String getClusterName() {
return clusterName;
}
public String getRegisterCheckCode() {
return registerCheckCode;
}
@Override
public String toString() {
return "ApplyBrokerIdEvent{" +
"clusterName='" + clusterName + '\'' +
", brokerName='" + brokerName + '\'' +
", brokerAddress='" + brokerAddress + '\'' +
", registerCheckCode='" + registerCheckCode + '\'' +
", newBrokerId=" + newBrokerId +
'}';
}
}
| ApplyBrokerIdEvent |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/security/HttpSecurityConfiguration.java | {
"start": 21892,
"end": 22259
} | class ____ {
public final ClientAuth tlsClientAuth;
public final Optional<String> tlsConfigName;
private ProgrammaticTlsConfig(ClientAuth tlsClientAuth, Optional<String> tlsConfigName) {
this.tlsClientAuth = tlsClientAuth;
this.tlsConfigName = Objects.requireNonNull(tlsConfigName);
}
}
}
| ProgrammaticTlsConfig |
java | apache__camel | components/camel-mongodb/src/generated/java/org/apache/camel/component/mongodb/MongoDbComponentConfigurer.java | {
"start": 734,
"end": 3157
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
MongoDbComponent target = (MongoDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "mongoconnection":
case "mongoConnection": target.setMongoConnection(property(camelContext, com.mongodb.client.MongoClient.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"mongoConnection"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "mongoconnection":
case "mongoConnection": return com.mongodb.client.MongoClient.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
MongoDbComponent target = (MongoDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "mongoconnection":
case "mongoConnection": return target.getMongoConnection();
default: return null;
}
}
}
| MongoDbComponentConfigurer |
java | spring-projects__spring-boot | module/spring-boot-jackson/src/test/java/org/springframework/boot/jackson/types/NameAndAge.java | {
"start": 865,
"end": 1852
} | class ____ extends Name {
private final int age;
private NameAndAge(@Nullable String name, int age) {
super(name);
this.age = age;
}
public int getAge() {
return this.age;
}
public String asKey() {
return this.name + " is " + this.age;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof NameAndAge other) {
boolean rtn = true;
rtn = rtn && ObjectUtils.nullSafeEquals(this.name, other.name);
rtn = rtn && ObjectUtils.nullSafeEquals(this.age, other.age);
return rtn;
}
return super.equals(obj);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ObjectUtils.nullSafeHashCode(this.name);
result = prime * result + ObjectUtils.nullSafeHashCode(this.age);
return result;
}
public static NameAndAge create(@Nullable String name, int age) {
return new NameAndAge(name, age);
}
}
| NameAndAge |
java | quarkusio__quarkus | integration-tests/main/src/main/java/io/quarkus/it/rest/TestResource.java | {
"start": 14389,
"end": 14857
} | class ____ {
private String name;
private String value;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
@RegisterForReflection(targets = MyEntity.class)
public static | MyEntity |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/integration/observability/applicationevents/EmailReceivedEvent.java | {
"start": 794,
"end": 914
} | class ____ extends ApplicationEvent {
public EmailReceivedEvent(Object source) {
super(source);
}
}
| EmailReceivedEvent |
java | google__guava | android/guava-tests/benchmark/com/google/common/util/concurrent/MonitorBasedPriorityBlockingQueue.java | {
"start": 9069,
"end": 9311
} | class ____ method
@Override
public @Nullable E poll() {
Monitor monitor = this.monitor;
monitor.enter();
try {
return q.poll();
} finally {
monitor.leave();
}
}
@CanIgnoreReturnValue // pushed down from | to |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/JdbcOAuth2AuthorizedClientService.java | {
"start": 13350,
"end": 15548
} | class ____
implements Function<OAuth2AuthorizedClientHolder, List<SqlParameterValue>> {
@Override
public List<SqlParameterValue> apply(OAuth2AuthorizedClientHolder authorizedClientHolder) {
OAuth2AuthorizedClient authorizedClient = authorizedClientHolder.getAuthorizedClient();
Authentication principal = authorizedClientHolder.getPrincipal();
ClientRegistration clientRegistration = authorizedClient.getClientRegistration();
OAuth2AccessToken accessToken = authorizedClient.getAccessToken();
OAuth2RefreshToken refreshToken = authorizedClient.getRefreshToken();
List<SqlParameterValue> parameters = new ArrayList<>();
parameters.add(new SqlParameterValue(Types.VARCHAR, clientRegistration.getRegistrationId()));
parameters.add(new SqlParameterValue(Types.VARCHAR, principal.getName()));
parameters.add(new SqlParameterValue(Types.VARCHAR, accessToken.getTokenType().getValue()));
parameters
.add(new SqlParameterValue(Types.BLOB, accessToken.getTokenValue().getBytes(StandardCharsets.UTF_8)));
parameters.add(new SqlParameterValue(Types.TIMESTAMP, Timestamp.from(accessToken.getIssuedAt())));
parameters.add(new SqlParameterValue(Types.TIMESTAMP, Timestamp.from(accessToken.getExpiresAt())));
String accessTokenScopes = null;
if (!CollectionUtils.isEmpty(accessToken.getScopes())) {
accessTokenScopes = StringUtils.collectionToDelimitedString(accessToken.getScopes(), ",");
}
parameters.add(new SqlParameterValue(Types.VARCHAR, accessTokenScopes));
byte[] refreshTokenValue = null;
Timestamp refreshTokenIssuedAt = null;
if (refreshToken != null) {
refreshTokenValue = refreshToken.getTokenValue().getBytes(StandardCharsets.UTF_8);
if (refreshToken.getIssuedAt() != null) {
refreshTokenIssuedAt = Timestamp.from(refreshToken.getIssuedAt());
}
}
parameters.add(new SqlParameterValue(Types.BLOB, refreshTokenValue));
parameters.add(new SqlParameterValue(Types.TIMESTAMP, refreshTokenIssuedAt));
return parameters;
}
}
/**
* A holder for an {@link OAuth2AuthorizedClient} and End-User {@link Authentication}
* (Resource Owner).
*/
public static final | OAuth2AuthorizedClientParametersMapper |
java | spring-projects__spring-boot | module/spring-boot-hibernate/src/test/java/org/springframework/boot/hibernate/autoconfigure/HibernateJpaAutoConfigurationTests.java | {
"start": 44701,
"end": 45010
} | class ____ {
@Bean
FilterRegistrationBean<OpenEntityManagerInViewFilter> openEntityManagerInViewFilterFilterRegistrationBean() {
return new FilterRegistrationBean<>();
}
}
@Configuration(proxyBeanMethods = false)
@TestAutoConfigurationPackage(City.class)
static | TestFilterRegistrationConfiguration |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ExtendedSearchUsageLongCounter.java | {
"start": 902,
"end": 2900
} | class ____ implements ExtendedSearchUsageMetric<ExtendedSearchUsageLongCounter> {
public static final String NAME = "extended_search_usage_long_counter";
private final Map<String, Long> values;
public ExtendedSearchUsageLongCounter(Map<String, Long> values) {
this.values = values;
}
public ExtendedSearchUsageLongCounter(StreamInput in) throws IOException {
this.values = in.readMap(StreamInput::readString, StreamInput::readLong);
}
public Map<String, Long> getValues() {
return Collections.unmodifiableMap(values);
}
@Override
public ExtendedSearchUsageLongCounter merge(ExtendedSearchUsageMetric<?> other) {
assert other instanceof ExtendedSearchUsageLongCounter;
ExtendedSearchUsageLongCounter otherLongCounter = (ExtendedSearchUsageLongCounter) other;
Map<String, Long> values = new java.util.HashMap<>(this.values);
otherLongCounter.getValues().forEach((key, otherValue) -> { values.merge(key, otherValue, Long::sum); });
return new ExtendedSearchUsageLongCounter(values);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(values, StreamOutput::writeString, StreamOutput::writeLong);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
for (String key : values.keySet()) {
builder.field(key, values.get(key));
}
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ExtendedSearchUsageLongCounter that = (ExtendedSearchUsageLongCounter) o;
return Objects.equals(values, that.values);
}
@Override
public int hashCode() {
return values.hashCode();
}
@Override
public String getWriteableName() {
return NAME;
}
}
| ExtendedSearchUsageLongCounter |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java | {
"start": 1407,
"end": 5167
} | class ____ extends ESSingleNodeTestCase {
@Override
protected boolean forbidPrivateIndexSettings() {
return false;
}
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(InternalSettingsPlugin.class);
}
public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() {
Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current());
Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(IndexVersion.current());
// special case, these two are the same instance
assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
}
public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
assertThat(
PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()),
is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_COMPATIBLE))
);
assertThat(
PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()),
is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_READONLY_COMPATIBLE))
);
}
public void testThatInstancesAreCachedAndReused() {
assertSame(
PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current()),
PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current())
);
// same index version should be cached
IndexVersion v = IndexVersionUtils.randomVersion();
assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(v), PreBuiltAnalyzers.STANDARD.getAnalyzer(v));
assertNotSame(
PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current()),
PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.current()))
);
// Same Lucene version should be cached:
IndexVersion v1 = IndexVersionUtils.randomVersion();
IndexVersion v2 = new IndexVersion(v1.id() - 1, v1.luceneVersion());
assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(v1), PreBuiltAnalyzers.STOP.getAnalyzer(v2));
}
public void testThatAnalyzersAreUsedInMapping() throws IOException {
int randomInt = randomInt(PreBuiltAnalyzers.values().length - 1);
PreBuiltAnalyzers randomPreBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
String analyzerName = randomPreBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
IndexVersion randomVersion = IndexVersionUtils.randomWriteVersion();
Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion).build();
NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(
analyzerName,
AnalyzerScope.INDEX,
randomPreBuiltAnalyzer.getAnalyzer(randomVersion)
).get();
XContentBuilder mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("properties")
.startObject("field")
.field("type", "text")
.field("analyzer", analyzerName)
.endObject()
.endObject()
.endObject()
.endObject();
MapperService mapperService = createIndex("test", indexSettings, mapping).mapperService();
MappedFieldType fieldType = mapperService.fieldType("field");
assertThat(fieldType.getTextSearchInfo().searchAnalyzer(), instanceOf(NamedAnalyzer.class));
NamedAnalyzer fieldMapperNamedAnalyzer = fieldType.getTextSearchInfo().searchAnalyzer();
assertThat(fieldMapperNamedAnalyzer.analyzer(), is(namedAnalyzer.analyzer()));
}
}
| PreBuiltAnalyzerTests |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/graal/Target_sun_security_jca_JCAUtil.java | {
"start": 287,
"end": 442
} | class ____ {
@Alias
@RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset)
private static SecureRandom def;
}
| Target_sun_security_jca_JCAUtil |
java | spring-projects__spring-boot | module/spring-boot-data-couchbase/src/test/java/org/springframework/boot/data/couchbase/autoconfigure/DataCouchbaseReactiveAndImperativeRepositoriesAutoConfigurationTests.java | {
"start": 2556,
"end": 2718
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
@Import({ CouchbaseMockConfiguration.class, Registrar.class })
static | ImperativeAndReactiveConfiguration |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-21/src/main/java/org/redisson/spring/data/connection/RedissonReactiveZSetCommands.java | {
"start": 2387,
"end": 25780
} | class ____ extends RedissonBaseReactive implements ReactiveZSetCommands {
RedissonReactiveZSetCommands(CommandReactiveExecutor executorService) {
super(executorService);
}
private static final RedisCommand<Double> ZADD_FLOAT = new RedisCommand<>("ZADD", new DoubleNullSafeReplayConvertor());
@Override
public Flux<NumericResponse<ZAddCommand, Number>> zAdd(Publisher<ZAddCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notEmpty(command.getTuples(), "Tuples must not be empty or null!");
byte[] keyBuf = toByteArray(command.getKey());
List<Object> params = new ArrayList<Object>(command.getTuples().size()*2+1);
params.add(keyBuf);
if (command.isIncr() || command.isUpsert() || command.isReturnTotalChanged()) {
if (command.isUpsert()) {
params.add("NX");
} else {
params.add("XX");
}
if (command.isReturnTotalChanged()) {
params.add("CH");
}
if (command.isIncr()) {
params.add("INCR");
}
}
for (Tuple entry : command.getTuples()) {
params.add(BigDecimal.valueOf(entry.getScore()).toPlainString());
params.add(entry.getValue());
}
Mono<Number> m;
if (command.isIncr()) {
m = write(keyBuf, DoubleCodec.INSTANCE, ZADD_FLOAT, params.toArray());
} else {
m = write(keyBuf, StringCodec.INSTANCE, RedisCommands.ZADD, params.toArray());
}
return m.map(v -> new NumericResponse<>(command, v));
});
}
@Override
public Flux<NumericResponse<ZRemCommand, Long>> zRem(Publisher<ZRemCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getValues(), "Values must not be null!");
List<Object> args = new ArrayList<Object>(command.getValues().size() + 1);
args.add(toByteArray(command.getKey()));
args.addAll(command.getValues().stream().map(v -> toByteArray(v)).collect(Collectors.toList()));
Mono<Long> m = write((byte[])args.get(0), StringCodec.INSTANCE, RedisCommands.ZREM_LONG, args.toArray());
return m.map(v -> new NumericResponse<>(command, v));
});
}
@Override
public Flux<NumericResponse<ZIncrByCommand, Double>> zIncrBy(Publisher<ZIncrByCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getValue(), "Member must not be null!");
Assert.notNull(command.getIncrement(), "Increment value must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] valueBuf = toByteArray(command.getValue());
Mono<Double> m = write(keyBuf, DoubleCodec.INSTANCE, RedisCommands.ZINCRBY, keyBuf, new BigDecimal(command.getIncrement().doubleValue()).toPlainString(), valueBuf);
return m.map(v -> new NumericResponse<>(command, v));
});
}
@Override
public Flux<NumericResponse<ZRankCommand, Long>> zRank(Publisher<ZRankCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getValue(), "Member must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] valueBuf = toByteArray(command.getValue());
RedisCommand<Long> cmd = RedisCommands.ZRANK;
if (command.getDirection() == Direction.DESC) {
cmd = RedisCommands.ZREVRANK;
}
Mono<Long> m = read(keyBuf, DoubleCodec.INSTANCE, cmd, keyBuf, valueBuf);
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY = new RedisCommand<>("ZRANGE", new ScoredSortedSetReplayDecoder());
private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY_V2 = new RedisCommand<Set<Tuple>>("ZRANGE",
new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
private static final RedisCommand<Set<Object>> ZRANGE = new RedisCommand<>("ZRANGE", new ObjectSetReplayDecoder<Object>());
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY = new RedisCommand<>("ZREVRANGE", new ScoredSortedSetReplayDecoder());
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY_V2 = new RedisCommand("ZREVRANGE",
new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
private static final RedisCommand<Set<Object>> ZREVRANGE = new RedisCommand<>("ZREVRANGE", new ObjectSetReplayDecoder<Object>());
@Override
public Flux<CommandResponse<ZRangeCommand, Flux<Tuple>>> zRange(Publisher<ZRangeCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
long start = command.getRange().getLowerBound().getValue().orElse(0L);
long end = command.getRange().getUpperBound().getValue().get();
Flux<Tuple> flux;
if (command.getDirection() == Direction.ASC) {
if (command.isWithScores()) {
RedisCommand<Set<Tuple>> cmd = ZRANGE_ENTRY;
if (executorService.getServiceManager().isResp3()) {
cmd = ZRANGE_ENTRY_V2;
}
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd,
keyBuf, start, end, "WITHSCORES");
flux = m.flatMapMany(e -> Flux.fromIterable(e));
} else {
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGE, keyBuf, start, end);
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
}
} else {
if (command.isWithScores()) {
RedisCommand<Set<Tuple>> cmd = ZREVRANGE_ENTRY;
if (executorService.getServiceManager().isResp3()) {
cmd = ZREVRANGE_ENTRY_V2;
}
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd,
keyBuf, start, end, "WITHSCORES");
flux = m.flatMapMany(e -> Flux.fromIterable(e));
} else {
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGE, keyBuf, start, end);
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
}
}
return Mono.just(new CommandResponse<>(command, flux));
});
}
private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE_V2 = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE",
new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCORE_V2 = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE",
new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
@Override
public Flux<CommandResponse<ZRangeByScoreCommand, Flux<Tuple>>> zRangeByScore(
Publisher<ZRangeByScoreCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
String start = toLowerBound(command.getRange());
String end = toUpperBound(command.getRange());
List<Object> args = new ArrayList<Object>();
args.add(keyBuf);
if (command.getDirection() == Direction.ASC) {
args.add(start);
} else {
args.add(end);
}
if (command.getDirection() == Direction.ASC) {
args.add(end);
} else {
args.add(start);
}
if (command.isWithScores()) {
args.add("WITHSCORES");
}
if (command.getLimit().isPresent() && !command.getLimit().get().isUnlimited()) {
args.add("LIMIT");
args.add(command.getLimit().get().getOffset());
args.add(command.getLimit().get().getCount());
}
Flux<Tuple> flux;
if (command.getDirection() == Direction.ASC) {
if (command.isWithScores()) {
RedisCommand<Set<Tuple>> cmd = ZRANGEBYSCORE;
if (executorService.getServiceManager().isResp3()) {
cmd = ZRANGEBYSCORE_V2;
}
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, args.toArray());
flux = m.flatMapMany(e -> Flux.fromIterable(e));
} else {
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, args.toArray());
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
}
} else {
if (command.isWithScores()) {
RedisCommand<Set<Tuple>> cmd = ZREVRANGEBYSCORE;
if (executorService.getServiceManager().isResp3()) {
cmd = ZREVRANGEBYSCORE_V2;
}
Mono<Set<Tuple>> m = read(keyBuf, ByteArrayCodec.INSTANCE, cmd, args.toArray());
flux = m.flatMapMany(e -> Flux.fromIterable(e));
} else {
Mono<Set<byte[]>> m = read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.ZREVRANGEBYSCORE, args.toArray());
flux = m.flatMapMany(e -> Flux.fromIterable(e).map(b -> new DefaultTuple(b, Double.NaN)));
}
}
return Mono.just(new CommandResponse<>(command, flux));
});
}
private static final RedisCommand<ListScanResult<Tuple>> ZSCAN = new RedisCommand<>("ZSCAN", new ListMultiDecoder2(new ScoredSortedSetScanDecoder<Object>(), new ScoredSortedSetScanReplayDecoder()));
@Override
public Flux<CommandResponse<KeyCommand, Flux<Tuple>>> zScan(Publisher<KeyScanCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getOptions(), "ScanOptions must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Flux<Tuple> flux = Flux.create(new SetReactiveIterator<Tuple>() {
@Override
protected RFuture<ScanResult<Object>> scanIterator(RedisClient client, String nextIterPos) {
if (command.getOptions().getPattern() == null) {
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, ZSCAN,
keyBuf, nextIterPos, "COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
}
return executorService.readAsync(client, keyBuf, ByteArrayCodec.INSTANCE, ZSCAN,
keyBuf, nextIterPos, "MATCH", command.getOptions().getPattern(),
"COUNT", Optional.ofNullable(command.getOptions().getCount()).orElse(10L));
}
});
return Mono.just(new CommandResponse<>(command, flux));
});
}
private static final RedisStrictCommand<Long> ZCOUNT = new RedisStrictCommand<Long>("ZCOUNT");
String toLowerBound(Range range) {
StringBuilder s = new StringBuilder();
if (!range.getLowerBound().isInclusive()) {
s.append("(");
}
if (!range.getLowerBound().getValue().isPresent() || range.getLowerBound().getValue().get().toString().isEmpty()) {
s.append("-inf");
} else {
s.append(range.getLowerBound().getValue().get());
}
return s.toString();
}
String toUpperBound(Range range) {
StringBuilder s = new StringBuilder();
if (!range.getUpperBound().isInclusive()) {
s.append("(");
}
if (!range.getUpperBound().getValue().isPresent() || range.getUpperBound().getValue().get().toString().isEmpty()) {
s.append("+inf");
} else {
s.append(range.getUpperBound().getValue().get());
}
return s.toString();
}
String toLexLowerBound(Range range, Object defaultValue) {
StringBuilder s = new StringBuilder();
if (range.getLowerBound().isInclusive()) {
s.append("[");
} else {
s.append("(");
}
if (!range.getLowerBound().getValue().isPresent() || range.getLowerBound().getValue().get().toString().isEmpty()) {
s.append(defaultValue);
} else {
s.append(range.getLowerBound().getValue().get());
}
return s.toString();
}
String toLexUpperBound(Range range, Object defaultValue) {
StringBuilder s = new StringBuilder();
if (range.getUpperBound().isInclusive()) {
s.append("[");
} else {
s.append("(");
}
if (!range.getUpperBound().getValue().isPresent() || range.getUpperBound().getValue().get().toString().isEmpty()) {
s.append(defaultValue);
} else {
s.append(range.getUpperBound().getValue().get());
}
return s.toString();
}
@Override
public Flux<NumericResponse<ZCountCommand, Long>> zCount(Publisher<ZCountCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, ZCOUNT,
keyBuf, toLowerBound(command.getRange()),
toUpperBound(command.getRange()));
return m.map(v -> new NumericResponse<>(command, v));
});
}
@Override
public Flux<NumericResponse<KeyCommand, Long>> zCard(Publisher<KeyCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Long> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.ZCARD, keyBuf);
return m.map(v -> new NumericResponse<>(command, v));
});
}
@Override
public Flux<NumericResponse<ZScoreCommand, Double>> zScore(Publisher<ZScoreCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getValue(), "Value must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] valueBuf = toByteArray(command.getValue());
Mono<Double> m = read(keyBuf, StringCodec.INSTANCE, RedisCommands.ZSCORE, keyBuf, valueBuf);
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisStrictCommand<Long> ZREMRANGEBYRANK = new RedisStrictCommand<Long>("ZREMRANGEBYRANK");
@Override
public Flux<NumericResponse<ZRemRangeByRankCommand, Long>> zRemRangeByRank(
Publisher<ZRemRangeByRankCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZREMRANGEBYRANK,
keyBuf, command.getRange().getLowerBound().getValue().orElse(0L),
command.getRange().getUpperBound().getValue().get());
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisStrictCommand<Long> ZREMRANGEBYSCORE = new RedisStrictCommand<Long>("ZREMRANGEBYSCORE");
@Override
public Flux<NumericResponse<ZRemRangeByScoreCommand, Long>> zRemRangeByScore(
Publisher<ZRemRangeByScoreCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Long> m = write(keyBuf, StringCodec.INSTANCE, ZREMRANGEBYSCORE,
keyBuf, toLowerBound(command.getRange()),
toUpperBound(command.getRange()));
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisStrictCommand<Long> ZUNIONSTORE = new RedisStrictCommand<Long>("ZUNIONSTORE");
@Override
public Flux<NumericResponse<ZUnionStoreCommand, Long>> zUnionStore(Publisher<ZUnionStoreCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Destination key must not be null!");
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
byte[] keyBuf = toByteArray(command.getKey());
List<Object> args = new ArrayList<Object>(command.getSourceKeys().size() * 2 + 5);
args.add(keyBuf);
args.add(command.getSourceKeys().size());
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
if (!command.getWeights().isEmpty()) {
args.add("WEIGHTS");
for (Double weight : command.getWeights()) {
args.add(BigDecimal.valueOf(weight).toPlainString());
}
}
if (command.getAggregateFunction().isPresent()) {
args.add("AGGREGATE");
args.add(command.getAggregateFunction().get().name());
}
Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, ZUNIONSTORE, args.toArray());
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisStrictCommand<Long> ZINTERSTORE = new RedisStrictCommand<Long>("ZINTERSTORE");
@Override
public Flux<NumericResponse<ZInterStoreCommand, Long>> zInterStore(Publisher<ZInterStoreCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Destination key must not be null!");
Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty!");
byte[] keyBuf = toByteArray(command.getKey());
List<Object> args = new ArrayList<Object>(command.getSourceKeys().size() * 2 + 5);
args.add(keyBuf);
args.add(command.getSourceKeys().size());
args.addAll(command.getSourceKeys().stream().map(e -> toByteArray(e)).collect(Collectors.toList()));
if (!command.getWeights().isEmpty()) {
args.add("WEIGHTS");
for (Double weight : command.getWeights()) {
args.add(BigDecimal.valueOf(weight).toPlainString());
}
}
if (command.getAggregateFunction().isPresent()) {
args.add("AGGREGATE");
args.add(command.getAggregateFunction().get().name());
}
Mono<Long> m = write(keyBuf, LongCodec.INSTANCE, ZINTERSTORE, args.toArray());
return m.map(v -> new NumericResponse<>(command, v));
});
}
private static final RedisCommand<Set<Object>> ZRANGEBYLEX = new RedisCommand<Set<Object>>("ZRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
private static final RedisCommand<Set<Object>> ZREVRANGEBYLEX = new RedisCommand<Set<Object>>("ZREVRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
@Override
public Flux<CommandResponse<ZRangeByLexCommand, Flux<ByteBuffer>>> zRangeByLex(
Publisher<ZRangeByLexCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getRange(), "Range must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
String start = null;
String end = null;
if (command.getDirection() == Direction.ASC) {
start = toLexLowerBound(command.getRange(), "-");
end = toLexUpperBound(command.getRange(), "+");
} else {
start = toLexUpperBound(command.getRange(), "-");
end = toLexLowerBound(command.getRange(), "+");
}
Mono<Set<byte[]>> m;
if (!command.getLimit().isUnlimited()) {
if (command.getDirection() == Direction.ASC) {
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGEBYLEX,
keyBuf, start, end, "LIMIT", command.getLimit().getOffset(), command.getLimit().getCount());
} else {
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX,
keyBuf, start, end, "LIMIT", command.getLimit().getOffset(), command.getLimit().getCount());
}
} else {
if (command.getDirection() == Direction.ASC) {
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZRANGEBYLEX,
keyBuf, start, end);
} else {
m = read(keyBuf, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX,
keyBuf, start, end);
}
}
Flux<ByteBuffer> flux = m.flatMapMany(e -> Flux.fromIterable(e).map(v -> ByteBuffer.wrap(v)));
return Mono.just(new CommandResponse<>(command, flux));
});
}
}
| RedissonReactiveZSetCommands |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest7.java | {
"start": 1125,
"end": 2868
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE Orders\n" +
"(\n" +
"O_Id int NOT NULL,\n" +
"OrderNo int NOT NULL,\n" +
"Id_P int,\n" +
"PRIMARY KEY (O_Id),\n" +
"FOREIGN KEY (Id_P) REFERENCES Persons(Id_P)\n" +
")";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(2, visitor.getTables().size());
assertEquals(4, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("Orders")));
assertTrue(visitor.containsColumn("Orders", "O_Id"));
assertTrue(visitor.containsColumn("Orders", "OrderNo"));
assertTrue(visitor.containsColumn("Orders", "Id_P"));
SQLColumnDefinition column = stmt.findColumn("O_id");
assertNotNull(column);
assertEquals(1, column.getConstraints().size());
assertTrue(column.isPrimaryKey());
}
}
| MySqlCreateTableTest7 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java | {
"start": 1362,
"end": 8998
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestPrivilegedOperationExecutor.class);
private String localDataDir;
private String customExecutorPath;
private Configuration nullConf = null;
private Configuration emptyConf;
private Configuration confWithExecutorPath;
private String cGroupTasksNone;
private String cGroupTasksInvalid;
private String cGroupTasks1;
private String cGroupTasks2;
private String cGroupTasks3;
private PrivilegedOperation opDisallowed;
private PrivilegedOperation opTasksNone;
private PrivilegedOperation opTasksInvalid;
private PrivilegedOperation opTasks1;
private PrivilegedOperation opTasks2;
private PrivilegedOperation opTasks3;
@BeforeEach
public void setup() {
localDataDir = System.getProperty("test.build.data");
customExecutorPath = localDataDir + "/bin/container-executor";
emptyConf = new YarnConfiguration();
confWithExecutorPath = new YarnConfiguration();
confWithExecutorPath.set(YarnConfiguration
.NM_LINUX_CONTAINER_EXECUTOR_PATH, customExecutorPath);
cGroupTasksNone = "none";
cGroupTasksInvalid = "invalid_string";
cGroupTasks1 = "cpu/hadoop_yarn/container_01/tasks";
cGroupTasks2 = "net_cls/hadoop_yarn/container_01/tasks";
cGroupTasks3 = "blkio/hadoop_yarn/container_01/tasks";
opDisallowed = new PrivilegedOperation
(PrivilegedOperation.OperationType.DELETE_AS_USER);
opTasksNone = new PrivilegedOperation
(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasksNone);
opTasksInvalid = new PrivilegedOperation
(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
cGroupTasksInvalid);
opTasks1 = new PrivilegedOperation
(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks1);
opTasks2 = new PrivilegedOperation
(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks2);
opTasks3 = new PrivilegedOperation
(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks3);
}
@Test
public void testExecutorPath() {
String containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(nullConf);
//In case HADOOP_YARN_HOME isn't set, CWD is used. If conf is null or
//NM_LINUX_CONTAINER_EXECUTOR_PATH is not set, then a defaultPath is
//constructed.
String yarnHomeEnvVar = System.getenv("HADOOP_YARN_HOME");
String yarnHome = yarnHomeEnvVar != null ? yarnHomeEnvVar
: new File("").getAbsolutePath();
String expectedPath = yarnHome + "/bin/container-executor";
assertEquals(expectedPath, containerExePath);
containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(emptyConf);
assertEquals(expectedPath, containerExePath);
//if NM_LINUX_CONTAINER_EXECUTOR_PATH is set, this must be returned
expectedPath = customExecutorPath;
containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(confWithExecutorPath);
assertEquals(expectedPath, containerExePath);
}
@Test
public void testExecutionCommand() {
PrivilegedOperationExecutor exec = PrivilegedOperationExecutor
.getInstance(confWithExecutorPath);
PrivilegedOperation op = new PrivilegedOperation(PrivilegedOperation
.OperationType.TC_MODIFY_STATE);
String[] cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op);
//No arguments added - so the resulting array should consist of
//1)full path to executor 2) cli switch
assertEquals(2, cmdArray.length);
assertEquals(customExecutorPath, cmdArray[0]);
assertEquals(op.getOperationType().getOption(), cmdArray[1]);
//other (dummy) arguments to tc modify state
String[] additionalArgs = { "cmd_file_1", "cmd_file_2", "cmd_file_3"};
op.appendArgs(additionalArgs);
cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op);
//Resulting array should be of length 2 greater than the number of
//additional arguments added.
assertEquals(2 + additionalArgs.length, cmdArray.length);
assertEquals(customExecutorPath, cmdArray[0]);
assertEquals(op.getOperationType().getOption(), cmdArray[1]);
//Rest of args should be same as additional args.
for (int i = 0; i < additionalArgs.length; ++i) {
assertEquals(additionalArgs[i], cmdArray[2 + i]);
}
//Now test prefix commands
List<String> prefixCommands = Arrays.asList("nice", "-10");
cmdArray = exec.getPrivilegedOperationExecutionCommand(prefixCommands, op);
int prefixLength = prefixCommands.size();
//Resulting array should be of length of prefix command args + 2 (exec
// path + switch) + length of additional args.
assertEquals(prefixLength + 2 + additionalArgs.length,
cmdArray.length);
//Prefix command array comes first
for (int i = 0; i < prefixLength; ++i) {
assertEquals(prefixCommands.get(i), cmdArray[i]);
}
//Followed by the container executor path and the cli switch
assertEquals(customExecutorPath, cmdArray[prefixLength]);
assertEquals(op.getOperationType().getOption(),
cmdArray[prefixLength + 1]);
//Followed by the rest of the args
//Rest of args should be same as additional args.
for (int i = 0; i < additionalArgs.length; ++i) {
assertEquals(additionalArgs[i], cmdArray[prefixLength + 2 + i]);
}
}
@Test
public void testSquashCGroupOperationsWithInvalidOperations() {
List<PrivilegedOperation> ops = new ArrayList<>();
//Ensure that disallowed ops are rejected
ops.add(opTasksNone);
ops.add(opDisallowed);
try {
PrivilegedOperationExecutor.squashCGroupOperations(ops);
fail("Expected squash operation to fail with an exception!");
} catch (PrivilegedOperationException e) {
LOG.info("Caught expected exception : " + e);
}
//Ensure that invalid strings are rejected
ops.clear();
ops.add(opTasksNone);
ops.add(opTasksInvalid);
try {
PrivilegedOperationExecutor.squashCGroupOperations(ops);
fail("Expected squash operation to fail with an exception!");
} catch (PrivilegedOperationException e) {
LOG.info("Caught expected exception : " + e);
}
}
@Test
public void testSquashCGroupOperationsWithValidOperations() {
List<PrivilegedOperation> ops = new ArrayList<>();
//Test squashing, including 'none'
ops.clear();
ops.add(opTasks1);
//this is expected to be ignored
ops.add(opTasksNone);
ops.add(opTasks2);
ops.add(opTasks3);
try {
PrivilegedOperation op = PrivilegedOperationExecutor
.squashCGroupOperations(ops);
String expected = new StringBuilder
(PrivilegedOperation.CGROUP_ARG_PREFIX)
.append(cGroupTasks1).append(PrivilegedOperation
.LINUX_FILE_PATH_SEPARATOR)
.append(cGroupTasks2).append(PrivilegedOperation
.LINUX_FILE_PATH_SEPARATOR)
.append(cGroupTasks3).toString();
//We expect axactly one argument
assertEquals(1, op.getArguments().size());
//Squashed list of tasks files
assertEquals(expected, op.getArguments().get(0));
} catch (PrivilegedOperationException e) {
LOG.info("Caught unexpected exception : " + e);
fail("Caught unexpected exception: " + e);
}
}
} | TestPrivilegedOperationExecutor |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/MappingProcessor.java | {
"start": 20551,
"end": 21418
} | class ____ {
private final TypeElement deferredMapperElement;
private final Element erroneousElement;
private DeferredMapper(TypeElement deferredMapperElement, Element erroneousElement) {
this.deferredMapperElement = deferredMapperElement;
this.erroneousElement = erroneousElement;
}
}
/**
* Filters only the options belonging to the declared additional supported options.
*
* @param options all processor environment options
* @return filtered options
*/
private Map<String, String> resolveAdditionalOptions(Map<String, String> options) {
return options.entrySet().stream()
.filter( entry -> additionalSupportedOptions.contains( entry.getKey() ) )
.collect( Collectors.toMap( Map.Entry::getKey, Map.Entry::getValue ) );
}
}
| DeferredMapper |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/presentation/BinaryRepresentation.java | {
"start": 829,
"end": 3445
} | class ____ extends StandardRepresentation {
public static final BinaryRepresentation BINARY_REPRESENTATION = new BinaryRepresentation();
public static final String BYTE_PREFIX = "0b";
/**
* Returns binary the {@code toString} representation of the given object. It may or not the object's own
* implementation of {@code toString}.
*
* @param object the given object.
* @return the {@code toString} representation of the given object.
*/
@Override
public String toStringOf(Object object) {
if (hasCustomFormatterFor(object)) return customFormat(object);
if (object instanceof Character character) return toStringOf(character);
if (object instanceof Number number) return toStringOf(number);
if (object instanceof String string) return toStringOf(this, string);
return super.toStringOf(object);
}
protected String toStringOf(Representation representation, String s) {
return concat("\"", representation.toStringOf(s.toCharArray()), "\"");
}
@Override
protected String toStringOf(Number number) {
if (number instanceof Byte b) return toStringOf(b);
if (number instanceof Short s) return toStringOf(s);
if (number instanceof Integer i) return toStringOf(i);
if (number instanceof Long l) return toStringOf(l);
if (number instanceof Float f) return toStringOf(f);
if (number instanceof Double d) return toStringOf(d);
return number == null ? null : number.toString();
}
protected String toStringOf(Byte b) {
return toGroupedBinary(Integer.toBinaryString(b & 0xFF), 8);
}
protected String toStringOf(Short s) {
return toGroupedBinary(Integer.toBinaryString(s & 0xFFFF), 16);
}
protected String toStringOf(Integer i) {
return toGroupedBinary(Integer.toBinaryString(i), 32);
}
@Override
protected String toStringOf(Long l) {
return toGroupedBinary(Long.toBinaryString(l), 64);
}
@Override
protected String toStringOf(Float f) {
return toGroupedBinary(Integer.toBinaryString(Float.floatToIntBits(f)), 32);
}
protected String toStringOf(Double d) {
return toGroupedBinary(Long.toBinaryString(Double.doubleToRawLongBits(d)), 64);
}
@Override
protected String toStringOf(Character character) {
return concat("'", toStringOf((short) (int) character), "'");
}
private static String toGroupedBinary(String value, int size) {
return BYTE_PREFIX + NumberGrouping.toBinaryLiteral(toBinary(value, size));
}
private static String toBinary(String value, int size) {
return String.format("%" + size + "s", value).replace(' ', '0');
}
}
| BinaryRepresentation |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/io/FastReaderBuilder.java | {
"start": 21284,
"end": 21386
} | interface ____ {
void execute(Object record, Decoder decoder) throws IOException;
}
}
| ExecutionStep |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/matchers/method/MethodInvocationMatcher.java | {
"start": 6813,
"end": 7329
} | interface ____ which the invoked method was defined. */
public record DefinedIn(String owner) implements Token {
public static DefinedIn create(String owner) {
return new DefinedIn(owner);
}
@Override
public TokenType type() {
return TokenType.DEFINED_IN;
}
@Override
public Object comparisonKey() {
return owner();
}
}
/**
* A token specifying the exact type of the object on which the method is being invoked (or the
* | in |
java | quarkusio__quarkus | extensions/tls-registry/runtime/src/main/java/io/quarkus/tls/runtime/config/TlsConfigUtils.java | {
"start": 613,
"end": 6044
} | class ____ {
private static final Logger log = Logger.getLogger(TlsConfigUtils.class);
private TlsConfigUtils() {
// Avoid direct instantiation
}
/**
* Read the content of the path.
* <p>
* The file is read from the classpath if it exists, otherwise it is read from the file system.
*
* @param path the path, must not be {@code null}
* @return the content of the file
*/
public static byte[] read(Path path) {
byte[] data;
try {
final InputStream resource = Thread.currentThread().getContextClassLoader()
.getResourceAsStream(ClassPathUtils.toResourceName(path));
if (resource != null) {
try (InputStream is = resource) {
data = is.readAllBytes();
}
} else {
try (InputStream is = Files.newInputStream(path)) {
data = is.readAllBytes();
}
}
} catch (IOException e) {
throw new UncheckedIOException("Unable to read file " + path, e);
}
return data;
}
/**
* Configure the {@link TCPSSLOptions} with the given {@link TlsConfiguration}.
*
* @param options the options to configure
* @param configuration the configuration to use
*/
public static void configure(TCPSSLOptions options, TlsConfiguration configuration) {
options.setSsl(true);
if (configuration.getTrustStoreOptions() != null) {
options.setTrustOptions(configuration.getTrustStoreOptions());
}
// For mTLS:
if (configuration.getKeyStoreOptions() != null) {
options.setKeyCertOptions(configuration.getKeyStoreOptions());
}
SSLOptions sslOptions = configuration.getSSLOptions();
if (sslOptions != null) {
options.setSslHandshakeTimeout(sslOptions.getSslHandshakeTimeout());
options.setSslHandshakeTimeoutUnit(sslOptions.getSslHandshakeTimeoutUnit());
for (String suite : sslOptions.getEnabledCipherSuites()) {
options.addEnabledCipherSuite(suite);
}
for (Buffer buffer : sslOptions.getCrlValues()) {
options.addCrlValue(buffer);
}
options.setEnabledSecureTransportProtocols(sslOptions.getEnabledSecureTransportProtocols());
// Try to set ALPN configuration, but handle UnsupportedOperationException
// for example, if the underlying implementation does not support it (es. AMQP)
try {
options.setUseAlpn(sslOptions.isUseAlpn());
} catch (UnsupportedOperationException e) {
log.warnf(
"ALPN configuration not supported by implementation: %s. ALPN setting will be ignored.",
options.getClass().getName());
}
}
}
/**
* Configure the {@link ClientOptionsBase} with the given {@link TlsConfiguration}.
*
* @param options the options to configure
* @param configuration the configuration to use
*/
public static void configure(ClientOptionsBase options, TlsConfiguration configuration) {
configure((TCPSSLOptions) options, configuration);
if (configuration.isTrustAll()) {
options.setTrustAll(true);
}
}
/**
* Configure the {@link NetClientOptions} with the given {@link TlsConfiguration}.
*
* @param options the options to configure
* @param configuration the configuration to use
*/
public static void configure(NetClientOptions options, TlsConfiguration configuration) {
configure((ClientOptionsBase) options, configuration);
if (configuration.getHostnameVerificationAlgorithm().isPresent()) {
options.setHostnameVerificationAlgorithm(configuration.getHostnameVerificationAlgorithm().get());
}
}
/**
* Configure the {@link HttpClientOptions} with the given {@link TlsConfiguration}.
*
* @param options the options to configure
* @param configuration the configuration to use
*/
public static void configure(HttpClientOptions options, TlsConfiguration configuration) {
configure((ClientOptionsBase) options, configuration);
options.setForceSni(configuration.usesSni());
if (configuration.getHostnameVerificationAlgorithm().isPresent()
&& configuration.getHostnameVerificationAlgorithm().get().equals("NONE")) {
// Only disable hostname verification if the algorithm is explicitly set to NONE
options.setVerifyHost(false);
}
}
/**
* Configure the {@link WebSocketClientOptions} with the given {@link TlsConfiguration}.
*
* @param options the options to configure
* @param configuration the configuration to use
*/
public static void configure(WebSocketClientOptions options, TlsConfiguration configuration) {
configure((ClientOptionsBase) options, configuration);
if (configuration.getHostnameVerificationAlgorithm().isPresent()
&& configuration.getHostnameVerificationAlgorithm().get().equals("NONE")) {
// Only disable hostname verification if the algorithm is explicitly set to NONE
options.setVerifyHost(false);
}
}
}
| TlsConfigUtils |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/collectionelement/CountryAttitude.java | {
"start": 376,
"end": 1188
} | class ____ {
private Boy boy;
private Country country;
private boolean likes;
// TODO: This currently does not work
// @ManyToOne(optional = false)
// public Boy getBoy() {
// return boy;
// }
public void setBoy(Boy boy) {
this.boy = boy;
}
@ManyToOne(optional = false)
public Country getCountry() {
return country;
}
public void setCountry(Country country) {
this.country = country;
}
@Column(name = "b_likes")
public boolean isLikes() {
return likes;
}
public void setLikes(boolean likes) {
this.likes = likes;
}
@Override
public int hashCode() {
return country.hashCode();
}
@Override
public boolean equals(Object obj) {
if ( !( obj instanceof CountryAttitude ) ) {
return false;
}
return country.equals( ( (CountryAttitude) obj ).country );
}
}
| CountryAttitude |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/observer/SyntheticObserverTest.java | {
"start": 3132,
"end": 3350
} | class ____ {
public static final List<String> EVENTS = new CopyOnWriteArrayList<String>();
void test(@Observes String event) {
EVENTS.add(event + "_MyObserver");
}
}
}
| MyObserver |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/context/runner/ContextConsumer.java | {
"start": 793,
"end": 1089
} | interface ____ to process an {@link ApplicationContext} with the ability to
* throw a (checked) exception.
*
* @param <C> the application context type
* @author Stephane Nicoll
* @author Andy Wilkinson
* @since 2.0.0
* @see AbstractApplicationContextRunner
*/
@FunctionalInterface
public | used |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socksx/v5/Socks5CommandType.java | {
"start": 783,
"end": 2436
} | class ____ implements Comparable<Socks5CommandType> {
public static final Socks5CommandType CONNECT = new Socks5CommandType(0x01, "CONNECT");
public static final Socks5CommandType BIND = new Socks5CommandType(0x02, "BIND");
public static final Socks5CommandType UDP_ASSOCIATE = new Socks5CommandType(0x03, "UDP_ASSOCIATE");
public static Socks5CommandType valueOf(byte b) {
switch (b) {
case 0x01:
return CONNECT;
case 0x02:
return BIND;
case 0x03:
return UDP_ASSOCIATE;
}
return new Socks5CommandType(b);
}
private final byte byteValue;
private final String name;
private String text;
public Socks5CommandType(int byteValue) {
this(byteValue, "UNKNOWN");
}
public Socks5CommandType(int byteValue, String name) {
this.name = ObjectUtil.checkNotNull(name, "name");
this.byteValue = (byte) byteValue;
}
public byte byteValue() {
return byteValue;
}
@Override
public int hashCode() {
return byteValue;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Socks5CommandType)) {
return false;
}
return byteValue == ((Socks5CommandType) obj).byteValue;
}
@Override
public int compareTo(Socks5CommandType o) {
return byteValue - o.byteValue;
}
@Override
public String toString() {
String text = this.text;
if (text == null) {
this.text = text = name + '(' + (byteValue & 0xFF) + ')';
}
return text;
}
}
| Socks5CommandType |
java | apache__dubbo | dubbo-spring-boot-project/dubbo-spring-boot-compatible/dubbo-spring-boot-autoconfigure-compatible/src/main/java/org/apache/dubbo/spring/boot/autoconfigure/RelaxedDubboConfigBinder.java | {
"start": 1237,
"end": 2064
} | class ____ implements ConfigurationBeanBinder {
@Override
public void bind(
Map<String, Object> configurationProperties,
boolean ignoreUnknownFields,
boolean ignoreInvalidFields,
Object configurationBean) {
RelaxedDataBinder relaxedDataBinder = new RelaxedDataBinder(configurationBean);
// Set ignored*
relaxedDataBinder.setIgnoreInvalidFields(ignoreInvalidFields);
relaxedDataBinder.setIgnoreUnknownFields(ignoreUnknownFields);
// Get properties under specified prefix from PropertySources
// Convert Map to MutablePropertyValues
MutablePropertyValues propertyValues = new MutablePropertyValues(configurationProperties);
// Bind
relaxedDataBinder.bind(propertyValues);
}
}
| RelaxedDubboConfigBinder |
java | google__guava | android/guava/src/com/google/common/collect/Maps.java | {
"start": 85200,
"end": 113624
} | class ____<
K extends @Nullable Object, V1 extends @Nullable Object, V2 extends @Nullable Object>
extends TransformedEntriesSortedMap<K, V1, V2> implements NavigableMap<K, V2> {
TransformedEntriesNavigableMap(
NavigableMap<K, V1> fromMap, EntryTransformer<? super K, ? super V1, V2> transformer) {
super(fromMap, transformer);
}
@Override
public @Nullable Entry<K, V2> ceilingEntry(@ParametricNullness K key) {
return transformEntry(fromMap().ceilingEntry(key));
}
@Override
public @Nullable K ceilingKey(@ParametricNullness K key) {
return fromMap().ceilingKey(key);
}
@Override
public NavigableSet<K> descendingKeySet() {
return fromMap().descendingKeySet();
}
@Override
public NavigableMap<K, V2> descendingMap() {
return transformEntries(fromMap().descendingMap(), transformer);
}
@Override
public @Nullable Entry<K, V2> firstEntry() {
return transformEntry(fromMap().firstEntry());
}
@Override
public @Nullable Entry<K, V2> floorEntry(@ParametricNullness K key) {
return transformEntry(fromMap().floorEntry(key));
}
@Override
public @Nullable K floorKey(@ParametricNullness K key) {
return fromMap().floorKey(key);
}
@Override
public NavigableMap<K, V2> headMap(@ParametricNullness K toKey) {
return headMap(toKey, false);
}
@Override
public NavigableMap<K, V2> headMap(@ParametricNullness K toKey, boolean inclusive) {
return transformEntries(fromMap().headMap(toKey, inclusive), transformer);
}
@Override
public @Nullable Entry<K, V2> higherEntry(@ParametricNullness K key) {
return transformEntry(fromMap().higherEntry(key));
}
@Override
public @Nullable K higherKey(@ParametricNullness K key) {
return fromMap().higherKey(key);
}
@Override
public @Nullable Entry<K, V2> lastEntry() {
return transformEntry(fromMap().lastEntry());
}
@Override
public @Nullable Entry<K, V2> lowerEntry(@ParametricNullness K key) {
return transformEntry(fromMap().lowerEntry(key));
}
@Override
public @Nullable K lowerKey(@ParametricNullness K key) {
return fromMap().lowerKey(key);
}
@Override
public NavigableSet<K> navigableKeySet() {
return fromMap().navigableKeySet();
}
@Override
public @Nullable Entry<K, V2> pollFirstEntry() {
return transformEntry(fromMap().pollFirstEntry());
}
@Override
public @Nullable Entry<K, V2> pollLastEntry() {
return transformEntry(fromMap().pollLastEntry());
}
@Override
public NavigableMap<K, V2> subMap(
@ParametricNullness K fromKey,
boolean fromInclusive,
@ParametricNullness K toKey,
boolean toInclusive) {
return transformEntries(
fromMap().subMap(fromKey, fromInclusive, toKey, toInclusive), transformer);
}
@Override
public NavigableMap<K, V2> subMap(@ParametricNullness K fromKey, @ParametricNullness K toKey) {
return subMap(fromKey, true, toKey, false);
}
@Override
public NavigableMap<K, V2> tailMap(@ParametricNullness K fromKey) {
return tailMap(fromKey, true);
}
@Override
public NavigableMap<K, V2> tailMap(@ParametricNullness K fromKey, boolean inclusive) {
return transformEntries(fromMap().tailMap(fromKey, inclusive), transformer);
}
private @Nullable Entry<K, V2> transformEntry(@Nullable Entry<K, V1> entry) {
return (entry == null) ? null : Maps.transformEntry(transformer, entry);
}
@Override
protected NavigableMap<K, V1> fromMap() {
return (NavigableMap<K, V1>) super.fromMap();
}
}
static <K extends @Nullable Object> Predicate<Entry<K, ?>> keyPredicateOnEntries(
Predicate<? super K> keyPredicate) {
return compose(keyPredicate, Entry::getKey);
}
static <V extends @Nullable Object> Predicate<Entry<?, V>> valuePredicateOnEntries(
Predicate<? super V> valuePredicate) {
return compose(valuePredicate, Entry::getValue);
}
/**
* Returns a map containing the mappings in {@code unfiltered} whose keys satisfy a predicate. The
* returned map is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key that doesn't satisfy the predicate, the map's {@code put()} and
* {@code putAll()} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose keys satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code keyPredicate} must be <i>consistent with equals</i>, as documented at
* {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*/
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> filterKeys(
Map<K, V> unfiltered, Predicate<? super K> keyPredicate) {
checkNotNull(keyPredicate);
Predicate<Entry<K, ?>> entryPredicate = keyPredicateOnEntries(keyPredicate);
return (unfiltered instanceof AbstractFilteredMap)
? filterFiltered((AbstractFilteredMap<K, V>) unfiltered, entryPredicate)
: new FilteredKeyMap<K, V>(checkNotNull(unfiltered), keyPredicate, entryPredicate);
}
/**
* Returns a sorted map containing the mappings in {@code unfiltered} whose keys satisfy a
* predicate. The returned map is a live view of {@code unfiltered}; changes to one affect the
* other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key that doesn't satisfy the predicate, the map's {@code put()} and
* {@code putAll()} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose keys satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code keyPredicate} must be <i>consistent with equals</i>, as documented at
* {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*
* @since 11.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object> SortedMap<K, V> filterKeys(
SortedMap<K, V> unfiltered, Predicate<? super K> keyPredicate) {
// TODO(lowasser): Return a subclass of FilteredKeyMap for slightly better performance.
return filterEntries(unfiltered, keyPredicateOnEntries(keyPredicate));
}
/**
* Returns a navigable map containing the mappings in {@code unfiltered} whose keys satisfy a
* predicate. The returned map is a live view of {@code unfiltered}; changes to one affect the
* other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key that doesn't satisfy the predicate, the map's {@code put()} and
* {@code putAll()} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose keys satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code keyPredicate} must be <i>consistent with equals</i>, as documented at
* {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*
* @since 14.0
*/
@GwtIncompatible // NavigableMap
public static <K extends @Nullable Object, V extends @Nullable Object>
NavigableMap<K, V> filterKeys(
NavigableMap<K, V> unfiltered, Predicate<? super K> keyPredicate) {
// TODO(lowasser): Return a subclass of FilteredKeyMap for slightly better performance.
return filterEntries(unfiltered, keyPredicateOnEntries(keyPredicate));
}
/**
* Returns a bimap containing the mappings in {@code unfiltered} whose keys satisfy a predicate.
* The returned bimap is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting bimap's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the bimap
* and its views. When given a key that doesn't satisfy the predicate, the bimap's {@code put()},
* {@code forcePut()} and {@code putAll()} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered
* bimap or its views, only mappings that satisfy the filter will be removed from the underlying
* bimap.
*
* <p>The returned bimap isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered bimap's methods, such as {@code size()}, iterate across every key in
* the underlying bimap and determine which satisfy the filter. When a live view is <i>not</i>
* needed, it may be faster to copy the filtered bimap and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals </i>, as documented
* at {@link Predicate#apply}.
*
* @since 14.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object> BiMap<K, V> filterKeys(
BiMap<K, V> unfiltered, Predicate<? super K> keyPredicate) {
checkNotNull(keyPredicate);
return filterEntries(unfiltered, keyPredicateOnEntries(keyPredicate));
}
/**
* Returns a map containing the mappings in {@code unfiltered} whose values satisfy a predicate.
* The returned map is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a value that doesn't satisfy the predicate, the map's {@code put()},
* {@code putAll()}, and {@link Entry#setValue} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose values satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code valuePredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*/
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> filterValues(
Map<K, V> unfiltered, Predicate<? super V> valuePredicate) {
return filterEntries(unfiltered, valuePredicateOnEntries(valuePredicate));
}
/**
* Returns a sorted map containing the mappings in {@code unfiltered} whose values satisfy a
* predicate. The returned map is a live view of {@code unfiltered}; changes to one affect the
* other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a value that doesn't satisfy the predicate, the map's {@code put()},
* {@code putAll()}, and {@link Entry#setValue} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose values satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code valuePredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*
* @since 11.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
SortedMap<K, V> filterValues(
SortedMap<K, V> unfiltered, Predicate<? super V> valuePredicate) {
return filterEntries(unfiltered, valuePredicateOnEntries(valuePredicate));
}
/**
* Returns a navigable map containing the mappings in {@code unfiltered} whose values satisfy a
* predicate. The returned map is a live view of {@code unfiltered}; changes to one affect the
* other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a value that doesn't satisfy the predicate, the map's {@code put()},
* {@code putAll()}, and {@link Entry#setValue} methods throw an {@link IllegalArgumentException}.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings whose values satisfy the filter will be removed from the underlying
* map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code valuePredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}. Do not provide a predicate such as {@code
* Predicates.instanceOf(ArrayList.class)}, which is inconsistent with equals.
*
* @since 14.0
*/
@GwtIncompatible // NavigableMap
public static <K extends @Nullable Object, V extends @Nullable Object>
NavigableMap<K, V> filterValues(
NavigableMap<K, V> unfiltered, Predicate<? super V> valuePredicate) {
return filterEntries(unfiltered, valuePredicateOnEntries(valuePredicate));
}
/**
* Returns a bimap containing the mappings in {@code unfiltered} whose values satisfy a predicate.
* The returned bimap is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting bimap's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the bimap
* and its views. When given a value that doesn't satisfy the predicate, the bimap's {@code
* put()}, {@code forcePut()} and {@code putAll()} methods throw an {@link
* IllegalArgumentException}. Similarly, the map's entries have a {@link Entry#setValue} method
* that throws an {@link IllegalArgumentException} when the provided value doesn't satisfy the
* predicate.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered
* bimap or its views, only mappings that satisfy the filter will be removed from the underlying
* bimap.
*
* <p>The returned bimap isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered bimap's methods, such as {@code size()}, iterate across every value in
* the underlying bimap and determine which satisfy the filter. When a live view is <i>not</i>
* needed, it may be faster to copy the filtered bimap and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals </i>, as documented
* at {@link Predicate#apply}.
*
* @since 14.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object> BiMap<K, V> filterValues(
BiMap<K, V> unfiltered, Predicate<? super V> valuePredicate) {
return filterEntries(unfiltered, valuePredicateOnEntries(valuePredicate));
}
/**
* Returns a map containing the mappings in {@code unfiltered} that satisfy a predicate. The
* returned map is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key/value pair that doesn't satisfy the predicate, the map's {@code
* put()} and {@code putAll()} methods throw an {@link IllegalArgumentException}. Similarly, the
* map's entries have a {@link Entry#setValue} method that throws an {@link
* IllegalArgumentException} when the existing key and the provided value don't satisfy the
* predicate.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings that satisfy the filter will be removed from the underlying map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}.
*/
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> filterEntries(
Map<K, V> unfiltered, Predicate<? super Entry<K, V>> entryPredicate) {
checkNotNull(entryPredicate);
return (unfiltered instanceof AbstractFilteredMap)
? filterFiltered((AbstractFilteredMap<K, V>) unfiltered, entryPredicate)
: new FilteredEntryMap<K, V>(checkNotNull(unfiltered), entryPredicate);
}
/**
* Returns a sorted map containing the mappings in {@code unfiltered} that satisfy a predicate.
* The returned map is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key/value pair that doesn't satisfy the predicate, the map's {@code
* put()} and {@code putAll()} methods throw an {@link IllegalArgumentException}. Similarly, the
* map's entries have a {@link Entry#setValue} method that throws an {@link
* IllegalArgumentException} when the existing key and the provided value don't satisfy the
* predicate.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings that satisfy the filter will be removed from the underlying map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}.
*
* @since 11.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
SortedMap<K, V> filterEntries(
SortedMap<K, V> unfiltered, Predicate<? super Entry<K, V>> entryPredicate) {
checkNotNull(entryPredicate);
return (unfiltered instanceof FilteredEntrySortedMap)
? filterFiltered((FilteredEntrySortedMap<K, V>) unfiltered, entryPredicate)
: new FilteredEntrySortedMap<K, V>(checkNotNull(unfiltered), entryPredicate);
}
/**
* Returns a sorted map containing the mappings in {@code unfiltered} that satisfy a predicate.
* The returned map is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting map's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the map
* and its views. When given a key/value pair that doesn't satisfy the predicate, the map's {@code
* put()} and {@code putAll()} methods throw an {@link IllegalArgumentException}. Similarly, the
* map's entries have a {@link Entry#setValue} method that throws an {@link
* IllegalArgumentException} when the existing key and the provided value don't satisfy the
* predicate.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered map
* or its views, only mappings that satisfy the filter will be removed from the underlying map.
*
* <p>The returned map isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered map's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying map and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered map and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals</i>, as documented
* at {@link Predicate#apply}.
*
* @since 14.0
*/
@GwtIncompatible // NavigableMap
public static <K extends @Nullable Object, V extends @Nullable Object>
NavigableMap<K, V> filterEntries(
NavigableMap<K, V> unfiltered, Predicate<? super Entry<K, V>> entryPredicate) {
checkNotNull(entryPredicate);
return (unfiltered instanceof FilteredEntryNavigableMap)
? filterFiltered((FilteredEntryNavigableMap<K, V>) unfiltered, entryPredicate)
: new FilteredEntryNavigableMap<K, V>(checkNotNull(unfiltered), entryPredicate);
}
/**
* Returns a bimap containing the mappings in {@code unfiltered} that satisfy a predicate. The
* returned bimap is a live view of {@code unfiltered}; changes to one affect the other.
*
* <p>The resulting bimap's {@code keySet()}, {@code entrySet()}, and {@code values()} views have
* iterators that don't support {@code remove()}, but all other methods are supported by the bimap
* and its views. When given a key/value pair that doesn't satisfy the predicate, the bimap's
* {@code put()}, {@code forcePut()} and {@code putAll()} methods throw an {@link
* IllegalArgumentException}. Similarly, the map's entries have an {@link Entry#setValue} method
* that throws an {@link IllegalArgumentException} when the existing key and the provided value
* don't satisfy the predicate.
*
* <p>When methods such as {@code removeAll()} and {@code clear()} are called on the filtered
* bimap or its views, only mappings that satisfy the filter will be removed from the underlying
* bimap.
*
* <p>The returned bimap isn't threadsafe or serializable, even if {@code unfiltered} is.
*
* <p>Many of the filtered bimap's methods, such as {@code size()}, iterate across every key/value
* mapping in the underlying bimap and determine which satisfy the filter. When a live view is
* <i>not</i> needed, it may be faster to copy the filtered bimap and use the copy.
*
* <p><b>Warning:</b> {@code entryPredicate} must be <i>consistent with equals </i>, as documented
* at {@link Predicate#apply}.
*
* @since 14.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object> BiMap<K, V> filterEntries(
BiMap<K, V> unfiltered, Predicate<? super Entry<K, V>> entryPredicate) {
checkNotNull(unfiltered);
checkNotNull(entryPredicate);
return (unfiltered instanceof FilteredEntryBiMap)
? filterFiltered((FilteredEntryBiMap<K, V>) unfiltered, entryPredicate)
: new FilteredEntryBiMap<K, V>(unfiltered, entryPredicate);
}
/**
* Support {@code clear()}, {@code removeAll()}, and {@code retainAll()} when filtering a filtered
* map.
*/
private static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> filterFiltered(
AbstractFilteredMap<K, V> map, Predicate<? super Entry<K, V>> entryPredicate) {
return new FilteredEntryMap<>(map.unfiltered, Predicates.and(map.predicate, entryPredicate));
}
/**
* Support {@code clear()}, {@code removeAll()}, and {@code retainAll()} when filtering a filtered
* sorted map.
*/
private static <K extends @Nullable Object, V extends @Nullable Object>
SortedMap<K, V> filterFiltered(
FilteredEntrySortedMap<K, V> map, Predicate<? super Entry<K, V>> entryPredicate) {
Predicate<Entry<K, V>> predicate = Predicates.and(map.predicate, entryPredicate);
return new FilteredEntrySortedMap<>(map.sortedMap(), predicate);
}
/**
* Support {@code clear()}, {@code removeAll()}, and {@code retainAll()} when filtering a filtered
* navigable map.
*/
@GwtIncompatible // NavigableMap
private static <K extends @Nullable Object, V extends @Nullable Object>
NavigableMap<K, V> filterFiltered(
FilteredEntryNavigableMap<K, V> map, Predicate<? super Entry<K, V>> entryPredicate) {
Predicate<Entry<K, V>> predicate = Predicates.and(map.entryPredicate, entryPredicate);
return new FilteredEntryNavigableMap<>(map.unfiltered, predicate);
}
/**
* Support {@code clear()}, {@code removeAll()}, and {@code retainAll()} when filtering a filtered
* map.
*/
private static <K extends @Nullable Object, V extends @Nullable Object>
BiMap<K, V> filterFiltered(
FilteredEntryBiMap<K, V> map, Predicate<? super Entry<K, V>> entryPredicate) {
Predicate<Entry<K, V>> predicate = Predicates.and(map.predicate, entryPredicate);
return new FilteredEntryBiMap<>(map.unfiltered(), predicate);
}
private abstract static | TransformedEntriesNavigableMap |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/cdi/inheritance/RolesAllowedBean.java | {
"start": 203,
"end": 319
} | class ____ {
public String ping() {
return RolesAllowedBean.class.getSimpleName();
}
}
| RolesAllowedBean |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/expression/CachedExpressionEvaluatorTests.java | {
"start": 1274,
"end": 2774
} | class ____ {
private final TestExpressionEvaluator expressionEvaluator = new TestExpressionEvaluator();
@Test
void parseNewExpression() {
Method method = ReflectionUtils.findMethod(getClass(), "toString");
Expression expression = expressionEvaluator.getTestExpression("true", method, getClass());
hasParsedExpression("true");
assertThat(expression.getValue()).asInstanceOf(BOOLEAN).isTrue();
assertThat(expressionEvaluator.testCache).as("Expression should be in cache").hasSize(1);
}
@Test
void cacheExpression() {
Method method = ReflectionUtils.findMethod(getClass(), "toString");
expressionEvaluator.getTestExpression("true", method, getClass());
expressionEvaluator.getTestExpression("true", method, getClass());
expressionEvaluator.getTestExpression("true", method, getClass());
hasParsedExpression("true");
assertThat(expressionEvaluator.testCache).as("Only one expression should be in cache").hasSize(1);
}
@Test
void cacheExpressionBasedOnConcreteType() {
Method method = ReflectionUtils.findMethod(getClass(), "toString");
expressionEvaluator.getTestExpression("true", method, getClass());
expressionEvaluator.getTestExpression("true", method, Object.class);
assertThat(expressionEvaluator.testCache).as("Cached expression should be based on type").hasSize(2);
}
private void hasParsedExpression(String expression) {
verify(expressionEvaluator.getParser(), times(1)).parseExpression(expression);
}
private static | CachedExpressionEvaluatorTests |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/integration/AbstractMockitoBeanAndGenericsIntegrationTests.java | {
"start": 2106,
"end": 2189
} | class ____ {
@Bean
ThingImpl thing() {
return new ThingImpl();
}
}
}
| Config |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/index/FileDataIndexSpilledRegionManagerImpl.java | {
"start": 16270,
"end": 17673
} | class ____<T extends FileDataIndexRegionHelper.Region>
implements FileDataIndexSpilledRegionManager.Factory<T> {
private final int regionGroupSizeInBytes;
private final long maxCacheCapacity;
private final int regionHeaderSize;
private final FileDataIndexRegionHelper<T> fileDataIndexRegionHelper;
public Factory(
int regionGroupSizeInBytes,
long maxCacheCapacity,
int regionHeaderSize,
FileDataIndexRegionHelper<T> fileDataIndexRegionHelper) {
this.regionGroupSizeInBytes = regionGroupSizeInBytes;
this.maxCacheCapacity = maxCacheCapacity;
this.regionHeaderSize = regionHeaderSize;
this.fileDataIndexRegionHelper = fileDataIndexRegionHelper;
}
@Override
public FileDataIndexSpilledRegionManager<T> create(
int numSubpartitions,
Path indexFilePath,
BiConsumer<Integer, T> cacheRegionConsumer) {
return new FileDataIndexSpilledRegionManagerImpl<>(
numSubpartitions,
indexFilePath,
regionGroupSizeInBytes,
maxCacheCapacity,
regionHeaderSize,
cacheRegionConsumer,
fileDataIndexRegionHelper);
}
}
}
| Factory |
java | spring-projects__spring-framework | spring-core-test/src/test/java/org/springframework/core/test/tools/TestCompilerTests.java | {
"start": 4959,
"end": 8497
} | class ____ {
@SuppressWarnings("deprecation")
public static void main(String[] args) {
new Hello().get();
}
}
""");
TestCompiler.forSystem().failOnWarning().withSources(
SourceFile.of(HELLO_DEPRECATED), main).compile(compiled -> {
Supplier<String> supplier = compiled.getInstance(Supplier.class,
"com.example.Hello");
assertThat(supplier.get()).isEqualTo("Hello Deprecated");
});
}
@Test
void withSourcesArrayAddsSource() {
SourceFile sourceFile = SourceFile.of(HELLO_WORLD);
TestCompiler.forSystem().withSources(sourceFile).compile(
this::assertSuppliesHelloWorld);
}
@Test
void withSourcesAddsSource() {
SourceFiles sourceFiles = SourceFiles.of(SourceFile.of(HELLO_WORLD));
TestCompiler.forSystem().withSources(sourceFiles).compile(
this::assertSuppliesHelloWorld);
}
@Test
void withResourcesArrayAddsResource() {
ResourceFile resourceFile = ResourceFile.of("META-INF/myfile", "test");
TestCompiler.forSystem().withResources(resourceFile).compile(
this::assertHasResource);
}
@Test
void withResourcesAddsResource() {
ResourceFiles resourceFiles = ResourceFiles.of(
ResourceFile.of("META-INF/myfile", "test"));
TestCompiler.forSystem().withResources(resourceFiles).compile(
this::assertHasResource);
}
@Test
void withProcessorsArrayAddsProcessors() {
SourceFile sourceFile = SourceFile.of(HELLO_WORLD);
TestProcessor processor = new TestProcessor();
TestCompiler.forSystem().withSources(sourceFile).withProcessors(processor).compile((compiled -> {
assertThat(processor.getProcessedAnnotations()).isNotEmpty();
assertThat(processor.getProcessedAnnotations()).satisfiesExactly(element ->
assertThat(element.getQualifiedName().toString()).isEqualTo("java.lang.Deprecated"));
}));
}
@Test
void withProcessorsAddsProcessors() {
SourceFile sourceFile = SourceFile.of(HELLO_WORLD);
TestProcessor processor = new TestProcessor();
List<Processor> processors = List.of(processor);
TestCompiler.forSystem().withSources(sourceFile).withProcessors(processors).compile((compiled -> {
assertThat(processor.getProcessedAnnotations()).isNotEmpty();
assertThat(processor.getProcessedAnnotations()).satisfiesExactly(element ->
assertThat(element.getQualifiedName().toString()).isEqualTo("java.lang.Deprecated"));
}));
}
@Test
void compileWithWritableContent() {
WritableContent content = appendable -> appendable.append(HELLO_WORLD);
TestCompiler.forSystem().compile(content, this::assertSuppliesHelloWorld);
}
@Test
void compileWithSourceFile() {
SourceFile sourceFile = SourceFile.of(HELLO_WORLD);
TestCompiler.forSystem().compile(sourceFile, this::assertSuppliesHelloWorld);
}
@Test
void compileWithSourceFiles() {
SourceFiles sourceFiles = SourceFiles.of(SourceFile.of(HELLO_WORLD));
TestCompiler.forSystem().compile(sourceFiles, this::assertSuppliesHelloWorld);
}
@Test
void compileWithSourceFilesAndResourceFiles() {
SourceFiles sourceFiles = SourceFiles.of(SourceFile.of(HELLO_WORLD));
ResourceFiles resourceFiles = ResourceFiles.of(
ResourceFile.of("META-INF/myfile", "test"));
TestCompiler.forSystem().compile(sourceFiles, resourceFiles, compiled -> {
assertSuppliesHelloWorld(compiled);
assertHasResource(compiled);
});
}
@Test
@CompileWithForkedClassLoader
void compiledCodeCanAccessExistingPackagePrivateClassIfAnnotated() throws LinkageError {
SourceFiles sourceFiles = SourceFiles.of(SourceFile.of("""
package com.example;
public | Main |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/JsonValueSerializationTest.java | {
"start": 2384,
"end": 2629
} | class ____
{
@JsonValue
public Map<String,String> toMap()
{
HashMap<String,String> map = new HashMap<String,String>();
map.put("a", "1");
return map;
}
}
static | MapBean |
java | apache__camel | components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/rest/RestNettyHttpOptionsTest.java | {
"start": 1101,
"end": 3366
} | class ____ extends BaseNettyTest {
@Test
public void testNettyServerOptions() {
Exchange exchange = template.request("http://localhost:" + getPort() + "/users/v1/customers",
exchange1 -> exchange1.getIn().setHeader(Exchange.HTTP_METHOD, "OPTIONS"));
assertEquals(200, exchange.getMessage().getHeader(Exchange.HTTP_RESPONSE_CODE));
assertEquals("GET,OPTIONS", exchange.getMessage().getHeader("ALLOW"));
assertEquals("", exchange.getMessage().getBody(String.class));
exchange = fluentTemplate.to("http://localhost:" + getPort() + "/users/v1/id/123")
.withHeader(Exchange.HTTP_METHOD, "OPTIONS").send();
assertEquals(200, exchange.getMessage().getHeader(Exchange.HTTP_RESPONSE_CODE));
assertEquals("PUT,OPTIONS", exchange.getMessage().getHeader("ALLOW"));
assertEquals("", exchange.getMessage().getBody(String.class));
}
@Test
public void testNettyServerMultipleOptions() {
Exchange exchange = template.request("http://localhost:" + getPort() + "/users/v2/options",
exchange1 -> exchange1.getIn().setHeader(Exchange.HTTP_METHOD, "OPTIONS"));
assertEquals(200, exchange.getMessage().getHeader(Exchange.HTTP_RESPONSE_CODE));
assertEquals("GET,POST,OPTIONS", exchange.getMessage().getHeader("ALLOW"));
assertEquals("", exchange.getMessage().getBody(String.class));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use netty on localhost with the given port
restConfiguration().component("netty-http").host("localhost").port(getPort());
// use the rest DSL to define the rest services
rest("/users/")
.get("v1/customers")
.to("mock:customers")
.put("v1/id/{id}")
.to("mock:id")
.get("v2/options")
.to("mock:options")
.post("v2/options")
.to("mock:options");
}
};
}
}
| RestNettyHttpOptionsTest |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/datasource/ConfigDefaultPUDatasourceUrlMissingDynamicInjectionTest.java | {
"start": 525,
"end": 2005
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(jar -> jar.addClass(MyEntity.class))
.withConfigurationResource("application.properties")
.overrideConfigKey("quarkus.datasource.jdbc.url", "")
// The URL won't be missing if dev services are enabled
.overrideConfigKey("quarkus.devservices.enabled", "false")
.assertException(e -> assertThat(e)
.hasMessageContainingAll(
"Unable to find datasource '<default>' for persistence unit '<default>'",
"Datasource '<default>' was deactivated automatically because its URL is not set.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.jdbc.url'",
"Refer to https://quarkus.io/guides/datasource for guidance."));
@Inject
InjectableInstance<SessionFactory> sessionFactory;
@Inject
InjectableInstance<Session> session;
// Just try another bean type, but not all of them...
@Inject
InjectableInstance<SchemaManager> schemaManager;
@Test
public void test() {
Assertions.fail("Startup should have failed");
}
}
| ConfigDefaultPUDatasourceUrlMissingDynamicInjectionTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 99357,
"end": 105295
} | class ____ {
private RawComparator comparator;
private MergeSort mergeSort; //the implementation of merge sort
private Path[] inFiles; // when merging or sorting
private Path outFile;
private int memory; // bytes
private int factor; // merged per pass
private FileSystem fs = null;
private Class keyClass;
private Class valClass;
private Configuration conf;
private Metadata metadata;
private Progressable progressable = null;
/**
* Sort and merge files containing the named classes.
* @param fs input FileSystem.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @param conf input Configuration.
*/
public Sorter(FileSystem fs, Class<? extends WritableComparable> keyClass,
Class valClass, Configuration conf) {
this(fs, WritableComparator.get(keyClass, conf), keyClass, valClass, conf);
}
/**
* Sort and merge using an arbitrary {@link RawComparator}.
* @param fs input FileSystem.
* @param comparator input RawComparator.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @param conf input Configuration.
*/
public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
Class valClass, Configuration conf) {
this(fs, comparator, keyClass, valClass, conf, new Metadata());
}
/**
* Sort and merge using an arbitrary {@link RawComparator}.
* @param fs input FileSystem.
* @param comparator input RawComparator.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @param conf input Configuration.
* @param metadata input metadata.
*/
@SuppressWarnings("deprecation")
public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
Class valClass, Configuration conf, Metadata metadata) {
this.fs = fs;
this.comparator = comparator;
this.keyClass = keyClass;
this.valClass = valClass;
// Remember to fall-back on the deprecated MB and Factor keys
// until they are removed away permanently.
if (conf.get(CommonConfigurationKeys.IO_SORT_MB_KEY) != null) {
this.memory = conf.getInt(CommonConfigurationKeys.IO_SORT_MB_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
} else {
this.memory = conf.getInt(CommonConfigurationKeys.SEQ_IO_SORT_MB_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
}
if (conf.get(CommonConfigurationKeys.IO_SORT_FACTOR_KEY) != null) {
this.factor = conf.getInt(CommonConfigurationKeys.IO_SORT_FACTOR_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
} else {
this.factor = conf.getInt(
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
}
this.conf = conf;
this.metadata = metadata;
}
/**
* Set the number of streams to merge at once.
* @param factor factor.
*/
public void setFactor(int factor) { this.factor = factor; }
/** @return Get the number of streams to merge at once.*/
public int getFactor() { return factor; }
/**
* Set the total amount of buffer memory, in bytes.
* @param memory buffer memory.
*/
public void setMemory(int memory) { this.memory = memory; }
/** @return Get the total amount of buffer memory, in bytes.*/
public int getMemory() { return memory; }
/**
* Set the progressable object in order to report progress.
* @param progressable input Progressable.
*/
public void setProgressable(Progressable progressable) {
this.progressable = progressable;
}
/**
* Perform a file sort from a set of input files into an output file.
* @param inFiles the files to be sorted
* @param outFile the sorted output file
* @param deleteInput should the input files be deleted as they are read?
* @throws IOException raised on errors performing I/O.
*/
public void sort(Path[] inFiles, Path outFile,
boolean deleteInput) throws IOException {
if (fs.exists(outFile)) {
throw new IOException("already exists: " + outFile);
}
this.inFiles = inFiles;
this.outFile = outFile;
int segments = sortPass(deleteInput);
if (segments > 1) {
mergePass(outFile.getParent());
}
}
/**
* Perform a file sort from a set of input files and return an iterator.
* @param inFiles the files to be sorted
* @param tempDir the directory where temp files are created during sort
* @param deleteInput should the input files be deleted as they are read?
* @return iterator the RawKeyValueIterator
* @throws IOException raised on errors performing I/O.
*/
public RawKeyValueIterator sortAndIterate(Path[] inFiles, Path tempDir,
boolean deleteInput) throws IOException {
Path outFile = new Path(tempDir + Path.SEPARATOR + "all.2");
if (fs.exists(outFile)) {
throw new IOException("already exists: " + outFile);
}
this.inFiles = inFiles;
//outFile will basically be used as prefix for temp files in the cases
//where sort outputs multiple sorted segments. For the single segment
//case, the outputFile itself will contain the sorted data for that
//segment
this.outFile = outFile;
int segments = sortPass(deleteInput);
if (segments > 1)
return merge(outFile.suffix(".0"), outFile.suffix(".0.index"),
tempDir);
else if (segments == 1)
return merge(new Path[]{outFile}, true, tempDir);
else return null;
}
/**
* The backwards compatible | Sorter |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 236594,
"end": 237123
} | class ____ extends ParserRuleContext {
@SuppressWarnings("this-escape")
public PrimaryExpressionContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_primaryExpression; }
@SuppressWarnings("this-escape")
public PrimaryExpressionContext() { }
public void copyFrom(PrimaryExpressionContext ctx) {
super.copyFrom(ctx);
}
}
@SuppressWarnings("CheckReturnValue")
public static | PrimaryExpressionContext |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-data-cassandra/src/dockerTest/java/smoketest/data/cassandra/SampleDataCassandraApplicationSslTests.java | {
"start": 2032,
"end": 3068
} | class ____ {
@Container
@ServiceConnection
@JksTrustStore(location = "classpath:ssl/test-ca.p12", password = "password")
@JksKeyStore(location = "classpath:ssl/test-client.p12", password = "password")
static final SecureCassandraContainer cassandra = TestImage.container(SecureCassandraContainer.class);
@Autowired
private CassandraTemplate cassandraTemplate;
@Autowired
private SampleRepository repository;
@Test
void testRepository() {
SampleEntity entity = new SampleEntity();
entity.setDescription("Look, new @DataCassandraTest!");
String id = UUID.randomUUID().toString();
entity.setId(id);
SampleEntity savedEntity = this.repository.save(entity);
SampleEntity getEntity = this.cassandraTemplate.selectOneById(id, SampleEntity.class);
assertThat(getEntity).isNotNull();
assertThat(getEntity.getId()).isNotNull();
assertThat(getEntity.getId()).isEqualTo(savedEntity.getId());
this.repository.deleteAll();
}
@TestConfiguration(proxyBeanMethods = false)
static | SampleDataCassandraApplicationSslTests |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/tools/picocli/CommandLine.java | {
"start": 159657,
"end": 159758
} | class ____ group the built-in {@link ITypeConverter} implementations.
*/
private static final | to |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/TestSchemaNormalization.java | {
"start": 2441,
"end": 5058
} | class ____ {
public static List<Object[]> cases() throws IOException {
return CaseFinder.find(data(), "fingerprint", new ArrayList<>());
}
@ParameterizedTest
@MethodSource("cases")
void canonicalization(String input, String expectedOutput) {
Locale originalDefaultLocale = Locale.getDefault();
Locale.setDefault(Locale.forLanguageTag("tr"));
Schema s = new Schema.Parser().parse(input);
long carefulFP = altFingerprint(SchemaNormalization.toParsingForm(s));
assertEquals(carefulFP, Long.parseLong(expectedOutput));
assertEqHex(carefulFP, SchemaNormalization.parsingFingerprint64(s));
Locale.setDefault(originalDefaultLocale);
}
}
private static String DATA_FILE = (System.getProperty("share.dir", "../../../share") + "/test/data/schema-tests.txt");
private static BufferedReader data() throws IOException {
return Files.newBufferedReader(Paths.get(DATA_FILE), UTF_8);
}
/**
* Compute the fingerprint of <i>bytes[s,s+l)</i> using a slow algorithm that's
* an alternative to that implemented in {@link SchemaNormalization}. Algo from
* Broder93 ("Some applications of Rabin's fingerprinting method").
*/
public static long altFingerprint(String s) {
// In our algorithm, we multiply all inputs by x^64 (which is
// equivalent to prepending it with a single "1" bit followed
// by 64 zero bits). This both deals with the fact that
// CRCs ignore leading zeros, and also ensures some degree of
// randomness for small inputs
long tmp = altExtend(SchemaNormalization.EMPTY64, 64, ONE, s.getBytes(UTF_8));
return altExtend(SchemaNormalization.EMPTY64, 64, tmp, POSTFIX);
}
private static long altExtend(long poly, int degree, long fp, byte[] b) {
final long overflowBit = 1L << (64 - degree);
for (byte b1 : b) {
for (int j = 1; j < 129; j = j << 1) {
boolean overflow = (0 != (fp & overflowBit));
fp >>>= 1;
if (0 != (j & b1))
fp |= ONE; // shift in the input bit
if (overflow) {
fp ^= poly; // hi-order coeff of poly kills overflow bit
}
}
}
return fp;
}
private static final long ONE = 0x8000000000000000L;
private static final byte[] POSTFIX = { 0, 0, 0, 0, 0, 0, 0, 0 };
private static void assertEqHex(long expected, long actual) {
assertEquals(expected, actual, () -> format("0x%016x != 0x%016x", expected, actual));
}
private static String format(String f, Object... args) {
return (new Formatter()).format(f, args).toString();
}
}
| TestFingerprintInternationalization |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesPersistentVolumesClaimsEndpointBuilderFactory.java | {
"start": 15991,
"end": 19613
} | interface ____ {
/**
* Kubernetes Persistent Volume Claim (camel-kubernetes)
* Perform operations on Kubernetes Persistent Volumes Claims and get
* notified on Persistent Volumes Claim changes.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* @return the dsl builder for the headers' name.
*/
default KubernetesPersistentVolumesClaimsHeaderNameBuilder kubernetesPersistentVolumesClaims() {
return KubernetesPersistentVolumesClaimsHeaderNameBuilder.INSTANCE;
}
/**
* Kubernetes Persistent Volume Claim (camel-kubernetes)
* Perform operations on Kubernetes Persistent Volumes Claims and get
* notified on Persistent Volumes Claim changes.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* Syntax: <code>kubernetes-persistent-volumes-claims:masterUrl</code>
*
* Path parameter: masterUrl (required)
* URL to a remote Kubernetes API server. This should only be used when
* your Camel application is connecting from outside Kubernetes. If you
* run your Camel application inside Kubernetes, then you can use local
* or client as the URL to tell Camel to run in local mode. If you
* connect remotely to Kubernetes, then you may also need some of the
* many other configuration options for secured connection with
* certificates, etc.
*
* @param path masterUrl
* @return the dsl builder
*/
default KubernetesPersistentVolumesClaimsEndpointBuilder kubernetesPersistentVolumesClaims(String path) {
return KubernetesPersistentVolumesClaimsEndpointBuilderFactory.endpointBuilder("kubernetes-persistent-volumes-claims", path);
}
/**
* Kubernetes Persistent Volume Claim (camel-kubernetes)
* Perform operations on Kubernetes Persistent Volumes Claims and get
* notified on Persistent Volumes Claim changes.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* Syntax: <code>kubernetes-persistent-volumes-claims:masterUrl</code>
*
* Path parameter: masterUrl (required)
* URL to a remote Kubernetes API server. This should only be used when
* your Camel application is connecting from outside Kubernetes. If you
* run your Camel application inside Kubernetes, then you can use local
* or client as the URL to tell Camel to run in local mode. If you
* connect remotely to Kubernetes, then you may also need some of the
* many other configuration options for secured connection with
* certificates, etc.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path masterUrl
* @return the dsl builder
*/
default KubernetesPersistentVolumesClaimsEndpointBuilder kubernetesPersistentVolumesClaims(String componentName, String path) {
return KubernetesPersistentVolumesClaimsEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Kubernetes Persistent Volume Claim component.
*/
public static | KubernetesPersistentVolumesClaimsBuilders |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/server/standard/ServerEndpointRegistrationTests.java | {
"start": 1310,
"end": 2188
} | class ____ {
@Test
void endpointPerConnection() throws Exception {
ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(Config.class);
ServerEndpointRegistration registration = new ServerEndpointRegistration("/path", EchoEndpoint.class);
registration.setBeanFactory(context.getBeanFactory());
EchoEndpoint endpoint = registration.getConfigurator().getEndpointInstance(EchoEndpoint.class);
assertThat(endpoint).isNotNull();
}
@Test
void endpointSingleton() throws Exception {
EchoEndpoint endpoint = new EchoEndpoint(new EchoService());
ServerEndpointRegistration registration = new ServerEndpointRegistration("/path", endpoint);
EchoEndpoint actual = registration.getConfigurator().getEndpointInstance(EchoEndpoint.class);
assertThat(actual).isSameAs(endpoint);
}
@Configuration
static | ServerEndpointRegistrationTests |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/NormalizedUri.java | {
"start": 1076,
"end": 1203
} | class ____ extends ValueHolder<String> implements NormalizedEndpointUri {
// must extend ValueHolder to let this | NormalizedUri |
java | quarkusio__quarkus | extensions/funqy/funqy-amazon-lambda/runtime/src/main/java/io/quarkus/funqy/lambda/event/kinesis/PipesKinesisEventHandler.java | {
"start": 725,
"end": 2156
} | class ____
implements EventHandler<List<PipesKinesisEvent>, KinesisEvent.Record, StreamsEventResponse> {
@Override
public Stream<KinesisEvent.Record> streamEvent(List<PipesKinesisEvent> event, FunqyAmazonConfig amazonConfig) {
if (event == null) {
return Stream.empty();
}
return event.stream().map(Function.identity());
}
@Override
public String getIdentifier(KinesisEvent.Record message, FunqyAmazonConfig amazonConfig) {
return message.getSequenceNumber();
}
@Override
public Supplier<InputStream> getBody(KinesisEvent.Record message, FunqyAmazonConfig amazonConfig) {
if (message.getData() == null) {
return ByteArrayInputStream::nullInputStream;
}
return () -> new ByteBufferBackedInputStream(message.getData());
}
@Override
public StreamsEventResponse createResponse(List<String> failures, FunqyAmazonConfig amazonConfig) {
if (!amazonConfig.advancedEventHandling().kinesis().reportBatchItemFailures()) {
return null;
}
return StreamsEventResponse.builder().withBatchItemFailures(
failures.stream().map(id -> BatchItemFailure.builder().withItemIdentifier(id).build()).toList()).build();
}
@Override
public Class<KinesisEvent.Record> getMessageClass() {
return KinesisEvent.Record.class;
}
}
| PipesKinesisEventHandler |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java | {
"start": 1240,
"end": 1648
} | class ____ extends ActionType<CreateTrainedModelAssignmentAction.Response> {
public static final CreateTrainedModelAssignmentAction INSTANCE = new CreateTrainedModelAssignmentAction();
public static final String NAME = "cluster:internal/xpack/ml/model_allocation/create";
private CreateTrainedModelAssignmentAction() {
super(NAME);
}
public static | CreateTrainedModelAssignmentAction |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng8572DITypeHandlerTest.java | {
"start": 1139,
"end": 2723
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
public void testCustomTypeHandler() throws Exception {
// Build the extension first
File testDir = extractResources("/mng-8572-di-type-handler");
Verifier verifier = newVerifier(new File(testDir, "extension").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng8572");
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Now use the extension in a test project
verifier = newVerifier(new File(testDir, "test").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArguments(
"install:install-file",
"-Dfile=src/main/java/org/apache/maven/its/mng8572/test/DummyClass.java",
"-DpomFile=dummy-artifact-pom.xml",
"-Dpackaging=custom",
"-DcreateChecksum=true");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier = newVerifier(new File(testDir, "test").getAbsolutePath());
verifier.setAutoclean(false);
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
// Verify that our custom type handler was used
verifier.verifyTextInLog("[INFO] [MNG-8572] Registering custom type handler for type: custom-type");
}
}
| MavenITmng8572DITypeHandlerTest |
java | spring-projects__spring-boot | cli/spring-boot-cli/src/test/java/org/springframework/boot/cli/command/OptionParsingCommandTests.java | {
"start": 911,
"end": 1202
} | class ____ {
@Test
void optionHelp() {
OptionHandler handler = new OptionHandler();
handler.option("bar", "Bar");
OptionParsingCommand command = new TestOptionParsingCommand("foo", "Foo", handler);
assertThat(command.getHelp()).contains("--bar");
}
static | OptionParsingCommandTests |
java | apache__hadoop | hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java | {
"start": 1821,
"end": 22114
} | class ____ {
private static final float DELTA = 0.001f;
@Test
public void testSetIgnoreFailure() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldIgnoreFailures());
builder.withIgnoreFailures(true);
assertTrue(builder.build().shouldIgnoreFailures());
}
@Test
public void testSetOverwrite() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldOverwrite());
builder.withOverwrite(true);
assertTrue(builder.build().shouldOverwrite());
try {
builder.withSyncFolder(true).build();
fail("Update and overwrite aren't allowed together");
} catch (IllegalArgumentException ignore) {
}
}
@Test
public void testLogPath() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertNull(builder.build().getLogPath());
final Path logPath = new Path("hdfs://localhost:8020/logs");
builder.withLogPath(logPath);
assertEquals(logPath, builder.build().getLogPath());
}
@Test
public void testSetBlokcing() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertTrue(builder.build().shouldBlock());
builder.withBlocking(false);
assertFalse(builder.build().shouldBlock());
}
@Test
public void testSetBandwidth() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertEquals(0, builder.build().getMapBandwidth(), DELTA);
builder.withMapBandwidth(11);
assertEquals(11, builder.build().getMapBandwidth(), DELTA);
}
@Test
public void testSetNonPositiveBandwidth() {
assertThrows(IllegalArgumentException.class, ()->{
new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withMapBandwidth(-11)
.build();
});
}
@Test
public void testSetZeroBandwidth() {
assertThrows(IllegalArgumentException.class, () -> {
new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withMapBandwidth(0)
.build();
});
}
@Test
public void testSetSkipCRC() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldSkipCRC());
final DistCpOptions options = builder.withSyncFolder(true).withSkipCRC(true)
.build();
assertTrue(options.shouldSyncFolder());
assertTrue(options.shouldSkipCRC());
}
@Test
public void testSetAtomicCommit() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldAtomicCommit());
builder.withAtomicCommit(true);
assertTrue(builder.build().shouldAtomicCommit());
try {
builder.withSyncFolder(true).build();
fail("Atomic and sync folders were mutually exclusive");
} catch (IllegalArgumentException ignore) {
}
}
@Test
public void testSetWorkPath() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertNull(builder.build().getAtomicWorkPath());
builder.withAtomicCommit(true);
assertNull(builder.build().getAtomicWorkPath());
final Path workPath = new Path("hdfs://localhost:8020/work");
builder.withAtomicWorkPath(workPath);
assertEquals(workPath, builder.build().getAtomicWorkPath());
}
@Test
public void testSetSyncFolders() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldSyncFolder());
builder.withSyncFolder(true);
assertTrue(builder.build().shouldSyncFolder());
}
@Test
public void testSetDeleteMissing() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldDeleteMissing());
DistCpOptions options = builder.withSyncFolder(true)
.withDeleteMissing(true)
.build();
assertTrue(options.shouldSyncFolder());
assertTrue(options.shouldDeleteMissing());
options = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withOverwrite(true)
.withDeleteMissing(true)
.build();
assertTrue(options.shouldOverwrite());
assertTrue(options.shouldDeleteMissing());
try {
new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withDeleteMissing(true)
.build();
fail("Delete missing should fail without update or overwrite options");
} catch (IllegalArgumentException e) {
assertExceptionContains("Delete missing is applicable only with update " +
"or overwrite options", e);
}
try {
new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withDeleteMissing(true)
.withUseDiff("s1", "s2")
.build();
fail("Should have failed as -delete and -diff are mutually exclusive.");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"-delete and -diff/-rdiff are mutually exclusive.", e);
}
}
@Test
public void testSetMaps() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertEquals(DistCpConstants.DEFAULT_MAPS,
builder.build().getMaxMaps());
builder.maxMaps(1);
assertEquals(1, builder.build().getMaxMaps());
builder.maxMaps(0);
assertEquals(1, builder.build().getMaxMaps());
}
@Test
public void testSetNumListtatusThreads() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"));
// If command line argument isn't set, we expect .getNumListstatusThreads
// option to be zero (so that we know when to override conf properties).
assertEquals(0, builder.build().getNumListstatusThreads());
builder.withNumListstatusThreads(12);
assertEquals(12, builder.build().getNumListstatusThreads());
builder.withNumListstatusThreads(0);
assertEquals(0, builder.build().getNumListstatusThreads());
// Ignore large number of threads.
builder.withNumListstatusThreads(MAX_NUM_LISTSTATUS_THREADS * 2);
assertEquals(MAX_NUM_LISTSTATUS_THREADS,
builder.build().getNumListstatusThreads());
}
@Test
public void testSourceListing() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"));
assertEquals(new Path("hdfs://localhost:8020/source/first"),
builder.build().getSourceFileListing());
}
@Test
public void testMissingTarget() {
assertThrows(IllegalArgumentException.class, ()->{
new DistCpOptions.Builder(new Path("hdfs://localhost:8020/source/first"),
null);
});
}
@Test
public void testToString() {
DistCpOptions option = new DistCpOptions.Builder(new Path("abc"),
new Path("xyz")).build();
String val = "DistCpOptions{atomicCommit=false, syncFolder=false, " +
"deleteMissing=false, ignoreFailures=false, overwrite=false, " +
"append=false, useDiff=false, useRdiff=false, " +
"fromSnapshot=null, toSnapshot=null, " +
"skipCRC=false, blocking=true, numListstatusThreads=0, maxMaps=20, " +
"mapBandwidth=0.0, copyStrategy='uniformsize', preserveStatus=[], " +
"atomicWorkPath=null, logPath=null, sourceFileListing=abc, " +
"sourcePaths=null, targetPath=xyz, filtersFile='null', " +
"blocksPerChunk=0, copyBufferSize=8192, verboseLog=false, " +
"directWrite=false, useiterator=false, updateRoot=false}";
String optionString = option.toString();
assertEquals(val, optionString);
assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),
DistCpOptionSwitch.ATOMIC_COMMIT.name());
}
@Test
public void testCopyStrategy() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"));
assertEquals(DistCpConstants.UNIFORMSIZE,
builder.build().getCopyStrategy());
builder.withCopyStrategy("dynamic");
assertEquals("dynamic", builder.build().getCopyStrategy());
}
@Test
public void testTargetPath() {
final DistCpOptions options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/")).build();
assertEquals(new Path("hdfs://localhost:8020/target/"),
options.getTargetPath());
}
@Test
public void testPreserve() {
DistCpOptions options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.build();
assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
assertFalse(options.shouldPreserve(FileAttribute.USER));
assertFalse(options.shouldPreserve(FileAttribute.GROUP));
assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.preserve(FileAttribute.ACL)
.build();
assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
assertFalse(options.shouldPreserve(FileAttribute.USER));
assertFalse(options.shouldPreserve(FileAttribute.GROUP));
assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
assertTrue(options.shouldPreserve(FileAttribute.ACL));
options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.preserve(FileAttribute.BLOCKSIZE)
.preserve(FileAttribute.REPLICATION)
.preserve(FileAttribute.PERMISSION)
.preserve(FileAttribute.USER)
.preserve(FileAttribute.GROUP)
.preserve(FileAttribute.CHECKSUMTYPE)
.build();
assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
assertTrue(options.shouldPreserve(FileAttribute.USER));
assertTrue(options.shouldPreserve(FileAttribute.GROUP));
assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
assertFalse(options.shouldPreserve(FileAttribute.XATTR));
}
@Test
public void testAppendOption() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withAppend(true);
assertTrue(builder.build().shouldAppend());
try {
// make sure -append is only valid when -update is specified
new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withAppend(true)
.build();
fail("Append should fail if update option is not specified");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"Append is valid only with update options", e);
}
try {
// make sure -append is invalid when skipCrc is specified
new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withAppend(true)
.withSkipCRC(true)
.build();
fail("Append should fail if skipCrc option is specified");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"Append is disallowed when skipping CRC", e);
}
}
@Test
public void testDiffOption() {
DistCpOptions options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withUseDiff("s1", "s2")
.build();
assertTrue(options.shouldUseDiff());
assertEquals("s1", options.getFromSnapshot());
assertEquals("s2", options.getToSnapshot());
options = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withUseDiff("s1", ".")
.build();
assertTrue(options.shouldUseDiff());
assertEquals("s1", options.getFromSnapshot());
assertEquals(".", options.getToSnapshot());
// make sure -diff is only valid when -update is specified
try {
new DistCpOptions.Builder(new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withUseDiff("s1", "s2")
.build();
fail("-diff should fail if -update option is not specified");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"-diff/-rdiff is valid only with -update option", e);
}
try {
new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withSyncFolder(true)
.withUseDiff("s1", "s2")
.withDeleteMissing(true)
.build();
fail("Should fail as -delete and -diff/-rdiff are mutually exclusive.");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"-delete and -diff/-rdiff are mutually exclusive.", e);
}
try {
new DistCpOptions.Builder(new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withUseDiff("s1", "s2")
.withDeleteMissing(true)
.build();
fail("-diff should fail if -update option is not specified");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"-delete and -diff/-rdiff are mutually exclusive.", e);
}
try {
new DistCpOptions.Builder(new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"))
.withDeleteMissing(true)
.withUseDiff("s1", "s2")
.build();
fail("Should have failed as -delete and -diff are mutually exclusive");
} catch (IllegalArgumentException e) {
assertExceptionContains(
"-delete and -diff/-rdiff are mutually exclusive", e);
}
}
@Test
public void testExclusionsOption() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"));
assertNull(builder.build().getFiltersFile());
builder.withFiltersFile("/tmp/filters.txt");
assertEquals("/tmp/filters.txt", builder.build().getFiltersFile());
}
@Test
public void testSetOptionsForSplitLargeFile() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/"),
new Path("hdfs://localhost:8020/target/"))
.withAppend(true)
.withSyncFolder(true);
assertFalse(builder.build().shouldPreserve(FileAttribute.BLOCKSIZE));
assertTrue(builder.build().shouldAppend());
builder.withBlocksPerChunk(5440);
assertTrue(builder.build().shouldPreserve(FileAttribute.BLOCKSIZE));
assertFalse(builder.build().shouldAppend());
}
@Test
public void testSetCopyBufferSize() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertEquals(DistCpConstants.COPY_BUFFER_SIZE_DEFAULT,
builder.build().getCopyBufferSize());
builder.withCopyBufferSize(4194304);
assertEquals(4194304,
builder.build().getCopyBufferSize());
builder.withCopyBufferSize(-1);
assertEquals(DistCpConstants.COPY_BUFFER_SIZE_DEFAULT,
builder.build().getCopyBufferSize());
}
@Test
public void testVerboseLog() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
assertFalse(builder.build().shouldVerboseLog());
try {
builder.withVerboseLog(true).build();
fail("-v should fail if -log option is not specified");
} catch (IllegalArgumentException e) {
assertExceptionContains("-v is valid only with -log option", e);
}
final Path logPath = new Path("hdfs://localhost:8020/logs");
builder.withLogPath(logPath).withVerboseLog(true);
assertTrue(builder.build().shouldVerboseLog());
}
@Test
public void testAppendToConf() {
final int expectedBlocksPerChunk = 999;
final String expectedValForEmptyConfigKey = "VALUE_OF_EMPTY_CONFIG_KEY";
DistCpOptions options = new DistCpOptions.Builder(
Collections.singletonList(
new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withBlocksPerChunk(expectedBlocksPerChunk)
.build();
Configuration config = new Configuration();
config.set("", expectedValForEmptyConfigKey);
options.appendToConf(config);
assertEquals(expectedBlocksPerChunk,
config.getInt(
DistCpOptionSwitch
.BLOCKS_PER_CHUNK
.getConfigLabel(), 0));
assertEquals(expectedValForEmptyConfigKey, config.get(""),
"Some DistCpOptionSwitch's config label is empty! " +
"Pls ensure the config label is provided when apply to config, " +
"otherwise it may not be fetched properly");
}
@Test
public void testUpdateRoot() {
final DistCpOptions options = new DistCpOptions.Builder(
Collections.singletonList(
new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withUpdateRoot(true)
.build();
assertTrue(options.shouldUpdateRoot());
}
}
| TestDistCpOptions |
java | micronaut-projects__micronaut-core | test-suite-groovy/src/test/groovy/io/micronaut/docs/config/env/LowRateLimit.java | {
"start": 75,
"end": 211
} | class ____ extends RateLimit {
public LowRateLimit(Duration period, Integer limit) {
super(period, limit);
}
}
| LowRateLimit |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/KqlErrorTests.java | {
"start": 1227,
"end": 3779
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
return paramsToSuppliers(KqlTests.parameters());
}
@Override
protected Stream<List<DataType>> testCandidates(List<TestCaseSupplier> cases, Set<List<DataType>> valid) {
// Don't test null, as it is not allowed but the expected message is not a type error - so we check it separately in VerifierTests
return super.testCandidates(cases, valid).filter(sig -> false == sig.contains(DataType.NULL));
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Kql(source, args.getFirst(), args.size() > 1 ? args.get(1) : null, EsqlTestUtils.TEST_CFG);
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
return equalTo(errorMessageStringForKql(validPerPosition, signature, (l, p) -> "keyword, text"));
}
private static String errorMessageStringForKql(
List<Set<DataType>> validPerPosition,
List<DataType> signature,
AbstractFunctionTestCase.PositionalErrorMessageSupplier positionalErrorMessageSupplier
) {
boolean invalid = false;
for (int i = 0; i < signature.size() && invalid == false; i++) {
// Need to check for nulls and bad parameters in order
if (signature.get(i) == DataType.NULL) {
return TypeResolutions.ParamOrdinal.fromIndex(i).name().toLowerCase(Locale.ROOT)
+ " argument of ["
+ sourceForSignature(signature)
+ "] cannot be null, received []";
}
if (validPerPosition.get(i).contains(signature.get(i)) == false) {
// Map expressions have different error messages
if (i == 1) {
return format(null, "second argument of [{}] must be a map expression, received []", sourceForSignature(signature));
}
break;
}
}
try {
return typeErrorMessage(true, validPerPosition, signature, positionalErrorMessageSupplier);
} catch (IllegalStateException e) {
// This means all the positional args were okay, so the expected error is for nulls or from the combination
return EsqlBinaryComparison.formatIncompatibleTypesMessage(signature.get(0), signature.get(1), sourceForSignature(signature));
}
}
}
| KqlErrorTests |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/reflect/TypeUtilsTest.java | {
"start": 6287,
"end": 6364
} | class ____ implements This<String, B> {
// empty
}
public | Tester |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.