language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java
|
{
"start": 1027,
"end": 2488
}
|
class ____ extends ESTestCase {
public void testInvalidLevel() {
final NodeIndicesStats stats = new NodeIndicesStats(
null,
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
randomBoolean()
);
final String level = randomAlphaOfLength(16);
final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level));
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContentChunked(params));
assertThat(
e,
hasToString(containsString("level parameter must be one of [node] or [indices] or [shards] but was [" + level + "]"))
);
}
public void testIncludeShardsStatsFlag() {
final Index index = new Index("test", "_na_");
final Map<Index, List<IndexShardStats>> statsByShards = new HashMap<>();
final List<IndexShardStats> emptyList = List.of();
statsByShards.put(index, emptyList);
NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap(), statsByShards, Collections.emptyMap(), true);
assertThat(stats.getShardStats(index), sameInstance(emptyList));
stats = new NodeIndicesStats(null, Collections.emptyMap(), statsByShards, Collections.emptyMap(), false);
assertThat(stats.getShardStats(index), nullValue());
}
}
|
NodeIndicesStatsTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java
|
{
"start": 1319,
"end": 8706
}
|
class ____ extends AbstractScalarFunctionTestCase {
public SubstringTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return parameterSuppliersFromTypedDataWithDefaultChecks(
true,
List.of(new TestCaseSupplier("Substring basic test", List.of(DataType.KEYWORD, DataType.INTEGER, DataType.INTEGER), () -> {
int start = between(1, 8);
int length = between(1, 10 - start);
String text = randomAlphaOfLength(10);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"),
new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"),
new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end")
),
"SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]",
DataType.KEYWORD,
equalTo(new BytesRef(text.substring(start - 1, start + length - 1)))
);
}),
new TestCaseSupplier(
"Substring basic test with text input",
List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER),
() -> {
int start = between(1, 8);
int length = between(1, 10 - start);
String text = randomAlphaOfLength(10);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"),
new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"),
new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end")
),
"SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]",
DataType.KEYWORD,
equalTo(new BytesRef(text.substring(start - 1, start + length - 1)))
);
}
),
new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> {
int start = between(1, 8);
int length = between(1, 10 - start);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(""), DataType.TEXT, "str"),
new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"),
new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end")
),
"SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]",
DataType.KEYWORD,
equalTo(new BytesRef(""))
);
})
)
);
}
public Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> typedData) {
String str = ((BytesRef) typedData.get(0).data()).utf8ToString();
int start = (Integer) typedData.get(1).data();
int end = (Integer) typedData.get(2).data();
return equalTo(new BytesRef(str.substring(start - 1, start + end - 1)));
}
public void testNoLengthToString() {
assertThat(
evaluator(new Substring(Source.EMPTY, field("str", DataType.KEYWORD), field("start", DataType.INTEGER), null)).get(
driverContext()
).toString(),
equalTo("SubstringNoLengthEvaluator[str=Attribute[channel=0], start=Attribute[channel=1]]")
);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Substring(source, args.get(0), args.get(1), args.size() < 3 ? null : args.get(2));
}
public void testWholeString() {
assertThat(process("a tiger", 0, null), equalTo("a tiger"));
assertThat(process("a tiger", 1, null), equalTo("a tiger"));
}
public void testPositiveStartNoLength() {
assertThat(process("a tiger", 3, null), equalTo("tiger"));
}
public void testNegativeStartNoLength() {
assertThat(process("a tiger", -3, null), equalTo("ger"));
}
public void testPositiveStartMassiveLength() {
assertThat(process("a tiger", 3, 1000), equalTo("tiger"));
}
public void testNegativeStartMassiveLength() {
assertThat(process("a tiger", -3, 1000), equalTo("ger"));
}
public void testMassiveNegativeStartNoLength() {
assertThat(process("a tiger", -300, null), equalTo("a tiger"));
}
public void testMassiveNegativeStartSmallLength() {
assertThat(process("a tiger", -300, 1), equalTo("a"));
}
public void testPositiveStartReasonableLength() {
assertThat(process("a tiger", 1, 3), equalTo("a t"));
}
public void testUnicode() {
final String s = "a\ud83c\udf09tiger";
assert s.length() == 8 && s.codePointCount(0, s.length()) == 7;
assertThat(process(s, 3, 1000), equalTo("tiger"));
assertThat(process(s, -6, 1000), equalTo("\ud83c\udf09tiger"));
assert "🐱".length() == 2 && "🐶".length() == 2;
assert "🐱".codePointCount(0, 2) == 1 && "🐶".codePointCount(0, 2) == 1;
assert "🐱".getBytes(UTF_8).length == 4 && "🐶".getBytes(UTF_8).length == 4;
for (Integer len : new Integer[] { null, 100, 100000 }) {
assertThat(process(s, 3, len), equalTo("tiger"));
assertThat(process(s, -6, len), equalTo("\ud83c\udf09tiger"));
assertThat(process("🐱Meow!🐶Woof!", 0, len), equalTo("🐱Meow!🐶Woof!"));
assertThat(process("🐱Meow!🐶Woof!", 1, len), equalTo("🐱Meow!🐶Woof!"));
assertThat(process("🐱Meow!🐶Woof!", 2, len), equalTo("Meow!🐶Woof!"));
assertThat(process("🐱Meow!🐶Woof!", 3, len), equalTo("eow!🐶Woof!"));
}
}
public void testNegativeLength() {
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> process("a tiger", 1, -1));
assertThat(ex.getMessage(), containsString("Length parameter cannot be negative, found [-1]"));
}
private String process(String str, int start, Integer length) {
try (
EvalOperator.ExpressionEvaluator eval = evaluator(
new Substring(
Source.EMPTY,
field("str", DataType.KEYWORD),
new Literal(Source.EMPTY, start, DataType.INTEGER),
length == null ? null : new Literal(Source.EMPTY, length, DataType.INTEGER)
)
).get(driverContext());
Block block = eval.eval(row(List.of(new BytesRef(str))))
) {
return block.isNull(0) ? null : ((BytesRef) toJavaObject(block, 0)).utf8ToString();
}
}
}
|
SubstringTests
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/KeyMap.java
|
{
"start": 22226,
"end": 22978
}
|
interface ____<K, V> {
/**
* Called whenever the traversal starts with a new key.
*
* @param key The key traversed.
* @throws Exception Method forwards all exceptions.
*/
void startNewKey(K key) throws Exception;
/**
* Called for each value found for the current key.
*
* @param value The next value.
* @throws Exception Method forwards all exceptions.
*/
void nextValue(V value) throws Exception;
/**
* Called when the traversal for the current key is complete.
*
* @throws Exception Method forwards all exceptions.
*/
void keyDone() throws Exception;
}
}
|
TraversalEvaluator
|
java
|
apache__flink
|
flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/storage/GSBlobStorageImpl.java
|
{
"start": 7186,
"end": 7945
}
|
class ____ implements GSBlobStorage.BlobMetadata {
private final GSBlobIdentifier blobIdentifier;
private final Blob blob;
private BlobMetadata(GSBlobIdentifier blobIdentifier, Blob blob) {
this.blobIdentifier = Preconditions.checkNotNull(blobIdentifier);
this.blob = Preconditions.checkNotNull(blob);
}
@Override
public String getChecksum() {
LOGGER.trace("Getting checksum for blob {}", blobIdentifier);
String checksum = blob.getCrc32c();
LOGGER.trace("Found checksum for blob {}: {}", blobIdentifier, checksum);
return checksum;
}
}
/** Blob write channel, wraps Google storage WriteChannel. */
static
|
BlobMetadata
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentCreatorTest.java
|
{
"start": 4089,
"end": 4908
}
|
interface ____ {",
" SimpleComponent build();",
" }",
"}");
CompilerTests.daggerCompiler(injectableTypeFile, componentFile)
.withProcessingOptions(compilerOptions)
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerSimpleComponent"));
});
}
@Test
public void testCanInstantiateModulesUserCannotSet() throws Exception {
assume().that(compilerType).isEqualTo(JAVAC);
Source module =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"",
"@Module",
"final
|
Builder
|
java
|
apache__camel
|
components/camel-avro-rpc/camel-avro-rpc-component/src/test/java/org/apache/camel/avro/generated/Key.java
|
{
"start": 3266,
"end": 5353
}
|
class ____ extends org.apache.avro.specific.SpecificRecordBuilderBase<Key>
implements org.apache.avro.data.RecordBuilder<Key> {
private java.lang.CharSequence key;
/** Creates a new Builder */
private Builder() {
super(org.apache.camel.avro.generated.Key.SCHEMA$);
}
/** Creates a Builder by copying an existing Builder */
private Builder(org.apache.camel.avro.generated.Key.Builder other) {
super(other);
}
/** Creates a Builder by copying an existing Key instance */
private Builder(org.apache.camel.avro.generated.Key other) {
super(org.apache.camel.avro.generated.Key.SCHEMA$);
if (isValidValue(fields()[0], other.key)) {
this.key = data().deepCopy(fields()[0].schema(), other.key);
fieldSetFlags()[0] = true;
}
}
/** Gets the value of the 'key' field */
public java.lang.CharSequence getKey() {
return key;
}
/** Sets the value of the 'key' field */
public org.apache.camel.avro.generated.Key.Builder setKey(java.lang.CharSequence value) {
validate(fields()[0], value);
this.key = value;
fieldSetFlags()[0] = true;
return this;
}
/** Checks whether the 'key' field has been set */
public boolean hasKey() {
return fieldSetFlags()[0];
}
/** Clears the value of the 'key' field */
public org.apache.camel.avro.generated.Key.Builder clearKey() {
key = null;
fieldSetFlags()[0] = false;
return this;
}
@Override
public Key build() {
try {
Key record = new Key();
record.key = fieldSetFlags()[0] ? this.key : (java.lang.CharSequence) defaultValue(fields()[0]);
return record;
} catch (Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
}
|
Builder
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/network/ServerConnectionIdTest.java
|
{
"start": 1277,
"end": 9830
}
|
class ____ {
@Test
public void testFromString() {
// Test valid connection id
String connectionIdString = "localhost:9092-localhost:9093-1-2";
Optional<ServerConnectionId> serverConnectionIdOptional = ServerConnectionId.fromString(connectionIdString);
assertTrue(serverConnectionIdOptional.isPresent());
ServerConnectionId serverConnectionId = serverConnectionIdOptional.get();
assertEquals("localhost", serverConnectionId.localHost());
assertEquals(9092, serverConnectionId.localPort());
assertEquals("localhost", serverConnectionId.remoteHost());
assertEquals(9093, serverConnectionId.remotePort());
assertEquals(1, serverConnectionId.processorId());
assertEquals(2, serverConnectionId.index());
connectionIdString = "localhost:9092-127.0.0.1:9093-0-0";
serverConnectionIdOptional = ServerConnectionId.fromString(connectionIdString);
assertTrue(serverConnectionIdOptional.isPresent());
serverConnectionId = serverConnectionIdOptional.get();
assertEquals("localhost", serverConnectionId.localHost());
assertEquals(9092, serverConnectionId.localPort());
assertEquals("127.0.0.1", serverConnectionId.remoteHost());
assertEquals(9093, serverConnectionId.remotePort());
assertEquals(0, serverConnectionId.processorId());
assertEquals(0, serverConnectionId.index());
// IPv6 endpoints
connectionIdString = "2001:db8:0:0:0:0:0:1:9092-127.0.0.1:9093-1-2";
serverConnectionIdOptional = ServerConnectionId.fromString(connectionIdString);
assertTrue(serverConnectionIdOptional.isPresent());
serverConnectionId = serverConnectionIdOptional.get();
assertEquals("2001:db8:0:0:0:0:0:1", serverConnectionId.localHost());
assertEquals(9092, serverConnectionId.localPort());
assertEquals("127.0.0.1", serverConnectionId.remoteHost());
assertEquals(9093, serverConnectionId.remotePort());
assertEquals(1, serverConnectionId.processorId());
assertEquals(2, serverConnectionId.index());
connectionIdString = "2002:db9:1:0:0:0:0:1:9092-2001:db8::1:9093-0-1";
serverConnectionIdOptional = ServerConnectionId.fromString(connectionIdString);
assertTrue(serverConnectionIdOptional.isPresent());
serverConnectionId = serverConnectionIdOptional.get();
assertEquals("2002:db9:1:0:0:0:0:1", serverConnectionId.localHost());
assertEquals(9092, serverConnectionId.localPort());
assertEquals("2001:db8::1", serverConnectionId.remoteHost());
assertEquals(9093, serverConnectionId.remotePort());
assertEquals(0, serverConnectionId.processorId());
assertEquals(1, serverConnectionId.index());
}
@Test
public void testFromStringInvalid() {
// Test invalid connection id params length
String connectionIdString = "localhost:9092-localhost:9093-1";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
connectionIdString = "localhost:9092-localhost:9093-1-2-3";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// Invalid separator
connectionIdString = "localhost-9092-localhost:9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
connectionIdString = "localhost:9092:localhost-9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// No separator in port
connectionIdString = "localhost9092-localhost:9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
connectionIdString = "localhost:9092-localhost9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// Invalid port
connectionIdString = "localhost:abcd-localhost:9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
connectionIdString = "localhost:9092-localhost:abcd-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// Invalid processorId
connectionIdString = "localhost:9092-localhost:9093-a-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// Invalid index
connectionIdString = "localhost:9092-localhost:9093-1-b";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
// Invalid IPv6 address
connectionIdString = "[2001:db8:0:0:0:0:0:1]:9092-127.0.0.1:9093-1-2";
assertFalse(ServerConnectionId.fromString(connectionIdString).isPresent());
}
@Test
public void testGenerateConnectionId() throws IOException {
Socket socket = mock(Socket.class);
when(socket.getLocalAddress()).thenReturn(InetAddress.getByName("127.0.0.1"));
when(socket.getLocalPort()).thenReturn(9092);
when(socket.getInetAddress()).thenReturn(InetAddress.getByName("127.0.0.1"));
when(socket.getPort()).thenReturn(9093);
assertEquals("127.0.0.1:9092-127.0.0.1:9093-0-0", ServerConnectionId.generateConnectionId(socket, 0, 0));
assertEquals("127.0.0.1:9092-127.0.0.1:9093-1-2", ServerConnectionId.generateConnectionId(socket, 1, 2));
}
@Test
public void testGenerateConnectionIdIpV6() throws IOException {
Socket socket = mock(Socket.class);
// The test should pass when the address is enclosed in brackets for socket. As getHostAddress()
// returns the address without brackets.
when(socket.getLocalAddress()).thenReturn(InetAddress.getByName("[2001:db8::1]"));
when(socket.getLocalPort()).thenReturn(9092);
when(socket.getInetAddress()).thenReturn(InetAddress.getByName("127.0.0.1"));
when(socket.getPort()).thenReturn(9093);
assertEquals("2001:db8:0:0:0:0:0:1:9092-127.0.0.1:9093-1-2", ServerConnectionId.generateConnectionId(socket, 1, 2));
when(socket.getLocalAddress()).thenReturn(InetAddress.getByName("[2002:db9:1::1]"));
when(socket.getLocalPort()).thenReturn(9092);
when(socket.getInetAddress()).thenReturn(InetAddress.getByName("[2001:db8::1]"));
when(socket.getPort()).thenReturn(9093);
assertEquals("2002:db9:1:0:0:0:0:1:9092-2001:db8:0:0:0:0:0:1:9093-1-2", ServerConnectionId.generateConnectionId(socket, 1, 2));
// Without brackets
when(socket.getLocalAddress()).thenReturn(InetAddress.getByName("2002:db9:1::1"));
when(socket.getLocalPort()).thenReturn(9092);
when(socket.getInetAddress()).thenReturn(InetAddress.getByName("2001:db8::1"));
when(socket.getPort()).thenReturn(9093);
assertEquals("2002:db9:1:0:0:0:0:1:9092-2001:db8:0:0:0:0:0:1:9093-1-2", ServerConnectionId.generateConnectionId(socket, 1, 2));
}
@Test
public void testParseHostPort() {
Optional<Map.Entry<String, Integer>> hostPortEntry = ServerConnectionId.parseHostPort("myhost:9092");
assertTrue(hostPortEntry.isPresent());
assertEquals("myhost", hostPortEntry.get().getKey());
assertEquals(9092, hostPortEntry.get().getValue());
hostPortEntry = ServerConnectionId.parseHostPort("127.0.0.1:9092");
assertTrue(hostPortEntry.isPresent());
assertEquals("127.0.0.1", hostPortEntry.get().getKey());
assertEquals(9092, hostPortEntry.get().getValue());
// IPv6 endpoint
hostPortEntry = ServerConnectionId.parseHostPort("2001:db8::1:9092");
assertTrue(hostPortEntry.isPresent());
assertEquals("2001:db8::1", hostPortEntry.get().getKey());
assertEquals(9092, hostPortEntry.get().getValue());
}
@Test
public void testParseHostPortInvalid() {
// Invalid separator
Optional<Map.Entry<String, Integer>> hostPortEntry = ServerConnectionId.parseHostPort("myhost-9092");
assertFalse(hostPortEntry.isPresent());
// No separator
hostPortEntry = ServerConnectionId.parseHostPort("myhost9092");
assertFalse(hostPortEntry.isPresent());
// Invalid port
hostPortEntry = ServerConnectionId.parseHostPort("myhost:abcd");
assertFalse(hostPortEntry.isPresent());
// Invalid IPv6 endpoint
hostPortEntry = ServerConnectionId.parseHostPort("[2001:db8::1]:9092");
assertFalse(hostPortEntry.isPresent());
}
}
|
ServerConnectionIdTest
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/PermitAllSupportTests.java
|
{
"start": 4265,
"end": 4701
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().authenticated())
.formLogin((login) -> login
.loginPage("/xyz").permitAll()
.loginProcessingUrl("/abc?def").permitAll());
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static
|
PermitAllConfig
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/reactive/server/DefaultWebTestClient.java
|
{
"start": 24492,
"end": 24993
}
|
class ____ {
static Configuration getConfiguration(@Nullable JsonEncoderDecoder jsonEncoderDecoder) {
Configuration jsonPathConfiguration = Configuration.defaultConfiguration();
if (jsonEncoderDecoder != null) {
MappingProvider mappingProvider = new EncoderDecoderMappingProvider(
jsonEncoderDecoder.encoder(), jsonEncoderDecoder.decoder());
return jsonPathConfiguration.mappingProvider(mappingProvider);
}
return jsonPathConfiguration;
}
}
}
|
JsonPathConfigurationProvider
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/mapper/LongScriptMapperTests.java
|
{
"start": 799,
"end": 3224
}
|
class ____ extends MapperScriptTestCase<LongFieldScript.Factory> {
private static LongFieldScript.Factory factory(Consumer<LongFieldScript> executor) {
return new LongFieldScript.Factory() {
@Override
public LongFieldScript.LeafFactory newFactory(
String fieldName,
Map<String, Object> params,
SearchLookup searchLookup,
OnScriptError onScriptError
) {
return new LongFieldScript.LeafFactory() {
@Override
public LongFieldScript newInstance(LeafReaderContext ctx) {
return new LongFieldScript(fieldName, params, searchLookup, OnScriptError.FAIL, ctx) {
@Override
public void execute() {
executor.accept(this);
}
};
}
};
}
};
}
@Override
protected String type() {
return NumberFieldMapper.NumberType.LONG.typeName();
}
@Override
protected LongFieldScript.Factory serializableScript() {
return factory(s -> {});
}
@Override
protected LongFieldScript.Factory errorThrowingScript() {
return factory(s -> { throw new UnsupportedOperationException("Oops"); });
}
@Override
protected LongFieldScript.Factory singleValueScript() {
return factory(s -> s.emit(4));
}
@Override
protected LongFieldScript.Factory multipleValuesScript() {
return factory(s -> {
s.emit(1);
s.emit(2);
});
}
@Override
protected void assertMultipleValues(List<IndexableField> fields) {
assertEquals(2, fields.size());
assertEquals("LongField <field:1>", fields.get(0).toString());
assertEquals("LongField <field:2>", fields.get(1).toString());
}
@Override
protected void assertDocValuesDisabled(List<IndexableField> fields) {
assertEquals(1, fields.size());
assertEquals("LongPoint <field:4>", fields.get(0).toString());
}
@Override
protected void assertIndexDisabled(List<IndexableField> fields) {
assertEquals(1, fields.size());
assertEquals("docValuesType=SORTED_NUMERIC<field:4>", fields.get(0).toString());
}
}
|
LongScriptMapperTests
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryTests.java
|
{
"start": 33593,
"end": 37407
}
|
class ____ {
@Test
void makeEmptyTempDirectoryNonReadable(@TempDir Path tempDir) {
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithFileNonReadable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt"));
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithEmptyFolderNonReadable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonWritableFileNonReadable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt")).toFile().setWritable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonReadableFileNonReadable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt")).toFile().setReadable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonWritableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setWritable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonReadableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setReadable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonExecutableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setExecutable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonWritableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setWritable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonReadableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setReadable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonExecutableFolderNonReadable(@TempDir Path tempDir) throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setExecutable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderNonReadable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderContainingNonWritableFileNonReadable(@TempDir Path tempDir)
throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt")).toFile().setWritable(false);
tempDir.toFile().setReadable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderContainingNonReadableFileNonReadable(@TempDir Path tempDir)
throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt")).toFile().setReadable(false);
tempDir.toFile().setReadable(false);
}
}
@Nested
|
NonReadable
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
|
{
"start": 3064,
"end": 69417
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestSnapshotDiffReport.class);
{
SnapshotTestHelper.disableLogs();
}
private static final long SEED = 0;
private static final short REPLICATION = 3;
private static final short REPLICATION_1 = 2;
private static final long BLOCKSIZE = 1024;
private static final long BUFFERLEN = BLOCKSIZE / 2;
private static final long FILELEN = BLOCKSIZE * 2;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem hdfs;
private final HashMap<Path, Integer> snapshotNumberMap = new HashMap<Path, Integer>();
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE,
true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT, 3);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.format(true).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private String genSnapshotName(Path snapshotDir) {
int sNum = -1;
if (snapshotNumberMap.containsKey(snapshotDir)) {
sNum = snapshotNumberMap.get(snapshotDir);
}
snapshotNumberMap.put(snapshotDir, ++sNum);
return "s" + sNum;
}
void modifyAndCreateSnapshot(Path modifyDir, Path[] snapshotDirs)
throws Exception {
modifyAndCreateSnapshot(modifyDir, snapshotDirs, hdfs, this::genSnapshotName);
}
/**
* Create/modify/delete files under a given directory, also create snapshots
* of directories.
*/
static void modifyAndCreateSnapshot(Path modifyDir, Path[] snapshotDirs,
DistributedFileSystem hdfs, Function<Path, String> getSnapshotName)
throws Exception {
Path file10 = new Path(modifyDir, "file10");
Path file11 = new Path(modifyDir, "file11");
Path file12 = new Path(modifyDir, "file12");
Path file13 = new Path(modifyDir, "file13");
Path link13 = new Path(modifyDir, "link13");
Path file14 = new Path(modifyDir, "file14");
Path file15 = new Path(modifyDir, "file15");
DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, SEED);
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, SEED);
DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, SEED);
DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, SEED);
// create link13
hdfs.createSymlink(file13, link13, false);
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.allowSnapshot(snapshotDir);
hdfs.createSnapshot(snapshotDir, getSnapshotName.apply(snapshotDir));
}
// delete file11
hdfs.delete(file11, true);
// modify file12
hdfs.setReplication(file12, REPLICATION);
// modify file13
hdfs.setReplication(file13, REPLICATION);
// delete link13
hdfs.delete(link13, false);
// create file14
DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, SEED);
// create file15
DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, SEED);
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.createSnapshot(snapshotDir, getSnapshotName.apply(snapshotDir));
}
// create file11 again
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, SEED);
// delete file12
hdfs.delete(file12, true);
// modify file13
hdfs.setReplication(file13, (short) (REPLICATION - 2));
// create link13 again
hdfs.createSymlink(file13, link13, false);
// delete file14
hdfs.delete(file14, true);
// modify file15
hdfs.setReplication(file15, (short) (REPLICATION - 1));
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.createSnapshot(snapshotDir, getSnapshotName.apply(snapshotDir));
}
// modify file10
hdfs.setReplication(file10, (short) (REPLICATION + 1));
}
/**
* Check the correctness of the diff reports.
*/
private void verifyDiffReport(Path dir, String from, String to,
DiffReportEntry... entries) throws IOException {
DFSTestUtil.verifySnapshotDiffReport(hdfs, dir, from, to, entries);
}
/**
 * Test the computation and representation of diff between snapshots.
 */
@Test
@Timeout(value = 60)
public void testDiffReport() throws Exception {
  // Snapshots are created on both sub1 and its descendant subsubsub1
  // below, so nested snapshottable directories must be enabled.
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  Path subsub1 = new Path(sub1, "subsub1");
  Path subsubsub1 = new Path(subsub1, "subsubsub1");
  hdfs.mkdirs(subsubsub1);
  // Two rounds of file churn; each round snapshots both directories.
  modifyAndCreateSnapshot(sub1, new Path[]{sub1, subsubsub1});
  modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1, subsubsub1});
  // An unknown snapshot name must be rejected with a descriptive error.
  final String invalidName = "invalid";
  try {
    hdfs.getSnapshotDiffReport(sub1, invalidName, invalidName);
    fail("Expect exception when providing invalid snapshot name " +
        "for diff report");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot find the snapshot of directory " + sub1 + " with name "
            + invalidName, e);
  }
  // diff between the same snapshot
  SnapshotDiffReport report = hdfs.getSnapshotDiffReport(sub1, "s0", "s0");
  LOG.info(report.toString());
  assertEquals(0, report.getDiffList().size());
  // "" denotes the current tree; diffing it against itself is also empty.
  report = hdfs.getSnapshotDiffReport(sub1, "", "");
  LOG.info(report.toString());
  assertEquals(0, report.getDiffList().size());
  // A null fromSnapshot is an illegal argument.
  try {
    report = hdfs.getSnapshotDiffReport(subsubsub1, null, "s2");
    fail("Expect exception when providing null fromSnapshot ");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("null fromSnapshot", e);
  }
  // No changes happened under subsubsub1 between s0 and s2.
  report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
  LOG.info(report.toString());
  assertEquals(0, report.getDiffList().size());
  // test path with scheme also works
  report = hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1),
      "s0", "s2");
  LOG.info(report.toString());
  assertEquals(0, report.getDiffList().size());
  // s0 -> s2: only changes directly under sub1.
  verifyDiffReport(sub1, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
  // s0 -> s5: additionally covers the churn under subsub1/subsubsub1.
  verifyDiffReport(sub1, "s0", "s5",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  // s2 -> s5: only file10 and the nested dir changed in that window.
  verifyDiffReport(sub1, "s2", "s5",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  // s3 vs the current state ("").
  verifyDiffReport(sub1, "s3", "",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
}
/**
 * Snapshot diff reports may be requested on descendants of a snapshot
 * root, but must fail for directories that are neither snapshottable nor
 * under a snapshot root.
 */
@Test
@Timeout(value = 60)
public void testSnapRootDescendantDiffReport() throws Exception {
  Path subSub = new Path(sub1, "subsub1");
  Path subSubSub = new Path(subSub, "subsubsub1");
  Path nonSnapDir = new Path(dir, "non_snap");
  hdfs.mkdirs(subSubSub);
  hdfs.mkdirs(nonSnapDir);
  // Three rounds of churn; snapshots are taken on sub1 (the snap root)
  // each time.
  modifyAndCreateSnapshot(sub1, new Path[]{sub1});
  modifyAndCreateSnapshot(subSub, new Path[]{sub1});
  modifyAndCreateSnapshot(subSubSub, new Path[]{sub1});
  // Descendant dirs of the snapshot root are valid diff-report targets.
  try {
    hdfs.getSnapshotDiffReport(subSub, "s1", "s2");
    hdfs.getSnapshotDiffReport(subSubSub, "s1", "s2");
  } catch (IOException e) {
    fail("Unexpected exception when getting snapshot diff report " +
        subSub + ": " + e);
  }
  // A dir outside any snapshot root must be rejected.
  try {
    hdfs.getSnapshotDiffReport(nonSnapDir, "s1", "s2");
    fail("Snapshot diff report on a non snapshot directory '"
        + nonSnapDir.getName() + "'should fail!");
  } catch (SnapshotException e) {
    GenericTestUtils.assertExceptionContains(
        "The path " + nonSnapDir +
            " is neither snapshottable nor under a snapshot root!", e);
  }
  // Invalid snapshot names are reported against the snapshot root (sub1),
  // not the descendant the report was requested on.
  final String invalidName = "invalid";
  try {
    hdfs.getSnapshotDiffReport(subSub, invalidName, invalidName);
    fail("Expect exception when providing invalid snapshot name " +
        "for diff report");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot find the snapshot of directory " + sub1 + " with name "
            + invalidName, e);
  }
  // diff between the same snapshot
  SnapshotDiffReport report = hdfs.getSnapshotDiffReport(subSub, "s0", "s0");
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(subSub, "", "");
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(subSubSub, "s0", "s2");
  assertEquals(0, report.getDiffList().size());
  // A fully-qualified (scheme-bearing) path works too.
  report = hdfs.getSnapshotDiffReport(
      hdfs.makeQualified(subSubSub), "s0", "s2");
  assertEquals(0, report.getDiffList().size());
  verifyDescendantDiffReports(sub1, subSub, subSubSub);
}
/**
 * Verify the diff reports requested on the snapshot root and on its
 * descendant directories, for several snapshot windows. Paths in each
 * report are relative to the directory the report was requested on, and
 * each report only contains changes within that directory's subtree.
 *
 * @param snapDir       the snapshot root
 * @param snapSubDir    a direct descendant of the snapshot root
 * @param snapSubSubDir a second-level descendant of the snapshot root
 */
private void verifyDescendantDiffReports(final Path snapDir,
    final Path snapSubDir, final Path snapSubSubDir) throws
    IOException {
  // s0 -> s2: changes only directly under the snapshot root, so the
  // descendant reports are empty.
  verifyDiffReport(snapDir, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
  verifyDiffReport(snapSubDir, "s0", "s2", new DiffReportEntry[]{});
  verifyDiffReport(snapSubSubDir, "s0", "s2", new DiffReportEntry[]{});
  // s0 -> s8: full window; the root report sees everything with
  // root-relative paths.
  verifyDiffReport(snapDir, "s0", "s8",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file15")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  // Same window on the descendant: only its own subtree, relative paths.
  verifyDiffReport(snapSubDir, "s0", "s8",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file15")));
  verifyDiffReport(snapSubSubDir, "s0", "s8",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file15")));
  // s2 -> s5 window.
  verifyDiffReport(snapDir, "s2", "s5",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file15")));
  verifyDiffReport(snapSubDir, "s2", "s5",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file15")));
  // Nothing under the second-level descendant in that window.
  verifyDiffReport(snapSubSubDir, "s2", "s5",
      new DiffReportEntry[]{});
  // s3 vs the current tree ("").
  verifyDiffReport(snapDir, "s3", "",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/file12")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/file10")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/link13")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
  verifyDiffReport(snapSubDir, "s3", "",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsubsub1/file15")));
  verifyDiffReport(snapSubSubDir, "s3", "",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file15")));
}
/**
 * Diff reports on snapshot-root descendants when files are renamed:
 * a rename is reported as RENAME only when both source and destination
 * are inside the scope of the directory the report is requested on;
 * otherwise it degrades to DELETE (moved out) or CREATE (moved in).
 */
@Test
public void testSnapRootDescendantDiffReportWithRename() throws Exception {
  Path subSub = new Path(sub1, "subsub1");
  Path subSubSub = new Path(subSub, "subsubsub1");
  Path nonSnapDir = new Path(dir, "non_snap");
  hdfs.mkdirs(subSubSub);
  hdfs.mkdirs(nonSnapDir);
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  Path file20 = new Path(subSubSub, "file20");
  DFSTestUtil.createFile(hdfs, file20, BLOCKSIZE, REPLICATION_1, SEED);
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  // Case 1: Move a file away from a descendant dir, but within the snap root.
  // mv <snaproot>/<subsub>/<subsubsub>/file20 <snaproot>/<subsub>/file20
  hdfs.rename(file20, new Path(subSub, file20.getName()));
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  // The snapshot diff for the snap root detects the change as file rename
  // as the file move happened within the snap root.
  verifyDiffReport(sub1, "s1", "s2",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file20"),
          DFSUtil.string2Bytes("subsub1/file20")));
  // The snapshot diff for the descendant dir <subsub> still detects the
  // change as file rename as the file move happened under the snap root
  // descendant dir.
  verifyDiffReport(subSub, "s1", "s2",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsubsub1")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("subsubsub1/file20"),
          DFSUtil.string2Bytes("file20")));
  // The snapshot diff for the descendant dir <subsubsub> detects the
  // change as file delete as the file got moved from its scope.
  verifyDiffReport(subSubSub, "s1", "s2",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("file20")));
  // Case 2: Move the file from the snap root descendant dir to any
  // non snap root dir. mv <snaproot>/<subsub>/file20 <nonsnaproot>/file20.
  hdfs.rename(new Path(subSub, file20.getName()),
      new Path(dir, file20.getName()));
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  // The snapshot diff for the snap root detects the change as file delete
  // as the file got moved away from the snap root dir to some non snap
  // root dir.
  verifyDiffReport(sub1, "s2", "s3",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/file20")));
  // The snapshot diff for the snap root descendant <subsub> detects the
  // change as file delete as the file was previously under its scope and
  // got moved away from its scope.
  verifyDiffReport(subSub, "s2", "s3",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("file20")));
  // The file was already not under the descendant dir <subsubsub> scope.
  // So, the snapshot diff report for the descendant dir doesn't
  // show the file rename at all.
  verifyDiffReport(subSubSub, "s2", "s3",
      new DiffReportEntry[]{});
  // Case 3: Move the file from the non-snap root dir to snap root dir
  // mv <nonsnaproot>/file20 <snaproot>/file20
  hdfs.rename(new Path(dir, file20.getName()),
      new Path(sub1, file20.getName()));
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  // Snap root directory should show the file moved in as a new file.
  verifyDiffReport(sub1, "s3", "s4",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file20")));
  // Snap descendant directories don't have visibility to the moved in file.
  verifyDiffReport(subSub, "s3", "s4",
      new DiffReportEntry[]{});
  verifyDiffReport(subSubSub, "s3", "s4",
      new DiffReportEntry[]{});
  hdfs.rename(new Path(sub1, file20.getName()),
      new Path(subSub, file20.getName()));
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  // Snap root directory now shows the rename as both source and
  // destination paths are under the snap root.
  verifyDiffReport(sub1, "s4", "s5",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("file20"),
          DFSUtil.string2Bytes("subsub1/file20")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")));
  // For the descendant directory under the snap root, the file
  // moved in shows up as a new file created.
  verifyDiffReport(subSub, "s4", "s5",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("file20")));
  verifyDiffReport(subSubSub, "s4", "s5",
      new DiffReportEntry[]{});
  // Case 4: Snapshot diff for the newly created descendant directory.
  Path subSubSub2 = new Path(subSub, "subsubsub2");
  hdfs.mkdirs(subSubSub2);
  Path file30 = new Path(subSubSub2, "file30");
  DFSTestUtil.createFile(hdfs, file30, BLOCKSIZE, REPLICATION_1, SEED);
  // Removed a dead "hdfs.createFile(file30)" call that used to follow:
  // DistributedFileSystem#createFile(Path) only returns an output-stream
  // builder, which was never built or closed, so the call had no effect
  // (the file was already created on the previous line).
  hdfs.createSnapshot(sub1, genSnapshotName(sub1));
  verifyDiffReport(sub1, "s5", "s6",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub2")));
  verifyDiffReport(subSubSub2, "s5", "s6",
      new DiffReportEntry[]{});
  // Snapshot names belong to the snap root; a diff between snapshots taken
  // before the descendant existed is empty for the descendant.
  verifyDiffReport(subSubSub2, "s1", "s2",
      new DiffReportEntry[]{});
}
/**
 * SnapshotDiffInfo may be constructed with the snapshot root itself or any
 * descendant of it as the scope directory, but must reject a directory
 * outside the snapshot root's subtree with IllegalArgumentException.
 */
@Test
public void testSnapshotDiffInfo() throws Exception {
  Path snapshotRootDirPath = dir;
  Path snapshotDirDescendantPath = new Path(snapshotRootDirPath, "desc");
  // A directory that is NOT under the snapshot root.
  Path snapshotDirNonDescendantPath = new Path("/dummy/non/snap/desc");
  hdfs.mkdirs(snapshotDirDescendantPath);
  hdfs.mkdirs(snapshotDirNonDescendantPath);
  hdfs.allowSnapshot(snapshotRootDirPath);
  hdfs.createSnapshot(snapshotRootDirPath, "s0");
  hdfs.createSnapshot(snapshotRootDirPath, "s1");
  // Resolve the three directories to their INodeDirectory representations
  // so SnapshotDiffInfo can be constructed directly.
  INodeDirectory snapshotRootDir = cluster.getNameNode()
      .getNamesystem().getFSDirectory().getINode(
          snapshotRootDirPath.toUri().getPath())
      .asDirectory();
  INodeDirectory snapshotRootDescendantDir = cluster.getNameNode()
      .getNamesystem().getFSDirectory().getINode(
          snapshotDirDescendantPath.toUri().getPath())
      .asDirectory();
  INodeDirectory snapshotRootNonDescendantDir = cluster.getNameNode()
      .getNamesystem().getFSDirectory().getINode(
          snapshotDirNonDescendantPath.toUri().getPath())
      .asDirectory();
  // Descendant scope dir: construction must succeed.
  try {
    SnapshotDiffInfo sdi = new SnapshotDiffInfo(
        snapshotRootDir,
        snapshotRootDescendantDir,
        new Snapshot(0, "s0", snapshotRootDescendantDir),
        new Snapshot(0, "s1", snapshotRootDescendantDir));
    LOG.info("SnapshotDiffInfo: " + sdi.getFrom() + " - " + sdi.getTo());
  } catch (IllegalArgumentException iae){
    fail("Unexpected exception when constructing SnapshotDiffInfo: " + iae);
  }
  // Non-descendant scope dir: construction must fail.
  try {
    SnapshotDiffInfo sdi = new SnapshotDiffInfo(
        snapshotRootDir,
        snapshotRootNonDescendantDir,
        new Snapshot(0, "s0", snapshotRootNonDescendantDir),
        new Snapshot(0, "s1", snapshotRootNonDescendantDir));
    LOG.info("SnapshotDiffInfo: " + sdi.getFrom() + " - " + sdi.getTo());
    fail("SnapshotDiffInfo construction should fail for non snapshot root " +
        "or non snapshot root descendant directories!");
  } catch (IllegalArgumentException iae) {
    // expected exception
  }
}
/**
 * Make changes under a sub-directory, then delete the sub-directory. Make
 * sure the diff report computation correctly retrieve the diff from the
 * deleted sub-directory.
 */
@Test
@Timeout(value = 60)
public void testDiffReport2() throws Exception {
  Path subsub1 = new Path(sub1, "subsub1");
  Path subsubsub1 = new Path(subsub1, "subsubsub1");
  hdfs.mkdirs(subsubsub1);
  // File churn under subsubsub1, with snapshots taken on sub1.
  modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1});
  // delete subsub1
  hdfs.delete(subsub1, true);
  // check diff report between s0 and s2
  // The entries must still be recoverable even though subsub1 is gone
  // from the current tree.
  verifyDiffReport(sub1, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
  // check diff report between s0 and the current status
  verifyDiffReport(sub1, "s0", "",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1")));
}
/**
 * Setting a quota before taking a snapshot must not produce any diff
 * entries between the snapshot and the current tree.
 */
@Test
public void testDiffReportWithQuota() throws Exception {
  final Path quotaDir = new Path(sub1, "testdir1");
  hdfs.mkdirs(quotaDir);
  hdfs.allowSnapshot(quotaDir);
  // The quota is applied before snapshot "s0" is created.
  hdfs.setQuota(quotaDir, 10, 10);
  hdfs.createSnapshot(quotaDir, "s0");
  final SnapshotDiffReport diff =
      hdfs.getSnapshotDiffReport(quotaDir, "s0", "");
  // No entries expected: the snapshot dir inode keeps the quota.
  assertEquals(0, diff.getDiffList().size());
  // Tear down the snapshot and the directory.
  hdfs.deleteSnapshot(quotaDir, "s0");
  hdfs.disallowSnapshot(quotaDir);
  hdfs.delete(quotaDir, true);
}
/**
 * Rename a directory to its prior descendant, and verify the diff report.
 */
@Test
public void testDiffReportWithRename() throws Exception {
  final Path root = new Path("/");
  final Path sdir1 = new Path(root, "dir1");
  final Path sdir2 = new Path(root, "dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  hdfs.mkdirs(bar);
  hdfs.mkdirs(sdir2);
  // create snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  // /dir1/foo/bar -> /dir2/bar
  final Path bar2 = new Path(sdir2, "bar");
  hdfs.rename(bar, bar2);
  // /dir1/foo -> /dir2/bar/foo
  // foo is now nested under its own former child bar.
  final Path foo2 = new Path(bar2, "foo");
  hdfs.rename(foo, foo2);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  // let's delete /dir2 to make things more complicated
  hdfs.delete(sdir2, true);
  // Both renames must still be reported (with their s1-relative source
  // paths) even though the rename targets were deleted afterwards.
  verifyDiffReport(root, "s1", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/foo"),
          DFSUtil.string2Bytes("dir2/bar/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("dir1/foo/bar")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil
          .string2Bytes("dir1/foo/bar"), DFSUtil.string2Bytes("dir2/bar")));
}
/**
 * Rename a file/dir outside of the snapshottable dir should be reported as
 * deleted. Rename a file/dir from outside should be reported as created.
 */
@Test
public void testDiffReportWithRenameOutside() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, SEED);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, SEED);
  // create snapshot on /dir1
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s0");
  // move bar into dir1
  final Path newBar = new Path(dir1, "newBar");
  hdfs.rename(bar, newBar);
  // move foo out of dir1 into dir2
  final Path newFoo = new Path(dir2, "new");
  hdfs.rename(foo, newFoo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  // bar moved in -> CREATE (as newBar); foo moved out -> DELETE.
  verifyDiffReport(dir1, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes(newBar
          .getName())),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes(foo.getName())));
}
/**
 * Renaming a file/dir then delete the ancestor dir of the rename target
 * should be reported as deleted.
 */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, SEED);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  // Overwrite dir2/bar/file with dir1/foo/file via rename.
  hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  // The overwritten target shows as DELETE, the move as RENAME.
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil
          .string2Bytes("dir2/bar/file")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("dir1/foo/file"),
          DFSUtil.string2Bytes("dir2/bar/file")));
  // delete bar
  hdfs.delete(bar, true);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  // With the rename target's ancestor gone, the renamed file is now
  // reported as DELETE of its s0-time source path.
  verifyDiffReport(root, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir1/foo/file")));
}
/**
 * Rename a file into a directory created after the snapshot: the diff
 * must show the new directory as CREATE and the file move as RENAME.
 */
@Test
public void testDiffReportWithRenameToNewDir() throws Exception {
  final Path snapRoot = new Path("/");
  final Path srcDir = new Path(snapRoot, "foo");
  final Path srcFile = new Path(srcDir, "file");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPLICATION, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, snapRoot, "s0");
  // Make a brand-new directory and move the file into it.
  final Path dstDir = new Path(snapRoot, "bar");
  hdfs.mkdirs(dstDir);
  final Path dstFile = new Path(dstDir, "file");
  hdfs.rename(srcFile, dstFile);
  SnapshotTestHelper.createSnapshot(hdfs, snapRoot, "s1");
  verifyDiffReport(snapRoot, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("bar")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo/file"),
          DFSUtil.string2Bytes("bar/file")));
}
/**
 * Rename a file and then append some data to it: the report must contain
 * a MODIFY for the file (listed under its source name) before the RENAME.
 */
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
  final Path snapRoot = new Path("/");
  final Path origFile = new Path(snapRoot, "foo");
  DFSTestUtil.createFile(hdfs, origFile, BLOCKSIZE, REPLICATION, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, snapRoot, "s0");
  final Path renamedFile = new Path(snapRoot, "bar");
  hdfs.rename(origFile, renamedFile);
  DFSTestUtil.appendFile(hdfs, renamedFile, 10); // append 10 bytes
  SnapshotTestHelper.createSnapshot(hdfs, snapRoot, "s1");
  // we always put modification on the file before rename
  verifyDiffReport(snapRoot, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
          DFSUtil.string2Bytes("bar")));
}
/**
 * Nested renamed dir/file and the withNameList in the WithCount node of the
 * parental directory is empty due to snapshot deletion. See HDFS-6996 for
 * details.
 */
@Test
public void testDiffReportWithRenameAndSnapshotDeletion() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  // rename /foo to /foo2
  final Path foo2 = new Path(root, "foo2");
  hdfs.rename(foo, foo2);
  // now /foo/bar becomes /foo2/bar
  final Path bar2 = new Path(foo2, "bar");
  // delete snapshot s0 so that the withNameList inside of the WithCount node
  // of foo becomes empty
  hdfs.deleteSnapshot(root, "s0");
  // create snapshot s1 and rename bar again
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  final Path bar3 = new Path(foo2, "bar-new");
  hdfs.rename(bar2, bar3);
  // we always put modification on the file before rename
  // The rename must be reported correctly even with the empty
  // withNameList (the HDFS-6996 regression scenario).
  verifyDiffReport(root, "s1", "",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo2/bar"),
          DFSUtil.string2Bytes("foo2/bar-new")));
}
/**
 * Create a file of {@code FILELEN} bytes at the given path using the
 * test's standard block size, replication and seed.
 */
private void createFile(final Path target) throws IOException {
  DFSTestUtil.createFile(
      hdfs, target, (int) BUFFERLEN, FILELEN, BLOCKSIZE, REPLICATION, SEED);
}
/**
 * Write the given bytes to the stream and hsync with UPDATE_LENGTH so the
 * NameNode records the new file length.
 *
 * @return the number of bytes written
 */
private int writeToStream(final FSDataOutputStream out,
    byte[] data) throws IOException {
  out.write(data);
  final HdfsDataOutputStream hdfsOut = (HdfsDataOutputStream) out;
  hdfsOut.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  return data.length;
}
/**
 * Persist the current namespace (safe mode + saveNamespace) and restart
 * the NameNode so its state is reloaded from the saved image.
 */
private void restartNameNode() throws Exception {
  cluster.triggerBlockReports();
  final NameNode nn = cluster.getNameNode();
  // Enter safe mode, save a consistent image, then leave safe mode.
  NameNodeAdapter.enterSafeMode(nn, false);
  NameNodeAdapter.saveNamespace(nn);
  NameNodeAdapter.leaveSafeMode(nn);
  cluster.restartNameNode(true);
}
/**
* Test Snapshot diff report for snapshots with open files captures in them.
* Also verify if the diff report remains the same across NameNode restarts.
*/
@Test
@Timeout(value = 120)
public void testDiffReportWithOpenFiles() throws Exception {
// Construct the directory tree
final Path level0A = new Path("/level_0_A");
final Path flumeSnapRootDir = level0A;
final String flumeFileName = "flume.log";
final String flumeSnap1Name = "flume_snap_1";
final String flumeSnap2Name = "flume_snap_2";
// Create files and open a stream
final Path flumeFile = new Path(level0A, flumeFileName);
createFile(flumeFile);
FSDataOutputStream flumeOutputStream = hdfs.append(flumeFile);
// Create Snapshot S1
final Path flumeS1Dir = SnapshotTestHelper.createSnapshot(
hdfs, flumeSnapRootDir, flumeSnap1Name);
final Path flumeS1Path = new Path(flumeS1Dir, flumeFileName);
final long flumeFileLengthAfterS1 = hdfs.getFileStatus(flumeFile).getLen();
// Verify that the Snap S1 file length is the same as the live one
assertEquals(flumeFileLengthAfterS1, hdfs.getFileStatus(flumeS1Path).getLen());
verifyDiffReport(level0A, flumeSnap1Name, "",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")));
long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
int newWriteLength = (int) (BLOCKSIZE * 1.5);
byte[] buf = new byte[newWriteLength];
Random random = new Random();
random.nextBytes(buf);
// Write more data to flume file
flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf);
// Create Snapshot S2
final Path flumeS2Dir = SnapshotTestHelper.createSnapshot(
hdfs, flumeSnapRootDir, flumeSnap2Name);
final Path flumeS2Path = new Path(flumeS2Dir, flumeFileName);
// Verify live files length is same as all data written till now
final long flumeFileLengthAfterS2 = hdfs.getFileStatus(flumeFile).getLen();
assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
// Verify if Snap S2 file length is same as the live one
assertEquals(flumeFileLengthAfterS2, hdfs.getFileStatus(flumeS2Path).getLen());
verifyDiffReport(level0A, flumeSnap1Name, "",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
verifyDiffReport(level0A, flumeSnap2Name, "");
verifyDiffReport(level0A, flumeSnap1Name, flumeSnap2Name,
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
// Write more data to flume file
flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf);
// Verify old flume snapshots have point-in-time / frozen file lengths
// even after the live file have moved forward.
assertEquals(flumeFileLengthAfterS1, hdfs.getFileStatus(flumeS1Path).getLen());
assertEquals(flumeFileLengthAfterS2, hdfs.getFileStatus(flumeS2Path).getLen());
flumeOutputStream.close();
// Verify if Snap S2 file length is same as the live one
assertEquals(flumeFileWrittenDataLength, hdfs.getFileStatus(flumeFile).getLen());
// Verify old flume snapshots have point-in-time / frozen file lengths
// even after the live file have moved forward.
assertEquals(flumeFileLengthAfterS1,
hdfs.getFileStatus(flumeS1Path).getLen());
assertEquals(flumeFileLengthAfterS2,
hdfs.getFileStatus(flumeS2Path).getLen());
verifyDiffReport(level0A, flumeSnap1Name, "",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
verifyDiffReport(level0A, flumeSnap2Name, "",
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
verifyDiffReport(level0A, flumeSnap1Name, flumeSnap2Name,
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
restartNameNode();
verifyDiffReport(level0A, flumeSnap1Name, flumeSnap2Name,
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes(flumeFileName)));
}
/** Returns the access time (millis since epoch) of the file at path. */
private long getAccessTime(Path path) throws IOException {
return hdfs.getFileStatus(path).getAccessTime();
}
/**
 * Formats the access time of {@code path} as a human-readable
 * "yyyy-MM-dd HH:mm:ss" timestamp for log output.
 */
private String getAccessTimeStr(Path path) throws IOException {
  final Date accessDate = new Date(getAccessTime(path));
  final SimpleDateFormat formatter =
      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  return formatter.format(accessDate);
}
/**
 * Maps a live path to its counterpart inside the named snapshot, i.e.
 * {@code <ssRoot>/.snapshot/<ssName>/<path relative to ssRoot>}.
 */
private Path getSSpath(Path path, Path ssRoot, String ssName) {
  final String relative =
      path.toString().substring(ssRoot.toString().length());
  return new Path(ssRoot, ".snapshot/" + ssName + "/" + relative);
}
/**
 * Logs the access time of a live path next to the access time of its
 * counterpart in the named snapshot, for manual inspection.
 */
private void printAtime(Path path, Path ssRoot, String ssName)
    throws IOException {
  final Path snapshotPath = getSSpath(path, ssRoot, ssName);
  final String liveTime = getAccessTimeStr(path);
  final String snapTime = getAccessTimeStr(snapshotPath);
  LOG.info("Access time "
      + path + ": " + liveTime
      + " " + snapshotPath + ": " + snapTime);
}
/**
 * Asserts that {@code path} has the same access time in both of the
 * given snapshots of {@code ssRoot}.
 */
private void assertAtimeEquals(Path path, Path ssRoot,
    String ssName1, String ssName2)
    throws IOException {
  final long atimeInSnap1 = getAccessTime(getSSpath(path, ssRoot, ssName1));
  final long atimeInSnap2 = getAccessTime(getSSpath(path, ssRoot, ssName2));
  assertEquals(atimeInSnap1, atimeInSnap2);
}
/**
 * Asserts that {@code path} has different access times in the two given
 * snapshots of {@code ssRoot}.
 */
private void assertAtimeNotEquals(Path path, Path ssRoot,
    String ssName1, String ssName2)
    throws IOException {
  final long atimeInSnap1 = getAccessTime(getSSpath(path, ssRoot, ssName1));
  final long atimeInSnap2 = getAccessTime(getSSpath(path, ssRoot, ssName2));
  assertNotEquals(atimeInSnap1, atimeInSnap2);
}
/**
 * Check that access time is not captured in a snapshot when applicable.
 * When DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE
 * is set to true, and a file's access time changed between two
 * snapshots but the file has no other modification, then the access
 * time is not captured in the snapshot.
 */
@Test
public void testDontCaptureAccessTimeOnlyChangeReport() throws Exception {
final Path froot = new Path("/");
final Path root = new Path(froot, "/testSdiffCalc");
// items created pre enabling snapshot
final Path filePreSS = new Path(root, "fParent/filePreSS");
final Path dirPreSS = new Path(root, "dirPreSS");
final Path dirPreSSChild = new Path(dirPreSS, "dirPreSSChild");
// items created after enabling snapshot
final Path filePostSS = new Path(root, "fParent/filePostSS");
final Path dirPostSS = new Path(root, "dirPostSS");
final Path dirPostSSChild = new Path(dirPostSS, "dirPostSSChild");
DFSTestUtil.createFile(hdfs, filePreSS, BLOCKSIZE, REPLICATION, SEED);
DFSTestUtil.createFile(hdfs, dirPreSSChild, BLOCKSIZE, REPLICATION, SEED);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
printAtime(filePreSS, root, "s0");
printAtime(dirPreSS, root, "s0");
// items created after creating the first snapshot
DFSTestUtil.createFile(hdfs, filePostSS, BLOCKSIZE, REPLICATION, SEED);
DFSTestUtil.createFile(hdfs, dirPostSSChild, BLOCKSIZE, REPLICATION, SEED);
// Sleep so that the new access times set below are clearly distinct
// from the creation times above.
Thread.sleep(3000);
long now = Time.now();
// Touch access times only; no data modification before snapshot s1.
hdfs.setTimes(filePreSS, -1, now);
hdfs.setTimes(filePostSS, -1, now);
hdfs.setTimes(dirPreSS, -1, now);
hdfs.setTimes(dirPostSS, -1, now);
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
printAtime(filePreSS, root, "s1");
printAtime(dirPreSS, root, "s1");
printAtime(filePostSS, root, "s1");
printAtime(dirPostSS, root, "s1");
Thread.sleep(3000);
now = Time.now();
// Again touch access times only before snapshot s2.
hdfs.setTimes(filePreSS, -1, now);
hdfs.setTimes(filePostSS, -1, now);
hdfs.setTimes(dirPreSS, -1, now);
hdfs.setTimes(dirPostSS, -1, now);
SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
printAtime(filePreSS, root, "s2");
printAtime(dirPreSS, root, "s2");
printAtime(filePostSS, root, "s2");
printAtime(dirPostSS, root, "s2");
Thread.sleep(3000);
now = Time.now();
// modify filePostSS, and change access time
hdfs.setReplication(filePostSS, (short) (REPLICATION - 1));
hdfs.setTimes(filePostSS, -1, now);
SnapshotTestHelper.createSnapshot(hdfs, root, "s3");
LOG.info("\nsnapshotDiff s0 -> s1:");
LOG.info(hdfs.getSnapshotDiffReport(root, "s0", "s1").toString());
LOG.info("\nsnapshotDiff s1 -> s2:");
LOG.info(hdfs.getSnapshotDiffReport(root, "s1", "s2").toString());
// Access-time-only changes must not have been captured.
assertAtimeEquals(filePreSS, root, "s0", "s1");
assertAtimeEquals(dirPreSS, root, "s0", "s1");
assertAtimeEquals(filePreSS, root, "s1", "s2");
assertAtimeEquals(dirPreSS, root, "s1", "s2");
assertAtimeEquals(filePostSS, root, "s1", "s2");
assertAtimeEquals(dirPostSS, root, "s1", "s2");
// access time should be captured in snapshot due to
// other modification
assertAtimeNotEquals(filePostSS, root, "s2", "s3");
// restart NN, and see the access time relationship
// still stands (no change caused by edit logs
// loading)
cluster.restartNameNodes();
cluster.waitActive();
assertAtimeEquals(filePreSS, root, "s0", "s1");
assertAtimeEquals(dirPreSS, root, "s0", "s1");
assertAtimeEquals(filePreSS, root, "s1", "s2");
assertAtimeEquals(dirPreSS, root, "s1", "s2");
assertAtimeEquals(filePostSS, root, "s1", "s2");
assertAtimeEquals(dirPostSS, root, "s1", "s2");
assertAtimeNotEquals(filePostSS, root, "s2", "s3");
}
/**
 * Tests to verify the diff report with the maximum number of
 * SnapshotDiffReportEntries per RPC being set to 3, so the full report
 * must be assembled from multiple RPC responses.
 * @throws Exception
 */
@Test
public void testDiffReportWithRpcLimit() throws Exception {
final Path root = new Path("/");
hdfs.mkdirs(root);
// Create dir1..dir3 before the first snapshot.
for (int i = 1; i < 4; i++) {
final Path path = new Path(root, "dir" + i);
hdfs.mkdirs(path);
}
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
// Create file1..file3 in each directory between the snapshots.
for (int i = 1; i < 4; i++) {
final Path path = new Path(root, "dir" + i);
for (int j = 1; j < 4; j++) {
final Path file = new Path(path, "file" + j);
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
}
}
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
// Expect 13 entries: every new file as CREATE, plus a MODIFY for each
// parent directory and the root -- more entries than fit in one RPC.
verifyDiffReport(root, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file2")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file3")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir2/file1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir2/file2")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir2/file3")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir3")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir3/file1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir3/file2")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir3/file3")));
}
/**
 * Tests the snapshot diff report with the maximum number of
 * SnapshotDiffReportEntries per RPC set to 3, exercising renames,
 * OVERWRITE renames and a directory move between two snapshots.
 *
 * @throws Exception if cluster operations fail
 */
@Test
public void testDiffReportWithRpcLimit2() throws Exception {
  final Path root = new Path("/");
  hdfs.mkdirs(root);
  // Create dir1..dir3, each containing file1..file3, before snapshot s0.
  for (int i = 1; i <= 3; i++) {
    final Path path = new Path(root, "dir" + i);
    hdfs.mkdirs(path);
  }
  for (int i = 1; i <= 3; i++) {
    final Path path = new Path(root, "dir" + i);
    for (int j = 1; j < 4; j++) {
      final Path file = new Path(path, "file" + j);
      DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
    }
  }
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  Path targetDir = new Path(root, "dir4");
  // Create directory dir4.
  hdfs.mkdirs(targetDir);
  // Move the files from dir1 into dir4.
  Path path = new Path(root, "dir1");
  for (int j = 1; j < 4; j++) {
    final Path srcPath = new Path(path, "file" + j);
    final Path targetPath = new Path(targetDir, "file" + j);
    hdfs.rename(srcPath, targetPath);
  }
  targetDir = new Path(root, "dir3");
  // Overwrite the existing files in dir3 with the files from dir2.
  path = new Path(root, "dir2");
  for (int j = 1; j < 4; j++) {
    final Path srcPath = new Path(path, "file" + j);
    final Path targetPath = new Path(targetDir, "file" + j);
    hdfs.rename(srcPath, targetPath, Rename.OVERWRITE);
  }
  final Path pathToRename = new Path(root, "dir2");
  // Move the now-empty dir2 inside dir3.
  hdfs.rename(pathToRename, targetDir);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("dir4")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2"),
          DFSUtil.string2Bytes("dir3/dir2")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file1"),
          DFSUtil.string2Bytes("dir4/file1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file2"),
          DFSUtil.string2Bytes("dir4/file2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file3"),
          DFSUtil.string2Bytes("dir4/file3")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file1"),
          DFSUtil.string2Bytes("dir3/file1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file2"),
          DFSUtil.string2Bytes("dir3/file2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file3"),
          DFSUtil.string2Bytes("dir3/file3")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir3")),
      // All three overwritten targets in dir3 appear as deletions. The
      // original expectation listed "dir3/file1" twice and never covered
      // "dir3/file2"; fixed to assert each overwritten file exactly once.
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file1")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file2")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file3")));
}
/**
 * Tests to verify the diff report with the maximum number of
 * SnapshotDiffReportEntries per RPC being set to 3, mixing file
 * deletions and creations under a single directory.
 * @throws Exception
 */
@Test
public void testDiffReportWithRpcLimit3() throws Exception {
final Path root = new Path("/");
hdfs.mkdirs(root);
Path path = new Path(root, "dir1");
hdfs.mkdirs(path);
// file1..file4 exist in snapshot s0.
for (int j = 1; j <= 4; j++) {
final Path file = new Path(path, "file" + j);
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
}
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
path = new Path(root, "dir1");
// Delete file1..file4 and create file5..file10 before snapshot s1.
for (int j = 1; j <= 4; j++) {
final Path file = new Path(path, "file" + j);
hdfs.delete(file, false);
}
for (int j = 5; j <= 10; j++) {
final Path file = new Path(path, "file" + j);
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
}
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
// Expect six CREATEs and four DELETEs plus the two directory MODIFYs.
verifyDiffReport(root, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file5")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file6")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file7")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file8")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file9")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("dir1/file10")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("dir1/file1")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("dir1/file2")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("dir1/file3")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("dir1/file4")));
}
/**
 * Verifies an already-computed snapshot diff report against the expected
 * entries, and also checks the inverse report (to -> from): MODIFY
 * entries must appear unchanged in both directions, while CREATE and
 * DELETE entries must swap roles in the inverse report.
 */
private void verifyDiffReportForGivenReport(Path dirPath, String from,
String to, SnapshotDiffReport report, DiffReportEntry... entries)
throws IOException {
// reverse the order of from and to
SnapshotDiffReport inverseReport =
hdfs.getSnapshotDiffReport(dirPath, to, from);
LOG.info(report.toString());
LOG.info(inverseReport.toString() + "\n");
// Both directions must yield exactly as many entries as expected.
assertEquals(entries.length, report.getDiffList().size());
assertEquals(entries.length, inverseReport.getDiffList().size());
for (DiffReportEntry entry : entries) {
if (entry.getType() == DiffType.MODIFY) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(entry));
} else if (entry.getType() == DiffType.DELETE) {
assertTrue(report.getDiffList().contains(entry))
|
TestSnapshotDiffReport.verifyDiffReportForGivenReport
/**
 * Builds a snapshot diff via the paged
 * {@code snapshotDiffReportListingRemoteIterator} API and verifies that
 * the report assembled from the listing pages matches the expected
 * entries (including the inverse direction).
 */
@Test
public void testSnapshotDiffReportRemoteIterator() throws Exception {
  final Path root = new Path("/");
  hdfs.mkdirs(root);
  // Create dir1..dir3, each containing file1..file3, before snapshot s0.
  for (int i = 1; i <= 3; i++) {
    final Path path = new Path(root, "dir" + i);
    hdfs.mkdirs(path);
  }
  for (int i = 1; i <= 3; i++) {
    final Path path = new Path(root, "dir" + i);
    for (int j = 1; j < 4; j++) {
      final Path file = new Path(path, "file" + j);
      DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
    }
  }
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  Path targetDir = new Path(root, "dir4");
  // Create directory dir4 and move the files from dir1 into it.
  hdfs.mkdirs(targetDir);
  Path path = new Path(root, "dir1");
  for (int j = 1; j < 4; j++) {
    final Path srcPath = new Path(path, "file" + j);
    final Path targetPath = new Path(targetDir, "file" + j);
    hdfs.rename(srcPath, targetPath);
  }
  targetDir = new Path(root, "dir3");
  // Overwrite the existing files in dir3 with the files from dir2.
  path = new Path(root, "dir2");
  for (int j = 1; j < 4; j++) {
    final Path srcPath = new Path(path, "file" + j);
    final Path targetPath = new Path(targetDir, "file" + j);
    hdfs.rename(srcPath, targetPath, Rename.OVERWRITE);
  }
  final Path pathToRename = new Path(root, "dir2");
  // Move the now-empty dir2 inside dir3.
  hdfs.rename(pathToRename, targetDir);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  RemoteIterator<SnapshotDiffReportListing> iterator =
      hdfs.snapshotDiffReportListingRemoteIterator(root, "s0", "s1");
  SnapshotDiffReportGenerator snapshotDiffReport;
  List<SnapshotDiffReportListing.DiffReportListingEntry> modifiedList =
      new TreeList();
  List<SnapshotDiffReportListing.DiffReportListingEntry> createdList =
      new ChunkedArrayList<>();
  List<SnapshotDiffReportListing.DiffReportListingEntry> deletedList =
      new ChunkedArrayList<>();
  SnapshotDiffReportListing report = null;
  List<SnapshotDiffReportListing> reportList = new ArrayList<>();
  // Drain all listing pages, accumulating the three change categories.
  while (iterator.hasNext()) {
    report = iterator.next();
    reportList.add(report);
    modifiedList.addAll(report.getModifyList());
    createdList.addAll(report.getCreateList());
    deletedList.addAll(report.getDeleteList());
  }
  try {
    iterator.next();
    // The original test silently passed if next() did not throw; fail
    // explicitly so a non-throwing exhausted iterator is detected.
    throw new AssertionError("iterator.next() past the last page should throw");
  } catch (Exception e) {
    assertTrue(
        e.getMessage().contains("No more entry in SnapshotDiffReport for /"));
  }
  assertNotEquals(0, reportList.size());
  // generate the snapshotDiffReport and Verify
  snapshotDiffReport = new SnapshotDiffReportGenerator("/", "s0", "s1",
      report.getIsFromEarlier(), modifiedList, createdList, deletedList);
  verifyDiffReportForGivenReport(root, "s0", "s1",
      snapshotDiffReport.generateReport(),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("dir4")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2"),
          DFSUtil.string2Bytes("dir3/dir2")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file1"),
          DFSUtil.string2Bytes("dir4/file1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file2"),
          DFSUtil.string2Bytes("dir4/file2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file3"),
          DFSUtil.string2Bytes("dir4/file3")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file1"),
          DFSUtil.string2Bytes("dir3/file1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file2"),
          DFSUtil.string2Bytes("dir3/file2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file3"),
          DFSUtil.string2Bytes("dir3/file3")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir3")),
      // The overwritten dir3 targets each appear once as a deletion. The
      // original expectation listed "dir3/file1" twice and never covered
      // "dir3/file2"; fixed to assert all three files.
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file1")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file2")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir3/file3")));
}
/**
 * Verifies that requesting a snapshot diff remote iterator against the
 * current filesystem state (empty "to" snapshot name) is rejected: the
 * remote iterator is only supported between two concrete snapshots.
 */
@Test
public void testSnapshotDiffReportRemoteIterator2() throws Exception {
  final Path root = new Path("/");
  hdfs.mkdirs(root);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  try {
    hdfs.snapshotDiffReportListingRemoteIterator(root, "s0", "");
    // The original test passed silently when no exception was thrown;
    // fail explicitly so a missing rejection is detected. AssertionError
    // is not an Exception, so it escapes the catch below.
    throw new AssertionError(
        "snapshotDiffReportListingRemoteIterator should reject an empty "
            + "snapshot name");
  } catch (Exception e) {
    // NOTE(review): the expected message concatenates to "...issupported";
    // this matches the (typo'd) production message -- confirm before
    // changing either side.
    assertTrue(e.getMessage().contains("Remote Iterator is"
        + "supported for snapshotDiffReport between two snapshots"));
  }
}
/**
 * Creates snapshots at the root and verifies that the snapshot listing
 * is consistent and that diff reports computed at nested directory
 * levels (root, foo, bar) agree with one another.
 */
@Test
public void testSubtrees() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
final Path bar = new Path(foo, "bar");
hdfs.mkdirs(bar);
// modifyAndCreateSnapshot creates three snapshots (s0..s2) on root.
modifyAndCreateSnapshot(bar, new Path[]{root});
final SnapshottableDirectoryStatus[] snapshottables
= hdfs.getSnapshottableDirListing();
assertEquals(1, snapshottables.length);
assertEquals(3, snapshottables[0].getSnapshotNumber());
final SnapshotStatus[] statuses = hdfs.getSnapshotListing(root);
assertEquals(3, statuses.length);
// Snapshot IDs are expected to be assigned sequentially from 0.
for (int i = 0; i < statuses.length; i++) {
final SnapshotStatus s = statuses[i];
LOG.info("Snapshot #{}: {}", s.getSnapshotID(), s.getFullPath());
assertEquals(i, s.getSnapshotID());
}
// Compare diffs for every snapshot pair, including identical pairs.
for (int i = 0; i <= 2; i++) {
for (int j = 0; j <= 2; j++) {
assertDiff(root, foo, bar, "s" + i, "s" + j);
}
}
}
/**
 * Checks that the diff reports computed at three nesting levels (root,
 * foo and bar) agree with one another once the extra path prefixes are
 * stripped away.
 */
private void assertDiff(Path root, Path foo, Path bar,
    String from, String to) throws Exception {
  final String barDiff = diff(bar, from, to);
  final String fooDiff = diff(foo, from, to);
  // foo's report is bar's report plus the "/bar" prefix on each path.
  assertEquals(barDiff, fooDiff.replace("/bar", ""));
  final String rootDiff = diff(root, from, to);
  assertEquals(fooDiff, rootDiff.replace("/foo", ""));
  assertEquals(barDiff, rootDiff.replace("/foo/bar", ""));
}
/**
 * Returns the diff report between two snapshots of {@code path}, with
 * the leading "path:" portion of the report string stripped off.
 */
private String diff(Path path, String from, String to) throws Exception {
  final SnapshotDiffReport diffReport =
      hdfs.getSnapshotDiffReport(path, from, to);
  LOG.info("DIFF {} from {} to {}", path, from, to);
  LOG.info("{}", diffReport);
  final String text = diffReport.toString();
  final int colon = text.indexOf(":");
  return text.substring(colon + 1);
}
}
|
TestSnapshotDiffReport
|
java
|
apache__camel
|
components/camel-micrometer/src/test/java/org/apache/camel/component/micrometer/eventnotifier/MicrometerRouteEventNotifierTest.java
|
{
"start": 1295,
"end": 2387
}
|
class ____ extends AbstractMicrometerEventNotifierTest {
private static final String ROUTE_ID = "test";
@Override
protected AbstractMicrometerEventNotifier<?> getEventNotifier() {
return new MicrometerRouteEventNotifier();
}
@Test
public void testCamelRouteEvents() throws Exception {
Gauge added = meterRegistry.find(DEFAULT_CAMEL_ROUTES_ADDED).gauge();
Gauge running = meterRegistry.find(DEFAULT_CAMEL_ROUTES_RUNNING).gauge();
assertEquals(0.0d, added.value(), 0.0001d);
assertEquals(0.0d, running.value(), 0.0001d);
context.addRoutes(new TestRoute());
assertEquals(1.0d, added.value(), 0.0001d);
assertEquals(1.0d, running.value(), 0.0001d);
context.getRouteController().stopRoute(ROUTE_ID);
assertEquals(1.0d, added.value(), 0.0001d);
assertEquals(0.0d, running.value(), 0.0001d);
context.removeRoute(ROUTE_ID);
assertEquals(0.0d, added.value(), 0.0001d);
assertEquals(0.0d, running.value(), 0.0001d);
}
private
|
MicrometerRouteEventNotifierTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
|
{
"start": 22665,
"end": 23072
}
|
class ____ its contents keep:
*
* 1) Valid fsname {@literal -->} blocklist (kept on disk, logged)
* 2) Set of all valid blocks (inverted #1)
* 3) block {@literal -->} machinelist (kept in memory, rebuilt dynamically
* from reports)
* 4) machine {@literal -->} blocklist (inverted #2)
* 5) LRU cache of updated-heartbeat machines
*/
@InterfaceAudience.Private
@Metrics(context="dfs")
public
|
and
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/ErrorProneAnalyzer.java
|
{
"start": 4363,
"end": 12203
}
|
class ____ implements TaskListener {
private final Context context;
private final RefactoringCollection refactoringCollection;
public RefactoringTask(Context context, RefactoringCollection refactoringCollection) {
this.context = context;
this.refactoringCollection = refactoringCollection;
}
@Override
public void started(TaskEvent event) {}
@Override
public void finished(TaskEvent event) {
if (event.getKind() != Kind.GENERATE) {
return;
}
RefactoringResult refactoringResult;
try {
refactoringResult = refactoringCollection.applyChanges(event.getSourceFile().toUri());
} catch (Exception e) {
PrintWriter out = Log.instance(context).getWriter(WriterKind.ERROR);
out.println(e.getMessage());
out.flush();
return;
}
if (refactoringResult.type() == RefactoringCollection.RefactoringResultType.CHANGED) {
PrintWriter out = Log.instance(context).getWriter(WriterKind.NOTICE);
out.println(refactoringResult.message());
out.flush();
}
}
}
public static ErrorProneAnalyzer createByScanningForPlugins(
ScannerSupplier scannerSupplier, ErrorProneOptions errorProneOptions, Context context) {
return new ErrorProneAnalyzer(
scansPlugins(scannerSupplier, errorProneOptions, context),
errorProneOptions,
context,
JavacErrorDescriptionListener.provider(context));
}
private static Supplier<CodeTransformer> scansPlugins(
ScannerSupplier scannerSupplier, ErrorProneOptions errorProneOptions, Context context) {
return Suppliers.memoize(
() -> {
// we can't load plugins from the processorpath until the filemanager has been
// initialized, so do it lazily
ErrorProneTimings timings = ErrorProneTimings.instance(context);
try (AutoCloseable unused = timings.initializationTimeSpan()) {
return ErrorProneScannerTransformer.create(
ErrorPronePlugins.loadPlugins(scannerSupplier, context)
.applyOverrides(errorProneOptions)
.get());
} catch (InvalidCommandLineOptionException e) {
throw new PropagatedException(e);
} catch (Exception e) {
// for the timing span, should be impossible
throw new AssertionError(e);
}
});
}
static ErrorProneAnalyzer createWithCustomDescriptionListener(
Supplier<CodeTransformer> codeTransformer,
ErrorProneOptions errorProneOptions,
Context context,
DescriptionListener.Factory descriptionListenerFactory) {
return new ErrorProneAnalyzer(
codeTransformer, errorProneOptions, context, descriptionListenerFactory);
}
private ErrorProneAnalyzer(
Supplier<CodeTransformer> transformer,
ErrorProneOptions errorProneOptions,
Context context,
DescriptionListener.Factory descriptionListenerFactory) {
this.transformer = checkNotNull(transformer);
this.errorProneOptions = checkNotNull(errorProneOptions);
this.descriptionListenerFactory = checkNotNull(descriptionListenerFactory);
Context errorProneContext = new SubContext(context);
errorProneContext.put(ErrorProneOptions.class, errorProneOptions);
this.context = errorProneContext;
}
private int errorProneErrors = 0;
@Override
public void finished(TaskEvent taskEvent) {
if (taskEvent.getKind() != Kind.ANALYZE) {
return;
}
if (JavaCompiler.instance(context).errorCount() > errorProneErrors) {
return;
}
TreePath path = JavacTrees.instance(context).getPath(taskEvent.getTypeElement());
if (path == null) {
path = new TreePath(taskEvent.getCompilationUnit());
}
// Assert that the event is unique and scan the current tree.
verify(seen.add(path.getLeaf()), "Duplicate FLOW event for: %s", taskEvent.getTypeElement());
Log log = Log.instance(context);
JCCompilationUnit compilation = (JCCompilationUnit) path.getCompilationUnit();
DescriptionListener descriptionListener =
descriptionListenerFactory.getDescriptionListener(log, compilation);
DescriptionListener countingDescriptionListener =
d -> {
if (d.severity() == SeverityLevel.ERROR) {
errorProneErrors++;
}
descriptionListener.onDescribed(d);
};
JavaFileObject originalSource = log.useSource(compilation.getSourceFile());
try {
if (shouldExcludeSourceFile(compilation)) {
return;
}
if (path.getLeaf() instanceof CompilationUnitTree) {
// We only get TaskEvents for compilation units if they contain no package declarations
// (e.g. package-info.java files). In this case it's safe to analyze the
// CompilationUnitTree immediately.
transformer.get().apply(path, context, countingDescriptionListener);
} else if (finishedCompilation(path.getCompilationUnit())) {
// Otherwise this TaskEvent is for a ClassTree, and we can scan the whole
// CompilationUnitTree once we've seen all the enclosed classes.
transformer.get().apply(new TreePath(compilation), context, countingDescriptionListener);
}
} catch (ErrorProneError e) {
e.logFatalError(log, context);
// let the exception propagate to javac's main, where it will cause the compilation to
// terminate with Result.ABNORMAL
throw e;
} catch (LinkageError e) {
// similar to ErrorProneError
String version = ErrorProneVersion.loadVersionFromPom().or("unknown version");
log.error("error.prone.crash", getStackTraceAsString(e), version, "(see stack trace)");
throw e;
} catch (CompletionFailure e) {
// A CompletionFailure can be triggered when error-prone tries to complete a symbol
// that isn't on the compilation classpath. This can occur when a check performs an
// instanceof test on a symbol, which requires inspecting the transitive closure of the
// symbol's supertypes. If javac didn't need to check the symbol's assignability
// then a normal compilation would have succeeded, and no diagnostics will have been
// reported yet, but we don't want to crash javac.
log.error("proc.cant.access", e.sym, e.getDetailValue(), getStackTraceAsString(e));
} finally {
log.useSource(originalSource);
}
}
/** Returns true if the given source file should be excluded from analysis. */
private boolean shouldExcludeSourceFile(CompilationUnitTree tree) {
Pattern excludedPattern = errorProneOptions.getExcludedPattern();
return excludedPattern != null
&& excludedPattern.matcher(ASTHelpers.getFileName(tree)).matches();
}
/** Returns true if all declarations inside the given compilation unit have been visited. */
private boolean finishedCompilation(CompilationUnitTree tree) {
OUTER:
for (Tree decl : tree.getTypeDecls()) {
switch (decl.getKind()) {
case EMPTY_STATEMENT -> {
// ignore ";" at the top level, which counts as an empty type decl
continue OUTER;
}
case IMPORT -> {
// The spec disallows mixing imports and empty top-level declarations (";"), but
// javac has a bug that causes it to accept empty declarations interspersed with imports:
// https://mail.openjdk.java.net/pipermail/compiler-dev/2013-August/006968.html
//
// Any import declarations after the first semi are incorrectly added to the list
// of type declarations, so we have to skip over them here.
continue OUTER;
}
default -> {}
}
if (!seen.contains(decl)) {
return false;
}
}
return true;
}
}
|
RefactoringTask
|
java
|
resilience4j__resilience4j
|
resilience4j-metrics/src/test/java/io/github/resilience4j/metrics/TimeLimiterMetricsTest.java
|
{
"start": 1071,
"end": 3981
}
|
class ____ extends AbstractTimeLimiterMetricsTest {
@Override
protected TimeLimiter given(String prefix, MetricRegistry metricRegistry) {
TimeLimiterRegistry timeLimiterRegistry = TimeLimiterRegistry.ofDefaults();
TimeLimiter timeLimiter = timeLimiterRegistry.timeLimiter("testLimit");
metricRegistry
.registerAll(TimeLimiterMetrics.ofTimeLimiterRegistry(prefix, timeLimiterRegistry));
return timeLimiter;
}
@Override
protected TimeLimiter given(MetricRegistry metricRegistry) {
TimeLimiterRegistry timeLimiterRegistry = TimeLimiterRegistry.ofDefaults();
TimeLimiter timeLimiter = timeLimiterRegistry.timeLimiter("testLimit");
metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiterRegistry(timeLimiterRegistry));
return timeLimiter;
}
@Test
public void shouldRecordSuccesses() {
TimeLimiter timeLimiter = TimeLimiter.of(TimeLimiterConfig.ofDefaults());
metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiter(timeLimiter));
timeLimiter.onSuccess();
timeLimiter.onSuccess();
assertThat(metricRegistry).hasMetricsSize(3);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + SUCCESSFUL)
.hasValue(2L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + FAILED)
.hasValue(0L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + TIMEOUT)
.hasValue(0L);
}
@Test
public void shouldRecordErrors() {
TimeLimiter timeLimiter = TimeLimiter.of(TimeLimiterConfig.ofDefaults());
metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiter(timeLimiter));
timeLimiter.onError(new RuntimeException());
timeLimiter.onError(new RuntimeException());
assertThat(metricRegistry).hasMetricsSize(3);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + SUCCESSFUL)
.hasValue(0L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + FAILED)
.hasValue(2L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + TIMEOUT)
.hasValue(0L);
}
@Test
public void shouldRecordTimeouts() {
TimeLimiter timeLimiter = TimeLimiter.of(TimeLimiterConfig.custom()
.timeoutDuration(Duration.ZERO)
.build());
metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiter(timeLimiter));
timeLimiter.onError(new TimeoutException());
timeLimiter.onError(new TimeoutException());
assertThat(metricRegistry).hasMetricsSize(3);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + SUCCESSFUL)
.hasValue(0L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + FAILED)
.hasValue(0L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + TIMEOUT)
.hasValue(2L);
}
}
|
TimeLimiterMetricsTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvtVO/Page.java
|
{
"start": 64,
"end": 412
}
|
class ____<T> {
private Integer count;
private List<T> items;
public void setCount(Integer count) {
this.count = count;
}
public Integer getCount() {
return count;
}
public void setItems(List<T> items) {
this.items = items;
}
public List<T> getItems() {
return items;
}
}
|
Page
|
java
|
google__guava
|
guava/src/com/google/common/collect/RegularImmutableMap.java
|
{
"start": 1897,
"end": 11552
}
|
class ____<K, V> extends ImmutableMap<K, V> {
@SuppressWarnings("unchecked")
static final ImmutableMap<Object, Object> EMPTY =
new RegularImmutableMap<>((Entry<Object, Object>[]) ImmutableMap.EMPTY_ENTRY_ARRAY, null, 0);
/**
* Closed addressing tends to perform well even with high load factors. Being conservative here
* ensures that the table is still likely to be relatively sparse (hence it misses fast) while
* saving space.
*/
@VisibleForTesting static final double MAX_LOAD_FACTOR = 1.2;
/**
* Maximum allowed false positive probability of detecting a hash flooding attack given random
* input.
*/
@VisibleForTesting static final double HASH_FLOODING_FPP = 0.001;
/**
* Maximum allowed length of a hash table bucket before falling back to a j.u.HashMap based
* implementation. Experimentally determined.
*/
static final int MAX_HASH_BUCKET_LENGTH = 8;
// entries in insertion order
@VisibleForTesting final transient Entry<K, V>[] entries;
// array of linked lists of entries
private final transient @Nullable ImmutableMapEntry<K, V> @Nullable [] table;
// 'and' with an int to get a table index
private final transient int mask;
static <K, V> ImmutableMap<K, V> fromEntries(Entry<K, V>... entries) {
return fromEntryArray(entries.length, entries, /* throwIfDuplicateKeys= */ true);
}
/**
* Creates an ImmutableMap from the first n entries in entryArray. This implementation may replace
* the entries in entryArray with its own entry objects (though they will have the same key/value
* contents), and may take ownership of entryArray.
*/
static <K, V> ImmutableMap<K, V> fromEntryArray(
int n, @Nullable Entry<K, V>[] entryArray, boolean throwIfDuplicateKeys) {
checkPositionIndex(n, entryArray.length);
if (n == 0) {
@SuppressWarnings("unchecked") // it has no entries so the type variables don't matter
ImmutableMap<K, V> empty = (ImmutableMap<K, V>) EMPTY;
return empty;
}
try {
return fromEntryArrayCheckingBucketOverflow(n, entryArray, throwIfDuplicateKeys);
} catch (BucketOverflowException e) {
// probable hash flooding attack, fall back to j.u.HM based implementation and use its
// implementation of hash flooding protection
return JdkBackedImmutableMap.create(n, entryArray, throwIfDuplicateKeys);
}
}
private static <K, V> ImmutableMap<K, V> fromEntryArrayCheckingBucketOverflow(
int n, @Nullable Entry<K, V>[] entryArray, boolean throwIfDuplicateKeys)
throws BucketOverflowException {
/*
* The cast is safe: n==entryArray.length means that we have filled the whole array with Entry
* instances, in which case it is safe to cast it from an array of nullable entries to an array
* of non-null entries.
*/
@SuppressWarnings("nullness")
Entry<K, V>[] entries =
(n == entryArray.length) ? (Entry<K, V>[]) entryArray : createEntryArray(n);
int tableSize = Hashing.closedTableSize(n, MAX_LOAD_FACTOR);
@Nullable ImmutableMapEntry<K, V>[] table = createEntryArray(tableSize);
int mask = tableSize - 1;
// If duplicates are allowed, this IdentityHashMap will record the final Entry for each
// duplicated key. We will use this final Entry to overwrite earlier slots in the entries array
// that have the same key. Then a second pass will remove all but the first of the slots that
// have this Entry. The value in the map becomes false when this first entry has been copied, so
// we know not to copy the remaining ones.
IdentityHashMap<Entry<K, V>, Boolean> duplicates = null;
int dupCount = 0;
for (int entryIndex = n - 1; entryIndex >= 0; entryIndex--) {
// requireNonNull is safe because the first `n` elements have been filled in.
Entry<K, V> entry = requireNonNull(entryArray[entryIndex]);
K key = entry.getKey();
V value = entry.getValue();
checkEntryNotNull(key, value);
int tableIndex = Hashing.smear(key.hashCode()) & mask;
ImmutableMapEntry<K, V> keyBucketHead = table[tableIndex];
ImmutableMapEntry<K, V> effectiveEntry =
checkNoConflictInKeyBucket(key, value, keyBucketHead, throwIfDuplicateKeys);
if (effectiveEntry == null) {
// prepend, not append, so the entries can be immutable
effectiveEntry =
(keyBucketHead == null)
? makeImmutable(entry, key, value)
: new NonTerminalImmutableMapEntry<K, V>(key, value, keyBucketHead);
table[tableIndex] = effectiveEntry;
} else {
// We already saw this key, and the first value we saw (going backwards) is the one we are
// keeping. So we won't touch table[], but we do still want to add the existing entry that
// we found to entries[] so that we will see this key in the right place when iterating.
if (duplicates == null) {
duplicates = new IdentityHashMap<>();
}
duplicates.put(effectiveEntry, true);
dupCount++;
// Make sure we are not overwriting the original entries array, in case we later do
// buildOrThrow(). We would want an exception to include two values for the duplicate key.
if (entries == entryArray) {
// Temporary variable is necessary to defeat bad smartcast (entries adopting the type of
// entryArray) in the Kotlin translation.
Entry<K, V>[] originalEntries = entries;
entries = originalEntries.clone();
}
}
entries[entryIndex] = effectiveEntry;
}
if (duplicates != null) {
entries = removeDuplicates(entries, n, n - dupCount, duplicates);
int newTableSize = Hashing.closedTableSize(entries.length, MAX_LOAD_FACTOR);
if (newTableSize != tableSize) {
return fromEntryArrayCheckingBucketOverflow(
entries.length, entries, /* throwIfDuplicateKeys= */ true);
}
}
return new RegularImmutableMap<>(entries, table, mask);
}
/**
* Constructs a new entry array where each duplicated key from the original appears only once, at
* its first position but with its final value. The {@code duplicates} map is modified.
*
* @param entries the original array of entries including duplicates
* @param n the number of valid entries in {@code entries}
* @param newN the expected number of entries once duplicates are removed
* @param duplicates a map of canonical {@link Entry} objects for each duplicate key. This map
* will be updated by the method, setting each value to false as soon as the {@link Entry} has
* been included in the new entry array.
* @return an array of {@code newN} entries where no key appears more than once.
*/
static <K, V> Entry<K, V>[] removeDuplicates(
Entry<K, V>[] entries, int n, int newN, IdentityHashMap<Entry<K, V>, Boolean> duplicates) {
Entry<K, V>[] newEntries = createEntryArray(newN);
for (int in = 0, out = 0; in < n; in++) {
Entry<K, V> entry = entries[in];
Boolean status = duplicates.get(entry);
// null=>not dup'd; true=>dup'd, first; false=>dup'd, not first
if (status != null) {
if (status) {
duplicates.put(entry, false);
} else {
continue; // delete this entry; we already copied an earlier one for the same key
}
}
newEntries[out++] = entry;
}
return newEntries;
}
/** Makes an entry usable internally by a new ImmutableMap without rereading its contents. */
static <K, V> ImmutableMapEntry<K, V> makeImmutable(Entry<K, V> entry, K key, V value) {
boolean reusable =
entry instanceof ImmutableMapEntry && ((ImmutableMapEntry<K, V>) entry).isReusable();
return reusable ? (ImmutableMapEntry<K, V>) entry : new ImmutableMapEntry<K, V>(key, value);
}
/** Makes an entry usable internally by a new ImmutableMap. */
static <K, V> ImmutableMapEntry<K, V> makeImmutable(Entry<K, V> entry) {
return makeImmutable(entry, entry.getKey(), entry.getValue());
}
private RegularImmutableMap(
Entry<K, V>[] entries, @Nullable ImmutableMapEntry<K, V> @Nullable [] table, int mask) {
this.entries = entries;
this.table = table;
this.mask = mask;
}
/**
* Checks if the given key already appears in the hash chain starting at {@code keyBucketHead}. If
* it does not, then null is returned. If it does, then if {@code throwIfDuplicateKeys} is true an
* {@code IllegalArgumentException} is thrown, and otherwise the existing {@link Entry} is
* returned.
*
* @throws IllegalArgumentException if another entry in the bucket has the same key and {@code
* throwIfDuplicateKeys} is true
* @throws BucketOverflowException if this bucket has too many entries, which may indicate a hash
* flooding attack
*/
@CanIgnoreReturnValue
static <K, V> @Nullable ImmutableMapEntry<K, V> checkNoConflictInKeyBucket(
Object key,
Object newValue,
@Nullable ImmutableMapEntry<K, V> keyBucketHead,
boolean throwIfDuplicateKeys)
throws BucketOverflowException {
int bucketSize = 0;
for (; keyBucketHead != null; keyBucketHead = keyBucketHead.getNextInKeyBucket()) {
if (keyBucketHead.getKey().equals(key)) {
if (throwIfDuplicateKeys) {
checkNoConflict(/* safe= */ false, "key", keyBucketHead, key + "=" + newValue);
} else {
return keyBucketHead;
}
}
if (++bucketSize > MAX_HASH_BUCKET_LENGTH) {
throw new BucketOverflowException();
}
}
return null;
}
static final
|
RegularImmutableMap
|
java
|
junit-team__junit5
|
documentation/src/test/java/example/registration/WebServerDemo.java
|
{
"start": 523,
"end": 1177
}
|
class ____ {
// end::user_guide[]
// @formatter:off
// tag::user_guide[]
@RegisterExtension
static WebServerExtension server = WebServerExtension.builder()
.enableSecurity(false)
.build();
// end::user_guide[]
// @formatter:on
// tag::user_guide[]
@Test
void getProductList() {
// end::user_guide[]
@SuppressWarnings("resource")
// tag::user_guide[]
WebClient webClient = new WebClient();
String serverUrl = server.getServerUrl();
// Use WebClient to connect to web server using serverUrl and verify response
assertEquals(200, webClient.get(serverUrl + "/products").getResponseStatus());
}
}
// end::user_guide[]
|
WebServerDemo
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/google/MultisetFeature.java
|
{
"start": 1660,
"end": 1772
}
|
interface ____ {
MultisetFeature[] value() default {};
MultisetFeature[] absent() default {};
}
}
|
Require
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/codec/tsdb/TSDBDocValuesEncoder.java
|
{
"start": 3000,
"end": 13415
}
|
class ____ {
private final DocValuesForUtil forUtil;
private final int numericBlockSize;
public TSDBDocValuesEncoder(int numericBlockSize) {
this.forUtil = new DocValuesForUtil(numericBlockSize);
this.numericBlockSize = numericBlockSize;
}
/**
* Delta-encode monotonic fields. This is typically helpful with near-primary sort fields or
* SORTED_NUMERIC/SORTED_SET doc values with many values per document.
*/
private void deltaEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException {
int gts = 0;
int lts = 0;
for (int i = 1; i < numericBlockSize; ++i) {
if (in[i] > in[i - 1]) {
gts++;
} else if (in[i] < in[i - 1]) {
lts++;
}
}
final boolean doDeltaCompression = (gts == 0 && lts >= 2) || (lts == 0 && gts >= 2);
long first = 0;
if (doDeltaCompression) {
for (int i = numericBlockSize - 1; i > 0; --i) {
in[i] -= in[i - 1];
}
// Avoid setting in[0] to 0 in case there is a minimum interval between
// consecutive values. This might later help compress data using fewer
// bits per value.
first = in[0] - in[1];
in[0] = in[1];
token = (token << 1) | 0x01;
} else {
token <<= 1;
}
removeOffset(token, tokenBits + 1, in, out);
if (doDeltaCompression) {
out.writeZLong(first);
}
}
private void removeOffset(int token, int tokenBits, long[] in, DataOutput out) throws IOException {
long min = Long.MAX_VALUE;
long max = Long.MIN_VALUE;
for (long l : in) {
min = Math.min(l, min);
max = Math.max(l, max);
}
if (max - min < 0) {
// overflow
min = 0;
} else if (min > 0 && min < (max >>> 2)) {
// removing the offset is unlikely going to help save bits per value, yet it makes decoding
// slower
min = 0;
}
if (min != 0) {
for (int i = 0; i < numericBlockSize; ++i) {
in[i] -= min;
}
token = (token << 1) | 0x01;
} else {
token <<= 1;
}
gcdEncode(token, tokenBits + 1, in, out);
if (min != 0) {
out.writeZLong(min);
}
}
/**
* See if numbers have a common divisor. This is typically helpful for integer values in
* floats/doubles or dates that don't have millisecond accuracy.
*/
private void gcdEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException {
long gcd = 0;
for (long l : in) {
gcd = MathUtil.gcd(gcd, l);
if (gcd == 1) {
break;
}
}
final boolean doGcdCompression = Long.compareUnsigned(gcd, 1) > 0;
if (doGcdCompression) {
for (int i = 0; i < numericBlockSize; ++i) {
in[i] /= gcd;
}
token = (token << 1) | 0x01;
} else {
token <<= 1;
}
forEncode(token, tokenBits + 1, in, out);
if (doGcdCompression) {
out.writeVLong(gcd - 2);
}
}
private void forEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException {
long or = 0;
for (long l : in) {
or |= l;
}
int bitsPerValue = or == 0 ? 0 : DocValuesForUtil.roundBits(PackedInts.unsignedBitsRequired(or));
out.writeVInt((bitsPerValue << tokenBits) | token);
if (bitsPerValue > 0) {
forUtil.encode(in, bitsPerValue, out);
}
}
/**
* Encode the given longs using a combination of delta-coding, GCD factorization and bit packing.
*/
public void encode(long[] in, DataOutput out) throws IOException {
assert in.length == numericBlockSize;
deltaEncode(0, 0, in, out);
}
/**
* Optimizes for encoding sorted fields where we expect a block to mostly either be the same value
* or to make a transition from one value to a second one.
* <p>
* The header is a vlong where the number of trailing ones defines the encoding strategy:
* <ul>
* <li>0: single run</li>
* <li>1: two runs</li>
* <li>2: bit-packed</li>
* <li>3: cycle</li>
* </ul>
*/
public void encodeOrdinals(long[] in, DataOutput out, int bitsPerOrd) throws IOException {
assert in.length == numericBlockSize;
int numRuns = 1;
long firstValue = in[0];
long previousValue = firstValue;
boolean cyclic = false;
int cycleLength = 0;
for (int i = 1; i < in.length; ++i) {
long currentValue = in[i];
if (previousValue != currentValue) {
numRuns++;
}
if (currentValue == firstValue && cycleLength != -1) {
if (cycleLength == 0) {
// first candidate cycle detected
cycleLength = i;
} else if (cycleLength == 1 || i % cycleLength != 0) {
// if the first two values are the same this isn't a cycle, it might be a run, though
// this also isn't a cycle if the index of the next occurrence of the first value
// isn't a multiple of the candidate cycle length
// we can stop looking for cycles now
cycleLength = -1;
}
}
previousValue = currentValue;
}
// if the cycle is too long, bit-packing may be more space efficient
int maxCycleLength = in.length / 4;
if (numRuns > 2 && cycleLength > 1 && cycleLength <= maxCycleLength) {
cyclic = true;
for (int i = cycleLength; i < in.length; ++i) {
if (in[i] != in[i - cycleLength]) {
cyclic = false;
break;
}
}
}
if (numRuns == 1 && bitsPerOrd < 63) {
long value = in[0];
// unset first bit (0 trailing ones) to indicate the block has a single run
out.writeVLong(value << 1);
} else if (numRuns == 2 && bitsPerOrd < 62) {
// set 1 trailing bit to indicate the block has two runs
out.writeVLong((in[0] << 2) | 0b01);
int firstRunLen = in.length;
for (int i = 1; i < in.length; ++i) {
if (in[i] != in[0]) {
firstRunLen = i;
break;
}
}
out.writeVInt(firstRunLen);
out.writeZLong(in[in.length - 1] - in[0]);
} else if (cyclic) {
// set 3 trailing bits to indicate the block cycles through the same values
long headerAndCycleLength = ((long) cycleLength << 4) | 0b0111;
out.writeVLong(headerAndCycleLength);
for (int i = 0; i < cycleLength; i++) {
out.writeVLong(in[i]);
}
} else {
// set 2 trailing bits to indicate the block is bit-packed
out.writeVLong(0b11);
forUtil.encode(in, bitsPerOrd, out);
}
}
public void decodeOrdinals(DataInput in, long[] out, int bitsPerOrd) throws IOException {
assert out.length == numericBlockSize : out.length;
long v1 = in.readVLong();
int encoding = Long.numberOfTrailingZeros(~v1);
v1 >>>= encoding + 1;
if (encoding == 0) {
// single run
Arrays.fill(out, v1);
} else if (encoding == 1) {
// two runs
int runLen = in.readVInt();
long v2 = v1 + in.readZLong();
Arrays.fill(out, 0, runLen, v1);
Arrays.fill(out, runLen, out.length, v2);
} else if (encoding == 2) {
// bit-packed
forUtil.decode(bitsPerOrd, in, out);
} else if (encoding == 3) {
// cycle encoding
int cycleLength = (int) v1;
for (int i = 0; i < cycleLength; i++) {
out[i] = in.readVLong();
}
int length = cycleLength;
while (length < out.length) {
int copyLength = Math.min(length, out.length - length);
System.arraycopy(out, 0, out, length, copyLength);
length += copyLength;
}
}
}
/** Decode longs that have been encoded with {@link #encode}. */
public void decode(DataInput in, long[] out) throws IOException {
assert out.length == numericBlockSize : out.length;
final int token = in.readVInt();
final int bitsPerValue = token >>> 3;
if (bitsPerValue != 0) {
forUtil.decode(bitsPerValue, in, out);
} else {
Arrays.fill(out, 0L);
}
// simple blocks that only perform bit packing exit early here
// this is typical for SORTED(_SET) ordinals
if ((token & 0x07) != 0) {
final boolean doGcdCompression = (token & 0x01) != 0;
if (doGcdCompression) {
final long gcd = 2 + in.readVLong();
mul(out, gcd);
}
final boolean hasOffset = (token & 0x02) != 0;
if (hasOffset) {
final long min = in.readZLong();
add(out, min);
}
final boolean doDeltaCompression = (token & 0x04) != 0;
if (doDeltaCompression) {
final long first = in.readZLong();
out[0] += first;
deltaDecode(out);
}
}
}
// this loop should auto-vectorize
private void mul(long[] arr, long m) {
for (int i = 0; i < numericBlockSize; ++i) {
arr[i] *= m;
}
}
// this loop should auto-vectorize
private void add(long[] arr, long min) {
for (int i = 0; i < numericBlockSize; ++i) {
arr[i] += min;
}
}
private void deltaDecode(long[] arr) {
long sum = 0;
for (int i = 0; i < numericBlockSize; ++i) {
sum += arr[i];
arr[i] = sum;
}
}
}
|
TSDBDocValuesEncoder
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldBeMarked.java
|
{
"start": 718,
"end": 1425
}
|
class ____ extends BasicErrorMessageFactory {
private static final String EXPECTING_TO_BE_MARKED = "%nExpecting %s to be a marked but was not";
private static final String EXPECTING_NOT_TO_BE_MARKED = "%nExpecting %s not to be a marked but was";
public static ErrorMessageFactory shouldBeMarked(AtomicMarkableReference<?> actual) {
return new ShouldBeMarked(actual, true);
}
public static ErrorMessageFactory shouldNotBeMarked(AtomicMarkableReference<?> actual) {
return new ShouldBeMarked(actual, false);
}
private ShouldBeMarked(AtomicMarkableReference<?> actual, boolean marked) {
super(marked ? EXPECTING_TO_BE_MARKED : EXPECTING_NOT_TO_BE_MARKED, actual);
}
}
|
ShouldBeMarked
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mongodb/src/main/java/org/springframework/boot/mongodb/autoconfigure/StandardMongoClientSettingsBuilderCustomizer.java
|
{
"start": 1163,
"end": 2525
}
|
class ____ implements MongoClientSettingsBuilderCustomizer, Ordered {
private final UuidRepresentation uuidRepresentation;
private final MongoConnectionDetails connectionDetails;
private int order;
public StandardMongoClientSettingsBuilderCustomizer(MongoConnectionDetails connectionDetails,
UuidRepresentation uuidRepresentation) {
this.connectionDetails = connectionDetails;
this.uuidRepresentation = uuidRepresentation;
}
@Override
public void customize(MongoClientSettings.Builder settingsBuilder) {
settingsBuilder.uuidRepresentation(this.uuidRepresentation);
settingsBuilder.applyConnectionString(this.connectionDetails.getConnectionString());
settingsBuilder.applyToSslSettings(this::configureSslIfNeeded);
}
private void configureSslIfNeeded(SslSettings.Builder settings) {
SslBundle sslBundle = this.connectionDetails.getSslBundle();
if (sslBundle != null) {
settings.enabled(true);
Assert.state(!sslBundle.getOptions().isSpecified(), "SSL options cannot be specified with MongoDB");
settings.context(sslBundle.createSslContext());
}
}
@Override
public int getOrder() {
return this.order;
}
/**
* Set the order value of this object.
* @param order the new order value
* @see #getOrder()
*/
public void setOrder(int order) {
this.order = order;
}
}
|
StandardMongoClientSettingsBuilderCustomizer
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/message/DefaultHttpRequest.java
|
{
"start": 2336,
"end": 21006
}
|
class ____ implements HttpRequest {
private final HttpMetadata metadata;
private final HttpChannel channel;
private final HttpHeaders headers;
private String method;
private String uri;
private String contentType;
private String charset;
private List<HttpCookie> cookies;
private List<Locale> locales;
private QueryStringDecoder decoder;
private HttpPostRequestDecoder postDecoder;
private boolean postParsed;
private Map<String, Object> attributes;
private InputStream inputStream;
public DefaultHttpRequest(HttpMetadata metadata, HttpChannel channel) {
this.metadata = metadata;
this.channel = channel;
headers = metadata.headers();
if (metadata instanceof RequestMetadata) {
RequestMetadata requestMetadata = (RequestMetadata) metadata;
method = requestMetadata.method();
uri = requestMetadata.path();
} else {
throw new UnsupportedOperationException();
}
}
public HttpMetadata getMetadata() {
return metadata;
}
@Override
public boolean isHttp2() {
return metadata instanceof Http2Header;
}
@Override
public String method() {
return method;
}
@Override
public void setMethod(String method) {
this.method = method;
}
@Override
public String uri() {
return uri;
}
@Override
public void setUri(String uri) {
this.uri = uri;
decoder = null;
}
@Override
public String path() {
return getDecoder().path();
}
@Override
public String rawPath() {
return getDecoder().rawPath();
}
@Override
public String query() {
return getDecoder().rawQuery();
}
@Override
public String header(CharSequence name) {
return headers.getFirst(name);
}
@Override
public List<String> headerValues(CharSequence name) {
return headers.get(name);
}
@Override
public Date dateHeader(CharSequence name) {
String value = headers.getFirst(name);
return StringUtils.isEmpty(value) ? null : DateFormatter.parseHttpDate(value);
}
@Override
public boolean hasHeader(CharSequence name) {
return headers.containsKey(name);
}
@Override
public Collection<String> headerNames() {
return headers.names();
}
@Override
public HttpHeaders headers() {
return headers;
}
@Override
public void setHeader(CharSequence name, String value) {
headers.set(name, value);
}
@Override
public void setHeader(CharSequence name, Date value) {
headers.set(name, DateFormatter.format(value));
}
@Override
public void setHeader(CharSequence name, List<String> values) {
headers.set(name, values);
}
@Override
public Collection<HttpCookie> cookies() {
List<HttpCookie> cookies = this.cookies;
if (cookies == null) {
cookies = HttpUtils.decodeCookies(header(HttpHeaderNames.COOKIE.getKey()));
this.cookies = cookies;
}
return cookies;
}
@Override
public HttpCookie cookie(String name) {
List<HttpCookie> cookies = this.cookies;
if (cookies == null) {
cookies = HttpUtils.decodeCookies(header(HttpHeaderNames.COOKIE.getKey()));
this.cookies = cookies;
}
for (int i = 0, size = cookies.size(); i < size; i++) {
HttpCookie cookie = cookies.get(i);
if (cookie.name().equals(name)) {
return cookie;
}
}
return null;
}
@Override
public int contentLength() {
String value = headers.getFirst(HttpHeaderNames.CONTENT_LENGTH.getKey());
return value == null ? 0 : Integer.parseInt(value);
}
@Override
public String contentType() {
String contentType = this.contentType;
if (contentType == null) {
contentType = headers.getFirst(HttpHeaderNames.CONTENT_TYPE.getKey());
contentType = contentType == null ? StringUtils.EMPTY_STRING : contentType.trim();
this.contentType = contentType;
}
return contentType.isEmpty() ? null : contentType;
}
@Override
public void setContentType(String contentType) {
setContentType0(contentType == null ? StringUtils.EMPTY_STRING : contentType.trim());
charset = null;
}
private void setContentType0(String contentType) {
this.contentType = contentType;
headers.set(HttpHeaderNames.CONTENT_TYPE.getKey(), contentType());
}
@Override
public String mediaType() {
String contentType = contentType();
if (contentType == null) {
return null;
}
int index = contentType.indexOf(';');
return index == -1 ? contentType : contentType.substring(0, index);
}
@Override
public String charset() {
String charset = this.charset;
if (charset == null) {
String contentType = contentType();
charset = HttpUtils.parseCharset(contentType);
this.charset = charset;
}
return charset.isEmpty() ? null : charset;
}
@Override
public Charset charsetOrDefault() {
String charset = charset();
return charset == null ? StandardCharsets.UTF_8 : Charset.forName(charset);
}
@Override
public void setCharset(String charset) {
String contentType = contentType();
if (contentType != null) {
setContentType0(contentType + "; " + HttpUtils.CHARSET_PREFIX + charset);
}
this.charset = charset;
}
@Override
public String accept() {
return headers.getFirst(HttpHeaderNames.ACCEPT.getKey());
}
@Override
public Locale locale() {
return locales().get(0);
}
@Override
public List<Locale> locales() {
List<Locale> locales = this.locales;
if (locales == null) {
locales = HttpUtils.parseAcceptLanguage(headers.getFirst(HttpHeaderNames.CONTENT_LANGUAGE.getKey()));
if (locales.isEmpty()) {
locales = Collections.singletonList(Locale.getDefault());
}
this.locales = locales;
}
return locales;
}
@Override
public String scheme() {
String scheme = headers.getFirst(HttpConstants.X_FORWARDED_PROTO);
if (isHttp2()) {
scheme = headers.getFirst(PseudoHeaderName.SCHEME.value());
}
return scheme == null ? HttpConstants.HTTP : scheme;
}
@Override
public String serverHost() {
String host = getHost0();
return host == null ? localHost() + ':' + localPort() : host;
}
@Override
public String serverName() {
String host = headers.getFirst(HttpConstants.X_FORWARDED_HOST);
if (host != null) {
return host;
}
host = getHost0();
if (host != null) {
int index = host.lastIndexOf(':');
return index == -1 ? host : host.substring(0, index);
}
return localHost();
}
@Override
public int serverPort() {
String port = headers.getFirst(HttpConstants.X_FORWARDED_PORT);
if (port != null) {
return Integer.parseInt(port);
}
String host = getHost0();
if (host != null) {
int index = host.lastIndexOf(':');
return index == -1 ? -1 : Integer.parseInt(host.substring(0, index));
}
return localPort();
}
private String getHost0() {
return headers.getFirst(isHttp2() ? PseudoHeaderName.AUTHORITY.value() : HttpHeaderNames.HOST.getKey());
}
@Override
public String remoteHost() {
return getRemoteAddress().getHostString();
}
@Override
public String remoteAddr() {
return getRemoteAddress().getAddress().getHostAddress();
}
@Override
public int remotePort() {
return getRemoteAddress().getPort();
}
private InetSocketAddress getRemoteAddress() {
return (InetSocketAddress) channel.remoteAddress();
}
@Override
public String localHost() {
return getLocalAddress().getHostString();
}
@Override
public String localAddr() {
return getLocalAddress().getAddress().getHostAddress();
}
@Override
public int localPort() {
return getLocalAddress().getPort();
}
private InetSocketAddress getLocalAddress() {
return (InetSocketAddress) channel.localAddress();
}
@Override
public String parameter(String name) {
List<String> values = getDecoder().parameters().get(name);
if (CollectionUtils.isNotEmpty(values)) {
return values.get(0);
}
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return null;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return null;
}
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
return HttpUtils.readPostValue(item);
}
}
return formParameter(name);
}
@Override
public String parameter(String name, String defaultValue) {
String value = parameter(name);
return value == null ? defaultValue : value;
}
@Override
public List<String> parameterValues(String name) {
List<String> values = getDecoder().parameters().get(name);
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return values;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return values;
}
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
if (values == null) {
values = new ArrayList<>();
}
values.add(HttpUtils.readPostValue(item));
}
}
return values;
}
@Override
public String queryParameter(String name) {
return CollectionUtils.first(queryParameterValues(name));
}
@Override
public List<String> queryParameterValues(String name) {
return getDecoder().parameters().get(name);
}
@Override
public Collection<String> queryParameterNames() {
return getDecoder().parameters().keySet();
}
@Override
public Map<String, List<String>> queryParameters() {
return getDecoder().parameters();
}
@Override
public String formParameter(String name) {
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return null;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return null;
}
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
return HttpUtils.readPostValue(item);
}
}
return null;
}
@Override
public List<String> formParameterValues(String name) {
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return null;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return null;
}
List<String> values = null;
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
if (values == null) {
values = new ArrayList<>();
}
values.add(HttpUtils.readPostValue(item));
}
}
return values == null ? Collections.emptyList() : values;
}
@Override
public Collection<String> formParameterNames() {
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return Collections.emptyList();
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas();
if (items == null) {
return Collections.emptyList();
}
Set<String> names = null;
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
if (names == null) {
names = new LinkedHashSet<>();
}
names.add(item.getName());
}
}
return names == null ? Collections.emptyList() : names;
}
@Override
public boolean hasParameter(String name) {
if (getDecoder().parameters().containsKey(name)) {
return true;
}
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return false;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return false;
}
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
return true;
}
}
return false;
}
@Override
public Collection<String> parameterNames() {
Set<String> names = getDecoder().parameters().keySet();
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return names;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas();
if (items == null) {
return names;
}
Set<String> allNames = null;
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.Attribute) {
if (allNames == null) {
allNames = new LinkedHashSet<>(names);
}
allNames.add(item.getName());
}
}
return allNames == null ? Collections.emptyList() : allNames;
}
@Override
public Collection<FileUpload> parts() {
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return Collections.emptyList();
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas();
if (items == null) {
return Collections.emptyList();
}
List<FileUpload> fileUploads = new ArrayList<>();
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.FileUpload) {
fileUploads.add(HttpUtils.readUpload(item));
}
}
return fileUploads;
}
@Override
public FileUpload part(String name) {
HttpPostRequestDecoder postDecoder = getPostDecoder();
if (postDecoder == null) {
return null;
}
List<InterfaceHttpData> items = postDecoder.getBodyHttpDatas(name);
if (items == null) {
return null;
}
for (int i = 0, size = items.size(); i < size; i++) {
InterfaceHttpData item = items.get(i);
if (item.getHttpDataType() == HttpDataType.FileUpload) {
return HttpUtils.readUpload(item);
}
}
return null;
}
/** Lazily creates the query-string decoder, honouring the request charset. */
private QueryStringDecoder getDecoder() {
    QueryStringDecoder d = this.decoder;
    if (d == null) {
        String cs = charset();
        d = cs == null
                ? new QueryStringDecoder(uri)
                : new QueryStringDecoder(uri, Charset.forName(cs));
        this.decoder = d;
    }
    return d;
}
/**
 * Lazily creates the body (multipart/form) decoder.
 *
 * <p>Returns {@code null} when the request has no input stream, the method
 * cannot carry a body, or a previous attempt already ran ({@code postParsed}
 * guards against re-parsing a consumed stream).
 */
private HttpPostRequestDecoder getPostDecoder() {
    HttpPostRequestDecoder postDecoder = this.postDecoder;
    if (postDecoder == null) {
        if (postParsed) {
            // Already attempted (and possibly failed/empty); do not retry.
            return null;
        }
        if (inputStream != null && HttpMethods.supportBody(method)) {
            postDecoder = HttpUtils.createPostRequestDecoder(this, inputStream, charset());
            this.postDecoder = postDecoder;
        }
        // Mark as parsed even when no decoder was created, so callers do not
        // repeatedly re-check the stream.
        postParsed = true;
    }
    return postDecoder;
}
@Override
@SuppressWarnings("unchecked")
public <T> T attribute(String name) {
    // Caller-asserted cast; returns null when the attribute is absent.
    Object value = getAttributes().get(name);
    return (T) value;
}
@Override
public void removeAttribute(String name) {
    // Removing from the lazily-created map; creates it if absent (no-op remove).
    Map<String, Object> attrs = getAttributes();
    attrs.remove(name);
}
@Override
public void setAttribute(String name, Object value) {
    // Store (or overwrite) the attribute in the lazily-created map.
    Map<String, Object> attrs = getAttributes();
    attrs.put(name, value);
}
@Override
public boolean hasAttribute(String name) {
    // Avoid allocating the attribute map just to answer "no".
    Map<String, Object> attrs = this.attributes;
    return attrs != null && attrs.containsKey(name);
}
@Override
public Collection<String> attributeNames() {
    // Live key-set view over the attribute map.
    Map<String, Object> attrs = getAttributes();
    return attrs.keySet();
}
@Override
public Map<String, Object> attributes() {
    // Expose the live (mutable) attribute map, creating it on demand.
    Map<String, Object> attrs = getAttributes();
    return attrs;
}
/** Lazily allocates the backing attribute map on first use. */
private Map<String, Object> getAttributes() {
    if (this.attributes == null) {
        this.attributes = new HashMap<>();
    }
    return this.attributes;
}
@Override
public InputStream inputStream() {
    // Plain accessor for the request body stream (may be null).
    return this.inputStream;
}
@Override
public void setInputStream(InputStream is) {
    inputStream = is;
    // CONSISTENCY FIX: getPostDecoder() lazily parses the body for any method
    // that may carry one (HttpMethods.supportBody), but the reset here only
    // fired for POST — leaving a stale decoder/postParsed flag bound to the
    // old stream for PUT/PATCH. Reset whenever the method can carry a body.
    if (HttpMethods.supportBody(method)) {
        postDecoder = null;
        postParsed = false;
    }
}
@Override
public String toString() {
    // Same rendering as before: DefaultHttpRequest{<fields>}
    StringBuilder sb = new StringBuilder("DefaultHttpRequest{");
    sb.append(fieldToString());
    return sb.append('}').toString();
}
/** Renders the common fields shared with subclasses' toString() output. */
protected final String fieldToString() {
    StringBuilder sb = new StringBuilder();
    sb.append("method='").append(method).append('\'');
    sb.append(", uri='").append(uri).append('\'');
    sb.append(", contentType='").append(contentType()).append('\'');
    return sb.toString();
}
}
|
DefaultHttpRequest
|
java
|
apache__flink
|
flink-core-api/src/main/java/org/apache/flink/util/function/TriConsumerWithException.java
|
{
"start": 1114,
"end": 2235
}
|
interface ____<S, T, U, E extends Throwable> {
    /**
     * Performs this operation on the given arguments.
     *
     * @param s the first input argument
     * @param t the second input argument
     * @param u the third input argument
     * @throws E in case of an error
     */
    void accept(S s, T t, U u) throws E;
    /**
     * Convert a {@link TriConsumerWithException} into a {@link TriConsumer}.
     *
     * @param triConsumerWithException TriConsumer with exception to convert into a {@link
     *     TriConsumer}.
     * @param <A> first input type
     * @param <B> second input type
     * @param <C> third input type
     * @return {@link TriConsumer} which rethrows all checked exceptions as unchecked.
     */
    static <A, B, C> TriConsumer<A, B, C> unchecked(
            TriConsumerWithException<A, B, C, ?> triConsumerWithException) {
        return (A a, B b, C c) -> {
            try {
                triConsumerWithException.accept(a, b, c);
            } catch (Throwable t) {
                // Rethrows t as-is (no wrapping); the compiler-visible checked
                // type is erased by the rethrow helper.
                ThrowingExceptionUtils.rethrow(t);
            }
        };
    }
}
|
TriConsumerWithException
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
|
{
"start": 5348,
"end": 6226
}
|
class ____ extends BroadcastRequest<Request> {
    // Dedicated counter so tests can verify that the transport layer
    // acquires/releases references on the request correctly.
    private final RefCounted refCounted = AbstractRefCounted.of(() -> {});
    public Request(StreamInput in) throws IOException {
        super(in);
    }
    public Request(String... indices) {
        super(indices);
    }
    // All RefCounted operations delegate to the internal counter.
    @Override
    public void incRef() {
        refCounted.incRef();
    }
    @Override
    public boolean tryIncRef() {
        return refCounted.tryIncRef();
    }
    @Override
    public boolean decRef() {
        return refCounted.decRef();
    }
    @Override
    public boolean hasReferences() {
        return refCounted.hasReferences();
    }
    @Override
    public String toString() {
        return "testrequest" + Arrays.toString(indices);
    }
}
public static
|
Request
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/OutputUtil.java
|
{
"start": 1094,
"end": 1869
}
|
class ____ {
    private static final Logger LOG = LoggerFactory.getLogger(OutputUtil.class);

    public static final String NATIVE_TASK_OUTPUT_MANAGER = "nativetask.output.manager";

    /**
     * Reflectively instantiates the configured {@code NativeTaskOutput}
     * implementation (default: {@code NativeTaskOutputFiles}) via its
     * {@code (Configuration, String)} constructor.
     */
    public static NativeTaskOutput createNativeTaskOutput(Configuration conf, String id) {
        Class<?> outputClass =
            conf.getClass(OutputUtil.NATIVE_TASK_OUTPUT_MANAGER, NativeTaskOutputFiles.class);
        LOG.info(OutputUtil.NATIVE_TASK_OUTPUT_MANAGER + " = " + outputClass.getName());
        try {
            Constructor<?> constructor =
                outputClass.getConstructor(Configuration.class, String.class);
            constructor.setAccessible(true);
            return (NativeTaskOutput) constructor.newInstance(conf, id);
        } catch (Exception e) {
            // Wrap any reflective failure; misconfiguration is unrecoverable here.
            throw new RuntimeException(e);
        }
    }
}
|
OutputUtil
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/MonoTakeLastOne.java
|
{
"start": 1698,
"end": 4241
}
|
// Keeps only the last onNext value and emits it on completion. A sentinel
// object (CANCELLED) stored in "value" marks cancellation so late signals
// can be discarded; access to "value" is serialized via synchronized blocks.
class ____<T>
    extends Operators.BaseFluxToMonoOperator<T, T> {
    static final Object CANCELLED = new Object();
    // When true, completing without any value signals NoSuchElementException
    // (Flux#last()); otherwise an empty completion is propagated.
    final boolean mustEmit;
    @Nullable T value;
    boolean done;
    TakeLastOneSubscriber(CoreSubscriber<? super T> actual,
            @Nullable T defaultValue,
            boolean mustEmit) {
        super(actual);
        this.value = defaultValue;
        this.mustEmit = mustEmit;
    }
    @Override
    public @Nullable Object scanUnsafe(Attr key) {
        if (key == Attr.TERMINATED) return done && value == null;
        if (key == Attr.CANCELLED) return value == CANCELLED;
        return super.scanUnsafe(key);
    }
    @Override
    public void onNext(T t) {
        T old = this.value;
        if (old == CANCELLED) {
            // cancelled
            Operators.onDiscard(t, actual.currentContext());
            return;
        }
        // Re-check under the lock: cancel() may have raced between the
        // unsynchronized read above and this write.
        synchronized (this) {
            old = this.value;
            if (old != CANCELLED) {
                this.value = t;
            }
        }
        if (old == CANCELLED) {
            // cancelled
            Operators.onDiscard(t, actual.currentContext());
            return;
        }
        // Discard the previously-held value that was just replaced.
        Operators.onDiscard(old, actual.currentContext()); //FIXME cache context
    }
    @Override
    public void onError(Throwable t) {
        if (this.done) {
            Operators.onErrorDropped(t, actual.currentContext());
            return;
        }
        this.done = true;
        final T v;
        synchronized (this) {
            v = this.value;
            this.value = null;
        }
        if (v == CANCELLED) {
            Operators.onErrorDropped(t, actual.currentContext());
            return;
        }
        if (v != null) {
            Operators.onDiscard(v, actual.currentContext());
        }
        this.actual.onError(t);
    }
    @Override
    public void onComplete() {
        if (this.done) {
            return;
        }
        this.done = true;
        final T v = this.value;
        if (v == CANCELLED) {
            return;
        }
        if (v == null) {
            if (mustEmit) {
                actual.onError(Operators.onOperatorError(new NoSuchElementException(
                        "Flux#last() didn't observe any " + "onNext signal"),
                        actual.currentContext()));
            }
            else {
                actual.onComplete();
            }
            return;
        }
        // Value present: defer emission to the base class' drain logic.
        completePossiblyEmpty();
    }
    @Override
    public void cancel() {
        s.cancel();
        final T v;
        synchronized (this) {
            v = this.value;
            @SuppressWarnings("unchecked")
            T cancelled = (T) CANCELLED;
            this.value = cancelled;
        }
        if (v != null) {
            // Discard any value held at cancellation time.
            Operators.onDiscard(v, actual.currentContext());
        }
    }
    @Override
    @Nullable T accumulatedValue() {
        final T v;
        synchronized (this) {
            v = this.value;
            this.value = null;
        }
        if (v == CANCELLED) {
            return null;
        }
        return v;
    }
}
}
|
TakeLastOneSubscriber
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/RecreateOnResetOperatorCoordinator.java
|
{
"start": 12766,
"end": 18221
}
|
// Buffers calls targeted at an OperatorCoordinator while a replacement
// instance is being created, then replays them once the new instance has
// "caught up". All state transitions are guarded by this object's monitor.
class ____ {
    private final OperatorID operatorId;
    private final BlockingQueue<NamedCall> pendingCalls;
    private final Map<String, String> mdc;
    private QuiesceableContext internalQuiesceableContext;
    private OperatorCoordinator internalCoordinator;
    // True once all buffered calls have been replayed; later calls go direct.
    private boolean hasCaughtUp;
    private boolean closed;
    private volatile boolean failed;
    private DeferrableCoordinator(OperatorID operatorId, JobID jobID) {
        this.operatorId = operatorId;
        this.pendingCalls = new LinkedBlockingQueue<>();
        this.hasCaughtUp = false;
        this.closed = false;
        this.failed = false;
        this.mdc = MdcUtils.asContextData(jobID);
    }
    // NOTE(review): the method is already declared synchronized, so the inner
    // synchronized(this) block is redundant (monitors are reentrant) — confirm
    // before simplifying.
    synchronized <T extends Exception> void applyCall(
            String name, ThrowingConsumer<OperatorCoordinator, T> call) throws T {
        synchronized (this) {
            if (hasCaughtUp) {
                // The new coordinator has caught up.
                try (MdcUtils.MdcCloseable ignored = MdcUtils.withContext(mdc)) {
                    call.accept(internalCoordinator);
                }
            } else {
                pendingCalls.add(new NamedCall(name, call));
            }
        }
    }
    synchronized void createNewInternalCoordinator(
            OperatorCoordinator.Context context, Provider provider) {
        if (closed) {
            return;
        }
        // Create a new internal coordinator and a new quiesceable context.
        // We assume that the coordinator creation is fast. Otherwise the creation
        // of the new internal coordinator may block the applyCall() method
        // which is invoked in the scheduler main thread.
        try {
            internalQuiesceableContext = new QuiesceableContext(context);
            internalCoordinator = provider.getCoordinator(internalQuiesceableContext);
        } catch (Exception e) {
            LOG.error("Failed to create new internal coordinator due to ", e);
            cleanAndFailJob(e);
        }
    }
    synchronized CompletableFuture<Void> closeAsync(long timeoutMs) {
        closed = true;
        if (internalCoordinator != null) {
            internalQuiesceableContext.quiesce();
            pendingCalls.clear();
            return closeAsyncWithTimeout(
                            "SourceCoordinator for " + operatorId,
                            (ThrowingRunnable<Exception>) internalCoordinator::close,
                            Duration.ofMillis(timeoutMs))
                    .exceptionally(
                            e -> {
                                cleanAndFailJob(e);
                                return null;
                            });
        } else {
            return CompletableFuture.completedFuture(null);
        }
    }
    // Replays buffered calls on the new coordinator until the queue drains;
    // the final emptiness re-check under the lock closes the race with
    // applyCall() adding a call after the last poll.
    void processPendingCalls() {
        if (failed || closed || internalCoordinator == null) {
            return;
        }
        String name = "Unknown Call Name";
        try {
            while (!hasCaughtUp) {
                while (!pendingCalls.isEmpty()) {
                    NamedCall namedCall = pendingCalls.poll();
                    if (namedCall != null) {
                        name = namedCall.name;
                        namedCall.getConsumer().accept(internalCoordinator);
                    }
                }
                synchronized (this) {
                    // We need to check the pending calls queue again in case a new
                    // pending call is added after we process the last one and before
                    // we grab the lock.
                    if (pendingCalls.isEmpty()) {
                        hasCaughtUp = true;
                    }
                }
            }
        } catch (Throwable t) {
            LOG.error("Failed to process pending calls {} on coordinator.", name, t);
            cleanAndFailJob(t);
        }
    }
    void start() throws Exception {
        internalCoordinator.start();
    }
    void resetAndStart(
            final long checkpointId,
            @Nullable final byte[] checkpointData,
            final boolean started) {
        if (failed || closed || internalCoordinator == null) {
            return;
        }
        try {
            internalCoordinator.resetToCheckpoint(checkpointId, checkpointData);
            // Start the new coordinator if this coordinator has been started before reset to
            // the checkpoint.
            if (started) {
                internalCoordinator.start();
            }
        } catch (Exception e) {
            LOG.error("Failed to reset the coordinator to checkpoint and start.", e);
            cleanAndFailJob(e);
        }
    }
    private void cleanAndFailJob(Throwable t) {
        // Don't repeatedly fail the job.
        if (!failed) {
            failed = true;
            internalQuiesceableContext.getContext().failJob(t);
            pendingCalls.clear();
        }
    }
}
private static
|
DeferrableCoordinator
|
java
|
elastic__elasticsearch
|
modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DataStreamsStatsResponseTests.java
|
{
"start": 1001,
"end": 3802
}
|
class ____ extends AbstractWireSerializingTestCase<DataStreamsStatsAction.Response> {
    @Override
    protected Writeable.Reader<DataStreamsStatsAction.Response> instanceReader() {
        return DataStreamsStatsAction.Response::new;
    }

    @Override
    protected DataStreamsStatsAction.Response createTestInstance() {
        return randomStatsResponse();
    }

    @Override
    protected DataStreamsStatsAction.Response mutateInstance(DataStreamsStatsAction.Response instance) {
        return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
    }

    /** Builds a randomized stats response with consistent shard/index totals. */
    public static DataStreamsStatsAction.Response randomStatsResponse() {
        int dataStreamCount = randomInt(10);
        int backingIndicesTotal = 0;
        long totalStoreSize = 0L;
        ArrayList<DataStreamsStatsAction.DataStreamStats> dataStreamStats = new ArrayList<>();
        for (int i = 0; i < dataStreamCount; i++) {
            // BUG FIX: use Locale.ROOT instead of Locale.getDefault() — with a
            // Turkish default locale, 'I' lowercases to dotless 'ı', producing
            // a non-ASCII name and making the test locale-dependent.
            String dataStreamName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
            int backingIndices = randomInt(5);
            backingIndicesTotal += backingIndices;
            long storeSize = randomLongBetween(250, 1000000000);
            totalStoreSize += storeSize;
            long maximumTimestamp = randomRecentTimestamp();
            dataStreamStats.add(
                new DataStreamsStatsAction.DataStreamStats(
                    dataStreamName,
                    backingIndices,
                    ByteSizeValue.ofBytes(storeSize),
                    maximumTimestamp
                )
            );
        }
        int totalShards = randomIntBetween(backingIndicesTotal, backingIndicesTotal * 3);
        int successfulShards = randomInt(totalShards);
        int failedShards = totalShards - successfulShards;
        List<DefaultShardOperationFailedException> exceptions = new ArrayList<>();
        for (int i = 0; i < failedShards; i++) {
            exceptions.add(
                new DefaultShardOperationFailedException(
                    randomAlphaOfLength(8).toLowerCase(Locale.ROOT),
                    randomInt(totalShards),
                    new ElasticsearchException("boom")
                )
            );
        }
        return new DataStreamsStatsAction.Response(
            totalShards,
            successfulShards,
            failedShards,
            exceptions,
            dataStreamCount,
            backingIndicesTotal,
            ByteSizeValue.ofBytes(totalStoreSize),
            dataStreamStats.toArray(DataStreamsStatsAction.DataStreamStats[]::new)
        );
    }

    /** Random timestamp within the last hour, relative to wall-clock now. */
    private static long randomRecentTimestamp() {
        long base = System.currentTimeMillis();
        return randomLongBetween(base - TimeUnit.HOURS.toMillis(1), base);
    }
}
|
DataStreamsStatsResponseTests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
|
{
"start": 12072,
"end": 41649
}
|
// Tracks pipeline error state for the streamer: which datanode (by pipeline
// index) is bad or restarting, and the deadline by which a restarting node
// must come back. All mutators are synchronized on this object.
class ____ {
    ErrorType error = ErrorType.NONE;
    // Index (into the current pipeline) of the node declared bad; -1 = none.
    private int badNodeIndex = -1;
    private boolean waitForRestart = true;
    // Index of a node known to be restarting; -1 = none.
    private int restartingNodeIndex = -1;
    // Monotonic-clock deadline for the restarting node to come back.
    private long restartingNodeDeadline = 0;
    private final long datanodeRestartTimeout;
    ErrorState(long datanodeRestartTimeout) {
        this.datanodeRestartTimeout = datanodeRestartTimeout;
    }
    synchronized void resetInternalError() {
        if (hasInternalError()) {
            error = ErrorType.NONE;
        }
        badNodeIndex = -1;
        restartingNodeIndex = -1;
        restartingNodeDeadline = 0;
        waitForRestart = true;
    }
    synchronized void reset() {
        error = ErrorType.NONE;
        badNodeIndex = -1;
        restartingNodeIndex = -1;
        restartingNodeDeadline = 0;
        waitForRestart = true;
    }
    synchronized boolean hasInternalError() {
        return error == ErrorType.INTERNAL;
    }
    synchronized boolean hasExternalError() {
        return error == ErrorType.EXTERNAL;
    }
    synchronized boolean hasError() {
        return error != ErrorType.NONE;
    }
    synchronized boolean hasDatanodeError() {
        return error == ErrorType.INTERNAL && isNodeMarked();
    }
    synchronized void setInternalError() {
        this.error = ErrorType.INTERNAL;
    }
    synchronized void setExternalError() {
        // An internal error takes precedence and is never downgraded.
        if (!hasInternalError()) {
            this.error = ErrorType.EXTERNAL;
        }
    }
    synchronized void setBadNodeIndex(int index) {
        this.badNodeIndex = index;
    }
    synchronized int getBadNodeIndex() {
        return badNodeIndex;
    }
    synchronized int getRestartingNodeIndex() {
        return restartingNodeIndex;
    }
    synchronized void initRestartingNode(int i, String message,
            boolean shouldWait) {
        restartingNodeIndex = i;
        if (shouldWait) {
            restartingNodeDeadline = Time.monotonicNow() + datanodeRestartTimeout;
            // If the data streamer has already set the primary node
            // bad, clear it. It is likely that the write failed due to
            // the DN shutdown. Even if it was a real failure, the pipeline
            // recovery will take care of it.
            badNodeIndex = -1;
        } else {
            this.waitForRestart = false;
        }
        LOG.info(message);
    }
    synchronized boolean isRestartingNode() {
        return restartingNodeIndex >= 0;
    }
    synchronized boolean isNodeMarked() {
        return badNodeIndex >= 0 || (isRestartingNode() && doWaitForRestart());
    }
    /**
     * This method is used when no explicit error report was received, but
     * something failed. The first node is a suspect or unsure about the cause
     * so that it is marked as failed.
     */
    synchronized void markFirstNodeIfNotMarked() {
        // There should be no existing error and no ongoing restart.
        if (!isNodeMarked()) {
            badNodeIndex = 0;
        }
    }
    synchronized void adjustState4RestartingNode() {
        // Just took care of a node error while waiting for a node restart
        if (restartingNodeIndex >= 0) {
            // If the error came from a node further away than the restarting
            // node, the restart must have been complete.
            if (badNodeIndex > restartingNodeIndex) {
                restartingNodeIndex = -1;
            } else if (badNodeIndex < restartingNodeIndex) {
                // the node index has shifted.
                restartingNodeIndex--;
            } else if (waitForRestart) {
                throw new IllegalStateException("badNodeIndex = " + badNodeIndex
                        + " = restartingNodeIndex = " + restartingNodeIndex);
            }
        }
        if (!isRestartingNode()) {
            error = ErrorType.NONE;
        }
        badNodeIndex = -1;
    }
    synchronized void checkRestartingNodeDeadline(DatanodeInfo[] nodes) {
        if (restartingNodeIndex >= 0) {
            if (error == ErrorType.NONE) {
                throw new IllegalStateException("error=false while checking" +
                        " restarting node deadline");
            }
            // check badNodeIndex
            if (badNodeIndex == restartingNodeIndex) {
                // ignore, if came from the restarting node
                badNodeIndex = -1;
            }
            // not within the deadline
            if (Time.monotonicNow() >= restartingNodeDeadline) {
                // expired. declare the restarting node dead
                restartingNodeDeadline = 0;
                final int i = restartingNodeIndex;
                restartingNodeIndex = -1;
                LOG.warn("Datanode " + i + " did not restart within "
                        + datanodeRestartTimeout + "ms: " + nodes[i]);
                // Mark the restarting node as failed. If there is any other failed
                // node during the last pipeline construction attempt, it will not be
                // overwritten/dropped. In this case, the restarting node will get
                // excluded in the following attempt, if it still does not come up.
                if (badNodeIndex == -1) {
                    badNodeIndex = i;
                }
            }
        }
    }
    boolean doWaitForRestart() {
        return waitForRestart;
    }
}
private volatile boolean streamerClosed = false;
protected final BlockToWrite block; // its length is number of bytes acked
protected Token<BlockTokenIdentifier> accessToken;
private DataOutputStream blockStream;
private DataInputStream blockReplyStream;
private ResponseProcessor response = null;
private final Object nodesLock = new Object();
private volatile DatanodeInfo[] nodes = null; // list of targets for current block
private volatile StorageType[] storageTypes = null;
private volatile String[] storageIDs = null;
private final ErrorState errorState;
private volatile BlockConstructionStage stage; // block construction stage
protected long bytesSent = 0; // number of bytes that've been sent
private final boolean isLazyPersistFile;
private long lastPacket;
/** Nodes have been used in the pipeline before and have failed. */
private final List<DatanodeInfo> failed = new ArrayList<>();
/** Restarting Nodes */
private List<DatanodeInfo> restartingNodes = new ArrayList<>();
/** The times have retried to recover pipeline, for the same packet. */
private volatile int pipelineRecoveryCount = 0;
/** Has the current block been hflushed? */
private boolean isHflushed = false;
/** Append on an existing block? */
private final boolean isAppend;
private long currentSeqno = 0;
private long lastQueuedSeqno = -1;
private long lastAckedSeqno = -1;
private long bytesCurBlock = 0; // bytes written in current block
private final LastExceptionInStreamer lastException = new LastExceptionInStreamer();
private Socket s;
protected final DFSClient dfsClient;
protected final String src;
/** Only for DataTransferProtocol.writeBlock(..) */
final DataChecksum checksum4WriteBlock;
final Progressable progress;
protected final HdfsFileStatus stat;
// appending to existing partial block
private volatile boolean appendChunk = false;
// both dataQueue and ackQueue are protected by dataQueue lock
protected final LinkedList<DFSPacket> dataQueue = new LinkedList<>();
private final Map<Long, Long> packetSendTime = new HashMap<>();
private final LinkedList<DFSPacket> ackQueue = new LinkedList<>();
private final AtomicReference<CachingStrategy> cachingStrategy;
private final ByteArrayManager byteArrayManager;
//persist blocks on namenode
private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
private boolean failPacket = false;
private final long dfsclientSlowLogThresholdMs;
private long artificialSlowdown = 0;
// List of congested data nodes. The stream will back off if the DataNodes
// are congested
private final List<DatanodeInfo> congestedNodes = new ArrayList<>();
private final Map<DatanodeInfo, Integer> slowNodeMap = new HashMap<>();
private int congestionBackOffMeanTimeInMs;
private int congestionBackOffMaxTimeInMs;
private int lastCongestionBackoffTime;
private int maxPipelineRecoveryRetries;
private int markSlowNodeAsBadNodeThreshold;
protected final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
private final String[] favoredNodes;
private final EnumSet<AddBlockFlag> addBlockFlags;
// Common constructor: wires in the client, target block, checksum and
// configuration, and validates congestion-backoff settings (falling back to
// defaults when misconfigured).
private DataStreamer(HdfsFileStatus stat, ExtendedBlock block,
                     DFSClient dfsClient, String src,
                     Progressable progress, DataChecksum checksum,
                     AtomicReference<CachingStrategy> cachingStrategy,
                     ByteArrayManager byteArrayManage,
                     boolean isAppend, String[] favoredNodes,
                     EnumSet<AddBlockFlag> flags) {
    this.block = new BlockToWrite(block);
    this.dfsClient = dfsClient;
    this.src = src;
    this.progress = progress;
    this.stat = stat;
    this.checksum4WriteBlock = checksum;
    this.cachingStrategy = cachingStrategy;
    this.byteArrayManager = byteArrayManage;
    this.isLazyPersistFile = isLazyPersist(stat);
    this.isAppend = isAppend;
    this.favoredNodes = favoredNodes;
    final DfsClientConf conf = dfsClient.getConf();
    this.dfsclientSlowLogThresholdMs = conf.getSlowIoWarningThresholdMs();
    this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry());
    this.errorState = new ErrorState(conf.getDatanodeRestartTimeout());
    this.addBlockFlags = flags;
    this.maxPipelineRecoveryRetries = conf.getMaxPipelineRecoveryRetries();
    this.markSlowNodeAsBadNodeThreshold = conf.getMarkSlowNodeAsBadNodeThreshold();
    congestionBackOffMeanTimeInMs = dfsClient.getConfiguration().getInt(
        HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME,
        HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME_DEFAULT);
    congestionBackOffMaxTimeInMs = dfsClient.getConfiguration().getInt(
        HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME,
        HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME_DEFAULT);
    // Warn on each invalid setting individually, then (below) reset both to
    // defaults if any constraint is violated.
    if (congestionBackOffMeanTimeInMs <= 0) {
        LOG.warn("Configuration: {} is not appropriate, using default value: {}",
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME,
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME_DEFAULT);
    }
    if (congestionBackOffMaxTimeInMs <= 0) {
        LOG.warn("Configuration: {} is not appropriate, using default value: {}",
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME,
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME_DEFAULT);
    }
    if (congestionBackOffMaxTimeInMs < congestionBackOffMeanTimeInMs) {
        LOG.warn("Configuration: {} can not less than {}, using their default values.",
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME,
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME);
    }
    if (congestionBackOffMeanTimeInMs <= 0 || congestionBackOffMaxTimeInMs <= 0 ||
        congestionBackOffMaxTimeInMs < congestionBackOffMeanTimeInMs) {
        congestionBackOffMeanTimeInMs =
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MEAN_TIME_DEFAULT;
        congestionBackOffMaxTimeInMs =
            HdfsClientConfigKeys.DFS_CLIENT_CONGESTION_BACKOFF_MAX_TIME_DEFAULT;
    }
}
/**
 * Constructor for writing a new block (with tracing info); starts the
 * pipeline in the PIPELINE_SETUP_CREATE stage.
 */
DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
             String src, Progressable progress, DataChecksum checksum,
             AtomicReference<CachingStrategy> cachingStrategy,
             ByteArrayManager byteArrayManage, String[] favoredNodes,
             EnumSet<AddBlockFlag> flags) {
    this(stat, block, dfsClient, src, progress, checksum, cachingStrategy,
        byteArrayManage, false, favoredNodes, flags);
    stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
}
/**
 * Construct a data streamer for appending to the last partial block.
 * Initializes bytesSent to the block's current length and reuses the
 * block token from the located block.
 *
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 */
DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat, DFSClient dfsClient,
             String src, Progressable progress, DataChecksum checksum,
             AtomicReference<CachingStrategy> cachingStrategy,
             ByteArrayManager byteArrayManage) {
    this(stat, lastBlock.getBlock(), dfsClient, src, progress, checksum, cachingStrategy,
        byteArrayManage, true, null, null);
    stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
    bytesSent = block.getNumBytes();
    accessToken = lastBlock.getBlockToken();
}
/**
 * Set pipeline in construction.
 *
 * @param lastBlock the last block of a file
 * @throws IOException if the located block carries no datanode locations
 */
void setPipelineInConstruction(LocatedBlock lastBlock) throws IOException{
    // setup pipeline to append to the last block XXX retries??
    setPipeline(lastBlock);
    if (nodes.length < 1) {
        throw new IOException("Unable to retrieve blocks locations " +
            " for last block " + block + " of file " + src);
    }
}
/** Installs the block access token used for subsequent transfers. */
void setAccessToken(Token<BlockTokenIdentifier> t) {
    accessToken = t;
}
/** Convenience overload: extracts locations/types/ids from the located block. */
protected void setPipeline(LocatedBlock lb) {
    DatanodeInfo[] locations = lb.getLocations();
    StorageType[] types = lb.getStorageTypes();
    String[] ids = lb.getStorageIDs();
    setPipeline(locations, types, ids);
}
// Replaces the current pipeline. Writes to "nodes" are guarded by nodesLock
// (readers like waitForAckedSeqno take the same lock); the other two fields
// are volatile and written without it.
protected void setPipeline(DatanodeInfo[] newNodes, StorageType[] newStorageTypes,
        String[] newStorageIDs) {
    synchronized (nodesLock) {
        this.nodes = newNodes;
    }
    this.storageTypes = newStorageTypes;
    this.storageIDs = newStorageIDs;
}
/**
 * Initialize for data streaming: names the thread after the current block,
 * starts the ack-reading ResponseProcessor, and enters DATA_STREAMING.
 */
private void initDataStreaming() {
    this.setName("DataStreamer for file " + src +
        " block " + block);
    if (LOG.isDebugEnabled()) {
        LOG.debug("nodes {} storageTypes {} storageIDs {}",
            Arrays.toString(nodes),
            Arrays.toString(storageTypes),
            Arrays.toString(storageIDs));
    }
    response = new ResponseProcessor(nodes);
    response.start();
    stage = BlockConstructionStage.DATA_STREAMING;
    // Record the start so heartbeat scheduling has a baseline.
    lastPacket = Time.monotonicNow();
}
// Tears down streaming for the finished block and returns to the
// PIPELINE_SETUP_CREATE stage. Order matters: stop the ack reader before
// closing the streams it reads from.
protected void endBlock() {
    LOG.debug("Closing old block {}", block);
    this.setName("DataStreamer for file " + src);
    closeResponder();
    closeStream();
    setPipeline(null, null, null);
    stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
}
/** True when the streamer loop must stop: closed, errored, or client gone. */
private boolean shouldStop() {
    if (streamerClosed) {
        return true;
    }
    return errorState.hasError() || !dfsClient.clientRunning;
}
/*
 * streamer thread is the only thread that opens streams to datanode,
 * and closes them. Any error recovery is also done by this thread.
 *
 * Main loop: wait for a packet (sending heartbeats while idle), set up the
 * pipeline if needed, move the packet from dataQueue to ackQueue, write it
 * to the first datanode, and finish the block when its last packet is acked.
 */
@Override
public void work() {
    TraceScope scope = null;
    while (!streamerClosed && dfsClient.clientRunning) {
        // if the Responder encountered an error, shutdown Responder
        if (errorState.hasError()) {
            closeResponder();
        }
        DFSPacket one;
        try {
            // process datanode IO errors if any
            boolean doSleep = processDatanodeOrExternalError();
            synchronized (dataQueue) {
                // wait for a packet to be sent.
                while ((!shouldStop() && dataQueue.isEmpty()) || doSleep) {
                    long timeout = 1000;
                    if (stage == BlockConstructionStage.DATA_STREAMING) {
                        timeout = sendHeartbeat();
                    }
                    try {
                        dataQueue.wait(timeout);
                    } catch (InterruptedException e) {
                        LOG.debug("Thread interrupted", e);
                    }
                    doSleep = false;
                }
                if (shouldStop()) {
                    continue;
                }
                // get packet to be sent.
                one = dataQueue.getFirst(); // regular data packet
                SpanContext[] parents = one.getTraceParents();
                if (parents != null && parents.length > 0) {
                    // The original code stored multiple parents in the DFSPacket, and
                    // use them ALL here when creating a new Span. We only use the
                    // last one FOR NOW. Moreover, we don't activate the Span for now.
                    scope = dfsClient.getTracer().
                        newScope("dataStreamer", parents[0], false);
                    //scope.getSpan().setParents(parents);
                }
            }
            // The DataStreamer has to release the dataQueue before sleeping,
            // otherwise it will cause the ResponseProcessor to accept the ACK delay.
            try {
                backOffIfNecessary();
            } catch (InterruptedException e) {
                LOG.debug("Thread interrupted", e);
            }
            // get new block from namenode.
            LOG.debug("stage={}, {}", stage, this);
            if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
                LOG.debug("Allocating new block: {}", this);
                setupPipelineForCreate();
                initDataStreaming();
            } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
                LOG.debug("Append to block {}", block);
                setupPipelineForAppendOrRecovery();
                if (streamerClosed) {
                    continue;
                }
                initDataStreaming();
            }
            long lastByteOffsetInBlock = one.getLastByteOffsetBlock();
            if (lastByteOffsetInBlock > stat.getBlockSize()) {
                throw new IOException("BlockSize " + stat.getBlockSize() +
                    " < lastByteOffsetInBlock, " + this + ", " + one);
            }
            if (one.isLastPacketInBlock()) {
                // wait for all data packets have been successfully acked
                waitForAllAcks();
                if(shouldStop()) {
                    continue;
                }
                stage = BlockConstructionStage.PIPELINE_CLOSE;
            }
            // send the packet
            SpanContext spanContext = null;
            synchronized (dataQueue) {
                // move packet from dataQueue to ackQueue
                if (!one.isHeartbeatPacket()) {
                    if (scope != null) {
                        one.setSpan(scope.span());
                        spanContext = scope.span().getContext();
                        scope.close();
                    }
                    scope = null;
                    dataQueue.removeFirst();
                    ackQueue.addLast(one);
                    packetSendTime.put(one.getSeqno(), Time.monotonicNowNanos());
                    dataQueue.notifyAll();
                }
            }
            LOG.debug("{} sending {}", this, one);
            // write out data to remote datanode
            try (TraceScope ignored = dfsClient.getTracer().
                newScope("DataStreamer#writeTo", spanContext)) {
                sendPacket(one);
            } catch (IOException e) {
                // HDFS-3398 treat primary DN is down since client is unable to
                // write to primary DN. If a failed or restarting node has already
                // been recorded by the responder, the following call will have no
                // effect. Pipeline recovery can handle only one node error at a
                // time. If the primary node fails again during the recovery, it
                // will be taken out then.
                errorState.markFirstNodeIfNotMarked();
                throw e;
            }
            // update bytesSent
            long tmpBytesSent = one.getLastByteOffsetBlock();
            if (bytesSent < tmpBytesSent) {
                bytesSent = tmpBytesSent;
            }
            if (shouldStop()) {
                continue;
            }
            // Is this block full?
            if (one.isLastPacketInBlock()) {
                // wait for the close packet has been acked
                try {
                    waitForAllAcks();
                } catch (IOException ioe) {
                    // No need to do a close recovery if the last packet was acked.
                    // i.e. ackQueue is empty. waitForAllAcks() can get an exception
                    // (e.g. connection reset) while sending a heartbeat packet,
                    // if the DN sends the final ack and closes the connection.
                    synchronized (dataQueue) {
                        if (!ackQueue.isEmpty()) {
                            throw ioe;
                        }
                    }
                }
                if (shouldStop()) {
                    continue;
                }
                endBlock();
            }
            if (progress != null) { progress.progress(); }
            // This is used by unit test to trigger race conditions.
            if (artificialSlowdown != 0 && dfsClient.clientRunning) {
                Thread.sleep(artificialSlowdown);
            }
        } catch (Throwable e) {
            // Log warning if there was a real error.
            if (!errorState.isRestartingNode()) {
                // Since their messages are descriptive enough, do not always
                // log a verbose stack-trace WARN for quota exceptions.
                if (e instanceof QuotaExceededException) {
                    LOG.debug("DataStreamer Quota Exception", e);
                } else {
                    LOG.warn("DataStreamer Exception", e);
                }
            }
            lastException.set(e);
            assert !(e instanceof NullPointerException);
            errorState.setInternalError();
            if (!errorState.isNodeMarked()) {
                // Not a datanode issue
                streamerClosed = true;
            }
        } finally {
            if (scope != null) {
                scope.close();
                scope = null;
            }
        }
    }
    closeInternal();
}
// Blocks until the ackQueue drains (all sent packets acked) or the streamer
// must stop; keeps the pipeline alive with heartbeats while waiting.
private void waitForAllAcks() throws IOException {
    // wait until all data packets have been successfully acked
    synchronized (dataQueue) {
        while (!shouldStop() && !ackQueue.isEmpty()) {
            try {
                // wait for acks to arrive from datanodes
                dataQueue.wait(sendHeartbeat());
            } catch (InterruptedException e) {
                LOG.debug("Thread interrupted ", e);
            }
        }
    }
}
// Writes one packet to the first datanode and flushes; on I/O failure the
// first node is marked suspect before rethrowing (see HDFS-3398).
private void sendPacket(DFSPacket packet) throws IOException {
    // write out data to remote datanode
    try {
        packet.writeTo(blockStream);
        blockStream.flush();
    } catch (IOException e) {
        // HDFS-3398 treat primary DN is down since client is unable to
        // write to primary DN. If a failed or restarting node has already
        // been recorded by the responder, the following call will have no
        // effect. Pipeline recovery can handle only one node error at a
        // time. If the primary node fails again during the recovery, it
        // will be taken out then.
        errorState.markFirstNodeIfNotMarked();
        throw e;
    }
    // Timestamp of the last successful write; drives heartbeat scheduling.
    lastPacket = Time.monotonicNow();
}
// Sends a heartbeat packet if the interval (half the socket timeout) has
// elapsed since the last packet; returns the remaining wait time in ms.
private long sendHeartbeat() throws IOException {
    final long heartbeatInterval = dfsClient.getConf().getSocketTimeout()/2;
    long timeout = heartbeatInterval - (Time.monotonicNow() - lastPacket);
    if (timeout <= 0) {
        sendPacket(createHeartbeatPacket());
        timeout = heartbeatInterval;
    }
    return timeout;
}
/**
 * Final teardown performed by the streamer thread itself: stops the
 * responder, closes the streams and socket, marks the streamer closed,
 * releases buffered packets, and wakes any threads blocked on
 * {@code dataQueue} so they can observe the closed state.
 */
private void closeInternal() {
  closeResponder(); // close and join
  closeStream();
  streamerClosed = true;
  release();
  synchronized (dataQueue) {
    dataQueue.notifyAll();
  }
}
/**
 * Releases the buffers of all {@link DFSPacket}s still held in
 * {@code dataQueue} and {@code ackQueue}, returning their byte arrays to
 * the {@code byteArrayManager} pool. Called once the streamer is shutting
 * down and the packets will never be (re)sent.
 */
void release() {
  synchronized (dataQueue) {
    releaseBuffer(dataQueue, byteArrayManager);
    releaseBuffer(ackQueue, byteArrayManager);
  }
}
/**
 * Blocks the calling thread until the packet with the given sequence
 * number has been acknowledged by the whole pipeline (i.e.
 * {@code lastAckedSeqno >= seqno}), the streamer closes, or the datanode
 * write timeout elapses.
 *
 * <p>Progress is polled in 1-second waits on {@code dataQueue}; the
 * responder thread notifies on that monitor when acks arrive. After the
 * wait completes, a slow-operation warning is logged if the total elapsed
 * time exceeded {@code dfsclientSlowLogThresholdMs}.
 *
 * @param seqno the sequence number to wait for
 * @throws InterruptedIOException if interrupted, or if no ack arrives
 *     within the datanode write timeout
 * @throws IOException if the streamer was closed with a pending error
 *     (surfaced via {@code checkClosed()})
 */
void waitForAckedSeqno(long seqno) throws IOException {
  try (TraceScope ignored = dfsClient.getTracer().
      newScope("waitForAckedSeqno")) {
    LOG.debug("{} waiting for ack for: {}", this, seqno);
    int dnodes;
    synchronized (nodesLock) {
      // When the pipeline is not yet known, fall back to 3 nodes
      // (presumably the default replication factor — TODO confirm) so a
      // reasonable write timeout can still be computed.
      dnodes = nodes != null ? nodes.length : 3;
    }
    // Timeout scales with pipeline length: each extra datanode adds
    // forwarding latency.
    int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes);
    long begin = Time.monotonicNowNanos();
    try {
      synchronized (dataQueue) {
        while (!streamerClosed) {
          // Surface any terminal streamer error before (re)waiting.
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            // The responder notifies on dataQueue when an ack arrives;
            // the 1s timeout bounds how stale the timeout check can be.
            dataQueue.wait(1000);
            long duration = Time.monotonicNowNanos() - begin;
            if (TimeUnit.NANOSECONDS.toMillis(duration) > writeTimeout) {
              LOG.error("No ack received, took {}ms (threshold={}ms). "
                  + "File being written: {}, block: {}, "
                  + "Write pipeline datanodes: {}.",
                  TimeUnit.NANOSECONDS.toMillis(duration), writeTimeout, src, block, nodes);
              throw new InterruptedIOException("No ack received after " +
                  TimeUnit.NANOSECONDS.toSeconds(duration) + "s and a timeout of " +
                  writeTimeout / 1000 + "s");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      // Re-check after the loop: the streamer may have closed with an
      // error rather than acking seqno.
      checkClosed();
    } catch (ClosedChannelException cce) {
      // Stream was closed normally underneath us; nothing to surface.
      LOG.debug("Closed channel exception", cce);
    }
    long duration = Time.monotonicNowNanos() - begin;
    if (TimeUnit.NANOSECONDS.toMillis(duration) > dfsclientSlowLogThresholdMs) {
      LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
          + " written: {}, block: {}, Write pipeline datanodes: {}.",
          TimeUnit.NANOSECONDS.toMillis(duration), dfsclientSlowLogThresholdMs,
          src, block, nodes);
    }
  }
}
/**
 * Enqueues a packet onto {@code dataQueue}, first waiting (if necessary)
 * until the combined size of {@code dataQueue} and {@code ackQueue} drops
 * below the configured maximum outstanding packet count.
 *
 * <p>If the waiting thread is interrupted, the packet is still queued —
 * possibly overrunning the limit slightly — because callers rely on the
 * invariant that a full current packet is queued before the next
 * writeChunk (see the inline comment below). The interrupt flag is
 * restored in that case.
 *
 * @param packet the packet to queue
 * @throws IOException if the streamer has already closed with an error
 *     (surfaced via {@code checkClosed()})
 */
void waitAndQueuePacket(DFSPacket packet) throws IOException {
  synchronized (dataQueue) {
    try {
      // If queue is full, then wait till we have enough space
      boolean firstWait = true;
      try {
        while (!streamerClosed && dataQueue.size() + ackQueue.size() >
            dfsClient.getConf().getWriteMaxPackets()) {
          if (firstWait) {
            // Annotate the trace span only once per blocking episode.
            Span span = Tracer.getCurrentSpan();
            if (span != null) {
              span.addTimelineAnnotation("dataQueue.wait");
            }
            firstWait = false;
          }
          try {
            // Woken by the streamer/responder when queue space frees up.
            dataQueue.wait();
          } catch (InterruptedException e) {
            // If we get interrupted while waiting to queue data, we still need to get rid
            // of the current packet. This is because we have an invariant that if
            // currentPacket gets full, it will get queued before the next writeChunk.
            //
            // Rather than wait around for space in the queue, we should instead try to
            // return to the caller as soon as possible, even though we slightly overrun
            // the MAX_PACKETS length.
            Thread.currentThread().interrupt();
            break;
          }
        }
      } finally {
        // Close out the trace annotation iff we actually waited.
        Span span = Tracer.getCurrentSpan();
        if ((span != null) && (!firstWait)) {
          span.addTimelineAnnotation("end.wait");
        }
      }
      checkClosed();
      queuePacket(packet);
    } catch (ClosedChannelException cce) {
      // Stream closed normally while we were waiting; drop the packet.
      LOG.debug("Closed channel exception", cce);
    }
  }
}
/**
 * Closes the streamer. Should be called only by an external thread, and
 * only after all data to be sent has been flushed to the datanodes.
 *
 * <p>Marks the streamer closed and wakes any threads blocked on
 * {@code dataQueue} so they observe the closed state. If {@code force} is
 * true, the streamer thread itself is also interrupted.
 *
 * @param force whether to interrupt the streamer thread as well
 */
void close(boolean force) {
  streamerClosed = true;
  synchronized (dataQueue) {
    dataQueue.notifyAll();
  }
  if (force) {
    this.interrupt();
  }
}
/**
 * Marks the streamer as closed without waking waiters or interrupting the
 * thread (unlike {@link #close(boolean)}).
 */
void setStreamerAsClosed() {
  streamerClosed = true;
}
/**
 * Throws the recorded last exception (wrapped for close) if the streamer
 * has been closed; otherwise returns normally.
 *
 * @throws IOException the failure that closed the streamer
 */
private void checkClosed() throws IOException {
  if (streamerClosed) {
    lastException.throwException4Close();
  }
}
/**
 * Closes the responder (ack-processing) thread if one is running and
 * joins it, always clearing the {@code response} reference afterwards.
 * If the join is interrupted, the interrupt flag is restored.
 */
private void closeResponder() {
  if (response != null) {
    try {
      response.close();
      response.join();
    } catch (InterruptedException e) {
      LOG.debug("Thread interrupted", e);
      Thread.currentThread().interrupt();
    } finally {
      response = null;
    }
  }
}
/**
 * Closes the block output stream, the block reply stream, and the socket,
 * in that order. Each close is attempted even if an earlier one failed;
 * failures are aggregated into a single {@link MultipleIOException} which
 * is recorded as the streamer's last exception rather than thrown. All
 * three references are nulled regardless of outcome.
 */
void closeStream() {
  final MultipleIOException.Builder b = new MultipleIOException.Builder();
  if (blockStream != null) {
    try {
      blockStream.close();
    } catch (IOException e) {
      b.add(e);
    } finally {
      blockStream = null;
    }
  }
  if (blockReplyStream != null) {
    try {
      blockReplyStream.close();
    } catch (IOException e) {
      b.add(e);
    } finally {
      blockReplyStream = null;
    }
  }
  if (null != s) {
    try {
      s.close();
    } catch (IOException e) {
      b.add(e);
    } finally {
      s = null;
    }
  }
  // build() returns null when no close failed; only record a real error.
  final IOException ioe = b.build();
  if (ioe != null) {
    lastException.set(ioe);
  }
}
/**
 * Decides whether it is worth waiting for the datanode at {@code index}
 * to restart, rather than immediately excluding it from the pipeline.
 *
 * <p>Waiting is worthwhile when the node is the only one in the pipeline
 * (there is no alternative) or when it is a local node. The fault
 * injector can force the "remote" answer so tests skip the wait.
 *
 * @param index position of the node in the pipeline
 * @return true if the caller should wait for the node to restart
 */
boolean shouldWaitForRestart(int index) {
  // A single-node pipeline has no replacement candidates, so waiting is
  // the only option.
  if (nodes.length == 1) {
    return true;
  }
  // Test hook: treat every node as remote when the skip is enabled.
  if (DFSClientFaultInjector.get().skipRollingRestartWait()) {
    return false;
  }
  // Otherwise wait only for local nodes.
  InetAddress resolved;
  try {
    resolved = InetAddress.getByName(nodes[index].getIpAddr());
  } catch (java.net.UnknownHostException e) {
    // getIpAddr() yields an IP literal, so resolution should never fail.
    assert false;
    resolved = null;
  }
  return resolved != null && NetUtils.isLocalAddress(resolved);
}
//
// Processes responses from the datanodes. A packet is removed
// from the ackQueue when its response arrives.
//
private
|
ErrorState
|
java
|
micronaut-projects__micronaut-core
|
http-netty/src/main/java/io/micronaut/http/netty/channel/loom/LoomBranchSupport.java
|
{
"start": 7863,
"end": 8069
}
|
class ____ implements Runnable {
VirtualThreadSchedulerProxy proxy;
@Override
public void run() {
throw new UnsupportedOperationException();
}
}
|
UnwrapClass
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/MockitoSession.java
|
{
"start": 1959,
"end": 4515
}
|
class ____ {
* @Mock Foo foo;
*
* //Keeping session object in a field so that we can complete session in 'tear down' method.
* //It is recommended to hide the session object, along with 'setup' and 'tear down' methods in a base class / runner.
* //Keep in mind that you can use Mockito's JUnit runner or rule instead of MockitoSession and get the same behavior.
* MockitoSession mockito;
*
* @Before public void setup() {
* //initialize session to start mocking
* mockito = Mockito.mockitoSession()
* .initMocks(this)
* .strictness(Strictness.STRICT_STUBS)
* .startMocking();
* }
*
* @After public void tearDown() {
* //It is necessary to finish the session so that Mockito
* // can detect incorrect stubbing and validate Mockito usage
* //'finishMocking()' is intended to be used in your test framework's 'tear down' method.
* mockito.finishMocking();
* }
*
* // test methods ...
* }
* </code></pre>
*
* <p>
* Why to use {@code MockitoSession}?
* What's the difference between {@code MockitoSession}, {@link MockitoJUnitRunner}, {@link MockitoRule}
* and traditional {@link MockitoAnnotations#openMocks(Object)}?
* <p>
* Great questions!
* There is no need to use {@code MockitoSession} if you already use {@link MockitoJUnitRunner} or {@link MockitoRule}.
* If you are JUnit user who does not leverage Mockito rule or runner we strongly recommend to do so.
* Both the runner and the rule support strict stubbing which can really help driving cleaner tests.
* See {@link StrictStubs MockitoJUnitRunner.StrictStubs} and {@link MockitoRule#strictness(Strictness)}.
* If you cannot use Mockito's JUnit support {@code MockitoSession} exactly is for you!
* You can automatically take advantage of strict stubbing ({@link Strictness}),
* automatic initialization of annotated mocks ({@link MockitoAnnotations}),
* and extra validation ({@link Mockito#validateMockitoUsage()}).
* If you use Mockito annotations with {@link MockitoAnnotations#openMocks(Object)}
* but not Mockito runner/rule please try out Mockito's JUnit support (runner or rule) or
* start using {@code MockitoSession}. You'll get cleaner tests and better productivity.
* <p>
* Mockito team would really appreciate feedback about {@code MockitoSession} API.
* Help us out by commenting at <a href="https://github.com/mockito/mockito/issues/857">issue 857</a>.
*
* @since 2.7.0
*/
@NotExtensible
public
|
ExampleTest
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RTransferQueueReactive.java
|
{
"start": 1609,
"end": 2488
}
|
class ____ the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
Mono<Void> transfer(V e);
/**
* Transfers the element to waiting consumer
* which invoked {@link #take} or {@link #poll} method
* at the moment of transfer.
* Waits up to defined <code>timeout</code> if necessary for a consumer.
*
* @param e the element to transfer
* @param timeout the maximum time to wait
* @param unit the time unit
* @return <code>true</code> if the element was transferred and <code>false</code>
* otherwise
*/
Mono<Boolean> tryTransfer(V e, long timeout, TimeUnit unit);
}
|
of
|
java
|
quarkusio__quarkus
|
core/deployment/src/test/java/io/quarkus/deployment/runnerjar/ExcludedArtifactsTest.java
|
{
"start": 569,
"end": 3781
}
|
class ____ extends BootstrapFromOriginalJarTestBase {
@Override
protected TsArtifact composeApplication() {
final TsArtifact extADep = TsArtifact.jar("ext-a-dep");
// excluded in the extension descriptor addToExpectedLib(extADep);
final TsArtifact depC = TsArtifact.jar("dep-c");
addToExpectedLib(depC);
extADep.addDependency(depC);
final TsArtifact depE = TsArtifact.jar("org.banned", "dep-e", "1");
depC.addDependency(depE);
final TsArtifact depG = TsArtifact.jar("dep-g");
depE.addDependency(depG);
addToExpectedLib(depG);
final TsArtifact extADeploymentDep = TsArtifact.jar("ext-a-deployment-dep");
final TsQuarkusExt extA = new TsQuarkusExt("ext-a");
addToExpectedLib(extA.getRuntime());
extA.getRuntime().addDependency(extADep);
extA.getDeployment().addDependency(extADeploymentDep);
extA.getDescriptor().set("excluded-artifacts",
extADep.getKey().toString() + ",org.banned*");
final TsArtifact depB = TsArtifact.jar("dep-b");
addToExpectedLib(depB);
final TsArtifact depD = TsArtifact.jar("org.banned.too", "dep-d", "1");
depB.addDependency(depD);
final TsArtifact depF = TsArtifact.jar("org.banned", "dep-f", "1");
depB.addDependency(depF);
return TsArtifact.jar("app")
.addManagedDependency(platformDescriptor())
.addManagedDependency(platformProperties())
.addDependency(extA)
.addDependency(new TsDependency(depB));
}
@Override
protected void assertAppModel(ApplicationModel model) throws Exception {
Set<Dependency> expected = new HashSet<>();
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "ext-a-deployment", "1"),
DependencyFlags.DEPLOYMENT_CP));
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "ext-a-deployment-dep", "1"),
DependencyFlags.DEPLOYMENT_CP));
assertEquals(expected, getDeploymentOnlyDeps(model));
expected = new HashSet<>();
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "ext-a", "1"),
DependencyFlags.RUNTIME_CP, DependencyFlags.DEPLOYMENT_CP, DependencyFlags.RUNTIME_EXTENSION_ARTIFACT,
DependencyFlags.DIRECT, DependencyFlags.TOP_LEVEL_RUNTIME_EXTENSION_ARTIFACT));
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "dep-c", "1"),
DependencyFlags.RUNTIME_CP, DependencyFlags.DEPLOYMENT_CP));
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "dep-b", "1"),
DependencyFlags.RUNTIME_CP, DependencyFlags.DEPLOYMENT_CP, DependencyFlags.DIRECT));
expected.add(new ArtifactDependency(ArtifactCoords.jar("io.quarkus.bootstrap.test", "dep-g", "1"),
DependencyFlags.RUNTIME_CP, DependencyFlags.DEPLOYMENT_CP));
assertEquals(expected, getDependenciesWithFlag(model, DependencyFlags.RUNTIME_CP));
}
}
|
ExcludedArtifactsTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/client/internal/Requests.java
|
{
"start": 616,
"end": 905
}
|
class ____ {
/**
* The default content type to use to generate source documents when indexing.
* TODO: remove this, we shouldn't have mutable public static fields that we use in prod code
*/
public static XContentType INDEX_CONTENT_TYPE = XContentType.JSON;
}
|
Requests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
|
{
"start": 1015,
"end": 2780
}
|
class ____ {
static final String POOL_A = "blockpool-a";
static final String POOL_B = "blockpool-b";
static final Block BLOCK_1_GS1 = new Block(1L, 100L, 1L);
static final Block BLOCK_1_GS2 = new Block(1L, 100L, 2L);
static final Block BLOCK_2_GS1 = new Block(2L, 100L, 1L);
@Test
public void testEquals() {
// Same block -> equal
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_1_GS1));
// Different pools, same block id -> not equal
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_B, BLOCK_1_GS1));
// Same pool, different block id -> not equal
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_2_GS1));
// Same block, different genstamps -> equal
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_1_GS2));
}
@Test
public void testHashcode() {
// Different pools, same block id -> different hashcode
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_B, BLOCK_1_GS1).hashCode());
// Same pool, different block id -> different hashcode
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_A, BLOCK_2_GS1).hashCode());
// Same block -> same hashcode
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode());
}
private static void assertNotEquals(Object a, Object b) {
assertFalse(a.equals(b), "expected not equal: '" + a + "' and '" + b + "'");
}
}
|
TestExtendedBlock
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestCompatibilityChecker.java
|
{
"start": 542,
"end": 1347
}
|
class ____ {
private RestCompatibilityChecker() {}
public static <T> void checkAndSetDeprecatedParam(
String deprecatedParam,
String newParam,
RestApiVersion compatVersion,
RestRequest restRequest,
BiFunction<RestRequest, String, T> extractor,
Consumer<T> setter
) {
final T paramValue;
if (restRequest.getRestApiVersion() == compatVersion && restRequest.hasParam(deprecatedParam)) {
LoggingDeprecationHandler.INSTANCE.logRenamedField(null, () -> null, deprecatedParam, newParam, true);
paramValue = extractor.apply(restRequest, deprecatedParam);
} else {
paramValue = extractor.apply(restRequest, newParam);
}
setter.accept(paramValue);
}
}
|
RestCompatibilityChecker
|
java
|
quarkusio__quarkus
|
extensions/reactive-mysql-client/deployment/src/test/java/io/quarkus/reactive/mysql/client/LocalhostMySQLPoolCreator.java
|
{
"start": 128,
"end": 404
}
|
class ____ implements MySQLPoolCreator {
@Override
public Pool create(Input input) {
return Pool.pool(input.vertx(), input.mySQLConnectOptionsList().get(0).setHost("localhost").setPort(3308),
input.poolOptions());
}
}
|
LocalhostMySQLPoolCreator
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
|
{
"start": 132804,
"end": 132902
}
|
class ____ that will be used to load the various objects.
*
* @param classLoader the new
|
loader
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/dto/bulk/BatchStateEnum.java
|
{
"start": 1606,
"end": 2325
}
|
enum ____ {
@XmlEnumValue("Queued")
QUEUED("Queued"),
@XmlEnumValue("InProgress")
IN_PROGRESS("InProgress"),
@XmlEnumValue("Completed")
COMPLETED("Completed"),
@XmlEnumValue("Failed")
FAILED("Failed"),
@XmlEnumValue("NotProcessed")
NOT_PROCESSED("NotProcessed");
private final String value;
BatchStateEnum(String v) {
value = v;
}
public String value() {
return value;
}
public static BatchStateEnum fromValue(String v) {
for (BatchStateEnum c : BatchStateEnum.values()) {
if (c.value.equals(v)) {
return c;
}
}
throw new IllegalArgumentException(v);
}
}
|
BatchStateEnum
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableJoin.java
|
{
"start": 2791,
"end": 11789
}
|
class ____<TLeft, TRight, TLeftEnd, TRightEnd, R>
extends AtomicInteger implements Disposable, JoinSupport {
private static final long serialVersionUID = -6071216598687999801L;
final Observer<? super R> downstream;
final SpscLinkedArrayQueue<Object> queue;
final CompositeDisposable disposables;
final Map<Integer, TLeft> lefts;
final Map<Integer, TRight> rights;
final AtomicReference<Throwable> error;
final Function<? super TLeft, ? extends ObservableSource<TLeftEnd>> leftEnd;
final Function<? super TRight, ? extends ObservableSource<TRightEnd>> rightEnd;
final BiFunction<? super TLeft, ? super TRight, ? extends R> resultSelector;
final AtomicInteger active;
int leftIndex;
int rightIndex;
volatile boolean cancelled;
static final Integer LEFT_VALUE = 1;
static final Integer RIGHT_VALUE = 2;
static final Integer LEFT_CLOSE = 3;
static final Integer RIGHT_CLOSE = 4;
JoinDisposable(Observer<? super R> actual,
Function<? super TLeft, ? extends ObservableSource<TLeftEnd>> leftEnd,
Function<? super TRight, ? extends ObservableSource<TRightEnd>> rightEnd,
BiFunction<? super TLeft, ? super TRight, ? extends R> resultSelector) {
this.downstream = actual;
this.disposables = new CompositeDisposable();
this.queue = new SpscLinkedArrayQueue<>(bufferSize());
this.lefts = new LinkedHashMap<>();
this.rights = new LinkedHashMap<>();
this.error = new AtomicReference<>();
this.leftEnd = leftEnd;
this.rightEnd = rightEnd;
this.resultSelector = resultSelector;
this.active = new AtomicInteger(2);
}
@Override
public void dispose() {
if (!cancelled) {
cancelled = true;
cancelAll();
if (getAndIncrement() == 0) {
queue.clear();
}
}
}
@Override
public boolean isDisposed() {
return cancelled;
}
void cancelAll() {
disposables.dispose();
}
void errorAll(Observer<?> a) {
Throwable ex = ExceptionHelper.terminate(error);
lefts.clear();
rights.clear();
a.onError(ex);
}
void fail(Throwable exc, Observer<?> a, SpscLinkedArrayQueue<?> q) {
Exceptions.throwIfFatal(exc);
ExceptionHelper.addThrowable(error, exc);
q.clear();
cancelAll();
errorAll(a);
}
void drain() {
if (getAndIncrement() != 0) {
return;
}
int missed = 1;
SpscLinkedArrayQueue<Object> q = queue;
Observer<? super R> a = downstream;
for (;;) {
for (;;) {
if (cancelled) {
q.clear();
return;
}
Throwable ex = error.get();
if (ex != null) {
q.clear();
cancelAll();
errorAll(a);
return;
}
boolean d = active.get() == 0;
Integer mode = (Integer)q.poll();
boolean empty = mode == null;
if (d && empty) {
lefts.clear();
rights.clear();
disposables.dispose();
a.onComplete();
return;
}
if (empty) {
break;
}
Object val = q.poll();
if (mode == LEFT_VALUE) {
@SuppressWarnings("unchecked")
TLeft left = (TLeft)val;
int idx = leftIndex++;
lefts.put(idx, left);
ObservableSource<TLeftEnd> p;
try {
p = Objects.requireNonNull(leftEnd.apply(left), "The leftEnd returned a null ObservableSource");
} catch (Throwable exc) {
fail(exc, a, q);
return;
}
LeftRightEndObserver end = new LeftRightEndObserver(this, true, idx);
disposables.add(end);
p.subscribe(end);
ex = error.get();
if (ex != null) {
q.clear();
cancelAll();
errorAll(a);
return;
}
for (TRight right : rights.values()) {
R w;
try {
w = Objects.requireNonNull(resultSelector.apply(left, right), "The resultSelector returned a null value");
} catch (Throwable exc) {
fail(exc, a, q);
return;
}
a.onNext(w);
}
}
else if (mode == RIGHT_VALUE) {
@SuppressWarnings("unchecked")
TRight right = (TRight)val;
int idx = rightIndex++;
rights.put(idx, right);
ObservableSource<TRightEnd> p;
try {
p = Objects.requireNonNull(rightEnd.apply(right), "The rightEnd returned a null ObservableSource");
} catch (Throwable exc) {
fail(exc, a, q);
return;
}
LeftRightEndObserver end = new LeftRightEndObserver(this, false, idx);
disposables.add(end);
p.subscribe(end);
ex = error.get();
if (ex != null) {
q.clear();
cancelAll();
errorAll(a);
return;
}
for (TLeft left : lefts.values()) {
R w;
try {
w = Objects.requireNonNull(resultSelector.apply(left, right), "The resultSelector returned a null value");
} catch (Throwable exc) {
fail(exc, a, q);
return;
}
a.onNext(w);
}
}
else if (mode == LEFT_CLOSE) {
LeftRightEndObserver end = (LeftRightEndObserver)val;
lefts.remove(end.index);
disposables.remove(end);
} else {
LeftRightEndObserver end = (LeftRightEndObserver)val;
rights.remove(end.index);
disposables.remove(end);
}
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
@Override
public void innerError(Throwable ex) {
if (ExceptionHelper.addThrowable(error, ex)) {
active.decrementAndGet();
drain();
} else {
RxJavaPlugins.onError(ex);
}
}
@Override
public void innerComplete(LeftRightObserver sender) {
disposables.delete(sender);
active.decrementAndGet();
drain();
}
@Override
public void innerValue(boolean isLeft, Object o) {
synchronized (this) {
queue.offer(isLeft ? LEFT_VALUE : RIGHT_VALUE, o);
}
drain();
}
@Override
public void innerClose(boolean isLeft, LeftRightEndObserver index) {
synchronized (this) {
queue.offer(isLeft ? LEFT_CLOSE : RIGHT_CLOSE, index);
}
drain();
}
@Override
public void innerCloseError(Throwable ex) {
if (ExceptionHelper.addThrowable(error, ex)) {
drain();
} else {
RxJavaPlugins.onError(ex);
}
}
}
}
|
JoinDisposable
|
java
|
quarkusio__quarkus
|
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/BaseOpenshiftWithRemoteRegistry.java
|
{
"start": 382,
"end": 2410
}
|
class ____ extends BaseWithRemoteRegistry {
public void assertGeneratedResources(String name, String tag, Path buildDir) throws IOException {
List<HasMetadata> resourceList = getResources("openshift", buildDir);
assertGeneratedResources(name, tag, resourceList);
}
public void assertGeneratedResources(String name, String tag, List<HasMetadata> resourceList) throws IOException {
super.assertGeneratedResources(name, resourceList);
assertThat(resourceList)
.filteredOn(
h -> "Secret".equals(h.getKind()) && h.getMetadata().getName().equals(name + "-push-secret"))
.singleElement().satisfies(h -> {
assertThat(h).isInstanceOfSatisfying(Secret.class, s -> {
assertThat(s.getType()).isEqualTo("kubernetes.io/dockerconfigjson");
assertThat(s.getData()).containsKey(".dockerconfigjson");
});
});
assertThat(resourceList).filteredOn(h -> "BuildConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(name);
});
assertThat(h).isInstanceOfSatisfying(BuildConfig.class, b -> {
assertThat(b.getSpec().getOutput().getTo().getKind()).isEqualTo("DockerImage");
assertThat(b.getSpec().getOutput().getTo().getName()).isEqualTo("quay.io/user/" + name + ":" + tag);
});
});
assertThat(resourceList)
.filteredOn(h -> "ImageStream".equals(h.getKind()) && h.getMetadata().getName().equals(name))
.singleElement().satisfies(h -> {
assertThat(h).isInstanceOfSatisfying(ImageStream.class, i -> {
assertThat(i.getSpec().getDockerImageRepository()).isEqualTo("quay.io/user/" + name);
});
});
}
}
|
BaseOpenshiftWithRemoteRegistry
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2103PluginExecutionInheritanceTest.java
|
{
"start": 1150,
"end": 2132
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that the plugin-level inherited flag can be overridden by the execution-level flag.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-2103");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("child-1/target");
verifier.deleteDirectory("child-2/target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> execs = verifier.loadLines("child-1/target/log.txt");
assertEquals(Arrays.asList(new String[] {"inherited"}), execs);
execs = verifier.loadLines("child-2/target/log.txt");
assertEquals(Arrays.asList(new String[] {"inherited"}), execs);
}
}
|
MavenITmng2103PluginExecutionInheritanceTest
|
java
|
quarkusio__quarkus
|
test-framework/junit5-internal/src/main/java/io/quarkus/test/DevModeTestApplicationModel.java
|
{
"start": 570,
"end": 2624
}
|
class ____ implements ApplicationModel {
private final ResolvedDependency appArtifact;
private final ApplicationModel delegate;
DevModeTestApplicationModel(ResolvedDependency testAppArtifact, ApplicationModel delegate) {
this.appArtifact = testAppArtifact;
this.delegate = delegate;
}
@Override
public ResolvedDependency getAppArtifact() {
return appArtifact;
}
@Override
public Collection<ResolvedDependency> getDependencies() {
return delegate.getDependencies();
}
@Override
public Iterable<ResolvedDependency> getDependencies(int flags) {
return delegate.getDependencies(flags);
}
@Override
public Iterable<ResolvedDependency> getDependenciesWithAnyFlag(int flags) {
return delegate.getDependenciesWithAnyFlag(flags);
}
@Override
public Collection<ResolvedDependency> getRuntimeDependencies() {
return delegate.getRuntimeDependencies();
}
@Override
public PlatformImports getPlatforms() {
return delegate.getPlatforms();
}
@Override
public Collection<ExtensionCapabilities> getExtensionCapabilities() {
return delegate.getExtensionCapabilities();
}
@Override
public Set<ArtifactKey> getParentFirst() {
return delegate.getParentFirst();
}
@Override
public Set<ArtifactKey> getRunnerParentFirst() {
return delegate.getRunnerParentFirst();
}
@Override
public Set<ArtifactKey> getLowerPriorityArtifacts() {
return delegate.getLowerPriorityArtifacts();
}
@Override
public Set<ArtifactKey> getReloadableWorkspaceDependencies() {
return delegate.getReloadableWorkspaceDependencies();
}
@Override
public Map<ArtifactKey, Set<String>> getRemovedResources() {
return delegate.getRemovedResources();
}
@Override
public Collection<ExtensionDevModeConfig> getExtensionDevModeConfig() {
return delegate.getExtensionDevModeConfig();
}
}
|
DevModeTestApplicationModel
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/cmd/RunCommandProcessor.java
|
{
"start": 626,
"end": 3001
}
|
class ____ {
private static final String JAVA_HOME_SYS = "java.home";
private static final String JAVA_HOME_ENV = "JAVA_HOME";
@BuildStep
public RunCommandActionResultBuildItem commands(List<RunCommandActionBuildItem> cmds) {
return new RunCommandActionResultBuildItem(cmds);
}
@SuppressWarnings("deprecation") // legacy jar
@BuildStep
public void defaultJavaCommand(PackageConfig packageConfig,
OutputTargetBuildItem jar,
BuildProducer<RunCommandActionBuildItem> cmds,
BuildSystemTargetBuildItem buildSystemTarget) {
Path jarPath = switch (packageConfig.jar().type()) {
case UBER_JAR -> jar.getOutputDirectory()
.resolve(jar.getBaseName() + packageConfig.computedRunnerSuffix() + ".jar");
// todo: legacy JAR should be using runnerSuffix()
case LEGACY_JAR -> jar.getOutputDirectory()
.resolve(jar.getBaseName() + packageConfig.computedRunnerSuffix() + ".jar");
case FAST_JAR, MUTABLE_JAR -> jar.getOutputDirectory()
.resolve(DEFAULT_FAST_JAR_DIRECTORY_NAME).resolve(QUARKUS_RUN_JAR);
};
List<String> args = new ArrayList<>();
args.add(determineJavaPath());
for (Map.Entry<?, ?> e : buildSystemTarget.getBuildSystemProps().entrySet()) {
args.add("-D" + e.getKey().toString() + "=" + e.getValue().toString());
}
args.add("-jar");
args.add(jarPath.toAbsolutePath().toString());
cmds.produce(new RunCommandActionBuildItem("java", args, null, null, null, false));
}
private String determineJavaPath() {
// try system property first - it will be the JAVA_HOME used by the current JVM
String home = System.getProperty(JAVA_HOME_SYS);
if (home == null) {
// No luck, somewhat a odd JVM not enforcing this property
// try with the JAVA_HOME environment variable
home = System.getenv(JAVA_HOME_ENV);
}
if (home != null) {
File javaHome = new File(home);
File file = new File(javaHome, "bin/java");
if (file.exists()) {
return file.getAbsolutePath();
}
}
// just assume 'java' is on the system path
return "java";
}
}
|
RunCommandProcessor
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/providers/serialisers/ServerPathPartBodyHandler.java
|
{
"start": 783,
"end": 1770
}
|
class ____ extends PathPartBodyHandler implements ServerMessageBodyWriter<PathPart> {
@Override
public long getSize(PathPart o, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
try {
return Files.size(o.file);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target, MediaType mediaType) {
return PathPart.class.isAssignableFrom(type);
}
@Override
public void writeResponse(PathPart o, Type genericType, ServerRequestContext context)
throws WebApplicationException {
ServerHttpResponse serverResponse = context.serverResponse();
// sendFile implies end(), even though javadoc doesn't say, if you add end() it will throw
serverResponse.sendFile(o.file.toString(), o.offset, o.count);
}
}
|
ServerPathPartBodyHandler
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/HttpClientProperties.java
|
{
"start": 7773,
"end": 9618
}
|
class ____ {
/**
* proxyType for proxy configuration of Netty HttpClient (http, socks4 or socks5).
*/
private ProxyProvider.Proxy type = ProxyProvider.Proxy.HTTP;
/** Hostname for proxy configuration of Netty HttpClient. */
private String host;
/** Port for proxy configuration of Netty HttpClient. */
private Integer port;
/** Username for proxy configuration of Netty HttpClient. */
private String username;
/** Password for proxy configuration of Netty HttpClient. */
private String password;
/**
* Regular expression (Java) for a configured list of hosts. that should be
* reached directly, bypassing the proxy
*/
private String nonProxyHostsPattern;
public ProxyProvider.Proxy getType() {
return type;
}
public void setType(ProxyProvider.Proxy type) {
this.type = type;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getNonProxyHostsPattern() {
return nonProxyHostsPattern;
}
public void setNonProxyHostsPattern(String nonProxyHostsPattern) {
this.nonProxyHostsPattern = nonProxyHostsPattern;
}
@Override
public String toString() {
return "Proxy{" + "type='" + type + '\'' + "host='" + host + '\'' + ", port=" + port + ", username='"
+ username + '\'' + ", password='" + password + '\'' + ", nonProxyHostsPattern='"
+ nonProxyHostsPattern + '\'' + '}';
}
}
public static
|
Proxy
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/MethodIntrospector.java
|
{
"start": 1432,
"end": 5326
}
|
class ____ {
private MethodIntrospector() {
}
/**
* Select methods on the given target type based on the lookup of associated metadata.
* <p>Callers define methods of interest through the {@link MetadataLookup} parameter,
* allowing to collect the associated metadata into the result map.
* @param targetType the target type to search methods on
* @param metadataLookup a {@link MetadataLookup} callback to inspect methods of interest,
* returning non-null metadata to be associated with a given method if there is a match,
* or {@code null} for no match
* @return the selected methods associated with their metadata (in the order of retrieval),
* or an empty map in case of no match
*/
public static <T> Map<Method, T> selectMethods(Class<?> targetType, final MetadataLookup<T> metadataLookup) {
final Map<Method, T> methodMap = new LinkedHashMap<>();
Set<Class<?>> handlerTypes = new LinkedHashSet<>();
Class<?> specificHandlerType = null;
// For a JDK dynamic proxy there is no user-declared class to inspect, so
// only its interfaces (added below) are searched.
if (!Proxy.isProxyClass(targetType)) {
specificHandlerType = ClassUtils.getUserClass(targetType);
handlerTypes.add(specificHandlerType);
}
handlerTypes.addAll(ClassUtils.getAllInterfacesForClassAsSet(targetType));
for (Class<?> currentHandlerType : handlerTypes) {
// Resolve each candidate against the concrete user class when one exists,
// otherwise against the interface currently being traversed.
final Class<?> targetClass = (specificHandlerType != null ? specificHandlerType : currentHandlerType);
ReflectionUtils.doWithMethods(currentHandlerType, method -> {
Method specificMethod = ClassUtils.getMostSpecificMethod(method, targetClass);
T result = metadataLookup.inspect(specificMethod);
if (result != null) {
Method bridgedMethod = BridgeMethodResolver.findBridgedMethod(specificMethod);
// Register the method unless a *distinct* bridged counterpart also
// carries matching metadata — presumably that counterpart is the one
// to keep in that case, avoiding duplicate entries for bridge methods.
if (bridgedMethod == specificMethod || bridgedMethod == method ||
bridgedMethod.equals(specificMethod) || bridgedMethod.equals(method) ||
metadataLookup.inspect(bridgedMethod) == null) {
methodMap.put(specificMethod, result);
}
}
}, ReflectionUtils.USER_DECLARED_METHODS);
}
return methodMap;
}
/**
* Select methods on the given target type based on a filter.
* <p>Callers define methods of interest through the {@code MethodFilter} parameter.
* @param targetType the target type to search methods on
* @param methodFilter a {@code MethodFilter} to help
* recognize handler methods of interest
* @return the selected methods, or an empty set in case of no match
*/
public static Set<Method> selectMethods(Class<?> targetType, final ReflectionUtils.MethodFilter methodFilter) {
// Adapt the MethodFilter to a MetadataLookup that yields Boolean.TRUE on a
// match; the metadata values are then discarded and only the key set returned.
return selectMethods(targetType,
(MetadataLookup<Boolean>) method -> (methodFilter.matches(method) ? Boolean.TRUE : null)).keySet();
}
/**
* Select an invocable method on the target type: either the given method itself
* if actually exposed on the target type, or otherwise a corresponding method
* on one of the target type's interfaces or on the target type itself.
* <p>Matches on user-declared interfaces will be preferred since they are likely
* to contain relevant metadata that corresponds to the method on the target class.
* @param method the method to check
* @param targetType the target type to search methods on
* (typically an interface-based JDK proxy)
* @return a corresponding invocable method on the target type
* @throws IllegalStateException if the given method is not invocable on the given
* target type (typically due to a proxy mismatch)
*/
public static Method selectInvocableMethod(Method method, Class<?> targetType) {
if (method.getDeclaringClass().isAssignableFrom(targetType)) {
return method;
}
try {
String methodName = method.getName();
Class<?>[] parameterTypes = method.getParameterTypes();
for (Class<?> ifc : targetType.getInterfaces()) {
try {
return ifc.getMethod(methodName, parameterTypes);
}
catch (NoSuchMethodException ex) {
// Alright, not on this
|
MethodIntrospector
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/SQLCurrentTimeExpr.java
|
{
"start": 1475,
"end": 1969
}
|
enum Type {

	CURRENT_TIME("CURRENT_TIME"),
	CURRENT_DATE("CURRENT_DATE"),
	CURDATE("CURDATE"),
	CURTIME("CURTIME"),
	CURRENT_TIMESTAMP("CURRENT_TIMESTAMP"),
	LOCALTIME("LOCALTIME"),
	LOCALTIMESTAMP("LOCALTIMESTAMP"),
	SYSDATE("SYSDATE");

	/** Canonical (upper-case) SQL keyword for this time function. */
	public final String name;

	/** Pre-computed lower-case form of {@link #name}, cached once per constant. */
	public final String nameLCase;

	// Fix: the constructor must be named after the enum itself ("Type"); the
	// masked name "____" did not match it and would not compile.
	Type(String name) {
		this.name = name;
		// Locale.ROOT keeps the case mapping locale-independent (e.g. avoids
		// the Turkish dotless-i problem when lower-casing "TIME").
		this.nameLCase = name.toLowerCase(java.util.Locale.ROOT);
	}
}
}
|
Type
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/json/JsonPathValueAssertTests.java
|
{
"start": 3322,
"end": 4222
}
|
class ____ {

	@Test
	void asBooleanWithBooleanPrimitiveValue() {
		// A primitive boolean JSON value converts directly.
		assertThat(forValue(true)).asBoolean().isEqualTo(true);
	}

	@Test
	void asBooleanWithBooleanWrapperValue() {
		// A boxed Boolean behaves the same as the primitive.
		assertThat(forValue(Boolean.FALSE)).asBoolean().isEqualTo(false);
	}

	@Test
	void asBooleanWithNonBooleanFails() {
		// The *string* "false" is not a boolean and must be rejected.
		String rawValue = "false";
		AssertProvider<JsonPathValueAssert> jsonValue = forValue(rawValue);
		assertThatExceptionOfType(AssertionError.class)
				.isThrownBy(() -> assertThat(jsonValue).asBoolean().isEqualTo(false))
				.satisfies(hasFailedToBeOfType(rawValue, "a boolean"));
	}

	@Test
	void asBooleanWithNullFails() {
		// A null value cannot be converted and reports the expected type.
		AssertProvider<JsonPathValueAssert> jsonValue = forValue(null);
		assertThatExceptionOfType(AssertionError.class)
				.isThrownBy(() -> assertThat(jsonValue).asBoolean().isEqualTo(false))
				.satisfies(hasFailedToBeOfTypeWhenNull("a boolean"));
	}
}
@Nested
|
AsBooleanTests
|
java
|
apache__camel
|
core/camel-management-api/src/main/java/org/apache/camel/api/management/ManagedResource.java
|
{
"start": 1130,
"end": 1269
}
|
class ____ being managed in the JMX server.
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Inherited
@Documented
public @
|
as
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/ImplicitValueResolverBuildItem.java
|
{
"start": 557,
"end": 694
}
|
class ____ the synthetic template data is not equal the build fails.
*
* @see TemplateData
* @see TemplateDataBuilder
*/
public final
|
and
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/support/conversion/StringToJavaTimeConverter.java
|
{
"start": 913,
"end": 2128
}
|
class ____ implements StringToObjectConverter {

	/**
	 * Parse functions for every supported {@code java.time} type, keyed by the
	 * target class. Each entry delegates to that type's own {@code parse}/{@code of}
	 * factory.
	 */
	private static final Map<Class<?>, Function<String, ?>> CONVERTERS = Map.ofEntries(
			entry(Duration.class, Duration::parse),
			entry(Instant.class, Instant::parse),
			entry(LocalDate.class, LocalDate::parse),
			entry(LocalDateTime.class, LocalDateTime::parse),
			entry(LocalTime.class, LocalTime::parse),
			entry(MonthDay.class, MonthDay::parse),
			entry(OffsetDateTime.class, OffsetDateTime::parse),
			entry(OffsetTime.class, OffsetTime::parse),
			entry(Period.class, Period::parse),
			entry(Year.class, Year::parse),
			entry(YearMonth.class, YearMonth::parse),
			entry(ZonedDateTime.class, ZonedDateTime::parse),
			entry(ZoneId.class, ZoneId::of),
			entry(ZoneOffset.class, ZoneOffset::of));

	@Override
	public boolean canConvertTo(Class<?> targetType) {
		// Convertible if and only if a parse function is registered for the type.
		return CONVERTERS.containsKey(targetType);
	}

	@Override
	public Object convert(String source, Class<?> targetType) throws Exception {
		// Look up the parse function first, then fail fast with a descriptive
		// message if the target type is unsupported.
		Function<String, ?> parser = CONVERTERS.get(targetType);
		parser = Preconditions.notNull(parser,
			() -> "No registered converter for %s".formatted(targetType.getName()));
		return parser.apply(source);
	}
}
|
StringToJavaTimeConverter
|
java
|
apache__avro
|
lang/java/avro/src/test/java/org/apache/avro/TestSchemaValidation.java
|
{
"start": 1169,
"end": 11417
}
|
class ____ {
/** Collection of reader/writer schema pair that are compatible. */
public static final List<ReaderWriter> COMPATIBLE_READER_WRITER_TEST_CASES = list(
new ReaderWriter(BOOLEAN_SCHEMA, BOOLEAN_SCHEMA),
new ReaderWriter(INT_SCHEMA, INT_SCHEMA),
new ReaderWriter(LONG_SCHEMA, INT_SCHEMA), new ReaderWriter(LONG_SCHEMA, LONG_SCHEMA),
// Avro spec says INT/LONG can be promoted to FLOAT/DOUBLE.
// This is arguable as this causes a loss of precision.
new ReaderWriter(FLOAT_SCHEMA, INT_SCHEMA), new ReaderWriter(FLOAT_SCHEMA, LONG_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, LONG_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_SCHEMA), new ReaderWriter(DOUBLE_SCHEMA, FLOAT_SCHEMA),
new ReaderWriter(STRING_SCHEMA, STRING_SCHEMA),
new ReaderWriter(BYTES_SCHEMA, BYTES_SCHEMA),
new ReaderWriter(INT_ARRAY_SCHEMA, INT_ARRAY_SCHEMA), new ReaderWriter(LONG_ARRAY_SCHEMA, INT_ARRAY_SCHEMA),
new ReaderWriter(INT_MAP_SCHEMA, INT_MAP_SCHEMA), new ReaderWriter(LONG_MAP_SCHEMA, INT_MAP_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA, ENUM1_AB_SCHEMA), new ReaderWriter(ENUM1_ABC_SCHEMA, ENUM1_AB_SCHEMA),
// String-to/from-bytes, introduced in Avro 1.7.7
new ReaderWriter(STRING_SCHEMA, BYTES_SCHEMA), new ReaderWriter(BYTES_SCHEMA, STRING_SCHEMA),
// Tests involving unions:
new ReaderWriter(EMPTY_UNION_SCHEMA, EMPTY_UNION_SCHEMA), new ReaderWriter(INT_UNION_SCHEMA, INT_UNION_SCHEMA),
new ReaderWriter(INT_STRING_UNION_SCHEMA, STRING_INT_UNION_SCHEMA),
new ReaderWriter(INT_UNION_SCHEMA, EMPTY_UNION_SCHEMA), new ReaderWriter(LONG_UNION_SCHEMA, INT_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, INT_UNION_SCHEMA), new ReaderWriter(FLOAT_UNION_SCHEMA, LONG_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, INT_UNION_SCHEMA), new ReaderWriter(LONG_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, LONG_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, FLOAT_UNION_SCHEMA),
new ReaderWriter(STRING_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(STRING_UNION_SCHEMA, BYTES_UNION_SCHEMA),
new ReaderWriter(BYTES_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(BYTES_UNION_SCHEMA, STRING_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, INT_FLOAT_UNION_SCHEMA),
new ReaderWriter(NULL_INT_ARRAY_UNION_SCHEMA, INT_ARRAY_SCHEMA),
new ReaderWriter(NULL_INT_MAP_UNION_SCHEMA, INT_MAP_SCHEMA),
// Readers capable of reading all branches of a union are compatible
new ReaderWriter(FLOAT_SCHEMA, INT_FLOAT_UNION_SCHEMA), new ReaderWriter(LONG_SCHEMA, INT_LONG_UNION_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_FLOAT_UNION_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA),
// Special case of singleton unions:
new ReaderWriter(FLOAT_SCHEMA, FLOAT_UNION_SCHEMA), new ReaderWriter(INT_UNION_SCHEMA, INT_SCHEMA),
new ReaderWriter(INT_SCHEMA, INT_UNION_SCHEMA),
// Tests involving records:
new ReaderWriter(EMPTY_RECORD1, EMPTY_RECORD1), new ReaderWriter(EMPTY_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_RECORD1, A_INT_RECORD1), new ReaderWriter(A_DINT_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_DINT_RECORD1, A_DINT_RECORD1), new ReaderWriter(A_INT_RECORD1, A_DINT_RECORD1),
new ReaderWriter(A_LONG_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_RECORD1, A_INT_B_INT_RECORD1), new ReaderWriter(A_DINT_RECORD1, A_INT_B_INT_RECORD1),
new ReaderWriter(A_INT_B_DINT_RECORD1, A_INT_RECORD1), new ReaderWriter(A_DINT_B_DINT_RECORD1, EMPTY_RECORD1),
new ReaderWriter(A_DINT_B_DINT_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_B_INT_RECORD1, A_DINT_B_DINT_RECORD1),
// The SchemaValidator, unlike the SchemaCompatibility class, cannot cope with
// recursive schemas
// See AVRO-2074
// new ReaderWriter(INT_LIST_RECORD, INT_LIST_RECORD),
// new ReaderWriter(LONG_LIST_RECORD, LONG_LIST_RECORD),
// new ReaderWriter(LONG_LIST_RECORD, INT_LIST_RECORD),
new ReaderWriter(NULL_SCHEMA, NULL_SCHEMA),
// This is comparing two records that have an inner array of records with
// different namespaces.
new ReaderWriter(NS_RECORD1, NS_RECORD2));
/** Collection of reader/writer schema pair that are incompatible. */
public static final List<ReaderWriter> INCOMPATIBLE_READER_WRITER_TEST_CASES = list(
new ReaderWriter(NULL_SCHEMA, INT_SCHEMA), new ReaderWriter(NULL_SCHEMA, LONG_SCHEMA),
new ReaderWriter(BOOLEAN_SCHEMA, INT_SCHEMA),
new ReaderWriter(INT_SCHEMA, NULL_SCHEMA), new ReaderWriter(INT_SCHEMA, BOOLEAN_SCHEMA),
new ReaderWriter(INT_SCHEMA, LONG_SCHEMA), new ReaderWriter(INT_SCHEMA, FLOAT_SCHEMA),
new ReaderWriter(INT_SCHEMA, DOUBLE_SCHEMA),
new ReaderWriter(LONG_SCHEMA, FLOAT_SCHEMA), new ReaderWriter(LONG_SCHEMA, DOUBLE_SCHEMA),
new ReaderWriter(FLOAT_SCHEMA, DOUBLE_SCHEMA),
new ReaderWriter(STRING_SCHEMA, BOOLEAN_SCHEMA), new ReaderWriter(STRING_SCHEMA, INT_SCHEMA),
new ReaderWriter(BYTES_SCHEMA, NULL_SCHEMA), new ReaderWriter(BYTES_SCHEMA, INT_SCHEMA),
new ReaderWriter(INT_ARRAY_SCHEMA, LONG_ARRAY_SCHEMA), new ReaderWriter(INT_MAP_SCHEMA, INT_ARRAY_SCHEMA),
new ReaderWriter(INT_ARRAY_SCHEMA, INT_MAP_SCHEMA), new ReaderWriter(INT_MAP_SCHEMA, LONG_MAP_SCHEMA),
// new ReaderWriter(ENUM1_AB_SCHEMA, ENUM1_ABC_SCHEMA),
// new ReaderWriter(ENUM1_BC_SCHEMA, ENUM1_ABC_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA, ENUM2_AB_SCHEMA), new ReaderWriter(INT_SCHEMA, ENUM2_AB_SCHEMA),
new ReaderWriter(ENUM2_AB_SCHEMA, INT_SCHEMA),
// Tests involving unions:
new ReaderWriter(INT_UNION_SCHEMA, INT_STRING_UNION_SCHEMA),
new ReaderWriter(STRING_UNION_SCHEMA, INT_STRING_UNION_SCHEMA),
new ReaderWriter(FLOAT_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA),
new ReaderWriter(LONG_SCHEMA, INT_FLOAT_UNION_SCHEMA), new ReaderWriter(INT_SCHEMA, INT_FLOAT_UNION_SCHEMA),
// new ReaderWriter(EMPTY_RECORD2, EMPTY_RECORD1),
new ReaderWriter(A_INT_RECORD1, EMPTY_RECORD1), new ReaderWriter(A_INT_B_DINT_RECORD1, EMPTY_RECORD1),
// new ReaderWriter(INT_LIST_RECORD, LONG_LIST_RECORD),
new ReaderWriter(NULL_SCHEMA, INT_SCHEMA));
SchemaValidatorBuilder builder = new SchemaValidatorBuilder();
Schema rec = SchemaBuilder.record("test.Rec").fields().name("a").type().intType().intDefault(1).name("b").type()
.longType().noDefault().endRecord();
Schema rec2 = SchemaBuilder.record("test.Rec").fields().name("a").type().intType().intDefault(1).name("b").type()
.longType().noDefault().name("c").type().intType().intDefault(0).endRecord();
Schema rec3 = SchemaBuilder.record("test.Rec").fields().name("b").type().longType().noDefault().name("c").type()
.intType().intDefault(0).endRecord();
Schema rec4 = SchemaBuilder.record("test.Rec").fields().name("b").type().longType().noDefault().name("c").type()
.intType().noDefault().endRecord();
Schema rec5 = SchemaBuilder.record("test.Rec").fields().name("a").type().stringType().stringDefault("") // different
// type from
// original
.name("b").type().longType().noDefault().name("c").type().intType().intDefault(0).endRecord();
@Test
void allTypes() throws SchemaValidationException {
// Build one record schema exercising every Avro type (all primitives, fixed,
// enum, map, array, a nested record, and an optional field) and check that it
// validates against itself under the mutual-read strategy.
Schema s = SchemaBuilder.record("r").fields().requiredBoolean("boolF").requiredInt("intF").requiredLong("longF")
.requiredFloat("floatF").requiredDouble("doubleF").requiredString("stringF").requiredBytes("bytesF")
.name("fixedF1").type().fixed("F1").size(1).noDefault().name("enumF").type().enumeration("E1").symbols("S")
.noDefault().name("mapF").type().map().values().stringType().noDefault().name("arrayF").type().array().items()
.stringType().noDefault().name("recordF").type().record("inner").fields().name("f").type().intType().noDefault()
.endRecord().noDefault().optionalBoolean("boolO").endRecord();
testValidatorPasses(builder.mutualReadStrategy().validateLatest(), s, s);
}
@Test
void readOnePrior() throws SchemaValidationException {
testValidatorPasses(builder.canReadStrategy().validateLatest(), rec3, rec);
testValidatorPasses(builder.canReadStrategy().validateLatest(), rec5, rec3);
testValidatorFails(builder.canReadStrategy().validateLatest(), rec4, rec);
}
@Test
void readAllPrior() throws SchemaValidationException {
testValidatorPasses(builder.canReadStrategy().validateAll(), rec3, rec, rec2);
testValidatorFails(builder.canReadStrategy().validateAll(), rec4, rec, rec2, rec3);
testValidatorFails(builder.canReadStrategy().validateAll(), rec5, rec, rec2, rec3);
}
@Test
void onePriorCanRead() throws SchemaValidationException {
testValidatorPasses(builder.canBeReadStrategy().validateLatest(), rec, rec3);
testValidatorFails(builder.canBeReadStrategy().validateLatest(), rec, rec4);
}
@Test
void allPriorCanRead() throws SchemaValidationException {
testValidatorPasses(builder.canBeReadStrategy().validateAll(), rec, rec3, rec2);
testValidatorFails(builder.canBeReadStrategy().validateAll(), rec, rec4, rec3, rec2);
}
@Test
void onePriorCompatible() throws SchemaValidationException {
testValidatorPasses(builder.mutualReadStrategy().validateLatest(), rec, rec3);
testValidatorFails(builder.mutualReadStrategy().validateLatest(), rec, rec4);
}
@Test
void allPriorCompatible() throws SchemaValidationException {
testValidatorPasses(builder.mutualReadStrategy().validateAll(), rec, rec3, rec2);
testValidatorFails(builder.mutualReadStrategy().validateAll(), rec, rec4, rec3, rec2);
}
@Test
void invalidBuild() {
// A null strategy must be rejected eagerly by the builder (at build time)
// rather than surfacing later during validation.
assertThrows(AvroRuntimeException.class, () -> {
builder.strategy(null).validateAll();
});
}
public static
|
TestSchemaValidation
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RJsonStoreReactive.java
|
{
"start": 1134,
"end": 23948
}
|
interface ____<K, V> extends RExpirableReactive {
/**
* Gets value by specified key and JSONPath
*
* @param key entry key
* @param codec entry value codec
* @param paths JSON paths
* @param <T> the type of object
* @return entry value
*/
<T> Mono<T> get(K key, JsonCodec codec, String... paths);
/**
* Sets value by specified key and JSONPath only if previous value is empty.
*
* @param key entry key
* @param path JSON path
* @param value entry value
* @return {@code true} if successful, or {@code false} if
* value was already set
*/
Mono<Boolean> setIfAbsent(K key, String path, Object value);
/**
* Sets value by specified key and JSONPath only if previous value is non-empty.
*
* @param key entry key
* @param path JSON path
* @param value object
* @return {@code true} if successful, or {@code false} if
* element wasn't set
*/
Mono<Boolean> setIfExists(K key, String path, Object value);
/**
* Atomically sets the value to the given updated value
* by specified key and JSONPath, only if serialized state of
* the current value equals to serialized state of the expected value.
*
* @param key entry key
* @param path JSON path
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful; or {@code false} if the actual value
* was not equal to the expected value.
*/
Mono<Boolean> compareAndSet(K key, String path, Object expect, Object update);
/**
* Retrieves current value stored by specified key and JSONPath then
* replaces it with new value.
*
* @param key entry key
* @param codec entry value codec
* @param path JSON path
* @param newValue value to set
* @return previous value
*/
<T> Mono<T> getAndSet(K key, JsonCodec codec, String path, Object newValue);
/**
* Stores value by specified key and JSONPath.
*
* @param key entry key
* @param path JSON path
* @param value value to set
*/
Mono<Void> set(K key, String path, Object value);
/**
* Returns size of string data by specified key and JSONPath
*
* @param key entry key
* @param path JSON path
* @return size of string
*/
Mono<Long> stringSize(K key, String path);
/**
* Returns list of string data size by specified key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @return list of string data sizes
*/
Mono<List<Long>> stringSizeMulti(K key, String path);
/**
* Appends string data to element specified by specified key and JSONPath.
* Returns new size of string data.
*
* @param key entry key
* @param path JSON path
* @param value data
* @return size of string data
*/
Mono<Long> stringAppend(K key, String path, Object value);
/**
* Appends string data to elements specified by specified key and JSONPath.
* Returns new size of string data.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param value data
* @return list of string data sizes
*/
Mono<List<Long>> stringAppendMulti(K key, String path, Object value);
/**
* Appends values to array by specified key and JSONPath.
* Returns new size of array.
*
* @param key entry key
* @param path JSON path
* @param values values to append
* @return size of array
*/
Mono<Long> arrayAppend(K key, String path, Object... values);
/**
* Appends values to arrays by specified key and JSONPath.
* Returns new size of arrays.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param values values to append
* @return list of arrays size
*/
Mono<List<Long>> arrayAppendMulti(K key, String path, Object... values);
/**
* Returns index of object in array by specified key and JSONPath.
* -1 means object not found.
*
* @param key entry key
* @param path JSON path
* @param value value to search
* @return index in array
*/
Mono<Long> arrayIndex(K key, String path, Object value);
/**
* Returns index of object in arrays by specified key and JSONPath.
* -1 means object not found.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param value value to search
* @return list of index in arrays
*/
Mono<List<Long>> arrayIndexMulti(K key, String path, Object value);
/**
* Returns index of object in array by specified key and JSONPath
* in range between <code>start</code> (inclusive) and <code>end</code> (exclusive) indexes.
* -1 means object not found.
*
* @param key entry key
* @param path JSON path
* @param value value to search
* @param start start index, inclusive
* @param end end index, exclusive
* @return index in array
*/
Mono<Long> arrayIndex(K key, String path, Object value, Long start, Long end);
/**
* Returns index of object in arrays by specified key and JSONPath
* in range between <code>start</code> (inclusive) and <code>end</code> (exclusive) indexes.
* -1 means object not found.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param value value to search
* @param start start index, inclusive
* @param end end index, exclusive
* @return list of index in arrays
*/
Mono<List<Long>> arrayIndexMulti(K key, String path, Object value, Long start, Long end);
/**
* Inserts values into array by specified key and JSONPath.
* Values are inserted at defined <code>index</code>.
*
* @param key entry key
* @param path JSON path
* @param index array index at which values are inserted
* @param values values to insert
* @return size of array
*/
Mono<Long> arrayInsert(K key, String path, Long index, Object... values);
/**
* Inserts values into arrays by specified key and JSONPath.
* Values are inserted at defined <code>index</code>.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param index array index at which values are inserted
* @param values values to insert
* @return list of arrays size
*/
Mono<List<Long>> arrayInsertMulti(K key, String path, Long index, Object... values);
/**
* Returns size of array by specified key and JSONPath.
*
* @param key entry key
* @param path JSON path
* @return size of array
*/
Mono<Long> arraySize(K key, String path);
/**
* Returns size of arrays by specified key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @return list of arrays size
*/
Mono<List<Long>> arraySizeMulti(K key, String path);
/**
* Polls last element of array by specified key and JSONPath.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @return last element
*
* @param <T> the type of object
*/
<T> Mono<T> arrayPollLast(K key, JsonCodec codec, String path);
/**
* Polls last element of arrays by specified key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @return list of last elements
*
* @param <T> the type of object
*/
<T> Mono<List<T>> arrayPollLastMulti(K key, JsonCodec codec, String path);
/**
* Polls first element of array by specified key and JSONPath.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @return first element
*
* @param <T> the type of object
*/
<T> Mono<T> arrayPollFirst(K key, JsonCodec codec, String path);
/**
* Polls first element of arrays by specified key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @return list of first elements
*
* @param <T> the type of object
*/
<T> Mono<List<T>> arrayPollFirstMulti(K key, JsonCodec codec, String path);
/**
* Pops element located at index of array by specified key and JSONPath.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @param index array index
* @return element
*
* @param <T> the type of object
*/
<T> Mono<T> arrayPop(K key, JsonCodec codec, String path, Long index);
/**
* Pops elements located at index of arrays by specified key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param codec object codec
* @param path JSON path
* @param index array index
* @return list of elements
*
* @param <T> the type of object
*/
<T> Mono<List<T>> arrayPopMulti(K key, JsonCodec codec, String path, Long index);
/**
* Trims array by specified key and JSONPath in range
* between <code>start</code> (inclusive) and <code>end</code> (inclusive) indexes.
*
* @param key entry key
* @param path JSON path
* @param start start index, inclusive
* @param end end index, inclusive
* @return length of array
*/
Mono<Long> arrayTrim(K key, String path, Long start, Long end);
/**
* Trims arrays by specified key and JSONPath in range
* between <code>start</code> (inclusive) and <code>end</code> (inclusive) indexes.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param start start index, inclusive
* @param end end index, inclusive
* @return length of array
*/
Mono<List<Long>> arrayTrimMulti(K key, String path, Long start, Long end);
/**
* Clears value by specified key
*
* @param key entry key
* @return {@code true} if successful, or {@code false} if
* entry doesn't exist
*/
Mono<Boolean> clear(K key);
/**
* Clears json containers by specified keys.
*
* @param keys entry keys
* @return number of cleared containers
*/
Mono<Long> clear(Set<K> keys);
/**
* Clears json container by specified keys and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param path JSON path
* @param keys entry keys
* @return number of cleared containers
*/
Mono<Long> clear(String path, Set<K> keys);
/**
* Increments the current value specified by key and JSONPath.
*
* @param key entry key
* @param path JSON path
* @param delta increment value
* @return the updated value
*/
<T extends Number> Mono<T> incrementAndGet(K key, String path, T delta);
/**
* Increments the current values specified by key and JSONPath.
* Compatible only with enhanced syntax starting with '$' character.
*
* @param key entry key
* @param path JSON path
* @param delta increment value
* @return list of updated value
*/
<T extends Number> Mono<List<T>> incrementAndGetMulti(K key, String path, T delta);
/**
* Merges value into element by the specified key and JSONPath.
*
* @param key entry key
* @param path JSON path
* @param value value to merge
*/
Mono<Void> merge(K key, String path, Object value);
/**
* Returns keys amount in JSON container by specified key
*
* @param key entry key
* @return keys amount
*/
Mono<Long> countKeys(K key);
/**
* Returns keys amount in JSON container specified by key and JSONPath
*
* @param key entry key
* @param path JSON path
* @return keys amount
*/
Mono<Long> countKeys(K key, String path);
/**
* Returns list of keys amount in JSON containers specified by key and JSONPath
*
* @param key entry key
* @param path JSON path
* @return list of keys amount
*/
Mono<List<Long>> countKeysMulti(K key, String path);
/**
* Returns list of keys in JSON container by specified key
*
* @return list of keys
*/
Mono<List<String>> getKeys(K key);
/**
* Returns list of keys in JSON container by specified key and JSONPath
*
* @param path JSON path
* @return list of keys
*/
Mono<List<String>> getKeys(K key, String path);
/**
* Returns list of keys in JSON containers by specified key and JSONPath
*
* @param path JSON path
* @return list of keys
*/
List<Mono<List<String>>> getKeysMulti(K key, String path);
/**
* Toggle Mono<Boolean> value by specified key and JSONPath
*
* @param path JSON path
* @return new Mono<Boolean> value
*/
Mono<Boolean> toggle(K key, String path);
/**
* Toggle Mono<Boolean> values by specified key and JSONPath
*
* @param path JSON path
* @return list of Mono<Boolean> values
*/
List<Mono<Boolean>> toggleMulti(K key, String path);
/**
* Returns type of value
*
* @return type of element
*/
Mono<JsonType> getType(K key);
/**
* Returns type of element specified by key and JSONPath
*
* @param path JSON path
* @return type of element
*/
Mono<JsonType> getType(K key, String path);
/**
* Deletes entry by specified key
*
* @param key entry key
* @return {@code true} if successful, or {@code false} if
* entry doesn't exist
*/
Mono<Boolean> delete(K key);
/**
* Deletes entries by specified keys
*
* @param keys entry keys
* @return number of deleted elements
*/
Mono<Long> delete(Set<K> keys);
/**
* Deletes JSON elements specified by keys and JSONPath
*
* @param path JSON path
* @param keys entry keys
* @return number of deleted elements
*/
Mono<Long> delete(String path, Set<K> keys);
/**
* Returns size of entry in bytes specified by key.
*
* @param key entry key
* @return entry size
*/
Mono<Long> sizeInMemory(K key);
/**
* Retrieves value by specified key.
*
* @param key entry key
* @return element
*/
Mono<V> get(K key);
/**
* Retrieves values by specified keys.
*
* @param keys entry keys
* @return map with entries where value mapped by key
*/
Mono<Map<K, V>> get(Set<K> keys);
/**
* Retrieves values by specified keys and JSONPath.
*
* @param path JSON path
* @param keys entry keys
* @return map with entries where value mapped by key
*/
Mono<Map<K, V>> get(String path, Set<K> keys);
/**
* Retrieves entry value by specified key and removes it.
*
* @param key entry key
* @return element
*/
Mono<V> getAndDelete(K key);
/**
* Sets value only if entry doesn't exist.
*
* @param key entry key
* @param value value to set
* @return {@code true} if successful, or {@code false} if
* element was already set
*/
Mono<Boolean> setIfAbsent(K key, V value);
/**
* Sets value with defined duration only if entry doesn't exist.
*
* @param key entry key
* @param value value to set
* @param duration expiration duration
* @return {@code true} if successful, or {@code false} if
* element was already set
*/
Mono<Boolean> setIfAbsent(K key, V value, Duration duration);
/**
* Sets value only if entry already exists.
*
* @param key entry key
* @param value value to set
* @return {@code true} if successful, or {@code false} if
* element wasn't set
*/
Mono<Boolean> setIfExists(K key, V value);
/**
* Sets <code>value</code> with expiration <code>duration</code> only if entry already exists.
*
* @param key entry key
* @param value value to set
* @param duration expiration duration
* @return {@code true} if successful, or {@code false} if
* element wasn't set
*/
Mono<Boolean> setIfExists(K key, V value, Duration duration);
/**
* Atomically sets the value to the given updated value
* by specified key only if serialized state of the current value equals
* to serialized state of the expected value.
*
* @param key entry key
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful; or {@code false} if the actual value
* was not equal to the expected value.
*/
Mono<Boolean> compareAndSet(K key, V expect, V update);
/**
* Retrieves current value by specified key and replaces it with new value.
*
* @param key entry key
* @param newValue value to set
* @return previous value
*/
Mono<V> getAndSet(K key, V newValue);
/**
* Retrieves current value by specified key and replaces it
* with value and defines expiration <code>duration</code>.
*
* @param key entry key
* @param value value to set
* @param duration expiration duration
* @return previous value
*/
Mono<V> getAndSet(K key, V value, Duration duration);
/**
* Retrieves current value by specified key and sets an expiration duration for it.
* <p>
* Requires <b>Redis 6.2.0 and higher.</b>
*
* @param key entry key
* @param duration of object time to live interval
* @return value
*/
Mono<V> getAndExpire(K key, Duration duration);
/**
* Retrieves current value by specified key and sets an expiration date for it.
* <p>
* Requires <b>Redis 6.2.0 and higher.</b>
*
* @param key entry key
* @param time of exact object expiration moment
* @return value
*/
Mono<V> getAndExpire(K key, Instant time);
/**
* Retrieves current value by specified key and clears expiration date set before.
* <p>
* Requires <b>Redis 6.2.0 and higher.</b>
*
* @param key entry key
* @return value
*/
Mono<V> getAndClearExpire(K key);
/**
* Stores value by specified key.
*
* @param key entry key
* @param value value to set
*/
Mono<Void> set(K key, V value);
/**
* Stores values by specified keys.
*
* @param entries entries to store
*/
Mono<Void> set(Map<K, V> entries);
/**
* Stores values by specified keys and JSONPath.
*
* @param path JSONPath
* @param entries entries to store
*/
Mono<Void> set(String path, Map<K, V> entries);
/**
* Stores value by specified key with defined expiration duration.
*
* @param key entry key
* @param value value to set
* @param duration expiration duration
*/
Mono<Void> set(K key, V value, Duration duration);
/**
* Stores values by specified keys with defined expiration duration.
*
* @param entries entries to store
* @param duration expiration duration
*/
Mono<Void> set(Map<K, V> entries, Duration duration);
/**
* Sets value by specified key and keep existing TTL.
* <p>
* Requires <b>Redis 6.0.0 and higher.</b>
*
* @param value value to set
*/
Mono<Void> setAndKeepTTL(K key, V value);
/**
* Adds object event listener
*
* @see ExpiredObjectListener
* @see DeletedObjectListener
* @see org.redisson.api.listener.SetObjectListener
*
* @param listener object event listener
* @return listener id
*/
Mono<Integer> addListener(ObjectListener listener);
/**
* Remaining time to live of map entry associated with a <code>key</code>.
*
* @param key - map key
* @return time in milliseconds
* -2 if the key does not exist.
* -1 if the key exists but has no associated expire.
*/
Mono<Long> remainTimeToLive(K key);
/**
* Returns <code>true</code> if this map contains map entry
* mapped by specified <code>key</code>, otherwise <code>false</code>
*
* @param key - map key
* @return <code>true</code> if this map contains map entry
* mapped by specified <code>key</code>, otherwise <code>false</code>
*/
Mono<Boolean> containsKey(Object key);
/**
* Read all keys at once
*
* @return keys
*/
Mono<Set<K>> readAllKeySet();
/**
* Returns entries amount in store
*
* @return entries amount
*/
Mono<Integer> size();
/**
* Returns <code>RCountDownLatch</code> instance associated with key
*
* @param key - map key
* @return countdownlatch
*/
RCountDownLatchReactive getCountDownLatch(K key);
/**
* Returns <code>RPermitExpirableSemaphore</code> instance associated with key
*
* @param key - map key
* @return permitExpirableSemaphore
*/
RPermitExpirableSemaphoreReactive getPermitExpirableSemaphore(K key);
/**
* Returns <code>RSemaphore</code> instance associated with key
*
* @param key - map key
* @return semaphore
*/
RSemaphoreReactive getSemaphore(K key);
/**
* Returns <code>RLock</code> instance associated with key
*
* @param key - map key
* @return fairlock
*/
RLockReactive getFairLock(K key);
/**
* Returns <code>RReadWriteLock</code> instance associated with key
*
* @param key - map key
* @return readWriteLock
*/
RReadWriteLockReactive getReadWriteLock(K key);
/**
* Returns <code>RLock</code> instance associated with key
*
* @param key - map key
* @return lock
*/
RLockReactive getLock(K key);
}
|
RJsonStoreReactive
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/resource/ResourceLocatorSubresource.java
|
{
"start": 3370,
"end": 3484
}
|
class ____ {
@RestPath
String param;
@QueryParam("value")
String value;
}
}
|
Params
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/vectors/RescoreVectorBuilder.java
|
{
"start": 1138,
"end": 4113
}
|
class ____ implements Writeable, ToXContentObject {
public static final ParseField OVERSAMPLE_FIELD = new ParseField("oversample");
public static final float NO_OVERSAMPLE = 0.0F;
public static final float MIN_OVERSAMPLE = 1.0F;
private static final ConstructingObjectParser<RescoreVectorBuilder, Void> PARSER = new ConstructingObjectParser<>(
"rescore_vector",
args -> new RescoreVectorBuilder((Float) args[0])
);
static {
PARSER.declareFloat(ConstructingObjectParser.constructorArg(), OVERSAMPLE_FIELD);
}
private static final TransportVersion RESCORE_VECTOR_ALLOW_ZERO = TransportVersion.fromName("rescore_vector_allow_zero");
// Oversample is required as of now as it is the only field in the rescore vector
private final float oversample;
public RescoreVectorBuilder(float numCandidatesFactor) {
Objects.requireNonNull(numCandidatesFactor, "[" + OVERSAMPLE_FIELD.getPreferredName() + "] must be set");
if (numCandidatesFactor < MIN_OVERSAMPLE && numCandidatesFactor != NO_OVERSAMPLE) {
throw new IllegalArgumentException("[" + OVERSAMPLE_FIELD.getPreferredName() + "] must be >= " + MIN_OVERSAMPLE + " or 0");
}
this.oversample = numCandidatesFactor;
}
public RescoreVectorBuilder(StreamInput in) throws IOException {
this.oversample = in.readFloat();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// We don't want to serialize a `0` oversample to a node that doesn't know what to do with it.
if (oversample == NO_OVERSAMPLE && out.getTransportVersion().supports(RESCORE_VECTOR_ALLOW_ZERO) == false) {
throw new ElasticsearchStatusException(
"[rescore_vector] does not support a 0 for ["
+ OVERSAMPLE_FIELD.getPreferredName()
+ "] before version ["
+ RESCORE_VECTOR_ALLOW_ZERO.toReleaseVersion()
+ "]",
RestStatus.BAD_REQUEST
);
}
out.writeFloat(oversample);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(OVERSAMPLE_FIELD.getPreferredName(), oversample);
builder.endObject();
return builder;
}
public static RescoreVectorBuilder fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RescoreVectorBuilder that = (RescoreVectorBuilder) o;
return Objects.equals(oversample, that.oversample);
}
@Override
public int hashCode() {
return Objects.hashCode(oversample);
}
public float oversample() {
return oversample;
}
}
|
RescoreVectorBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/persister/spi/PersisterClassResolver.java
|
{
"start": 891,
"end": 980
}
|
interface ____ extends Service {
/**
* Returns the entity persister
|
PersisterClassResolver
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/exceptions/verification/SmartNullPointerException.java
|
{
"start": 227,
"end": 428
}
|
class ____ extends MockitoException {
private static final long serialVersionUID = 1L;
public SmartNullPointerException(String message) {
super(message);
}
}
|
SmartNullPointerException
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingRCATest.java
|
{
"start": 33807,
"end": 33877
}
|
enum ____ {
BOOLEAN, STRING, NULL, NUMERIC
}
public
|
MetadataFieldType
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cache/QueryCacheAndOneToManyTest.java
|
{
"start": 5275,
"end": 5850
}
|
class ____ {
@Id
@GeneratedValue
private Long id;
@Version
private Long version;
@ManyToOne
@JoinColumn(name = "ref")
private MyEntity2 ref;
public MyEntity1() {
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getVersion() {
return version;
}
public void setVersion(Long version) {
this.version = version;
}
public MyEntity2 getRef() {
return ref;
}
public void setRef(MyEntity2 ref) {
this.ref = ref;
}
}
@Entity(name = "MyEntity2")
public static
|
MyEntity1
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/manytomany/biowned/ListBiowning2Entity.java
|
{
"start": 649,
"end": 2320
}
|
class ____ {
@Id
@GeneratedValue
private Integer id;
private String data;
@ManyToMany
@JoinTable(
name = "biowning",
joinColumns = @JoinColumn(name = "biowning2_id"),
inverseJoinColumns = @JoinColumn(name = "biowning1_id", insertable = false, updatable = false)
)
private List<ListBiowning1Entity> references = new ArrayList<ListBiowning1Entity>();
public ListBiowning2Entity() {
}
public ListBiowning2Entity(Integer id, String data) {
this.id = id;
this.data = data;
}
public ListBiowning2Entity(String data) {
this.data = data;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public List<ListBiowning1Entity> getReferences() {
return references;
}
public void setReferences(List<ListBiowning1Entity> references) {
this.references = references;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof ListBiowning2Entity) ) {
return false;
}
ListBiowning2Entity that = (ListBiowning2Entity) o;
if ( data != null ? !data.equals( that.data ) : that.data != null ) {
return false;
}
//noinspection RedundantIfStatement
if ( id != null ? !id.equals( that.id ) : that.id != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id != null ? id.hashCode() : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
public String toString() {
return "ListBiowning2Entity(id = " + id + ", data = " + data + ")";
}
}
|
ListBiowning2Entity
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/inference/textembedding/TextEmbeddingOperatorTests.java
|
{
"start": 963,
"end": 5725
}
|
class ____ extends InferenceOperatorTestCase<DenseEmbeddingFloatResults> {
private static final String SIMPLE_INFERENCE_ID = "test_text_embedding";
private static final int EMBEDDING_DIMENSION = 384; // Common embedding dimension
private int inputChannel;
@Before
public void initTextEmbeddingChannels() {
inputChannel = between(0, inputsCount - 1);
}
@Override
protected Operator.OperatorFactory simple(SimpleOptions options) {
return new TextEmbeddingOperator.Factory(mockedInferenceService(), SIMPLE_INFERENCE_ID, evaluatorFactory(inputChannel));
}
@Override
protected void assertSimpleOutput(List<Page> input, List<Page> results) {
assertThat(results, hasSize(input.size()));
for (int curPage = 0; curPage < input.size(); curPage++) {
Page inputPage = input.get(curPage);
Page resultPage = results.get(curPage);
assertEquals(inputPage.getPositionCount(), resultPage.getPositionCount());
assertEquals(inputPage.getBlockCount() + 1, resultPage.getBlockCount());
for (int channel = 0; channel < inputPage.getBlockCount(); channel++) {
Block inputBlock = inputPage.getBlock(channel);
Block resultBlock = resultPage.getBlock(channel);
assertBlockContentEquals(inputBlock, resultBlock);
}
assertTextEmbeddingResults(inputPage, resultPage);
}
}
private void assertTextEmbeddingResults(Page inputPage, Page resultPage) {
BytesRefBlock inputBlock = resultPage.getBlock(inputChannel);
FloatBlock resultBlock = (FloatBlock) resultPage.getBlock(inputPage.getBlockCount());
BlockStringReader blockReader = new InferenceOperatorTestCase.BlockStringReader();
for (int curPos = 0; curPos < inputPage.getPositionCount(); curPos++) {
if (inputBlock.isNull(curPos)) {
assertThat(resultBlock.isNull(curPos), equalTo(true));
} else {
// Verify that we have an embedding vector at this position
assertThat(resultBlock.isNull(curPos), equalTo(false));
assertThat(resultBlock.getValueCount(curPos), equalTo(EMBEDDING_DIMENSION));
// Get the input text to verify our mock embedding generation
String inputText = blockReader.readString(inputBlock, curPos);
// Verify the embedding values match our mock generation pattern
int firstValueIndex = resultBlock.getFirstValueIndex(curPos);
for (int i = 0; i < EMBEDDING_DIMENSION; i++) {
float expectedValue = generateMockEmbeddingValue(inputText, i);
float actualValue = resultBlock.getFloat(firstValueIndex + i);
assertThat(actualValue, equalTo(expectedValue));
}
}
}
}
@Override
protected DenseEmbeddingFloatResults mockInferenceResult(InferenceAction.Request request) {
// For text embedding, we expect one input text per request
String inputText = request.getInput().get(0);
// Generate a deterministic mock embedding based on the input text
float[] mockEmbedding = generateMockEmbedding(inputText, EMBEDDING_DIMENSION);
var embeddingResult = new DenseEmbeddingFloatResults.Embedding(mockEmbedding);
return new DenseEmbeddingFloatResults(List.of(embeddingResult));
}
@Override
protected Matcher<String> expectedDescriptionOfSimple() {
return expectedToStringOfSimple();
}
@Override
protected Matcher<String> expectedToStringOfSimple() {
return equalTo("TextEmbeddingOperator[inference_id=[" + SIMPLE_INFERENCE_ID + "]]");
}
/**
* Generates a deterministic mock embedding vector based on the input text.
* This ensures our tests are repeatable and verifiable.
*/
private float[] generateMockEmbedding(String inputText, int dimension) {
float[] embedding = new float[dimension];
int textHash = inputText.hashCode();
for (int i = 0; i < dimension; i++) {
embedding[i] = generateMockEmbeddingValue(inputText, i);
}
return embedding;
}
/**
* Generates a single embedding value for a specific dimension based on input text.
* Uses a deterministic function so tests are repeatable.
*/
private float generateMockEmbeddingValue(String inputText, int dimension) {
// Create a deterministic value based on input text and dimension
int hash = (inputText.hashCode() + dimension * 31) % 10000;
return hash / 10000.0f; // Normalize to [0, 1) range
}
}
|
TextEmbeddingOperatorTests
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/cache/CacheReproTests.java
|
{
"start": 15481,
"end": 15702
}
|
class ____ {
@Bean
public CacheManager cacheManager() {
return new ConcurrentMapCacheManager();
}
@Bean
public Spr11249Service service() {
return new Spr11249Service();
}
}
public static
|
Spr11249Config
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TimeoutExtensionTests.java
|
{
"start": 28235,
"end": 28398
}
|
class ____ {
@BeforeEach
void setUp() {
}
@Test
void testMethod() {
}
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static
|
NestedClass
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/model/source/Method.java
|
{
"start": 4570,
"end": 6148
}
|
class ____ interface) that defines this method.
*/
Type getDefiningType();
/**
* @return {@code true}, if the method represents a mapping lifecycle callback (Before/After mapping method)
*/
boolean isLifecycleCallbackMethod();
/**
* @return {@code true}, if the method is an update method, i.e. it has a parameter annotated with
* {@code @MappingTarget}.
*/
boolean isUpdateMethod();
/**
*
* @return the mapping options for this method
*/
MappingMethodOptions getOptions();
default ConditionMethodOptions getConditionOptions() {
return ConditionMethodOptions.empty();
}
/**
*
* @return true when @MappingTarget annotated parameter is the same type as the return type. The method has
* to be an update method in order for this to be true.
*/
default boolean isMappingTargetAssignableToReturnType() {
return isUpdateMethod() && getResultType().isAssignableTo( getReturnType() );
}
/**
* @return the first source type, intended for mapping methods from single source to target
*/
default Type getMappingSourceType() {
return getSourceParameters().get( 0 ).getType();
}
/**
* @return the short name for error messages when verbose, full name when not
*/
String describe();
/**
* Returns the formal type parameters of this method in declaration order.
*
* @return the formal type parameters, or an empty list if there are none
*/
List<Type> getTypeParameters();
}
|
or
|
java
|
alibaba__nacos
|
console/src/main/java/com/alibaba/nacos/console/controller/v3/ai/ConsoleA2aController.java
|
{
"start": 2542,
"end": 6316
}
|
class ____ {
private final A2aProxy a2aProxy;
public ConsoleA2aController(A2aProxy a2aProxy) {
this.a2aProxy = a2aProxy;
}
/**
* register agent.
*
* @param form the agent card form to register
* @return result of the registration operation
* @throws NacosException if the agent registration fails due to invalid input or internal error
*/
@PostMapping
@Secured(action = ActionTypes.WRITE, signType = SignType.AI, apiType = ApiType.CONSOLE_API)
public Result<String> registerAgent(AgentCardForm form) throws NacosException {
form.validate();
AgentCard agentCard = AgentRequestUtil.parseAgentCard(form);
a2aProxy.registerAgent(agentCard, form);
return Result.success("ok");
}
/**
* get agent card.
*
* @param form the agent form to get
* @return result of the get operation
* @throws NacosApiException if the agent get fails due to invalid input or internal error
*/
@GetMapping
@Secured(action = ActionTypes.READ, signType = SignType.AI, apiType = ApiType.CONSOLE_API)
public Result<AgentCardDetailInfo> getAgentCard(AgentForm form) throws NacosException {
form.validate();
return Result.success(a2aProxy.getAgentCard(form));
}
/**
* update agent.
*
* @param form the agent update form to update
* @return result of the update operation
* @throws NacosException if the agent update fails due to invalid input or internal error
*/
@PutMapping
@Secured(action = ActionTypes.WRITE, signType = SignType.AI, apiType = ApiType.CONSOLE_API)
public Result<String> updateAgentCard(AgentCardUpdateForm form) throws NacosException {
form.validate();
AgentCard agentCard = AgentRequestUtil.parseAgentCard(form);
a2aProxy.updateAgentCard(agentCard, form);
return Result.success("ok");
}
/**
* delete agent.
*
* @param form the agent form to delete
* @return result of the deletion operation
* @throws NacosException if the agent deletion fails due to invalid input or internal error
*/
@DeleteMapping
@Secured(action = ActionTypes.WRITE, signType = SignType.AI, apiType = ApiType.CONSOLE_API)
public Result<String> deleteAgent(AgentForm form) throws NacosException {
form.validate();
a2aProxy.deleteAgent(form);
return Result.success("ok");
}
/**
* list agents.
*
* @param agentListForm the agent list form to list
* @param pageForm the page form to list
* @return result of the list operation
* @throws NacosException if the agent list fails due to invalid input or internal error
*/
@GetMapping("/list")
@Secured(action = ActionTypes.READ, signType = SignType.AI, apiType = ApiType.CONSOLE_API)
public Result<Page<AgentCardVersionInfo>> listAgents(AgentListForm agentListForm, PageForm pageForm)
throws NacosException {
agentListForm.validate();
pageForm.validate();
return Result.success(a2aProxy.listAgents(agentListForm, pageForm));
}
/**
* List all versions for target Agent.
*
* @param agentForm agent form
* @return all version for target agent.
* @throws NacosException nacos exception
*/
@GetMapping("/version/list")
@Secured(action = ActionTypes.READ, signType = SignType.AI, apiType = ApiType.ADMIN_API)
public Result<List<AgentVersionDetail>> listAgentVersions(AgentForm agentForm) throws NacosException {
agentForm.validate();
return Result.success(a2aProxy.listAgentVersions(agentForm.getNamespaceId(), agentForm.getAgentName()));
}
}
|
ConsoleA2aController
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemOperationsMocked.java
|
{
"start": 1098,
"end": 2841
}
|
class ____ extends
FSMainOperationsBaseTest {
private static final String TEST_ROOT_DIR =
"/tmp/TestNativeAzureFileSystemOperationsMocked";
public TestNativeAzureFileSystemOperationsMocked (){
super(TEST_ROOT_DIR);
}
@BeforeEach
@Override
public void setUp() throws Exception {
fSys = AzureBlobStorageTestAccount.createMock().getFileSystem();
}
@Override
protected FileSystem createFileSystem() throws Exception {
return AzureBlobStorageTestAccount.createMock().getFileSystem();
}
public void testListStatusThrowsExceptionForUnreadableDir() throws Exception {
System.out
.println("Skipping testListStatusThrowsExceptionForUnreadableDir since WASB"
+ " doesn't honor directory permissions.");
assumeNotWindows();
}
@Override
public void testGlobStatusThrowsExceptionForUnreadableDir()
throws Exception {
System.out.println(
"Skipping testGlobStatusThrowsExceptionForUnreadableDir since WASB"
+ " doesn't honor directory permissions.");
assumeNotWindows();
}
@Override
public String getTestRootDir() {
return TEST_ROOT_DIR;
}
@Override
public Path getTestRootPath(FileSystem fSys) {
return fSys.makeQualified(new Path(TEST_ROOT_DIR));
}
@Override
public Path getTestRootPath(FileSystem fSys, String pathString) {
return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}
@Override
public Path getAbsoluteTestRootPath(FileSystem fSys) {
Path testRootPath = new Path(TEST_ROOT_DIR);
if (testRootPath.isAbsolute()) {
return testRootPath;
} else {
return new Path(fSys.getWorkingDirectory(), TEST_ROOT_DIR);
}
}
}
|
TestNativeAzureFileSystemOperationsMocked
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/CollectionIterator.java
|
{
"start": 1050,
"end": 1876
}
|
class ____<T> implements ResettableMutableObjectIterator<T> {
private final Collection<T> collection;
private Iterator<T> iterator;
public CollectionIterator(Collection<T> collection) {
this.collection = collection;
this.iterator = collection.iterator();
}
@Override
public T next(T reuse) throws IOException {
return next();
}
@Override
public T next() throws IOException {
if (!iterator.hasNext()) {
return null;
} else {
return iterator.next();
}
}
@Override
public void reset() throws IOException {
iterator = collection.iterator();
}
public static <T> CollectionIterator<T> of(T... values) {
return new CollectionIterator<T>(Arrays.asList(values));
}
}
|
CollectionIterator
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/RecursiveComparisonAssert.java
|
{
"start": 36977,
"end": 39356
}
|
class ____ {
* int number;
* String street;
* }
*
* Person sherlock = new Person("Sherlock", 1.80);
* sherlock.home.address.street = "Baker Street";
* sherlock.home.address.number = 221;
*
* Person sherlock2 = new Person("Sherlock", 1.90);
* sherlock2.home.address.street = "Butcher Street";
* sherlock2.home.address.number = 221;
*
* // assertion succeeds as we ignore Address and height
* assertThat(sherlock).usingRecursiveComparison()
* .ignoringFieldsOfTypes(double.class, Address.class)
* .isEqualTo(sherlock2);
*
* // now this assertion fails as expected since the home.address.street fields and height differ
* assertThat(sherlock).usingRecursiveComparison()
* .isEqualTo(sherlock2);</code></pre>
*
* @param typesToIgnore the types we want to ignore in the object under test fields.
* @return this {@link RecursiveComparisonAssert} to chain other methods.
*/
public RecursiveComparisonAssert<?> ignoringFieldsOfTypes(Class<?>... typesToIgnore) {
recursiveComparisonConfiguration.ignoreFieldsOfTypes(typesToIgnore);
return myself;
}
/**
* Makes the recursive comparison to ignore the fields of the object under test having types matching one of the given regexes.
* The fields are ignored if their types <b>exactly match one of the regexes</b>, if a field is a subtype of a matched type it is not ignored.
* <p>
* One use case of this method is to ignore types that can't be introspected.
* <p>
* If {@code strictTypeChecking} mode is enabled and a field of the object under test is null, the recursive
* comparison evaluates the corresponding expected field's type (if not null), if it is disabled then the field is evaluated as
* usual (i.e. it is not ignored).
* <p>
* <b>Warning</b>: primitive types are not directly supported because under the hood they are converted to their
* corresponding wrapping types, for example {@code int} to {@code java.lang.Integer}. The preferred way to ignore
* primitive types is to use {@link #ignoringFieldsOfTypes(Class[])}.
* Another way is to ignore the wrapping type, for example ignoring {@code java.lang.Integer} ignores both
* {@code java.lang.Integer} and {@code int} fields.
* <p>
* Example:
* <pre><code class='java'>
|
Address
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/SubResourceFieldInjectionTest.java
|
{
"start": 5210,
"end": 5462
}
|
class ____ {
@RestPath("order-id")
public String orderId;
@RestPath
public String positionId;
@GET
public String get() {
return orderId + ":" + positionId;
}
}
}
|
PositionResourceImpl
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/OptimisticLockingAggregationRepository.java
|
{
"start": 1544,
"end": 5299
}
|
class ____ extends RuntimeException {
private static final @Serial long serialVersionUID = 1L;
}
/**
* Add the given {@link org.apache.camel.Exchange} under the correlation key.
* <p/>
* Will perform optimistic locking to replace expected existing exchange with the new supplied exchange.
* <p/>
* If the {@code oldExchange} is null the underlying implementation is to assume this is the very first Exchange for
* the supplied correlation key. When the implementation comes to store to the Exchange if there is already an
* existing Exchange present for this correlation key the implementation should throw an OptimisticLockingException.
* <p/>
* If the {@code oldExchange} is not null the underlying implementation should use it to compare with the existing
* exchange when doing an atomic compare-and-set/swap operation.
* <p/>
* The implementation may achieve this by storing a version identifier in the Exchange as a parameter. Set before
* returning from {@link AggregationRepository#get(org.apache.camel.CamelContext, String)}} and retrieved from the
* exchange when passed to
* {@link AggregationRepository#add(org.apache.camel.CamelContext, String, org.apache.camel.Exchange)}.
* <p/>
* Note: The {@link org.apache.camel.processor.aggregate.MemoryAggregationRepository} is an exception to this
* recommendation. It uses the {@code oldExchange}'s Object identify to perform it's compare-and-set/swap operation,
* instead of a version parameter. This is not the recommended approach, and should be avoided.
* <p/>
* The {@link org.apache.camel.processor.aggregate.AggregateProcessor} will ensure that the exchange received from
* {@link OptimisticLockingAggregationRepository#get(org.apache.camel.CamelContext, String)} is passed as
* {@code oldExchange}, and that the aggregated exchange received from the
* {@link org.apache.camel.processor.aggregate.AggregationStrategy} is passed as the {@code newExchange}.
*
* @param camelContext the current CamelContext
* @param key the correlation key
* @param oldExchange the old exchange that is expected to exist when replacing with the new
* exchange
* @param newExchange the new aggregated exchange, to replace old exchange
* @return the old exchange if any existed
* @throws OptimisticLockingException This should be thrown when the currently stored exchange differs from the
* supplied oldExchange.
*/
Exchange add(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange)
throws OptimisticLockingException;
/**
* Removes the given Exchange when both the supplied key and Exchange are present in the repository. If the supplied
* Exchange does not match the Exchange actually stored against the key this method should throw an
* OptimisticLockingException to indicate that the value of the correlation key has changed from the expected value.
* <p/>
*
* @param camelContext the current CamelContext
* @param key the correlation key
* @param exchange the exchange to remove
* @throws OptimisticLockingException This should be thrown when the exchange has already been deleted, or otherwise
* modified.
*/
@Override
void remove(CamelContext camelContext, String key, Exchange exchange) throws OptimisticLockingException;
}
|
OptimisticLockingException
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
|
{
"start": 1626,
"end": 16727
}
|
class ____ extends MapperTestCase {
/**
* @return a List of OutOfRangeSpec to test for this number type
*/
protected abstract List<NumberTypeOutOfRangeSpec> outOfRangeSpecs();
/**
* @return an appropriate value to use for a missing value for this number type
*/
protected abstract Number missingValue();
/**
* @return does this mapper allow index time scripts
*/
protected boolean allowsIndexTimeScript() {
return false;
}
@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerConflictCheck("doc_values", b -> b.field("doc_values", false));
checker.registerConflictCheck("index", b -> b.field("index", false));
checker.registerConflictCheck("store", b -> b.field("store", true));
checker.registerConflictCheck("null_value", b -> b.field("null_value", 1));
checker.registerUpdateCheck(b -> b.field("coerce", false), m -> assertFalse(((NumberFieldMapper) m).coerce()));
if (allowsIndexTimeScript()) {
checker.registerConflictCheck("script", b -> b.field("script", "foo"));
checker.registerUpdateCheck(b -> {
minimalMapping(b);
b.field("script", "test");
b.field("on_script_error", "fail");
}, b -> {
minimalMapping(b);
b.field("script", "test");
b.field("on_script_error", "continue");
}, m -> assertThat((m).builderParams.onScriptError(), is(OnScriptError.CONTINUE)));
}
}
@Override
protected Object getSampleValueForDocument() {
return 123;
}
public void testExistsQueryDocValuesDisabled() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("doc_values", false);
}));
assertExistsQuery(mapperService);
assertParseMinimalWarnings();
}
public void testAggregationsDocValuesDisabled() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("doc_values", false);
}));
assertAggregatableConsistency(mapperService.fieldType("field"));
}
public void testDefaults() throws Exception {
XContentBuilder mapping = fieldMapping(this::minimalMapping);
DocumentMapper mapper = createDocumentMapper(mapping);
assertEquals(Strings.toString(mapping), mapper.mappingSource().toString());
ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
List<IndexableField> fields = doc.rootDoc().getFields("field");
// One field indexes points
assertEquals(1, fields.stream().filter(f -> f.fieldType().pointIndexDimensionCount() != 0).count());
// One field indexes doc values
assertEquals(1, fields.stream().filter(f -> f.fieldType().docValuesType() != DocValuesType.NONE).count());
}
public void testNotIndexed() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("index", false);
}));
ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
List<IndexableField> fields = doc.rootDoc().getFields("field");
assertEquals(1, fields.size());
IndexableField dvField = fields.get(0);
assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
}
public void testNoDocValues() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("doc_values", false);
}));
ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
List<IndexableField> fields = doc.rootDoc().getFields("field");
assertEquals(1, fields.size());
IndexableField pointField = fields.get(0);
assertEquals(1, pointField.fieldType().pointIndexDimensionCount());
assertEquals(123, pointField.numericValue().doubleValue(), 0d);
}
public void testStore() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("store", true);
}));
ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
List<IndexableField> fields = doc.rootDoc().getFields("field");
// One field indexes points
assertEquals(1, fields.stream().filter(f -> f.fieldType().pointIndexDimensionCount() != 0).count());
// One field indexes doc values
assertEquals(1, fields.stream().filter(f -> f.fieldType().docValuesType() != DocValuesType.NONE).count());
// One field is stored
assertEquals(1, fields.stream().filter(f -> f.fieldType().stored()).count());
}
public void testCoerce() throws IOException {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
ParsedDocument doc = mapper.parse(source(b -> b.field("field", "123")));
List<IndexableField> fields = doc.rootDoc().getFields("field");
// One field indexes points
assertEquals(1, fields.stream().filter(f -> f.fieldType().pointIndexDimensionCount() != 0).count());
// One field indexes doc values
assertEquals(1, fields.stream().filter(f -> f.fieldType().docValuesType() != DocValuesType.NONE).count());
DocumentMapper mapper2 = createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("coerce", false);
}));
Exception e = expectThrows(DocumentParsingException.class, () -> mapper2.parse(source(b -> b.field("field", "123"))));
assertThat(e.getCause().getMessage(), containsString("passed as String"));
}
@Override
protected boolean supportsIgnoreMalformed() {
return true;
}
@Override
protected List<ExampleMalformedValue> exampleMalformedValues() {
return List.of(
exampleMalformedValue("a").errorMatches("For input string: \"a\""),
exampleMalformedValue(b -> b.value(false)).errorMatches(
both(containsString("Current token")).and(containsString("not numeric, can not use numeric value accessors"))
)
);
}
/**
* Test that in case the malformed value is an xContent object we throw error regardless of `ignore_malformed`
*/
public void testIgnoreMalformedWithObject() throws Exception {
SourceToParse malformed = source(b -> b.startObject("field").field("foo", "bar").endObject());
for (Boolean ignoreMalformed : new Boolean[] { true, false }) {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("ignore_malformed", ignoreMalformed);
}));
DocumentParsingException e = expectThrows(DocumentParsingException.class, () -> mapper.parse(malformed));
assertThat(e.getCause().getMessage(), containsString("Cannot parse object as number"));
}
}
    public void testNullValue() throws IOException {
        DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
        SourceToParse source = source(b -> b.nullField("field"));
        ParsedDocument doc = mapper.parse(source);
        // Without a configured null_value, an explicit null indexes nothing.
        assertThat(doc.rootDoc().getFields("field"), empty());
        Number missing = missingValue();
        mapper = createDocumentMapper(fieldMapping(b -> {
            minimalMapping(b);
            b.field("null_value", missing);
        }));
        doc = mapper.parse(source);
        // With null_value configured, the substitute is indexed instead: one
        // point field and one doc-values field are produced, neither stored.
        List<IndexableField> fields = doc.rootDoc().getFields("field");
        List<IndexableField> pointFields = fields.stream().filter(f -> f.fieldType().pointIndexDimensionCount() != 0).toList();
        assertEquals(1, pointFields.size());
        assertEquals(1, pointFields.get(0).fieldType().pointIndexDimensionCount());
        assertFalse(pointFields.get(0).fieldType().stored());
        List<IndexableField> dvFields = fields.stream().filter(f -> f.fieldType().docValuesType() != DocValuesType.NONE).toList();
        assertEquals(1, dvFields.size());
        assertEquals(DocValuesType.SORTED_NUMERIC, dvFields.get(0).fieldType().docValuesType());
        assertFalse(dvFields.get(0).fieldType().stored());
    }
public void testOutOfRangeValues() throws IOException {
for (NumberTypeOutOfRangeSpec item : outOfRangeSpecs()) {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", item.type.typeName())));
Exception e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(item::write)));
assertThat(
"Incorrect error message for [" + item.type + "] with value [" + item.value + "]",
e.getCause().getMessage(),
containsString(item.message)
);
}
}
public void testDimension() throws IOException {
// Test default setting
MapperService mapperService = createMapperService(fieldMapping(b -> minimalMapping(b)));
NumberFieldMapper.NumberFieldType ft = (NumberFieldMapper.NumberFieldType) mapperService.fieldType("field");
assertFalse(ft.isDimension());
// dimension = false is allowed
assertDimension(false, NumberFieldMapper.NumberFieldType::isDimension);
// dimension = true is allowed
assertDimension(true, NumberFieldMapper.NumberFieldType::isDimension);
assertTimeSeriesIndexing();
}
public void testMetricType() throws IOException {
// Test default setting
MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping));
NumberFieldMapper.NumberFieldType ft = (NumberFieldMapper.NumberFieldType) mapperService.fieldType("field");
assertNull(ft.getMetricType());
assertMetricType("gauge", NumberFieldMapper.NumberFieldType::getMetricType);
assertMetricType("counter", NumberFieldMapper.NumberFieldType::getMetricType);
{
// Test invalid metric type for this field type
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("time_series_metric", "histogram");
})));
assertThat(
e.getCause().getMessage(),
containsString("Unknown value [histogram] for field [time_series_metric] - accepted values are [gauge, counter]")
);
}
{
// Test invalid metric type for this field type
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("time_series_metric", "unknown");
})));
assertThat(
e.getCause().getMessage(),
containsString("Unknown value [unknown] for field [time_series_metric] - accepted values are [gauge, counter]")
);
}
}
public void testTimeSeriesIndexDefault() throws Exception {
var randomMetricType = randomFrom(TimeSeriesParams.MetricType.scalar());
var indexSettings = getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.getName())
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dimension_field");
var mapperService = createMapperService(indexSettings.build(), fieldMapping(b -> {
minimalMapping(b);
b.field("time_series_metric", randomMetricType.toString());
}));
var ft = (NumberFieldMapper.NumberFieldType) mapperService.fieldType("field");
assertThat(ft.getMetricType(), equalTo(randomMetricType));
assertTrue(ft.indexType().hasOnlyDocValues());
}
    public void testMetricAndDocvalues() {
        // time_series_metric requires doc values, so combining it with
        // doc_values=false must be rejected at mapping-parse time.
        Exception e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(fieldMapping(b -> {
            minimalMapping(b);
            b.field("time_series_metric", "counter").field("doc_values", false);
        })));
        assertThat(e.getCause().getMessage(), containsString("Field [time_series_metric] requires that [doc_values] is true"));
    }
@Override
protected Object generateRandomInputValue(MappedFieldType ft) {
Number n = randomNumber();
return randomBoolean() ? n : n.toString();
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
return new IngestScriptSupport() {
@Override
@SuppressWarnings("unchecked")
protected <T> T compileOtherScript(Script script, ScriptContext<T> context) {
if (context == LongFieldScript.CONTEXT) {
return (T) LongFieldScript.PARSE_FROM_SOURCE;
}
if (context == DoubleFieldScript.CONTEXT) {
return (T) DoubleFieldScript.PARSE_FROM_SOURCE;
}
throw new UnsupportedOperationException("Unknown script " + script.getIdOrCode());
}
@Override
ScriptFactory emptyFieldScript() {
return null;
}
@Override
ScriptFactory nonEmptyFieldScript() {
return null;
}
};
}
public void testScriptableTypes() throws IOException {
if (allowsIndexTimeScript()) {
createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("script", "foo");
}));
} else {
Exception e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(fieldMapping(b -> {
minimalMapping(b);
b.field("script", "foo");
})));
assertEquals("Failed to parse mapping: Unknown parameter [script] for mapper [field]", e.getMessage());
}
}
public void testAllowMultipleValuesField() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> minimalMapping(b)));
Mapper mapper = mapperService.mappingLookup().getMapper("field");
if (mapper instanceof NumberFieldMapper numberFieldMapper) {
numberFieldMapper.setAllowMultipleValues(false);
} else {
fail("mapper [" + mapper.getClass() + "] error, not number field");
}
Exception e = expectThrows(
DocumentParsingException.class,
() -> mapperService.documentMapper().parse(source(b -> b.array("field", randomNumber(), randomNumber(), randomNumber())))
);
assertThat(e.getCause().getMessage(), containsString("Only one field can be stored per key"));
}
protected abstract Number randomNumber();
protected final
|
NumberFieldMapperTests
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/test/java/org/junit/vintage/engine/descriptor/VintageTestDescriptorTests.java
|
{
"start": 610,
"end": 1774
}
|
class ____ {
    private static final UniqueId uniqueId = UniqueId.forEngine("vintage");

    // Builds a descriptor for the given JUnit 4 description with no parent,
    // mirroring how every test here constructs its subject.
    private static VintageTestDescriptor descriptorFor(Description description) {
        return new VintageTestDescriptor(uniqueId, description, null);
    }

    @Test
    void legacyReportingNameUsesClassName() {
        // A class-level description reports the fully-qualified class name.
        var classDescription = Description.createSuiteDescription(ConcreteJUnit4TestCase.class);
        assertEquals("org.junit.vintage.engine.samples.junit4.ConcreteJUnit4TestCase",
            descriptorFor(classDescription).getLegacyReportingName());
    }

    @Test
    void legacyReportingNameUsesMethodName() {
        // A method-level description reports the bare method name.
        var methodDescription = Description.createTestDescription(ConcreteJUnit4TestCase.class, "legacyTest");
        assertEquals("legacyTest", descriptorFor(methodDescription).getLegacyReportingName());
    }

    @Test
    void legacyReportingNameFallbackToDisplayName() {
        // A plain suite name (no class/method) falls back to the display name.
        var suiteName = "Legacy Suite";
        var suiteDescription = Description.createSuiteDescription(suiteName);
        var descriptor = descriptorFor(suiteDescription);
        assertEquals(descriptor.getDisplayName(), descriptor.getLegacyReportingName());
        assertEquals(suiteName, descriptor.getLegacyReportingName());
    }
}
|
VintageTestDescriptorTests
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/misc/SwitchDomain.java
|
{
"start": 11927,
"end": 12796
}
|
class ____ implements HealthParams {

    // NOTE(review): these bounds are not enforced by the setters below;
    // presumably validation happens at the call sites — confirm.
    public static final int MIN_MAX = 3000;

    public static final int MIN_MIN = 500;

    // Upper bound parameter (defaults to 5000).
    private int max = 5000;

    // Lower bound parameter (defaults to 500).
    private int min = 500;

    // Factor parameter (defaults to 0.85).
    private float factor = 0.85F;

    @Override
    public int getMax() {
        return this.max;
    }

    public void setMax(int max) {
        this.max = max;
    }

    @Override
    public int getMin() {
        return this.min;
    }

    public void setMin(int min) {
        this.min = min;
    }

    @Override
    public float getFactor() {
        return this.factor;
    }

    public void setFactor(float factor) {
        this.factor = factor;
    }
}
public static
|
HttpHealthParams
|
java
|
apache__kafka
|
raft/src/main/java/org/apache/kafka/raft/RaftMessage.java
|
{
"start": 891,
"end": 966
}
|
interface ____ {
    /** Correlation id — presumably used to match requests with responses; confirm at call sites. */
    int correlationId();
    /** The underlying Kafka protocol payload carried by this message. */
    ApiMessage data();
}
|
RaftMessage
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/ValueDeserializer.java
|
{
"start": 1356,
"end": 2039
}
|
interface ____ needed to separate
* resolution of dependent deserializers (which may have cyclic link back
* to deserializer itself, directly or indirectly).
*<p>
* In addition, to support per-property annotations (to configure aspects
* of deserialization on per-property basis), deserializers may want
* to override
* {@link #createContextual} which allows specialization of deserializers:
* it is passed information on property, and can create a newly configured
* deserializer for handling that particular property.
*<br>
* Resolution of deserializers occurs before contextualization.
*<p>
* NOTE: In Jackson 2.x was named {@code JsonDeserializer}
*/
public abstract
|
is
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/mapping/Environment.java
|
{
"start": 1544,
"end": 2408
}
|
class ____ {
    // Identifier of the environment being built; required and immutable.
    private final String id;
    // Optional until build(); no defaulting or validation is performed here.
    private TransactionFactory transactionFactory;
    private DataSource dataSource;
    public Builder(String id) {
        this.id = id;
    }
    // Fluent setter; returns this builder for chaining.
    public Builder transactionFactory(TransactionFactory transactionFactory) {
        this.transactionFactory = transactionFactory;
        return this;
    }
    // Fluent setter; returns this builder for chaining.
    public Builder dataSource(DataSource dataSource) {
        this.dataSource = dataSource;
        return this;
    }
    // Accessor for the id supplied at construction.
    public String id() {
        return this.id;
    }
    // Assembles the Environment from the collected parts.
    // NOTE(review): null checks, if any, presumably live in Environment's constructor — confirm.
    public Environment build() {
        return new Environment(this.id, this.transactionFactory, this.dataSource);
    }
}
    // The identifier of this environment.
    public String getId() {
        return this.id;
    }
    // The transaction factory configured for this environment; may be null if never set.
    public TransactionFactory getTransactionFactory() {
        return this.transactionFactory;
    }
    // The data source configured for this environment; may be null if never set.
    public DataSource getDataSource() {
        return this.dataSource;
    }
}
|
Builder
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SpelReproTests.java
|
{
"start": 66555,
"end": 67343
}
|
class ____ implements PropertyAccessor {

    @Override
    public Class<?>[] getSpecificTargetClasses() {
        // This accessor only applies when the evaluation target is a Map.
        return new Class<?>[] {Map.class};
    }

    @Override
    public boolean canRead(EvaluationContext context, Object target, String name) {
        // A property is readable exactly when the map contains its key.
        Map<?, ?> map = (Map<?, ?>) target;
        return map.containsKey(name);
    }

    @Override
    public TypedValue read(EvaluationContext context, Object target, String name) {
        Map<?, ?> map = (Map<?, ?>) target;
        return new TypedValue(map.get(name));
    }

    @Override
    public boolean canWrite(EvaluationContext context, Object target, String name) {
        // Writes are unconditionally allowed; put() handles absent keys.
        return true;
    }

    @Override
    @SuppressWarnings("unchecked")
    public void write(EvaluationContext context, Object target, String name, Object newValue) {
        Map<String, Object> map = (Map<String, Object>) target;
        map.put(name, newValue);
    }
}
static
|
MapAccessor
|
java
|
netty__netty
|
transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java
|
{
"start": 3962,
"end": 22582
}
|
class ____ {
@BeforeAll
public static void setUpClass() {
assumeFalse(PlatformDependent.javaVersion() == 26, "Fails on JDK26, possible Blockhound bug?");
BlockHound.install();
}
@Test
public void testServiceLoader() {
for (BlockHoundIntegration integration : ServiceLoader.load(BlockHoundIntegration.class)) {
if (integration instanceof NettyBlockHoundIntegration) {
return;
}
}
fail("NettyBlockHoundIntegration cannot be loaded with ServiceLoader");
}
    @Test
    public void testBlockingCallsInNettyThreads() throws Exception {
        // Thread.sleep(0) counts as a blocking call for BlockHound, so running
        // it on a Netty event-executor thread must be rejected.
        final FutureTask<Void> future = new FutureTask<>(() -> {
            Thread.sleep(0);
            return null;
        });
        GlobalEventExecutor.INSTANCE.execute(future);
        try {
            future.get(5, TimeUnit.SECONDS);
            fail("Expected an exception due to a blocking call but none was thrown");
        } catch (ExecutionException e) {
            // BlockHound surfaces violations as BlockingOperationError.
            assertInstanceOf(BlockingOperationError.class, e.getCause());
        }
    }
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testGlobalEventExecutorTakeTask() throws InterruptedException {
testEventExecutorTakeTask(GlobalEventExecutor.INSTANCE);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testSingleThreadEventExecutorTakeTask() throws InterruptedException {
SingleThreadEventExecutor executor =
new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) {
@Override
protected void run() {
while (!confirmShutdown()) {
Runnable task = takeTask();
if (task != null) {
task.run();
}
}
}
};
testEventExecutorTakeTask(executor);
}
    private static void testEventExecutorTakeTask(EventExecutor eventExecutor) throws InterruptedException {
        // Scheduling a delayed task drives the executor through its task-take
        // path; sync() and await() must complete without a BlockHound error.
        CountDownLatch latch = new CountDownLatch(1);
        ScheduledFuture<?> f = eventExecutor.schedule(latch::countDown, 10, TimeUnit.MILLISECONDS);
        f.sync();
        latch.await();
    }
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testSingleThreadEventExecutorAddTask() throws Exception {
TestLinkedBlockingQueue<Runnable> taskQueue = new TestLinkedBlockingQueue<>();
SingleThreadEventExecutor executor =
new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) {
@Override
protected Queue<Runnable> newTaskQueue(int maxPendingTasks) {
return taskQueue;
}
@Override
protected void run() {
while (!confirmShutdown()) {
Runnable task = takeTask();
if (task != null) {
task.run();
}
}
}
};
taskQueue.emulateContention();
CountDownLatch latch = new CountDownLatch(1);
executor.submit(() -> {
executor.execute(() -> { }); // calls addTask
latch.countDown();
});
taskQueue.waitUntilContented();
taskQueue.removeContention();
latch.await();
}
    @Test
    void permittingBlockingCallsInFastThreadLocalThreadSubclass() throws Exception {
        // A FastThreadLocalThread subclass that opts in via permitBlockingCalls()
        // may block without BlockHound raising an error.
        final FutureTask<Void> future = new FutureTask<>(() -> {
            Thread.sleep(0);
            return null;
        });
        FastThreadLocalThread thread = new FastThreadLocalThread(future) {
            @Override
            public boolean permitBlockingCalls() {
                return true; // The Thread.sleep(0) call should not be flagged because we allow blocking calls.
            }
        };
        thread.start();
        // get() rethrows any BlockHound violation as an ExecutionException.
        future.get(5, TimeUnit.SECONDS);
        thread.join();
    }
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testHashedWheelTimerStartStop() throws Exception {
HashedWheelTimer timer = new HashedWheelTimer();
Future<?> futureStart = GlobalEventExecutor.INSTANCE.submit(timer::start);
futureStart.get(5, TimeUnit.SECONDS);
Future<?> futureStop = GlobalEventExecutor.INSTANCE.submit(timer::stop);
futureStop.get(5, TimeUnit.SECONDS);
}
// Tests copied from io.netty.handler.ssl.SslHandlerTest
@Test
public void testHandshakeWithExecutorThatExecuteDirectory() throws Exception {
testHandshakeWithExecutor(Runnable::run, "TLSv1.2");
}
@Test
public void testHandshakeWithExecutorThatExecuteDirectoryTLSv13() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK));
testHandshakeWithExecutor(Runnable::run, "TLSv1.3");
}
@Test
public void testHandshakeWithImmediateExecutor() throws Exception {
testHandshakeWithExecutor(ImmediateExecutor.INSTANCE, "TLSv1.2");
}
@Test
public void testHandshakeWithImmediateExecutorTLSv13() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK));
testHandshakeWithExecutor(ImmediateExecutor.INSTANCE, "TLSv1.3");
}
@Test
public void testHandshakeWithImmediateEventExecutor() throws Exception {
testHandshakeWithExecutor(ImmediateEventExecutor.INSTANCE, "TLSv1.2");
}
@Test
public void testHandshakeWithImmediateEventExecutorTLSv13() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK));
testHandshakeWithExecutor(ImmediateEventExecutor.INSTANCE, "TLSv1.3");
}
@Test
public void testHandshakeWithExecutor() throws Exception {
ExecutorService executorService = Executors.newCachedThreadPool();
try {
testHandshakeWithExecutor(executorService, "TLSv1.2");
} finally {
executorService.shutdown();
}
}
@Test
public void testHandshakeWithExecutorTLSv13() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK));
ExecutorService executorService = Executors.newCachedThreadPool();
try {
testHandshakeWithExecutor(executorService, "TLSv1.3");
} finally {
executorService.shutdown();
}
}
@Test
public void testTrustManagerVerifyJDK() throws Exception {
testTrustManagerVerify(SslProvider.JDK, "TLSv1.2");
}
@Test
public void testTrustManagerVerifyTLSv13JDK() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK));
testTrustManagerVerify(SslProvider.JDK, "TLSv1.3");
}
@Test
public void testTrustManagerVerifyOpenSSL() throws Exception {
testTrustManagerVerify(SslProvider.OPENSSL, "TLSv1.2");
}
@Test
public void testTrustManagerVerifyTLSv13OpenSSL() throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(SslProvider.OPENSSL));
testTrustManagerVerify(SslProvider.OPENSSL, "TLSv1.3");
}
@Test
public void testSslHandlerWrapAllowsBlockingCalls() throws Exception {
final SslContext sslClientCtx = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslProvider(SslProvider.JDK)
.endpointIdentificationAlgorithm(null)
.build();
final SslHandler sslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT);
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
final CountDownLatch activeLatch = new CountDownLatch(1);
final AtomicReference<Throwable> error = new AtomicReference<>();
Channel sc = null;
Channel cc = null;
try {
sc = new ServerBootstrap()
.group(group)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInboundHandlerAdapter())
.bind(new InetSocketAddress(0))
.syncUninterruptibly()
.channel();
cc = new Bootstrap()
.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
ch.pipeline().addLast(sslHandler);
ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(ChannelHandlerContext ctx) {
activeLatch.countDown();
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof SslHandshakeCompletionEvent &&
((SslHandshakeCompletionEvent) evt).cause() != null) {
Throwable cause = ((SslHandshakeCompletionEvent) evt).cause();
cause.printStackTrace();
error.set(cause);
}
ctx.fireUserEventTriggered(evt);
}
});
}
})
.connect(sc.localAddress())
.addListener((ChannelFutureListener) future ->
future.channel().writeAndFlush(wrappedBuffer(new byte [] { 1, 2, 3, 4 })))
.syncUninterruptibly()
.channel();
assertTrue(activeLatch.await(5, TimeUnit.SECONDS));
assertNull(error.get());
} finally {
if (cc != null) {
cc.close().syncUninterruptibly();
}
if (sc != null) {
sc.close().syncUninterruptibly();
}
group.shutdownGracefully();
ReferenceCountUtil.release(sslClientCtx);
}
}
    @Test
    @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
    public void pooledBufferAllocation() throws Exception {
        // Races pooled buffer allocation on a FastThreadLocalThread against
        // dumpStats() on this thread; the pool's internal locking must not be
        // flagged by BlockHound.
        AtomicLong iterationCounter = new AtomicLong();
        PooledByteBufAllocator allocator = PooledByteBufAllocator.DEFAULT;
        FutureTask<Void> task = new FutureTask<>(() -> {
            List<ByteBuf> buffers = new ArrayList<>();
            long count;
            // Busy-wait until the main thread signals that the race is on.
            do {
                count = iterationCounter.get();
            } while (count == 0);
            for (int i = 0; i < 13; i++) {
                int size = 8 << i;
                buffers.add(allocator.ioBuffer(size, size));
            }
            for (ByteBuf buffer : buffers) {
                buffer.release();
            }
            return null;
        });
        FastThreadLocalThread thread = new FastThreadLocalThread(task);
        thread.start();
        do {
            allocator.dumpStats(); // This will take internal pool locks and we'll race with the thread.
            iterationCounter.set(1);
        } while (thread.isAlive());
        thread.join();
        // Propagates any error raised on the allocating thread.
        task.get();
    }
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testUnixResolverDnsServerAddressStreamProvider_Parse() throws InterruptedException {
doTestParseResolverFilesAllowsBlockingCalls(DnsServerAddressStreamProviders::unixDefault);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testHostsFileParser_Parse() throws InterruptedException {
doTestParseResolverFilesAllowsBlockingCalls(DnsNameResolverBuilder::new);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testUnixResolverDnsServerAddressStreamProvider_ParseEtcResolverSearchDomainsAndOptions()
throws InterruptedException {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
DnsNameResolverBuilder builder = new DnsNameResolverBuilder(group.next())
.datagramChannelFactory(NioDatagramChannel::new);
doTestParseResolverFilesAllowsBlockingCalls(builder::build);
} finally {
group.shutdownGracefully();
}
}
    private static void doTestParseResolverFilesAllowsBlockingCalls(Callable<Object> callable)
            throws InterruptedException {
        // Runs the callable on a Netty SingleThreadEventExecutor and asserts it
        // completes without a BlockHound violation. Errors are collected rather
        // than thrown so the assertions below report them deterministically.
        SingleThreadEventExecutor executor =
                new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) {
                    @Override
                    protected void run() {
                        // Minimal run loop: drain and execute tasks until shutdown.
                        while (!confirmShutdown()) {
                            Runnable task = takeTask();
                            if (task != null) {
                                task.run();
                            }
                        }
                    }
                };
        try {
            CountDownLatch latch = new CountDownLatch(1);
            List<Object> result = new ArrayList<>();
            List<Throwable> error = new ArrayList<>();
            executor.execute(() -> {
                try {
                    result.add(callable.call());
                } catch (Throwable t) {
                    error.add(t);
                }
                latch.countDown();
            });
            latch.await();
            assertEquals(0, error.size());
            assertEquals(1, result.size());
        } finally {
            executor.shutdownGracefully();
        }
    }
private static void testTrustManagerVerify(SslProvider provider, String tlsVersion) throws Exception {
final SslContext sslClientCtx = SslContextBuilder.forClient()
.sslProvider(provider)
.protocols(tlsVersion)
.endpointIdentificationAlgorithm(null)
.trustManager(ResourcesUtil.getFile(
NettyBlockHoundIntegrationTest.class, "mutual_auth_ca.pem"))
.build();
File cert = ResourcesUtil.getFile(NettyBlockHoundIntegrationTest.class, "localhost_server.pem");
File key = ResourcesUtil.getFile(NettyBlockHoundIntegrationTest.class, "localhost_server.key");
final SslContext sslServerCtx = SslContextBuilder.forServer(cert, key, null)
.sslProvider(provider)
.protocols(tlsVersion)
.build();
final SslHandler clientSslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT);
final SslHandler serverSslHandler = sslServerCtx.newHandler(UnpooledByteBufAllocator.DEFAULT);
testHandshake(sslClientCtx, clientSslHandler, serverSslHandler);
}
private static void testHandshakeWithExecutor(Executor executor, String tlsVersion) throws Exception {
final SslContext sslClientCtx = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.endpointIdentificationAlgorithm(null)
.sslProvider(SslProvider.JDK).protocols(tlsVersion).build();
X509Bundle cert = new CertificateBuilder()
.subject("cn=localhost")
.setIsCertificateAuthority(true)
.buildSelfSigned();
final SslContext sslServerCtx = SslContextBuilder.forServer(cert.toKeyManagerFactory())
.sslProvider(SslProvider.JDK).protocols(tlsVersion).build();
final SslHandler clientSslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT, executor);
final SslHandler serverSslHandler = sslServerCtx.newHandler(UnpooledByteBufAllocator.DEFAULT, executor);
testHandshake(sslClientCtx, clientSslHandler, serverSslHandler);
}
private static void testHandshake(SslContext sslClientCtx, SslHandler clientSslHandler,
SslHandler serverSslHandler) throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
Channel sc = null;
Channel cc = null;
try {
sc = new ServerBootstrap()
.group(group)
.channel(NioServerSocketChannel.class)
.childHandler(serverSslHandler)
.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
ChannelFuture future = new Bootstrap()
.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
ch.pipeline()
.addLast(clientSslHandler)
.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof SslHandshakeCompletionEvent &&
((SslHandshakeCompletionEvent) evt).cause() != null) {
((SslHandshakeCompletionEvent) evt).cause().printStackTrace();
}
ctx.fireUserEventTriggered(evt);
}
});
}
}).connect(sc.localAddress());
cc = future.syncUninterruptibly().channel();
clientSslHandler.handshakeFuture().await().sync();
serverSslHandler.handshakeFuture().await().sync();
} finally {
if (cc != null) {
cc.close().syncUninterruptibly();
}
if (sc != null) {
sc.close().syncUninterruptibly();
}
group.shutdownGracefully();
ReferenceCountUtil.release(sslClientCtx);
}
}
private static
|
NettyBlockHoundIntegrationTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java
|
{
"start": 1256,
"end": 2983
}
|
class ____ {
public static final Setting<Allocation> CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING = new Setting<>(
"cluster.persistent_tasks.allocation.enable",
Allocation.ALL.toString(),
Allocation::fromString,
Dynamic,
NodeScope
);
public static final String ALLOCATION_NONE_EXPLANATION = "no persistent task assignments are allowed due to cluster settings";
private volatile Allocation enableAssignment;
public EnableAssignmentDecider(final Settings settings, final ClusterSettings clusterSettings) {
this.enableAssignment = CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, this::setEnableAssignment);
}
public void setEnableAssignment(final Allocation enableAssignment) {
this.enableAssignment = enableAssignment;
}
/**
* Returns a {@link AssignmentDecision} whether the given persistent task can be assigned
* to a node of the cluster. The decision depends on the current value of the setting
* {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING}.
*
* @return the {@link AssignmentDecision}
*/
public AssignmentDecision canAssign() {
if (enableAssignment == Allocation.NONE) {
return new AssignmentDecision(AssignmentDecision.Type.NO, ALLOCATION_NONE_EXPLANATION);
}
return AssignmentDecision.YES;
}
/**
* Allocation values or rather their string representation to be used used with
* {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING}
* via cluster settings.
*/
public
|
EnableAssignmentDecider
|
java
|
spring-projects__spring-security
|
saml2/saml2-service-provider/src/test/java/org/springframework/security/saml2/core/Saml2X509CredentialTests.java
|
{
"start": 1281,
"end": 8337
}
|
class ____ {
private PrivateKey key;
private X509Certificate certificate;
@BeforeEach
public void setup() throws Exception {
String keyData = "-----BEGIN PRIVATE KEY-----\n"
+ "MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBANG7v8QjQGU3MwQE\n"
+ "VUBxvH6Uuiy/MhZT7TV0ZNjyAF2ExA1gpn3aUxx6jYK5UnrpxRRE/KbeLucYbOhK\n"
+ "cDECt77Rggz5TStrOta0BQTvfluRyoQtmQ5Nkt6Vqg7O2ZapFt7k64Sal7AftzH6\n"
+ "Q2BxWN1y04bLdDrH4jipqRj/2qEFAgMBAAECgYEAj4ExY1jjdN3iEDuOwXuRB+Nn\n"
+ "x7pC4TgntE2huzdKvLJdGvIouTArce8A6JM5NlTBvm69mMepvAHgcsiMH1zGr5J5\n"
+ "wJz23mGOyhM1veON41/DJTVG+cxq4soUZhdYy3bpOuXGMAaJ8QLMbQQoivllNihd\n"
+ "vwH0rNSK8LTYWWPZYIECQQDxct+TFX1VsQ1eo41K0T4fu2rWUaxlvjUGhK6HxTmY\n"
+ "8OMJptunGRJL1CUjIb45Uz7SP8TPz5FwhXWsLfS182kRAkEA3l+Qd9C9gdpUh1uX\n"
+ "oPSNIxn5hFUrSTW1EwP9QH9vhwb5Vr8Jrd5ei678WYDLjUcx648RjkjhU9jSMzIx\n"
+ "EGvYtQJBAMm/i9NR7IVyyNIgZUpz5q4LI21rl1r4gUQuD8vA36zM81i4ROeuCly0\n"
+ "KkfdxR4PUfnKcQCX11YnHjk9uTFj75ECQEFY/gBnxDjzqyF35hAzrYIiMPQVfznt\n"
+ "YX/sDTE2AdVBVGaMj1Cb51bPHnNC6Q5kXKQnj/YrLqRQND09Q7ParX0CQQC5NxZr\n"
+ "9jKqhHj8yQD6PlXTsY4Occ7DH6/IoDenfdEVD5qlet0zmd50HatN2Jiqm5ubN7CM\n" + "INrtuLp4YHbgk1mi\n"
+ "-----END PRIVATE KEY-----";
this.key = RsaKeyConverters.pkcs8().convert(new ByteArrayInputStream(keyData.getBytes(StandardCharsets.UTF_8)));
final CertificateFactory factory = CertificateFactory.getInstance("X.509");
String certificateData = "-----BEGIN CERTIFICATE-----\n"
+ "MIICgTCCAeoCCQCuVzyqFgMSyDANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMC\n"
+ "VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEjAQBgNVBAcMCVZhbmNvdXZlcjEdMBsG\n"
+ "A1UECgwUU3ByaW5nIFNlY3VyaXR5IFNBTUwxCzAJBgNVBAsMAnNwMSAwHgYDVQQD\n"
+ "DBdzcC5zcHJpbmcuc2VjdXJpdHkuc2FtbDAeFw0xODA1MTQxNDMwNDRaFw0yODA1\n"
+ "MTExNDMwNDRaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjES\n"
+ "MBAGA1UEBwwJVmFuY291dmVyMR0wGwYDVQQKDBRTcHJpbmcgU2VjdXJpdHkgU0FN\n"
+ "TDELMAkGA1UECwwCc3AxIDAeBgNVBAMMF3NwLnNwcmluZy5zZWN1cml0eS5zYW1s\n"
+ "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRu7/EI0BlNzMEBFVAcbx+lLos\n"
+ "vzIWU+01dGTY8gBdhMQNYKZ92lMceo2CuVJ66cUURPym3i7nGGzoSnAxAre+0YIM\n"
+ "+U0razrWtAUE735bkcqELZkOTZLelaoOztmWqRbe5OuEmpewH7cx+kNgcVjdctOG\n"
+ "y3Q6x+I4qakY/9qhBQIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAAeViTvHOyQopWEi\n"
+ "XOfI2Z9eukwrSknDwq/zscR0YxwwqDBMt/QdAODfSwAfnciiYLkmEjlozWRtOeN+\n"
+ "qK7UFgP1bRl5qksrYX5S0z2iGJh0GvonLUt3e20Ssfl5tTEDDnAEUMLfBkyaxEHD\n"
+ "RZ/nbTJ7VTeZOSyRoVn5XHhpuJ0B\n" + "-----END CERTIFICATE-----";
this.certificate = (X509Certificate) factory
.generateCertificate(new ByteArrayInputStream(certificateData.getBytes(StandardCharsets.UTF_8)));
}
    @Test
    public void constructorWhenRelyingPartyWithCredentialsThenItSucceeds() {
        // Relying-party credentials (private key + certificate) accept SIGNING
        // and DECRYPTION usages, individually or combined, via both the
        // constructor and the static factory methods.
        new Saml2X509Credential(this.key, this.certificate, Saml2X509CredentialType.SIGNING);
        new Saml2X509Credential(this.key, this.certificate, Saml2X509CredentialType.SIGNING,
                Saml2X509CredentialType.DECRYPTION);
        new Saml2X509Credential(this.key, this.certificate, Saml2X509CredentialType.DECRYPTION);
        Saml2X509Credential.signing(this.key, this.certificate);
        Saml2X509Credential.decryption(this.key, this.certificate);
    }
@Test
public void constructorWhenAssertingPartyWithCredentialsThenItSucceeds() {
new Saml2X509Credential(this.certificate, Saml2X509CredentialType.VERIFICATION);
new Saml2X509Credential(this.certificate, Saml2X509CredentialType.VERIFICATION,
Saml2X509CredentialType.ENCRYPTION);
new Saml2X509Credential(this.certificate, Saml2X509CredentialType.ENCRYPTION);
Saml2X509Credential.verification(this.certificate);
Saml2X509Credential.encryption(this.certificate);
}
@Test
public void constructorWhenRelyingPartyWithoutCredentialsThenItFails() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new Saml2X509Credential(null, (X509Certificate) null, Saml2X509CredentialType.SIGNING));
}
@Test
public void constructorWhenRelyingPartyWithoutPrivateKeyThenItFails() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new Saml2X509Credential(null, this.certificate, Saml2X509CredentialType.SIGNING));
}
@Test
public void constructorWhenRelyingPartyWithoutCertificateThenItFails() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new Saml2X509Credential(this.key, null, Saml2X509CredentialType.SIGNING));
}
@Test
public void constructorWhenAssertingPartyWithoutCertificateThenItFails() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new Saml2X509Credential(null, Saml2X509CredentialType.SIGNING));
}
@Test
public void constructorWhenRelyingPartyWithEncryptionUsageThenItFails() {
assertThatIllegalStateException()
.isThrownBy(() -> new Saml2X509Credential(this.key, this.certificate, Saml2X509CredentialType.ENCRYPTION));
}
@Test
public void constructorWhenRelyingPartyWithVerificationUsageThenItFails() {
assertThatIllegalStateException().isThrownBy(
() -> new Saml2X509Credential(this.key, this.certificate, Saml2X509CredentialType.VERIFICATION));
}
@Test
public void constructorWhenAssertingPartyWithSigningUsageThenItFails() {
assertThatIllegalStateException()
.isThrownBy(() -> new Saml2X509Credential(this.certificate, Saml2X509CredentialType.SIGNING));
}
@Test
public void constructorWhenAssertingPartyWithDecryptionUsageThenItFails() {
assertThatIllegalStateException()
.isThrownBy(() -> new Saml2X509Credential(this.certificate, Saml2X509CredentialType.DECRYPTION));
}
@Test
public void factoryWhenRelyingPartyForSigningWithoutCredentialsThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.signing(null, null));
}
@Test
public void factoryWhenRelyingPartyForSigningWithoutPrivateKeyThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.signing(null, this.certificate));
}
@Test
public void factoryWhenRelyingPartyForSigningWithoutCertificateThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.signing(this.key, null));
}
@Test
public void factoryWhenRelyingPartyForDecryptionWithoutCredentialsThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.decryption(null, null));
}
@Test
public void factoryWhenRelyingPartyForDecryptionWithoutPrivateKeyThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.decryption(null, this.certificate));
}
@Test
public void factoryWhenRelyingPartyForDecryptionWithoutCertificateThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.decryption(this.key, null));
}
@Test
public void factoryWhenAssertingPartyForVerificationWithoutCertificateThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.verification(null));
}
@Test
public void factoryWhenAssertingPartyForEncryptionWithoutCertificateThenItFails() {
assertThatIllegalArgumentException().isThrownBy(() -> Saml2X509Credential.encryption(null));
}
}
|
Saml2X509CredentialTests
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/filter/BaseFilterProcessor.java
|
{
"start": 5392,
"end": 12128
}
|
class ____ method annotations
*/
protected abstract void addFilter(Supplier<GenericHttpFilter> factory, AnnotationMetadata methodAnnotations, FilterMetadata metadata);
private <T> void process0(BeanDefinition<T> beanDefinition, ExecutableMethod<T, ?> method) {
if (beanContext != null) {
FilterMetadata beanLevel = metadata(beanDefinition, filterAnnotation);
if (method.isAnnotationPresent(RequestFilter.class)) {
FilterMetadata methodLevel = metadata(method, RequestFilter.class);
FilterMetadata combined = combineMetadata(beanLevel, methodLevel);
addFilter(() -> MethodFilter.prepareFilterMethod(beanContext.getConversionService(), beanContext.getBean(beanDefinition), method, false, combined.order, argumentBinderRegistry, getExecutor(combined)), method, combined);
}
if (method.isAnnotationPresent(ResponseFilter.class)) {
FilterMetadata methodLevel = metadata(method, ResponseFilter.class);
FilterMetadata combined = combineMetadata(beanLevel, methodLevel);
addFilter(() -> MethodFilter.prepareFilterMethod(beanContext.getConversionService(), beanContext.getBean(beanDefinition), method, true, combined.order, argumentBinderRegistry, getExecutor(combined)), method, combined);
}
}
}
private Executor getExecutor(FilterMetadata metadata) {
if (metadata.executeOn != null) {
return beanContext.getBean(Executor.class, Qualifiers.byName(metadata.executeOn));
} else {
return null;
}
}
private FilterMetadata combineMetadata(FilterMetadata beanLevel, FilterMetadata methodLevel) {
List<String> patterns;
if (beanLevel.patterns == null) {
patterns = methodLevel.patterns;
} else if (methodLevel.patterns == null) {
patterns = beanLevel.patterns;
} else {
if (beanLevel.patternStyle == FilterPatternStyle.REGEX ||
methodLevel.patternStyle == FilterPatternStyle.REGEX) {
throw new UnsupportedOperationException("Concatenating regex filter patterns is " +
"not supported. Please declare the full pattern on the method instead.");
}
patterns = beanLevel.patterns.stream()
.flatMap(p1 -> methodLevel.patterns.stream().map(p2 -> concatAntPatterns(p1, p2)))
.toList();
}
if (patterns != null && (beanLevel.appendContextPath == null || beanLevel.appendContextPath)) {
patterns = prependContextPath(patterns);
}
FilterOrder order;
if (methodLevel.order != null) {
order = methodLevel.order;
} else if (beanLevel.order != null) {
// allow overriding using Ordered.getOrder, where possible
order = new FilterOrder.Dynamic(((FilterOrder.Fixed) beanLevel.order).value());
} else {
order = new FilterOrder.Dynamic(Ordered.LOWEST_PRECEDENCE);
}
return new FilterMetadata(
methodLevel.patterns == null ? beanLevel.patternStyle : methodLevel.patternStyle,
patterns,
methodLevel.methods == null ? beanLevel.methods : methodLevel.methods,
order,
methodLevel.executeOn == null ? beanLevel.executeOn : methodLevel.executeOn,
beanLevel.serviceId, // only present on bean level
beanLevel.excludeServiceId, // only present on bean level
beanLevel.appendContextPath, // Define if contextPath is appended,
methodLevel.isPreMatching
);
}
/**
* Prepend server context path if necessary.
*
* @param patterns Input patterns
* @return Output patterns with server context path prepended
*/
@NonNull
protected List<String> prependContextPath(@NonNull List<String> patterns) {
return patterns;
}
static String concatAntPatterns(String p1, String p2) {
StringBuilder combined = new StringBuilder(p1.length() + p2.length() + 1);
combined.append(p1);
if (!p1.endsWith(AntPathMatcher.DEFAULT_PATH_SEPARATOR)) {
combined.append(AntPathMatcher.DEFAULT_PATH_SEPARATOR);
}
if (p2.startsWith(AntPathMatcher.DEFAULT_PATH_SEPARATOR)) {
combined.append(p2, AntPathMatcher.DEFAULT_PATH_SEPARATOR.length(), p2.length());
} else {
combined.append(p2);
}
return combined.toString();
}
private FilterMetadata metadata(AnnotationMetadata annotationMetadata, Class<? extends Annotation> annotationType) {
HttpMethod[] methods = annotationMetadata.enumValues(annotationType, "methods", HttpMethod.class);
String[] patterns = annotationMetadata.stringValues(annotationType);
OptionalInt order = annotationMetadata.intValue(Order.class);
String[] serviceId = annotationMetadata.stringValues(annotationType, "serviceId"); // only on ClientFilter
String[] excludeServiceId = annotationMetadata.stringValues(annotationType, "excludeServiceId"); // only on ClientFilter
Optional<Boolean> appendContextPath = annotationMetadata.booleanValue(annotationType, "appendContextPath");
return new FilterMetadata(
annotationMetadata.enumValue(annotationType, "patternStyle", FilterPatternStyle.class).orElse(FilterPatternStyle.ANT),
ArrayUtils.isNotEmpty(patterns) ? Arrays.asList(patterns) : null,
ArrayUtils.isNotEmpty(methods) ? Arrays.asList(methods) : null,
order.isPresent() ? new FilterOrder.Fixed(order.getAsInt()) : null,
annotationMetadata.stringValue(ExecuteOn.class).orElse(null),
ArrayUtils.isNotEmpty(serviceId) ? Arrays.asList(serviceId) : null,
ArrayUtils.isNotEmpty(excludeServiceId) ? Arrays.asList(excludeServiceId) : null,
appendContextPath.orElse(null),
annotationMetadata.hasStereotype("io.micronaut.http.server.annotation.PreMatching")
);
}
protected record FilterMetadata(
FilterPatternStyle patternStyle,
@Nullable List<String> patterns,
@Nullable List<HttpMethod> methods,
@Nullable FilterOrder order,
@Nullable String executeOn,
@Nullable List<String> serviceId,
@Nullable List<String> excludeServiceId,
@Nullable Boolean appendContextPath,
boolean isPreMatching
) {
}
/**
* Interface that signals to {@link FilterRunner} that we should wait for the request body to
* arrive before running this binder.
*
* @param <T> Arg type
*/
public
|
and
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/conversion/Conversions.java
|
{
"start": 1103,
"end": 16076
}
|
class ____ {
private final Map<Key, ConversionProvider> conversions = new HashMap<>();
private final Type enumType;
private final Type stringType;
private final Type integerType;
private final TypeFactory typeFactory;
public Conversions(TypeFactory typeFactory) {
this.typeFactory = typeFactory;
this.enumType = typeFactory.getType( Enum.class );
this.stringType = typeFactory.getType( String.class );
this.integerType = typeFactory.getType( Integer.class );
//native types <> native types, including wrappers
registerNativeTypeConversion( byte.class, Byte.class );
registerNativeTypeConversion( byte.class, short.class );
registerNativeTypeConversion( byte.class, Short.class );
registerNativeTypeConversion( byte.class, int.class );
registerNativeTypeConversion( byte.class, Integer.class );
registerNativeTypeConversion( byte.class, long.class );
registerNativeTypeConversion( byte.class, Long.class );
registerNativeTypeConversion( byte.class, float.class );
registerNativeTypeConversion( byte.class, Float.class );
registerNativeTypeConversion( byte.class, double.class );
registerNativeTypeConversion( byte.class, Double.class );
registerNativeTypeConversion( Byte.class, short.class );
registerNativeTypeConversion( Byte.class, Short.class );
registerNativeTypeConversion( Byte.class, int.class );
registerNativeTypeConversion( Byte.class, Integer.class );
registerNativeTypeConversion( Byte.class, long.class );
registerNativeTypeConversion( Byte.class, Long.class );
registerNativeTypeConversion( Byte.class, float.class );
registerNativeTypeConversion( Byte.class, Float.class );
registerNativeTypeConversion( Byte.class, double.class );
registerNativeTypeConversion( Byte.class, Double.class );
registerNativeTypeConversion( short.class, Short.class );
registerNativeTypeConversion( short.class, int.class );
registerNativeTypeConversion( short.class, Integer.class );
registerNativeTypeConversion( short.class, long.class );
registerNativeTypeConversion( short.class, Long.class );
registerNativeTypeConversion( short.class, float.class );
registerNativeTypeConversion( short.class, Float.class );
registerNativeTypeConversion( short.class, double.class );
registerNativeTypeConversion( short.class, Double.class );
registerNativeTypeConversion( Short.class, int.class );
registerNativeTypeConversion( Short.class, Integer.class );
registerNativeTypeConversion( Short.class, long.class );
registerNativeTypeConversion( Short.class, Long.class );
registerNativeTypeConversion( Short.class, float.class );
registerNativeTypeConversion( Short.class, Float.class );
registerNativeTypeConversion( Short.class, double.class );
registerNativeTypeConversion( Short.class, Double.class );
registerNativeTypeConversion( int.class, Integer.class );
registerNativeTypeConversion( int.class, long.class );
registerNativeTypeConversion( int.class, Long.class );
registerNativeTypeConversion( int.class, float.class );
registerNativeTypeConversion( int.class, Float.class );
registerNativeTypeConversion( int.class, double.class );
registerNativeTypeConversion( int.class, Double.class );
registerNativeTypeConversion( Integer.class, long.class );
registerNativeTypeConversion( Integer.class, Long.class );
registerNativeTypeConversion( Integer.class, float.class );
registerNativeTypeConversion( Integer.class, Float.class );
registerNativeTypeConversion( Integer.class, double.class );
registerNativeTypeConversion( Integer.class, Double.class );
registerNativeTypeConversion( long.class, Long.class );
registerNativeTypeConversion( long.class, float.class );
registerNativeTypeConversion( long.class, Float.class );
registerNativeTypeConversion( long.class, double.class );
registerNativeTypeConversion( long.class, Double.class );
registerNativeTypeConversion( Long.class, float.class );
registerNativeTypeConversion( Long.class, Float.class );
registerNativeTypeConversion( Long.class, double.class );
registerNativeTypeConversion( Long.class, Double.class );
registerNativeTypeConversion( float.class, Float.class );
registerNativeTypeConversion( float.class, double.class );
registerNativeTypeConversion( float.class, Double.class );
registerNativeTypeConversion( Float.class, double.class );
registerNativeTypeConversion( Float.class, Double.class );
registerNativeTypeConversion( double.class, Double.class );
registerNativeTypeConversion( boolean.class, Boolean.class );
registerNativeTypeConversion( char.class, Character.class );
//BigInteger <> native types
registerBigIntegerConversion( byte.class );
registerBigIntegerConversion( Byte.class );
registerBigIntegerConversion( short.class );
registerBigIntegerConversion( Short.class );
registerBigIntegerConversion( int.class );
registerBigIntegerConversion( Integer.class );
registerBigIntegerConversion( long.class );
registerBigIntegerConversion( Long.class );
registerBigIntegerConversion( float.class );
registerBigIntegerConversion( Float.class );
registerBigIntegerConversion( double.class );
registerBigIntegerConversion( Double.class );
//BigDecimal <> native types
registerBigDecimalConversion( byte.class );
registerBigDecimalConversion( Byte.class );
registerBigDecimalConversion( short.class );
registerBigDecimalConversion( Short.class );
registerBigDecimalConversion( int.class );
registerBigDecimalConversion( Integer.class );
registerBigDecimalConversion( long.class );
registerBigDecimalConversion( Long.class );
registerBigDecimalConversion( float.class );
registerBigDecimalConversion( Float.class );
registerBigDecimalConversion( double.class );
registerBigDecimalConversion( Double.class );
//native types <> String
registerToStringConversion( byte.class );
registerToStringConversion( Byte.class );
registerToStringConversion( short.class );
registerToStringConversion( Short.class );
registerToStringConversion( int.class );
registerToStringConversion( Integer.class );
registerToStringConversion( long.class );
registerToStringConversion( Long.class );
registerToStringConversion( float.class );
registerToStringConversion( Float.class );
registerToStringConversion( double.class );
registerToStringConversion( Double.class );
registerToStringConversion( boolean.class );
registerToStringConversion( Boolean.class );
register( char.class, String.class, new CharToStringConversion() );
register( Character.class, String.class, new CharWrapperToStringConversion() );
register( BigInteger.class, String.class, new BigIntegerToStringConversion() );
register( BigDecimal.class, String.class, new BigDecimalToStringConversion() );
register( StringBuilder.class, String.class, new StringBuilderToStringConversion() );
registerJodaConversions();
registerJava8TimeConversions();
//misc.
register( Enum.class, String.class, new EnumStringConversion() );
register( Enum.class, Integer.class, new EnumToIntegerConversion() );
register( Enum.class, int.class, new EnumToIntegerConversion() );
register( Date.class, String.class, new DateToStringConversion() );
register( BigDecimal.class, BigInteger.class, new BigDecimalToBigIntegerConversion() );
registerJavaTimeSqlConversions();
// java.util.Currency <~> String
register( Currency.class, String.class, new CurrencyToStringConversion() );
register( UUID.class, String.class, new UUIDToStringConversion() );
register( Locale.class, String.class, new LocaleToStringConversion() );
registerURLConversion();
}
private void registerJodaConversions() {
if ( !isJodaTimeAvailable() ) {
return;
}
// Joda to String
register( JodaTimeConstants.DATE_TIME_FQN, String.class, new JodaDateTimeToStringConversion() );
register( JodaTimeConstants.LOCAL_DATE_FQN, String.class, new JodaLocalDateToStringConversion() );
register( JodaTimeConstants.LOCAL_DATE_TIME_FQN, String.class, new JodaLocalDateTimeToStringConversion() );
register( JodaTimeConstants.LOCAL_TIME_FQN, String.class, new JodaLocalTimeToStringConversion() );
// Joda to Date
register( JodaTimeConstants.DATE_TIME_FQN, Date.class, new JodaTimeToDateConversion() );
register( JodaTimeConstants.LOCAL_DATE_FQN, Date.class, new JodaTimeToDateConversion() );
register( JodaTimeConstants.LOCAL_DATE_TIME_FQN, Date.class, new JodaTimeToDateConversion() );
// Joda to Calendar
register( JodaTimeConstants.DATE_TIME_FQN, Calendar.class, new JodaDateTimeToCalendarConversion() );
}
private void registerJava8TimeConversions() {
// Java 8 time to String
register( ZonedDateTime.class, String.class, new JavaZonedDateTimeToStringConversion() );
register( LocalDate.class, String.class, new JavaLocalDateToStringConversion() );
register( LocalDateTime.class, String.class, new JavaLocalDateTimeToStringConversion() );
register( LocalTime.class, String.class, new JavaLocalTimeToStringConversion() );
register( Instant.class, String.class, new StaticParseToStringConversion() );
register( Period.class, String.class, new StaticParseToStringConversion() );
register( Duration.class, String.class, new StaticParseToStringConversion() );
// Java 8 time to Date
register( ZonedDateTime.class, Date.class, new JavaZonedDateTimeToDateConversion() );
register( LocalDateTime.class, Date.class, new JavaLocalDateTimeToDateConversion() );
register( LocalDate.class, Date.class, new JavaLocalDateToDateConversion() );
register( Instant.class, Date.class, new JavaInstantToDateConversion() );
// Java 8 time
register( LocalDateTime.class, LocalDate.class, new JavaLocalDateTimeToLocalDateConversion() );
}
private void registerJavaTimeSqlConversions() {
if ( isJavaSqlAvailable() ) {
register( LocalDate.class, java.sql.Date.class, new JavaLocalDateToSqlDateConversion() );
register( Date.class, Time.class, new DateToSqlTimeConversion() );
register( Date.class, java.sql.Date.class, new DateToSqlDateConversion() );
register( Date.class, Timestamp.class, new DateToSqlTimestampConversion() );
}
}
private boolean isJodaTimeAvailable() {
return typeFactory.isTypeAvailable( JodaTimeConstants.DATE_TIME_FQN );
}
private boolean isJavaSqlAvailable() {
return typeFactory.isTypeAvailable( "java.sql.Date" );
}
private void registerNativeTypeConversion(Class<?> sourceType, Class<?> targetType) {
if ( sourceType.isPrimitive() && targetType.isPrimitive() ) {
register( sourceType, targetType, new PrimitiveToPrimitiveConversion( sourceType ) );
}
else if ( sourceType.isPrimitive() ) {
register( sourceType, targetType, new PrimitiveToWrapperConversion( sourceType, targetType ) );
}
else if ( targetType.isPrimitive() ) {
register( sourceType, targetType, inverse( new PrimitiveToWrapperConversion( targetType, sourceType ) ) );
}
else {
register( sourceType, targetType, new WrapperToWrapperConversion( sourceType, targetType ) );
}
}
private void registerToStringConversion(Class<?> sourceType) {
if ( sourceType.isPrimitive() ) {
register( sourceType, String.class, new PrimitiveToStringConversion( sourceType ) );
}
else {
register( sourceType, String.class, new WrapperToStringConversion( sourceType ) );
}
}
private void registerBigIntegerConversion(Class<?> targetType) {
if ( targetType.isPrimitive() ) {
register( BigInteger.class, targetType, new BigIntegerToPrimitiveConversion( targetType ) );
}
else {
register( BigInteger.class, targetType, new BigIntegerToWrapperConversion( targetType ) );
}
}
private void registerBigDecimalConversion(Class<?> targetType) {
if ( targetType.isPrimitive() ) {
register( BigDecimal.class, targetType, new BigDecimalToPrimitiveConversion( targetType ) );
}
else {
register( BigDecimal.class, targetType, new BigDecimalToWrapperConversion( targetType ) );
}
}
private void registerURLConversion() {
if ( isJavaURLAvailable() ) {
register( URL.class, String.class, new URLToStringConversion() );
}
}
private boolean isJavaURLAvailable() {
return typeFactory.isTypeAvailable( "java.net.URL" );
}
private void register(Class<?> sourceClass, Class<?> targetClass, ConversionProvider conversion) {
Type sourceType = typeFactory.getType( sourceClass );
Type targetType = typeFactory.getType( targetClass );
conversions.put( new Key( sourceType, targetType ), conversion );
conversions.put( new Key( targetType, sourceType ), inverse( conversion ) );
}
private void register(String sourceTypeName, Class<?> targetClass, ConversionProvider conversion) {
Type sourceType = typeFactory.getType( sourceTypeName );
Type targetType = typeFactory.getType( targetClass );
conversions.put( new Key( sourceType, targetType ), conversion );
conversions.put( new Key( targetType, sourceType ), inverse( conversion ) );
}
public ConversionProvider getConversion(Type sourceType, Type targetType) {
if ( sourceType.isEnumType() &&
( targetType.equals( stringType ) ||
targetType.getBoxedEquivalent().equals( integerType ) )
) {
sourceType = enumType;
}
else if ( targetType.isEnumType() &&
( sourceType.equals( stringType ) ||
sourceType.getBoxedEquivalent().equals( integerType ) )
) {
targetType = enumType;
}
return conversions.get( new Key( sourceType, targetType ) );
}
private static
|
Conversions
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/object/SqlUpdate.java
|
{
"start": 1418,
"end": 1998
}
|
class ____ concrete. Although it can be subclassed (for example
* to add a custom update method) it can easily be parameterized by setting
* SQL and declaring parameters.
*
* <p>Like all {@code RdbmsOperation} classes that ship with the Spring
* Framework, {@code SqlQuery} instances are thread-safe after their
* initialization is complete. That is, after they are constructed and configured
* via their setter methods, they can be used safely from multiple threads.
*
* @author Rod Johnson
* @author Thomas Risberg
* @author Juergen Hoeller
* @see SqlQuery
*/
public
|
is
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/JSONReaderScannerTest__entity_long.java
|
{
"start": 328,
"end": 1274
}
|
class ____ extends TestCase {
public void test_scanInt() throws Exception {
StringBuffer buf = new StringBuffer();
buf.append('[');
for (int i = 0; i < 10; ++i) {
if (i != 0) {
buf.append(',');
}
//1000000000000
//
long value = (long) 1000000000000L + 1L + (long) i;
buf.append("{\"id\":" + value + "}");
}
buf.append(']');
Reader reader = new StringReader(buf.toString());
JSONReaderScanner scanner = new JSONReaderScanner(reader);
DefaultJSONParser parser = new DefaultJSONParser(scanner);
List<VO> array = parser.parseArray(VO.class);
for (int i = 0; i < array.size(); ++i) {
long value = (long) 1000000000000L + 1L + (long) i;
Assert.assertEquals(value, array.get(i).getId());
}
}
public static
|
JSONReaderScannerTest__entity_long
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/integration/multiple/servicediscoveryregistry/ServiceDiscoveryRegistryInfoWrapper.java
|
{
"start": 1130,
"end": 2296
}
|
class ____ {
private ServiceDiscoveryRegistry serviceDiscoveryRegistry;
private MetadataServiceDelegation inMemoryWritableMetadataService;
private boolean registered;
private boolean subscribed;
private String host;
private int port;
public ServiceDiscoveryRegistry getServiceDiscoveryRegistry() {
return serviceDiscoveryRegistry;
}
public void setServiceDiscoveryRegistry(ServiceDiscoveryRegistry serviceDiscoveryRegistry) {
this.serviceDiscoveryRegistry = serviceDiscoveryRegistry;
}
public boolean isRegistered() {
return registered;
}
public void setRegistered(boolean registered) {
this.registered = registered;
}
public boolean isSubscribed() {
return subscribed;
}
public void setSubscribed(boolean subscribed) {
this.subscribed = subscribed;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
}
|
ServiceDiscoveryRegistryInfoWrapper
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/ssl/OpenSslSessionStats.java
|
{
"start": 939,
"end": 7710
}
|
class ____ {
private final ReferenceCountedOpenSslContext context;
// IMPORTANT: We take the OpenSslContext and not just the long (which points the native instance) to prevent
// the GC to collect OpenSslContext as this would also free the pointer and so could result in a
// segfault when the user calls any of the methods here that try to pass the pointer down to the native
// level.
OpenSslSessionStats(ReferenceCountedOpenSslContext context) {
this.context = context;
}
/**
* Returns the current number of sessions in the internal session cache.
*/
public long number() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionNumber(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of started SSL/TLS handshakes in client mode.
*/
public long connect() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionConnect(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of successfully established SSL/TLS sessions in client mode.
*/
public long connectGood() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionConnectGood(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of start renegotiations in client mode.
*/
public long connectRenegotiate() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionConnectRenegotiate(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of started SSL/TLS handshakes in server mode.
*/
public long accept() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionAccept(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of successfully established SSL/TLS sessions in server mode.
*/
public long acceptGood() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionAcceptGood(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of start renegotiations in server mode.
*/
public long acceptRenegotiate() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionAcceptRenegotiate(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of successfully reused sessions. In client mode, a session set with {@code SSL_set_session}
* successfully reused is counted as a hit. In server mode, a session successfully retrieved from internal or
* external cache is counted as a hit.
*/
public long hits() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionHits(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of successfully retrieved sessions from the external session cache in server mode.
*/
public long cbHits() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionCbHits(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of sessions proposed by clients that were not found in the internal session cache
* in server mode.
*/
public long misses() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionMisses(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of sessions proposed by clients and either found in the internal or external session cache
* in server mode, but that were invalid due to timeout. These sessions are not included in the {@link #hits()}
* count.
*/
public long timeouts() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionTimeouts(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of sessions that were removed because the maximum session cache size was exceeded.
*/
public long cacheFull() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionCacheFull(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of times a client presented a ticket that did not match any key in the list.
*/
public long ticketKeyFail() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionTicketKeyFail(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of times a client did not present a ticket and we issued a new one
*/
public long ticketKeyNew() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionTicketKeyNew(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of times a client presented a ticket derived from an older key,
* and we upgraded to the primary key.
*/
public long ticketKeyRenew() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionTicketKeyRenew(context.ctx);
} finally {
readerLock.unlock();
}
}
/**
* Returns the number of times a client presented a ticket derived from the primary key.
*/
public long ticketKeyResume() {
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
try {
return SSLContext.sessionTicketKeyResume(context.ctx);
} finally {
readerLock.unlock();
}
}
}
|
OpenSslSessionStats
|
java
|
google__guice
|
core/test/com/google/inject/BindingTest.java
|
{
"start": 20069,
"end": 20120
}
|
enum ____ { TURKEY, PORK, TOFU }
private static
|
Food
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnBooleanPropertyTests.java
|
{
"start": 8615,
"end": 8835
}
|
class ____ {
@Bean
String foo() {
return "foo";
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnBooleanProperty("property1")
@ConditionalOnBooleanProperty("property2")
static
|
NameAndValueAttribute
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportAnnotationDetectionTests.java
|
{
"start": 3277,
"end": 3403
}
|
class ____ {
}
@Configuration
@MetaImport1
@MetaImport2
@Import(Config2a.class)
static
|
MultiMetaImportConfigWithLocalImport
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/DeadLetterChannelUnmarshalSetHeaderTest.java
|
{
"start": 1225,
"end": 2175
}
|
class ____ extends ContextTestSupport {
@Test
public void testDLCSetHeader() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:error");
mock.expectedBodiesReceived("Hello World");
mock.expectedHeaderReceived("foo", "123");
mock.expectedHeaderReceived("bar", "456");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
MyDataFormat df = new MyDataFormat();
from("direct:start").errorHandler(deadLetterChannel("direct:error")).unmarshal(df);
from("direct:error").setHeader("foo", constant("123")).setHeader("bar", constant("456")).to("mock:error");
}
};
}
private static
|
DeadLetterChannelUnmarshalSetHeaderTest
|
java
|
quarkusio__quarkus
|
integration-tests/grpc-test-random-port/src/test/java/io/quarkus/grpc/examples/hello/RandomPortTestBase.java
|
{
"start": 476,
"end": 1333
}
|
class ____ {
@GrpcClient("hello")
MutinyGreeterStub client;
@GrpcClient("hello")
Channel channel;
@Test
@DisabledOnIntegrationTest
void testRandomPort() {
assertSoftly(softly -> {
HelloRequest request = HelloRequest.newBuilder().setName("neo").build();
HelloReply reply = client.sayHello(request).await().indefinitely();
softly.assertThat(reply.getMessage()).startsWith("Hello neo");
int clientPort = HostAndPort.fromString(channel.authority()).getPort();
int serverPort = ConfigProvider.getConfig().getValue(serverPortProperty(), Integer.class);
softly.assertThat(clientPort).isNotEqualTo(0);
softly.assertThat(clientPort).isEqualTo(serverPort);
});
}
protected abstract String serverPortProperty();
}
|
RandomPortTestBase
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/producer/disposer/DisposerWithQualifiersTest.java
|
{
"start": 1009,
"end": 1594
}
|
class ____ {
@RegisterExtension
ArcTestContainer container = new ArcTestContainer(Producer.class, Dependency.class, Foo.class, Bar.class);
@Test
public void testDisposers() {
InstanceHandle<String> handle = Arc.container().instance(String.class, new Foo.Literal());
assertEquals("produced", handle.get());
assertEquals(0, Producer.destroyed.size());
handle.destroy();
assertEquals(1, Producer.destroyed.size());
assertEquals("produced", Producer.destroyed.get(0));
}
@Singleton
static
|
DisposerWithQualifiersTest
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/aspectj/autoproxy/spr3064/SPR3064Tests.java
|
{
"start": 1878,
"end": 1973
}
|
class ____ implements Service {
@Override
@Transaction
public void serveMe() {
}
}
|
ServiceImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestIntEvaluator.java
|
{
"start": 1157,
"end": 4887
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(GreatestIntEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator[] values;
private final DriverContext driverContext;
private Warnings warnings;
public GreatestIntEvaluator(Source source, EvalOperator.ExpressionEvaluator[] values,
DriverContext driverContext) {
this.source = source;
this.values = values;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
IntBlock[] valuesBlocks = new IntBlock[values.length];
try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
for (int i = 0; i < valuesBlocks.length; i++) {
valuesBlocks[i] = (IntBlock)values[i].eval(page);
}
IntVector[] valuesVectors = new IntVector[values.length];
for (int i = 0; i < valuesBlocks.length; i++) {
valuesVectors[i] = valuesBlocks[i].asVector();
if (valuesVectors[i] == null) {
return eval(page.getPositionCount(), valuesBlocks);
}
}
return eval(page.getPositionCount(), valuesVectors).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
for (EvalOperator.ExpressionEvaluator e : values) {
baseRamBytesUsed += e.baseRamBytesUsed();
}
return baseRamBytesUsed;
}
public IntBlock eval(int positionCount, IntBlock[] valuesBlocks) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
int[] valuesValues = new int[values.length];
position: for (int p = 0; p < positionCount; p++) {
for (int i = 0; i < valuesBlocks.length; i++) {
switch (valuesBlocks[i].getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
}
// unpack valuesBlocks into valuesValues
for (int i = 0; i < valuesBlocks.length; i++) {
int o = valuesBlocks[i].getFirstValueIndex(p);
valuesValues[i] = valuesBlocks[i].getInt(o);
}
result.appendInt(Greatest.process(valuesValues));
}
return result.build();
}
}
public IntVector eval(int positionCount, IntVector[] valuesVectors) {
try(IntVector.FixedBuilder result = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) {
int[] valuesValues = new int[values.length];
position: for (int p = 0; p < positionCount; p++) {
// unpack valuesVectors into valuesValues
for (int i = 0; i < valuesVectors.length; i++) {
valuesValues[i] = valuesVectors[i].getInt(p);
}
result.appendInt(p, Greatest.process(valuesValues));
}
return result.build();
}
}
@Override
public String toString() {
return "GreatestIntEvaluator[" + "values=" + Arrays.toString(values) + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(() -> Releasables.close(values));
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
GreatestIntEvaluator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/jdbc/Size.java
|
{
"start": 712,
"end": 3894
}
|
enum ____ {
NONE( 1 ),
K( NONE.factor * 1024 ),
M( K.factor * 1024 ),
G( M.factor * 1024 );
private final long factor;
LobMultiplier(long factor) {
this.factor = factor;
}
public long getFactor() {
return factor;
}
}
public static final long DEFAULT_LENGTH = Length.DEFAULT;
public static final long LONG_LENGTH = Length.LONG;
public static final long DEFAULT_LOB_LENGTH = Length.LOB_DEFAULT;
public static final int DEFAULT_PRECISION = 19;
public static final int DEFAULT_SCALE = 2;
private Integer precision;
private Integer scale;
private Long length;
private Integer arrayLength;
private LobMultiplier lobMultiplier;
public Size() {
}
/**
* Complete constructor.
*
* @param precision numeric precision
* @param scale numeric scale
* @param length type length
* @param lobMultiplier LOB length multiplier
* @deprecated in favor of {@link Size#Size(Integer, Integer, Long)}
*/
@Deprecated(forRemoval = true, since = "6.5")
public Size(Integer precision, Integer scale, Long length, LobMultiplier lobMultiplier) {
this.precision = precision;
this.scale = scale;
this.length = length;
this.lobMultiplier = lobMultiplier;
}
/**
* @deprecated in favor of {@link Size#Size(Integer, Integer, Long)}
*/
@Deprecated(forRemoval = true , since = "6.5")
public Size(Integer precision, Integer scale, Integer length, LobMultiplier lobMultiplier) {
this.precision = precision;
this.scale = scale;
this.length = length == null ? null : length.longValue();
this.lobMultiplier = lobMultiplier;
}
public Size(Integer precision, Integer scale, Long length) {
this( precision, scale, length, Size.LobMultiplier.NONE );
}
public static Size nil() {
return new Size();
}
public static Size precision(int precision) {
return new Size( precision, -1, -1L, null );
}
public static Size precision(int precision, int scale) {
return new Size( precision, scale, -1L, null );
}
public static Size length(long length) {
return new Size( -1, -1, length, null );
}
public static Size length(long length, LobMultiplier lobMultiplier) {
return new Size( -1, -1, length, lobMultiplier );
}
public Integer getPrecision() {
return precision;
}
public Integer getScale() {
return scale;
}
public Long getLength() {
return length;
}
public Integer getArrayLength() {
return arrayLength;
}
@Deprecated(forRemoval = true, since = "6.5")
public LobMultiplier getLobMultiplier() {
return lobMultiplier;
}
public void initialize(Size size) {
this.precision = size.precision;
this.scale = size.scale;
this.length = size.length;
}
public Size setPrecision(Integer precision) {
this.precision = precision;
return this;
}
public Size setScale(Integer scale) {
this.scale = scale;
return this;
}
public Size setLength(Long length) {
this.length = length;
return this;
}
public Size setArrayLength(Integer arrayLength) {
this.arrayLength = arrayLength;
return this;
}
@Deprecated(forRemoval = true, since = "6.5")
public Size setLobMultiplier(LobMultiplier lobMultiplier) {
this.lobMultiplier = lobMultiplier;
return this;
}
}
|
LobMultiplier
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/test/java/org/springframework/web/socket/AbstractWebSocketIntegrationTests.java
|
{
"start": 2069,
"end": 2593
}
|
class ____ {
static Stream<Arguments> argumentsFactory() {
return Stream.of(
arguments(named("Jetty", new JettyWebSocketTestServer()), named("Standard", new StandardWebSocketClient())),
arguments(named("Tomcat", new TomcatWebSocketTestServer()), named("Standard", new StandardWebSocketClient())));
}
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@ParameterizedTest(name = "[{index}] server = {0}, client = {1}")
@MethodSource("argumentsFactory")
protected @
|
AbstractWebSocketIntegrationTests
|
java
|
apache__camel
|
tooling/openapi-rest-dsl-generator/src/test/java/org/apache/camel/generator/openapi/RestDslYamlGeneratorV3SimpleTest.java
|
{
"start": 1284,
"end": 2079
}
|
class ____ {
static OpenAPI document;
@BeforeAll
public static void readOpenApiDoc() throws Exception {
document = new OpenAPIV3Parser().read("src/test/resources/org/apache/camel/generator/openapi/openapi-spec-simple.json");
}
@Test
public void shouldGenerateYamlWithDefaults() throws Exception {
final CamelContext context = new DefaultCamelContext();
final String yaml = RestDslGenerator.toYaml(document).generate(context);
final URI file = RestDslXmlGeneratorV3Test.class.getResource("/OpenApiV3PetstoreSimpleYaml.txt").toURI();
final String expectedContent = new String(Files.readAllBytes(Paths.get(file)), StandardCharsets.UTF_8);
assertThat(yaml).isEqualTo(expectedContent);
}
}
|
RestDslYamlGeneratorV3SimpleTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.