language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/AdviceRouteTest.java | {
"start": 3002,
"end": 3185
} | class ____ extends AdviceWithRouteBuilder {
@Override
public void configure() throws Exception {
replaceFromWith("direct:bar");
}
}
}
| TestBuilder2 |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregateSimpleExpressionIssueManualTest.java | {
"start": 1463,
"end": 3567
} | class ____ extends ContextTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(AggregateSimpleExpressionIssueManualTest.class);
private static final String DATA = "100,200,1,123456,2010-03-01T12:13:14,100,USD,Best Buy,5045,Santa Monica,CA,Type\n";
private final MyBean myBean = new MyBean();
private final AggStrategy aggStrategy = new AggStrategy();
@Test
public void testAggregateSimpleExpression() throws Exception {
// 10 files + 10 files * 100 batches
int files = 10;
int rows = 100000;
int batches = rows / 1000;
int total = files + (files * rows) + (files * batches);
LOG.info("There are {} exchanges", total);
NotifyBuilder notify = new NotifyBuilder(context).whenDone(total).create();
LOG.info("Writing 10 files with 100000 rows in each file");
// write 10 files of 100k rows
for (int i = 0; i < files; i++) {
try (Writer out = Files.newBufferedWriter(testFile("data" + i))) {
for (int j = 0; j < rows; j++) {
out.write(DATA);
}
}
}
// start the route
StopWatch watch = new StopWatch();
context.getRouteController().startRoute("foo");
LOG.info("Waiting to process all the files");
boolean matches = notify.matches(3, TimeUnit.MINUTES);
LOG.info("Should process all files {}", matches);
LOG.info("Time taken {} ms", watch.taken());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(fileUri()).routeId("foo").autoStartup(false).log("Picked up ${file:name}").split()
.tokenize("\n").streaming()
.aggregate(constant(true), aggStrategy).completionSize(simple("1000")).completionTimeout(simple("500"))
.bean(myBean).end().end();
}
};
}
public static final | AggregateSimpleExpressionIssueManualTest |
java | reactor__reactor-core | reactor-core/src/withMicrometerTest/java/reactor/core/publisher/ThreadSwitchingParallelFlux.java | {
"start": 853,
"end": 2014
} | class ____<T> extends ParallelFlux<T> implements
Subscription, Runnable {
private final T item;
private final ExecutorService executorService;
AtomicBoolean done = new AtomicBoolean();
CoreSubscriber<? super T>[] actual;
public ThreadSwitchingParallelFlux(T item, ExecutorService executorService) {
this.item = item;
this.executorService = executorService;
}
@Override
public int parallelism() {
return 1;
}
@Override
public void subscribe(CoreSubscriber<? super T>[] subscribers) {
if (!validate(subscribers)) {
return;
}
this.actual = subscribers;
executorService.submit(this);
}
@Override
public void run() {
actual[0].onSubscribe(this);
}
private void deliver() {
if (done.compareAndSet(false, true)) {
this.actual[0].onNext(this.item);
this.executorService.submit(this.actual[0]::onComplete);
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
if (!done.get()) {
this.executorService.submit(this::deliver);
}
}
}
@Override
public void cancel() {
done.set(true);
}
}
| ThreadSwitchingParallelFlux |
java | apache__flink | flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/AbstractStateChangeLogger.java | {
"start": 2668,
"end": 8242
} | class ____<Key, Value, Ns>
implements StateChangeLogger<Value, Ns>, Closeable {
protected final StateChangelogWriter<?> stateChangelogWriter;
protected final InternalKeyContext<Key> keyContext;
protected RegisteredStateMetaInfoBase metaInfo;
private final StateMetaInfoSnapshot.BackendStateType stateType;
private final DataOutputSerializer out = new DataOutputSerializer(128);
private boolean metaDataWritten = false;
private final short stateShortId;
public AbstractStateChangeLogger(
StateChangelogWriter<?> stateChangelogWriter,
InternalKeyContext<Key> keyContext,
RegisteredStateMetaInfoBase metaInfo,
short stateId) {
this.stateChangelogWriter = checkNotNull(stateChangelogWriter);
this.keyContext = checkNotNull(keyContext);
this.metaInfo = checkNotNull(metaInfo);
if (metaInfo instanceof RegisteredKeyValueStateBackendMetaInfo) {
this.stateType = KEY_VALUE;
} else if (metaInfo instanceof RegisteredPriorityQueueStateBackendMetaInfo) {
this.stateType = PRIORITY_QUEUE;
} else {
throw new IllegalArgumentException("Unsupported state type: " + metaInfo);
}
this.stateShortId = stateId;
}
@Override
public void valueUpdated(Value newValue, Ns ns) throws IOException {
if (newValue == null) {
valueCleared(ns);
} else {
log(SET, out -> serializeValue(newValue, out), ns);
}
}
@Override
public void valueUpdatedInternal(Value newValue, Ns ns) throws IOException {
if (newValue == null) {
valueCleared(ns);
} else {
log(SET_INTERNAL, out -> serializeValue(newValue, out), ns);
}
}
protected abstract void serializeValue(Value value, DataOutputView out) throws IOException;
@Override
public void valueAdded(Value addedValue, Ns ns) throws IOException {
log(ADD, out -> serializeValue(addedValue, out), ns);
}
@Override
public void valueCleared(Ns ns) throws IOException {
log(CLEAR, ns);
}
@Override
public void valueElementAdded(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, Ns ns)
throws IOException {
log(ADD_ELEMENT, dataSerializer, ns);
}
@Override
public void valueElementAddedOrUpdated(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, Ns ns)
throws IOException {
log(ADD_OR_UPDATE_ELEMENT, dataSerializer, ns);
}
@Override
public void valueElementRemoved(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, Ns ns)
throws IOException {
log(REMOVE_ELEMENT, dataSerializer, ns);
}
@Override
public void resetWritingMetaFlag() {
metaDataWritten = false;
}
protected AbstractStateChangeLogger<Key, Value, Ns> setMetaInfo(
RegisteredStateMetaInfoBase metaInfo) {
this.metaInfo = metaInfo;
return this;
}
protected void log(StateChangeOperation op, Ns ns) throws IOException {
logMetaIfNeeded();
stateChangelogWriter.append(keyContext.getCurrentKeyGroupIndex(), serialize(op, ns, null));
}
protected void log(
StateChangeOperation op,
@Nullable ThrowingConsumer<DataOutputView, IOException> dataWriter,
Ns ns)
throws IOException {
logMetaIfNeeded();
stateChangelogWriter.append(
keyContext.getCurrentKeyGroupIndex(), serialize(op, ns, dataWriter));
}
private void logMetaIfNeeded() throws IOException {
if (!metaDataWritten) {
stateChangelogWriter.appendMeta(
serializeRaw(
out -> {
out.writeByte(METADATA.getCode());
out.writeInt(CURRENT_STATE_META_INFO_SNAPSHOT_VERSION);
StateMetaInfoSnapshotReadersWriters.getWriter()
.writeStateMetaInfoSnapshot(metaInfo.snapshot(), out);
writeDefaultValueAndTtl(out);
out.writeShort(stateShortId);
out.writeByte(stateType.getCode());
}));
metaDataWritten = true;
}
}
protected void writeDefaultValueAndTtl(DataOutputView out) throws IOException {}
private byte[] serialize(
StateChangeOperation op,
Ns ns,
@Nullable ThrowingConsumer<DataOutputView, IOException> dataWriter)
throws IOException {
return serializeRaw(
wrapper -> {
wrapper.writeByte(op.getCode());
wrapper.writeShort(stateShortId);
serializeScope(ns, wrapper);
if (dataWriter != null) {
dataWriter.accept(wrapper);
}
});
}
protected abstract void serializeScope(Ns ns, DataOutputView out) throws IOException;
private byte[] serializeRaw(ThrowingConsumer<DataOutputView, IOException> dataWriter)
throws IOException {
dataWriter.accept(out);
byte[] bytes = out.getCopyOfBuffer();
out.clear();
return bytes;
}
@Override
public void close() throws IOException {
// do nothing
}
}
| AbstractStateChangeLogger |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/model/DelegatingServerRestHandler.java | {
"start": 227,
"end": 961
} | class ____ implements ServerRestHandler {
private List<ServerRestHandler> delegates;
public DelegatingServerRestHandler(List<ServerRestHandler> delegates) {
this.delegates = delegates;
}
// for bytecode recording
public DelegatingServerRestHandler() {
}
public List<ServerRestHandler> getDelegates() {
return delegates;
}
public void setDelegates(List<ServerRestHandler> delegates) {
this.delegates = delegates;
}
@Override
public void handle(ResteasyReactiveRequestContext requestContext) throws Exception {
for (int i = 0; i < delegates.size(); i++) {
delegates.get(0).handle(requestContext);
}
}
}
| DelegatingServerRestHandler |
java | apache__camel | components/camel-consul/src/test/java/org/apache/camel/component/consul/cloud/ConsulServiceRegistrationWithRoutePolicyAndMetadataIT.java | {
"start": 1062,
"end": 1791
} | class ____ extends ConsulServiceRegistrationTestBase {
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
fromF("jetty:http://0.0.0.0:%d/service/endpoint", SERVICE_PORT).routeId(SERVICE_ID)
.routeProperty(ServiceDefinition.SERVICE_META_ID, SERVICE_ID)
.routeProperty(ServiceDefinition.SERVICE_META_NAME, SERVICE_NAME)
.routePolicy(new ServiceRegistrationRoutePolicy()).noAutoStartup()
.to("log:service-registry?level=INFO");
}
};
}
}
| ConsulServiceRegistrationWithRoutePolicyAndMetadataIT |
java | spring-projects__spring-boot | core/spring-boot-test/src/test/java/org/springframework/boot/test/context/SpringBootTestActiveProfileTests.java | {
"start": 1280,
"end": 1545
} | class ____ {
@Autowired
private ApplicationContext context;
@Test
void profiles() {
assertThat(this.context.getEnvironment().getActiveProfiles()).containsExactly("override");
}
@Configuration(proxyBeanMethods = false)
static | SpringBootTestActiveProfileTests |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/datetime/FastDatePrinter.java | {
"start": 33998,
"end": 35066
} | class ____ implements NumberRule {
private final int mField;
/**
* Constructs an instance of {@code TwoDigitNumberField} with the specified field.
*
* @param field the field
*/
TwoDigitNumberField(final int field) {
mField = field;
}
/**
* {@inheritDoc}
*/
@Override
public int estimateLength() {
return 2;
}
/**
* {@inheritDoc}
*/
@Override
public void appendTo(final Appendable buffer, final Calendar calendar) throws IOException {
appendTo(buffer, calendar.get(mField));
}
/**
* {@inheritDoc}
*/
@Override
public final void appendTo(final Appendable buffer, final int value) throws IOException {
if (value < 100) {
appendDigits(buffer, value);
} else {
appendFullDigits(buffer, value, 2);
}
}
}
/**
* <p>Inner | TwoDigitNumberField |
java | alibaba__nacos | client/src/main/java/com/alibaba/nacos/client/ai/remote/redo/AiGrpcRedoService.java | {
"start": 1169,
"end": 4918
} | class ____ extends AbstractRedoService {
private static final Logger LOGGER = LoggerFactory.getLogger(AiGrpcRedoService.class);
private final AiGrpcClient aiGrpcClient;
public AiGrpcRedoService(NacosClientProperties properties, AiGrpcClient aiGrpcClient) {
super(LOGGER, properties, RemoteConstants.LABEL_MODULE_AI);
this.aiGrpcClient = aiGrpcClient;
startRedoTask();
}
@Override
protected AbstractRedoTask buildRedoTask() {
return new AiRedoScheduledTask(this, aiGrpcClient);
}
public void cachedMcpServerEndpointForRedo(String mcpName, String address, int port, String version) {
RedoData<McpServerEndpoint> redoData = buildMcpServerEndpointRedoData(mcpName, address, port, version);
super.cachedRedoData(mcpName, redoData, McpServerEndpoint.class);
}
public void removeMcpServerEndpointForRedo(String mcpName) {
super.removeRedoData(mcpName, McpServerEndpoint.class);
}
public void mcpServerEndpointRegistered(String mcpName) {
super.dataRegistered(mcpName, McpServerEndpoint.class);
}
public void mcpServerEndpointDeregister(String mcpName) {
super.dataDeregister(mcpName, McpServerEndpoint.class);
}
public void mcpServerEndpointDeregistered(String mcpName) {
super.dataDeregistered(mcpName, McpServerEndpoint.class);
}
public boolean isMcpServerEndpointRegistered(String mcpName) {
return super.isDataRegistered(mcpName, McpServerEndpoint.class);
}
public Set<RedoData<McpServerEndpoint>> findMcpServerEndpointRedoData() {
return super.findRedoData(McpServerEndpoint.class);
}
public McpServerEndpoint getMcpServerEndpoint(String mcpName) {
RedoData<McpServerEndpoint> redoData = super.getRedoData(mcpName, McpServerEndpoint.class);
return redoData == null ? null : redoData.get();
}
private RedoData<McpServerEndpoint> buildMcpServerEndpointRedoData(String mcpName, String address, int port,
String version) {
McpServerEndpoint mcpServerEndpoint = new McpServerEndpoint(address, port, version);
McpServerEndpointRedoData result = new McpServerEndpointRedoData(mcpName);
result.set(mcpServerEndpoint);
return result;
}
public void cachedAgentEndpointForRedo(String agentName, AgentEndpointWrapper wrapper) {
AgentEndpointRedoData redoData = new AgentEndpointRedoData(agentName, wrapper);
super.cachedRedoData(agentName, redoData, AgentEndpointWrapper.class);
}
public void removeAgentEndpointForRedo(String agentName) {
super.removeRedoData(agentName, AgentEndpointWrapper.class);
}
public void agentEndpointRegistered(String agentName) {
super.dataRegistered(agentName, AgentEndpointWrapper.class);
}
public void agentEndpointDeregister(String agentName) {
super.dataDeregister(agentName, AgentEndpointWrapper.class);
}
public void agentEndpointDeregistered(String agentName) {
super.dataDeregistered(agentName, AgentEndpointWrapper.class);
}
public boolean isAgentEndpointRegistered(String agentName) {
return super.isDataRegistered(agentName, AgentEndpointWrapper.class);
}
public Set<RedoData<AgentEndpointWrapper>> findAgentEndpointRedoData() {
return super.findRedoData(AgentEndpointWrapper.class);
}
public AgentEndpointWrapper getAgentEndpoint(String agentName) {
RedoData<AgentEndpointWrapper> redoData = super.getRedoData(agentName, AgentEndpointWrapper.class);
return redoData == null ? null : redoData.get();
}
}
| AiGrpcRedoService |
java | google__guice | core/test/com/google/inject/GenericInjectionTest.java | {
"start": 2250,
"end": 2740
} | class ____ uses raw type
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
protected void configure() {
bind(Key.get(new TypeLiteral<Parameterized<String>>() {}))
.to((Class) Parameterized.class);
}
});
Parameterized<String> parameterized =
injector.getInstance(Key.get(new TypeLiteral<Parameterized<String>>() {}));
assertNotNull(parameterized);
}
static | literal |
java | micronaut-projects__micronaut-core | aop/src/main/java/io/micronaut/aop/internal/intercepted/SynchronousInterceptedMethod.java | {
"start": 1036,
"end": 2027
} | class ____ implements InterceptedMethod {
private final MethodInvocationContext<?, ?> context;
private final Argument<?> returnTypeValue;
SynchronousInterceptedMethod(MethodInvocationContext<?, ?> context) {
this.context = context;
this.returnTypeValue = context.getReturnType().asArgument();
}
@Override
public ResultType resultType() {
return ResultType.SYNCHRONOUS;
}
@Override
public Argument<?> returnTypeValue() {
return returnTypeValue;
}
@Override
public Object interceptResult() {
return context.proceed();
}
@Override
public Object interceptResult(Interceptor<?, ?> from) {
return context.proceed(from);
}
@Override
public Object handleResult(Object result) {
return result;
}
@Override
public <E extends Throwable> Object handleException(Exception exception) throws E {
throw (E) exception;
}
}
| SynchronousInterceptedMethod |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_Johnny.java | {
"start": 1811,
"end": 1858
} | enum ____ {
MD5, SHA1
}
public static | EnumType |
java | quarkusio__quarkus | extensions/devservices/keycloak/src/main/java/io/quarkus/devservices/keycloak/KeycloakDevServicesUtils.java | {
"start": 580,
"end": 3332
} | class ____ {
private static final byte AMP = '&';
private static final byte EQ = '=';
private KeycloakDevServicesUtils() {
}
static WebClient createWebClient(Vertx vertx) {
WebClientOptions options = new WebClientOptions();
options.setTrustAll(true);
options.setVerifyHost(false);
return WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
}
static Uni<String> getPasswordAccessToken(WebClient client,
String tokenUrl,
String clientId,
String clientSecret,
String userName,
String userPassword,
Map<String, String> passwordGrantOptions) {
HttpRequest<Buffer> request = client.postAbs(tokenUrl);
request.putHeader(HttpHeaders.CONTENT_TYPE.toString(), HttpHeaders.APPLICATION_X_WWW_FORM_URLENCODED.toString());
io.vertx.mutiny.core.MultiMap props = new io.vertx.mutiny.core.MultiMap(MultiMap.caseInsensitiveMultiMap());
props.add("client_id", clientId);
if (clientSecret != null) {
props.add("client_secret", clientSecret);
}
props.add("username", userName);
props.add("password", userPassword);
props.add("grant_type", "password");
if (passwordGrantOptions != null) {
props.addAll(passwordGrantOptions);
}
return request.sendBuffer(encodeForm(props)).onItem()
.transform(KeycloakDevServicesUtils::getAccessTokenFromJson)
.onFailure()
.retry()
.withBackOff(Duration.ofSeconds(2), Duration.ofSeconds(2))
.expireIn(10 * 1000);
}
private static String getAccessTokenFromJson(HttpResponse<Buffer> resp) {
if (resp.statusCode() == 200) {
JsonObject json = resp.bodyAsJsonObject();
return json.getString("access_token");
} else {
String errorMessage = resp.bodyAsString();
throw new RuntimeException(errorMessage);
}
}
private static Buffer encodeForm(io.vertx.mutiny.core.MultiMap form) {
Buffer buffer = Buffer.buffer();
for (Map.Entry<String, String> entry : form) {
if (buffer.length() != 0) {
buffer.appendByte(AMP);
}
buffer.appendString(entry.getKey());
buffer.appendByte(EQ);
buffer.appendString(urlEncode(entry.getValue()));
}
return buffer;
}
private static String urlEncode(String value) {
try {
return URLEncoder.encode(value, StandardCharsets.UTF_8);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}
| KeycloakDevServicesUtils |
java | apache__flink | flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java | {
"start": 2446,
"end": 40345
} | class ____ {
@Test
void testAvroClassConversion() {
validateUserSchema(AvroSchemaConverter.convertToTypeInfo(User.class));
}
@Test
void testAvroSchemaConversion() {
final String schema = User.getClassSchema().toString(true);
validateUserSchema(AvroSchemaConverter.convertToTypeInfo(schema));
}
@Test
void testConvertAvroSchemaToDataType() {
final String schema = User.getClassSchema().toString(true);
validateUserSchema(AvroSchemaConverter.convertToDataType(schema));
}
@ParameterizedTest
@EnumSource(AvroEncoding.class)
void testAddingOptionalField(AvroEncoding encoding) throws IOException {
Schema oldSchema =
SchemaBuilder.record("record")
.fields()
.requiredLong("category_id")
.optionalString("name")
.endRecord();
Schema newSchema =
AvroSchemaConverter.convertToSchema(
ResolvedSchema.of(
Column.physical(
"category_id", DataTypes.BIGINT().notNull()),
Column.physical("name", DataTypes.STRING().nullable()),
Column.physical(
"description", DataTypes.STRING().nullable()))
.toSourceRowDataType()
.getLogicalType());
byte[] serializedRecord =
AvroTestUtils.writeRecord(
new GenericRecordBuilder(oldSchema)
.set("category_id", 1L)
.set("name", "test")
.build(),
oldSchema,
encoding);
GenericDatumReader<GenericRecord> datumReader =
new GenericDatumReader<>(oldSchema, newSchema);
Decoder decoder;
if (encoding == AvroEncoding.JSON) {
ByteArrayInputStream input = new ByteArrayInputStream(serializedRecord);
decoder = DecoderFactory.get().jsonDecoder(oldSchema, input);
} else {
decoder =
DecoderFactory.get()
.binaryDecoder(serializedRecord, 0, serializedRecord.length, null);
}
GenericRecord newRecord = datumReader.read(null, decoder);
assertThat(newRecord)
.isEqualTo(
new GenericRecordBuilder(newSchema)
.set("category_id", 1L)
.set("name", "test")
.set("description", null)
.build());
}
@Test
void testInvalidRawTypeAvroSchemaConversion() {
RowType rowType =
(RowType)
ResolvedSchema.of(
Column.physical("a", DataTypes.STRING()),
Column.physical(
"b",
DataTypes.RAW(Void.class, VoidSerializer.INSTANCE)))
.toSourceRowDataType()
.getLogicalType();
assertThatThrownBy(() -> AvroSchemaConverter.convertToSchema(rowType))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessageStartingWith("Unsupported to derive Schema for type: RAW");
}
@Test
void testInvalidTimestampTypeAvroSchemaConversion() {
RowType rowType =
(RowType)
ResolvedSchema.of(
Column.physical("a", DataTypes.STRING()),
Column.physical("b", DataTypes.TIMESTAMP(9)))
.toSourceRowDataType()
.getLogicalType();
assertThatThrownBy(() -> AvroSchemaConverter.convertToSchema(rowType))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Avro does not support TIMESTAMP type with precision: 9, "
+ "it only supports precision less than 3.");
}
@Test
void testInvalidTimeTypeAvroSchemaConversion() {
RowType rowType =
(RowType)
ResolvedSchema.of(
Column.physical("a", DataTypes.STRING()),
Column.physical("b", DataTypes.TIME(6)))
.toSourceRowDataType()
.getLogicalType();
assertThatThrownBy(() -> AvroSchemaConverter.convertToSchema(rowType))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Avro does not support TIME type with precision: 6, it only supports precision less than 3.");
}
@Test
void testRowTypeAvroSchemaConversion() {
RowType rowType =
(RowType)
ResolvedSchema.of(
Column.physical(
"row1",
DataTypes.ROW(
DataTypes.FIELD("a", DataTypes.STRING()))),
Column.physical(
"row2",
DataTypes.ROW(
DataTypes.FIELD("b", DataTypes.STRING()))),
Column.physical(
"row3",
DataTypes.ROW(
DataTypes.FIELD(
"row3",
DataTypes.ROW(
DataTypes.FIELD(
"c",
DataTypes
.STRING()))))))
.toSourceRowDataType()
.getLogicalType();
Schema schema = AvroSchemaConverter.convertToSchema(rowType);
assertThat(schema.toString(true))
.isEqualTo(
"{\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record\",\n"
+ " \"namespace\" : \"org.apache.flink.avro.generated\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"row1\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_row1\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"a\",\n"
+ " \"type\" : [ \"null\", \"string\" ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"row2\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_row2\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"b\",\n"
+ " \"type\" : [ \"null\", \"string\" ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"row3\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_row3\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"row3\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_row3_row3\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"c\",\n"
+ " \"type\" : [ \"null\", \"string\" ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ "}");
}
/** Test convert nullable data type to Avro schema then converts back. */
@Test
void testDataTypeToSchemaToDataTypeNullable() {
DataType dataType =
DataTypes.ROW(
DataTypes.FIELD("f_null", DataTypes.NULL()),
DataTypes.FIELD("f_boolean", DataTypes.BOOLEAN()),
// tinyint and smallint all convert to int
DataTypes.FIELD("f_int", DataTypes.INT()),
DataTypes.FIELD("f_bigint", DataTypes.BIGINT()),
DataTypes.FIELD("f_float", DataTypes.FLOAT()),
DataTypes.FIELD("f_double", DataTypes.DOUBLE()),
// char converts to string
DataTypes.FIELD("f_string", DataTypes.STRING()),
// binary converts to bytes
DataTypes.FIELD("f_varbinary", DataTypes.BYTES()),
DataTypes.FIELD("f_timestamp", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("f_date", DataTypes.DATE()),
DataTypes.FIELD("f_time", DataTypes.TIME(3)),
DataTypes.FIELD("f_decimal", DataTypes.DECIMAL(10, 0)),
DataTypes.FIELD(
"f_row",
DataTypes.ROW(
DataTypes.FIELD("f0", DataTypes.INT()),
DataTypes.FIELD("f1", DataTypes.TIMESTAMP(3)))),
// multiset converts to map
// map key is always not null
DataTypes.FIELD(
"f_map",
DataTypes.MAP(DataTypes.STRING().notNull(), DataTypes.INT())),
DataTypes.FIELD("f_array", DataTypes.ARRAY(DataTypes.INT())));
Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType());
DataType converted = AvroSchemaConverter.convertToDataType(schema.toString());
assertThat(converted).isEqualTo(dataType);
}
/** Test convert non-nullable data type to Avro schema then converts back. */
@Test
void testDataTypeToSchemaToDataTypeNonNullable() {
DataType dataType =
DataTypes.ROW(
DataTypes.FIELD("f_boolean", DataTypes.BOOLEAN().notNull()),
// tinyint and smallint all convert to int
DataTypes.FIELD("f_int", DataTypes.INT().notNull()),
DataTypes.FIELD("f_bigint", DataTypes.BIGINT().notNull()),
DataTypes.FIELD("f_float", DataTypes.FLOAT().notNull()),
DataTypes.FIELD("f_double", DataTypes.DOUBLE().notNull()),
// char converts to string
DataTypes.FIELD("f_string", DataTypes.STRING().notNull()),
// binary converts to bytes
DataTypes.FIELD("f_varbinary", DataTypes.BYTES().notNull()),
DataTypes.FIELD("f_timestamp", DataTypes.TIMESTAMP(3).notNull()),
DataTypes.FIELD("f_date", DataTypes.DATE().notNull()),
DataTypes.FIELD("f_time", DataTypes.TIME(3).notNull()),
DataTypes.FIELD("f_decimal", DataTypes.DECIMAL(10, 0).notNull()),
DataTypes.FIELD(
"f_row",
DataTypes.ROW(
DataTypes.FIELD(
"f0", DataTypes.INT().notNull()),
DataTypes.FIELD(
"f1",
DataTypes.TIMESTAMP(3).notNull()))
.notNull()),
// multiset converts to map
// map key is always not null
DataTypes.FIELD(
"f_map",
DataTypes.MAP(
DataTypes.STRING().notNull(),
DataTypes.INT().notNull())
.notNull()),
DataTypes.FIELD(
"f_array",
DataTypes.ARRAY(DataTypes.INT().notNull()).notNull()))
.notNull();
Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType());
DataType converted = AvroSchemaConverter.convertToDataType(schema.toString());
assertThat(converted).isEqualTo(dataType);
}
/** Test convert nullable Avro schema to data type then converts back. */
@Test
void testSchemaToDataTypeToSchemaNullable() {
String schemaStr =
"{\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record\",\n"
+ " \"namespace\" : \"org.apache.flink.avro.generated\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"f_null\",\n"
+ " \"type\" : \"null\",\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_boolean\",\n"
+ " \"type\" : [ \"null\", \"boolean\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_int\",\n"
+ " \"type\" : [ \"null\", \"int\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_bigint\",\n"
+ " \"type\" : [ \"null\", \"long\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_float\",\n"
+ " \"type\" : [ \"null\", \"float\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_double\",\n"
+ " \"type\" : [ \"null\", \"double\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_string\",\n"
+ " \"type\" : [ \"null\", \"string\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_varbinary\",\n"
+ " \"type\" : [ \"null\", \"bytes\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_timestamp\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"long\",\n"
+ " \"logicalType\" : \"timestamp-millis\"\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_date\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"int\",\n"
+ " \"logicalType\" : \"date\"\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_time\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"int\",\n"
+ " \"logicalType\" : \"time-millis\"\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_decimal\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"bytes\",\n"
+ " \"logicalType\" : \"decimal\",\n"
+ " \"precision\" : 10,\n"
+ " \"scale\" : 0\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_row\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_f_row\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"f0\",\n"
+ " \"type\" : [ \"null\", \"int\" ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f1\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"long\",\n"
+ " \"logicalType\" : \"timestamp-millis\"\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_map\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"map\",\n"
+ " \"values\" : [ \"null\", \"int\" ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " }, {\n"
+ " \"name\" : \"f_array\",\n"
+ " \"type\" : [ \"null\", {\n"
+ " \"type\" : \"array\",\n"
+ " \"items\" : [ \"null\", \"int\" ]\n"
+ " } ],\n"
+ " \"default\" : null\n"
+ " } ]\n"
+ "}";
DataType dataType = AvroSchemaConverter.convertToDataType(schemaStr);
Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType());
assertThat(schema).isEqualTo(new Schema.Parser().parse(schemaStr));
}
/** Test convert non-nullable Avro schema to data type then converts back. */
@Test
void testSchemaToDataTypeToSchemaNonNullable() {
String schemaStr =
"{\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record\",\n"
+ " \"namespace\" : \"org.apache.flink.avro.generated\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"f_boolean\",\n"
+ " \"type\" : \"boolean\"\n"
+ " }, {\n"
+ " \"name\" : \"f_int\",\n"
+ " \"type\" : \"int\"\n"
+ " }, {\n"
+ " \"name\" : \"f_bigint\",\n"
+ " \"type\" : \"long\"\n"
+ " }, {\n"
+ " \"name\" : \"f_float\",\n"
+ " \"type\" : \"float\"\n"
+ " }, {\n"
+ " \"name\" : \"f_double\",\n"
+ " \"type\" : \"double\"\n"
+ " }, {\n"
+ " \"name\" : \"f_string\",\n"
+ " \"type\" : \"string\"\n"
+ " }, {\n"
+ " \"name\" : \"f_varbinary\",\n"
+ " \"type\" : \"bytes\"\n"
+ " }, {\n"
+ " \"name\" : \"f_timestamp\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"long\",\n"
+ " \"logicalType\" : \"timestamp-millis\"\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_date\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"int\",\n"
+ " \"logicalType\" : \"date\"\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_time\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"int\",\n"
+ " \"logicalType\" : \"time-millis\"\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_decimal\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"bytes\",\n"
+ " \"logicalType\" : \"decimal\",\n"
+ " \"precision\" : 10,\n"
+ " \"scale\" : 0\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_row\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"record\",\n"
+ " \"name\" : \"record_f_row\",\n"
+ " \"fields\" : [ {\n"
+ " \"name\" : \"f0\",\n"
+ " \"type\" : \"int\"\n"
+ " }, {\n"
+ " \"name\" : \"f1\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"long\",\n"
+ " \"logicalType\" : \"timestamp-millis\"\n"
+ " }\n"
+ " } ]\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_map\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"map\",\n"
+ " \"values\" : \"int\"\n"
+ " }\n"
+ " }, {\n"
+ " \"name\" : \"f_array\",\n"
+ " \"type\" : {\n"
+ " \"type\" : \"array\",\n"
+ " \"items\" : \"int\"\n"
+ " }\n"
+ " } ]\n"
+ "}";
DataType dataType = AvroSchemaConverter.convertToDataType(schemaStr);
Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType());
assertThat(schema).isEqualTo(new Schema.Parser().parse(schemaStr));
}
@Test
void testTimestampsSchemaToDataTypeToSchemaLegacyTimestampMapping() {
final Tuple4<Class<? extends SpecificRecord>, SpecificRecord, GenericRecord, Row> testData =
AvroTestUtils.getTimestampTestData();
String schemaStr = testData.f1.getSchema().toString();
DataType dataType = AvroSchemaConverter.convertToDataType(schemaStr);
assertThatThrownBy(() -> AvroSchemaConverter.convertToSchema(dataType.getLogicalType()))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Avro does not support TIMESTAMP type with precision: 6, it only supports precision less than 3.");
}
@Test
void testTimestampsSchemaToTypeInfoLegacyTimestampMapping() {
final Tuple4<Class<? extends SpecificRecord>, SpecificRecord, GenericRecord, Row> testData =
AvroTestUtils.getTimestampTestData();
String schemaStr = testData.f1.getSchema().toString();
TypeInformation<Row> typeInfo = AvroSchemaConverter.convertToTypeInfo(schemaStr);
validateLegacyTimestampsSchema(typeInfo);
}
@Test
void testTimestampsSchemaToDataTypeToSchemaNewMapping() {
final Tuple4<Class<? extends SpecificRecord>, SpecificRecord, GenericRecord, Row> testData =
AvroTestUtils.getTimestampTestData();
String schemaStr = testData.f1.getSchema().toString();
DataType dataType = AvroSchemaConverter.convertToDataType(schemaStr, false);
Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType(), false);
DataType dataType2 = AvroSchemaConverter.convertToDataType(schema.toString(), false);
validateTimestampsSchema(dataType2);
}
@Test
void testTimestampsSchemaToTypeInfoNewMapping() {
final Tuple4<Class<? extends SpecificRecord>, SpecificRecord, GenericRecord, Row> testData =
AvroTestUtils.getTimestampTestData();
String schemaStr = testData.f1.getSchema().toString();
TypeInformation<Row> typeInfo = AvroSchemaConverter.convertToTypeInfo(schemaStr, false);
validateTimestampsSchema(typeInfo);
}
    /**
     * Asserts that {@code actual} equals the expected type information for the generated
     * "User" record, covering primitive, array, map, union, nested-record and
     * logical-type (date/time/timestamp/decimal) fields.
     */
    private void validateUserSchema(TypeInformation<?> actual) {
        // Expected type of the nested "address" record field.
        final TypeInformation<Row> address =
                Types.ROW_NAMED(
                        new String[] {"num", "street", "city", "state", "zip"},
                        Types.INT,
                        Types.STRING,
                        Types.STRING,
                        Types.STRING,
                        Types.STRING);
        final TypeInformation<Row> user =
                Types.ROW_NAMED(
                        new String[] {
                            "name",
                            "favorite_number",
                            "favorite_color",
                            "type_long_test",
                            "type_double_test",
                            "type_null_test",
                            "type_bool_test",
                            "type_array_string",
                            "type_array_boolean",
                            "type_nullable_array",
                            "type_enum",
                            "type_map",
                            "type_fixed",
                            "type_union",
                            "type_nested",
                            "type_bytes",
                            "type_date",
                            "type_time_millis",
                            "type_time_micros",
                            "type_timestamp_millis",
                            "type_timestamp_micros",
                            "type_decimal_bytes",
                            "type_decimal_fixed"
                        },
                        Types.STRING,
                        Types.INT,
                        Types.STRING,
                        Types.LONG,
                        Types.DOUBLE,
                        Types.VOID,
                        Types.BOOLEAN,
                        Types.OBJECT_ARRAY(Types.STRING),
                        Types.OBJECT_ARRAY(Types.BOOLEAN),
                        Types.OBJECT_ARRAY(Types.STRING),
                        Types.STRING,
                        Types.MAP(Types.STRING, Types.LONG),
                        Types.PRIMITIVE_ARRAY(Types.BYTE),
                        Types.GENERIC(Object.class),
                        address,
                        Types.PRIMITIVE_ARRAY(Types.BYTE),
                        Types.SQL_DATE,
                        Types.SQL_TIME,
                        Types.SQL_TIME,
                        Types.SQL_TIMESTAMP,
                        Types.SQL_TIMESTAMP,
                        Types.BIG_DEC,
                        Types.BIG_DEC);
        assertThat(actual).isEqualTo(user);
        // schemaEquals additionally compares field names, which equals() alone
        // does not guarantee for row type information.
        final RowTypeInfo userRowInfo = (RowTypeInfo) user;
        assertThat(userRowInfo.schemaEquals(actual)).isTrue();
    }
private void validateTimestampsSchema(TypeInformation<?> actual) {
final TypeInformation<Row> timestamps =
Types.ROW_NAMED(
new String[] {
"type_timestamp_millis",
"type_timestamp_micros",
"type_local_timestamp_millis",
"type_local_timestamp_micros"
},
Types.INSTANT,
Types.INSTANT,
Types.LOCAL_DATE_TIME,
Types.LOCAL_DATE_TIME);
final RowTypeInfo timestampsRowTypeInfo = (RowTypeInfo) timestamps;
assertThat(timestampsRowTypeInfo.schemaEquals(actual)).isTrue();
}
private void validateLegacyTimestampsSchema(TypeInformation<?> actual) {
final TypeInformation<Row> timestamps =
Types.ROW_NAMED(
new String[] {
"type_timestamp_millis",
"type_timestamp_micros",
"type_local_timestamp_millis",
"type_local_timestamp_micros"
},
Types.SQL_TIMESTAMP,
Types.SQL_TIMESTAMP,
Types.LONG,
Types.LONG);
final RowTypeInfo timestampsRowTypeInfo = (RowTypeInfo) timestamps;
assertThat(timestampsRowTypeInfo.schemaEquals(actual)).isTrue();
}
private void validateLegacyTimestampsSchema(DataType actual) {
final DataType timestamps =
DataTypes.ROW(
DataTypes.FIELD(
"type_timestamp_millis", DataTypes.TIMESTAMP(3).notNull()),
DataTypes.FIELD(
"type_timestamp_micros", DataTypes.TIMESTAMP(6).notNull()),
DataTypes.FIELD(
"type_local_timestamp_millis",
DataTypes.BIGINT().notNull()),
DataTypes.FIELD(
"type_local_timestamp_micros",
DataTypes.BIGINT().notNull()))
.notNull();
assertThat(actual).isEqualTo(timestamps);
}
private void validateTimestampsSchema(DataType actual) {
final DataType timestamps =
DataTypes.ROW(
DataTypes.FIELD(
"type_timestamp_millis",
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).notNull()),
DataTypes.FIELD(
"type_timestamp_micros",
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(6).notNull()),
DataTypes.FIELD(
"type_local_timestamp_millis",
DataTypes.TIMESTAMP(3).notNull()),
DataTypes.FIELD(
"type_local_timestamp_micros",
DataTypes.TIMESTAMP(6).notNull()))
.notNull();
assertThat(actual).isEqualTo(timestamps);
}
    /**
     * Asserts that {@code actual} equals the expected {@link DataType} for the generated
     * "User" record, including nullability, nested rows, arrays, maps, a raw union type
     * and logical (date/time/timestamp/decimal) types.
     */
    private void validateUserSchema(DataType actual) {
        // Expected type of the nested "address" record field.
        final DataType address =
                DataTypes.ROW(
                        DataTypes.FIELD("num", DataTypes.INT().notNull()),
                        DataTypes.FIELD("street", DataTypes.STRING().notNull()),
                        DataTypes.FIELD("city", DataTypes.STRING().notNull()),
                        DataTypes.FIELD("state", DataTypes.STRING().notNull()),
                        DataTypes.FIELD("zip", DataTypes.STRING().notNull()));
        final DataType user =
                DataTypes.ROW(
                                DataTypes.FIELD("name", DataTypes.STRING().notNull()),
                                DataTypes.FIELD("favorite_number", DataTypes.INT()),
                                DataTypes.FIELD("favorite_color", DataTypes.STRING()),
                                DataTypes.FIELD("type_long_test", DataTypes.BIGINT()),
                                DataTypes.FIELD("type_double_test", DataTypes.DOUBLE().notNull()),
                                DataTypes.FIELD("type_null_test", DataTypes.NULL()),
                                DataTypes.FIELD("type_bool_test", DataTypes.BOOLEAN().notNull()),
                                DataTypes.FIELD(
                                        "type_array_string",
                                        DataTypes.ARRAY(DataTypes.STRING().notNull()).notNull()),
                                DataTypes.FIELD(
                                        "type_array_boolean",
                                        DataTypes.ARRAY(DataTypes.BOOLEAN().notNull()).notNull()),
                                DataTypes.FIELD(
                                        "type_nullable_array",
                                        DataTypes.ARRAY(DataTypes.STRING().notNull())),
                                DataTypes.FIELD("type_enum", DataTypes.STRING().notNull()),
                                DataTypes.FIELD(
                                        "type_map",
                                        DataTypes.MAP(
                                                        DataTypes.STRING().notNull(),
                                                        DataTypes.BIGINT().notNull())
                                                .notNull()),
                                DataTypes.FIELD("type_fixed", DataTypes.VARBINARY(16)),
                                // Avro unions that are not a simple nullable type map to a raw
                                // type backed by generic type information.
                                DataTypes.FIELD(
                                        "type_union",
                                        new AtomicDataType(
                                                new TypeInformationRawType<>(
                                                        false, Types.GENERIC(Object.class)),
                                                Object.class)),
                                DataTypes.FIELD("type_nested", address),
                                DataTypes.FIELD("type_bytes", DataTypes.BYTES().notNull()),
                                DataTypes.FIELD("type_date", DataTypes.DATE().notNull()),
                                DataTypes.FIELD("type_time_millis", DataTypes.TIME(3).notNull()),
                                DataTypes.FIELD("type_time_micros", DataTypes.TIME(6).notNull()),
                                DataTypes.FIELD(
                                        "type_timestamp_millis", DataTypes.TIMESTAMP(3).notNull()),
                                DataTypes.FIELD(
                                        "type_timestamp_micros", DataTypes.TIMESTAMP(6).notNull()),
                                DataTypes.FIELD(
                                        "type_decimal_bytes", DataTypes.DECIMAL(4, 2).notNull()),
                                DataTypes.FIELD(
                                        "type_decimal_fixed", DataTypes.DECIMAL(4, 2).notNull()))
                        .notNull();
        assertThat(actual).isEqualTo(user);
    }
}
| AvroSchemaConverterTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java | {
"start": 17141,
"end": 72895
} | class ____ extends SubjectInheritingThread {
private final long ttl;
private final long ttlInterval;
EntityDeletionThread(Configuration conf) {
ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
DEFAULT_TIMELINE_SERVICE_TTL_MS);
ttlInterval = conf.getLong(
TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
LOG.info("Starting deletion thread with ttl " + ttl + " and cycle "
+ "interval " + ttlInterval);
}
@Override
public void work() {
Thread.currentThread().setName("Leveldb Timeline Store Retention");
while (true) {
long timestamp = System.currentTimeMillis() - ttl;
try {
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
LOG.error(e.toString());
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
}
}
}
}
@Override
public TimelineEntity getEntity(String entityId, String entityType,
EnumSet<Field> fields) throws IOException {
Long revStartTime = getStartTimeLong(entityId, entityType);
if (revStartTime == null) {
LOG.debug("Could not find start time for {} {} ", entityType, entityId);
return null;
}
byte[] prefix = KeyBuilder.newInstance().add(entityType)
.add(writeReverseOrderedLong(revStartTime)).add(entityId)
.getBytesForLookup();
DB db = entitydb.getDBForStartTime(revStartTime);
if (db == null) {
LOG.debug("Could not find db for {} {} ", entityType, entityId);
return null;
}
try (DBIterator iterator = db.iterator()) {
iterator.seek(prefix);
return getEntity(entityId, entityType, revStartTime, fields, iterator,
prefix, prefix.length);
}
}
  /**
   * Read an entity's fields from a db iterator that has been positioned at the
   * entity's first key. Iterates over consecutive keys sharing the given
   * prefix and populates only the requested fields.
   *
   * @param entityId the id of the entity being read
   * @param entityType the type of the entity being read
   * @param startTime the start time stamped onto the returned entity
   * @param fields the fields to populate; null means all fields
   * @param iterator a db iterator positioned at the entity's first key
   * @param prefix the key prefix shared by all of this entity's db entries
   * @param prefixlen the number of significant bytes in {@code prefix}
   * @return the assembled entity (never null; unrequested fields are nulled)
   * @throws IOException on a leveldb read error
   */
  private static TimelineEntity getEntity(String entityId, String entityType,
      Long startTime, EnumSet<Field> fields, DBIterator iterator,
      byte[] prefix, int prefixlen) throws IOException {
    // a null field set means "populate everything"
    if (fields == null) {
      fields = EnumSet.allOf(Field.class);
    }
    TimelineEntity entity = new TimelineEntity();
    boolean events = false;
    boolean lastEvent = false;
    if (fields.contains(Field.EVENTS)) {
      events = true;
    } else if (fields.contains(Field.LAST_EVENT_ONLY)) {
      lastEvent = true;
    } else {
      entity.setEvents(null);
    }
    boolean relatedEntities = false;
    if (fields.contains(Field.RELATED_ENTITIES)) {
      relatedEntities = true;
    } else {
      entity.setRelatedEntities(null);
    }
    boolean primaryFilters = false;
    if (fields.contains(Field.PRIMARY_FILTERS)) {
      primaryFilters = true;
    } else {
      entity.setPrimaryFilters(null);
    }
    boolean otherInfo = false;
    if (fields.contains(Field.OTHER_INFO)) {
      otherInfo = true;
    } else {
      entity.setOtherInfo(null);
    }
    // iterate through the entity's entry, parsing information if it is part
    // of a requested field
    for (; iterator.hasNext(); iterator.next()) {
      byte[] key = iterator.peekNext().getKey();
      if (!prefixMatches(prefix, prefixlen, key)) {
        // moved past the last key belonging to this entity
        break;
      }
      if (key.length == prefixlen) {
        // a key equal to the bare prefix (the entity marker) has no column
        continue;
      }
      // the byte immediately after the prefix identifies the column
      if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
        if (primaryFilters) {
          addPrimaryFilter(entity, key, prefixlen
              + PRIMARY_FILTERS_COLUMN.length);
        }
      } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
        if (otherInfo) {
          Object o = null;
          String keyStr = parseRemainingKey(key,
              prefixlen + OTHER_INFO_COLUMN.length);
          try {
            o = fstConf.asObject(iterator.peekNext().getValue());
            entity.addOtherInfo(keyStr, o);
          } catch (Exception ignore) {
            try {
              // Fall back to 2.24 parser
              o = fstConf224.asObject(iterator.peekNext().getValue());
              entity.addOtherInfo(keyStr, o);
            } catch (Exception e) {
              LOG.warn("Error while decoding "
                  + entityId + ":otherInfo:" + keyStr, e);
            }
          }
        }
      } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
        if (relatedEntities) {
          addRelatedEntity(entity, key, prefixlen
              + RELATED_ENTITIES_COLUMN.length);
        }
      } else if (key[prefixlen] == EVENTS_COLUMN[0]) {
        // LAST_EVENT_ONLY keeps just the first event encountered in key order
        if (events || (lastEvent && entity.getEvents().size() == 0)) {
          TimelineEvent event = getEntityEvent(null, key, prefixlen
              + EVENTS_COLUMN.length, iterator.peekNext().getValue());
          if (event != null) {
            entity.addEvent(event);
          }
        }
      } else if (key[prefixlen] == DOMAIN_ID_COLUMN[0]) {
        byte[] v = iterator.peekNext().getValue();
        String domainId = new String(v, UTF_8);
        entity.setDomainId(domainId);
      } else {
        LOG.warn(String.format("Found unexpected column for entity %s of "
            + "type %s (0x%02x)", entityId, entityType, key[prefixlen]));
      }
    }
    entity.setEntityId(entityId);
    entity.setEntityType(entityType);
    entity.setStartTime(startTime);
    return entity;
  }
  /**
   * Retrieves the stored events for each of the given entities, applying the
   * optional time window, event-type filter and per-entity limit. Entities
   * with no stored start time are silently omitted from the result.
   */
  @Override
  public TimelineEvents getEntityTimelines(String entityType,
      SortedSet<String> entityIds, Long limit, Long windowStart,
      Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
      return events;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap =
        new TreeMap<byte[], List<EntityIdentifier>>(
            new Comparator<byte[]>() {
              @Override
              public int compare(byte[] o1, byte[] o2) {
                return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0,
                    o2.length);
              }
            });
    // look up start times for the specified entities
    // skip entities with no start time
    for (String entityId : entityIds) {
      byte[] startTime = getStartTime(entityId, entityType);
      if (startTime != null) {
        List<EntityIdentifier> entities = startTimeMap.get(startTime);
        if (entities == null) {
          entities = new ArrayList<EntityIdentifier>();
          startTimeMap.put(startTime, entities);
        }
        entities.add(new EntityIdentifier(entityId, entityType));
      }
    }
    for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap
        .entrySet()) {
      // look up the events matching the given parameters (limit,
      // start time, end time, event types) for entities whose start times
      // were found and add the entities to the return list
      byte[] revStartTime = entry.getKey();
      for (EntityIdentifier entityIdentifier : entry.getValue()) {
        EventsOfOneEntity entity = new EventsOfOneEntity();
        entity.setEntityId(entityIdentifier.getId());
        entity.setEntityType(entityType);
        events.addEvent(entity);
        KeyBuilder kb = KeyBuilder.newInstance().add(entityType)
            .add(revStartTime).add(entityIdentifier.getId())
            .add(EVENTS_COLUMN);
        byte[] prefix = kb.getBytesForLookup();
        if (windowEnd == null) {
          // no upper bound on the event timestamp window
          windowEnd = Long.MAX_VALUE;
        }
        // event keys carry reverse-ordered timestamps, so seeking to the
        // reversed window end lands on the newest event inside the window
        byte[] revts = writeReverseOrderedLong(windowEnd);
        kb.add(revts);
        byte[] first = kb.getBytesForLookup();
        byte[] last = null;
        if (windowStart != null) {
          last = KeyBuilder.newInstance().add(prefix)
              .add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
        }
        if (limit == null) {
          limit = DEFAULT_LIMIT;
        }
        DB db = entitydb.getDBForStartTime(readReverseOrderedLong(
            revStartTime, 0));
        if (db == null) {
          // no rolling db covers this start time (presumably aged out) —
          // skip the entity rather than fail
          continue;
        }
        try (DBIterator iterator = db.iterator()) {
          for (iterator.seek(first); entity.getEvents().size() < limit
              && iterator.hasNext(); iterator.next()) {
            byte[] key = iterator.peekNext().getKey();
            if (!prefixMatches(prefix, prefix.length, key)
                || (last != null && WritableComparator.compareBytes(key, 0,
                    key.length, last, 0, last.length) > 0)) {
              break;
            }
            TimelineEvent event = getEntityEvent(eventType, key, prefix.length,
                iterator.peekNext().getValue());
            if (event != null) {
              entity.addEvent(event);
            }
          }
        }
      }
    }
    return events;
  }
@Override
public TimelineEntities getEntities(String entityType, Long limit,
Long windowStart, Long windowEnd, String fromId, Long fromTs,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
if (primaryFilter == null) {
// if no primary filter is specified, prefix the lookup with
// ENTITY_ENTRY_PREFIX
return getEntityByTime(EMPTY_BYTES, entityType, limit, windowStart,
windowEnd, fromId, fromTs, secondaryFilters, fields, checkAcl, false);
} else {
// if a primary filter is specified, prefix the lookup with
// INDEXED_ENTRY_PREFIX + primaryFilterName + primaryFilterValue +
// ENTITY_ENTRY_PREFIX
byte[] base = KeyBuilder.newInstance().add(primaryFilter.getName())
.add(fstConf.asByteArray(primaryFilter.getValue()), true)
.getBytesForLookup();
return getEntityByTime(base, entityType, limit, windowStart, windowEnd,
fromId, fromTs, secondaryFilters, fields, checkAcl, true);
}
}
  /**
   * Retrieves a list of entities satisfying given parameters.
   *
   * @param base
   *          A byte array prefix for the lookup
   * @param entityType
   *          The type of the entity
   * @param limit
   *          A limit on the number of entities to return
   * @param starttime
   *          The earliest entity start time to retrieve (exclusive)
   * @param endtime
   *          The latest entity start time to retrieve (inclusive)
   * @param fromId
   *          Retrieve entities starting with this entity
   * @param fromTs
   *          Ignore entities with insert timestamp later than this ts
   * @param secondaryFilters
   *          Filter pairs that the entities should match
   * @param fields
   *          The set of fields to retrieve
   * @param checkAcl
   *          ACL check applied to each candidate entity; null to skip checks
   * @param usingPrimaryFilter
   *          true if this query is using a primary filter
   * @return A list of entities
   * @throws IOException
   */
  private TimelineEntities getEntityByTime(byte[] base, String entityType,
      Long limit, Long starttime, Long endtime, String fromId, Long fromTs,
      Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields,
      CheckAcl checkAcl, boolean usingPrimaryFilter) throws IOException {
    KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
    // only db keys matching the prefix (base + entity type) will be parsed
    byte[] prefix = kb.getBytesForLookup();
    if (endtime == null) {
      // if end time is null, place no restriction on end time
      endtime = Long.MAX_VALUE;
    }
    // Sanitize the fields parameter
    if (fields == null) {
      fields = EnumSet.allOf(Field.class);
    }
    // construct a first key that will be seeked to using end time or fromId
    long firstStartTime = Long.MAX_VALUE;
    byte[] first = null;
    if (fromId != null) {
      Long fromIdStartTime = getStartTimeLong(fromId, entityType);
      if (fromIdStartTime == null) {
        // no start time for provided id, so return empty entities
        return new TimelineEntities();
      }
      if (fromIdStartTime <= endtime) {
        // if provided id's start time falls before the end of the window,
        // use it to construct the seek key
        firstStartTime = fromIdStartTime;
        first = kb.add(writeReverseOrderedLong(fromIdStartTime)).add(fromId)
            .getBytesForLookup();
      }
    }
    // if seek key wasn't constructed using fromId, construct it using end ts
    if (first == null) {
      firstStartTime = endtime;
      first = kb.add(writeReverseOrderedLong(endtime)).getBytesForLookup();
    }
    byte[] last = null;
    if (starttime != null) {
      // if start time is not null, set a last key that will not be
      // iterated past
      last = KeyBuilder.newInstance().add(base).add(entityType)
          .add(writeReverseOrderedLong(starttime)).getBytesForLookup();
    }
    if (limit == null) {
      // if limit is not specified, use the default
      limit = DEFAULT_LIMIT;
    }
    TimelineEntities entities = new TimelineEntities();
    // index scans read the index db; plain scans read the entity db
    RollingLevelDB rollingdb = null;
    if (usingPrimaryFilter) {
      rollingdb = indexdb;
    } else {
      rollingdb = entitydb;
    }
    DB db = rollingdb.getDBForStartTime(firstStartTime);
    // walk backwards through the rolling dbs until the limit is reached or
    // no older db remains
    while (entities.getEntities().size() < limit && db != null) {
      try (DBIterator iterator = db.iterator()) {
        iterator.seek(first);
        // iterate until one of the following conditions is met: limit is
        // reached, there are no more keys, the key prefix no longer matches,
        // or a start time has been specified and reached/exceeded
        while (entities.getEntities().size() < limit && iterator.hasNext()) {
          byte[] key = iterator.peekNext().getKey();
          if (!prefixMatches(prefix, prefix.length, key)
              || (last != null && WritableComparator.compareBytes(key, 0,
                  key.length, last, 0, last.length) > 0)) {
            break;
          }
          // read the start time and entity id from the current key
          KeyParser kp = new KeyParser(key, prefix.length);
          Long startTime = kp.getNextLong();
          String entityId = kp.getNextString();
          if (fromTs != null) {
            long insertTime = readReverseOrderedLong(iterator.peekNext()
                .getValue(), 0);
            if (insertTime > fromTs) {
              // entity was inserted after the snapshot timestamp; skip all
              // of its keys and move on to the next entity
              byte[] firstKey = key;
              while (iterator.hasNext()) {
                key = iterator.peekNext().getKey();
                iterator.next();
                if (!prefixMatches(firstKey, kp.getOffset(), key)) {
                  break;
                }
              }
              continue;
            }
          }
          // Even if other info and primary filter fields are not included, we
          // still need to load them to match secondary filters when they are
          // non-empty
          EnumSet<Field> queryFields = EnumSet.copyOf(fields);
          boolean addPrimaryFilters = false;
          boolean addOtherInfo = false;
          if (secondaryFilters != null && secondaryFilters.size() > 0) {
            if (!queryFields.contains(Field.PRIMARY_FILTERS)) {
              queryFields.add(Field.PRIMARY_FILTERS);
              addPrimaryFilters = true;
            }
            if (!queryFields.contains(Field.OTHER_INFO)) {
              queryFields.add(Field.OTHER_INFO);
              addOtherInfo = true;
            }
          }
          // parse the entity that owns this key, iterating over all keys for
          // the entity
          TimelineEntity entity = null;
          if (usingPrimaryFilter) {
            entity = getEntity(entityId, entityType, queryFields);
            iterator.next();
          } else {
            entity = getEntity(entityId, entityType, startTime, queryFields,
                iterator, key, kp.getOffset());
          }
          if (entity != null) {
            // determine if the retrieved entity matches the provided secondary
            // filters, and if so add it to the list of entities to return
            boolean filterPassed = true;
            if (secondaryFilters != null) {
              for (NameValuePair filter : secondaryFilters) {
                Object v = entity.getOtherInfo().get(filter.getName());
                if (v == null) {
                  Set<Object> vs = entity.getPrimaryFilters()
                      .get(filter.getName());
                  if (vs == null || !vs.contains(filter.getValue())) {
                    filterPassed = false;
                    break;
                  }
                } else if (!v.equals(filter.getValue())) {
                  filterPassed = false;
                  break;
                }
              }
            }
            if (filterPassed) {
              if (entity.getDomainId() == null) {
                entity.setDomainId(DEFAULT_DOMAIN_ID);
              }
              if (checkAcl == null || checkAcl.check(entity)) {
                // Remove primary filter and other info if they are added for
                // matching secondary filters
                if (addPrimaryFilters) {
                  entity.setPrimaryFilters(null);
                }
                if (addOtherInfo) {
                  entity.setOtherInfo(null);
                }
                entities.addEntity(entity);
              }
            }
          }
        }
        db = rollingdb.getPreviousDB(db);
      }
    }
    return entities;
  }
  /**
   * Put a single entity. If there is an error, add a TimelinePutError to the
   * given response.
   *
   * @param entityUpdates
   *          a map containing all the scheduled writes for this put to the
   *          entity db
   * @param indexUpdates
   *          a map containing all the scheduled writes for this put to the
   *          index db
   * @param entity
   *          the entity to schedule for writing
   * @param response
   *          the response that collects per-entity errors
   * @return the number of leveldb entries scheduled for writing
   */
  private long putEntities(TreeMap<Long, RollingWriteBatch> entityUpdates,
      TreeMap<Long, RollingWriteBatch> indexUpdates, TimelineEntity entity,
      TimelinePutResponse response) {
    long putCount = 0;
    // related entities whose start times are not yet known are written in a
    // second pass, after this entity's start time has been established
    List<EntityIdentifier> relatedEntitiesWithoutStartTimes =
        new ArrayList<EntityIdentifier>();
    byte[] revStartTime = null;
    Map<String, Set<Object>> primaryFilters = null;
    try {
      List<TimelineEvent> events = entity.getEvents();
      // look up the start time for the entity
      Long startTime = getAndSetStartTime(entity.getEntityId(),
          entity.getEntityType(), entity.getStartTime(), events);
      if (startTime == null) {
        // if no start time is found, add an error and return
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.NO_START_TIME);
        response.addError(error);
        return putCount;
      }
      // Must have a domain
      if (StringUtils.isEmpty(entity.getDomainId())) {
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.NO_DOMAIN);
        response.addError(error);
        return putCount;
      }
      revStartTime = writeReverseOrderedLong(startTime);
      // all writes for one rolling period share a single write batch
      long roundedStartTime = entitydb.computeCurrentCheckMillis(startTime);
      RollingWriteBatch rollingWriteBatch = entityUpdates.get(roundedStartTime);
      if (rollingWriteBatch == null) {
        DB db = entitydb.getDBForStartTime(startTime);
        if (db != null) {
          WriteBatch writeBatch = db.createWriteBatch();
          rollingWriteBatch = new RollingWriteBatch(db, writeBatch);
          entityUpdates.put(roundedStartTime, rollingWriteBatch);
        }
      }
      if (rollingWriteBatch == null) {
        // no db covers this start time: the entity falls outside the
        // retention window, so record it as expired
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
        response.addError(error);
        return putCount;
      }
      WriteBatch writeBatch = rollingWriteBatch.getWriteBatch();
      // Save off the getBytes conversion to avoid unnecessary cost
      byte[] entityIdBytes = entity.getEntityId().getBytes(UTF_8);
      byte[] entityTypeBytes = entity.getEntityType().getBytes(UTF_8);
      byte[] domainIdBytes = entity.getDomainId().getBytes(UTF_8);
      // write entity marker
      byte[] markerKey = KeyBuilder.newInstance(3).add(entityTypeBytes, true)
          .add(revStartTime).add(entityIdBytes, true).getBytesForLookup();
      writeBatch.put(markerKey, EMPTY_BYTES);
      ++putCount;
      // write domain id entry
      byte[] domainkey = KeyBuilder.newInstance(4).add(entityTypeBytes, true)
          .add(revStartTime).add(entityIdBytes, true).add(DOMAIN_ID_COLUMN)
          .getBytes();
      writeBatch.put(domainkey, domainIdBytes);
      ++putCount;
      // write event entries
      if (events != null) {
        for (TimelineEvent event : events) {
          byte[] revts = writeReverseOrderedLong(event.getTimestamp());
          byte[] key = KeyBuilder.newInstance().add(entityTypeBytes, true)
              .add(revStartTime).add(entityIdBytes, true).add(EVENTS_COLUMN)
              .add(revts).add(event.getEventType().getBytes(UTF_8)).getBytes();
          byte[] value = fstConf.asByteArray(event.getEventInfo());
          writeBatch.put(key, value);
          ++putCount;
        }
      }
      // write primary filter entries
      primaryFilters = entity.getPrimaryFilters();
      if (primaryFilters != null) {
        for (Entry<String, Set<Object>> primaryFilter : primaryFilters
            .entrySet()) {
          for (Object primaryFilterValue : primaryFilter.getValue()) {
            byte[] key = KeyBuilder.newInstance(6).add(entityTypeBytes, true)
                .add(revStartTime).add(entityIdBytes, true)
                .add(PRIMARY_FILTERS_COLUMN).add(primaryFilter.getKey())
                .add(fstConf.asByteArray(primaryFilterValue)).getBytes();
            writeBatch.put(key, EMPTY_BYTES);
            ++putCount;
          }
        }
      }
      // write other info entries
      Map<String, Object> otherInfo = entity.getOtherInfo();
      if (otherInfo != null) {
        for (Entry<String, Object> info : otherInfo.entrySet()) {
          byte[] key = KeyBuilder.newInstance(5).add(entityTypeBytes, true)
              .add(revStartTime).add(entityIdBytes, true)
              .add(OTHER_INFO_COLUMN).add(info.getKey()).getBytes();
          byte[] value = fstConf.asByteArray(info.getValue());
          writeBatch.put(key, value);
          ++putCount;
        }
      }
      // write related entity entries
      Map<String, Set<String>> relatedEntities = entity.getRelatedEntities();
      if (relatedEntities != null) {
        for (Entry<String, Set<String>> relatedEntityList : relatedEntities
            .entrySet()) {
          String relatedEntityType = relatedEntityList.getKey();
          for (String relatedEntityId : relatedEntityList.getValue()) {
            // look up start time of related entity
            Long relatedStartTimeLong = getStartTimeLong(relatedEntityId,
                relatedEntityType);
            // delay writing the related entity if no start time is found
            if (relatedStartTimeLong == null) {
              relatedEntitiesWithoutStartTimes.add(new EntityIdentifier(
                  relatedEntityId, relatedEntityType));
              continue;
            }
            byte[] relatedEntityStartTime =
                writeReverseOrderedLong(relatedStartTimeLong);
            long relatedRoundedStartTime = entitydb
                .computeCurrentCheckMillis(relatedStartTimeLong);
            RollingWriteBatch relatedRollingWriteBatch = entityUpdates
                .get(relatedRoundedStartTime);
            if (relatedRollingWriteBatch == null) {
              DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
              if (db != null) {
                WriteBatch relatedWriteBatch = db.createWriteBatch();
                relatedRollingWriteBatch = new RollingWriteBatch(db,
                    relatedWriteBatch);
                entityUpdates.put(relatedRoundedStartTime,
                    relatedRollingWriteBatch);
              }
            }
            if (relatedRollingWriteBatch == null) {
              // no db covers the related entity's start time: it falls
              // outside the retention window, so record it as expired
              TimelinePutError error = new TimelinePutError();
              error.setEntityId(entity.getEntityId());
              error.setEntityType(entity.getEntityType());
              error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
              response.addError(error);
              continue;
            }
            // This is the existing entity
            byte[] relatedDomainIdBytes = relatedRollingWriteBatch.getDB().get(
                createDomainIdKey(relatedEntityId, relatedEntityType,
                    relatedEntityStartTime));
            // The timeline data created by the server before 2.6 won't have
            // the domain field. We assume this timeline data is in the
            // default timeline domain.
            String domainId = null;
            if (relatedDomainIdBytes == null) {
              domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
            } else {
              domainId = new String(relatedDomainIdBytes, UTF_8);
            }
            if (!domainId.equals(entity.getDomainId())) {
              // in this case the entity will be put, but the relation will be
              // ignored
              TimelinePutError error = new TimelinePutError();
              error.setEntityId(entity.getEntityId());
              error.setEntityType(entity.getEntityType());
              error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
              response.addError(error);
              continue;
            }
            // write "forward" entry (related entity -> entity)
            byte[] key = createRelatedEntityKey(relatedEntityId,
                relatedEntityType, relatedEntityStartTime,
                entity.getEntityId(), entity.getEntityType());
            WriteBatch relatedWriteBatch = relatedRollingWriteBatch
                .getWriteBatch();
            relatedWriteBatch.put(key, EMPTY_BYTES);
            ++putCount;
          }
        }
      }
      // write index entities
      RollingWriteBatch indexRollingWriteBatch = indexUpdates
          .get(roundedStartTime);
      if (indexRollingWriteBatch == null) {
        DB db = indexdb.getDBForStartTime(startTime);
        if (db != null) {
          WriteBatch indexWriteBatch = db.createWriteBatch();
          indexRollingWriteBatch = new RollingWriteBatch(db, indexWriteBatch);
          indexUpdates.put(roundedStartTime, indexRollingWriteBatch);
        }
      }
      if (indexRollingWriteBatch == null) {
        // no index db covers this start time: the entity falls outside the
        // retention window, so record it as expired
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
        response.addError(error);
        return putCount;
      }
      WriteBatch indexWriteBatch = indexRollingWriteBatch.getWriteBatch();
      putCount += writePrimaryFilterEntries(indexWriteBatch, primaryFilters,
          markerKey, EMPTY_BYTES);
    } catch (IOException e) {
      LOG.error("Error putting entity " + entity.getEntityId() + " of type "
          + entity.getEntityType(), e);
      TimelinePutError error = new TimelinePutError();
      error.setEntityId(entity.getEntityId());
      error.setEntityType(entity.getEntityType());
      error.setErrorCode(TimelinePutError.IO_EXCEPTION);
      response.addError(error);
    }
    // second pass: related entities that had no stored start time inherit
    // this entity's start time and domain
    for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
      try {
        Long relatedEntityStartAndInsertTime = getAndSetStartTime(
            relatedEntity.getId(), relatedEntity.getType(),
            readReverseOrderedLong(revStartTime, 0), null);
        if (relatedEntityStartAndInsertTime == null) {
          throw new IOException("Error setting start time for related entity");
        }
        long relatedStartTimeLong = relatedEntityStartAndInsertTime;
        long relatedRoundedStartTime = entitydb
            .computeCurrentCheckMillis(relatedStartTimeLong);
        RollingWriteBatch relatedRollingWriteBatch = entityUpdates
            .get(relatedRoundedStartTime);
        if (relatedRollingWriteBatch == null) {
          DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
          if (db != null) {
            WriteBatch relatedWriteBatch = db.createWriteBatch();
            relatedRollingWriteBatch = new RollingWriteBatch(db,
                relatedWriteBatch);
            entityUpdates
                .put(relatedRoundedStartTime, relatedRollingWriteBatch);
          }
        }
        if (relatedRollingWriteBatch == null) {
          // no db covers the related entity's start time: it falls outside
          // the retention window, so record it as expired
          TimelinePutError error = new TimelinePutError();
          error.setEntityId(entity.getEntityId());
          error.setEntityType(entity.getEntityType());
          error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
          response.addError(error);
          continue;
        }
        WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
        byte[] relatedEntityStartTime =
            writeReverseOrderedLong(relatedEntityStartAndInsertTime);
        // This is the new entity, the domain should be the same
        byte[] key = createDomainIdKey(relatedEntity.getId(),
            relatedEntity.getType(), relatedEntityStartTime);
        relatedWriteBatch.put(key, entity.getDomainId().getBytes(UTF_8));
        ++putCount;
        relatedWriteBatch.put(
            createRelatedEntityKey(relatedEntity.getId(),
                relatedEntity.getType(), relatedEntityStartTime,
                entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
        ++putCount;
        relatedWriteBatch.put(
            createEntityMarkerKey(relatedEntity.getId(),
                relatedEntity.getType(), relatedEntityStartTime), EMPTY_BYTES);
        ++putCount;
      } catch (IOException e) {
        LOG.error(
            "Error putting related entity " + relatedEntity.getId()
                + " of type " + relatedEntity.getType() + " for entity "
                + entity.getEntityId() + " of type " + entity.getEntityType(),
            e);
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.IO_EXCEPTION);
        response.addError(error);
      }
    }
    return putCount;
  }
/**
* For a given key / value pair that has been written to the db, write
* additional entries to the db for each primary filter.
*/
private static long writePrimaryFilterEntries(WriteBatch writeBatch,
Map<String, Set<Object>> primaryFilters, byte[] key, byte[] value)
throws IOException {
long putCount = 0;
if (primaryFilters != null) {
for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
for (Object pfval : pf.getValue()) {
writeBatch.put(addPrimaryFilterToKey(pf.getKey(), pfval, key), value);
++putCount;
}
}
}
return putCount;
}
  /**
   * Stores the given entities, accumulating all leveldb writes in one
   * {@link RollingWriteBatch} per rolling-period db (for both the entity and
   * index stores) and committing each batch at the end.
   *
   * Per-entity failures are recorded in the returned response by
   * {@code putEntities} rather than aborting the whole put.
   *
   * @param entities the entities to store
   * @return a response listing any per-entity errors encountered
   */
  @Override
  public TimelinePutResponse put(TimelineEntities entities) {
    LOG.debug("Starting put");
    TimelinePutResponse response = new TimelinePutResponse();
    // One pending write batch per rolling db, keyed by the db's start time.
    TreeMap<Long, RollingWriteBatch> entityUpdates =
        new TreeMap<Long, RollingWriteBatch>();
    TreeMap<Long, RollingWriteBatch> indexUpdates =
        new TreeMap<Long, RollingWriteBatch>();
    long entityCount = 0;
    long indexCount = 0;
    try {
      for (TimelineEntity entity : entities.getEntities()) {
        entityCount += putEntities(entityUpdates, indexUpdates, entity,
            response);
      }
      // Commit all accumulated batches once every entity has been staged.
      for (RollingWriteBatch entityUpdate : entityUpdates.values()) {
        entityUpdate.write();
      }
      for (RollingWriteBatch indexUpdate : indexUpdates.values()) {
        indexUpdate.write();
      }
    } finally {
      // Always release the native write-batch resources, even on failure.
      for (RollingWriteBatch entityRollingWriteBatch : entityUpdates.values()) {
        entityRollingWriteBatch.close();
      }
      for (RollingWriteBatch indexRollingWriteBatch : indexUpdates.values()) {
        indexRollingWriteBatch.close();
      }
    }
    // NOTE(review): indexCount is never incremented in this method, so the
    // logged index-entry count is always 0 — confirm whether putEntities was
    // meant to report it.
    LOG.debug("Put {} new leveldb entity entries and {} new leveldb index"
        + " entries from {} timeline entities", entityCount, indexCount,
        entities.getEntities().size());
    return response;
  }
/**
* Get the unique start time for a given entity as a byte array that sorts the
* timestamps in reverse order (see
* {@link GenericObjectMapper#writeReverseOrderedLong(long)}).
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @return A byte array, null if not found
* @throws IOException
*/
private byte[] getStartTime(String entityId, String entityType)
throws IOException {
Long l = getStartTimeLong(entityId, entityType);
return l == null ? null : writeReverseOrderedLong(l);
}
/**
* Get the unique start time for a given entity as a Long.
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @return A Long, null if not found
* @throws IOException
*/
private Long getStartTimeLong(String entityId, String entityType)
throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
// start time is not provided, so try to look it up
if (startTimeReadCache.containsKey(entity)) {
// found the start time in the cache
return startTimeReadCache.get(entity);
} else {
// try to look up the start time in the db
byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
byte[] v = starttimedb.get(b);
if (v == null) {
// did not find the start time in the db
return null;
} else {
// found the start time in the db
Long l = readReverseOrderedLong(v, 0);
startTimeReadCache.put(entity, l);
return l;
}
}
}
/**
* Get the unique start time for a given entity as a byte array that sorts the
* timestamps in reverse order (see
* {@link GenericObjectMapper#writeReverseOrderedLong(long)}). If the start
* time doesn't exist, set it based on the information provided.
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @param startTime
* The start time of the entity, or null
* @param events
* A list of events for the entity, or null
* @return A StartAndInsertTime
* @throws IOException
*/
private Long getAndSetStartTime(String entityId,
String entityType, Long startTime, List<TimelineEvent> events)
throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
Long time = startTimeWriteCache.get(entity);
if (time != null) {
// return the value in the cache
return time;
}
if (startTime == null && events != null) {
// calculate best guess start time based on lowest event time
startTime = Long.MAX_VALUE;
for (TimelineEvent e : events) {
if (e.getTimestamp() < startTime) {
startTime = e.getTimestamp();
}
}
}
// check the provided start time matches the db
return checkStartTimeInDb(entity, startTime);
}
  /**
   * Checks db for start time and returns it if it exists. If it doesn't exist,
   * writes the suggested start time (if it is not null). This is only called
   * when the start time is not found in the cache, so it adds it back into the
   * cache if it is found.
   *
   * @param entity identifies the entity whose start time is being resolved
   * @param suggestedStartTime the start time to persist when none is stored
   *          yet; may be null
   * @return the authoritative start time, or null when nothing is stored and
   *         no suggestion was given
   * @throws IOException on a leveldb read/write error
   */
  private Long checkStartTimeInDb(EntityIdentifier entity,
      Long suggestedStartTime) throws IOException {
    Long startAndInsertTime = null;
    // create lookup key for start time
    byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
    // The read-check-write below must be atomic so that concurrent puts for
    // the same entity agree on a single start time.
    synchronized (this) {
      // retrieve value for key
      byte[] v = starttimedb.get(b);
      if (v == null) {
        // start time doesn't exist in db
        if (suggestedStartTime == null) {
          return null;
        }
        startAndInsertTime = suggestedStartTime;
        // write suggested start time
        starttimedb.put(b, writeReverseOrderedLong(suggestedStartTime));
      } else {
        // found start time in db, so ignore suggested start time
        startAndInsertTime = readReverseOrderedLong(v, 0);
      }
    }
    // Cache updates happen outside the lock; concurrent resolvers will have
    // obtained the same value from the db under the lock above.
    startTimeWriteCache.put(entity, startAndInsertTime);
    startTimeReadCache.put(entity, startAndInsertTime);
    return startAndInsertTime;
  }
  /**
   * Creates a key for looking up the start time of a given entity, of the form
   * entity type + entity id. No prefix byte is added — start times are read
   * and written against the dedicated starttimedb, so the key space is not
   * shared with other record kinds.
   */
  private static byte[] createStartTimeLookupKey(String entityId,
      String entityType) throws IOException {
    return KeyBuilder.newInstance().add(entityType).add(entityId).getBytes();
  }
  /**
   * Creates an entity marker key of the form entity type + revstarttime +
   * entity id, encoded for lookup. No prefix byte is added; entity records
   * live in their own (rolling) db.
   */
  private static byte[] createEntityMarkerKey(String entityId,
      String entityType, byte[] revStartTime) throws IOException {
    return KeyBuilder.newInstance().add(entityType).add(revStartTime)
        .add(entityId).getBytesForLookup();
  }
  /**
   * Creates an index entry for the given key of the form
   * primaryfiltername + serialized primaryfiltervalue + key. The filter value
   * is serialized with the configured fst serializer before being embedded.
   */
  private static byte[] addPrimaryFilterToKey(String primaryFilterName,
      Object primaryFilterValue, byte[] key) throws IOException {
    return KeyBuilder.newInstance().add(primaryFilterName)
        .add(fstConf.asByteArray(primaryFilterValue), true).add(key).getBytes();
  }
  /**
   * Creates an event object from the given key, offset, and value. If the event
   * type is not contained in the specified set of event types, returns null.
   *
   * @param eventTypes the event types to accept, or null to accept all types
   * @param key the db key holding the timestamp and event type
   * @param offset where the event fields begin within the key
   * @param value the serialized event info map
   * @return the decoded event, or null when its type is filtered out
   * @throws IOException if the value deserializes to something other than a
   *           map
   */
  private static TimelineEvent getEntityEvent(Set<String> eventTypes,
      byte[] key, int offset, byte[] value) throws IOException {
    KeyParser kp = new KeyParser(key, offset);
    long ts = kp.getNextLong();
    String tstype = kp.getNextString();
    if (eventTypes == null || eventTypes.contains(tstype)) {
      TimelineEvent event = new TimelineEvent();
      event.setTimestamp(ts);
      event.setEventType(tstype);
      Object o = null;
      // Try the current serializer first, then the legacy one; decode
      // failures are tolerated so one bad value cannot fail the whole read.
      try {
        o = fstConf.asObject(value);
      } catch (Exception ignore) {
        try {
          // Fall back to 2.24 parser
          o = fstConf224.asObject(value);
        } catch (Exception e) {
          LOG.warn("Error while decoding " + tstype, e);
        }
      }
      // If both decoders failed, o stays null and the event carries no info.
      if (o == null) {
        event.setEventInfo(null);
      } else if (o instanceof Map) {
        @SuppressWarnings("unchecked")
        Map<String, Object> m = (Map<String, Object>) o;
        event.setEventInfo(m);
      } else {
        throw new IOException("Couldn't deserialize event info map");
      }
      return event;
    }
    return null;
  }
  /**
   * Parses the primary filter from the given key at the given offset and adds
   * it to the given entity.
   *
   * @param entity the entity to attach the parsed filter to
   * @param key the db key containing the filter name and serialized value
   * @param offset where the filter fields begin within the key
   * @throws IOException on a key parsing error
   */
  private static void addPrimaryFilter(TimelineEntity entity, byte[] key,
      int offset) throws IOException {
    KeyParser kp = new KeyParser(key, offset);
    String name = kp.getNextString();
    byte[] bytes = kp.getRemainingBytes();
    Object value = null;
    // Try the current serializer first, then the legacy one; if both fail
    // the filter is skipped (with a warning) rather than failing the read.
    try {
      value = fstConf.asObject(bytes);
      entity.addPrimaryFilter(name, value);
    } catch (Exception ignore) {
      try {
        // Fall back to 2.24 parser
        value = fstConf224.asObject(bytes);
        entity.addPrimaryFilter(name, value);
      } catch (Exception e) {
        LOG.warn("Error while decoding " + name, e);
      }
    }
  }
/**
* Creates a string representation of the byte array from the given offset to
* the end of the array (for parsing other info keys).
*/
private static String parseRemainingKey(byte[] b, int offset) {
return new String(b, offset, b.length - offset, UTF_8);
}
  /**
   * Creates a related entity key of the form entity type + revstarttime +
   * entity id + RELATED_ENTITIES_COLUMN + relatedentity type + relatedentity
   * id. No prefix byte is added; entity records live in their own db.
   */
  private static byte[] createRelatedEntityKey(String entityId,
      String entityType, byte[] revStartTime, String relatedEntityId,
      String relatedEntityType) throws IOException {
    return KeyBuilder.newInstance().add(entityType).add(revStartTime)
        .add(entityId).add(RELATED_ENTITIES_COLUMN).add(relatedEntityType)
        .add(relatedEntityId).getBytes();
  }
/**
* Parses the related entity from the given key at the given offset and adds
* it to the given entity.
*/
private static void addRelatedEntity(TimelineEntity entity, byte[] key,
int offset) throws IOException {
KeyParser kp = new KeyParser(key, offset);
String type = kp.getNextString();
String id = kp.getNextString();
entity.addRelatedEntity(type, id);
}
  /**
   * Creates a domain id key of the form entity type + revstarttime + entity
   * id + DOMAIN_ID_COLUMN. No prefix byte is added; entity records live in
   * their own db.
   */
  private static byte[] createDomainIdKey(String entityId, String entityType,
      byte[] revStartTime) throws IOException {
    return KeyBuilder.newInstance().add(entityType).add(revStartTime)
        .add(entityId).add(DOMAIN_ID_COLUMN).getBytes();
  }
  /**
   * Empties both start time caches so subsequent lookups must reload start
   * times from leveldb (only for testing).
   */
  @VisibleForTesting
  void clearStartTimeCache() {
    startTimeWriteCache.clear();
    startTimeReadCache.clear();
  }
@VisibleForTesting
static int getStartTimeReadCacheSize(Configuration conf) {
return conf
.getInt(
TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
}
@VisibleForTesting
static int getStartTimeWriteCacheSize(Configuration conf) {
return conf
.getInt(
TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
}
  /**
   * Deletes start time entries whose start time is strictly earlier than the
   * given minimum, flushing the deletes in batches of {@code writeBatchSize}
   * so no single write holds the db lock for too long.
   *
   * @param minStartTime entries earlier than this are evicted
   * @return the number of start time entries deleted
   * @throws IOException on a leveldb error
   */
  @VisibleForTesting
  long evictOldStartTimes(long minStartTime) throws IOException {
    LOG.info("Searching for start times to evict earlier than " + minStartTime);
    long batchSize = 0;
    long totalCount = 0;
    long startTimesCount = 0;
    WriteBatch writeBatch = null;
    ReadOptions readOptions = new ReadOptions();
    // One-pass sweep: don't pollute the block cache with scanned entries.
    readOptions.fillCache(false);
    try (DBIterator iterator = starttimedb.iterator(readOptions)) {
      // seek to the first start time entry
      iterator.seekToFirst();
      writeBatch = starttimedb.createWriteBatch();
      // evaluate each start time entry to see if it needs to be evicted or not
      while (iterator.hasNext()) {
        Map.Entry<byte[], byte[]> current = iterator.next();
        byte[] entityKey = current.getKey();
        byte[] entityValue = current.getValue();
        // Values are reverse-ordered longs; decode before comparing.
        long startTime = readReverseOrderedLong(entityValue, 0);
        if (startTime < minStartTime) {
          ++batchSize;
          ++startTimesCount;
          writeBatch.delete(entityKey);
          // a large delete will hold the lock for too long
          if (batchSize >= writeBatchSize) {
            LOG.debug("Preparing to delete a batch of {} old start times",
                batchSize);
            starttimedb.write(writeBatch);
            LOG.debug("Deleted batch of {}. Total start times deleted"
                + " so far this cycle: {}", batchSize, startTimesCount);
            // Release the flushed batch and start a fresh one.
            IOUtils.cleanupWithLogger(LOG, writeBatch);
            writeBatch = starttimedb.createWriteBatch();
            batchSize = 0;
          }
        }
        ++totalCount;
      }
      // Flush whatever remains in the final (possibly empty) batch.
      LOG.debug("Preparing to delete a batch of {} old start times",
          batchSize);
      starttimedb.write(writeBatch);
      LOG.debug("Deleted batch of {}. Total start times deleted so far"
          + " this cycle: {}", batchSize, startTimesCount);
      LOG.info("Deleted " + startTimesCount + "/" + totalCount
          + " start time entities earlier than " + minStartTime);
    } finally {
      // Ensure the last batch's native resources are released even when
      // write() above throws.
      IOUtils.cleanupWithLogger(LOG, writeBatch);
    }
    return startTimesCount;
  }
/**
* Discards entities with start timestamp less than or equal to the given
* timestamp.
*/
@VisibleForTesting
void discardOldEntities(long timestamp) throws IOException,
InterruptedException {
long totalCount = 0;
long t1 = System.currentTimeMillis();
try {
totalCount += evictOldStartTimes(timestamp);
indexdb.evictOldDBs();
entitydb.evictOldDBs();
} finally {
long t2 = System.currentTimeMillis();
LOG.info("Discarded " + totalCount + " entities for timestamp "
+ timestamp + " and earlier in " + (t2 - t1) / 1000.0 + " seconds");
}
}
Version loadVersion() throws IOException {
byte[] data = starttimedb.get(bytes(TIMELINE_STORE_VERSION_KEY));
// if version is not stored previously, treat it as 1.0.
if (data == null || data.length == 0) {
return Version.newInstance(1, 0);
}
Version version = new VersionPBImpl(VersionProto.parseFrom(data));
return version;
}
  // Only used for test
  /**
   * Persists the given version (test hook delegating to
   * {@link #dbStoreVersion}).
   *
   * @param state the version to store
   * @throws IOException if the underlying db write fails
   */
  @VisibleForTesting
  void storeVersion(Version state) throws IOException {
    dbStoreVersion(state);
  }
private void dbStoreVersion(Version state) throws IOException {
String key = TIMELINE_STORE_VERSION_KEY;
byte[] data = ((VersionPBImpl) state).getProto().toByteArray();
try {
starttimedb.put(bytes(key), data);
} catch (DBException e) {
throw new IOException(e);
}
}
  /**
   * Returns the store-layout version this code writes and expects; see
   * {@link #checkVersion()} for how it is compared against the persisted
   * version.
   */
  Version getCurrentVersion() {
    return CURRENT_VERSION_INFO;
  }
/**
* 1) Versioning timeline store: major.minor. For e.g. 1.0, 1.1, 1.2...1.25,
* 2.0 etc. 2) Any incompatible change of TS-store is a major upgrade, and any
* compatible change of TS-store is a minor upgrade. 3) Within a minor
* upgrade, say 1.1 to 1.2: overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0: throw exception and indicate
* user to use a separate upgrade tool to upgrade timeline store or remove
* incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded timeline store version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing timeline store version info " + getCurrentVersion());
dbStoreVersion(CURRENT_VERSION_INFO);
} else {
String incompatibleMessage = "Incompatible version for timeline store: "
+ "expecting version " + getCurrentVersion()
+ ", but loading version " + loadedVersion;
LOG.error(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
  // TODO: make data retention work with the domain data as well
  /**
   * Stores (or overwrites) a domain in both the domain db (keyed by domain
   * id + column) and the owner db (keyed by owner + domain id + column),
   * writing one column per field. Both batches are committed at the end so
   * each db is updated atomically.
   *
   * @param domain the domain to persist; must have a non-empty id and owner
   * @throws IOException on a leveldb error
   * @throws IllegalArgumentException when the id or owner is missing
   */
  @Override
  public void put(TimelineDomain domain) throws IOException {
    try (WriteBatch domainWriteBatch = domaindb.createWriteBatch();
        WriteBatch ownerWriteBatch = ownerdb.createWriteBatch();) {
      if (domain.getId() == null || domain.getId().length() == 0) {
        throw new IllegalArgumentException("Domain doesn't have an ID");
      }
      if (domain.getOwner() == null || domain.getOwner().length() == 0) {
        throw new IllegalArgumentException("Domain doesn't have an owner.");
      }
      // Write description
      byte[] domainEntryKey = createDomainEntryKey(domain.getId(),
          DESCRIPTION_COLUMN);
      byte[] ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
          domain.getId(), DESCRIPTION_COLUMN);
      // Missing optional fields are stored as empty values so every domain
      // has a row per column.
      if (domain.getDescription() != null) {
        domainWriteBatch.put(domainEntryKey,
            domain.getDescription().getBytes(UTF_8));
        ownerWriteBatch.put(ownerLookupEntryKey, domain.getDescription()
            .getBytes(UTF_8));
      } else {
        domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
        ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
      }
      // Write owner
      domainEntryKey = createDomainEntryKey(domain.getId(), OWNER_COLUMN);
      ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
          domain.getId(), OWNER_COLUMN);
      // Null check for owner is done before
      domainWriteBatch.put(domainEntryKey, domain.getOwner().getBytes(UTF_8));
      ownerWriteBatch.put(ownerLookupEntryKey, domain.getOwner()
          .getBytes(UTF_8));
      // Write readers
      domainEntryKey = createDomainEntryKey(domain.getId(), READER_COLUMN);
      ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
          domain.getId(), READER_COLUMN);
      if (domain.getReaders() != null && domain.getReaders().length() > 0) {
        domainWriteBatch.put(domainEntryKey, domain.getReaders()
            .getBytes(UTF_8));
        ownerWriteBatch.put(ownerLookupEntryKey,
            domain.getReaders().getBytes(UTF_8));
      } else {
        domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
        ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
      }
      // Write writers
      domainEntryKey = createDomainEntryKey(domain.getId(), WRITER_COLUMN);
      ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
          domain.getId(), WRITER_COLUMN);
      if (domain.getWriters() != null && domain.getWriters().length() > 0) {
        domainWriteBatch.put(domainEntryKey, domain.getWriters()
            .getBytes(UTF_8));
        ownerWriteBatch.put(ownerLookupEntryKey,
            domain.getWriters().getBytes(UTF_8));
      } else {
        domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
        ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
      }
      // Write creation time and modification time
      // We put both timestamps together because they are always retrieved
      // together, and store them in the same way as we did for the entity's
      // start time and insert time.
      domainEntryKey = createDomainEntryKey(domain.getId(), TIMESTAMP_COLUMN);
      ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
          domain.getId(), TIMESTAMP_COLUMN);
      long currentTimestamp = System.currentTimeMillis();
      byte[] timestamps = domaindb.get(domainEntryKey);
      if (timestamps == null) {
        // First write: created time (offset 0) and modified time (offset 8)
        // are both set to now.
        timestamps = new byte[16];
        writeReverseOrderedLong(currentTimestamp, timestamps, 0);
        writeReverseOrderedLong(currentTimestamp, timestamps, 8);
      } else {
        // Update: keep the original created time, refresh the modified time.
        writeReverseOrderedLong(currentTimestamp, timestamps, 8);
      }
      domainWriteBatch.put(domainEntryKey, timestamps);
      ownerWriteBatch.put(ownerLookupEntryKey, timestamps);
      domaindb.write(domainWriteBatch);
      ownerdb.write(ownerWriteBatch);
    }
  }
  /**
   * Creates a domain entity key with column name suffix, of the form
   * domain id + column name. No prefix byte is added; domain records are
   * stored in the dedicated domaindb.
   */
  private static byte[] createDomainEntryKey(String domainId, byte[] columnName)
      throws IOException {
    return KeyBuilder.newInstance().add(domainId).add(columnName).getBytes();
  }
  /**
   * Creates an owner lookup key with column name suffix, of the form
   * owner + domain id + column name. No prefix byte is added; owner lookup
   * records are stored in the dedicated ownerdb.
   */
  private static byte[] createOwnerLookupKey(String owner, String domainId,
      byte[] columnName) throws IOException {
    return KeyBuilder.newInstance().add(owner).add(domainId).add(columnName)
        .getBytes();
  }
@Override
public TimelineDomain getDomain(String domainId) throws IOException {
try (DBIterator iterator = domaindb.iterator()) {
byte[] prefix = KeyBuilder.newInstance().add(domainId)
.getBytesForLookup();
iterator.seek(prefix);
return getTimelineDomain(iterator, domainId, prefix);
}
}
  /**
   * Returns all domains belonging to the given owner, sorted newest first by
   * created time, with ties broken by modified time (also newest first).
   *
   * @param owner the owner whose domains to list
   * @return the owner's domains in descending chronological order
   * @throws IOException on a leveldb read error
   */
  @Override
  public TimelineDomains getDomains(String owner) throws IOException {
    try (DBIterator iterator = ownerdb.iterator()) {
      byte[] prefix = KeyBuilder.newInstance().add(owner).getBytesForLookup();
      iterator.seek(prefix);
      List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
      while (iterator.hasNext()) {
        byte[] key = iterator.peekNext().getKey();
        if (!prefixMatches(prefix, prefix.length, key)) {
          // Walked past this owner's key range; done.
          break;
        }
        // Iterator to parse the rows of an individual domain
        KeyParser kp = new KeyParser(key, prefix.length);
        String domainId = kp.getNextString();
        byte[] prefixExt = KeyBuilder.newInstance().add(owner).add(domainId)
            .getBytesForLookup();
        // getTimelineDomain consumes all rows of this domain, leaving the
        // iterator positioned at the next domain (or past the owner's range).
        TimelineDomain domainToReturn = getTimelineDomain(iterator, domainId,
            prefixExt);
        if (domainToReturn != null) {
          domains.add(domainToReturn);
        }
      }
      // Sort the domains to return
      Collections.sort(domains, new Comparator<TimelineDomain>() {
        @Override
        public int compare(TimelineDomain domain1, TimelineDomain domain2) {
          // Descending created time, then descending modified time.
          int result = domain2.getCreatedTime().compareTo(
              domain1.getCreatedTime());
          if (result == 0) {
            return domain2.getModifiedTime().compareTo(
                domain1.getModifiedTime());
          } else {
            return result;
          }
        }
      });
      TimelineDomains domainsToReturn = new TimelineDomains();
      domainsToReturn.addDomains(domains);
      return domainsToReturn;
    }
  }
private static TimelineDomain getTimelineDomain(DBIterator iterator,
String domainId, byte[] prefix) throws IOException {
// Iterate over all the rows whose key starts with prefix to retrieve the
// domain information.
TimelineDomain domain = new TimelineDomain();
domain.setId(domainId);
boolean noRows = true;
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
if (noRows) {
noRows = false;
}
byte[] value = iterator.peekNext().getValue();
if (value != null && value.length > 0) {
if (key[prefix.length] == DESCRIPTION_COLUMN[0]) {
domain.setDescription(new String(value, UTF_8));
} else if (key[prefix.length] == OWNER_COLUMN[0]) {
domain.setOwner(new String(value, UTF_8));
} else if (key[prefix.length] == READER_COLUMN[0]) {
domain.setReaders(new String(value, UTF_8));
} else if (key[prefix.length] == WRITER_COLUMN[0]) {
domain.setWriters(new String(value, UTF_8));
} else if (key[prefix.length] == TIMESTAMP_COLUMN[0]) {
domain.setCreatedTime(readReverseOrderedLong(value, 0));
domain.setModifiedTime(readReverseOrderedLong(value, 8));
} else {
LOG.error("Unrecognized domain column: " + key[prefix.length]);
}
}
}
if (noRows) {
return null;
} else {
return domain;
}
}
} | EntityDeletionThread |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java | {
"start": 1138,
"end": 4020
} | class ____ extends HandledTransportAction<
GetModelSnapshotsAction.Request,
GetModelSnapshotsAction.Response> {
private static final Logger logger = LogManager.getLogger(TransportGetModelSnapshotsAction.class);
private final JobResultsProvider jobResultsProvider;
private final JobManager jobManager;
private final ClusterService clusterService;
@Inject
public TransportGetModelSnapshotsAction(
TransportService transportService,
ActionFilters actionFilters,
JobResultsProvider jobResultsProvider,
JobManager jobManager,
ClusterService clusterService
) {
super(
GetModelSnapshotsAction.NAME,
transportService,
actionFilters,
GetModelSnapshotsAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.jobResultsProvider = jobResultsProvider;
this.jobManager = jobManager;
this.clusterService = clusterService;
}
@Override
protected void doExecute(
Task task,
GetModelSnapshotsAction.Request request,
ActionListener<GetModelSnapshotsAction.Response> listener
) {
TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId());
logger.debug(
() -> format(
"Get model snapshots for job %s snapshot ID %s. from = %s, size = %s start = '%s', end='%s', sort=%s descending=%s",
request.getJobId(),
request.getSnapshotId(),
request.getPageParams().getFrom(),
request.getPageParams().getSize(),
request.getStart(),
request.getEnd(),
request.getSort(),
request.getDescOrder()
)
);
if (Strings.isAllOrWildcard(request.getJobId())) {
getModelSnapshots(request, parentTaskId, listener);
return;
}
jobManager.jobExists(
request.getJobId(),
parentTaskId,
listener.delegateFailureAndWrap((l, ok) -> getModelSnapshots(request, parentTaskId, l))
);
}
private void getModelSnapshots(
GetModelSnapshotsAction.Request request,
TaskId parentTaskId,
ActionListener<GetModelSnapshotsAction.Response> listener
) {
jobResultsProvider.modelSnapshots(
request.getJobId(),
request.getPageParams().getFrom(),
request.getPageParams().getSize(),
request.getStart(),
request.getEnd(),
request.getSort(),
request.getDescOrder(),
request.getSnapshotId(),
parentTaskId,
page -> listener.onResponse(new GetModelSnapshotsAction.Response(page)),
listener::onFailure
);
}
}
| TransportGetModelSnapshotsAction |
java | spring-projects__spring-boot | module/spring-boot-data-redis/src/test/java/org/springframework/boot/data/redis/autoconfigure/DataRedisAutoConfigurationTests.java | {
"start": 36029,
"end": 36380
} | class ____ {
@Bean
LettuceClientConfigurationBuilderCustomizer customizer() {
return LettuceClientConfigurationBuilder::useSsl;
}
@Bean
LettuceClientOptionsBuilderCustomizer clientOptionsBuilderCustomizer() {
return (builder) -> builder.autoReconnect(false);
}
}
@Configuration(proxyBeanMethods = false)
static | CustomConfiguration |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/customized/BuiltinFormatMapperBehaviour.java | {
"start": 498,
"end": 5929
} | enum ____ {
/**
* The Quarkus preconfigured mappers are ignored and if there is no user provided one,
* Hibernate ORM will create a mapper according to its own rules.
*/
IGNORE {
@Override
protected void action(String puName, String type, List<String> causes) {
}
},
/**
* Uses a Quarkus preconfigured format mappers. If a format mapper operation is invoked a
* warning is logged.
*/
WARN {
@Override
protected void action(String puName, String type, List<String> causes) {
LOGGER.warn(message(puName, type, causes));
}
},
/**
* Currently the default one. If there is no user provided format mapper, a Quarkus preconfigured one will fail at runtime.
*/
FAIL {
@Override
protected void action(String puName, String type, List<String> causes) {
throw new IllegalStateException(message(puName, type, causes));
}
};
private static final Logger LOGGER = Logger.getLogger(BuiltinFormatMapperBehaviour.class);
private static final String TYPE_JSON = "JSON";
private static final String TYPE_XML = "XML";
private static String message(String puName, String type, List<String> causes) {
return String.format(Locale.ROOT,
"Persistence unit [%1$s] uses Quarkus' main formatting facilities for %2$s columns in the database. "
+ "\nAs these facilities are primarily meant for REST endpoints, and they might have been customized for such use, "
+ "this may lead to undesired behavior, up to and including data loss. "
+ "\nTo address this:"
+ "\n\t- If the application does not customize the %2$s serialization/deserialization, set \"quarkus.hibernate-orm.mapping.format.global=ignore\". This will be the default in future versions of Quarkus. "
+ "\n\t- Otherwise, define a custom `FormatMapper` bean annotated with "
+ (TYPE_JSON.equals(type) ? "@JsonFormat" : "@XmlFormat")
+ " and @PersistenceUnitExtension"
+ (PersistenceUnitUtil.isDefaultPersistenceUnit(puName) ? "" : "(\"%1$s\")")
+ " to address your database serialization/deserialization needs."
+ "\nThe precise causes for this failure are: \n\t- "
+ String.join("\n\t- ", causes)
+ "\nSee the migration guide for more details and how to proceed.",
puName, type);
}
public static boolean hasJsonProperties(MetadataImplementor metadata) {
AtomicBoolean hasJsonProperties = new AtomicBoolean(false);
metadata.getTypeConfiguration().getJavaTypeRegistry().forEachDescriptor(javaType -> {
if (javaType instanceof JsonJavaType<?>) {
hasJsonProperties.set(true);
}
});
if (hasJsonProperties.get()) {
return true;
} else {
// for JSON_ARRAY we need to check the jdbc type registry instead
return metadata.getTypeConfiguration().getJdbcTypeRegistry().hasRegisteredDescriptor(SqlTypes.JSON_ARRAY);
}
}
public static boolean hasXmlProperties(MetadataImplementor metadata) {
AtomicBoolean hasXmlProperties = new AtomicBoolean(false);
metadata.getTypeConfiguration().getJavaTypeRegistry().forEachDescriptor(javaType -> {
if (javaType instanceof XmlJavaType<?>) {
hasXmlProperties.set(true);
}
});
if (hasXmlProperties.get()) {
return true;
} else {
// for XML_ARRAY we need to check the jdbc type registry instead
return metadata.getTypeConfiguration().getJdbcTypeRegistry().hasRegisteredDescriptor(SqlTypes.XML_ARRAY);
}
}
public void jsonApply(MetadataImplementor metadata, String puName, ArcContainer container,
JsonFormatterCustomizationCheck check) {
if (hasJsonProperties(metadata)) {
List<String> causes = check.apply(container);
if (!causes.isEmpty()) {
action(puName, TYPE_JSON, causes);
}
}
}
public void xmlApply(MetadataImplementor metadata, String puName) {
// XML mapper can only be a JAXB based one. With Hibernate ORM 7.0 there was a change in the mapper:
// org.hibernate.type.format.jaxb.JaxbXmlFormatMapper -- where a new format was introduced
// and legacy one would be currently used for Quarkus. If we just bypass the built-in one, we will break the user data.
// There is:
// XML_FORMAT_MAPPER_LEGACY_FORMAT = "hibernate.type.xml_format_mapper.legacy_format"
// for "migration" purposes.
//
// Let's fail and tell the user to migrate their data to the new format and before that is done: use a delegate to org.hibernate.type.format.jaxb.JaxbXmlFormatMapper()
// using a legacy format:
if (hasXmlProperties(metadata)) {
action(puName, TYPE_XML,
List.of("The XML format mapper uses the legacy format. It is not compatible with the new default one."));
}
}
protected abstract void action(String puName, String type, List<String> causes);
}
| BuiltinFormatMapperBehaviour |
java | netty__netty | example/src/main/java/io/netty/example/echo/EchoServer.java | {
"start": 1355,
"end": 2912
} | class ____ {
static final int PORT = Integer.parseInt(System.getProperty("port", "8007"));
public static void main(String[] args) throws Exception {
// Configure SSL.
final SslContext sslCtx = ServerUtil.buildSslContext();
// Configure the server.
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
final EchoServerHandler serverHandler = new EchoServerHandler();
try {
ServerBootstrap b = new ServerBootstrap();
b.group(group)
.channel(NioServerSocketChannel.class)
.option(ChannelOption.SO_BACKLOG, 100)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
p.addLast(sslCtx.newHandler(ch.alloc()));
}
//p.addLast(new LoggingHandler(LogLevel.INFO));
p.addLast(serverHandler);
}
});
// Start the server.
ChannelFuture f = b.bind(PORT).sync();
// Wait until the server socket is closed.
f.channel().closeFuture().sync();
} finally {
// Shut down all event loops to terminate all threads.
group.shutdownGracefully();
}
}
}
| EchoServer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/Rounding.java | {
"start": 39790,
"end": 43242
} | class ____ extends Rounding {
static final byte ID = 2;
private final long interval;
private final ZoneId timeZone;
TimeIntervalRounding(long interval, ZoneId timeZone) {
if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}
TimeIntervalRounding(StreamInput in) throws IOException {
this(in.readVLong(), in.readZoneId());
}
@Override
public void innerWriteTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
out.writeZoneId(timeZone);
}
@Override
public byte id() {
return ID;
}
@Override
public Prepared prepare(long minUtcMillis, long maxUtcMillis) {
/*
* 128 is a power of two that isn't huge. We might be able to do
* better if the limit was based on the actual type of prepared
* rounding but this'll do for now.
*/
return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128);
}
private TimeIntervalPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) {
long minLookup = minUtcMillis - interval;
long maxLookup = maxUtcMillis;
LocalTimeOffset.Lookup lookup = LocalTimeOffset.lookup(timeZone, minLookup, maxLookup);
if (lookup == null) {
return prepareJavaTime();
}
LocalTimeOffset fixedOffset = lookup.fixedInRange(minLookup, maxLookup);
if (fixedOffset != null) {
return new FixedRounding(fixedOffset);
}
return new VariableRounding(lookup);
}
@Override
public Prepared prepareForUnknown() {
LocalTimeOffset offset = LocalTimeOffset.fixedOffset(timeZone);
if (offset != null) {
return new FixedRounding(offset);
}
return prepareJavaTime();
}
@Override
public TimeIntervalPreparedRounding prepareJavaTime() {
return new JavaTimeRounding();
}
@Override
public long offset() {
return 0;
}
@Override
public Rounding withoutOffset() {
return this;
}
@Override
public int hashCode() {
return Objects.hash(interval, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone);
}
@Override
public String toString() {
return "Rounding[" + interval + " in " + timeZone + "]";
}
private static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
} else {
return value / interval;
}
}
private abstract | TimeIntervalRounding |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/type/BigIntegerTypeHandler.java | {
"start": 907,
"end": 1884
} | class ____ extends BaseTypeHandler<BigInteger> {
@Override
public void setNonNullParameter(PreparedStatement ps, int i, BigInteger parameter, JdbcType jdbcType)
throws SQLException {
ps.setBigDecimal(i, new BigDecimal(parameter));
}
@Override
public BigInteger getNullableResult(ResultSet rs, String columnName) throws SQLException {
BigDecimal bigDecimal = rs.getBigDecimal(columnName);
return bigDecimal == null ? null : bigDecimal.toBigInteger();
}
@Override
public BigInteger getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
BigDecimal bigDecimal = rs.getBigDecimal(columnIndex);
return bigDecimal == null ? null : bigDecimal.toBigInteger();
}
@Override
public BigInteger getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
BigDecimal bigDecimal = cs.getBigDecimal(columnIndex);
return bigDecimal == null ? null : bigDecimal.toBigInteger();
}
}
| BigIntegerTypeHandler |
java | apache__camel | components/camel-stax/src/main/java/org/apache/camel/component/stax/StAXBuilder.java | {
"start": 1383,
"end": 1699
} | class ____ has JAXB annotations to bind POJO.
*/
public static <T> Expression stax(String clazzName) {
return new StAXJAXBIteratorExpression<T>(clazzName);
}
/**
* Creates a {@link org.apache.camel.component.stax.StAXJAXBIteratorExpression}.
*
* @param clazz the | which |
java | apache__maven | impl/maven-cli/src/test/java/org/apache/maven/cling/invoker/mvnup/goals/CheckTest.java | {
"start": 1965,
"end": 2303
} | class ____ {
@Test
@DisplayName("should not save modifications to disk")
void shouldNotSaveModificationsToDisk() {
assertFalse(checkGoal.shouldSaveModifications(), "Check goal should not save modifications to disk");
}
}
@Nested
@DisplayName("Execution")
| ModificationBehaviorTests |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/oidc/authentication/OidcClientConfigurationAuthenticationProvider.java | {
"start": 2875,
"end": 8633
} | class ____ implements AuthenticationProvider {
static final String DEFAULT_CLIENT_CONFIGURATION_AUTHORIZED_SCOPE = "client.read";
private final Log logger = LogFactory.getLog(getClass());
private final RegisteredClientRepository registeredClientRepository;
private final OAuth2AuthorizationService authorizationService;
private Converter<RegisteredClient, OidcClientRegistration> clientRegistrationConverter;
/**
* Constructs an {@code OidcClientConfigurationAuthenticationProvider} using the
* provided parameters.
* @param registeredClientRepository the repository of registered clients
* @param authorizationService the authorization service
*/
public OidcClientConfigurationAuthenticationProvider(RegisteredClientRepository registeredClientRepository,
OAuth2AuthorizationService authorizationService) {
Assert.notNull(registeredClientRepository, "registeredClientRepository cannot be null");
Assert.notNull(authorizationService, "authorizationService cannot be null");
this.registeredClientRepository = registeredClientRepository;
this.authorizationService = authorizationService;
this.clientRegistrationConverter = new RegisteredClientOidcClientRegistrationConverter();
}
/**
* Sets the {@link Converter} used for converting a {@link RegisteredClient} to an
* {@link OidcClientRegistration}.
* @param clientRegistrationConverter the {@link Converter} used for converting a
* {@link RegisteredClient} to an {@link OidcClientRegistration}
*/
public void setClientRegistrationConverter(
Converter<RegisteredClient, OidcClientRegistration> clientRegistrationConverter) {
Assert.notNull(clientRegistrationConverter, "clientRegistrationConverter cannot be null");
this.clientRegistrationConverter = clientRegistrationConverter;
}
@Override
public Authentication authenticate(Authentication authentication) throws AuthenticationException {
OidcClientRegistrationAuthenticationToken clientRegistrationAuthentication = (OidcClientRegistrationAuthenticationToken) authentication;
if (!StringUtils.hasText(clientRegistrationAuthentication.getClientId())) {
// This is not a Client Configuration Request.
// Return null to allow OidcClientRegistrationAuthenticationProvider to handle
// it.
return null;
}
// Validate the "registration" access token
AbstractOAuth2TokenAuthenticationToken<?> accessTokenAuthentication = null;
if (AbstractOAuth2TokenAuthenticationToken.class
.isAssignableFrom(clientRegistrationAuthentication.getPrincipal().getClass())) {
accessTokenAuthentication = (AbstractOAuth2TokenAuthenticationToken<?>) clientRegistrationAuthentication
.getPrincipal();
}
if (accessTokenAuthentication == null || !accessTokenAuthentication.isAuthenticated()) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN);
}
String accessTokenValue = accessTokenAuthentication.getToken().getTokenValue();
OAuth2Authorization authorization = this.authorizationService.findByToken(accessTokenValue,
OAuth2TokenType.ACCESS_TOKEN);
if (authorization == null) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN);
}
if (this.logger.isTraceEnabled()) {
this.logger.trace("Retrieved authorization with access token");
}
OAuth2Authorization.Token<OAuth2AccessToken> authorizedAccessToken = authorization.getAccessToken();
if (!authorizedAccessToken.isActive()) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN);
}
checkScope(authorizedAccessToken, Collections.singleton(DEFAULT_CLIENT_CONFIGURATION_AUTHORIZED_SCOPE));
return findRegistration(clientRegistrationAuthentication, authorization);
}
@Override
public boolean supports(Class<?> authentication) {
return OidcClientRegistrationAuthenticationToken.class.isAssignableFrom(authentication);
}
private OidcClientRegistrationAuthenticationToken findRegistration(
OidcClientRegistrationAuthenticationToken clientRegistrationAuthentication,
OAuth2Authorization authorization) {
RegisteredClient registeredClient = this.registeredClientRepository
.findByClientId(clientRegistrationAuthentication.getClientId());
if (registeredClient == null) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_CLIENT);
}
if (!registeredClient.getId().equals(authorization.getRegisteredClientId())) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_CLIENT);
}
if (this.logger.isTraceEnabled()) {
this.logger.trace("Validated client configuration request parameters");
}
OidcClientRegistration clientRegistration = this.clientRegistrationConverter.convert(registeredClient);
if (this.logger.isTraceEnabled()) {
this.logger.trace("Authenticated client configuration request");
}
return new OidcClientRegistrationAuthenticationToken(
(Authentication) clientRegistrationAuthentication.getPrincipal(), clientRegistration);
}
@SuppressWarnings("unchecked")
private static void checkScope(OAuth2Authorization.Token<OAuth2AccessToken> authorizedAccessToken,
Set<String> requiredScope) {
Collection<String> authorizedScope = Collections.emptySet();
if (authorizedAccessToken.getClaims().containsKey(OAuth2ParameterNames.SCOPE)) {
authorizedScope = (Collection<String>) authorizedAccessToken.getClaims().get(OAuth2ParameterNames.SCOPE);
}
if (!authorizedScope.containsAll(requiredScope)) {
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INSUFFICIENT_SCOPE);
}
else if (authorizedScope.size() != requiredScope.size()) {
// Restrict the access token to only contain the required scope
throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN);
}
}
}
| OidcClientConfigurationAuthenticationProvider |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java | {
"start": 1603,
"end": 1787
} | class ____ extends MetadataFieldMapper {
public static final String NAME = "_ignored";
public static final String CONTENT_TYPE = "_ignored";
public static | IgnoredFieldMapper |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java | {
"start": 3887,
"end": 6988
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(
TestContainerResourceIncreaseRPC.class);
@Test
void testHadoopProtoRPCTimeout() throws Exception {
testRPCTimeout(HadoopYarnProtoRPC.class.getName());
}
private void testRPCTimeout(String rpcClass) throws Exception {
Configuration conf = new Configuration();
// set timeout low for the test
conf.setInt("yarn.rpc.nm-command-timeout", 3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
try {
ContainerManagementProtocol proxy =
(ContainerManagementProtocol) rpc.getProxy(
ContainerManagementProtocol.class,
server.getListenerAddress(), conf);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken =
newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
// Construct container resource increase request,
List<Token> increaseTokens = new ArrayList<>();
increaseTokens.add(containerToken);
ContainerUpdateRequest request = ContainerUpdateRequest
.newInstance(increaseTokens);
try {
proxy.updateContainer(request);
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
assertEquals(SocketTimeoutException.class.getName(), e.getClass().getName(),
"Error, exception is not: " + SocketTimeoutException.class.getName());
return;
}
} finally {
server.stop();
}
fail("timeout exception should have occurred!");
}
public static Token newContainerToken(NodeId nodeId, byte[] password,
ContainerTokenIdentifier tokenIdentifier) {
// RPC layer client expects ip:port as service for tokens
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
// NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
Token containerToken =
Token.newInstance(tokenIdentifier.getBytes(),
ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return containerToken;
}
public | TestContainerResourceIncreaseRPC |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/mock/SerializableMode.java | {
"start": 190,
"end": 490
} | enum ____ {
/**
* No serialization.
*/
NONE,
/**
* Basic serializable mode for mock objects. Introduced in Mockito 1.8.1.
*/
BASIC,
/**
* Useful if the mock is deserialized in a different classloader / vm.
*/
ACROSS_CLASSLOADERS
}
| SerializableMode |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/dump/DumpRequest.java | {
"start": 728,
"end": 2557
} | class ____ {
String dataId;
String group;
String tenant;
String grayName;
private long lastModifiedTs;
private String sourceIp;
public String getDataId() {
return dataId;
}
public void setDataId(String dataId) {
this.dataId = dataId;
}
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
public String getTenant() {
return tenant;
}
public void setTenant(String tenant) {
this.tenant = tenant;
}
public long getLastModifiedTs() {
return lastModifiedTs;
}
public void setLastModifiedTs(long lastModifiedTs) {
this.lastModifiedTs = lastModifiedTs;
}
public String getSourceIp() {
return sourceIp;
}
public String getGrayName() {
return grayName;
}
public void setGrayName(String grayName) {
this.grayName = grayName;
}
public void setSourceIp(String sourceIp) {
this.sourceIp = sourceIp;
}
/**
* create dump request.
*
* @param dataId dataId.
* @param group group.
* @param tenant tenant.
* @param lastModifiedTs lastModifiedTs.
* @param sourceIp sourceIp.
* @return
*/
public static DumpRequest create(String dataId, String group, String tenant, long lastModifiedTs, String sourceIp) {
DumpRequest dumpRequest = new DumpRequest();
dumpRequest.dataId = dataId;
dumpRequest.group = group;
dumpRequest.tenant = tenant;
dumpRequest.lastModifiedTs = lastModifiedTs;
dumpRequest.sourceIp = sourceIp;
return dumpRequest;
}
}
| DumpRequest |
java | apache__flink | flink-table/flink-table-api-java/src/test/java/org/apache/flink/table/test/program/TableApiTestStep.java | {
"start": 4032,
"end": 5063
} | interface ____ {
/** See {@link TableEnvironment#from(String)}. */
Table from(String path);
/** See {@link TableEnvironment#fromCall(String, Object...)}. */
Table fromCall(String path, Object... arguments);
/** See {@link TableEnvironment#fromCall(Class, Object...)}. */
Table fromCall(Class<? extends UserDefinedFunction> function, Object... arguments);
/** See {@link TableEnvironment#fromValues(Object...)}. */
Table fromValues(Object... values);
/** See {@link TableEnvironment#fromValues(AbstractDataType, Object...)}. */
Table fromValues(AbstractDataType<?> dataType, Object... values);
/** See {@link TableEnvironment#sqlQuery(String)}. */
Table sqlQuery(String query);
/** See {@link TableEnvironment#fromModel(String)}. */
Model fromModel(String modelPath);
/** See {@link TableEnvironment#fromModel(ModelDescriptor)}. */
Model from(ModelDescriptor modelDescriptor);
}
}
| TableEnvAccessor |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGenerationException.java | {
"start": 923,
"end": 1282
} | class ____ extends AzureException {
private static final long serialVersionUID = 1L;
public SASKeyGenerationException(String message) {
super(message);
}
public SASKeyGenerationException(String message, Throwable cause) {
super(message, cause);
}
public SASKeyGenerationException(Throwable t) {
super(t);
}
} | SASKeyGenerationException |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/impl/Online.java | {
"start": 1078,
"end": 1202
} | class ____ extends BaseOnline {
public Online(FrameworkModel frameworkModel) {
super(frameworkModel);
}
}
| Online |
java | apache__camel | components/camel-digitalocean/src/main/java/org/apache/camel/component/digitalocean/constants/DigitalOceanHeaders.java | {
"start": 909,
"end": 4498
} | interface ____ {
@Metadata(description = "The operation to perform",
javaType = "org.apache.camel.component.digitalocean.constants.DigitalOceanOperations")
String OPERATION = "CamelDigitalOceanOperation";
@Metadata(description = "The id", javaType = "Integer or String")
String ID = "CamelDigitalOceanId";
@Metadata(description = "The type", javaType = "org.apache.camel.component.digitalocean.constants.DigitalOceanImageTypes")
String TYPE = "CamelDigitalOceanType";
@Metadata(description = "The name", javaType = "String")
String NAME = "CamelDigitalOceanName";
String NEW_NAME = "CamelDigitalOceanNewName";
@Metadata(description = "The names of the droplet", javaType = "List<String>")
String NAMES = "CamelDigitalOceanNames";
@Metadata(description = "The code name of the region aka DigitalOcean data centers", javaType = "String")
String REGION = "CamelDigitalOceanRegion";
@Metadata(description = "The description", javaType = "String")
String DESCRIPTION = "CamelDigitalOceanDescription";
@Metadata(description = "The size of the droplet", javaType = "String")
String DROPLET_SIZE = "CamelDigitalOceanDropletSize";
@Metadata(description = "The image of the droplet", javaType = "String")
String DROPLET_IMAGE = "CamelDigitalOceanDropletImage";
@Metadata(description = "The keys of the droplet", javaType = "List<String>")
String DROPLET_KEYS = "CamelDigitalOceanDropletSSHKeys";
@Metadata(description = "The flag to enable backups", javaType = "Boolean")
String DROPLET_ENABLE_BACKUPS = "CamelDigitalOceanDropletEnableBackups";
@Metadata(description = "The flag to enable ipv6", javaType = "Boolean")
String DROPLET_ENABLE_IPV6 = "CamelDigitalOceanDropletEnableIpv6";
@Metadata(description = "The flag to enable private networking", javaType = "Boolean")
String DROPLET_ENABLE_PRIVATE_NETWORKING = "CamelDigitalOceanDropletEnablePrivateNetworking";
@Metadata(description = "The user data of the droplet", javaType = "String")
String DROPLET_USER_DATA = "CamelDigitalOceanDropletUserData";
@Metadata(description = "The volumes' identifier of the droplet", javaType = "List<String>")
String DROPLET_VOLUMES = "CamelDigitalOceanDropletVolumes";
@Metadata(description = "The tags of the droplet", javaType = "List<String>")
String DROPLET_TAGS = "CamelDigitalOceanDropletTags";
@Metadata(description = "The droplet identifier", javaType = "Integer")
String DROPLET_ID = "CamelDigitalOceanDropletId";
@Metadata(description = "The id of the DigitalOcean public image or your private image", javaType = "Integer")
String IMAGE_ID = "CamelDigitalOceanImageId";
@Metadata(description = "The kernel id to be changed for droplet", javaType = "Integer")
String KERNEL_ID = "CamelDigitalOceanKernelId";
@Metadata(description = "The name of the volume", javaType = "String")
String VOLUME_NAME = "CamelDigitalOceanVolumeName";
@Metadata(description = "The size value in GB", javaType = "Integer or Double")
String VOLUME_SIZE_GIGABYTES = "CamelDigitalOceanVolumeSizeGigabytes";
@Metadata(description = "The floating IP address", javaType = "String")
String FLOATING_IP_ADDRESS = "CamelDigitalOceanFloatingIPAddress";
@Metadata(description = "The SSH key fingerprint", javaType = "String")
String KEY_FINGERPRINT = "CamelDigitalOceanKeyFingerprint";
@Metadata(description = "The public key", javaType = "String")
String KEY_PUBLIC_KEY = "CamelDigitalOceanKeyPublicKey";
}
| DigitalOceanHeaders |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java | {
"start": 1293,
"end": 2935
} | class ____ {
@Public
@Unstable
public static ApplicationStartData newInstance(ApplicationId applicationId,
String applicationName, String applicationType, String queue,
String user, long submitTime, long startTime) {
ApplicationStartData appSD = Records.newRecord(ApplicationStartData.class);
appSD.setApplicationId(applicationId);
appSD.setApplicationName(applicationName);
appSD.setApplicationType(applicationType);
appSD.setQueue(queue);
appSD.setUser(user);
appSD.setSubmitTime(submitTime);
appSD.setStartTime(startTime);
return appSD;
}
@Public
@Unstable
public abstract ApplicationId getApplicationId();
@Public
@Unstable
public abstract void setApplicationId(ApplicationId applicationId);
@Public
@Unstable
public abstract String getApplicationName();
@Public
@Unstable
public abstract void setApplicationName(String applicationName);
@Public
@Unstable
public abstract String getApplicationType();
@Public
@Unstable
public abstract void setApplicationType(String applicationType);
@Public
@Unstable
public abstract String getUser();
@Public
@Unstable
public abstract void setUser(String user);
@Public
@Unstable
public abstract String getQueue();
@Public
@Unstable
public abstract void setQueue(String queue);
@Public
@Unstable
public abstract long getSubmitTime();
@Public
@Unstable
public abstract void setSubmitTime(long submitTime);
@Public
@Unstable
public abstract long getStartTime();
@Public
@Unstable
public abstract void setStartTime(long startTime);
}
| ApplicationStartData |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/service/internal/EventListenerRegistryImpl.java | {
"start": 4485,
"end": 8428
} | class ____ implements EventListenerRegistry {
@SuppressWarnings("rawtypes")
private final EventListenerGroup[] eventListeners;
private final Map<Class<?>,Object> listenerClassToInstanceMap = new HashMap<>();
@SuppressWarnings("rawtypes")
private EventListenerRegistryImpl(EventListenerGroup[] eventListeners) {
this.eventListeners = eventListeners;
}
public <T> EventListenerGroup<T> getEventListenerGroup(EventType<T> eventType) {
if ( eventListeners.length < eventType.ordinal() + 1 ) {
// eventType is a custom EventType that has not been registered.
// registeredEventListeners array was not allocated enough space to
// accommodate it.
throw new HibernateException( "Unable to find listeners for type [" + eventType.eventName() + "]" );
}
@SuppressWarnings("unchecked")
final EventListenerGroup<T> listeners = eventListeners[ eventType.ordinal() ];
if ( listeners == null ) {
throw new HibernateException( "Unable to find listeners for type [" + eventType.eventName() + "]" );
}
return listeners;
}
@Override
public void addDuplicationStrategy(DuplicationStrategy strategy) {
for ( var group : eventListeners ) {
if ( group != null ) {
group.addDuplicationStrategy( strategy );
}
}
}
@Override
@SafeVarargs
public final <T> void setListeners(EventType<T> type, Class<? extends T>... listenerClasses) {
setListeners( type, resolveListenerInstances( type, listenerClasses ) );
}
@SafeVarargs
@AllowReflection // Possible array types are registered in org.hibernate.graalvm.internal.StaticClassLists.typesNeedingArrayCopy
private <T> T[] resolveListenerInstances(EventType<T> type, Class<? extends T>... listenerClasses) {
@SuppressWarnings("unchecked")
final T[] listeners = (T[]) Array.newInstance( type.baseListenerInterface(), listenerClasses.length );
for ( int i = 0; i < listenerClasses.length; i++ ) {
listeners[i] = resolveListenerInstance( listenerClasses[i] );
}
return listeners;
}
private <T> T resolveListenerInstance(Class<T> listenerClass) {
@SuppressWarnings("unchecked")
final T listenerInstance = (T) listenerClassToInstanceMap.get( listenerClass );
if ( listenerInstance == null ) {
T newListenerInstance = instantiateListener( listenerClass );
listenerClassToInstanceMap.put( listenerClass, newListenerInstance );
return newListenerInstance;
}
else {
return listenerInstance;
}
}
private <T> T instantiateListener(Class<T> listenerClass) {
try {
//noinspection deprecation
return listenerClass.newInstance();
}
catch ( Exception e ) {
throw new EventListenerRegistrationException(
"Unable to instantiate specified event listener class: " + listenerClass.getName(),
e
);
}
}
@Override
@SafeVarargs
public final <T> void setListeners(EventType<T> type, T... listeners) {
final var registeredListeners = getEventListenerGroup( type );
registeredListeners.clear();
if ( listeners != null ) {
for ( T listener : listeners ) {
registeredListeners.appendListener( listener );
}
}
}
@Override
@SafeVarargs
public final <T> void appendListeners(EventType<T> type, Class<? extends T>... listenerClasses) {
appendListeners( type, resolveListenerInstances( type, listenerClasses ) );
}
@Override
@SafeVarargs
public final <T> void appendListeners(EventType<T> type, T... listeners) {
getEventListenerGroup( type ).appendListeners( listeners );
}
@Override
@SafeVarargs
public final <T> void prependListeners(EventType<T> type, Class<? extends T>... listenerClasses) {
prependListeners( type, resolveListenerInstances( type, listenerClasses ) );
}
@Override
@SafeVarargs
public final <T> void prependListeners(EventType<T> type, T... listeners) {
getEventListenerGroup( type ).prependListeners( listeners );
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Builder
public static | EventListenerRegistryImpl |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java | {
"start": 604,
"end": 1481
} | class ____ extends AbstractXContentSerializingTestCase<PutCalendarAction.Request> {
private final String calendarId = JobTests.randomValidJobId();
@Override
protected PutCalendarAction.Request createTestInstance() {
return new PutCalendarAction.Request(CalendarTests.testInstance(calendarId));
}
@Override
protected PutCalendarAction.Request mutateInstance(PutCalendarAction.Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<PutCalendarAction.Request> instanceReader() {
return PutCalendarAction.Request::new;
}
@Override
protected PutCalendarAction.Request doParseInstance(XContentParser parser) {
return PutCalendarAction.Request.parseRequest(calendarId, parser);
}
}
| PutCalendarActionRequestTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/AckedBatchedClusterStateUpdateTask.java | {
"start": 729,
"end": 947
} | class ____ a cluster state update task that notifies an AcknowledgedResponse listener when
* all the nodes have acknowledged the cluster state update request. It works with batched cluster state updates.
*/
public | models |
java | resilience4j__resilience4j | resilience4j-hedge/src/main/java/io/github/resilience4j/hedge/HedgeConfig.java | {
"start": 3837,
"end": 3931
} | enum ____ {
PRECONFIGURED, AVERAGE_PLUS
}
public static | HedgeDurationSupplierType |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-links/runtime/src/main/java/io/quarkus/resteasy/reactive/links/RestLinksProvider.java | {
"start": 218,
"end": 654
} | interface ____ {
/**
* @param elementType The resource type.
* @return the web links associated with the element type.
*/
Collection<Link> getTypeLinks(Class<?> elementType);
/**
* @param instance the resource instance.
* @param <T> the resource generic type.
* @return the web links associated with the instance.
*/
<T> Collection<Link> getInstanceLinks(T instance);
}
| RestLinksProvider |
java | elastic__elasticsearch | libs/x-content/impl/src/test/java/org/elasticsearch/xcontent/provider/cbor/ESCborParserTests.java | {
"start": 1030,
"end": 3106
} | class ____ extends ESTestCase {
public void testParseText() throws IOException {
testStringValue("foo");
testStringValue("føø");
testStringValue("f\u00F8\u00F8");
testStringValue("ツ"); // 3 bytes in UTF-8, counts as 1 character
testStringValue("🐔"); // 4 bytes in UTF-8, counts as 2 characters
testStringValue(randomUnicodeOfLengthBetween(1, 1_000));
}
private void testStringValue(String expected) throws IOException {
CBORFactory factory = new ESCborFactoryBuilder().build();
assertThat(factory, Matchers.instanceOf(ESCborFactory.class));
ByteArrayOutputStream outputStream;
try (XContentBuilder builder = CborXContent.contentBuilder()) {
builder.map(Map.of("text", expected));
outputStream = (ByteArrayOutputStream) builder.getOutputStream();
}
ESCborParser parser = (ESCborParser) factory.createParser(outputStream.toByteArray());
assertThat(parser, Matchers.instanceOf(ESCborParser.class));
assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
assertThat(parser.nextFieldName(), equalTo("text"));
assertThat(parser.nextToken(), equalTo(JsonToken.VALUE_STRING));
Text text = parser.getValueAsText();
assertThat(text.hasBytes(), equalTo(true));
assertThat(text.stringLength(), equalTo(expected.length()));
assertThat(text.string(), equalTo(expected));
// Retrieve twice
assertThat(parser.getValueAsText().string(), equalTo(expected));
assertThat(parser.getValueAsString(), equalTo(expected));
// Use the getText() to ensure _tokenIncomplete works
assertThat(parser.getText(), equalTo(expected));
// The optimisation is not used after the getText()
assertThat(parser.getValueAsText(), nullValue());
// Original CBOR getValueAsString works.
assertThat(parser.getValueAsString(), equalTo(expected));
assertThat(parser.nextToken(), equalTo(JsonToken.END_OBJECT));
}
}
| ESCborParserTests |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/classpath/ClassPathDirectories.java | {
"start": 1187,
"end": 2061
} | class ____ implements Iterable<File> {
private static final Log logger = LogFactory.getLog(ClassPathDirectories.class);
private final List<File> directories = new ArrayList<>();
public ClassPathDirectories(URL @Nullable [] urls) {
if (urls != null) {
addUrls(urls);
}
}
private void addUrls(URL[] urls) {
for (URL url : urls) {
addUrl(url);
}
}
private void addUrl(URL url) {
if (url.getProtocol().equals("file") && url.getPath().endsWith("/")) {
try {
this.directories.add(ResourceUtils.getFile(url));
}
catch (Exception ex) {
logger.warn(LogMessage.format("Unable to get classpath URL %s", url));
logger.trace(LogMessage.format("Unable to get classpath URL %s", url), ex);
}
}
}
@Override
public Iterator<File> iterator() {
return Collections.unmodifiableList(this.directories).iterator();
}
}
| ClassPathDirectories |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/CustomDeserializersTest.java | {
"start": 3292,
"end": 3457
} | class ____ {
protected int x, y;
public Immutable(int x0, int y0) {
x = x0;
y = y0;
}
}
public static | Immutable |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java | {
"start": 31273,
"end": 31829
} | class ____ {
public void doTest() {
Client client = new Client();
client.after(1);
client.after(1, 2, 3);
}
}
""")
.doTest();
}
@Test
public void replaceWithJustParameter() {
bugCheckerWithCheckFixCompiles()
.allowBreakingChanges()
.addInputLines(
"Client.java",
"""
import com.google.errorprone.annotations.InlineMe;
import java.time.Duration;
public final | Caller |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/type/TypeVariableResolver.java | {
"start": 754,
"end": 869
} | interface ____ types that hold and can resolve type variables.
*
* @author Graeme Rocher
* @since 1.0
*/
public | for |
java | spring-projects__spring-security | ldap/src/test/java/org/springframework/security/ldap/jackson/LdapUserDetailsImplMixinTests.java | {
"start": 1558,
"end": 5465
} | class ____ {
private static final String USER_PASSWORD = "Password1234";
private static final String AUTHORITIES_ARRAYLIST_JSON = "[\"java.util.Collections$UnmodifiableRandomAccessList\", []]";
// @formatter:off
private static final String USER_JSON = "{"
+ "\"@class\": \"org.springframework.security.ldap.userdetails.LdapUserDetailsImpl\", "
+ "\"dn\": \"ignored=ignored\","
+ "\"username\": \"ghengis\","
+ "\"password\": \"" + USER_PASSWORD + "\","
+ "\"accountNonExpired\": true, "
+ "\"accountNonLocked\": true, "
+ "\"credentialsNonExpired\": true, "
+ "\"enabled\": true, "
+ "\"authorities\": " + AUTHORITIES_ARRAYLIST_JSON + ","
+ "\"graceLoginsRemaining\": " + Integer.MAX_VALUE + ","
+ "\"timeBeforeExpiration\": " + Integer.MAX_VALUE
+ "}";
// @formatter:on
private JsonMapper mapper;
@BeforeEach
public void setup() {
ClassLoader loader = getClass().getClassLoader();
this.mapper = JsonMapper.builder().addModules(SecurityJacksonModules.getModules(loader)).build();
}
@Test
public void serializeWhenMixinRegisteredThenSerializes() throws Exception {
LdapUserDetailsMapper mapper = new LdapUserDetailsMapper();
LdapUserDetailsImpl p = (LdapUserDetailsImpl) mapper.mapUserFromContext(createUserContext(), "ghengis",
AuthorityUtils.NO_AUTHORITIES);
String json = this.mapper.writeValueAsString(p);
JSONAssert.assertEquals(USER_JSON, json, true);
}
@Test
public void serializeWhenEraseCredentialInvokedThenUserPasswordIsNull() throws JacksonException, JSONException {
LdapUserDetailsMapper mapper = new LdapUserDetailsMapper();
LdapUserDetailsImpl p = (LdapUserDetailsImpl) mapper.mapUserFromContext(createUserContext(), "ghengis",
AuthorityUtils.NO_AUTHORITIES);
p.eraseCredentials();
String actualJson = this.mapper.writeValueAsString(p);
JSONAssert.assertEquals(USER_JSON.replaceAll("\"" + USER_PASSWORD + "\"", "null"), actualJson, true);
}
@Test
public void deserializeWhenMixinNotRegisteredThenThrowJsonProcessingException() {
assertThatExceptionOfType(JacksonException.class)
.isThrownBy(() -> new JsonMapper().readValue(USER_JSON, LdapUserDetailsImpl.class));
}
@Test
public void deserializeWhenMixinRegisteredThenDeserializes() throws Exception {
LdapUserDetailsMapper mapper = new LdapUserDetailsMapper();
LdapUserDetailsImpl expectedAuthentication = (LdapUserDetailsImpl) mapper
.mapUserFromContext(createUserContext(), "ghengis", AuthorityUtils.NO_AUTHORITIES);
LdapUserDetailsImpl authentication = this.mapper.readValue(USER_JSON, LdapUserDetailsImpl.class);
assertThat(authentication.getAuthorities()).containsExactlyElementsOf(expectedAuthentication.getAuthorities());
assertThat(authentication.getDn()).isEqualTo(expectedAuthentication.getDn());
assertThat(authentication.getUsername()).isEqualTo(expectedAuthentication.getUsername());
assertThat(authentication.getPassword()).isEqualTo(expectedAuthentication.getPassword());
assertThat(authentication.getGraceLoginsRemaining())
.isEqualTo(expectedAuthentication.getGraceLoginsRemaining());
assertThat(authentication.getTimeBeforeExpiration())
.isEqualTo(expectedAuthentication.getTimeBeforeExpiration());
assertThat(authentication.isAccountNonExpired()).isEqualTo(expectedAuthentication.isAccountNonExpired());
assertThat(authentication.isAccountNonLocked()).isEqualTo(expectedAuthentication.isAccountNonLocked());
assertThat(authentication.isEnabled()).isEqualTo(expectedAuthentication.isEnabled());
assertThat(authentication.isCredentialsNonExpired())
.isEqualTo(expectedAuthentication.isCredentialsNonExpired());
}
private DirContextAdapter createUserContext() {
DirContextAdapter ctx = new DirContextAdapter();
ctx.setDn(LdapNameBuilder.newInstance("ignored=ignored").build());
ctx.setAttributeValue("userPassword", USER_PASSWORD);
return ctx;
}
}
| LdapUserDetailsImplMixinTests |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TagVersionsTask.java | {
"start": 1110,
"end": 5199
} | class ____ extends AbstractVersionsTask {
private static final Logger LOGGER = Logging.getLogger(TagVersionsTask.class);
private Version releaseVersion;
private Map<String, Integer> tagVersions = Map.of();
@Inject
public TagVersionsTask(BuildLayout layout) {
super(layout);
}
@Option(option = "release", description = "The release version to be tagged")
public void release(String version) {
releaseVersion = Version.fromString(version);
}
@Option(option = "tag-version", description = "Version id to tag. Of the form <VersionType>:<id>.")
public void tagVersions(List<String> version) {
this.tagVersions = splitVersionIds(version);
}
@TaskAction
public void executeTask() throws IOException {
if (releaseVersion == null) {
throw new IllegalArgumentException("Release version not specified");
}
if (tagVersions.isEmpty()) {
throw new IllegalArgumentException("No version tags specified");
}
LOGGER.lifecycle("Tagging version {} component ids", releaseVersion);
var versions = expandV7Version(tagVersions);
for (var v : versions.entrySet()) {
Path recordFile = switch (v.getKey()) {
case TRANSPORT_VERSION_TYPE -> rootDir.resolve(TRANSPORT_VERSIONS_RECORD);
case INDEX_VERSION_TYPE -> rootDir.resolve(INDEX_VERSIONS_RECORD);
default -> throw new IllegalArgumentException("Unknown version type " + v.getKey());
};
LOGGER.lifecycle("Adding version record for {} to [{}]: [{},{}]", v.getKey(), recordFile, releaseVersion, v.getValue());
Path file = rootDir.resolve(recordFile);
List<String> versionRecords = Files.readAllLines(file);
var modified = addVersionRecord(versionRecords, releaseVersion, v.getValue());
if (modified.isPresent()) {
Files.write(
file,
modified.get(),
StandardOpenOption.CREATE,
StandardOpenOption.WRITE,
StandardOpenOption.TRUNCATE_EXISTING
);
}
}
}
/*
* V7 just extracts a single Version. If so, this version needs to be applied to transport and index versions.
*/
private static Map<String, Integer> expandV7Version(Map<String, Integer> tagVersions) {
Integer v7Version = tagVersions.get("Version");
if (v7Version == null) return tagVersions;
return Map.of(TRANSPORT_VERSION_TYPE, v7Version, INDEX_VERSION_TYPE, v7Version);
}
private static final Pattern VERSION_LINE = Pattern.compile("(\\d+\\.\\d+\\.\\d+),(\\d+)");
static Optional<List<String>> addVersionRecord(List<String> versionRecordLines, Version release, int id) {
Map<Version, Integer> versions = versionRecordLines.stream().map(l -> {
Matcher m = VERSION_LINE.matcher(l);
if (m.matches() == false) throw new IllegalArgumentException(String.format("Incorrect format for line [%s]", l));
return m;
}).collect(Collectors.toMap(m -> Version.fromString(m.group(1)), m -> Integer.parseInt(m.group(2))));
Integer existing = versions.putIfAbsent(release, id);
if (existing != null) {
if (existing.equals(id)) {
LOGGER.lifecycle("Version id [{}] for release [{}] already recorded", id, release);
return Optional.empty();
} else {
throw new IllegalArgumentException(
String.format(
"Release [%s] already recorded with version id [%s], cannot update to version [%s]",
release,
existing,
id
)
);
}
}
return Optional.of(
versions.entrySet().stream().sorted(Map.Entry.comparingByKey()).map(e -> e.getKey() + "," + e.getValue()).toList()
);
}
}
| TagVersionsTask |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/transactions/TransactionalRedisDataSource.java | {
"start": 12513,
"end": 21462
} | class ____ the values
* @param <V> the type of the value
* @return the object to execute commands manipulating streams.
*/
default <V> TransactionalStreamCommands<String, String, V> stream(Class<V> typeOfValue) {
return stream(String.class, String.class, typeOfValue);
}
/**
* Gets the object to manipulate JSON values.
* This group requires the <a href="https://redis.io/docs/stack/json/">RedisJSON module</a>.
*
* @return the object to manipulate JSON values.
*/
default TransactionalJsonCommands<String> json() {
return json(String.class);
}
/**
* Gets the object to manipulate JSON values.
* This group requires the <a href="https://redis.io/docs/stack/json/">RedisJSON module</a>.
*
* @param <K> the type of keys
* @return the object to manipulate JSON values.
*/
<K> TransactionalJsonCommands<K> json(Class<K> redisKeyType);
/**
* Gets the object to manipulate Bloom filters.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a>.
*
* @param valueType the type of value to store in the filters
* @param <V> the type of value
* @return the object to manipulate Bloom filters
*/
default <V> TransactionalBloomCommands<String, V> bloom(Class<V> valueType) {
return bloom(String.class, valueType);
}
/**
* Gets the object to manipulate Bloom filters.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a>.
*
* @param redisKeyType the type of the key
* @param valueType the type of value to store in the filters
* @param <K> the type of key
* @param <V> the type of value
* @return the object to manipulate Bloom filters
*/
<K, V> TransactionalBloomCommands<K, V> bloom(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to manipulate Cuckoo filters.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the Cuckoo
* filter support).
*
* @param <V> the type of the values added into the Cuckoo filter
* @return the object to manipulate Cuckoo values.
*/
default <V> TransactionalCuckooCommands<String, V> cuckoo(Class<V> valueType) {
return cuckoo(String.class, valueType);
}
/**
* Gets the object to manipulate Cuckoo filters.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the Cuckoo
* filter support).
*
* @param <K> the type of keys
* @param <V> the type of the values added into the Cuckoo filter
* @return the object to manipulate Cuckoo values.
*/
<K, V> TransactionalCuckooCommands<K, V> cuckoo(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to manipulate Count-Min sketches.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the count-min
* sketches support).
*
* @param <V> the type of the values added into the count-min sketches
* @return the object to manipulate count-min sketches.
*/
default <V> TransactionalCountMinCommands<String, V> countmin(Class<V> valueType) {
return countmin(String.class, valueType);
}
/**
* Gets the object to manipulate Count-Min sketches.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the count-min
* sketches support).
*
* @param <K> the type of keys
* @param <V> the type of the values added into the count-min sketches
* @return the object to manipulate count-min sketches.
*/
<K, V> TransactionalCountMinCommands<K, V> countmin(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to manipulate Top-K list.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the top-k
* list support).
*
* @param <V> the type of the values added into the top-k lists
* @return the object to manipulate top-k lists.
*/
default <V> TransactionalTopKCommands<String, V> topk(Class<V> valueType) {
return topk(String.class, valueType);
}
/**
* Gets the object to manipulate Top-K list.
* This group requires the <a href="https://redis.io/docs/stack/bloom/">RedisBloom module</a> (including the top-k
* list support).
*
* @param <K> the type of keys
* @param <V> the type of the values added into the top-k lists
* @return the object to manipulate top-k lists.
*/
<K, V> TransactionalTopKCommands<K, V> topk(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to manipulate graphs.
* This group requires the <a href="https://redis.io/docs/stack/graph/">RedisGraph module</a>.
*
* @return the object to manipulate graphs lists.
*/
@Experimental("The Redis graph support is experimental")
default TransactionalGraphCommands<String> graph() {
return graph(String.class);
}
/**
* Gets the object to manipulate graphs.
* This group requires the <a href="https://redis.io/docs/stack/graph/">RedisGraph module</a>.
*
* @param <K> the type of keys
* @return the object to manipulate graphs lists.
*/
@Experimental("The Redis graph support is experimental")
<K> TransactionalGraphCommands<K> graph(Class<K> redisKeyType);
/**
* Gets the object to emit commands from the {@code search} group.
* This group requires the <a href="https://redis.io/docs/stack/search/">RedisSearch module</a>.
*
* @param <K> the type of keys
* @return the object to search documents
*/
@Experimental("The Redis search support is experimental")
<K> TransactionalSearchCommands search(Class<K> redisKeyType);
/**
* Gets the object to emit commands from the {@code search} group.
* This group requires the <a href="https://redis.io/docs/stack/search/">RedisSearch module</a>.
*
* @return the object to search documents
*/
@Experimental("The Redis Search support is experimental")
default TransactionalSearchCommands search() {
return search(String.class);
}
/**
* Gets the object to emit commands from the {@code auto-suggest} group.
* This group requires the <a href="https://redis.io/docs/stack/search/">RedisSearch module</a>.
*
* @param <K> the type of keys
* @return the object to get suggestions
*/
@Experimental("The Redis auto-suggest support is experimental")
<K> TransactionalAutoSuggestCommands<K> autosuggest(Class<K> redisKeyType);
/**
* Gets the object to emit commands from the {@code auto-suggest} group.
* This group requires the <a href="https://redis.io/docs/stack/search/">RedisSearch module</a>.
*
* @return the object to get suggestions
*/
@Experimental("The Redis auto-suggest support is experimental")
default TransactionalAutoSuggestCommands<String> autosuggest() {
return autosuggest(String.class);
}
/**
* Gets the object to emit commands from the {@code time series} group.
* This group requires the <a href="https://redis.io/docs/stack/timeseries/">Redis Time Series module</a>.
*
* @param <K> the type of keys
* @return the object to manipulate time series
*/
@Experimental("The Redis time series support is experimental")
<K> TransactionalTimeSeriesCommands<K> timeseries(Class<K> redisKeyType);
/**
* Gets the object to emit commands from the {@code time series} group.
* This group requires the <a href="https://redis.io/docs/stack/timeseries/">Redis Time Series module</a>.
*
* @return the object to manipulate time series
*/
@Experimental("The Redis time series support is experimental")
default TransactionalTimeSeriesCommands<String> timeseries() {
return timeseries(String.class);
}
/**
* Executes a command.
* This method is used to execute commands not offered by the API.
*
* @param command the command name
* @param args the parameters, encoded as String.
*/
void execute(String command, String... args);
/**
* Executes a command.
* This method is used to execute commands not offered by the API.
*
* @param command the command
* @param args the parameters, encoded as String.
*/
void execute(Command command, String... args);
/**
* Executes a command.
* This method is used to execute commands not offered by the API.
*
* @param command the command
* @param args the parameters, encoded as String.
*/
void execute(io.vertx.redis.client.Command command, String... args);
}
| of |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ReactiveTypeHandlerTests.java | {
"start": 21654,
"end": 21846
} | class ____ {
private final String value;
public Bar(String value) {
this.value = value;
}
@SuppressWarnings("unused")
public String getValue() {
return this.value;
}
}
}
| Bar |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/context/CustomContextTest.java | {
"start": 3118,
"end": 3254
} | interface ____ {
}
@NormalScope
@Inherited
@Target({ TYPE, METHOD, FIELD })
@Retention(RUNTIME)
public @ | FieldScoped |
java | netty__netty | codec-http3/src/main/java/io/netty/handler/codec/http3/Http3RequestStreamInitializer.java | {
"start": 1059,
"end": 2772
} | class ____ extends ChannelInitializer<QuicStreamChannel> {
@Override
protected final void initChannel(QuicStreamChannel ch) {
ChannelPipeline pipeline = ch.pipeline();
Http3ConnectionHandler connectionHandler = ch.parent().pipeline().get(Http3ConnectionHandler.class);
if (connectionHandler == null) {
throw new IllegalStateException("Couldn't obtain the " +
StringUtil.simpleClassName(Http3ConnectionHandler.class) + " of the parent Channel");
}
Http3RequestStreamEncodeStateValidator encodeStateValidator = new Http3RequestStreamEncodeStateValidator();
Http3RequestStreamDecodeStateValidator decodeStateValidator = new Http3RequestStreamDecodeStateValidator();
// Add the encoder and decoder in the pipeline so we can handle Http3Frames
pipeline.addLast(connectionHandler.newCodec(encodeStateValidator, decodeStateValidator));
// Add the handler that will validate what we write and receive on this stream.
pipeline.addLast(encodeStateValidator);
pipeline.addLast(decodeStateValidator);
pipeline.addLast(connectionHandler.newRequestStreamValidationHandler(ch, encodeStateValidator,
decodeStateValidator));
initRequestStream(ch);
}
/**
* Init the {@link QuicStreamChannel} to handle {@link Http3RequestStreamFrame}s. At the point of calling this
* method it is already valid to write {@link Http3RequestStreamFrame}s as the codec is already in the pipeline.
* @param ch the {QuicStreamChannel} for the request stream.
*/
protected abstract void initRequestStream(QuicStreamChannel ch);
}
| Http3RequestStreamInitializer |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointTestBase.java | {
"start": 47009,
"end": 47995
} | class ____ {
private long lastLeft = Long.MIN_VALUE;
private long lastRight = Long.MIN_VALUE;
}
}
protected static long withHeader(long value) {
checkState(
value <= Integer.MAX_VALUE,
"Value too large for header, this indicates that the test is running too long.");
return value ^ HEADER;
}
protected static long withoutHeader(long value) {
checkHeader(value);
return value ^ HEADER;
}
protected static long checkHeader(long value) {
if ((value & HEADER_MASK) != HEADER) {
throw new IllegalArgumentException(
"Stream corrupted. Cannot find the header "
+ Long.toHexString(HEADER)
+ " in the value "
+ Long.toHexString(value));
}
return value;
}
@ThrowableAnnotation(ThrowableType.NonRecoverableError)
static | State |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java | {
"start": 941,
"end": 1059
} | class ____ custom log4j logger messages. Carries additional fields which will populate JSON fields in logs.
*/
public | for |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java | {
"start": 1792,
"end": 2015
} | class ____ adapted from Lucene's ExitableDirectoryReader, but instead of using a query timeout for cancellation,
* a {@link QueryCancellation} object is used. The main behavior of the classes is mostly unchanged.
*/
| was |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/DefaultBeanContext.java | {
"start": 168647,
"end": 170713
} | class ____ implements MethodExecutionHandle<Object, Object> {
private final ExecutableMethod<Object, ?> method;
private final BeanDefinition<?> beanDefinition;
private Object target;
public BeanContextExecutionHandle(ExecutableMethod<Object, ?> method, BeanDefinition<?> beanDefinition) {
this.method = method;
this.beanDefinition = beanDefinition;
}
@NonNull
@Override
public AnnotationMetadata getAnnotationMetadata() {
return method.getAnnotationMetadata();
}
@Override
public Object getTarget() {
Object target = this.target;
if (target == null) {
synchronized (this) { // double check
target = this.target;
if (target == null) {
target = getBean(beanDefinition);
this.target = target;
}
}
}
return target;
}
@Override
public Class getDeclaringType() {
return beanDefinition.getBeanType();
}
@Override
public String getMethodName() {
return method.getMethodName();
}
@Override
public Argument[] getArguments() {
return method.getArguments();
}
@Override
public Method getTargetMethod() {
return method.getTargetMethod();
}
@Override
public ReturnType getReturnType() {
return method.getReturnType();
}
@Override
public Object invoke(Object... arguments) {
return method.invoke(getTarget(), arguments);
}
@NonNull
@Override
public ExecutableMethod<Object, Object> getExecutableMethod() {
return (ExecutableMethod<Object, Object>) method;
}
@Override
public String toString() {
return method.toString();
}
}
}
| BeanContextExecutionHandle |
java | google__auto | value/src/main/java/com/google/auto/value/extension/AutoValueExtension.java | {
"start": 11182,
"end": 11479
} | class ____ {...}
* }
* }</pre>
*
* <p>Here {@code toBuilderMethods()} will return a set containing the method {@code
* Foo.toBuilder()}.
*/
Set<ExecutableElement> toBuilderMethods();
/**
* Returns static no-argument methods in the {@code @AutoValue} | Builder |
java | apache__flink | flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/restore/ValueStateChangeApplier.java | {
"start": 1156,
"end": 2044
} | class ____<K, N, T> extends KvStateChangeApplier<K, N> {
private final InternalValueState<K, N, T> state;
protected ValueStateChangeApplier(
InternalKeyContext<K> keyContext, InternalValueState<K, N, T> state) {
super(keyContext);
this.state = state;
}
@Override
protected InternalKvState<K, N, ?> getState() {
return state;
}
protected void applyInternal(StateChangeOperation operation, DataInputView in)
throws Exception {
switch (operation) {
case SET:
state.update(state.getValueSerializer().deserialize(in));
break;
case CLEAR:
state.clear();
break;
default:
throw new IllegalArgumentException("Unknown state change operation: " + operation);
}
}
}
| ValueStateChangeApplier |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java | {
"start": 2561,
"end": 12553
} | class ____ {
static final Logger LOG = LoggerFactory.getLogger(TestDiskBalancer.class);
public static final long MB = 1024 * 1024L;
public static final long GB = MB * 1024L;
public static final long TB = GB * 1024L;
private static int[] diskSizes =
{1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 200, 300, 400, 500, 600, 700, 800, 900};
private Random rand;
private String stringTable =
"ABCDEDFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0987654321";
/**
* Constructs a util class.
*/
public DiskBalancerTestUtil() {
this.rand = new Random(Time.monotonicNow());
}
/**
* Returns a random string.
*
* @param length - Number of chars in the string
* @return random String
*/
private String getRandomName(int length) {
StringBuilder name = new StringBuilder();
for (int x = 0; x < length; x++) {
name.append(stringTable.charAt(rand.nextInt(stringTable.length())));
}
return name.toString();
}
/**
* Returns a Random Storage Type.
*
* @return - StorageType
*/
private StorageType getRandomStorageType() {
return StorageType.parseStorageType(rand.nextInt(3));
}
/**
* Returns random capacity, if the size is smaller than 10
* they are TBs otherwise the size is assigned to GB range.
*
* @return Long - Disk Size
*/
private long getRandomCapacity() {
int size = diskSizes[rand.nextInt(diskSizes.length)];
if (size < 10) {
return size * TB;
} else {
return size * GB;
}
}
/**
* Some value under 20% in these tests.
*/
private long getRandomReserved(long capacity) {
double rcap = capacity * 0.2d;
double randDouble = rand.nextDouble();
double temp = randDouble * rcap;
return (new Double(temp)).longValue();
}
/**
* Some value less that capacity - reserved.
*/
private long getRandomDfsUsed(long capacity, long reserved) {
double rcap = capacity - reserved;
double randDouble = rand.nextDouble();
double temp = randDouble * rcap;
return (new Double(temp)).longValue();
}
/**
* Creates a Random Volume of a specific storageType.
*
* @return Volume
*/
public DiskBalancerVolume createRandomVolume() {
return createRandomVolume(getRandomStorageType());
}
/**
* Creates a Random Volume for testing purpose.
*
* @param type - StorageType
* @return DiskBalancerVolume
*/
public DiskBalancerVolume createRandomVolume(StorageType type) {
DiskBalancerVolume volume = new DiskBalancerVolume();
volume.setPath("/tmp/disk/" + getRandomName(10));
volume.setStorageType(type.toString());
volume.setTransient(type.isTransient());
volume.setCapacity(getRandomCapacity());
volume.setReserved(getRandomReserved(volume.getCapacity()));
volume
.setUsed(getRandomDfsUsed(volume.getCapacity(), volume.getReserved()));
volume.setUuid(UUID.randomUUID().toString());
return volume;
}
/**
* Creates a RandomVolumeSet.
*
* @param type - Storage Type
* @param diskCount - How many disks you need.
* @return volumeSet
* @throws Exception
*/
public DiskBalancerVolumeSet createRandomVolumeSet(StorageType type,
int diskCount)
throws Exception {
Preconditions.checkState(diskCount > 0);
DiskBalancerVolumeSet volumeSet =
new DiskBalancerVolumeSet(type.isTransient());
for (int x = 0; x < diskCount; x++) {
volumeSet.addVolume(createRandomVolume(type));
}
assert (volumeSet.getVolumeCount() == diskCount);
return volumeSet;
}
/**
* Creates a RandomDataNode.
*
* @param diskTypes - Storage types needed in the Node
* @param diskCount - Disk count - that many disks of each type is created
* @return DataNode
* @throws Exception
*/
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes,
int diskCount)
throws Exception {
Preconditions.checkState(diskTypes.length > 0);
Preconditions.checkState(diskCount > 0);
DiskBalancerDataNode node =
new DiskBalancerDataNode(UUID.randomUUID().toString());
for (StorageType t : diskTypes) {
DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
for (DiskBalancerVolume v : vSet.getVolumes()) {
node.addVolume(v);
}
}
return node;
}
/**
* Creates a RandomCluster.
*
* @param dataNodeCount - How many nodes you need
* @param diskTypes - StorageTypes you need in each node
* @param diskCount - How many disks you need of each type.
* @return Cluster
* @throws Exception
*/
public DiskBalancerCluster createRandCluster(int dataNodeCount,
StorageType[] diskTypes,
int diskCount)
throws Exception {
Preconditions.checkState(diskTypes.length > 0);
Preconditions.checkState(diskCount > 0);
Preconditions.checkState(dataNodeCount > 0);
NullConnector nullConnector = new NullConnector();
DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
// once we add these nodes into the connector, cluster will read them
// from the connector.
for (int x = 0; x < dataNodeCount; x++) {
nullConnector.addNode(createRandomDataNode(diskTypes, diskCount));
}
// with this call we have populated the cluster info
cluster.readClusterInfo();
return cluster;
}
/**
* Returns the number of blocks on a volume.
*
* @param source - Source Volume.
* @return Number of Blocks.
* @throws IOException
*/
public static int getBlockCount(FsVolumeSpi source,
boolean checkblockPoolCount)
throws IOException {
int count = 0;
for (String blockPoolID : source.getBlockPoolList()) {
FsVolumeSpi.BlockIterator sourceIter =
source.newBlockIterator(blockPoolID, "TestDiskBalancerSource");
int blockCount = 0;
while (!sourceIter.atEnd()) {
ExtendedBlock block = sourceIter.nextBlock();
if (block != null) {
blockCount++;
}
}
if (checkblockPoolCount) {
LOG.info("Block Pool Id: {}, blockCount: {}", blockPoolID, blockCount);
assertTrue(blockCount > 0);
}
count += blockCount;
}
return count;
}
public static MiniDFSCluster newImbalancedCluster(
final Configuration conf,
final int numDatanodes,
final long[] storageCapacities,
final int defaultBlockSize,
final int fileLen)
throws IOException, InterruptedException, TimeoutException {
return newImbalancedCluster(
conf,
numDatanodes,
storageCapacities,
defaultBlockSize,
fileLen,
null);
}
public static MiniDFSCluster newImbalancedCluster(
final Configuration conf,
final int numDatanodes,
final long[] storageCapacities,
final int defaultBlockSize,
final int fileLen,
final StartupOption dnOption)
throws IOException, InterruptedException, TimeoutException {
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
final String fileName = "/" + UUID.randomUUID().toString();
final Path filePath = new Path(fileName);
Preconditions.checkNotNull(storageCapacities);
Preconditions.checkArgument(
storageCapacities.length == 2,
"need to specify capacities for two storages.");
// Write a file and restart the cluster
File basedir = new File(GenericTestUtils.getRandomizedTempPath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
.numDataNodes(numDatanodes)
.storageCapacities(storageCapacities)
.storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
.storagesPerDatanode(2)
.dnStartupOption(dnOption)
.build();
FsVolumeImpl source = null;
FsVolumeImpl dest = null;
cluster.waitActive();
Random r = new Random();
FileSystem fs = cluster.getFileSystem(0);
TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
DFSTestUtil.waitReplication(fs, filePath, (short) 1);
cluster.restartDataNodes();
cluster.waitActive();
// Get the data node and move all data to one disk.
for (int i = 0; i < numDatanodes; i++) {
DataNode dnNode = cluster.getDataNodes().get(i);
try (FsDatasetSpi.FsVolumeReferences refs =
dnNode.getFSDataset().getFsVolumeReferences()) {
source = (FsVolumeImpl) refs.get(0);
dest = (FsVolumeImpl) refs.get(1);
assertTrue(DiskBalancerTestUtil.getBlockCount(source, true) > 0);
DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
source, dest);
assertEquals(0, DiskBalancerTestUtil.getBlockCount(source, false));
}
}
cluster.restartDataNodes();
cluster.waitActive();
return cluster;
}
/**
* Moves all blocks to the destination volume.
*
* @param fsDataset - Dataset
* @param source - Source Volume.
* @param dest - Destination Volume.
* @throws IOException
*/
public static void moveAllDataToDestVolume(FsDatasetSpi fsDataset,
FsVolumeSpi source, FsVolumeSpi dest) throws IOException {
for (String blockPoolID : source.getBlockPoolList()) {
FsVolumeSpi.BlockIterator sourceIter =
source.newBlockIterator(blockPoolID, "TestDiskBalancerSource");
while (!sourceIter.atEnd()) {
ExtendedBlock block = sourceIter.nextBlock();
if (block != null) {
fsDataset.moveBlockAcrossVolumes(block, dest);
}
}
}
}
}
| DiskBalancerTestUtil |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/metadata/TableMetaDataProvider.java | {
"start": 865,
"end": 1046
} | class ____ table meta-data.
*
* <p>This is intended for internal use by the Simple JDBC classes.
*
* @author Thomas Risberg
* @author Sam Brannen
* @since 2.5
*/
public | providing |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/distributed/AbstractConsistencyProtocol.java | {
"start": 1130,
"end": 1799
} | class ____<T extends Config, L extends RequestProcessor>
implements ConsistencyProtocol<T, L> {
protected final ProtocolMetaData metaData = new ProtocolMetaData();
protected Map<String, L> processorMap = Collections.synchronizedMap(new HashMap<>());
public void loadLogProcessor(List<L> logProcessors) {
logProcessors.forEach(logDispatcher -> processorMap.put(logDispatcher.group(), logDispatcher));
}
protected Map<String, L> allProcessor() {
return processorMap;
}
@Override
public ProtocolMetaData protocolMetaData() {
return this.metaData;
}
}
| AbstractConsistencyProtocol |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FilerConsumerDoneFileNameDeleteTest.java | {
"start": 1152,
"end": 2593
} | class ____ extends ContextTestSupport {
private static final String TEST_FILE_NAME = "hello" + UUID.randomUUID() + ".txt";
@Test
public void testDoneFile() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
// wait a bit and it should not pickup the written file as there are no
// done file
Awaitility.await().pollDelay(50, TimeUnit.MILLISECONDS).untilAsserted(() -> assertMockEndpointsSatisfied());
resetMocks();
oneExchangeDone.reset();
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
// write the done file
template.sendBodyAndHeader(fileUri(), "", Exchange.FILE_NAME, "done");
assertMockEndpointsSatisfied();
oneExchangeDone.matchesWaitTime();
// done file should be deleted now
assertFileNotExists(testFile("done"));
// as well the original file should be deleted
assertFileNotExists(testFile(TEST_FILE_NAME));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(fileUri("?doneFileName=done&delete=true&initialDelay=0&delay=10")).to("mock:result");
}
};
}
}
| FilerConsumerDoneFileNameDeleteTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/predicate/SqmInListPredicate.java | {
"start": 1014,
"end": 6386
} | class ____<T> extends AbstractNegatableSqmPredicate implements SqmInPredicate<T> {
private final SqmExpression<T> testExpression;
private final List<SqmExpression<T>> listExpressions;
public SqmInListPredicate(SqmExpression<T> testExpression, NodeBuilder nodeBuilder) {
this( testExpression, new ArrayList<>(), nodeBuilder );
}
@SuppressWarnings({"unchecked", "unused"})
public SqmInListPredicate(
SqmExpression<T> testExpression,
NodeBuilder nodeBuilder,
SqmExpression<T>... listExpressions) {
this( testExpression, ArrayHelper.toExpandableList( listExpressions ), nodeBuilder );
}
public SqmInListPredicate(
SqmExpression<T> testExpression,
List<? extends SqmExpression<T>> listExpressions,
NodeBuilder nodeBuilder) {
this( testExpression, listExpressions, false, nodeBuilder );
}
public SqmInListPredicate(
SqmExpression<T> testExpression,
List<? extends SqmExpression<T>> listExpressions,
boolean negated,
NodeBuilder nodeBuilder) {
super( negated, nodeBuilder );
this.testExpression = testExpression;
//noinspection unchecked
this.listExpressions = (List<SqmExpression<T>>) listExpressions;
for ( SqmExpression<T> listExpression : listExpressions ) {
implyListElementType( listExpression, testExpression, nodeBuilder );
}
}
@Override
public SqmInListPredicate<T> copy(SqmCopyContext context) {
final SqmInListPredicate<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
List<SqmExpression<T>> listExpressions = new ArrayList<>( this.listExpressions.size() );
for ( SqmExpression<T> listExpression : this.listExpressions ) {
listExpressions.add( listExpression.copy( context ) );
}
final SqmInListPredicate<T> predicate = context.registerCopy(
this,
new SqmInListPredicate<>(
testExpression.copy( context ),
listExpressions,
isNegated(),
nodeBuilder()
)
);
copyTo( predicate, context );
return predicate;
}
@Override
public SqmExpression<T> getTestExpression() {
return testExpression;
}
@Override
public SqmExpression<T> getExpression() {
return getTestExpression();
}
@Override
public SqmInPredicate<T> value(@NonNull Object value) {
if ( value instanceof Collection ) {
//noinspection unchecked
for ( T v : ( (Collection<T>) value ) ) {
addExpression( nodeBuilder().value( v, testExpression ) );
}
}
else {
//noinspection unchecked
addExpression( nodeBuilder().value( (T) value, testExpression ) );
}
return this;
}
@Override
public SqmInPredicate<T> value(Expression<? extends T> value) {
//noinspection unchecked
addExpression( (SqmExpression<T>) value );
return this;
}
@Override
public SqmInPredicate<T> value(JpaExpression<? extends T> value) {
//noinspection unchecked
addExpression( (SqmExpression<T>) value );
return this;
}
public List<? extends SqmExpression<T>> getListExpressions() {
return listExpressions;
}
public void addExpression(SqmExpression<T> expression) {
implyListElementType( expression );
listExpressions.add( expression );
}
private void implyListElementType(SqmExpression<?> expression) {
implyListElementType( expression, getTestExpression(), nodeBuilder() );
}
private static void implyListElementType(SqmExpression<?> expression, SqmExpression<?> testExpression, NodeBuilder nodeBuilder) {
assertComparable( testExpression, expression, nodeBuilder );
expression.applyInferableType(
QueryHelper.highestPrecedenceType2( testExpression.getExpressible(), expression.getExpressible() )
);
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitInListPredicate( this );
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
testExpression.appendHqlString( hql, context );
if ( isNegated() ) {
hql.append( " not" );
}
hql.append( " in (" );
listExpressions.get( 0 ).appendHqlString( hql, context );
for ( int i = 1; i < listExpressions.size(); i++ ) {
hql.append( ", " );
listExpressions.get( i ).appendHqlString( hql, context );
}
hql.append( ')' );
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof SqmInListPredicate<?> that
&& this.isNegated() == that.isNegated()
&& this.testExpression.equals( that.testExpression )
&& Objects.equals( this.listExpressions, that.listExpressions );
}
@Override
public int hashCode() {
int result = Boolean.hashCode( isNegated() );
result = 31 * result + testExpression.hashCode();
result = 31 * result + Objects.hashCode( listExpressions );
return result;
}
@Override
public boolean isCompatible(Object object) {
return object instanceof SqmInListPredicate<?> that
&& this.isNegated() == that.isNegated()
&& this.testExpression.isCompatible( that.testExpression )
&& SqmCacheable.areCompatible( this.listExpressions, that.listExpressions );
}
@Override
public int cacheHashCode() {
int result = Boolean.hashCode( isNegated() );
result = 31 * result + testExpression.cacheHashCode();
result = 31 * result + SqmCacheable.cacheHashCode( listExpressions );
return result;
}
@Override
protected SqmNegatablePredicate createNegatedNode() {
return new SqmInListPredicate<>( testExpression, listExpressions, !isNegated(), nodeBuilder() );
}
}
| SqmInListPredicate |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/simple/NameBindingWithInterfaceTest.java | {
"start": 942,
"end": 1868
} | class ____ {
@RegisterExtension
static ResteasyReactiveUnitTest test = new ResteasyReactiveUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(BlockingHelloResource.class, ReactiveHelloResource.class, BlockingHelloApi.class,
ReactiveHelloApi.class, AddTestHeaderContainerRequestFilter.class));
@Test
public void blockingHello() {
get("/blocking-hello")
.then()
.statusCode(200)
.body(Matchers.equalTo("hello"))
.header("test", "some-value");
}
@Test
public void reactiveHello() {
get("/reactive-hello")
.then()
.statusCode(200)
.body(Matchers.equalTo("hello"))
.header("test", "some-value");
}
@SomeFilter
public static | NameBindingWithInterfaceTest |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-kafka/runtime/src/main/java/io/quarkus/smallrye/reactivemessaging/kafka/HibernateOrmStateStore.java | {
"start": 1088,
"end": 1712
} | class ____ implements CheckpointStateStore {
public static final String HIBERNATE_ORM_STATE_STORE = "quarkus-hibernate-orm";
private final String consumerGroupId;
private final SessionFactory sf;
private final Class<? extends CheckpointEntity> stateType;
public HibernateOrmStateStore(String consumerGroupId, SessionFactory sf,
Class<? extends CheckpointEntity> stateType) {
this.consumerGroupId = consumerGroupId;
this.sf = sf;
this.stateType = stateType;
}
@ApplicationScoped
@Identifier(HIBERNATE_ORM_STATE_STORE)
public static | HibernateOrmStateStore |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/devmode/TestBean.java | {
"start": 79,
"end": 275
} | class ____ {
public String name;
// <placeholder2>
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| TestBean |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/TransformingOutput.java | {
"start": 313,
"end": 2849
} | class ____<K, V, S, T> extends CommandOutput<K, V, T> {
private final CommandOutput<K, V, S> delegate;
private final Function<TransformingAccessor, T> mappingFunction;
private final TransformingAccessor accessor;
private TransformingOutput(CommandOutput<K, V, S> delegate, Function<TransformingAccessor, T> mappingFunction) {
super(delegate.codec, null);
this.delegate = delegate;
this.mappingFunction = mappingFunction;
this.accessor = new TransformingAccessor(delegate);
}
/**
* Create a new transforming output.
*/
public static <K, V, S, T> CommandOutput<K, V, T> transform(CommandOutput<K, V, S> delegate,
Function<TransformingAccessor, T> mappingFunction) {
return new TransformingOutput<>(delegate, mappingFunction);
}
@Override
public void set(ByteBuffer bytes) {
delegate.set(bytes);
}
@Override
public void setSingle(ByteBuffer bytes) {
delegate.setSingle(bytes);
}
@Override
public void setBigNumber(ByteBuffer bytes) {
delegate.setBigNumber(bytes);
}
@Override
public void set(long integer) {
delegate.set(integer);
}
@Override
public void set(double number) {
delegate.set(number);
}
@Override
public void set(boolean value) {
delegate.set(value);
}
@Override
public void setError(ByteBuffer error) {
delegate.setError(error);
}
@Override
public void setError(String error) {
delegate.setError(error);
}
@Override
public boolean hasError() {
return delegate.hasError();
}
@Override
public String getError() {
return delegate.getError();
}
@Override
public T get() {
return mappingFunction.apply(accessor);
}
@Override
public void complete(int depth) {
delegate.complete(depth);
}
@Override
public void multi(int count) {
delegate.multi(count);
}
@Override
public void multiArray(int count) {
delegate.multiArray(count);
}
@Override
public void multiPush(int count) {
delegate.multiPush(count);
}
@Override
public void multiMap(int count) {
delegate.multiMap(count);
}
@Override
public void multiSet(int count) {
delegate.multiSet(count);
}
/**
* Accessor for the underlying output.
*/
@SuppressWarnings("unchecked")
public static | TransformingOutput |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/functions/WithConfigurationOpenContext.java | {
"start": 1054,
"end": 1368
} | class ____ implements OpenContext {
private final Configuration configuration;
public WithConfigurationOpenContext(Configuration configuration) {
this.configuration = configuration;
}
public Configuration getConfiguration() {
return configuration;
}
}
| WithConfigurationOpenContext |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/shortarrays/ShortArrays_assertIsSorted_Test.java | {
"start": 1585,
"end": 4242
} | class ____ extends ShortArraysBaseTest {
@Override
protected void initActualArray() {
actual = new short[] { 1, 2, 3, 4, 4 };
}
@Test
void should_pass_if_actual_is_sorted_in_ascending_order() {
arrays.assertIsSorted(someInfo(), actual);
}
@Test
void should_pass_if_actual_is_empty() {
arrays.assertIsSorted(someInfo(), emptyArray());
}
@Test
void should_pass_if_actual_contains_only_one_element() {
arrays.assertIsSorted(someInfo(), arrayOf(1));
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertIsSorted(someInfo(), null))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_not_sorted_in_ascending_order() {
AssertionInfo info = someInfo();
actual = arrayOf(1, 3, 2);
Throwable error = catchThrowable(() -> arrays.assertIsSorted(info, actual));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeSorted(1, actual));
}
@Test
void should_pass_if_actual_is_sorted_in_ascending_order_according_to_custom_comparison_strategy() {
actual = new short[] { 1, -2, 3, -4, 4 };
arraysWithCustomComparisonStrategy.assertIsSorted(someInfo(), actual);
}
@Test
void should_pass_if_actual_is_empty_whatever_custom_comparison_strategy_is() {
arraysWithCustomComparisonStrategy.assertIsSorted(someInfo(), emptyArray());
}
@Test
void should_pass_if_actual_contains_only_one_element_whatever_custom_comparison_strategy_is() {
arraysWithCustomComparisonStrategy.assertIsSorted(someInfo(), arrayOf(1));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertIsSorted(someInfo(),
null))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_not_sorted_in_ascending_order_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
actual = arrayOf(1, 3, 2);
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertIsSorted(info, actual));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures)
.failure(info, shouldBeSortedAccordingToGivenComparator(1, actual, comparatorForCustomComparisonStrategy()));
}
}
| ShortArrays_assertIsSorted_Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateViewTest0.java | {
"start": 1021,
"end": 3159
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
" CREATE OR REPLACE FORCE VIEW \"SC0\".\"V_001\" (\"ID\", \"GROUP_ID\", \"IND_BY_ALL\", \"IND_BY_GROUP\", \"OWNER_MEMBER_ID\", \"OWNER_MEMBER_SEQ\", \"GMT_MODIFIED\") AS \n" +
" select id, GROUP_ID ,IND_BY_ALL, IND_BY_GROUP, OWNER_MEMBER_ID, OWNER_MEMBER_SEQ,gmt_modified ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE OR REPLACE VIEW \"SC0\".\"V_001\" (\n" +
"\t\"ID\", \n" +
"\t\"GROUP_ID\", \n" +
"\t\"IND_BY_ALL\", \n" +
"\t\"IND_BY_GROUP\", \n" +
"\t\"OWNER_MEMBER_ID\", \n" +
"\t\"OWNER_MEMBER_SEQ\", \n" +
"\t\"GMT_MODIFIED\"\n" +
")\n" +
"AS\n" +
"SELECT id, GROUP_ID, IND_BY_ALL, IND_BY_GROUP, OWNER_MEMBER_ID\n" +
"\t, OWNER_MEMBER_SEQ, gmt_modified\n" +
"FROM DUAL",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
// assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("customers_part", "customer_id")));
}
}
| OracleCreateViewTest0 |
java | quarkusio__quarkus | extensions/grpc/deployment/src/main/java/io/quarkus/grpc/deployment/GrpcCommonProcessor.java | {
"start": 273,
"end": 762
} | class ____ {
/**
* Index the gRPC stubs.
* This is used to allows application find the classes generated from the proto file included in the dependency at build
* time.
* <p>
* See <a href="https://github.com/quarkusio/quarkus/issues/37312">#37312</a>
*/
@BuildStep
void indexGrpcStub(BuildProducer<IndexDependencyBuildItem> index) {
index.produce(new IndexDependencyBuildItem("io.quarkus", "quarkus-grpc-stubs"));
}
}
| GrpcCommonProcessor |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java | {
"start": 6067,
"end": 6378
} | interface ____ extends Vector.Builder permits LongVectorBuilder, FixedBuilder {
/**
* Appends a long to the current entry.
*/
Builder appendLong(long value);
@Override
LongVector build();
}
/**
* A builder that never grows.
*/
sealed | Builder |
java | spring-projects__spring-boot | module/spring-boot-cassandra/src/main/java/org/springframework/boot/cassandra/autoconfigure/CassandraProperties.java | {
"start": 1214,
"end": 4738
} | class ____ {
/**
* Location of the configuration file to use.
*/
private @Nullable Resource config;
/**
* Keyspace name to use.
*/
private @Nullable String keyspaceName;
/**
* Name of the Cassandra session.
*/
private @Nullable String sessionName;
/**
* Cluster node addresses in the form 'host:port', or a simple 'host' to use the
* configured port.
*/
private @Nullable List<String> contactPoints;
/**
* Port to use if a contact point does not specify one.
*/
private int port = 9042;
/**
* Datacenter that is considered "local". Contact points should be from this
* datacenter.
*/
private @Nullable String localDatacenter;
/**
* Login user of the server.
*/
private @Nullable String username;
/**
* Login password of the server.
*/
private @Nullable String password;
/**
* Compression supported by the Cassandra binary protocol.
*/
private @Nullable Compression compression;
/**
* Schema action to take at startup.
*/
private String schemaAction = "none";
/**
* SSL configuration.
*/
private Ssl ssl = new Ssl();
/**
* Connection configuration.
*/
private final Connection connection = new Connection();
/**
* Pool configuration.
*/
private final Pool pool = new Pool();
/**
* Request configuration.
*/
private final Request request = new Request();
/**
* Control connection configuration.
*/
private final Controlconnection controlconnection = new Controlconnection();
public @Nullable Resource getConfig() {
return this.config;
}
public void setConfig(@Nullable Resource config) {
this.config = config;
}
public @Nullable String getKeyspaceName() {
return this.keyspaceName;
}
public void setKeyspaceName(@Nullable String keyspaceName) {
this.keyspaceName = keyspaceName;
}
public @Nullable String getSessionName() {
return this.sessionName;
}
public void setSessionName(@Nullable String sessionName) {
this.sessionName = sessionName;
}
public @Nullable List<String> getContactPoints() {
return this.contactPoints;
}
public void setContactPoints(@Nullable List<String> contactPoints) {
this.contactPoints = contactPoints;
}
public int getPort() {
return this.port;
}
public void setPort(int port) {
this.port = port;
}
public @Nullable String getLocalDatacenter() {
return this.localDatacenter;
}
public void setLocalDatacenter(@Nullable String localDatacenter) {
this.localDatacenter = localDatacenter;
}
public @Nullable String getUsername() {
return this.username;
}
public void setUsername(@Nullable String username) {
this.username = username;
}
public @Nullable String getPassword() {
return this.password;
}
public void setPassword(@Nullable String password) {
this.password = password;
}
public @Nullable Compression getCompression() {
return this.compression;
}
public void setCompression(@Nullable Compression compression) {
this.compression = compression;
}
public Ssl getSsl() {
return this.ssl;
}
public void setSsl(Ssl ssl) {
this.ssl = ssl;
}
public String getSchemaAction() {
return this.schemaAction;
}
public void setSchemaAction(String schemaAction) {
this.schemaAction = schemaAction;
}
public Connection getConnection() {
return this.connection;
}
public Pool getPool() {
return this.pool;
}
public Request getRequest() {
return this.request;
}
public Controlconnection getControlconnection() {
return this.controlconnection;
}
public static | CassandraProperties |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java | {
"start": 973,
"end": 2918
} | class ____ extends AbstractBucketMetricsTestCase<SumBucketPipelineAggregationBuilder> {
@Override
protected SumBucketPipelineAggregationBuilder doCreateTestAggregatorFactory(String name, String bucketsPath) {
return new SumBucketPipelineAggregationBuilder(name, bucketsPath);
}
public void testValidate() {
AggregationBuilder singleBucketAgg = new GlobalAggregationBuilder("global");
AggregationBuilder multiBucketAgg = new TermsAggregationBuilder("terms").userValueTypeHint(ValueType.STRING);
final Set<AggregationBuilder> aggBuilders = new HashSet<>();
aggBuilders.add(singleBucketAgg);
aggBuilders.add(multiBucketAgg);
// First try to point to a non-existent agg
assertThat(
validate(aggBuilders, new SumBucketPipelineAggregationBuilder("name", "invalid_agg>metric")),
equalTo(
"Validation Failed: 1: "
+ PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()
+ " aggregation does not exist for aggregation [name]: invalid_agg>metric;"
)
);
// Now try to point to a single bucket agg
assertThat(
validate(aggBuilders, new SumBucketPipelineAggregationBuilder("name", "global>metric")),
equalTo(
"Validation Failed: 1: Unable to find unqualified multi-bucket aggregation in "
+ PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()
+ ". Path must include a multi-bucket aggregation for aggregation [name] found :"
+ GlobalAggregationBuilder.class.getName()
+ " for buckets path: global>metric;"
)
);
// Now try to point to a valid multi-bucket agg
assertThat(validate(aggBuilders, new SumBucketPipelineAggregationBuilder("name", "terms>metric")), nullValue());
}
}
| SumBucketTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldBeEqual_newAssertionError_without_JUnit_and_OTA4J_Test.java | {
"start": 1723,
"end": 3648
} | class ____ {
private Description description;
private ShouldBeEqual factory;
private ConstructorInvoker constructorInvoker;
@BeforeEach
public void setUp() throws NoSuchFieldException, IllegalAccessException {
Failures.instance().setRemoveAssertJRelatedElementsFromStackTrace(false);
description = new TestDescription("Jedi");
factory = shouldBeEqual("Luke", "Yoda", STANDARD_REPRESENTATION);
constructorInvoker = mock(ConstructorInvoker.class);
writeField(factory, "constructorInvoker", constructorInvoker, true);
}
@Test
void should_create_AssertionError_if_created_AssertionFailedError_is_null() throws Exception {
// GIVEN
given(constructorInvoker.newInstance(anyString(), any(Class[].class), any(Object[].class))).willReturn(null);
// WHEN
AssertionError error = factory.toAssertionError(description, STANDARD_REPRESENTATION);
// THEN
check(error);
}
private void check(AssertionError error) throws Exception {
verify(constructorInvoker, times(2)).newInstance(AssertionFailedError.class.getName(),
array(String.class, Object.class, Object.class),
format("[Jedi] %n" +
"expected: \"Yoda\"%n" +
" but was: \"Luke\""),
STANDARD_REPRESENTATION.toStringOf("Yoda"),
STANDARD_REPRESENTATION.toStringOf("Luke"));
assertThat(error).isNotInstanceOf(AssertionFailedError.class)
.hasMessage(format("[Jedi] %n" +
"expected: \"Yoda\"%n" +
" but was: \"Luke\""));
}
}
| ShouldBeEqual_newAssertionError_without_JUnit_and_OTA4J_Test |
java | apache__camel | components/camel-stream/src/main/java/org/apache/camel/component/stream/StreamConsumer.java | {
"start": 1773,
"end": 18708
} | class ____ extends DefaultConsumer implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(StreamConsumer.class);
private static final String TYPES = "in,file,http";
private static final String INVALID_URI = "Invalid uri, valid form: 'stream:{" + TYPES + "}'";
private static final List<String> TYPES_LIST = Arrays.asList(TYPES.split(","));
private ExecutorService executor;
private FileWatcherStrategy fileWatcher;
private volatile boolean watchFileChanged;
private volatile InputStream inputStream = System.in;
private volatile InputStream inputStreamToClose;
private volatile URLConnection urlConnectionToClose;
private volatile File file;
private StreamEndpoint endpoint;
private String uri;
private volatile boolean initialPromptDone;
private final List<String> lines = new CopyOnWriteArrayList<>();
public StreamConsumer(StreamEndpoint endpoint, Processor processor, String uri) throws Exception {
super(endpoint, processor);
this.endpoint = endpoint;
this.uri = uri;
validateUri(uri);
}
@Override
protected void doStart() throws Exception {
super.doStart();
// use file watch service if we read from file
if (endpoint.isFileWatcher()) {
String dir = new File(endpoint.getFileName()).getParent();
fileWatcher = new FileWatcherStrategy(dir, file -> {
String onlyName = file.getName();
String target = FileUtil.stripPath(endpoint.getFileName());
LOG.trace("File changed: {}", onlyName);
if (onlyName.equals(target)) {
// file is changed
watchFileChanged = true;
}
});
fileWatcher.setCamelContext(getEndpoint().getCamelContext());
}
ServiceHelper.startService(fileWatcher);
// if we scan the stream we are lenient and can wait for the stream to be available later
if (!endpoint.isScanStream()) {
initializeStreamLineMode();
}
executor = endpoint.getCamelContext().getExecutorServiceManager().newSingleThreadExecutor(this,
endpoint.getEndpointUri());
executor.execute(this);
if (endpoint.getGroupLines() < 0) {
throw new IllegalArgumentException(
"Option groupLines must be 0 or positive number, was " + endpoint.getGroupLines());
}
}
@Override
public void doStop() throws Exception {
if (executor != null) {
endpoint.getCamelContext().getExecutorServiceManager().shutdownNow(executor);
executor = null;
}
ServiceHelper.stopAndShutdownService(fileWatcher);
lines.clear();
IOHelper.close(inputStreamToClose);
if (urlConnectionToClose != null) {
closeURLConnection(urlConnectionToClose);
urlConnectionToClose = null;
}
super.doStop();
}
@Override
public void run() {
try {
if (endpoint.isReadLine()) {
readFromStreamLineMode();
} else {
readFromStreamRawMode();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
getExceptionHandler().handleException(e);
}
}
private BufferedReader initializeStreamLineMode() throws Exception {
// close old stream, before obtaining a new stream
IOHelper.close(inputStreamToClose);
if (urlConnectionToClose != null) {
closeURLConnection(urlConnectionToClose);
}
if ("in".equals(uri)) {
inputStream = System.in;
inputStreamToClose = null;
} else if ("file".equals(uri)) {
inputStream = resolveStreamFromFile();
inputStreamToClose = inputStream;
} else if ("http".equals(uri)) {
inputStream = resolveStreamFromUrl();
inputStreamToClose = inputStream;
}
if (inputStream != null) {
if ("http".equals(uri)) {
// read as-is
return IOHelper.buffered(new InputStreamReader(inputStream));
} else {
Charset charset = endpoint.getCharset();
return IOHelper.buffered(new InputStreamReader(inputStream, charset));
}
} else {
return null;
}
}
private InputStream initializeStreamRawMode() throws Exception {
// close old stream, before obtaining a new stream
IOHelper.close(inputStreamToClose);
if (urlConnectionToClose != null) {
closeURLConnection(urlConnectionToClose);
}
if ("in".equals(uri)) {
inputStream = System.in;
// do not close regular inputStream as it may be System.in etc.
inputStreamToClose = null;
} else if ("file".equals(uri)) {
inputStream = resolveStreamFromFile();
inputStreamToClose = inputStream;
} else if ("http".equals(uri)) {
inputStream = resolveStreamFromUrl();
inputStreamToClose = inputStream;
}
return inputStream;
}
private void readFromStreamRawMode() throws Exception {
long index = 0;
InputStream is = initializeStreamRawMode();
if (endpoint.isScanStream()) {
// repeat scanning from stream
while (isRunAllowed()) {
byte[] data = null;
try {
data = is.readAllBytes();
} catch (IOException e) {
// ignore
}
boolean eos = data == null || data.length == 0;
if (isRunAllowed() && endpoint.isRetry()) {
boolean reOpen = true;
if (endpoint.isFileWatcher()) {
reOpen = watchFileChanged;
}
if (reOpen) {
LOG.debug("File: {} changed/rollover, re-reading file from beginning", file);
is = initializeStreamRawMode();
// we have re-initialized the stream so lower changed flag
if (endpoint.isFileWatcher()) {
watchFileChanged = false;
}
} else {
LOG.trace("File: {} not changed since last read", file);
}
}
// sleep only if there is no input
if (eos) {
try {
Thread.sleep(endpoint.getScanStreamDelay());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
}
} else {
// regular read stream once until end of stream
boolean eos = false;
byte[] data = null;
while (!eos && isRunAllowed()) {
if (endpoint.getPromptMessage() != null) {
doPromptMessage();
}
try {
data = is.readAllBytes();
} catch (IOException e) {
// ignore
}
eos = data == null || data.length == 0;
if (!eos) {
processRaw(data, index);
}
}
}
}
private void readFromStreamLineMode() throws Exception {
long index = 0;
String line;
BufferedReader br = initializeStreamLineMode();
if (endpoint.isScanStream()) {
// repeat scanning from stream
while (isRunAllowed()) {
if (br != null) {
line = br.readLine();
LOG.trace("Read line: {}", line);
} else {
line = null;
}
boolean eos = line == null;
if (!eos && isRunAllowed()) {
index = processLine(line, false, index);
} else if (eos && isRunAllowed() && endpoint.isRetry()) {
boolean reOpen = true;
if (endpoint.isFileWatcher()) {
reOpen = watchFileChanged;
}
if (reOpen) {
LOG.debug("File: {} changed/rollover, re-reading file from beginning", file);
br = initializeStreamLineMode();
// we have re-initialized the stream so lower changed flag
if (endpoint.isFileWatcher()) {
watchFileChanged = false;
}
} else {
LOG.trace("File: {} not changed since last read", file);
}
}
// sleep only if there is no input
if (eos) {
try {
Thread.sleep(endpoint.getScanStreamDelay());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
}
} else {
// regular read stream once until end of stream
boolean eos = false;
String line2 = null;
while (!eos && isRunAllowed()) {
if (endpoint.getPromptMessage() != null) {
doPromptMessage();
}
if (line2 == null) {
line = br.readLine();
} else {
line = line2;
}
LOG.trace("Read line: {}", line);
eos = line == null;
if (!eos && isRunAllowed()) {
// read ahead if there is more data
line2 = readAhead(br);
boolean last = line2 == null;
index = processLine(line, last, index);
}
}
// EOL so trigger any
processLine(null, true, index);
}
// important: do not close the reader as it will close the standard system.in etc.
}
/**
* Strategy method for processing the line
*/
protected long processLine(String line, boolean last, long index) throws Exception {
lock.lock();
try {
if (endpoint.getGroupLines() > 0) {
// remember line
if (line != null) {
lines.add(line);
}
// should we flush lines?
if (!lines.isEmpty() && (lines.size() >= endpoint.getGroupLines() || last)) {
// spit out lines as we hit the size, or it was the last
List<String> copy = new ArrayList<>(lines);
Object body = endpoint.getGroupStrategy().groupLines(copy);
// remember to inc index when we create an exchange
Exchange exchange = createExchange(body, index++, last);
// clear lines
lines.clear();
getProcessor().process(exchange);
}
} else if (line != null) {
// single line
// remember to inc index when we create an exchange
Exchange exchange = createExchange(line, index++, last);
getProcessor().process(exchange);
}
return index;
} finally {
lock.unlock();
}
}
/**
* Strategy method for processing the data
*/
protected long processRaw(byte[] body, long index) throws Exception {
lock.lock();
try {
Exchange exchange = createExchange(body, index++, true);
getProcessor().process(exchange);
return index;
} finally {
lock.unlock();
}
}
/**
* Strategy method for prompting the prompt message
*/
protected void doPromptMessage() {
long delay = 0;
if (!initialPromptDone && endpoint.getInitialPromptDelay() > 0) {
initialPromptDone = true;
delay = endpoint.getInitialPromptDelay();
} else if (endpoint.getPromptDelay() > 0) {
delay = endpoint.getPromptDelay();
}
if (delay > 0) {
try {
Thread.sleep(delay);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
if (inputStream == System.in) {
System.out.print(endpoint.getPromptMessage());
}
}
private String readAhead(BufferedReader br) throws IOException {
if (uri.equals("in")) {
// do not read ahead with reading from system in
return null;
} else {
return br.readLine();
}
}
private InputStream resolveStreamFromFile() throws IOException {
String fileName = endpoint.getFileName();
StringHelper.notEmpty(fileName, "fileName");
FileInputStream fileStream;
file = new File(fileName);
if (LOG.isDebugEnabled()) {
LOG.debug("File to be scanned: {}, path: {}", file.getName(), file.getAbsolutePath());
}
if (file.canRead()) {
fileStream = new FileInputStream(file);
} else if (endpoint.isScanStream()) {
// if we scan the stream then it may not be available and we should return null
fileStream = null;
} else {
throw new IllegalArgumentException(INVALID_URI);
}
return fileStream;
}
/**
* From a comma-separated list of headers in the format of "FIELD=VALUE" or "FIELD:VALUE", split on the commas and
* split on the separator to create a stream of Map.Entry values while filtering out invalid combinations
*
* @param headerList A string containing a comma-separated list of headers
* @return A Stream of Map.Entry items which can then be added as headers to a URLConnection
*/
Stream<Map.Entry<String, String>> parseHeaders(String headerList) {
return Arrays.asList(headerList.split(","))
.stream()
.map(s -> s.split("[=:]"))
.filter(h -> h.length == 2)
.map(h -> Map.entry(h[0].trim(), h[1].trim()));
}
private InputStream resolveStreamFromUrl() throws IOException {
String url = endpoint.getHttpUrl();
StringHelper.notEmpty(url, "httpUrl");
urlConnectionToClose = URI.create(url).toURL().openConnection();
urlConnectionToClose.setUseCaches(false);
String headers = endpoint.getHttpHeaders();
if (headers != null) {
parseHeaders(headers)
.forEach(e -> urlConnectionToClose.setRequestProperty(e.getKey(), e.getValue()));
}
InputStream is;
try {
is = urlConnectionToClose.getInputStream();
} catch (IOException e) {
// close the http connection to avoid
// leaking gaps in case of an exception
if (urlConnectionToClose instanceof HttpURLConnection) {
((HttpURLConnection) urlConnectionToClose).disconnect();
}
throw e;
}
return is;
}
private void validateUri(String uri) throws IllegalArgumentException {
String[] s = uri.split(":");
if (s.length < 2) {
throw new IllegalArgumentException(INVALID_URI);
}
String[] t = s[1].split("\\?");
if (t.length < 1) {
throw new IllegalArgumentException(INVALID_URI);
}
this.uri = t[0].trim();
if (this.uri.startsWith("//")) {
this.uri = this.uri.substring(2);
}
if (!TYPES_LIST.contains(this.uri)) {
throw new IllegalArgumentException(INVALID_URI);
}
}
protected Exchange createExchange(Object body, long index, boolean last) {
Exchange exchange = createExchange(true);
exchange.getIn().setBody(body);
exchange.getIn().setHeader(StreamConstants.STREAM_INDEX, index);
exchange.getIn().setHeader(StreamConstants.STREAM_COMPLETE, last);
return exchange;
}
private static void closeURLConnection(URLConnection con) {
if (con instanceof HttpURLConnection) {
try {
((HttpURLConnection) con).disconnect();
} catch (Exception e) {
// ignore
}
}
}
}
| StreamConsumer |
java | quarkusio__quarkus | test-framework/common/src/test/java/io/quarkus/test/common/TestResourceManagerTest.java | {
"start": 576,
"end": 2254
} | class ____ {
private static final String OVERRIDEN_KEY = "overridenKey";
public static boolean parallelTestResourceRunned = false;
@ParameterizedTest
@ValueSource(classes = { MyTest.class, MyTest2.class })
void testRepeatableAnnotationsAreIndexed(Class<?> clazz) {
AtomicInteger counter = new AtomicInteger();
TestResourceManager manager = new TestResourceManager(clazz);
manager.inject(counter);
assertThat(counter.intValue()).isEqualTo(2);
}
@ParameterizedTest
@ValueSource(classes = { SequentialTestResourcesTest.class, SequentialTestResourcesTest2.class })
void testSequentialResourcesRunSequentially(Class<?> clazz) {
TestResourceManager manager = new TestResourceManager(clazz);
Map<String, String> props = manager.start();
Assertions.assertEquals("value1", props.get("key1"));
Assertions.assertEquals("value2", props.get("key2"));
Assertions.assertEquals("value2", props.get(OVERRIDEN_KEY));
}
@ParameterizedTest
@ValueSource(classes = { ParallelTestResourcesTest.class, ParallelTestResourcesTest2.class })
void testParallelResourcesRunInParallel(Class<?> clazz) {
TestResourceManager manager = new TestResourceManager(clazz);
Map<String, String> props = manager.start();
Assertions.assertEquals("value1", props.get("key1"));
Assertions.assertEquals("value2", props.get("key2"));
}
@WithTestResource(value = FirstLifecycleManager.class, scope = TestResourceScope.GLOBAL)
@WithTestResource(value = SecondLifecycleManager.class, scope = TestResourceScope.GLOBAL)
public static | TestResourceManagerTest |
java | apache__camel | components/camel-zookeeper/src/generated/java/org/apache/camel/component/zookeeper/ZooKeeperEndpointConfigurer.java | {
"start": 736,
"end": 5190
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ZooKeeperEndpoint target = (ZooKeeperEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "backoff": target.getConfiguration().setBackoff(property(camelContext, long.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "create": target.getConfiguration().setCreate(property(camelContext, boolean.class, value)); return true;
case "createmode":
case "createMode": target.getConfiguration().setCreateMode(property(camelContext, java.lang.String.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "listchildren":
case "listChildren": target.getConfiguration().setListChildren(property(camelContext, boolean.class, value)); return true;
case "repeat": target.getConfiguration().setRepeat(property(camelContext, boolean.class, value)); return true;
case "sendemptymessageondelete":
case "sendEmptyMessageOnDelete": target.getConfiguration().setSendEmptyMessageOnDelete(property(camelContext, boolean.class, value)); return true;
case "timeout": target.getConfiguration().setTimeout(property(camelContext, int.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "backoff": return long.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "create": return boolean.class;
case "createmode":
case "createMode": return java.lang.String.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "listchildren":
case "listChildren": return boolean.class;
case "repeat": return boolean.class;
case "sendemptymessageondelete":
case "sendEmptyMessageOnDelete": return boolean.class;
case "timeout": return int.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ZooKeeperEndpoint target = (ZooKeeperEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "backoff": return target.getConfiguration().getBackoff();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "create": return target.getConfiguration().isCreate();
case "createmode":
case "createMode": return target.getConfiguration().getCreateMode();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "listchildren":
case "listChildren": return target.getConfiguration().isListChildren();
case "repeat": return target.getConfiguration().isRepeat();
case "sendemptymessageondelete":
case "sendEmptyMessageOnDelete": return target.getConfiguration().isSendEmptyMessageOnDelete();
case "timeout": return target.getConfiguration().getTimeout();
default: return null;
}
}
}
| ZooKeeperEndpointConfigurer |
java | spring-projects__spring-boot | module/spring-boot-webmvc/src/test/java/org/springframework/boot/webmvc/autoconfigure/DispatcherServletRegistrationBeanTests.java | {
"start": 1113,
"end": 2496
} | class ____ {
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenPathIsNullThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new DispatcherServletRegistrationBean(new DispatcherServlet(), null))
.withMessageContaining("'path' must not be null");
}
@Test
void getPathReturnsPath() {
DispatcherServletRegistrationBean bean = new DispatcherServletRegistrationBean(new DispatcherServlet(),
"/test");
assertThat(bean.getPath()).isEqualTo("/test");
}
@Test
void getUrlMappingsReturnsSinglePathMappedPattern() {
DispatcherServletRegistrationBean bean = new DispatcherServletRegistrationBean(new DispatcherServlet(),
"/test");
assertThat(bean.getUrlMappings()).containsOnly("/test/*");
}
@Test
void setUrlMappingsCannotBeCalled() {
DispatcherServletRegistrationBean bean = new DispatcherServletRegistrationBean(new DispatcherServlet(),
"/test");
assertThatExceptionOfType(UnsupportedOperationException.class)
.isThrownBy(() -> bean.setUrlMappings(Collections.emptyList()));
}
@Test
void addUrlMappingsCannotBeCalled() {
DispatcherServletRegistrationBean bean = new DispatcherServletRegistrationBean(new DispatcherServlet(),
"/test");
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> bean.addUrlMappings("/test"));
}
}
| DispatcherServletRegistrationBeanTests |
java | netty__netty | codec-native-quic/src/test/java/io/netty/handler/codec/quic/SipHashTest.java | {
"start": 1095,
"end": 5521
} | class ____ {
private static final byte[] REFERENCE_SEED = new byte[] { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F };
private static final int[][] REFERENCE_OUTPUT = {
{ 0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72, },
{ 0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74, },
{ 0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d, },
{ 0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85, },
{ 0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf, },
{ 0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18, },
{ 0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb, },
{ 0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab, },
{ 0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93, },
{ 0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e, },
{ 0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a, },
{ 0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4, },
{ 0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75, },
{ 0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14, },
{ 0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7, },
{ 0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1, },
{ 0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f, },
{ 0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69, },
{ 0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b, },
{ 0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb, },
{ 0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe, },
{ 0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0, },
{ 0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93, },
{ 0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8, },
{ 0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8, },
{ 0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc, },
{ 0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17, },
{ 0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f, },
{ 0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde, },
{ 0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6, },
{ 0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad, },
{ 0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32, },
{ 0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71, },
{ 0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7, },
{ 0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12, },
{ 0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15, },
{ 0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31, },
{ 0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02, },
{ 0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca, },
{ 0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a, },
{ 0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e, },
{ 0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad, },
{ 0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18, },
{ 0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4, },
{ 0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9, },
{ 0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9, },
{ 0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb, },
{ 0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0, },
{ 0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6, },
{ 0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7, },
{ 0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee, },
{ 0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1, },
{ 0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a, },
{ 0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81, },
{ 0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f, },
{ 0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24, },
{ 0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7, },
{ 0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea, },
{ 0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60, },
{ 0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66, },
{ 0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c, },
{ 0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f, },
{ 0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5, },
{ 0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95, }
};
static | SipHashTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_primitive_byte_Test.java | {
"start": 866,
"end": 1081
} | class ____ {
@Test
void should_create_Assert() {
AbstractByteAssert<?> assertions = Assertions.assertThat((byte) 0);
assertThat(assertions).isNotNull();
}
}
| Assertions_assertThat_with_primitive_byte_Test |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/observability/SignalListenerFactory.java | {
"start": 1479,
"end": 2921
} | interface ____<T, STATE> {
/**
* Create the {@code STATE} object for the given {@link Publisher}. This assumes this factory will only be used on
* that particular source, allowing all {@link SignalListener} created by this factory to inherit the common state.
*
* @param source the source {@link Publisher} this factory is attached to.
* @return the common state
*/
STATE initializePublisherState(Publisher<? extends T> source);
/**
* Create a new {@link SignalListener} each time a new {@link org.reactivestreams.Subscriber} subscribes to the
* {@code source} {@link Publisher}.
* <p>
* The {@code source} {@link Publisher} is the same as the one that triggered common state creation at assembly time in
* {@link #initializePublisherState(Publisher)}). Said common state is passed to this method as well, and so is the
* {@link ContextView} for the newly registered {@link reactor.core.CoreSubscriber}.
*
* @param source the source {@link Publisher} that is being subscribed to
* @param listenerContext the {@link ContextView} associated with the new subscriber
* @param publisherContext the common state initialized in {@link #initializePublisherState(Publisher)}
* @return a stateful {@link SignalListener} observing signals to and from the new subscriber
*/
SignalListener<T> createListener(Publisher<? extends T> source, ContextView listenerContext, STATE publisherContext);
}
| SignalListenerFactory |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/config/ConfigDataPropertiesRuntimeHintsTests.java | {
"start": 1065,
"end": 1700
} | class ____ {
@Test
void shouldRegisterHints() {
RuntimeHints hints = new RuntimeHints();
new ConfigDataPropertiesRuntimeHints().registerHints(hints, getClass().getClassLoader());
assertThat(RuntimeHintsPredicates.reflection().onType(ConfigDataProperties.class)).accepts(hints);
assertThat(RuntimeHintsPredicates.reflection().onType(ConfigDataLocation.class)).accepts(hints);
assertThat(RuntimeHintsPredicates.reflection().onType(Activate.class)).accepts(hints);
assertThat(RuntimeHintsPredicates.reflection().onMethodInvocation(ConfigDataLocation.class, "of"))
.accepts(hints);
}
}
| ConfigDataPropertiesRuntimeHintsTests |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/matchers/apachecommons/EqualsBuilderTest.java | {
"start": 2395,
"end": 2566
} | class ____ extends TestObject {
public TestEmptySubObject(int a) {
super(a);
}
}
@SuppressWarnings("unused")
static | TestEmptySubObject |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/inheritance/classpermitall/ClassPermitAllParentResourceWithoutPath_PathOnBase_SecurityOnParent.java | {
"start": 808,
"end": 2196
} | class ____
implements ClassPermitAllInterfaceWithoutPath_SecurityOnParent {
@Path(CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + IMPL_METHOD_WITH_PATH + CLASS_PERMIT_ALL_PATH)
@POST
public String test_ClassPathOnResource_ImplOnParent_ImplMethodWithPath_ClassPermitAll(JsonObject array) {
return CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + IMPL_METHOD_WITH_PATH + CLASS_PERMIT_ALL_PATH;
}
@Path(CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + IMPL_METHOD_WITH_PATH + CLASS_PERMIT_ALL_METHOD_PERMIT_ALL_PATH)
@POST
public String test_ClassPathOnResource_ImplOnParent_ImplMethodWithPath_ClassPermitAllMethodPermitAll(JsonObject array) {
return CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + IMPL_METHOD_WITH_PATH + CLASS_PERMIT_ALL_METHOD_PERMIT_ALL_PATH;
}
@Override
public String classPathOnResource_ImplOnParent_InterfaceMethodWithPath_ClassPermitAll(JsonObject array) {
return CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + INTERFACE_METHOD_WITH_PATH + CLASS_PERMIT_ALL_PATH;
}
@PermitAll
@Override
public String classPathOnResource_ImplOnParent_InterfaceMethodWithPath_ClassPermitAllMethodPermitAll(JsonObject array) {
return CLASS_PATH_ON_RESOURCE + IMPL_ON_PARENT + INTERFACE_METHOD_WITH_PATH + CLASS_PERMIT_ALL_METHOD_PERMIT_ALL_PATH;
}
}
| ClassPermitAllParentResourceWithoutPath_PathOnBase_SecurityOnParent |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/ConstructorDetectorTest.java | {
"start": 1267,
"end": 1475
} | class ____ {
protected short v;
SingleArgShort() { v = -1; }
public SingleArgShort(@ImplicitName("value") short value) {
v = value;
}
}
static | SingleArgShort |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java | {
"start": 2769,
"end": 5023
} | interface ____ JAXB
// can't handle and throws IllegalAnnotationExceptions
private static final Class[] IGNORE_TYPES = {TimelineEntity.class,
TimelineEntities.class};
private static final Set<Class> IGNORE_SET =
new HashSet<>(Arrays.asList(IGNORE_TYPES));
private static JAXBException je =
new JAXBException("TimelineEntity and TimelineEntities has " +
"IllegalAnnotation");
private static StackTraceElement[] stackTrace = new StackTraceElement[]{
new StackTraceElement(ContextFactory.class.getName(),
"createContext", "ContextFactory.java", -1)};
private ContextFactory() {
}
public static JAXBContext newContext(Class[] classes,
Map<String, Object> properties) throws Exception {
Class spFactory = Class.forName(
"com.sun.xml.bind.v2.ContextFactory");
Method m = spFactory.getMethod("createContext", Class[].class, Map.class);
return (JAXBContext) m.invoke(null, classes, properties);
}
// Called from WebComponent.service
public static JAXBContext createContext(Class[] classes,
Map<String, Object> properties) throws Exception {
for (Class c : classes) {
if (IGNORE_SET.contains(c)) {
je.setStackTrace(stackTrace);
throw je;
}
if (!CLASS_SET.contains(c)) {
try {
return newContext(classes, properties);
} catch (Exception e) {
LOG.warn("Error while Creating JAXBContext", e);
throw e;
}
}
}
try {
synchronized (ContextFactory.class) {
if (cacheContext == null) {
cacheContext = newContext(CTYPES, properties);
}
}
} catch(Exception e) {
LOG.warn("Error while Creating JAXBContext", e);
throw e;
}
return cacheContext;
}
// Called from WebComponent.init
public static JAXBContext createContext(String contextPath, ClassLoader
classLoader, Map<String, Object> properties) throws Exception {
Class spFactory = Class.forName(
"com.sun.xml.bind.v2.ContextFactory");
Method m = spFactory.getMethod("createContext", String.class,
ClassLoader.class, Map.class);
return (JAXBContext) m.invoke(null, contextPath, classLoader,
properties);
}
}
| which |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/HelperMethod.java | {
"start": 1309,
"end": 6674
} | class ____ of build in method name
*/
@Override
public String getName() {
return Strings.decapitalize( this.getClass().getSimpleName() );
}
/**
* Returns the types used by this method for which import statements need to be generated. Defaults to the empty
* set. To be overridden by implementations in case they make use of additional types (note that the parameter and
* return type don't need to be added).
*
* @return the types used by this method for which import statements need to be generated
*/
public Set<Type> getImportTypes() {
return Collections.emptySet();
}
/**
* {@inheritDoc}
* <p>
* Default the targetType should be assignable to the returnType and the sourceType to the parameter,
* excluding generic type variables. When the implementor sees a need for this, this method can be overridden.
*/
@Override
public boolean matches(List<Type> sourceTypes, Type targetType) {
throw new IllegalStateException( "Irrelevant." );
}
@Override
public List<Parameter> getSourceParameters() {
return getParameters();
}
@Override
public List<Parameter> getContextParameters() {
return Collections.emptyList();
}
@Override
public ParameterProvidedMethods getContextProvidedMethods() {
return ParameterProvidedMethods.empty();
}
/**
* {@inheritDoc}
* <p>
* For built-in methods, the declaring mapper is always {@code null} as they will be added as private methods to the
* generated mapper.
*
* @return {@code null}
*/
@Override
public final Type getDeclaringMapper() {
return null;
}
@Override
public List<Parameter> getParameters() {
return Arrays.asList( getParameter() );
}
/**
* mapping target parameter mechanism not supported for built-in methods
*
* @return {@code null}
*/
@Override
public Parameter getMappingTargetParameter() {
return null;
}
/**
* target type parameter mechanism not supported for built-in methods
*
* @return {@code null}
*/
@Override
public Parameter getTargetTypeParameter() {
return null;
}
/**
* object factory mechanism not supported for built-in methods
*
* @return false
*/
@Override
public boolean isObjectFactory() {
return false;
}
@Override
public List<Type> getTypeParameters() {
return Collections.emptyList();
}
/**
* the conversion context is used to format an auxiliary parameter in the method call with context specific
* information such as a date format.
*
* @param conversionContext context
* @return null if no context parameter should be included "null" if there should be an explicit null call
* "'dateFormat'" for instance, to indicate how the build-in method should format the date
*/
public String getContextParameter(ConversionContext conversionContext) {
return null;
}
/**
* hashCode based on class
*
* @return hashCode
*/
@Override
public int hashCode() {
return this.getClass().hashCode();
}
/**
* equals based on class
*
* @param obj other class
*
* @return true when classes are the same
*/
@Override
public boolean equals(Object obj) {
if ( obj == null ) {
return false;
}
return ( getClass() == obj.getClass() );
}
/**
* Analyzes the Java Generic type variables in the parameter do match the type variables in the build in method same
* goes for the returnType.
*
* @param parameter source
* @param returnType target
* @return {@code true}, iff the type variables match
*/
public boolean doTypeVarsMatch(Type parameter, Type returnType) {
return true;
}
/**
* There's currently only one parameter foreseen instead of a list of parameter
*
* @return the parameter
*/
public abstract Parameter getParameter();
@Override
public Accessibility getAccessibility() {
return Accessibility.PRIVATE;
}
@Override
public List<Type> getThrownTypes() {
return Collections.emptyList();
}
@Override
public Type getResultType() {
return getReturnType();
}
@Override
public List<String> getParameterNames() {
List<String> parameterNames = new ArrayList<>( getParameters().size() );
for ( Parameter parameter : getParameters() ) {
parameterNames.add( parameter.getName() );
}
return parameterNames;
}
@Override
public boolean overridesMethod() {
return false;
}
@Override
public ExecutableElement getExecutable() {
return null;
}
@Override
public boolean isStatic() {
return false;
}
@Override
public boolean isDefault() {
return false;
}
@Override
public Type getDefiningType() {
return null;
}
@Override
public boolean isLifecycleCallbackMethod() {
return false;
}
@Override
public boolean isUpdateMethod() {
return false; // irrelevant
}
}
| name |
java | apache__camel | components/camel-jcr/src/main/java/org/apache/camel/component/jcr/JcrComponent.java | {
"start": 1155,
"end": 1586
} | class ____ extends DefaultComponent {
public JcrComponent() {
}
public JcrComponent(CamelContext context) {
super(context);
}
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> properties) throws Exception {
JcrEndpoint endpoint = new JcrEndpoint(uri, this);
setProperties(endpoint, properties);
return endpoint;
}
}
| JcrComponent |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfo.java | {
"start": 868,
"end": 1001
} | class ____ information about the current phase being executed by Index
* Lifecycle Management on the specific index.
*/
public | contains |
java | elastic__elasticsearch | benchmarks/src/test/java/org/elasticsearch/benchmark/vector/scorer/VectorScorerJDKInt7uBenchmarkTests.java | {
"start": 784,
"end": 2611
} | class ____ extends ESTestCase {
final double delta = 1e-3;
final int size;
public VectorScorerJDKInt7uBenchmarkTests(int size) {
this.size = size;
}
@BeforeClass
public static void skipWindows() {
assumeFalse("doesn't work on windows yet", Constants.WINDOWS);
}
static boolean supportsHeapSegments() {
return Runtime.version().feature() >= 22;
}
public void testDotProduct() {
for (int i = 0; i < 100; i++) {
var bench = new VectorScorerJDKInt7uBenchmark();
bench.size = size;
bench.init();
try {
float expected = dotProductScalar(bench.byteArrayA, bench.byteArrayB);
assertEquals(expected, bench.dotProductLucene(), delta);
assertEquals(expected, bench.dotProductNativeWithNativeSeg(), delta);
if (supportsHeapSegments()) {
assertEquals(expected, bench.dotProductNativeWithHeapSeg(), delta);
}
} finally {
bench.teardown();
}
}
}
@ParametersFactory
public static Iterable<Object[]> parametersFactory() {
try {
var params = VectorScorerJDKInt7uBenchmark.class.getField("size").getAnnotationsByType(Param.class)[0].value();
return () -> Arrays.stream(params).map(Integer::parseInt).map(i -> new Object[] { i }).iterator();
} catch (NoSuchFieldException e) {
throw new AssertionError(e);
}
}
/** Computes the dot product of the given vectors a and b. */
static int dotProductScalar(byte[] a, byte[] b) {
int res = 0;
for (int i = 0; i < a.length; i++) {
res += a[i] * b[i];
}
return res;
}
}
| VectorScorerJDKInt7uBenchmarkTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java | {
"start": 7400,
"end": 50359
} | enum ____ {
AMCONTAINER(0), CONTAINER(1), LABELEDCONTAINER(2);
int value;
priority(int value) {
this.value = value;
}
public int getValue() {
return this.value;
}
};
@RegisterExtension
private TestName name = new TestName();
@BeforeEach
@SuppressWarnings("unchecked")
public void setup() {
conf = new CapacitySchedulerConfiguration(new Configuration(false));
conf.setLong(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
3000);
// report "ideal" preempt
conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND,
1.0f);
conf.setFloat(
CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
1.0f);
conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
// FairScheduler doesn't support this test,
// Set CapacityScheduler as the scheduler for this test.
conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
mClock = mock(Clock.class);
mCS = mock(CapacityScheduler.class);
when(mCS.getResourceCalculator()).thenReturn(rc);
lm = mock(RMNodeLabelsManager.class);
try {
when(lm.isExclusiveNodeLabel(anyString())).thenReturn(true);
} catch (IOException e) {
// do nothing
}
when(mCS.getConfiguration()).thenReturn(conf);
rmContext = mock(RMContext.class);
when(mCS.getRMContext()).thenReturn(rmContext);
when(mCS.getPreemptionManager()).thenReturn(new PreemptionManager());
when(rmContext.getNodeLabelManager()).thenReturn(lm);
mDisp = mock(EventHandler.class);
Dispatcher disp = mock(Dispatcher.class);
when(rmContext.getDispatcher()).thenReturn(disp);
when(disp.getEventHandler()).thenReturn(mDisp);
rand = new Random();
long seed = rand.nextLong();
System.out.println(name.getMethodName() + " SEED: " + seed);
rand.setSeed(seed);
appAlloc = 0;
}
private static final int[][] Q_DATA_FOR_IGNORE = new int[][]{
// / A B C
{ 100, 40, 40, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 0, 60, 40 }, // used
{ 0, 0, 0, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
{ 3, 1, 1, 1 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
@Test
public void testIgnore() {
ProportionalCapacityPreemptionPolicy policy =
buildPolicy(Q_DATA_FOR_IGNORE);
policy.editSchedule();
// don't correct imbalances without demand
verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
@Test
public void testProportionalPreemption() {
int[][] qData = new int[][]{
// / A B C D
{ 100, 10, 40, 20, 30 }, // abs
{ 100, 100, 100, 100, 100 }, // maxCap
{ 100, 30, 60, 10, 0 }, // used
{ 45, 20, 5, 20, 0 }, // pending
{ 0, 0, 0, 0, 0 }, // reserved
{ 3, 1, 1, 1, 0 }, // apps
{ -1, 1, 1, 1, 1 }, // req granularity
{ 4, 0, 0, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// A will preempt guaranteed-allocated.
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testMaxCap() {
int[][] qData = new int[][]{
// / A B C
{ 100, 40, 40, 20 }, // abs
{ 100, 100, 45, 100 }, // maxCap
{ 100, 55, 45, 0 }, // used
{ 20, 10, 10, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
{ 2, 1, 1, 0 }, // apps
{ -1, 1, 1, 0 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// despite the imbalance, since B is at maxCap, do not correct
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testPreemptCycle() {
int[][] qData = new int[][]{
// / A B C
{ 100, 40, 40, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 0, 60, 40 }, // used
{ 10, 10, 0, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
{ 3, 1, 1, 1 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// ensure all pending rsrc from A get preempted from other queues
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
}
@Test
public void testExpireKill() {
  // Verifies the preempt -> wait -> kill cycle: containers are first asked
  // to vacate, the request is repeated while the grace period is running,
  // and once PREEMPTION_WAIT_TIME_BEFORE_KILL has elapsed the containers
  // are marked killable.
  final long killTime = 10000L;
  int[][] qData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,   0,  60,  40 },  // used
      {  10,  10,   0,   0 },  // pending
      {   0,   0,   0,   0 },  // reserved
      {   3,   1,   1,   1 },  // apps
      {  -1,   1,   1,   1 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  conf.setLong(
      CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
      killTime);
  ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);

  // ensure all pending rsrc from A get preempted from other queues
  when(mClock.getTime()).thenReturn(0L);
  policy.editSchedule();
  verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));

  // Requests are reiterated while still inside the grace period, so the
  // cumulative Mockito count doubles. (FIX: was times(10), which cannot
  // hold after a second round of 10 identical requests.)
  when(mClock.getTime()).thenReturn(killTime / 2);
  policy.editSchedule();
  verify(mDisp, times(20)).handle(argThat(new IsPreemptionRequestFor(appC)));

  // Past the grace period the third round adds 10 kill events on top of
  // the 20 preemption requests already dispatched.
  when(mClock.getTime()).thenReturn(killTime + 1);
  policy.editSchedule();
  verify(mDisp, times(30)).handle(evtCaptor.capture());
  List<ContainerPreemptEvent> events = evtCaptor.getAllValues();
  // FIX: subList(20, 20) is empty, so the kill-event assertions below were
  // dead code; inspect the last 10 captured events instead.
  for (ContainerPreemptEvent e : events.subList(20, 30)) {
    assertEquals(appC, e.getAppId());
    assertEquals(MARK_CONTAINER_FOR_KILLABLE, e.getType());
  }
}
@Test
public void testDeadzone() {
  // B and C are only slightly above their guarantees; with the dead zone
  // configured at 10% the policy must leave them alone to avoid jitter.
  int[][] queueData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,  39,  43,  21 },  // used
      {  10,  10,   0,   0 },  // pending
      {   0,   0,   0,   0 },  // reserved
      {   3,   1,   1,   1 },  // apps
      {  -1,   1,   1,   1 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  conf.setFloat(
      CapacitySchedulerConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
      (float) 0.1);
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // Over-capacity within the 10% dead zone is ignored entirely.
  verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
@Test
public void testPerQueueDisablePreemption() {
// Two passes over the same layout exercise the per-queue
// disable_preemption flag: first queueB is opted out (all resources must
// come from queueC), then it is opted back in (both queues contribute).
int[][] qData = new int[][]{
//  /   A   B   C
{ 100, 55, 25, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 0, 54, 46 }, // used
{ 10, 10, 0, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
//      appA appB appC
{ 3, 1, 1, 1 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
conf.setPreemptionDisabled(QUEUE_B, true);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// Since queueB is not preemptable, get resources from queueC
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB)));
// Since queueB is preemptable, resources will be preempted
// from both queueB and queueC. Test must be reset so that the mDisp
// event handler will count only events from the following test and not the
// previous one.
setup();
conf.setPreemptionDisabled(QUEUE_B, false);
ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
policy2.editSchedule();
verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB)));
verify(mDisp, times(6)).handle(argThat(new IsPreemptionRequestFor(appC)));
}
@Test
public void testPerQueueDisablePreemptionHierarchical() {
// Hierarchy: root -> {A -> {B, C}, D -> {E, F}}. Pass 1: everything is
// preemptable, so capacity comes from queueB (appA). Pass 2: queueB is
// opted out, so the policy must fall back to queueE (appC).
int[][] qData = new int[][] {
//  /    A               D
//       B    C          E    F
{ 200, 100, 50, 50, 100, 10, 90 }, // abs
{ 200, 200, 200, 200, 200, 200, 200 }, // maxCap
{ 200, 110, 60, 50, 90, 90, 0 }, // used
{ 10, 0, 0, 0, 10, 0, 10 }, // pending
{ 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA appB      appC appD
{ 4, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 2, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// verify capacity taken from queueB (appA), not queueE (appC) despite
// queueE being far over its absolute capacity because queueA (queueB's
// parent) is over capacity and queueD (queueE's parent) is not.
ApplicationAttemptId expectedAttemptOnQueueB =
ApplicationAttemptId.newInstance(
appA.getApplicationId(), appA.getAttemptId());
assertTrue(mCS.getAppsInQueue("queueB").contains(expectedAttemptOnQueueB),
"appA should be running on queueB");
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
// Need to call setup() again to reset mDisp
setup();
// Turn off preemption for queueB and it's children
conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true);
ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
policy2.editSchedule();
ApplicationAttemptId expectedAttemptOnQueueC =
ApplicationAttemptId.newInstance(
appB.getApplicationId(), appB.getAttemptId());
ApplicationAttemptId expectedAttemptOnQueueE =
ApplicationAttemptId.newInstance(
appC.getApplicationId(), appC.getAttemptId());
// Now, all of queueB's (appA) over capacity is not preemptable, so neither
// is queueA's. Verify that capacity is taken from queueE (appC).
assertTrue(mCS.getAppsInQueue("queueC").contains(expectedAttemptOnQueueC),
"appB should be running on queueC");
assertTrue(mCS.getAppsInQueue("queueE").contains(expectedAttemptOnQueueE),
"appC should be running on queueE");
// Resources should have come from queueE (appC) and neither of queueA's
// children.
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
}
@Test
public void testPerQueueDisablePreemptionBroadHierarchical() {
// Hierarchy: root -> {A -> {B, C}, D -> {E, F}, G -> {H, I}} with three
// over-capacity branches. Three passes progressively disable preemption
// (first on queueB, then on queueB and queueE) to show that demand is
// served from the remaining preemptable queues or starves entirely.
int[][] qData = new int[][] {
//  /    A               D               G
//       B    C          E    F          H    I
{1000, 350, 150, 200, 400, 200, 200, 250, 100, 150 }, // abs
{1000,1000,1000,1000,1000,1000,1000,1000,1000,1000 }, // maxCap
{1000, 400, 200, 200, 400, 250, 150, 200, 150, 50 }, // used
{ 50, 0, 0, 0, 50, 0, 50, 0, 0, 0 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA appB      appC appD      appE appF
{ 6, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 3, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// queueF(appD) wants resources, Verify that resources come from queueE(appC)
// because it's a sibling and queueB(appA) because queueA is over capacity.
verify(mDisp, times(27)).handle(argThat(new IsPreemptionRequestFor(appA)));
verify(mDisp, times(23)).handle(argThat(new IsPreemptionRequestFor(appC)));
// Need to call setup() again to reset mDisp
setup();
// Turn off preemption for queueB(appA)
conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true);
ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
policy2.editSchedule();
// Now that queueB(appA) is not preemptable, verify that resources come
// from queueE(appC)
verify(mDisp, times(50)).handle(argThat(new IsPreemptionRequestFor(appC)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
setup();
// Turn off preemption for two of the 3 queues with over-capacity.
conf.setPreemptionDisabled(QUEUE_D_QUEUE_E, true);
conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true);
ProportionalCapacityPreemptionPolicy policy3 = buildPolicy(qData);
policy3.editSchedule();
// Verify that the request was starved out even though queueH(appE) is
// over capacity. This is because queueG (queueH's parent) is NOT
// overcapacity.
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); // queueB
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueC
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC))); // queueE
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appE))); // queueH
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appF))); // queueI
}
@Test
public void testPerQueueDisablePreemptionInheritParent() {
// Hierarchy: root -> {A -> {B, C, D}, E -> {F, G, H}}. Disabling
// preemption on parent queueA must be inherited by all of its children,
// so queueF's demand starves once queueA is opted out.
int[][] qData = new int[][] {
//  /    A                      E
//       B    C    D            F    G    H
{1000, 500, 200, 200, 100, 500, 200, 200, 100 }, // abs (guar)
{1000,1000,1000,1000,1000,1000,1000,1000,1000 }, // maxCap
{1000, 700, 0, 350, 350, 300, 0, 200, 100 }, // used
{ 200, 0, 0, 0, 0, 200, 200, 0, 0 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//           appA appB           appC appD appE
{ 5, 2, 0, 1, 1, 3, 1, 1, 1 }, // apps
{ -1, -1, 1, 1, 1, -1, 1, 1, 1 }, // req granularity
{ 2, 3, 0, 0, 0, 3, 0, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// With all queues preemptable, resources should be taken from queueC(appA)
// and queueD(appB). Resources taken more from queueD(appB) than
// queueC(appA) because it's over its capacity by a larger percentage.
verify(mDisp, times(17)).handle(argThat(new IsPreemptionRequestFor(appA)));
verify(mDisp, times(183)).handle(argThat(new IsPreemptionRequestFor(appB)));
// Turn off preemption for queueA and it's children. queueF(appC)'s request
// should starve.
setup(); // Call setup() to reset mDisp
conf.setPreemptionDisabled(QUEUE_A, true);
ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
policy2.editSchedule();
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); // queueC
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueD
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD))); // queueG
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appE))); // queueH
}
@Test
public void testPerQueuePreemptionNotAllUntouchable() {
// Only queueC is opted out of preemption; its sibling queueD remains
// preemptable, so queueH's demand must be satisfied entirely from queueD.
int[][] qData = new int[][] {
//  /      A                           E
//         B     C     D               F     G     H
{ 2000, 1000, 800, 100, 100, 1000, 500, 300, 200 }, // abs
{ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000 }, // maxCap
{ 2000, 1300, 300, 800, 200, 700, 500, 0, 200 }, // used
{ 300, 0, 0, 0, 0, 300, 0, 300, 0 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//            appA appB appC       appD appE appF
{ 6, 3, 1, 1, 1, 3, 1, 1, 1 }, // apps
{ -1, -1, 1, 1, 1, -1, 1, 1, 1 }, // req granularity
{ 2, 3, 0, 0, 0, 3, 0, 0, 0 }, // subqueues
};
conf.setPreemptionDisabled(QUEUE_A_QUEUE_C, true);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// Although queueC(appB) is way over capacity and is untouchable,
// queueD(appC) is preemptable. Request should be filled from queueD(appC).
verify(mDisp, times(100)).handle(argThat(new IsPreemptionRequestFor(appC)));
}
@Test
public void testPerQueueDisablePreemptionRootDisablesAll() {
// Disabling preemption on the root queue is inherited by the entire
// hierarchy, so every pending request must starve.
int[][] qData = new int[][] {
//  /    A               D               G
//       B    C          E    F          H    I
{1000, 500, 250, 250, 250, 100, 150, 250, 100, 150 }, // abs
{1000,1000,1000,1000,1000,1000,1000,1000,1000,1000 }, // maxCap
{1000, 20, 0, 20, 490, 240, 250, 490, 240, 250 }, // used
{ 200, 200, 200, 0, 0, 0, 0, 0, 0, 0 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA appB      appC appD      appE appF
{ 6, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 3, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
};
conf.setPreemptionDisabled(ROOT, true);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// All queues should be non-preemptable, so request should starve.
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueC
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC))); // queueE
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD))); // queueB
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appE))); // queueH
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appF))); // queueI
}
@Test
public void testPerQueueDisablePreemptionOverAbsMaxCapacity() {
// Even a queue that exceeds its absolute max capacity (queueE) must not
// be preempted when it inherits disable_preemption from its parent.
int[][] qData = new int[][] {
//  /    A               D
//       B    C          E    F
{1000, 725, 360, 365, 275, 17, 258 }, // absCap
{1000,1000,1000,1000, 550, 109,1000 }, // absMaxCap
{1000, 741, 396, 345, 259, 110, 149 }, // used
{ 40, 20, 0, 20, 20, 20, 0 }, // pending
{ 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA appB      appC appD
{ 4, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 2, 2, 0, 0, 2, 0, 0 }, // subqueues
};
// QueueE inherits non-preemption from QueueD
conf.setPreemptionDisabled(QUEUE_D, true);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// appC is running on QueueE. QueueE is over absMaxCap, but is not
// preemptable. Therefore, appC resources should not be preempted.
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC)));
}
@Test
public void testOverCapacityImbalance() {
  // A and B are both above their guarantees with no under-guarantee
  // demand; the policy must not shuffle resources between two queues
  // that are already over capacity.
  int[][] queueData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,  55,  45,   0 },  // used
      {  20,  10,  10,   0 },  // pending
      {   0,   0,   0,   0 },  // reserved
      {   2,   1,   1,   0 },  // apps
      {  -1,   1,   1,   0 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // No preemption between over-capacity queues.
  verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testNaturalTermination() {
  // With a natural-termination factor of 0.1 the policy only chases 10%
  // of the gap per round, so this small imbalance between over-capacity
  // queues rounds down to no action at all.
  int[][] queueData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,  55,  45,   0 },  // used
      {  20,  10,  10,   0 },  // pending
      {   0,   0,   0,   0 },  // reserved
      {   2,   1,   1,   0 },  // apps
      {  -1,   1,   1,   0 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  conf.setFloat(
      CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
      (float) 0.1);
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // No events are dispatched for the 10% imbalance.
  verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
@Test
public void testObserveOnly() {
  // In observe-only mode the policy computes its ideal allocation but must
  // never dispatch a preemption event, however large the imbalance.
  int[][] queueData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,  90,  10,   0 },  // used
      {  80,  10,  20,  50 },  // pending
      {   0,   0,   0,   0 },  // reserved
      {   2,   1,   1,   0 },  // apps
      {  -1,   1,   1,   0 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  conf.setBoolean(CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
      true);
  when(mCS.getConfiguration()).thenReturn(
      new CapacitySchedulerConfiguration(conf));
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // Even this severe imbalance produces no events.
  verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
@Test
public void testHierarchical() {
  // Hierarchy: root -> {A -> {B, C}, D -> {E, F}}. Parent A is over its
  // guarantee while parent D is under, so capacity must be reclaimed from
  // A's child B (appA) rather than from E (appC), even though E is far
  // over its own absolute guarantee.
  int[][] queueData = new int[][] {
      //  /    A    B    C    D    E    F
      { 200, 100,  50,  50, 100,  10,  90 },  // abs
      { 200, 200, 200, 200, 200, 200, 200 },  // maxCap
      { 200, 110,  60,  50,  90,  90,   0 },  // used
      {  10,   0,   0,   0,  10,   0,  10 },  // pending
      {   0,   0,   0,   0,   0,   0,   0 },  // reserved
      {   4,   2,   1,   1,   2,   1,   1 },  // apps
      {  -1,  -1,   1,   1,  -1,   1,   1 },  // req granularity
      {   2,   2,   0,   0,   2,   0,   0 },  // subqueues
  };
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testHierarchicalWithReserved() {
  // Same layout as testHierarchical but with reservations present; the
  // outcome must be unchanged: capacity is still taken from B (appA), not
  // from the far-over-guarantee E.
  int[][] queueData = new int[][] {
      //  /    A    B    C    D    E    F
      { 200, 100,  50,  50, 100,  10,  90 },  // abs
      { 200, 200, 200, 200, 200, 200, 200 },  // maxCap
      { 200, 110,  60,  50,  90,  90,   0 },  // used
      {  10,   0,   0,   0,  10,   0,  10 },  // pending
      {  40,  25,  15,  10,  15,  15,   0 },  // reserved
      {   4,   2,   1,   1,   2,   1,   1 },  // apps
      {  -1,  -1,   1,   1,  -1,   1,   1 },  // req granularity
      {   2,   2,   0,   0,   2,   0,   0 },  // subqueues
  };
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testZeroGuar() {
  // Queue B has a zero guarantee; its usage must not trigger preemption
  // of appA while the cluster is not fully committed.
  int[][] queueData = new int[][] {
      //  /    A    B    C    D    E    F
      { 200, 100,   0,  99, 100,  10,  90 },  // abs
      { 200, 200, 200, 200, 200, 200, 200 },  // maxCap
      { 170,  80,  60,  20,  90,  90,   0 },  // used
      {  10,   0,   0,   0,  10,   0,  10 },  // pending
      {   0,   0,   0,   0,   0,   0,   0 },  // reserved
      {   4,   2,   1,   1,   2,   1,   1 },  // apps
      {  -1,  -1,   1,   1,  -1,   1,   1 },  // req granularity
      {   2,   2,   0,   0,   2,   0,   0 },  // subqueues
  };
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // Nothing is preempted from appA.
  verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testZeroGuarOverCap() {
// Several queues have a zero guarantee; such queues are considered
// always satisfied and must never preempt from one another.
int[][] qData = new int[][] {
//  /    A               D    E    F
//       B    C
{ 200, 100, 0, 100, 0, 100, 100 }, // abs
{ 200, 200, 200, 200, 200, 200, 200 }, // maxCap
{ 170, 170, 60, 20, 90, 0, 0 }, // used
{ 85, 50, 30, 10, 10, 20, 20 }, // pending
{ 0, 0, 0, 0, 0, 0, 0 }, // reserved
{ 4, 3, 1, 1, 1, 1, 1 }, // apps
{ -1, -1, 1, 1, 1, -1, 1 }, // req granularity
{ 2, 3, 0, 0, 0, 1, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// No preemption should happen because zero guaranteed queues should be
// treated as always satisfied, they should not preempt from each other.
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD)));
}
@Test
public void testHierarchicalLarge() {
// Hierarchy: root -> {A -> {B, C}, D -> {E, F}, G -> {H, I}}. queueI's
// demand is split between the two over-capacity branches A and D.
int[][] qData = new int[][] {
//  /    A               D               G
//       B    C          E    F          H    I
{ 400, 200, 60, 140, 100, 70, 30, 100, 10, 90 }, // abs
{ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400 }, // maxCap
{ 400, 210, 70, 140, 100, 50, 50, 90, 90, 0 }, // used
{ 15, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA appB      appC appD      appE appF
{ 6, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 3, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// verify capacity taken from A1, not H1 despite H1 being far over
// its absolute guaranteed capacity
// XXX note: compensating for rounding error in Resources.multiplyTo
// which is likely triggered since we use small numbers for readability
verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appA)));
verify(mDisp, times(6)).handle(argThat(new IsPreemptionRequestFor(appE)));
}
@Test
public void testContainerOrdering(){
  // Verifies that FifoCandidatesSelector.sortContainers orders by
  // priority first and by reverse container id second.
  List<RMContainer> containers = new ArrayList<RMContainer>();
  ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
      ApplicationId.newInstance(TS, 10), 0);
  // create a set of containers: mockContainer(attempt, containerId, resource, priority)
  RMContainer rm1 = mockContainer(appAttId, 5, mock(Resource.class), 3);
  RMContainer rm2 = mockContainer(appAttId, 3, mock(Resource.class), 3);
  RMContainer rm3 = mockContainer(appAttId, 2, mock(Resource.class), 2);
  RMContainer rm4 = mockContainer(appAttId, 1, mock(Resource.class), 2);
  RMContainer rm5 = mockContainer(appAttId, 4, mock(Resource.class), 1);
  // insert them in non-sorted order
  containers.add(rm3);
  containers.add(rm2);
  containers.add(rm1);
  containers.add(rm5);
  containers.add(rm4);
  // sort them
  FifoCandidatesSelector.sortContainers(containers);
  // verify the "priority"-first, "reverse container-id"-second
  // ordering is enforced correctly.
  // FIX: plain `assert` statements are no-ops unless the JVM runs with
  // -ea, so these checks could silently be skipped; use JUnit assertions.
  assertEquals(rm1, containers.get(0));
  assertEquals(rm2, containers.get(1));
  assertEquals(rm3, containers.get(2));
  assertEquals(rm4, containers.get(3));
  assertEquals(rm5, containers.get(4));
}
@Test
public void testPolicyInitializeAfterSchedulerInitialized() {
  @SuppressWarnings("resource")
  MockRM rm = new MockRM(conf);
  rm.init(conf);
  // ProportionalCapacityPreemptionPolicy should be initialized after the
  // CapacityScheduler. Locate the SchedulingMonitor through the
  // SchedulingMonitorManager and inspect the policy's ResourceCalculator:
  // a non-null calculator shows the policy was initialized after the
  // scheduler was ready.
  CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
  SchedulingMonitorManager monitorManager = scheduler.getSchedulingMonitorManager();
  Service monitorService = monitorManager.getAvailableSchedulingMonitor();
  if (!(monitorService instanceof SchedulingMonitor)) {
    fail("Failed to find SchedulingMonitor service, please check what happened");
  }
  ProportionalCapacityPreemptionPolicy policy =
      (ProportionalCapacityPreemptionPolicy)
          ((SchedulingMonitor) monitorService).getSchedulingEditPolicy();
  assertNotNull(policy.getResourceCalculator());
}
@Test
public void testSkipAMContainer() {
  // With AM containers protected (setAMContainer = true), the selector
  // must take all non-AM containers first and then fall back to appB to
  // cover the deficit left by the spared AM containers.
  int[][] queueData = new int[][] {
      //  /    A    B
      { 100,  50,  50 },  // abs
      { 100, 100, 100 },  // maxcap
      { 100, 100,   0 },  // used
      {  70,  20,  50 },  // pending
      {   0,   0,   0 },  // reserved
      {   5,   4,   1 },  // apps
      {  -1,   1,   1 },  // req granularity
      {   2,   0,   0 },  // subqueues
  };
  setAMContainer = true;
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // Skipping the AM container leaves 24 preemptable containers in appD...
  verify(mDisp, times(24)).handle(argThat(new IsPreemptionRequestFor(appD)));
  // ...and likewise 24 in appC.
  verify(mDisp, times(24)).handle(argThat(new IsPreemptionRequestFor(appC)));
  // Because the AM containers of appC and appD were spared, 2 containers
  // of appB must be preempted to make up the difference.
  verify(mDisp, times(2)).handle(argThat(new IsPreemptionRequestFor(appB)));
  setAMContainer = false;
}
@Test
public void testPreemptSkippedAMContainers() {
  // With a large deficit, the AM containers of the youngest apps (appC,
  // appD) are preempted as well, while the older apps still have their
  // AM containers spared.
  int[][] queueData = new int[][] {
      //  /    A    B
      { 100,  10,  90 },  // abs
      { 100, 100, 100 },  // maxcap
      { 100, 100,   0 },  // used
      {  70,  20,  90 },  // pending
      {   0,   0,   0 },  // reserved
      {   5,   4,   1 },  // apps
      {  -1,   5,   5 },  // req granularity
      {   2,   0,   0 },  // subqueues
  };
  setAMContainer = true;
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // All 5 containers of appD are preempted, AM container included...
  verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appD)));
  // ...and the same for appC.
  verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appC)));
  // appB keeps its AM container, losing the other 4...
  verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB)));
  // ...as does appA.
  verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appA)));
  setAMContainer = false;
}
@Test
public void testAMResourcePercentForSkippedAMContainers() {
  // Same layout as testPreemptSkippedAMContainers, but with the AM
  // resource percent raised to 50%, capping how much AM capacity may be
  // protected from preemption.
  int[][] queueData = new int[][] {
      //  /    A    B
      { 100,  10,  90 },  // abs
      { 100, 100, 100 },  // maxcap
      { 100, 100,   0 },  // used
      {  70,  20,  90 },  // pending
      {   0,   0,   0 },  // reserved
      {   5,   4,   1 },  // apps
      {  -1,   5,   5 },  // req granularity
      {   2,   0,   0 },  // subqueues
  };
  setAMContainer = true;
  setAMResourcePercent = 0.5f;
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // AMResourcePercent is 50% of the cluster so maxAMCapacity is 5Gb; with
  // 20GB of AM containers in use against a 10Gb queue capacity, two AM
  // containers must go. All 5 containers of appD are preempted...
  verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appD)));
  // ...and, AM container included, all 5 of appC.
  verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appC)));
  // appB keeps its AM container, losing the other 4...
  verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB)));
  // ...as does appA.
  verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appA)));
  setAMContainer = false;
}
@Test
public void testPreemptionWithVCoreResource() {
  // DominantResourceCalculator path: resource quantities are given as
  // "memory:vcores" strings instead of plain integers.
  int[][] queueData = new int[][]{
      //  /    A    B
      { 100, 100, 100 },  // maxcap
      {   5,   1,   1 },  // apps
      {   2,   0,   0 },  // subqueues
  };
  String[][] resourceData = new String[][]{
      //       /         A         B
      { "100:100",   "50:50",  "50:50" },  // abs
      {  "10:100",  "10:100",      "0" },  // used
      {   "70:20",   "70:20", "10:100" },  // pending
      {       "0",       "0",      "0" },  // reserved
      {      "-1",    "1:10",   "1:10" },  // req granularity
  };
  // The trailing TRUE selects the DominantResourceCalculator.
  ProportionalCapacityPreemptionPolicy preemptionPolicy =
      buildPolicy(queueData, resourceData, true);
  preemptionPolicy.editSchedule();
  // Four containers are preempted from appA.
  verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testHierarchicalLarge3Levels() {
// Three-level hierarchy:
// root -> {A -> {B, C -> {D, E}}, F -> {G, H}, I -> {J, K}}.
// Checks that a parent only exposes as preemptable the extra it actually
// holds, even when a grand-child is further over guarantee.
int[][] qData = new int[][] {
//  /    A                    F               I
//       B    C               G    H          J    K
//            D    E
{ 400, 200, 60, 140, 100, 40, 100, 70, 30, 100, 10, 90 }, // abs
{ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400 }, // maxCap
{ 400, 210, 60, 150, 100, 50, 100, 50, 50, 90, 10, 80 }, // used
{ 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, // pending
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
//      appA      appB appC      appD appE      appF appG
{ 7, 3, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// XXX note: compensating for rounding error in Resources.multiplyTo
// which is likely triggered since we use small numbers for readability
//run with Logger.getRootLogger().setLevel(Level.DEBUG);
verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appC)));
assertEquals(10, policy
.getQueuePartitions().get("root.queueA.queueC.queueE").get("")
.preemptableExtra.getMemorySize());
//2nd level child(E) preempts 10, but parent A has only 9 extra
//check the parent can preempt only the extra from > 2 level child
TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get(
"root.queueA").get("");
assertEquals(0, tempQueueAPartition.untouchableExtra.getMemorySize());
long extraForQueueA =
tempQueueAPartition.getUsed().getMemorySize() - tempQueueAPartition
.getGuaranteed().getMemorySize();
assertEquals(extraForQueueA,
tempQueueAPartition.preemptableExtra.getMemorySize());
}
@Test
public void testHierarchicalLarge3LevelsWithReserved() {
// Same three-level hierarchy as testHierarchicalLarge3Levels, but with
// reservations in place; the preemptable-extra accounting must hold.
int[][] qData = new int[][] {
//  /    A                    F               I
//       B    C               G    H          J    K
//            D    E
{ 400, 200, 60, 140, 100, 40, 100, 70, 30, 100, 10, 90 }, // abs
{ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400 }, // maxCap
{ 400, 210, 60, 150, 100, 50, 100, 50, 50, 90, 10, 80 }, // used
{ 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, // pending
{ 50, 30, 20, 10, 5, 5, 0, 0, 0, 10, 10, 0 }, // reserved
//      appA      appB appC      appD appE      appF appG
{ 7, 3, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appC)));
assertEquals(10, policy
.getQueuePartitions().get("root.queueA.queueC.queueE")
.get("").preemptableExtra.getMemorySize());
//2nd level child(E) preempts 10, but parent A has only 9 extra
//check the parent can preempt only the extra from > 2 level child
TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get(
"root.queueA").get("");
assertEquals(0, tempQueueAPartition.untouchableExtra.getMemorySize());
long extraForQueueA =
tempQueueAPartition.getUsed().getMemorySize() - tempQueueAPartition
.getGuaranteed().getMemorySize();
assertEquals(extraForQueueA,
tempQueueAPartition.preemptableExtra.getMemorySize());
}
@Test
public void testPreemptionNotHappenForSingleReservedQueue() {
  // When reserved resources already exceed pending demand and only one
  // queue is asking, preemption must not fire.
  int[][] queueData = new int[][]{
      //  /    A    B    C
      { 100,  40,  40,  20 },  // abs
      { 100, 100, 100, 100 },  // maxCap
      { 100,  70,   0,   0 },  // used
      {  10,  30,   0,   0 },  // pending
      {   0,  50,   0,   0 },  // reserved
      {   1,   1,   0,   0 },  // apps
      {  -1,   1,   1,   1 },  // req granularity
      {   3,   0,   0,   0 },  // subqueues
  };
  ProportionalCapacityPreemptionPolicy preemptionPolicy = buildPolicy(queueData);
  preemptionPolicy.editSchedule();
  // No preemption happens.
  verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
}
@Test
public void testRefreshPreemptionProperties() throws Exception {
  // The policy must pick up a changed monitoring interval and
  // observe-only flag on the next editSchedule() without being rebuilt.
  ProportionalCapacityPreemptionPolicy policy =
      buildPolicy(Q_DATA_FOR_IGNORE);

  // Defaults apply before any refresh.
  assertEquals(
      CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL,
      policy.getMonitoringInterval());
  assertEquals(
      CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY,
      policy.isObserveOnly());

  // Swap in an updated configuration behind the scheduler mock.
  long refreshedInterval = 5000;
  boolean refreshedObserveOnly = true;
  CapacitySchedulerConfiguration updatedConf =
      new CapacitySchedulerConfiguration(conf);
  updatedConf.setLong(
      CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
      refreshedInterval);
  updatedConf.setBoolean(CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
      refreshedObserveOnly);
  when(mCS.getConfiguration()).thenReturn(updatedConf);

  // The next scheduling pass must reflect the refreshed values.
  policy.editSchedule();
  assertEquals(refreshedInterval, policy.getMonitoringInterval());
  assertEquals(refreshedObserveOnly, policy.isObserveOnly());
}
@Test
public void testLeafQueueNameExtractionWithFlexibleAQC() throws Exception {
  // A dynamic (flexible auto-created) parent queue must not be treated as
  // a leaf-queue candidate by the policy.
  ProportionalCapacityPreemptionPolicy policy = buildPolicy(Q_DATA_FOR_IGNORE);
  ParentQueue root = (ParentQueue) mCS.getRootQueue();
  root.addDynamicParentQueue("childlessFlexible");
  ParentQueue dynamicParent = setupDynamicParentQueue("root.dynamicParent", true);
  extendRootQueueWithMock(root, dynamicParent);
  policy.editSchedule();
  // FIX: the failure message referred to "root.dynamicLegacyParent" (the
  // queue of the legacy variant of this test); name the queue actually
  // created here.
  assertFalse(policy.getLeafQueueNames().contains("root.dynamicParent"),
      "root.dynamicParent" + " should not be a LeafQueue candidate");
}
@Test
public void testLeafQueueNameExtractionWithLegacyAQC() throws Exception {
  // A legacy auto-created parent queue must not show up among the
  // policy's leaf-queue candidates either.
  ProportionalCapacityPreemptionPolicy policy = buildPolicy(Q_DATA_FOR_IGNORE);
  ParentQueue root = (ParentQueue) mCS.getRootQueue();
  root.addDynamicParentQueue("childlessLegacy");
  ParentQueue dynamicParent =
      setupDynamicParentQueue("root.dynamicLegacyParent", false);
  extendRootQueueWithMock(root, dynamicParent);
  policy.editSchedule();
  assertFalse(policy.getLeafQueueNames().contains("root.dynamicLegacyParent"),
      "root.dynamicLegacyParent" + " should not be a LeafQueue candidate");
}
/**
 * Builds a mocked dynamic {@link ParentQueue} reporting the given path.
 *
 * @param queuePath  full queue path the mock should report
 * @param isFlexible true to stub flexible AQC eligibility, false for the
 *                   legacy auto-queue-creation flavour
 * @return the mocked parent queue
 */
private ParentQueue setupDynamicParentQueue(String queuePath, boolean isFlexible) {
  ParentQueue dynamicQueue = mockParentQueue(null, 0, new LinkedList<>());
  mockQueueFields(dynamicQueue, queuePath);
  // Mark the queue as auto-created via the flavour the caller asked for.
  if (isFlexible) {
    when(dynamicQueue.isEligibleForAutoQueueCreation()).thenReturn(true);
  } else {
    when(dynamicQueue.isEligibleForLegacyAutoQueueCreation()).thenReturn(true);
  }
  return dynamicQueue;
}
/**
 * Prepends {@code mockQueue} to root's existing children and re-stubs
 * {@code getChildQueues()} to return the combined list.
 */
private void extendRootQueueWithMock(ParentQueue root, ParentQueue mockQueue) {
  ArrayList<CSQueue> children = new ArrayList<>();
  children.add(mockQueue);
  children.addAll(root.getChildQueues());
  when(root.getChildQueues()).thenReturn(children);
}
/**
 * Stubs the minimal set of queue fields (path, capacities, resource
 * quotas and resource usage) that the policy reads from a queue mock.
 */
private void mockQueueFields(ParentQueue queue, String queuePath) {
  when(queue.getQueuePath()).thenReturn(queuePath);
  when(queue.getQueueCapacities()).thenReturn(new QueueCapacities(false));

  // Effective min/max quotas, set for both the default key and NO_LABEL.
  QueueResourceQuotas quotas = new QueueResourceQuotas();
  quotas.setEffectiveMaxResource(Resource.newInstance(1, 1));
  quotas.setEffectiveMinResource(Resources.createResource(1));
  quotas.setEffectiveMaxResource(RMNodeLabelsManager.NO_LABEL, Resource.newInstance(1, 1));
  quotas.setEffectiveMinResource(RMNodeLabelsManager.NO_LABEL, Resources.createResource(1));
  when(queue.getQueueResourceQuotas()).thenReturn(quotas);

  when(queue.getEffectiveCapacity(RMNodeLabelsManager.NO_LABEL))
      .thenReturn(Resources.createResource(1));
  when(queue.getEffectiveMaxCapacity(RMNodeLabelsManager.NO_LABEL))
      .thenReturn(Resource.newInstance(1, 1));

  // Report 1024 used and 1024 reserved (presumably just needs to be
  // non-zero for the policy's accounting — confirm against callers).
  ResourceUsage resourceUsage = new ResourceUsage();
  resourceUsage.setUsed(Resources.createResource(1024));
  resourceUsage.setReserved(Resources.createResource(1024));
  when(queue.getQueueResourceUsage()).thenReturn(resourceUsage);
}
static | priority |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/AbstractMethodMetadataTests.java | {
"start": 7526,
"end": 7614
} | class ____ {
@Tag
public abstract String test();
}
public static | WithAbstractMethod |
java | apache__camel | components/camel-jaxb/src/main/java/org/apache/camel/converter/jaxb/JaxbDataFormat.java | {
"start": 22153,
"end": 26200
} | class ____ has been JAXB annotated
ClassLoader cl = camelContext.getApplicationContextClassLoader();
if (cl != null) {
if (contextPathIsClassName) {
LOG.debug("Creating JAXBContext with className: {} and ApplicationContextClassLoader: {}",
contextPath, cl);
Class clazz = camelContext.getClassResolver().resolveMandatoryClass(contextPath, cl);
return JAXBContext.newInstance(clazz);
} else {
LOG.debug("Creating JAXBContext with contextPath: {} and ApplicationContextClassLoader: {}",
contextPath, cl);
return JAXBContext.newInstance(contextPath, cl);
}
} else {
if (contextPathIsClassName) {
LOG.debug("Creating JAXBContext with className: {}", contextPath);
Class clazz = camelContext.getClassResolver().resolveMandatoryClass(contextPath);
return JAXBContext.newInstance(clazz);
} else {
LOG.debug("Creating JAXBContext with contextPath: {}", contextPath);
return JAXBContext.newInstance(contextPath);
}
}
} else {
LOG.debug("Creating JAXBContext");
return JAXBContext.newInstance();
}
}
protected Unmarshaller createUnmarshaller() throws JAXBException {
Unmarshaller unmarshaller = getContext().createUnmarshaller();
if (schema != null) {
unmarshaller.setSchema(cachedSchema);
unmarshaller.setEventHandler((ValidationEvent event) -> {
// continue if the severity is lower than the configured level
return event.getSeverity() < getSchemaSeverityLevel();
});
}
return unmarshaller;
}
protected Marshaller createMarshaller() throws JAXBException {
Marshaller marshaller = getContext().createMarshaller();
if (schema != null) {
marshaller.setSchema(cachedSchema);
marshaller.setEventHandler((ValidationEvent event) -> {
// continue if the severity is lower than the configured level
return event.getSeverity() < getSchemaSeverityLevel();
});
}
return marshaller;
}
private Schema createSchema(Source[] sources) throws SAXException {
SchemaFactory factory = createSchemaFactory(accessExternalSchemaProtocols);
return factory.newSchema(sources);
}
private Source[] getSources() throws FileNotFoundException, MalformedURLException {
// we support multiple schema by delimiting by comma
String[] schemas = schema.split(",");
Source[] sources = new Source[schemas.length];
for (int i = 0; i < schemas.length; i++) {
URL schemaUrl = ResourceHelper.resolveMandatoryResourceAsUrl(camelContext, schemas[i]);
sources[i] = new StreamSource(schemaUrl.toExternalForm());
}
return sources;
}
private static SchemaFactory createSchemaFactory(String protocols) throws SAXException {
SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
if (protocols == null || "false".equals(protocols) || "none".equals(protocols)) {
protocols = "";
LOG.debug("Configuring SchemaFactory to not allow access to external DTD/Schema");
} else {
LOG.debug("Configuring SchemaFactory to allow access to external DTD/Schema using protocols: {}", protocols);
}
factory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, protocols);
factory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, protocols);
return factory;
}
}
| which |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java | {
"start": 1848,
"end": 2492
} | class ____ {
static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
static BlockReaderTestUtil util = null;
static DFSClient dfsClient = null;
static final int FILE_SIZE_K = 256;
static Random rand = null;
static final int DEFAULT_REPLICATION_FACTOR = 2;
protected boolean verifyChecksums = true;
static {
// The client-trace log ends up causing a lot of blocking threads
// in this when it's being used as a performance benchmark.
GenericTestUtils.setLogLevel(
LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace"),
Level.WARN);
}
private | TestParallelReadUtil |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLAlterIndexStatement.java | {
"start": 810,
"end": 2959
} | class ____ extends SQLStatementImpl implements SQLAlterStatement {
private SQLName name;
private SQLName renameTo;
private SQLExprTableSource table;
private boolean compile;
private Boolean enable;
protected boolean unusable;
private Boolean monitoringUsage;
private Rebuild rebuild;
private SQLExpr parallel;
private List<SQLAssignItem> partitions = new ArrayList<SQLAssignItem>();
protected SQLPartitionBy dbPartitionBy;
@Override
public void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, name);
acceptChild(visitor, renameTo);
acceptChild(visitor, table);
acceptChild(visitor, partitions);
acceptChild(visitor, rebuild);
acceptChild(visitor, parallel);
}
visitor.endVisit(this);
}
public SQLName getRenameTo() {
return renameTo;
}
public void setRenameTo(SQLName renameTo) {
this.renameTo = renameTo;
}
public SQLExpr getParallel() {
return parallel;
}
public void setParallel(SQLExpr parallel) {
this.parallel = parallel;
}
public Boolean getMonitoringUsage() {
return monitoringUsage;
}
public void setMonitoringUsage(Boolean monitoringUsage) {
this.monitoringUsage = monitoringUsage;
}
public Rebuild getRebuild() {
return rebuild;
}
public void setRebuild(Rebuild rebuild) {
this.rebuild = rebuild;
}
public SQLName getName() {
return name;
}
public void setName(SQLName name) {
this.name = name;
}
public boolean isCompile() {
return compile;
}
public void setCompile(boolean compile) {
this.compile = compile;
}
public Boolean getEnable() {
return enable;
}
public void setEnable(Boolean enable) {
this.enable = enable;
}
public boolean isUnusable() {
return unusable;
}
public void setUnusable(boolean unusable) {
this.unusable = unusable;
}
public static | SQLAlterIndexStatement |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/env/AbstractPropertyResolver.java | {
"start": 1358,
"end": 1530
} | class ____ resolving properties against any underlying source.
*
* @author Chris Beams
* @author Juergen Hoeller
* @author Sam Brannen
* @since 3.1
*/
public abstract | for |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/IntegerFieldDeserializerTest.java | {
"start": 145,
"end": 883
} | class ____ extends TestCase {
public void test_0() throws Exception {
Entity a = JSON.parseObject("{f1:null, f2:null}", Entity.class);
Assert.assertEquals(124, a.getF1());
Assert.assertEquals(null, a.getF2());
}
public void test_1() throws Exception {
Entity a = JSON.parseObject("{f1:22, f2:'33'}", Entity.class);
Assert.assertEquals(22, a.getF1());
Assert.assertEquals(33, a.getF2().intValue());
}
public void test_2() throws Exception {
Entity a = JSON.parseObject("{f1:'22', f2:33}", Entity.class);
Assert.assertEquals(22, a.getF1());
Assert.assertEquals(33, a.getF2().intValue());
}
public static | IntegerFieldDeserializerTest |
java | quarkusio__quarkus | extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/supports/GrpcClientConfigProvider.java | {
"start": 611,
"end": 2255
} | class ____ {
@Inject
GrpcConfiguration config;
public GrpcClientConfiguration getConfiguration(String name) {
Map<String, GrpcClientConfiguration> clients = config.clients();
if (clients == null) {
return null;
} else {
return clients.get(name);
}
}
public GrpcServerConfiguration getServerConfiguration() {
return config.server();
}
AbstractStub<?> adjustCallOptions(String serviceName, AbstractStub<?> stub) {
GrpcClientConfiguration clientConfig = config.clients() != null ? config.clients().get(serviceName) : null;
if (clientConfig != null) {
if (clientConfig.compression().isPresent()) {
stub = stub.withCompression(clientConfig.compression().get());
}
if (clientConfig.deadline().isPresent()) {
Duration deadline = clientConfig.deadline().get();
stub = stub.withDeadlineAfter(deadline.toMillis(), TimeUnit.MILLISECONDS);
}
}
return stub;
}
public static AbstractStub<?> configureStub(String serviceName, AbstractStub<?> stub) {
return Arc.container().instance(GrpcClientConfigProvider.class).get().adjustCallOptions(serviceName, stub);
}
public static AbstractStub<?> addBlockingClientInterceptor(AbstractStub<?> stub) {
return stub.withInterceptors(new EventLoopBlockingCheckInterceptor());
}
public static BiFunction<String, AbstractStub<?>, AbstractStub<?>> getStubConfigurator() {
return GrpcClientConfigProvider::configureStub;
}
}
| GrpcClientConfigProvider |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/synthetic/SyntheticBeanBuildItemProxyTest.java | {
"start": 4998,
"end": 5256
} | class ____ {
private String value;
public SynthBean() {
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
}
| SynthBean |
java | google__guava | android/guava-tests/test/com/google/common/primitives/LongArrayAsListTest.java | {
"start": 1633,
"end": 3172
} | class ____ extends TestCase {
private static List<Long> asList(Long[] values) {
long[] temp = new long[values.length];
for (int i = 0; i < values.length; i++) {
temp[i] = checkNotNull(values[i]); // checkNotNull for GWT (do not optimize).
}
return Longs.asList(temp);
}
@J2ktIncompatible
@GwtIncompatible // suite
public static Test suite() {
List<ListTestSuiteBuilder<Long>> builders =
ImmutableList.of(
ListTestSuiteBuilder.using(new LongsAsListGenerator()).named("Longs.asList"),
ListTestSuiteBuilder.using(new LongsAsListHeadSubListGenerator())
.named("Longs.asList, head subList"),
ListTestSuiteBuilder.using(new LongsAsListTailSubListGenerator())
.named("Longs.asList, tail subList"),
ListTestSuiteBuilder.using(new LongsAsListMiddleSubListGenerator())
.named("Longs.asList, middle subList"));
TestSuite suite = new TestSuite();
for (ListTestSuiteBuilder<Long> builder : builders) {
suite.addTest(
builder
.withFeatures(
CollectionSize.ONE,
CollectionSize.SEVERAL,
CollectionFeature.RESTRICTS_ELEMENTS,
ListFeature.SUPPORTS_SET)
.createTestSuite());
}
return suite;
}
// Test generators. To let the GWT test suite generator access them, they need to be
// public named classes with a public default constructor.
public static final | LongArrayAsListTest |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoTableSourceScanRule.java | {
"start": 1616,
"end": 5010
} | class ____ extends PushFilterIntoSourceScanRuleBase {
public static final PushFilterIntoTableSourceScanRule INSTANCE =
new PushFilterIntoTableSourceScanRule();
public PushFilterIntoTableSourceScanRule() {
super(
operand(Filter.class, operand(LogicalTableScan.class, none())),
"PushFilterIntoTableSourceScanRule");
}
@Override
public boolean matches(RelOptRuleCall call) {
if (!super.matches(call)) {
return false;
}
Filter filter = call.rel(0);
if (filter.getCondition() == null) {
return false;
}
LogicalTableScan scan = call.rel(1);
TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
return canPushdownFilter(tableSourceTable);
}
@Override
public void onMatch(RelOptRuleCall call) {
Filter filter = call.rel(0);
LogicalTableScan scan = call.rel(1);
TableSourceTable table = scan.getTable().unwrap(TableSourceTable.class);
pushFilterIntoScan(call, filter, scan, table);
}
private void pushFilterIntoScan(
RelOptRuleCall call,
Filter filter,
LogicalTableScan scan,
FlinkPreparingTableBase relOptTable) {
RelBuilder relBuilder = call.builder();
Tuple2<RexNode[], RexNode[]> extractedPredicates =
FlinkRexUtil.extractPredicates(
filter.getInput().getRowType().getFieldNames().toArray(new String[0]),
filter.getCondition(),
scan,
relBuilder.getRexBuilder());
RexNode[] convertiblePredicates = extractedPredicates._1;
RexNode[] unconvertedPredicates = extractedPredicates._2;
if (convertiblePredicates.length == 0) {
// no condition can be translated to expression
return;
}
Tuple2<SupportsFilterPushDown.Result, TableSourceTable> scanAfterPushdownWithResult =
resolveFiltersAndCreateTableSourceTable(
convertiblePredicates,
relOptTable.unwrap(TableSourceTable.class),
scan,
relBuilder);
SupportsFilterPushDown.Result result = scanAfterPushdownWithResult._1;
TableSourceTable tableSourceTable = scanAfterPushdownWithResult._2;
LogicalTableScan newScan =
LogicalTableScan.create(scan.getCluster(), tableSourceTable, scan.getHints());
if (result.getRemainingFilters().isEmpty() && unconvertedPredicates.length == 0) {
call.transformTo(newScan);
} else {
RexNode remainingCondition =
createRemainingCondition(
relBuilder, result.getRemainingFilters(), unconvertedPredicates);
RexNode simplifiedRemainingCondition =
FlinkRexUtil.simplify(
relBuilder.getRexBuilder(),
remainingCondition,
filter.getCluster().getPlanner().getExecutor());
Filter newFilter =
filter.copy(filter.getTraitSet(), newScan, simplifiedRemainingCondition);
call.transformTo(newFilter);
}
}
}
| PushFilterIntoTableSourceScanRule |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/memory/DataOutputViewStreamWrapper.java | {
"start": 1001,
"end": 1099
} | class ____ turns an {@link OutputStream} into a {@link DataOutputView}. */
@PublicEvolving
public | that |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.