language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/config/filter/IConfigRequest.java | {
"start": 727,
"end": 1164
} | interface ____ {
/**
* put param.
*
* @param key key
* @param value value
*/
void putParameter(String key, Object value);
/**
* get param.
*
* @param key key
* @return value
*/
Object getParameter(String key);
/**
* get config context.
*
* @return {@link IConfigContext}
*/
IConfigContext getConfigContext();
}
| IConfigRequest |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/CachedResultsTest.java | {
"start": 6784,
"end": 6971
} | class ____ {
@Produces
@MyQualifier
EmbeddingModel produce() {
return new EmbeddingModelImpl();
}
}
public static | EmbeddingModelProducer |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/db2/DB2SelectTest_34.java | {
"start": 1059,
"end": 2869
} | class ____ extends DB2Test {
public void test_0() throws Exception {
String sql = "SELECT * INTO Persons_backup FROM Persons";
DB2StatementParser parser = new DB2StatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals("Persons", stmt.getSelect().getQueryBlock().getFrom().toString());
assertEquals(1, statementList.size());
DB2SchemaStatVisitor visitor = new DB2SchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(2, visitor.getTables().size());
assertEquals(1, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.containsTable("Persons_backup"));
// assertTrue(visitor.getColumns().contains(new Column("DSN8B10.EMP", "WORKDEPT")));
// assertTrue(visitor.getColumns().contains(new Column("mytable", "first_name")));
// assertTrue(visitor.getColumns().contains(new Column("mytable", "full_name")));
assertEquals("SELECT *\n" +
"INTO Persons_backup\n" +
"FROM Persons", //
SQLUtils.toSQLString(stmt, JdbcConstants.DB2));
assertEquals("select *\n" +
"into Persons_backup\n" +
"from Persons", //
SQLUtils.toSQLString(stmt, JdbcConstants.DB2, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION));
}
}
| DB2SelectTest_34 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/annotations/AnyNullabilityTest.java | {
"start": 5032,
"end": 5773
} | class ____ {
@Id
private Long id;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Property<?> getProperty() {
return property;
}
public void setProperty(Property<?> property) {
this.property = property;
}
@Any( optional = false )
@AnyDiscriminator( DiscriminatorType.STRING )
@AnyDiscriminatorValue( discriminator = "S", entity = StringProperty.class )
@AnyDiscriminatorValue( discriminator = "I", entity = IntegerProperty.class )
@AnyKeyJavaClass( Long.class )
@Column( name = "property_type" )
@JoinColumn( name = "property_id" )
private Property<?> property;
}
@Entity( name = "NonNullablePropertyHolder" )
public static | NonOptionalPropertyHolder |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/testFixtures/java/org/springframework/boot/web/server/servlet/MockServletWebServer.java | {
"start": 6297,
"end": 6827
} | class ____ {
private final Filter filter;
private final FilterRegistration.Dynamic registration;
public RegisteredFilter(Filter filter) {
this.filter = filter;
this.registration = mock(FilterRegistration.Dynamic.class);
}
public FilterRegistration.Dynamic getRegistration() {
return this.registration;
}
public Filter getFilter() {
return this.filter;
}
}
/**
* Initializer (usually implement by adapting {@code ServletContextInitializer}).
*/
@FunctionalInterface
protected | RegisteredFilter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java | {
"start": 1904,
"end": 7839
} | class ____ {
private ConfigurationMutationACLPolicy policy;
private RMContext rmContext;
private MutableConfScheduler scheduler;
private static final UserGroupInformation GOOD_USER = UserGroupInformation
.createUserForTesting("goodUser", new String[] {});
private static final UserGroupInformation BAD_USER = UserGroupInformation
.createUserForTesting("badUser", new String[] {});
private static final Map<String, String> EMPTY_MAP =
Collections.<String, String>emptyMap();
@BeforeEach
public void setUp() throws IOException {
rmContext = mock(RMContext.class);
scheduler = mock(MutableConfScheduler.class);
when(rmContext.getScheduler()).thenReturn(scheduler);
mockQueue("a", "root.a", scheduler);
mockQueue("b", "root.b", scheduler);
mockQueue("b1", "root.b1", scheduler);
}
private void mockQueue(String queueName,
String queuePath, MutableConfScheduler confScheduler)
throws IOException {
QueueInfo queueInfo = QueueInfo.
newInstance(queueName, queuePath, 0, 0,
0, null, null,
null, null, null, null, false, -1.0f, 10, null, false);
when(confScheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
.thenReturn(queueInfo);
Queue queue = mock(Queue.class);
when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(GOOD_USER)))
.thenReturn(true);
when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(BAD_USER)))
.thenReturn(false);
when(confScheduler.getQueue(eq(queueName))).thenReturn(queue);
}
@Test
public void testDefaultPolicy() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
DefaultConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
assertTrue(policy.isMutationAllowed(GOOD_USER, null));
assertFalse(policy.isMutationAllowed(BAD_USER, null));
}
@Test
public void testQueueAdminBasedPolicy() {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
QueueAdminConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
updateInfo.getUpdateQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
@Test
public void testQueueAdminPolicyAddQueue() {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
QueueAdminConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Add root.b.b1. Should check ACL of root.b queue.
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
updateInfo.getAddQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
@Test
public void testQueueAdminPolicyAddNestedQueue() {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
QueueAdminConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Add root.b.b1.b11. Should check ACL of root.b queue.
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
updateInfo.getAddQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
@Test
public void testQueueAdminPolicyRemoveQueue() {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
QueueAdminConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Remove root.b.b1.
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.b.b1");
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
@Test
public void testQueueAdminPolicyGlobal() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
QueueAdminConfigurationMutationACLPolicy.class,
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertTrue(policy.isMutationAllowed(BAD_USER, updateInfo));
updateInfo.getGlobalParams().put("globalKey", "globalValue");
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
}
| TestConfigurationMutationACLPolicies |
java | apache__flink | flink-walkthroughs/flink-walkthrough-datastream-java/src/main/resources/archetype-resources/src/main/java/FraudDetectionJob.java | {
"start": 1279,
"end": 1809
} | class ____ {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStream<Transaction> transactions = env
.addSource(new TransactionSource())
.name("transactions");
DataStream<Alert> alerts = transactions
.keyBy(Transaction::getAccountId)
.process(new FraudDetector())
.name("fraud-detector");
alerts
.addSink(new AlertSink())
.name("send-alerts");
env.execute("Fraud Detection");
}
}
| FraudDetectionJob |
java | apache__spark | common/kvstore/src/main/java/org/apache/spark/util/kvstore/RocksDBTypeInfo.java | {
"start": 4229,
"end": 7167
} | class ____ {
static final byte[] END_MARKER = new byte[] { '-' };
static final byte ENTRY_PREFIX = (byte) '+';
static final byte KEY_SEPARATOR = 0x0;
static byte TRUE = (byte) '1';
static byte FALSE = (byte) '0';
private static final byte SECONDARY_IDX_PREFIX = (byte) '.';
private static final byte POSITIVE_MARKER = (byte) '=';
private static final byte NEGATIVE_MARKER = (byte) '*';
private static final byte[] HEX_BYTES = new byte[] {
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
};
private final RocksDB db;
private final Class<?> type;
private final Map<String, Index> indices;
private final byte[] typePrefix;
RocksDBTypeInfo(RocksDB db, Class<?> type, byte[] alias) throws Exception {
this.db = db;
this.type = type;
this.indices = new HashMap<>();
KVTypeInfo ti = new KVTypeInfo(type);
// First create the parent indices, then the child indices.
ti.indices().forEach(idx -> {
// In RocksDB, there is no parent index for the NATURAL INDEX.
if (idx.parent().isEmpty() || idx.value().equals(KVIndex.NATURAL_INDEX_NAME)) {
indices.put(idx.value(), new Index(idx, ti.getAccessor(idx.value()), null));
}
});
ti.indices().forEach(idx -> {
if (!idx.parent().isEmpty() && !idx.value().equals(KVIndex.NATURAL_INDEX_NAME)) {
indices.put(idx.value(), new Index(idx, ti.getAccessor(idx.value()),
indices.get(idx.parent())));
}
});
this.typePrefix = alias;
}
Class<?> type() {
return type;
}
byte[] keyPrefix() {
return typePrefix;
}
Index naturalIndex() {
return index(KVIndex.NATURAL_INDEX_NAME);
}
Index index(String name) {
Index i = indices.get(name);
JavaUtils.checkArgument(i != null, "Index %s does not exist for type %s.", name,
type.getName());
return i;
}
Collection<Index> indices() {
return indices.values();
}
byte[] buildKey(byte[]... components) {
return buildKey(true, components);
}
byte[] buildKey(boolean addTypePrefix, byte[]... components) {
int len = 0;
if (addTypePrefix) {
len += typePrefix.length + 1;
}
for (byte[] comp : components) {
len += comp.length;
}
len += components.length - 1;
byte[] dest = new byte[len];
int written = 0;
if (addTypePrefix) {
System.arraycopy(typePrefix, 0, dest, 0, typePrefix.length);
dest[typePrefix.length] = KEY_SEPARATOR;
written += typePrefix.length + 1;
}
for (byte[] comp : components) {
System.arraycopy(comp, 0, dest, written, comp.length);
written += comp.length;
if (written < dest.length) {
dest[written] = KEY_SEPARATOR;
written++;
}
}
return dest;
}
/**
* Models a single index in RocksDB. See top-level class's javadoc for a description of how the
* keys are generated.
*/
| RocksDBTypeInfo |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/ExceptionHandlingConfigurerAccessDeniedHandlerTests.java | {
"start": 5184,
"end": 5770
} | class ____ {
AccessDeniedHandler teapotDeniedHandler = (request, response, exception) -> response
.setStatus(HttpStatus.I_AM_A_TEAPOT.value());
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().denyAll())
.exceptionHandling((handling) -> handling
.defaultAccessDeniedHandlerFor(
this.teapotDeniedHandler,
pathPattern("/hello/**")));
return http.build();
// @formatter:on
}
}
}
| SingleRequestMatcherAccessDeniedHandlerConfig |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java | {
"start": 2617,
"end": 2736
} | class ____ {@link PlacementConstraintToProtoConverter} and
* {@link PlacementConstraintFromProtoConverter}.
*/
public | for |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java | {
"start": 2639,
"end": 22790
} | class ____ {
// ------------------------------------------------------------------------
// reaching states
// ------------------------------------------------------------------------
/**
* Waits until the Job has reached a certain state.
*
* <p>This method is based on polling and might miss very fast state transitions!
*/
public static void waitUntilJobStatus(ExecutionGraph eg, JobStatus status, long maxWaitMillis)
throws TimeoutException {
checkNotNull(eg);
checkNotNull(status);
checkArgument(maxWaitMillis >= 0);
// this is a poor implementation - we may want to improve it eventually
final long deadline =
maxWaitMillis == 0
? Long.MAX_VALUE
: System.nanoTime() + (maxWaitMillis * 1_000_000);
while (eg.getState() != status && System.nanoTime() < deadline) {
try {
Thread.sleep(2);
} catch (InterruptedException ignored) {
}
}
if (System.nanoTime() >= deadline) {
throw new TimeoutException(
String.format(
"The job did not reach status %s in time. Current status is %s.",
status, eg.getState()));
}
}
/**
* Waits until the Execution has reached a certain state.
*
* <p>This method is based on polling and might miss very fast state transitions!
*/
public static void waitUntilExecutionState(
Execution execution, ExecutionState state, long maxWaitMillis) throws TimeoutException {
checkNotNull(execution);
checkNotNull(state);
checkArgument(maxWaitMillis >= 0);
// this is a poor implementation - we may want to improve it eventually
final long deadline =
maxWaitMillis == 0
? Long.MAX_VALUE
: System.nanoTime() + (maxWaitMillis * 1_000_000);
while (execution.getState() != state && System.nanoTime() < deadline) {
try {
Thread.sleep(2);
} catch (InterruptedException ignored) {
}
}
if (System.nanoTime() >= deadline) {
throw new TimeoutException(
String.format(
"The execution did not reach state %s in time. Current state is %s.",
state, execution.getState()));
}
}
/**
* Waits until the ExecutionVertex has reached a certain state.
*
* <p>This method is based on polling and might miss very fast state transitions!
*/
public static void waitUntilExecutionVertexState(
ExecutionVertex executionVertex, ExecutionState state, long maxWaitMillis)
throws TimeoutException {
checkNotNull(executionVertex);
checkNotNull(state);
checkArgument(maxWaitMillis >= 0);
// this is a poor implementation - we may want to improve it eventually
final long deadline =
maxWaitMillis == 0
? Long.MAX_VALUE
: System.nanoTime() + (maxWaitMillis * 1_000_000);
while (true) {
Execution execution = executionVertex.getCurrentExecutionAttempt();
if (execution == null
|| (execution.getState() != state && System.nanoTime() < deadline)) {
try {
Thread.sleep(2);
} catch (InterruptedException ignored) {
}
} else {
break;
}
if (System.nanoTime() >= deadline) {
if (execution != null) {
throw new TimeoutException(
String.format(
"The execution vertex did not reach state %s in time. Current state is %s.",
state, execution.getState()));
} else {
throw new TimeoutException(
"Cannot get current execution attempt of " + executionVertex + '.');
}
}
}
}
/**
* Waits until all executions fulfill the given predicate.
*
* @param executionGraph for which to check the executions
* @param executionPredicate predicate which is to be fulfilled
* @param maxWaitMillis timeout for the wait operation
* @throws TimeoutException if the executions did not reach the target state in time
*/
public static void waitForAllExecutionsPredicate(
ExecutionGraph executionGraph,
Predicate<AccessExecution> executionPredicate,
long maxWaitMillis)
throws TimeoutException {
final Predicate<AccessExecutionGraph> allExecutionsPredicate =
allExecutionsPredicate(executionPredicate);
final Deadline deadline = Deadline.fromNow(Duration.ofMillis(maxWaitMillis));
boolean predicateResult;
do {
predicateResult = allExecutionsPredicate.test(executionGraph);
if (!predicateResult) {
try {
Thread.sleep(2L);
} catch (InterruptedException ignored) {
Thread.currentThread().interrupt();
}
}
} while (!predicateResult && deadline.hasTimeLeft());
if (!predicateResult) {
throw new TimeoutException("Not all executions fulfilled the predicate in time.");
}
}
public static Predicate<AccessExecutionGraph> allExecutionsPredicate(
final Predicate<AccessExecution> executionPredicate) {
return accessExecutionGraph -> {
final Iterable<? extends AccessExecutionVertex> allExecutionVertices =
accessExecutionGraph.getAllExecutionVertices();
for (AccessExecutionVertex executionVertex : allExecutionVertices) {
final AccessExecution currentExecutionAttempt =
executionVertex.getCurrentExecutionAttempt();
if (currentExecutionAttempt == null
|| !executionPredicate.test(currentExecutionAttempt)) {
return false;
}
}
return true;
};
}
public static Predicate<AccessExecution> isInExecutionState(ExecutionState executionState) {
return (AccessExecution execution) -> execution.getState() == executionState;
}
/**
* Takes all vertices in the given ExecutionGraph and switches their current execution to
* INITIALIZING.
*/
public static void switchAllVerticesToInitializing(ExecutionGraph eg) {
for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
vertex.getCurrentExecutionAttempt().switchToInitializing();
}
}
/**
* Takes all vertices in the given ExecutionGraph and switches their current execution to
* RUNNING.
*/
public static void switchAllVerticesToRunning(ExecutionGraph eg) {
for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
vertex.getCurrentExecutionAttempt().switchToInitializing();
vertex.getCurrentExecutionAttempt().switchToRunning();
}
}
/**
* Takes all vertices in the given ExecutionGraph and attempts to move them from CANCELING to
* CANCELED.
*/
public static void completeCancellingForAllVertices(ExecutionGraph eg) {
for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
vertex.getCurrentExecutionAttempt().completeCancelling();
}
}
public static void finishJobVertex(ExecutionGraph executionGraph, JobVertexID jobVertexId) {
for (ExecutionVertex vertex :
Objects.requireNonNull(executionGraph.getJobVertex(jobVertexId))
.getTaskVertices()) {
finishExecutionVertex(executionGraph, vertex);
}
}
public static void finishExecutionVertex(
ExecutionGraph executionGraph, ExecutionVertex executionVertex) {
executionGraph.updateState(
new TaskExecutionStateTransition(
new TaskExecutionState(
executionVertex.getCurrentExecutionAttempt().getAttemptId(),
ExecutionState.FINISHED)));
}
/**
* Takes all vertices in the given ExecutionGraph and switches their current execution to
* FINISHED.
*/
public static void finishAllVertices(ExecutionGraph eg) {
for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
vertex.getCurrentExecutionAttempt().markFinished();
}
}
/** Checks that all execution are in state DEPLOYING and then switches them to state RUNNING. */
public static void switchToRunning(ExecutionGraph eg) {
// check that all execution are in state DEPLOYING
for (ExecutionVertex ev : eg.getAllExecutionVertices()) {
final Execution exec = ev.getCurrentExecutionAttempt();
final ExecutionState executionState = exec.getState();
assert executionState == ExecutionState.DEPLOYING
: "Expected executionState to be DEPLOYING, was: " + executionState;
}
// switch executions to RUNNING
for (ExecutionVertex ev : eg.getAllExecutionVertices()) {
final Execution exec = ev.getCurrentExecutionAttempt();
exec.switchToRunning();
}
}
// ------------------------------------------------------------------------
// state modifications
// ------------------------------------------------------------------------
public static void setVertexState(ExecutionVertex vertex, ExecutionState state) {
try {
Execution exec = vertex.getCurrentExecutionAttempt();
Field f = Execution.class.getDeclaredField("state");
f.setAccessible(true);
f.set(exec, state);
} catch (Exception e) {
throw new RuntimeException("Modifying the state failed", e);
}
}
public static void setVertexResource(ExecutionVertex vertex, LogicalSlot slot) {
Execution exec = vertex.getCurrentExecutionAttempt();
if (!exec.tryAssignResource(slot)) {
throw new RuntimeException("Could not assign resource.");
}
}
// ------------------------------------------------------------------------
// Mocking ExecutionGraph
// ------------------------------------------------------------------------
public static DefaultExecutionGraph createExecutionGraph(
ScheduledExecutorService executor, JobVertex... vertices) throws Exception {
return createExecutionGraph(executor, Duration.ofSeconds(10L), vertices);
}
public static DefaultExecutionGraph createExecutionGraph(
ScheduledExecutorService executor, Duration timeout, JobVertex... vertices)
throws Exception {
checkNotNull(vertices);
checkNotNull(timeout);
DefaultExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(JobGraphTestUtils.streamingJobGraph(vertices))
.setRpcTimeout(timeout)
.build(executor);
executionGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
return executionGraph;
}
public static JobVertex createNoOpVertex(int parallelism) {
return createNoOpVertex("vertex", parallelism);
}
public static JobVertex createNoOpVertex(String name, int parallelism) {
return createNoOpVertex(name, parallelism, JobVertex.MAX_PARALLELISM_DEFAULT);
}
public static JobVertex createNoOpVertex(String name, int parallelism, int maxParallelism) {
JobVertex vertex = new JobVertex(name);
vertex.setInvokableClass(NoOpInvokable.class);
vertex.setParallelism(parallelism);
vertex.setMaxParallelism(maxParallelism);
return vertex;
}
// ------------------------------------------------------------------------
// utility mocking methods
// ------------------------------------------------------------------------
public static ExecutionVertexID createRandomExecutionVertexId() {
return new ExecutionVertexID(new JobVertexID(), new Random().nextInt(Integer.MAX_VALUE));
}
public static JobVertex createJobVertex(
String task1, int numTasks, Class<NoOpInvokable> invokable) {
JobVertex groupVertex = new JobVertex(task1);
groupVertex.setInvokableClass(invokable);
groupVertex.setParallelism(numTasks);
return groupVertex;
}
public static ExecutionJobVertex getExecutionJobVertex(
JobVertexID id, ScheduledExecutorService executor) throws Exception {
return getExecutionJobVertex(id, 1, null, executor);
}
public static ExecutionJobVertex getExecutionJobVertex(
JobVertexID id,
int parallelism,
@Nullable SlotSharingGroup slotSharingGroup,
ScheduledExecutorService executor)
throws Exception {
JobVertex ajv = new JobVertex("TestVertex", id);
ajv.setInvokableClass(AbstractInvokable.class);
ajv.setParallelism(parallelism);
if (slotSharingGroup != null) {
ajv.setSlotSharingGroup(slotSharingGroup);
}
return getExecutionJobVertex(ajv, executor);
}
public static ExecutionJobVertex getExecutionJobVertex(JobVertex jobVertex) throws Exception {
return getExecutionJobVertex(jobVertex, new DirectScheduledExecutorService());
}
public static ExecutionJobVertex getExecutionJobVertex(
JobVertex jobVertex, ScheduledExecutorService executor) throws Exception {
JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(jobVertex);
SchedulerBase scheduler =
new DefaultSchedulerBuilder(
jobGraph,
ComponentMainThreadExecutorServiceAdapter.forMainThread(),
executor)
.build();
return scheduler.getExecutionJobVertex(jobVertex.getID());
}
public static ExecutionJobVertex getExecutionJobVertex(JobVertexID id) throws Exception {
return getExecutionJobVertex(id, new DirectScheduledExecutorService());
}
public static ExecutionVertex getExecutionVertex() throws Exception {
return getExecutionJobVertex(new JobVertexID(), new DirectScheduledExecutorService())
.getTaskVertices()[0];
}
public static Execution getExecution() throws Exception {
final ExecutionJobVertex ejv = getExecutionJobVertex(new JobVertexID());
return ejv.getTaskVertices()[0].getCurrentExecutionAttempt();
}
public static Execution getExecution(
final JobVertexID jid,
final int subtaskIndex,
final int numTasks,
final SlotSharingGroup slotSharingGroup)
throws Exception {
final ExecutionJobVertex ejv =
getExecutionJobVertex(
jid, numTasks, slotSharingGroup, new DirectScheduledExecutorService());
return ejv.getTaskVertices()[subtaskIndex].getCurrentExecutionAttempt();
}
public static ExecutionAttemptID createExecutionAttemptId() {
return createExecutionAttemptId(new JobVertexID(0, 0));
}
public static ExecutionAttemptID createExecutionAttemptId(JobVertexID jobVertexId) {
return createExecutionAttemptId(jobVertexId, 0, 0);
}
public static ExecutionAttemptID createExecutionAttemptId(
JobVertexID jobVertexId, int subtaskIndex, int attemptNumber) {
return createExecutionAttemptId(
new ExecutionVertexID(jobVertexId, subtaskIndex), attemptNumber);
}
public static ExecutionAttemptID createExecutionAttemptId(
ExecutionVertexID executionVertexId, int attemptNumber) {
return new ExecutionAttemptID(new ExecutionGraphID(), executionVertexId, attemptNumber);
}
// ------------------------------------------------------------------------
// graph vertex verifications
// ------------------------------------------------------------------------
/**
* Verifies the generated {@link ExecutionJobVertex} for a given {@link JobVertex} in a {@link
* ExecutionGraph}.
*
* @param executionGraph the generated execution graph
* @param originJobVertex the vertex to verify for
* @param inputJobVertices upstream vertices of the verified vertex, used to check inputs of
* generated vertex
* @param outputJobVertices downstream vertices of the verified vertex, used to check produced
* data sets of generated vertex
*/
static void verifyGeneratedExecutionJobVertex(
ExecutionGraph executionGraph,
JobVertex originJobVertex,
@Nullable List<JobVertex> inputJobVertices,
@Nullable List<JobVertex> outputJobVertices) {
ExecutionJobVertex ejv = executionGraph.getAllVertices().get(originJobVertex.getID());
assertThat(ejv).isNotNull();
// verify basic properties
assertThat(originJobVertex.getParallelism()).isEqualTo(ejv.getParallelism());
assertThat(executionGraph.getJobID()).isEqualTo(ejv.getJobId());
assertThat(originJobVertex.getID()).isEqualTo(ejv.getJobVertexId());
assertThat(originJobVertex).isEqualTo(ejv.getJobVertex());
// verify produced data sets
if (outputJobVertices == null) {
assertThat(ejv.getProducedDataSets()).isEmpty();
} else {
assertThat(outputJobVertices).hasSize(ejv.getProducedDataSets().length);
for (int i = 0; i < outputJobVertices.size(); i++) {
assertThat(originJobVertex.getProducedDataSets().get(i).getId())
.isEqualTo(ejv.getProducedDataSets()[i].getId());
assertThat(originJobVertex.getParallelism())
.isEqualTo(ejv.getProducedDataSets()[0].getPartitions().length);
}
}
// verify task vertices for their basic properties and their inputs
assertThat(originJobVertex.getParallelism()).isEqualTo(ejv.getTaskVertices().length);
int subtaskIndex = 0;
for (ExecutionVertex ev : ejv.getTaskVertices()) {
assertThat(executionGraph.getJobID()).isEqualTo(ev.getJobId());
assertThat(originJobVertex.getID()).isEqualTo(ev.getJobvertexId());
assertThat(originJobVertex.getParallelism())
.isEqualTo(ev.getTotalNumberOfParallelSubtasks());
assertThat(subtaskIndex).isEqualTo(ev.getParallelSubtaskIndex());
if (inputJobVertices == null) {
assertThat(ev.getNumberOfInputs()).isZero();
} else {
assertThat(inputJobVertices).hasSize(ev.getNumberOfInputs());
for (int i = 0; i < inputJobVertices.size(); i++) {
ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(i);
assertThat(inputJobVertices.get(i).getParallelism())
.isEqualTo(consumedPartitionGroup.size());
int expectedPartitionNum = 0;
for (IntermediateResultPartitionID consumedPartitionId :
consumedPartitionGroup) {
assertThat(consumedPartitionId.getPartitionNumber())
.isEqualTo(expectedPartitionNum);
expectedPartitionNum++;
}
}
}
subtaskIndex++;
}
}
}
| ExecutionGraphTestUtils |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-common/deployment/src/main/java/io/quarkus/resteasy/reactive/common/deployment/ApplicationResultBuildItem.java | {
"start": 208,
"end": 506
} | class ____ extends SimpleBuildItem {
final ApplicationScanningResult result;
public ApplicationResultBuildItem(ApplicationScanningResult result) {
this.result = result;
}
public ApplicationScanningResult getResult() {
return result;
}
}
| ApplicationResultBuildItem |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/util/concurrent/SpscArrayQueue.java | {
"start": 4397,
"end": 5761
} | class ____<T> extends SpscArrayQueueCold<T> {
/** */
private static final long serialVersionUID = -4461305682174876914L;
byte pad000,pad001,pad002,pad003,pad004,pad005,pad006,pad007;// 8b
byte pad010,pad011,pad012,pad013,pad014,pad015,pad016,pad017;// 16b
byte pad020,pad021,pad022,pad023,pad024,pad025,pad026,pad027;// 24b
byte pad030,pad031,pad032,pad033,pad034,pad035,pad036,pad037;// 32b
byte pad040,pad041,pad042,pad043,pad044,pad045,pad046,pad047;// 40b
byte pad050,pad051,pad052,pad053,pad054,pad055,pad056,pad057;// 48b
byte pad060,pad061,pad062,pad063,pad064,pad065,pad066,pad067;// 56b
byte pad070,pad071,pad072,pad073,pad074,pad075,pad076,pad077;// 64b
byte pad100,pad101,pad102,pad103,pad104,pad105,pad106,pad107;// 72b
byte pad110,pad111,pad112,pad113,pad114,pad115,pad116,pad117;// 80b
byte pad120,pad121,pad122,pad123,pad124,pad125,pad126,pad127;// 88b
byte pad130,pad131,pad132,pad133,pad134,pad135,pad136,pad137;// 96b
byte pad140,pad141,pad142,pad143,pad144,pad145,pad146,pad147;//104b
byte pad150,pad151,pad152,pad153,pad154,pad155,pad156,pad157;//112b
byte pad160,pad161,pad162,pad163,pad164,pad165,pad166,pad167;//120b
byte pad170,pad171,pad172,pad173,pad174,pad175,pad176,pad177;//128b
byte pad200,pad201,pad202,pad203; //132b
SpscArrayQueueP1(int length) {
super(length);
}
}
| SpscArrayQueueP1 |
java | apache__camel | components/camel-ai/camel-djl/src/main/java/org/apache/camel/component/djl/model/nlp/CustomWordEmbeddingPredictor.java | {
"start": 977,
"end": 1426
} | class ____ extends CustomNlpPredictor<NDList> {
public CustomWordEmbeddingPredictor(DJLEndpoint endpoint) {
super(endpoint);
}
@Override
public void process(Exchange exchange) {
super.process(exchange);
// DJL NDList should not be exposed outside the endpoint
NDList result = exchange.getIn().getBody(NDList.class);
exchange.getIn().setBody(result.encode());
}
}
| CustomWordEmbeddingPredictor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/RepositoryInfo.java | {
"start": 951,
"end": 4199
} | class ____ implements Writeable, ToXContentFragment {
public final String ephemeralId;
public final String name;
public final String type;
public final Map<String, String> location;
public final long startedAt;
@Nullable
public final Long stoppedAt;
public RepositoryInfo(String ephemeralId, String name, String type, Map<String, String> location, long startedAt) {
this(ephemeralId, name, type, location, startedAt, null);
}
public RepositoryInfo(
String ephemeralId,
String name,
String type,
Map<String, String> location,
long startedAt,
@Nullable Long stoppedAt
) {
this.ephemeralId = ephemeralId;
this.name = name;
this.type = type;
this.location = location;
this.startedAt = startedAt;
if (stoppedAt != null && startedAt > stoppedAt) {
throw new IllegalArgumentException("createdAt must be before or equal to stoppedAt");
}
this.stoppedAt = stoppedAt;
}
public RepositoryInfo(StreamInput in) throws IOException {
this.ephemeralId = in.readString();
this.name = in.readString();
this.type = in.readString();
this.location = in.readMap(StreamInput::readString);
this.startedAt = in.readLong();
this.stoppedAt = in.readOptionalLong();
}
public RepositoryInfo stopped(long stoppedAt) {
assert isStopped() == false : "The repository is already stopped";
return new RepositoryInfo(ephemeralId, name, type, location, startedAt, stoppedAt);
}
public boolean isStopped() {
return stoppedAt != null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(ephemeralId);
out.writeString(name);
out.writeString(type);
out.writeMap(location, StreamOutput::writeString);
out.writeLong(startedAt);
out.writeOptionalLong(stoppedAt);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("repository_name", name);
builder.field("repository_type", type);
builder.field("repository_location", location);
builder.field("repository_ephemeral_id", ephemeralId);
builder.field("repository_started_at", startedAt);
if (stoppedAt != null) {
builder.field("repository_stopped_at", stoppedAt);
}
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RepositoryInfo that = (RepositoryInfo) o;
return ephemeralId.equals(that.ephemeralId)
&& name.equals(that.name)
&& type.equals(that.type)
&& location.equals(that.location)
&& startedAt == that.startedAt
&& Objects.equals(stoppedAt, that.stoppedAt);
}
@Override
public int hashCode() {
return Objects.hash(ephemeralId, name, type, location, startedAt, stoppedAt);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
| RepositoryInfo |
java | google__error-prone | core/src/test/java/com/google/errorprone/dataflow/nullnesspropagation/NullnessPropagationTest.java | {
"start": 62503,
"end": 62860
} | class ____ {
public static void dereference(Coinductive o) {
// BUG: Diagnostic contains: (Nullable)
triggerNullnessChecker(o);
// BUG: Diagnostic contains: (Nullable)
triggerNullnessChecker(o.f);
o.f = (Coinductive) new Object();
// BUG: Diagnostic contains: (Non-null)
triggerNullnessChecker(o.f);
}
abstract | FieldAccessTest |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/logging/logback/SpringBootJoranConfiguratorTests.java | {
"start": 10773,
"end": 11366
} | interface ____ {
}
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@WithResource(name = "property-in-if.xml", content = """
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<springProperty scope="context" name="MINE" source="my.example-property"/>
<if condition='property("MINE").contains("true")'>
<then>
<variable scope="context" name="MYCHECK" value="i-was-included"/>
</then>
</if>
</configuration>
""")
private @ | WithPropertyDefaultValueXmlResource |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/RefasterRuleBuilderScanner.java | {
"start": 2326,
"end": 9122
} | class ____ extends SimpleTreeVisitor<Void, Void> {
private static final Logger logger =
Logger.getLogger(RefasterRuleBuilderScanner.class.toString());
static final Context.Key<Map<MethodSymbol, PlaceholderMethod>> PLACEHOLDER_METHODS_KEY =
new Context.Key<>();
private final Context context;
private final Map<MethodSymbol, PlaceholderMethod> placeholderMethods;
private final List<Template<?>> beforeTemplates;
private final List<Template<?>> afterTemplates;
private RefasterRuleBuilderScanner(Context context) {
this.context = new SubContext(context);
if (context.get(PLACEHOLDER_METHODS_KEY) == null) {
this.placeholderMethods = new HashMap<>();
context.put(PLACEHOLDER_METHODS_KEY, placeholderMethods);
} else {
this.placeholderMethods = context.get(PLACEHOLDER_METHODS_KEY);
}
this.beforeTemplates = new ArrayList<>();
this.afterTemplates = new ArrayList<>();
}
public static Collection<? extends CodeTransformer> extractRules(
ClassTree tree, Context context) {
ClassSymbol sym = ASTHelpers.getSymbol(tree);
RefasterRuleBuilderScanner scanner = new RefasterRuleBuilderScanner(context);
// visit abstract methods first
ImmutableList<MethodTree> methods =
new Ordering<MethodTree>() {
@Override
public int compare(MethodTree l, MethodTree r) {
return Boolean.compare(
l.getModifiers().getFlags().contains(Modifier.ABSTRACT),
r.getModifiers().getFlags().contains(Modifier.ABSTRACT));
}
}.reverse().immutableSortedCopy(Iterables.filter(tree.getMembers(), MethodTree.class));
scanner.visit(methods, null);
UTemplater templater = new UTemplater(context);
List<UType> types = templater.templateTypes(sym.type.getTypeArguments());
return scanner.createMatchers(
Iterables.filter(types, UTypeVar.class),
sym.getQualifiedName().toString(),
UTemplater.annotationMap(sym));
}
@Override
public Void visitMethod(MethodTree tree, Void v) {
try {
VisitorState state = new VisitorState(context);
logger.log(FINE, "Discovered method with name {0}", tree.getName());
if (hasAnnotation(tree, "com.google.errorprone.refaster.annotation.Placeholder", state)) {
checkArgument(
tree.getModifiers().getFlags().contains(Modifier.ABSTRACT),
"@Placeholder methods are expected to be abstract");
UTemplater templater = new UTemplater(context);
ImmutableMap.Builder<UVariableDecl, ImmutableClassToInstanceMap<Annotation>> params =
ImmutableMap.builder();
for (VariableTree param : tree.getParameters()) {
params.put(
templater.visitVariable(param, null),
UTemplater.annotationMap(ASTHelpers.getSymbol(param)));
}
MethodSymbol sym = ASTHelpers.getSymbol(tree);
placeholderMethods.put(
sym,
PlaceholderMethod.create(
tree.getName(),
templater.template(sym.getReturnType()),
params.buildOrThrow(),
UTemplater.annotationMap(sym)));
} else if (hasAnnotation(tree, BEFORE_TEMPLATE_ANNOTATION, state)) {
checkState(afterTemplates.isEmpty(), "BeforeTemplate must come before AfterTemplate");
Template<?> template = UTemplater.createTemplate(context, tree);
beforeTemplates.add(template);
if (template instanceof BlockTemplate) {
context.put(UTemplater.REQUIRE_BLOCK_KEY, /* data= */ true);
}
} else if (hasAnnotation(tree, AFTER_TEMPLATE_ANNOTATION, state)) {
afterTemplates.add(UTemplater.createTemplate(context, tree));
} else if (tree.getModifiers().getFlags().contains(Modifier.ABSTRACT)) {
throw new IllegalArgumentException(
"Placeholder methods must have @Placeholder, but abstract method does not: " + tree);
}
return null;
} catch (RuntimeException t) {
throw new RuntimeException("Error analysing: " + tree.getName(), t);
}
}
private ImmutableList<? extends CodeTransformer> createMatchers(
Iterable<UTypeVar> typeVars,
String qualifiedTemplateClass,
ImmutableClassToInstanceMap<Annotation> annotationMap) {
if (beforeTemplates.isEmpty() && afterTemplates.isEmpty()) {
// there's no template here
return ImmutableList.of();
} else {
if (annotationMap.containsKey(AllowCodeBetweenLines.class)) {
List<UBlank> blanks = new ArrayList<>();
for (int i = 0; i < beforeTemplates.size(); i++) {
if (beforeTemplates.get(i) instanceof ExpressionTemplate) {
throw new IllegalArgumentException(
"@AllowCodeBetweenLines may not be specified for expression templates.");
}
BlockTemplate before = (BlockTemplate) beforeTemplates.get(i);
List<UStatement> stmtsWithBlanks = new ArrayList<>();
for (UStatement stmt : before.templateStatements()) {
if (!stmtsWithBlanks.isEmpty()) {
UBlank blank = UBlank.create();
blanks.add(blank);
stmtsWithBlanks.add(blank);
}
stmtsWithBlanks.add(stmt);
}
beforeTemplates.set(i, before.withStatements(stmtsWithBlanks));
}
for (int i = 0; i < afterTemplates.size(); i++) {
BlockTemplate afterBlock = (BlockTemplate) afterTemplates.get(i);
afterTemplates.set(
i,
afterBlock.withStatements(Iterables.concat(blanks, afterBlock.templateStatements())));
}
}
RefasterRule<?, ?> rule =
RefasterRule.create(
qualifiedTemplateClass, typeVars, beforeTemplates, afterTemplates, annotationMap);
List<ExpressionTemplate> negatedAfterTemplates = new ArrayList<>();
for (Template<?> afterTemplate : afterTemplates) {
if (afterTemplate.annotations().containsKey(AlsoNegation.class)) {
negatedAfterTemplates.add(((ExpressionTemplate) afterTemplate).negation());
}
}
if (!negatedAfterTemplates.isEmpty()) {
List<ExpressionTemplate> negatedBeforeTemplates = new ArrayList<>();
for (Template<?> beforeTemplate : beforeTemplates) {
negatedBeforeTemplates.add(((ExpressionTemplate) beforeTemplate).negation());
}
RefasterRule<?, ?> negation =
RefasterRule.create(
qualifiedTemplateClass,
typeVars,
negatedBeforeTemplates,
negatedAfterTemplates,
annotationMap);
return ImmutableList.of(rule, negation);
}
return ImmutableList.of(rule);
}
}
}
| RefasterRuleBuilderScanner |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/PathAssertBaseTest.java | {
"start": 934,
"end": 990
} | class ____ {@link PathAssert} tests.
*/
public abstract | for |
java | grpc__grpc-java | interop-testing/src/test/java/io/grpc/testing/integration/TrafficControlProxy.java | {
"start": 5121,
"end": 5543
} | class ____ implements Runnable {
private final MessageQueue queue;
Reader(MessageQueue queue) {
this.queue = queue;
}
@Override
public void run() {
while (!shutDown) {
try {
queue.readIn();
} catch (IOException e) {
shutDown = true;
} catch (InterruptedException e) {
shutDown = true;
}
}
}
}
private final | Reader |
java | processing__processing4 | app/src/processing/app/ui/ColorChooser.java | {
"start": 12580,
"end": 14660
} | class ____ extends JComponent {
static final int WIDE = 256;
static final int HIGH = 256;
private int lastX, lastY;
public ColorRange() {
setCursor(Cursor.getPredefinedCursor(Cursor.CROSSHAIR_CURSOR));
addMouseListener(new MouseAdapter() {
@Override
public void mousePressed(MouseEvent e) {
updateMouse(e);
}
});
addMouseMotionListener(new MouseMotionAdapter() {
@Override
public void mouseDragged(MouseEvent e) {
updateMouse(e);
}
});
addKeyListener(new KeyAdapter() {
@Override
public void keyPressed(KeyEvent e) {
super.keyPressed(e);
if (e.getKeyCode() == KeyEvent.VK_ESCAPE) {
ColorChooser.this.hide();
}
}
});
}
private void updateMouse(MouseEvent e) {
int mouseX = e.getX();
int mouseY = e.getY();
if ((mouseX >= 0) && (mouseX < WIDE) &&
(mouseY >= 0) && (mouseY < HIGH)) {
int newSaturation = (int) (100 * (mouseX / 255.0f));
int newBrightness = 100 - ((int) (100 * (mouseY / 255.0f)));
saturationField.setText(String.valueOf(newSaturation));
brightnessField.setText(String.valueOf(newBrightness));
lastX = mouseX;
lastY = mouseY;
}
}
@Override
public void paintComponent(Graphics g) {
super.paintComponent(g);
for (int j = 0; j < WIDE; j++) {
for (int i = 0; i < HIGH; i++) {
g.setColor(Color.getHSBColor(hue / 360f, i / 256f, (255 - j) / 256f));
g.fillRect(i, j, 1, 1);
}
}
g.setColor((brightness > 50) ? Color.BLACK : Color.WHITE);
g.drawRect(lastX - 5, lastY - 5, 10, 10);
}
@Override
public Dimension getPreferredSize() {
return new Dimension(WIDE, HIGH);
}
@Override
public Dimension getMinimumSize() {
return getPreferredSize();
}
@Override
public Dimension getMaximumSize() {
return getPreferredSize();
}
}
public | ColorRange |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/servlet/SessionStoreDirectory.java | {
"start": 994,
"end": 1802
} | class ____ {
private @Nullable File directory;
@Nullable File getDirectory() {
return this.directory;
}
void setDirectory(@Nullable File directory) {
this.directory = directory;
}
public File getValidDirectory(boolean mkdirs) {
File dir = getDirectory();
if (dir == null) {
return new ApplicationTemp().getDir("servlet-sessions");
}
if (!dir.isAbsolute()) {
dir = new File(new ApplicationHome().getDir(), dir.getPath());
}
if (!dir.exists() && mkdirs) {
dir.mkdirs();
}
assertDirectory(mkdirs, dir);
return dir;
}
private void assertDirectory(boolean mkdirs, File dir) {
Assert.state(!mkdirs || dir.exists(), () -> "Session dir " + dir + " does not exist");
Assert.state(!dir.isFile(), () -> "Session dir " + dir + " points to a file");
}
}
| SessionStoreDirectory |
java | apache__kafka | share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfigTest.java | {
"start": 1057,
"end": 1946
} | class ____ {
@Test
public void testAppendLingerMs() {
ShareCoordinatorConfig config = createConfig(Map.of(ShareCoordinatorConfig.APPEND_LINGER_MS_CONFIG, -1));
assertEquals(OptionalInt.empty(), config.shareCoordinatorAppendLingerMs());
config = createConfig(Map.of(ShareCoordinatorConfig.APPEND_LINGER_MS_CONFIG, 0));
assertEquals(OptionalInt.of(0), config.shareCoordinatorAppendLingerMs());
config = createConfig(Map.of(ShareCoordinatorConfig.APPEND_LINGER_MS_CONFIG, 5));
assertEquals(OptionalInt.of(5), config.shareCoordinatorAppendLingerMs());
}
public static ShareCoordinatorConfig createConfig(Map<String, Object> configs) {
return new ShareCoordinatorConfig(new AbstractConfig(
ShareCoordinatorConfig.CONFIG_DEF,
configs,
false
));
}
}
| ShareCoordinatorConfigTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountErrorTests.java | {
"start": 795,
"end": 1570
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
return paramsToSuppliers(CountTests.parameters());
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Count(source, args.get(0));
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
return equalTo(
"argument of [dense_vector] must be [any type except counter types or dense_vector], found value [] type [dense_vector]"
);
}
@Override
protected void assertNumberOfCheckedSignatures(int checked) {
assertThat(checked, equalTo(1));
}
}
| CountErrorTests |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateSequenceTest.java | {
"start": 937,
"end": 2536
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"CREATE SEQUENCE customers_seq" +
" START WITH 1000" +
" INCREMENT BY 1" +
" NOCACHE" +
" NOCYCLE;";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("cdc.en_complaint_ipr_stat_fdt0")));
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
| OracleCreateSequenceTest |
java | apache__camel | components/camel-dfdl/src/generated/java/org/apache/camel/component/dfdl/DfdlEndpointUriFactory.java | {
"start": 514,
"end": 2224
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":schemaUri";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(5);
props.add("lazyStartProducer");
props.add("parseDirection");
props.add("rootElement");
props.add("rootNamespace");
props.add("schemaUri");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "dfdl".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "schemaUri", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| DfdlEndpointUriFactory |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryParenthesesTest.java | {
"start": 5519,
"end": 5819
} | class ____ {
void print(Integer i) {
(i++).toString();
}
}
""")
.doTest();
}
@Test
public void unaryPreFixParenthesesNeeded() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/batch/BatchExecutor.java | {
"start": 62,
"end": 530
} | interface ____ enforce command queue flushing using {@link BatchSize}.
* <p>
* Commands remain in a batch queue until the batch size is reached or the queue is {@link BatchExecutor#flush() flushed}. If
* the batch size is not reached, commands remain not executed.
* <p>
* Commands that fail during the batch cause a {@link BatchException} while non-failed commands remain executed successfully.
*
* @author Mark Paluch
* @since 5.0
* @see BatchSize
*/
public | to |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/EmptyInputTest.java | {
"start": 620,
"end": 1931
} | class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(GreetingResource.class, Greeting.class);
}
});
@Test
public void emptyBlocking() {
RestAssured.with().contentType(ContentType.JSON).post("/greeting/blocking")
.then().statusCode(200).body(equalTo("null"));
}
@Test
public void emptyNonBlocking() {
RestAssured.with().contentType(ContentType.JSON).post("/greeting/nonBlocking")
.then().statusCode(200).body(equalTo("null"));
}
@Test
public void nonEmptyBlocking() {
RestAssured.with().contentType(ContentType.JSON).body("{\"message\": \"Hi\"}").post("/greeting/blocking")
.then().statusCode(200).body(equalTo("Hi"));
}
@Test
public void nonEmptyNonBlocking() {
RestAssured.with().contentType(ContentType.JSON).body("{\"message\": \"Hey\"}").post("/greeting/nonBlocking")
.then().statusCode(200).body(equalTo("Hey"));
}
@Path("greeting")
public static | EmptyInputTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/SameTableAliasInSubqueryWithEmbeddedTest.java | {
"start": 1287,
"end": 4465
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final MasterDataMetaData metadata1 = new MasterDataMetaData(
"SYSTEM",
"AT",
TransportMode.INTERNATIONAL,
"EUR",
"NESTED_1"
);
final MasterDataFileEntity entity1 = new MasterDataFileEntity(
new PrimaryKey(),
metadata1,
LocalDateTime.now(),
MasterDataImportStatus.SUCCESS
);
session.persist( entity1 );
final MasterDataMetaData metadata2 = new MasterDataMetaData(
"PREMIUM",
"DE",
TransportMode.DOMESTIC,
"EUR",
"NESTED_2"
);
final MasterDataFileEntity entity2 = new MasterDataFileEntity(
new PrimaryKey(),
metadata2,
LocalDateTime.now(),
MasterDataImportStatus.SUCCESS
);
session.persist( entity2 );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction(
session -> session.createMutationQuery( "delete from MasterDataFileEntity" ).executeUpdate()
);
}
@Test
public void test(SessionFactoryScope scope) {
final String jpql =
"select mdf.id from MasterDataFileEntity as mdf " +
"where mdf.dataImportStatus = 'SUCCESS' " +
" and mdf.metaData.country = :countryCode " +
" and mdf.metaData.nestedEmbeddable.nestedProperty = :nested " +
" and mdf.importFinishedAt = " +
" (select max(mdf.importFinishedAt) from MasterDataFileEntity as mdf " +
" where mdf.dataImportStatus = 'SUCCESS' " +
" and mdf.metaData.country = :countryCode " +
" and mdf.metaData.nestedEmbeddable.nestedProperty = :nested)";
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
TypedQuery<PrimaryKey> query = session.createQuery( jpql, PrimaryKey.class );
query.setParameter( "countryCode", "DE" );
query.setParameter( "nested", "NESTED_2" );
assertNotNull( query.getSingleResult() );
statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "mdfe1_0", 6 );
statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "mdfe2_0", 5 );
} );
}
@Test
public void testNestedOnly(SessionFactoryScope scope) {
final String jpql =
"select mdf.id from MasterDataFileEntity as mdf " +
"where mdf.metaData.nestedEmbeddable.nestedProperty = :nested " +
" and mdf.importFinishedAt = " +
" (select max(mdf.importFinishedAt) from MasterDataFileEntity as mdf " +
" where mdf.metaData.nestedEmbeddable.nestedProperty = :nested)";
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
TypedQuery<PrimaryKey> query = session.createQuery( jpql, PrimaryKey.class );
query.setParameter( "nested", "NESTED_2" );
assertNotNull( query.getSingleResult() );
statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "mdfe1_0", 4 );
statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "mdfe2_0", 3 );
} );
}
@Embeddable
public static | SameTableAliasInSubqueryWithEmbeddedTest |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/security/AbstractAuthenticationAuditListener.java | {
"start": 1325,
"end": 1908
} | class ____
implements ApplicationListener<AbstractAuthenticationEvent>, ApplicationEventPublisherAware {
@SuppressWarnings("NullAway.Init")
private ApplicationEventPublisher publisher;
@Override
public void setApplicationEventPublisher(ApplicationEventPublisher publisher) {
this.publisher = publisher;
}
protected ApplicationEventPublisher getPublisher() {
return this.publisher;
}
protected void publish(AuditEvent event) {
if (getPublisher() != null) {
getPublisher().publishEvent(new AuditApplicationEvent(event));
}
}
}
| AbstractAuthenticationAuditListener |
java | apache__camel | components/camel-http-base/src/main/java/org/apache/camel/http/base/HttpSendDynamicPreProcessor.java | {
"start": 988,
"end": 1696
} | class ____ implements Processor {
private final String path;
private final String query;
public HttpSendDynamicPreProcessor(String path, String query) {
this.path = path;
this.query = query;
}
@Override
public void process(Exchange exchange) throws Exception {
if (path != null) {
exchange.getIn().setHeader(Exchange.HTTP_PATH, path);
} else {
exchange.getIn().removeHeader(Exchange.HTTP_PATH);
}
if (query != null) {
exchange.getIn().setHeader(Exchange.HTTP_QUERY, query);
} else {
exchange.getIn().removeHeader(Exchange.HTTP_QUERY);
}
}
}
| HttpSendDynamicPreProcessor |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/hhh18829/AutoGeneratedIdClassTest.java | {
"start": 2584,
"end": 2692
} | class ____" );
final Class<?> idClass = maybeIdClass.get();
assertTrue( idClass.isRecord(), "Generated ID | Id |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/support/LocalException.java | {
"start": 848,
"end": 973
} | class ____ extends RuntimeException {
public LocalException(String message) {
super(message);
}
}
| LocalException |
java | spring-projects__spring-boot | module/spring-boot-micrometer-tracing-brave/src/test/java/org/springframework/boot/micrometer/tracing/brave/autoconfigure/zipkin/ZipkinWithBraveTracingAutoConfigurationTests.java | {
"start": 6393,
"end": 6588
} | class ____ {
@Bean
BytesMessageSender sender(Encoding encoding) {
return new NoopSender(encoding);
}
}
@Configuration(proxyBeanMethods = false)
private static final | SenderConfiguration |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/EntryPointAssertions_catchIOException_Test.java | {
"start": 970,
"end": 1729
} | class ____ extends EntryPointAssertionsBaseTest {
private static final IOException IO_EXCEPTION = new IOException();
@ParameterizedTest
@MethodSource("catchIOExceptions")
void should_catch_IOException(Function<ThrowingCallable, IOException> catchIOException) {
// GIVEN
ThrowingCallable throwingCallable = () -> {
throw IO_EXCEPTION;
};
// WHEN
IOException throwable = catchIOException.apply(throwingCallable);
// THEN
then(throwable).isSameAs(IO_EXCEPTION);
}
private static Stream<Function<ThrowingCallable, IOException>> catchIOExceptions() {
return Stream.of(Assertions::catchIOException, BDDAssertions::catchIOException, withAssertions::catchIOException);
}
}
| EntryPointAssertions_catchIOException_Test |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/SessionEventGeneratorImpl.java | {
"start": 10972,
"end": 11951
} | class ____ extends AbstractEventGenerator {
@Override
public E generateEvent(long globalWatermark) {
return createEventFromTimestamp(
generateArbitraryInSessionTimestamp(), globalWatermark, Timing.IN_LATENESS);
}
@Override
public long getLocalWatermark() {
return getAfterLatenessTimestamp() - 1;
}
@Override
public boolean canGenerateEventAtWatermark(long globalWatermark) {
return isTriggered(globalWatermark);
}
@Override
public boolean hasMoreEvents() {
return hasMoreInLatenessEvents();
}
@Override
public EventGenerator<K, E> getNextGenerator(long globalWatermark) {
return new AfterLatenessGenerator();
}
}
/**
* Internal generator delegate for producing late session events with timestamps after the
* lateness.
*/
private | InLatenessGenerator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 26832,
"end": 27855
} | enum ____ implements Super {
INSTANCE {
public void f() {}
}
}
""")
.doTest();
}
// TODO(cushon): we could probably run this externally, but we'd have to
// build protos with maven.
private String jarPath(Class<?> clazz) throws Exception {
URI uri = clazz.getProtectionDomain().getCodeSource().getLocation().toURI();
return new File(uri).toString();
}
// any final null reference constant is immutable, but do we actually care?
//
// javac makes it annoying to figure this out - since null isn't a compile-time constant,
// none of that machinery can be used. Instead, we need to look at the actual AST node
// for the member declaration to see that it's initialized to null.
@Ignore
@Test
public void immutableNull() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.ThreadSafe;
@ThreadSafe
| Test |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/plugins/DoNotMockEnforcer.java | {
"start": 1393,
"end": 2473
} | class ____ of the type to be mocked and
* checks it against {@link #checkTypeForDoNotMockViolation(Class)}. If any types fails
* the validation, the traversal is interrupted and the error message is returned.
*
* @param creationSettings The mock creation settings
* @return Optional message if this type can not be mocked, or {@code null} otherwise
* @since 5.9.0
*/
@Override
default String checkTypeForDoNotMockViolation(MockCreationSettings<?> creationSettings) {
String warning = recursiveCheckDoNotMockAnnotationForType(creationSettings.getTypeToMock());
if (warning != null) {
return warning;
}
for (Class<?> aClass : creationSettings.getExtraInterfaces()) {
warning = recursiveCheckDoNotMockAnnotationForType(aClass);
if (warning != null) {
return warning;
}
}
return null;
}
private String recursiveCheckDoNotMockAnnotationForType(Class<?> type) {
// Object and interfaces do not have a super | hierarchy |
java | apache__camel | components/camel-paho/src/main/java/org/apache/camel/component/paho/PahoConsumer.java | {
"start": 1476,
"end": 6806
} | class ____ extends DefaultConsumer {
private static final Logger LOG = LoggerFactory.getLogger(PahoConsumer.class);
private volatile MqttClient client;
private volatile String clientId;
private volatile boolean stopClient;
private volatile MqttConnectOptions connectOptions;
public PahoConsumer(Endpoint endpoint, Processor processor) {
super(endpoint, processor);
}
public MqttClient getClient() {
return client;
}
public void setClient(MqttClient client) {
this.client = client;
}
@Override
protected void doStart() throws Exception {
super.doStart();
connectOptions = PahoEndpoint.createMqttConnectOptions(getEndpoint().getConfiguration());
if (client == null) {
clientId = getEndpoint().getConfiguration().getClientId();
if (clientId == null) {
clientId = "camel-" + MqttClient.generateClientId();
}
stopClient = true;
client = new MqttClient(
getEndpoint().getConfiguration().getBrokerUrl(),
clientId,
PahoEndpoint.createMqttClientPersistence(getEndpoint().getConfiguration()));
LOG.debug("Connecting client: {} to broker: {}", clientId, getEndpoint().getConfiguration().getBrokerUrl());
if (getEndpoint().getConfiguration().isManualAcksEnabled()) {
client.setManualAcks(true);
}
client.connect(connectOptions);
}
client.setCallback(new MqttCallbackExtended() {
@Override
public void connectComplete(boolean reconnect, String serverURI) {
if (reconnect) {
try {
client.subscribe(getEndpoint().getTopic(), getEndpoint().getConfiguration().getQos());
} catch (MqttException e) {
LOG.error("MQTT resubscribe failed {}", e.getMessage(), e);
}
}
}
@Override
public void connectionLost(Throwable cause) {
LOG.debug("MQTT broker connection lost due {}", cause.getMessage(), cause);
}
@Override
public void messageArrived(String topic, MqttMessage message) throws Exception {
LOG.debug("Message arrived on topic: {} -> {}", topic, message);
Exchange exchange = createExchange(message, topic);
// use default consumer callback
AsyncCallback cb = defaultConsumerCallback(exchange, true);
getAsyncProcessor().process(exchange, cb);
}
@Override
public void deliveryComplete(IMqttDeliveryToken token) {
LOG.debug("Delivery complete. Token: {}", token);
}
});
LOG.debug("Subscribing client: {} to topic: {}", clientId, getEndpoint().getTopic());
client.subscribe(getEndpoint().getTopic(), getEndpoint().getConfiguration().getQos());
}
@Override
protected void doStop() throws Exception {
super.doStop();
if (stopClient && client != null && client.isConnected()) {
String topic = getEndpoint().getTopic();
// only unsubscribe if we are not durable
if (getEndpoint().getConfiguration().isCleanSession()) {
LOG.debug("Unsubscribing client: {} from topic: {}", clientId, topic);
client.unsubscribe(topic);
} else {
LOG.debug("Client: {} is durable so will not unsubscribe from topic: {}", clientId, topic);
}
LOG.debug("Disconnecting client: {} from broker: {}", clientId, getEndpoint().getConfiguration().getBrokerUrl());
client.disconnect();
}
client = null;
}
@Override
public PahoEndpoint getEndpoint() {
return (PahoEndpoint) super.getEndpoint();
}
public Exchange createExchange(MqttMessage mqttMessage, String topic) {
Exchange exchange = createExchange(true);
PahoMessage paho = new PahoMessage(exchange.getContext(), mqttMessage);
paho.setBody(mqttMessage.getPayload());
paho.setHeader(PahoConstants.MQTT_TOPIC, topic);
paho.setHeader(PahoConstants.MQTT_QOS, mqttMessage.getQos());
exchange.setIn(paho);
if (getEndpoint().getConfiguration().isManualAcksEnabled()) {
exchange.getExchangeExtension().addOnCompletion(new Synchronization() {
@Override
public void onComplete(Exchange exchange) {
try {
PahoConsumer.this.client.messageArrivedComplete(mqttMessage.getId(), mqttMessage.getQos());
} catch (MqttException e) {
LOG.warn("Failed to commit message with ID {} due to MqttException.", mqttMessage.getId());
}
}
@Override
public void onFailure(Exchange exchange) {
LOG.error("Rollback due to error processing Exchange ID: {}", exchange.getExchangeId(),
exchange.getException());
}
});
}
return exchange;
}
}
| PahoConsumer |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/features/externalconfig/typesafeconfigurationproperties/constructorbinding/nonnull/MyProperties.java | {
"start": 995,
"end": 1563
} | class ____ {
private final boolean enabled;
private final InetAddress remoteAddress;
private final Security security;
// tag::code[]
public MyProperties(boolean enabled, InetAddress remoteAddress, @DefaultValue Security security) {
this.enabled = enabled;
this.remoteAddress = remoteAddress;
this.security = security;
}
// end::code[]
public boolean isEnabled() {
return this.enabled;
}
public InetAddress getRemoteAddress() {
return this.remoteAddress;
}
public Security getSecurity() {
return this.security;
}
public static | MyProperties |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java | {
"start": 22301,
"end": 22783
} | class ____ implements StateBackend {
@Override
public <K> CheckpointableKeyedStateBackend<K> createKeyedStateBackend(
KeyedStateBackendParameters<K> parameters) throws Exception {
return null;
}
@Override
public OperatorStateBackend createOperatorStateBackend(
OperatorStateBackendParameters parameters) throws Exception {
return null;
}
}
static final | ModernStateBackend |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/engine/TestTagTests.java | {
"start": 851,
"end": 3208
} | class ____ {
@SuppressWarnings("DataFlowIssue")
@Test
void validSyntax() {
// @formatter:off
assertAll("Valid Tag Syntax",
() -> yep("fast"),
() -> yep("super_fast"),
() -> yep("unit-test"),
() -> yep("integration.test"),
() -> yep("org.example.CustomTagClass"),
() -> yep(" surrounded-by-whitespace\t\n"),
() -> nope(null),
() -> nope(""),
() -> nope(" "),
() -> nope("\t"),
() -> nope("\f"),
() -> nope("\r"),
() -> nope("\n"),
() -> nope("custom tag"), // internal space
() -> nope(","), // comma
() -> nope("("), // opening parenthesis
() -> nope(")"), // closing parenthesis
() -> nope("&"), // boolean AND
() -> nope("|"), // boolean OR
() -> nope("!") // boolean NOT
);
// @formatter:on
}
@Test
void factory() {
assertEquals("foo", TestTag.create("foo").getName());
assertEquals("foo.tag", TestTag.create("foo.tag").getName());
assertEquals("foo-tag", TestTag.create("foo-tag").getName());
assertEquals("foo-tag", TestTag.create(" foo-tag ").getName());
assertEquals("foo-tag", TestTag.create("\t foo-tag \n").getName());
}
@SuppressWarnings("DataFlowIssue")
@Test
void factoryPreconditions() {
assertSyntaxViolation(null);
assertSyntaxViolation("");
assertSyntaxViolation(" ");
assertSyntaxViolation("X\tX");
assertSyntaxViolation("X\nX");
assertSyntaxViolation("XXX\u005CtXXX");
}
@Test
void tagEqualsOtherTagWithSameName() {
assertEquals(TestTag.create("fast"), TestTag.create("fast"));
assertEquals(TestTag.create("fast").hashCode(), TestTag.create("fast").hashCode());
assertNotEquals(null, TestTag.create("fast"));
assertNotEquals(TestTag.create("fast"), null);
}
@Test
void toStringPrintsName() {
assertEquals("fast", TestTag.create("fast").toString());
}
private static void yep(String tag) {
assertTrue(TestTag.isValid(tag), () -> "'%s' should be a valid tag".formatted(tag));
}
private static void nope(String tag) {
assertFalse(TestTag.isValid(tag), () -> "'%s' should not be a valid tag".formatted(tag));
}
private void assertSyntaxViolation(String tag) {
assertPreconditionViolationFor(() -> TestTag.create(tag))//
.withMessageStartingWith("Tag name")//
.withMessageEndingWith("must be syntactically valid");
}
}
| TestTagTests |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/defaultbean/DefaultClassBeanTest.java | {
"start": 2834,
"end": 2924
} | interface ____ {
void write(StringBuilder sb);
}
@Singleton
static | Author |
java | google__dagger | javatests/dagger/internal/codegen/ComponentCreatorTestHelper.java | {
"start": 1406,
"end": 3278
} | class ____ {
private final CompilerMode compilerMode;
protected final ComponentCreatorKind creatorKind;
protected final ErrorMessages.ComponentCreatorMessages messages;
ComponentCreatorTestHelper(
CompilerMode compilerMode, ComponentCreatorAnnotation componentCreatorAnnotation) {
this.compilerMode = compilerMode;
this.creatorKind = componentCreatorAnnotation.creatorKind();
this.messages = creatorMessagesFor(componentCreatorAnnotation);
}
// For tests where code for both builders and factories can be largely equivalent, i.e. when there
// is nothing to set, just preprocess the lines to change code written for a builder to code for a
// factory.
// For more complicated code, use a JavaFileBuilder to add different code depending on the creator
// kind.
/**
* Processes the given lines, replacing builder-related names with factory-related names if the
* creator kind is {@code FACTORY}.
*/
String process(String... lines) {
Stream<String> stream = Arrays.stream(lines);
if (creatorKind.equals(FACTORY)) {
stream =
stream.map(
line ->
line.replace("Builder", "Factory")
.replace("builder", "factory")
.replace("build", "createComponent"));
}
return stream.collect(joining("\n"));
}
/**
* Returns a Java source with the {@linkplain #process(String...)} processed} versions of the
* given lines.
*/
Source preprocessedJavaSource(String fullyQualifiedName, String... lines) {
return CompilerTests.javaSource(fullyQualifiedName, process(lines));
}
/** Returns a file builder for the current creator kind. */
JavaFileBuilder javaFileBuilder(String qualifiedName) {
return new JavaFileBuilder(qualifiedName).withSettings(compilerMode, creatorKind);
}
}
| ComponentCreatorTestHelper |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_assumptions_Test.java | {
"start": 1074,
"end": 2769
} | class ____ extends WithComparingFieldsIntrospectionStrategyBaseTest {
@Test
void should_ignore_test_when_one_of_the_assumption_fails() {
// GIVEN
Person actual = new Person("John");
actual.home.address.number = 1;
Person expected = new Person("John");
expected.home.address.number = 1;
Person unexpected = new Person("John");
unexpected.home.address.number = 2;
// THEN
assumeThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(expected);
expectAssumptionNotMetException(() -> assumeThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(unexpected));
}
@Test
void should_run_test_when_all_assumptions_are_met() {
// GIVEN
Person actual = new Person("John");
actual.home.address.number = 1;
Person expected = new Person("John");
expected.home.address.number = 1;
// THEN
thenCode(() -> {
assumeThat("foo").isNotNull()
.isNotEmpty()
.isEqualTo("foo");
assumeThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(expected);
assumeThat(expected).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(actual);
assumeThat(actual).as("test description")
.withFailMessage("error message")
.withRepresentation(UNICODE_REPRESENTATION)
.usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(expected);
}).doesNotThrowAnyException();
}
}
| RecursiveComparisonAssert_assumptions_Test |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/MonoAllTest.java | {
"start": 1009,
"end": 3767
} | class ____ {
@Test
public void sourceNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
new MonoAll<>(null, v -> true);
});
}
@Test
public void predicateNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
new MonoAll<>(null, null);
});
}
@Test
public void normal() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Flux.range(1, 10).all(v -> true).subscribe(ts);
ts.assertValues(true)
.assertComplete()
.assertNoError();
}
@Test
public void normalBackpressured() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create(0);
Flux.range(1, 10).all(v -> true).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertNoError();
ts.request(1);
ts.assertValues(true)
.assertComplete()
.assertNoError();
}
@Test
public void someMatch() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Flux.range(1, 10).all(v -> v < 6).subscribe(ts);
ts.assertValues(false)
.assertComplete()
.assertNoError();
}
@Test
public void someMatchBackpressured() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create(0);
Flux.range(1, 10).all(v -> v < 6).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertNoError();
ts.request(1);
ts.assertValues(false)
.assertComplete()
.assertNoError();
}
@Test
public void predicateThrows() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Flux.range(1, 10).all(v -> {
throw new RuntimeException("forced failure");
}).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorWith(e -> assertThat(e).hasMessageContaining("forced failure"));
}
@Test
public void scanOperator(){
MonoAll<Integer> test = new MonoAll<>(Flux.just(1, 2, 3), v -> true);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanSubscriber() {
CoreSubscriber<Boolean> actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoAll.AllSubscriber<String> test = new MonoAll.AllSubscriber<>(actual, String::isEmpty);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(0);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
}
| MonoAllTest |
java | apache__camel | components/camel-servicenow/camel-servicenow-component/src/main/java/org/apache/camel/component/servicenow/annotations/ServiceNowSysParm.java | {
"start": 1177,
"end": 1249
} | interface ____ {
String name();
String value();
}
| ServiceNowSysParm |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/textui/TTable.java | {
"start": 1342,
"end": 9355
} | class ____ implements TComponent {
// column definition
private final ColumnDefine[] columnDefineArray;
// border
private final Border border = new Border();
// padding
private int padding;
public TTable(ColumnDefine[] columnDefineArray) {
this.columnDefineArray = null == columnDefineArray ? new ColumnDefine[0] : columnDefineArray;
}
public TTable(int columnNum) {
this.columnDefineArray = new ColumnDefine[columnNum];
for (int index = 0; index < this.columnDefineArray.length; index++) {
columnDefineArray[index] = new ColumnDefine();
}
}
@Override
public String rendering() {
final StringBuilder tableSB = new StringBuilder();
// process width cache
final int[] widthCacheArray = new int[getColumnCount()];
for (int index = 0; index < widthCacheArray.length; index++) {
widthCacheArray[index] = abs(columnDefineArray[index].getWidth());
}
final int rowCount = getRowCount();
for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) {
final boolean isFirstRow = rowIndex == 0;
final boolean isLastRow = rowIndex == rowCount - 1;
// print first separation line
if (isFirstRow && border.has(Border.BORDER_OUTER_TOP)) {
tableSB.append(drawSeparationLine(widthCacheArray)).append(System.lineSeparator());
}
// print inner separation lines
if (!isFirstRow && border.has(Border.BORDER_INNER_H)) {
tableSB.append(drawSeparationLine(widthCacheArray)).append(System.lineSeparator());
}
// draw one line
tableSB.append(drawRow(widthCacheArray, rowIndex));
// print ending separation line
if (isLastRow && border.has(Border.BORDER_OUTER_BOTTOM)) {
tableSB.append(drawSeparationLine(widthCacheArray)).append(System.lineSeparator());
}
}
return tableSB.toString();
}
private String drawRow(int[] widthCacheArray, int rowIndex) {
final StringBuilder rowSB = new StringBuilder();
final Scanner[] scannerArray = new Scanner[getColumnCount()];
try {
boolean hasNextLine;
do {
hasNextLine = false;
final StringBuilder segmentSB = new StringBuilder();
for (int colIndex = 0; colIndex < getColumnCount(); colIndex++) {
final int width = widthCacheArray[colIndex];
final boolean isFirstColOfRow = colIndex == 0;
final boolean isLastColOfRow = colIndex == widthCacheArray.length - 1;
final String borderChar;
if (isFirstColOfRow && border.has(Border.BORDER_OUTER_LEFT)) {
borderChar = "|";
} else if (!isFirstColOfRow && border.has(Border.BORDER_INNER_V)) {
borderChar = "|";
} else {
borderChar = EMPTY_STRING;
}
if (null == scannerArray[colIndex]) {
scannerArray[colIndex] = new Scanner(
new StringReader(wrap(getData(rowIndex, columnDefineArray[colIndex]), width)));
}
final Scanner scanner = scannerArray[colIndex];
final String data;
if (scanner.hasNextLine()) {
data = scanner.nextLine();
hasNextLine = true;
} else {
data = EMPTY_STRING;
}
if (width > 0) {
final ColumnDefine columnDefine = columnDefineArray[colIndex];
final String dataFormat = getDataFormat(columnDefine, width, data);
final String paddingChar = repeat(" ", padding);
segmentSB.append(format(borderChar + paddingChar + dataFormat + paddingChar, data));
}
if (isLastColOfRow) {
if (border.has(Border.BORDER_OUTER_RIGHT)) {
segmentSB.append('|');
}
segmentSB.append(System.lineSeparator());
}
}
if (hasNextLine) {
rowSB.append(segmentSB);
}
} while (hasNextLine);
return rowSB.toString();
} finally {
for (Scanner scanner : scannerArray) {
if (null != scanner) {
scanner.close();
}
}
}
}
private String getData(int rowIndex, ColumnDefine columnDefine) {
return columnDefine.getRowCount() <= rowIndex ? EMPTY_STRING : columnDefine.rows.get(rowIndex);
}
private String getDataFormat(ColumnDefine columnDefine, int width, String data) {
switch (columnDefine.align) {
case MIDDLE: {
final int length = length(data);
final int diff = width - length;
final int left = diff / 2;
return repeat(" ", diff - left) + "%s" + repeat(" ", left);
}
case RIGHT: {
return "%" + width + "s";
}
case LEFT:
default: {
return "%-" + width + "s";
}
}
}
/**
* get row count
*/
private int getRowCount() {
int rowCount = 0;
for (ColumnDefine columnDefine : columnDefineArray) {
rowCount = max(rowCount, columnDefine.getRowCount());
}
return rowCount;
}
/**
* position to last column
*/
private int indexLastCol(final int[] widthCacheArray) {
for (int colIndex = widthCacheArray.length - 1; colIndex >= 0; colIndex--) {
final int width = widthCacheArray[colIndex];
if (width <= 0) {
continue;
}
return colIndex;
}
return 0;
}
/**
* draw separation line
*/
private String drawSeparationLine(final int[] widthCacheArray) {
final StringBuilder separationLineSB = new StringBuilder();
final int lastCol = indexLastCol(widthCacheArray);
final int colCount = widthCacheArray.length;
for (int colIndex = 0; colIndex < colCount; colIndex++) {
final int width = widthCacheArray[colIndex];
if (width <= 0) {
continue;
}
final boolean isFirstCol = colIndex == 0;
final boolean isLastCol = colIndex == lastCol;
if (isFirstCol && border.has(Border.BORDER_OUTER_LEFT)) {
separationLineSB.append('+');
}
if (!isFirstCol && border.has(Border.BORDER_INNER_V)) {
separationLineSB.append('+');
}
separationLineSB.append(repeat("-", width + 2 * padding));
if (isLastCol && border.has(Border.BORDER_OUTER_RIGHT)) {
separationLineSB.append('+');
}
}
return separationLineSB.toString();
}
/**
* Add a row
*/
public TTable addRow(Object... columnDataArray) {
if (null != columnDataArray) {
for (int index = 0; index < columnDefineArray.length; index++) {
final ColumnDefine columnDefine = columnDefineArray[index];
if (index < columnDataArray.length && null != columnDataArray[index]) {
columnDefine.rows.add(replaceTab(columnDataArray[index].toString()));
} else {
columnDefine.rows.add(EMPTY_STRING);
}
}
}
return this;
}
/**
* alignment
*/
public | TTable |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/ProviderManager.java | {
"start": 13438,
"end": 13733
} | class ____ implements AuthenticationEventPublisher {
@Override
public void publishAuthenticationFailure(AuthenticationException exception, Authentication authentication) {
}
@Override
public void publishAuthenticationSuccess(Authentication authentication) {
}
}
}
| NullEventPublisher |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/SelectManyToOneEmbeddedIdWithKeyManyToOneQueryTest.java | {
"start": 3775,
"end": 5385
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private IntIdEntity intIdEntity;
private String key;
public EmbeddableTestEntityId() {
}
public EmbeddableTestEntityId(IntIdEntity intIdEntity, String key) {
this.intIdEntity = intIdEntity;
this.key = key;
}
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "id_fk")
public IntIdEntity getIntIdEntity() {
return intIdEntity;
}
public void setIntIdEntity(IntIdEntity intIdEntity) {
this.intIdEntity = intIdEntity;
}
@Column(name = "id_key", length = 100)
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((intIdEntity == null) ? 0 : intIdEntity.hashCode());
result = prime * result + ((key == null) ? 0 : key.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmbeddableTestEntityId other = (EmbeddableTestEntityId) obj;
if (intIdEntity == null) {
if (other.intIdEntity != null)
return false;
} else if (!intIdEntity.equals(other.intIdEntity))
return false;
if (key == null) {
if (other.key != null)
return false;
} else if (!key.equals(other.key))
return false;
return true;
}
}
@Entity(name = "IntIdEntity")
@Table(name = "id_fkity")
public static | EmbeddableTestEntityId |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/HQLTest.java | {
"start": 3009,
"end": 75565
} | class ____ {
@AfterEach
void dropTestData(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@BeforeEach
public void createTestData(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (entityManager) -> {
Person person1 = new Person("John Doe");
person1.setNickName("JD");
person1.setAddress("Earth");
person1.setCreatedOn(LocalDateTime.of(2000, 1, 1, 0, 0, 0)) ;
person1.getAddresses().put(AddressType.HOME, "Home address");
person1.getAddresses().put(AddressType.OFFICE, "Office address");
entityManager.persist(person1);
Person person2 = new Person("Mrs. John Doe");
person2.setAddress("Earth");
person2.setCreatedOn(LocalDateTime.of(2000, 1, 2, 12, 0, 0)) ;
entityManager.persist(person2);
Person person3 = new Person("Dr_ John Doe");
entityManager.persist(person3);
Phone phone1 = new Phone("123-456-7890");
phone1.setId(1L);
phone1.setType(PhoneType.MOBILE);
person1.addPhone(phone1);
phone1.getRepairTimestamps().add(LocalDateTime.of(2005, 1, 1, 12, 0, 0));
phone1.getRepairTimestamps().add(LocalDateTime.of(2006, 1, 1, 12, 0, 0));
Call call11 = new Call();
call11.setDuration(12);
call11.setTimestamp(LocalDateTime.of(2000, 1, 1, 0, 0, 0));
Call call12 = new Call();
call12.setDuration(33);
call12.setTimestamp(LocalDateTime.of(2000, 1, 1, 1, 0, 0));
phone1.addCall(call11);
phone1.addCall(call12);
Phone phone2 = new Phone("098-765-4321");
phone2.setId(2L);
phone2.setType(PhoneType.LAND_LINE);
Phone phone3 = new Phone("098-765-4320");
phone3.setId(3L);
phone3.setType(PhoneType.LAND_LINE);
person2.addPhone(phone2);
person2.addPhone(phone3);
Account account1 = new Account();
account1.setOwner(person1);
entityManager.persist(account1);
Account account2 = new Account();
account1.setOwner(person2);
entityManager.persist(account2);
CreditCardPayment creditCardPayment = new CreditCardPayment();
creditCardPayment.setCompleted(true);
creditCardPayment.setAmount(BigDecimal.ZERO);
creditCardPayment.setPerson(person1);
creditCardPayment.setCardNumber("1234-1234-1234-1234");
creditCardPayment.setAccount(account1);
call11.setPayment(creditCardPayment);
WireTransferPayment wireTransferPayment = new WireTransferPayment();
wireTransferPayment.setCompleted(true);
wireTransferPayment.setAmount(BigDecimal.valueOf(100));
wireTransferPayment.setPerson(person2);
wireTransferPayment.setAccount(account2);
call12.setPayment(wireTransferPayment);
entityManager.persist(creditCardPayment);
entityManager.persist(wireTransferPayment);
} );
}
@Test
public void test_hql_select_simplest_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
List<Object> objects = session.createQuery(
"from java.lang.Object",
Object.class)
.getResultList();
//tag::hql-select-simplest-example[]
List<Person> persons = session.createQuery(
"from Person", Person.class)
.getResultList();
//end::hql-select-simplest-example[]
//tag::hql-select-simplest-example-alt[]
LocalDateTime datetime = session.createQuery(
"select local datetime",
LocalDateTime.class)
.getSingleResult();
//end::hql-select-simplest-example-alt[]
} );
}
@Test
public void test_hql_select_simplest_jpql_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (entityManager) -> {
//tag::hql-select-simplest-jpql-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p",
Person.class)
.getResultList();
//end::hql-select-simplest-jpql-example[]
Session session = entityManager.unwrap(Session.class);
//tag::hql-select-last-example[]
List<String> datetimes = session.createQuery(
"from Person p select p.name",
String.class)
.getResultList();
//end::hql-select-last-example[]
});
}
@Test
public void test_hql_select_no_from(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
//tag::hql-select-no-from[]
// result type Person, only the Person selected
List<Person> persons = session.createQuery(
"from Person join phones", Person.class)
.getResultList();
for (Person person: persons) {
//...
}
// result type Object[], both Person and Phone selected
List<Object[]> personsWithPhones = session.createQuery(
"from Person join phones", Object[].class)
.getResultList();
for (Object[] personWithPhone: personsWithPhones) {
Person p = (Person) personWithPhone[0];
Phone ph = (Phone) personWithPhone[1];
//...
}
//end::hql-select-no-from[]
});
}
@Test
public void hql_update_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-update-example[]
entityManager.createQuery(
"update Person set nickName = 'Nacho' " +
"where name = 'Ignacio'")
.executeUpdate();
//end::hql-update-example[]
});
}
@Test
public void hql_insert_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-insert-example[]
entityManager.createQuery(
"insert Person (id, name) " +
"values (100L, 'Jane Doe')")
.executeUpdate();
//end::hql-insert-example[]
});
}
@Test
public void hql_multi_insert_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-insert-example[]
entityManager.createQuery(
"insert Person (id, name) " +
"values (101L, 'J A Doe III'), " +
"(102L, 'J X Doe'), " +
"(103L, 'John Doe, Jr')")
.executeUpdate();
//end::hql-insert-example[]
});
}
@Test
public void hql_insert_with_sequence_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
entityManager.createQuery(
"insert Person (name) values ('Jane Doe2')" )
.executeUpdate();
});
}
@Test
public void hql_select_simplest_jpql_fqn_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-select-simplest-jpql-fqn-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from org.hibernate.testing.orm.domain.userguide.Person p",
Person.class)
.getResultList();
//end::hql-select-simplest-jpql-fqn-example[]
});
}
@Test
public void test_hql_multiple_root_reference_jpql_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-multiple-root-reference-jpql-example[]
List<Object[]> persons = entityManager.createQuery(
"select distinct pr, ph " +
"from Person pr, Phone ph " +
"where ph.person = pr and ph is not null",
Object[].class)
.getResultList();
//end::hql-multiple-root-reference-jpql-example[]
assertEquals(3, persons.size());
});
}
@Test
public void test_hql_cross_join_jpql_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-cross-join-jpql-example[]
List<Object[]> persons = entityManager.createQuery(
"select distinct pr, ph " +
"from Person pr cross join Phone ph " +
"where ph.person = pr and ph is not null",
Object[].class)
.getResultList();
//end::hql-cross-join-jpql-example[]
assertEquals(3, persons.size());
});
}
@Test
public void test_hql_multiple_same_root_reference_jpql_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-multiple-same-root-reference-jpql-example[]
List<Person> persons = entityManager.createQuery(
"select distinct pr1 " +
"from Person pr1, Person pr2 " +
"where pr1.id <> pr2.id " +
" and pr1.address = pr2.address " +
" and pr1.createdOn < pr2.createdOn",
Person.class)
.getResultList();
//end::hql-multiple-same-root-reference-jpql-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_explicit_root_join_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-root-join-example[]
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"join Phone ph on ph.person = pr " +
"where ph.type = :phoneType",
Person.class)
.setParameter("phoneType", PhoneType.MOBILE)
.getResultList();
//end::hql-explicit-root-join-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_explicit_inner_join_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-inner-join-example[]
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"join pr.phones ph " +
"where ph.type = :phoneType",
Person.class)
.setParameter("phoneType", PhoneType.MOBILE)
.getResultList();
//end::hql-explicit-inner-join-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_explicit_inner_join_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-inner-join-example[]
// same query, but specifying join type as 'inner' explicitly
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"inner join pr.phones ph " +
"where ph.type = :phoneType",
Person.class)
.setParameter("phoneType", PhoneType.MOBILE)
.getResultList();
//end::hql-explicit-inner-join-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_explicit_outer_join_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-outer-join-example[]
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"left join pr.phones ph " +
"where ph is null " +
" or ph.type = :phoneType",
Person.class)
.setParameter("phoneType", PhoneType.LAND_LINE)
.getResultList();
//end::hql-explicit-outer-join-example[]
assertEquals(2, persons.size());
});
}
@Test
public void test_hql_explicit_outer_join_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-outer-join-example[]
// same query, but specifying join type as 'outer' explicitly
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"left outer join pr.phones ph " +
"where ph is null " +
" or ph.type = :phoneType",
Person.class)
.setParameter("phoneType", PhoneType.LAND_LINE)
.getResultList();
//end::hql-explicit-outer-join-example[]
assertEquals(2, persons.size());
});
}
@Test
public void test_hql_explicit_fetch_join_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-fetch-join-example[]
List<Person> persons = entityManager.createQuery(
"select distinct pr " +
"from Person pr " +
"left join fetch pr.phones ",
Person.class)
.getResultList();
//end::hql-explicit-fetch-join-example[]
assertEquals(3, persons.size());
});
}
@Test
public void test_hql_explicit_join_with_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Session session = entityManager.unwrap(Session.class);
//tag::hql-explicit-join-with-example[]
List<Object[]> personsAndPhones = session.createQuery(
"select pr.name, ph.number " +
"from Person pr " +
"left join pr.phones ph with ph.type = :phoneType ",
Object[].class)
.setParameter("phoneType", PhoneType.LAND_LINE)
.getResultList();
//end::hql-explicit-join-with-example[]
assertEquals(4, personsAndPhones.size());
});
}
@Test
public void test_jpql_explicit_join_on_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-explicit-join-jpql-on-example[]
List<Object[]> personsAndPhones = entityManager.createQuery(
"select pr.name, ph.number " +
"from Person pr " +
"left join pr.phones ph on ph.type = :phoneType ",
Object[].class)
.setParameter("phoneType", PhoneType.LAND_LINE)
.getResultList();
//end::hql-explicit-join-jpql-on-example[]
assertEquals(4, personsAndPhones.size());
});
}
@Test
// Doc snippet: path expression 'ph.person.address' implies an inner join to Person.
public void test_hql_implicit_join_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Earth";
//tag::hql-implicit-join-example[]
List<Phone> phones = entityManager.createQuery(
"select ph " +
"from Phone ph " +
"where ph.person.address = :address ",
Phone.class)
.setParameter("address", address)
.getResultList();
//end::hql-implicit-join-example[]
assertEquals(3, phones.size());
});
}
@Test
// Doc snippet: the explicit-join form equivalent to the implicit join above;
// same result count confirms equivalence.
public void test_hql_implicit_join_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Earth";
//tag::hql-implicit-join-example[]
// same as
List<Phone> phones = entityManager.createQuery(
"select ph " +
"from Phone ph " +
"join ph.person pr " +
"where pr.address = :address ",
Phone.class)
.setParameter("address", address)
.getResultList();
//end::hql-implicit-join-example[]
assertEquals(3, phones.size());
});
}
@Test
// Doc snippet: repeating the same implicit path ('ph.person') reuses one join.
public void test_hql_implicit_join_alias_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Earth";
LocalDateTime timestamp = LocalDateTime.ofInstant(Instant.EPOCH, ZoneId.systemDefault());
//tag::hql-implicit-join-alias-example[]
List<Phone> phones = entityManager.createQuery(
"select ph " +
"from Phone ph " +
"where ph.person.address = :address " +
" and ph.person.createdOn > :timestamp",
Phone.class)
.setParameter("address", address)
.setParameter("timestamp", timestamp)
.getResultList();
//end::hql-implicit-join-alias-example[]
assertEquals(3, phones.size());
});
}
@Test
// Doc snippet: explicit aliased join equivalent of the repeated implicit path above.
public void test_hql_implicit_join_alias_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Earth";
LocalDateTime timestamp = LocalDateTime.ofInstant(Instant.EPOCH, ZoneId.systemDefault());
//tag::hql-implicit-join-alias-example[]
//same as
List<Phone> phones = entityManager.createQuery(
"select ph " +
"from Phone ph " +
"inner join ph.person pr " +
"where pr.address = :address " +
" and pr.createdOn > :timestamp",
Phone.class)
.setParameter("address", address)
.setParameter("timestamp", timestamp)
.getResultList();
//end::hql-implicit-join-alias-example[]
assertEquals(3, phones.size());
});
}
@Test
// Doc snippet: chaining joins across two collection-valued associations
// (Person -> phones -> calls) and filtering on both ends.
public void test_hql_collection_valued_associations_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Earth";
int duration = 20;
//tag::hql-collection-valued-associations[]
List<Phone> phones = entityManager.createQuery(
"select ph " +
"from Person pr " +
"join pr.phones ph " +
"join ph.calls c " +
"where pr.address = :address " +
" and c.duration > :duration",
Phone.class)
.setParameter("address", address)
.setParameter("duration", duration)
.getResultList();
//end::hql-collection-valued-associations[]
assertEquals(1, phones.size());
assertEquals( "123-456-7890", phones.get( 0).getNumber());
});
}
@Test
// we do not need to document this syntax, which is a
// legacy of EJB entity beans, prior to JPA / EJB 3
// Legacy 'in (collection)' join syntax; kept only to verify it still parses
// and yields the same result as the modern 'join' form above.
public void test_hql_collection_valued_associations_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Session session = entityManager.unwrap(Session.class);
String address = "Earth";
int duration = 20;
//tag::ejb-collection-valued-associations[]
// alternate syntax
List<Phone> phones = session.createQuery(
"select ph " +
"from Person pr, " +
"in (pr.phones) ph, " +
"in (ph.calls) c " +
"where pr.address = :address " +
" and c.duration > :duration",
Phone.class)
.setParameter("address", address)
.setParameter("duration", duration)
.getResultList();
//end::ejb-collection-valued-associations[]
assertEquals(1, phones.size());
assertEquals( "123-456-7890", phones.get( 0).getNumber());
});
}
@Test
// Doc snippet: joining a map-valued association yields the map values implicitly.
public void test_hql_collection_qualification_associations_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-qualification-example[]
// select all the calls (the map value) for a given Phone
// note that here we don't need to use value() or element()
// since it is implicit
List<Call> calls = entityManager.createQuery(
"select ch " +
"from Phone ph " +
"join ph.callHistory ch " +
"where ph.id = :id ",
Call.class)
.setParameter("id", id)
.getResultList();
//end::hql-collection-qualification-example[]
assertEquals(2, calls.size());
});
}
@Test
// Doc snippet: explicit value() qualifier — equivalent to the implicit form above.
public void test_hql_collection_qualification_associations_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-qualification-example[]
// same as above, but with value() explicit
List<Call> calls = entityManager.createQuery(
"select value(ch) " +
"from Phone ph " +
"join ph.callHistory ch " +
"where ph.id = :id ",
Call.class)
.setParameter("id", id)
.getResultList();
//end::hql-collection-qualification-example[]
assertEquals(2, calls.size());
});
}
@Test
// Doc snippet: key() selects the map keys of callHistory (LocalDateTime timestamps).
public void test_hql_collection_qualification_associations_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-qualification-example[]
// select all the Call timestamps (the map key) for a given Phone
// note that here we *do* need to explicitly specify key()
List<LocalDateTime> timestamps = entityManager.createQuery(
"select key(ch) " +
"from Phone ph " +
"join ph.callHistory ch " +
"where ph.id = :id ",
LocalDateTime.class)
.setParameter("id", id)
.getResultList();
//end::hql-collection-qualification-example[]
assertEquals(2, timestamps.size());
});
}
@Test
// Doc snippet: entry() selects the full Map.Entry of the callHistory map.
// Fixed: the key type is LocalDateTime (the key() example in this class selects
// LocalDateTime keys from the same map), not java.util.Date; also added the
// missing result assertion so this test actually verifies something.
public void test_hql_collection_qualification_associations_4(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-qualification-example[]
// select all the Call and their timestamps (the 'Map.Entry') for a given Phone
List<Map.Entry<LocalDateTime, Call>> callHistory = entityManager.createQuery(
"select entry(ch) " +
"from Phone ph " +
"join ph.callHistory ch " +
"where ph.id = :id ")
.setParameter("id", id)
.getResultList();
//end::hql-collection-qualification-example[]
assertEquals(2, callHistory.size());
});
}
@Test
// Doc snippet: index() addresses a list-valued association by position
// while aggregating over the map-valued callHistory.
public void test_hql_collection_qualification_associations_5(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
Integer phoneIndex = 0;
//tag::hql-collection-qualification-example[]
// Sum all call durations for a given Phone of a specific Person
Long duration = entityManager.createQuery(
"select sum(ch.duration) " +
"from Person pr " +
"join pr.phones ph " +
"join ph.callHistory ch " +
"where ph.id = :id " +
" and index(ph) = :phoneIndex",
Long.class)
.setParameter("id", id)
.setParameter("phoneIndex", phoneIndex)
.getSingleResult();
//end::hql-collection-qualification-example[]
assertEquals(45, duration.intValue());
});
}
@Test
// Doc snippet: value() applied directly to a path — implicit join to the map values.
public void test_hql_collection_implicit_join_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-implicit-join-example[]
// implicit join to a map value()
List<Call> calls = entityManager.createQuery(
"select value(ph.callHistory) " +
"from Phone ph " +
"where ph.id = :id ",
Call.class)
.setParameter("id", id)
.getResultList();
//end::hql-collection-implicit-join-example[]
assertEquals(2, calls.size());
});
}
@Test
// Doc snippet: key() applied directly to a path — implicit join to the map keys.
public void test_hql_collection_implicit_join_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Long id = 1L;
//tag::hql-collection-implicit-join-example[]
// implicit join to a map key()
List<LocalDateTime> timestamps = entityManager.createQuery(
"select key(ph.callHistory) " +
"from Phone ph " +
"where ph.id = :id ",
LocalDateTime.class)
.setParameter("id", id)
.getResultList();
//end::hql-collection-implicit-join-example[]
assertEquals(2, timestamps.size());
});
}
@Test
// Doc snippet: multi-column projection, first as Object[] rows, then as
// jakarta.persistence.Tuple with named aliases.
public void test_projection_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-projection-example[]
List<Object[]> results = entityManager.createQuery(
"select p.name, p.nickName " +
"from Person p ",
Object[].class
).getResultList();
for (Object[] result : results) {
String name = (String) result[0];
String nickName = (String) result[1];
}
List<Tuple> tuples = entityManager.createQuery(
"select p.name as name, p.nickName as nickName " +
"from Person p ",
Tuple.class
).getResultList();
for (Tuple tuple : tuples) {
String name = tuple.get("name", String.class);
String nickName = tuple.get("nickName", String.class);
}
//end::jpql-projection-example[]
});
}
@Test
// Doc snippet: HQL set operation 'union' combining two scalar selects.
public void test_union_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-union-example[]
List<String> results = entityManager.createQuery(
"select p.name from Person p " +
"union " +
"select p.nickName from Person p where p.nickName is not null",
String.class
).getResultList();
//end::hql-union-example[]
assertEquals(4, results.size());
});
}
@Test
// Doc snippet: creating an untyped Query vs a TypedQuery from a JPQL string.
public void test_jpql_api_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-example[]
Query query = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name"
);
TypedQuery<Person> typedQuery = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class
);
//end::jpql-api-example[]
});
}
@Test
// Doc snippet: looking up a @NamedQuery by name, untyped and typed.
public void test_jpql_api_named_query_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-named-query-example[]
Query query = entityManager.createNamedQuery("get_person_by_name");
TypedQuery<Person> typedQuery = entityManager.createNamedQuery("get_person_by_name", Person.class);
//end::jpql-api-named-query-example[]
});
}
@Test
// Doc snippet: executing a named query with a parameter and a single result.
public void test_jpql_api_hibernate_named_query_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-hibernate-named-query-example[]
Phone phone = entityManager
.createNamedQuery("get_phone_by_number", Phone.class)
.setParameter("number", "123-456-7890")
.getSingleResult();
//end::jpql-api-hibernate-named-query-example[]
assertNotNull(phone);
});
}
@Test
// Doc snippet: basic query execution — single result with a limit, and
// pagination via setFirstResult/setMaxResults.
public void test_jpql_api_basic_usage_example(SessionFactoryScope factoryScope) {
int page = 1;
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-basic-usage-example[]
Person person = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name = :name",
Person.class)
.setParameter("name", "John Doe")
.setMaxResults(1)
.getSingleResult();
List<Person> people = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.setFirstResult(page*10)
.setMaxResults(10)
.getResultList();
//end::jpql-api-basic-usage-example[]
});
}
@Test
// Doc snippet: JPA query hints (timeout in ms) and flush-mode control.
public void test_jpql_api_hint_usage_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-hint-usage-example[]
Person query = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
// timeout - in milliseconds
.setHint("jakarta.persistence.query.timeout", 2000)
// flush only at commit time
.setFlushMode(FlushModeType.COMMIT)
.setParameter("name", "J%")
.getSingleResult();
//end::jpql-api-hint-usage-example[]
});
}
@Test
// Doc snippet: binding a named String parameter.
public void test_jpql_api_parameter_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-parameter-example[]
Query query = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name")
.setParameter("name", "J%");
//end::jpql-api-parameter-example[]
});
}
@Test
// Doc snippet: binding a java.time temporal parameter.
public void test_jpql_api_parameter_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
LocalDateTime timestamp = LocalDateTime.now();
//tag::jpql-api-parameter-example[]
Query query = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.createdOn > :timestamp")
.setParameter("timestamp", timestamp);
//end::jpql-api-parameter-example[]
});
}
@Test
// Doc snippet: JPA ordinal (numbered) parameter '?1'.
public void test_jpql_api_positional_parameter_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-ordinal-parameter-example[]
TypedQuery<Person> query = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like ?1",
Person.class)
.setParameter(1, "J%");
//end::jpql-api-ordinal-parameter-example[]
});
}
@Test
// Doc snippet: materializing all results with getResultList().
public void test_jpql_api_list_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-list-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.getResultList();
//end::jpql-api-list-example[]
});
}
@Test
// Doc snippet: getResultStream() inside try-with-resources — the stream holds
// database resources and must be closed.
public void test_jpql_api_stream_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-stream-example[]
try(Stream<Person> personStream = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.getResultStream()) {
List<Person> persons = personStream
.skip(5)
.limit(5)
.toList();
}
//end::jpql-api-stream-example[]
});
}
@Test
// Doc snippet: untyped getSingleResult() requiring a cast.
public void test_jpql_api_single_result_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::jpql-api-single-result-example[]
Person person = (Person) entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like :name")
.setParameter("name", "J%")
.getSingleResult();
//end::jpql-api-single-result-example[]
});
}
@Test
// Doc snippet: Hibernate-native org.hibernate.query.Query creation via Session.
public void test_hql_api_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class);
//end::hql-api-example[]
});
}
@Test
// Doc snippet: Hibernate-native typed named query lookup.
public void test_hql_api_named_query_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-named-query-example[]
org.hibernate.query.Query<Person> query = session.createNamedQuery(
"get_person_by_name",
Person.class);
//end::hql-api-named-query-example[]
});
}
@Test
// Doc snippet: Hibernate-native query options — timeout (seconds), cache mode,
// query-cache flag and SQL comment.
public void test_hql_api_basic_usage_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-basic-usage-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
// timeout - in seconds
.setTimeout(2)
// write to L2 caches, but do not read from them
.setCacheMode(CacheMode.REFRESH)
// assuming query cache was enabled for the SessionFactory
.setCacheable(true)
// add a comment to the generated SQL if enabled via the hibernate.use_sql_comments configuration property
.setComment("+ INDEX(p idx_person_name)");
//end::hql-api-basic-usage-example[]
});
}
@Test
// Doc snippet: binding a parameter with an explicit Hibernate type.
public void test_hql_api_parameter_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-parameter-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%", StandardBasicTypes.STRING);
//end::hql-api-parameter-example[]
});
}
@Test
// Doc snippet: same binding, letting Hibernate infer the parameter type.
public void test_hql_api_parameter_inferred_type_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-parameter-inferred-type-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%");
//end::hql-api-parameter-inferred-type-example[]
});
}
@Test
// Doc snippet: chained setParameter calls for multiple named parameters.
public void test_hql_api_parameter_short_form_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
LocalDateTime timestamp = LocalDateTime.now();
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-parameter-short-form-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name " +
" and p.createdOn > :timestamp",
Person.class)
.setParameter("name", "J%")
.setParameter("timestamp", timestamp);
//end::hql-api-parameter-short-form-example[]
});
}
@Test
@ExpectedException( IllegalArgumentException.class )
// Negative test: a bare JDBC-style '?' (no ordinal) is rejected — the
// IllegalArgumentException is the expected outcome here.
public void test_hql_api_positional_parameter_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Date timestamp = new Date();
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-ordinal-parameter-example[]
org.hibernate.query.Query<Person> query = session.createQuery(
"select p " +
"from Person p " +
"where p.name like ?",
Person.class)
.setParameter(1, "J%");
//end::hql-api-ordinal-parameter-example[]
});
}
@Test
// Doc snippet: Hibernate-native list retrieval with getResultList().
public void test_hql_api_list_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-list-example[]
List<Person> persons = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.getResultList();
//end::hql-api-list-example[]
});
}
@Test
// Doc snippet: streaming entities and post-processing with the Stream API;
// try-with-resources closes the underlying cursor.
public void test_hql_api_stream_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-stream-example[]
try(Stream<Person> persons = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.getResultStream()) {
Map<Phone, List<Call>> callRegistry = persons
.flatMap(person -> person.getPhones().stream())
.flatMap(phone -> phone.getCalls().stream())
.collect(Collectors.groupingBy(Call::getPhone));
process(callRegistry);
}
//end::hql-api-stream-example[]
});
}
// No-op sink for the stream example above; exists only so the snippet compiles.
private void process(Map<Phone, List<Call>> callRegistry) {
}
@Test
// Doc snippet: streaming a scalar projection and mapping each Object[] row
// into a DTO (PersonNames).
public void test_hql_api_stream_projection_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-stream-projection-example[]
try (Stream<Object[]> persons = session.createQuery(
"select p.name, p.nickName " +
"from Person p " +
"where p.name like :name",
Object[].class)
.setParameter("name", "J%")
.getResultStream()) {
persons.map(row -> new PersonNames((String) row[0], (String) row[1]))
.forEach(this::process);
}
//end::hql-api-stream-projection-example[]
});
}
@Test
// Doc snippet: cursor-based iteration with ScrollableResults, closed via
// try-with-resources.
public void test_hql_api_scroll_projection_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
//tag::hql-api-scroll-example[]
try (ScrollableResults<Person> scrollableResults = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.scroll()
) {
while (scrollableResults.next()) {
Person person = scrollableResults.get();
process(person);
}
}
//end::hql-api-scroll-example[]
});
}
@Test
// Negative test: a ScrollableResults obtained inside a transaction must not be
// usable after the transaction/session closes — next() is expected to fail.
public void test_hql_api_scroll_open_example(SessionFactoryScope factoryScope) {
//noinspection resource
ScrollableResults<Person> scrollableResults = factoryScope.fromTransaction( entityManager -> {
QueryProducer session = entityManager.unwrap(Session.class);
return session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.scroll();
} );
try {
scrollableResults.next();
fail("Should throw exception because the ResultSet must be closed by now!");
}
catch (Exception expected) {
}
}
// No-op sink for the scroll example above; exists only so the snippet compiles.
private void process(Person person) {
}
// No-op sink for the stream-projection example above; exists only so the snippet compiles.
private void process(PersonNames personName) {
}
@Test
// Doc snippet: fetching exactly one result via getSingleResult().
public void test_hql_api_unique_result_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Session session = entityManager.unwrap(Session.class);
//tag::hql-api-unique-result-example[]
Person person = session.createQuery(
"select p " +
"from Person p " +
"where p.name like :name",
Person.class)
.setParameter("name", "J%")
.getSingleResult();
//end::hql-api-unique-result-example[]
});
}
@Test
// Doc snippet: single-quoted string literal in HQL.
public void test_hql_string_literals_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-string-literals-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like 'Joe'",
Person.class)
.getResultList();
//end::hql-string-literals-example[]
});
}
@Test
// Doc snippet: escaping an embedded quote by doubling it ('').
public void test_hql_string_literals_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-string-literals-example[]
// Escaping quotes
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like 'Joe''s'",
Person.class)
.getResultList();
//end::hql-string-literals-example[]
});
}
@Test
// Doc snippet: plain integer literal.
public void test_hql_string_literals_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// simple integer literal
Person person = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.id = 1",
Person.class)
.getSingleResult();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: integer literal with the 'L' (long) suffix.
public void test_hql_string_literals_example_4(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// simple integer literal, typed as a long
Person person = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.id = 1L",
Person.class)
.getSingleResult();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: decimal literal.
public void test_hql_string_literals_example_5(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// decimal notation
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.duration > 100.5",
Call.class)
.getResultList();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: decimal literal with the 'F' (float) suffix.
public void test_hql_string_literals_example_6(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// decimal notation, typed as a float
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.duration > 100.5F",
Call.class)
.getResultList();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: scientific-notation literal.
public void test_hql_string_literals_example_7(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// scientific notation
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.duration > 1e+2",
Call.class)
.getResultList();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: scientific-notation literal with the 'F' (float) suffix.
public void test_hql_string_literals_example_8(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-literals-example[]
// scientific notation, typed as a float
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.duration > 1e+2F",
Call.class)
.getResultList();
//end::hql-numeric-literals-example[]
});
}
@Test
// Doc snippet: referencing a public static final Java constant (Math.PI) in HQL.
public void test_hql_java_constant_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-java-constant-example[]
// select clause date/time arithmetic operations
Double pi = entityManager.createQuery(
"select java.lang.Math.PI",
Double.class)
.getSingleResult();
//end::hql-java-constant-example[]
assertEquals(java.lang.Math.PI, pi, 1e-9);
});
}
@Test
// Doc snippet: comparing against an enum value by its unqualified name (LAND_LINE).
public void test_hql_enum_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-enum-example[]
// select clause date/time arithmetic operations
List<Phone> phones1 = entityManager.createQuery(
"from Phone ph " +
"where ph.type = LAND_LINE",
Phone.class)
.getResultList();
//end::hql-enum-example[]
});
}
@Test
// Doc snippet: multiplying an aggregate by a bound parameter in the select clause.
public void test_hql_numeric_arithmetic_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-arithmetic-example[]
// select clause date/time arithmetic operations
Long duration = entityManager.createQuery(
"select sum(ch.duration) * :multiplier " +
"from Person pr " +
"join pr.phones ph " +
"join ph.callHistory ch " +
"where ph.id = 1L ",
Long.class)
.setParameter("multiplier", 1000L)
.getSingleResult();
//end::hql-numeric-arithmetic-example[]
assertTrue( duration > 0 );
});
}
@Test
// Doc snippet: subtracting year() values, using HQL's 'local date' for "today".
public void test_hql_numeric_arithmetic_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-arithmetic-example[]
// select clause date/time arithmetic operations
Integer years = entityManager.createQuery(
"select year(local date) - year(p.createdOn) " +
"from Person p " +
"where p.id = 1L",
Integer.class)
.getSingleResult();
//end::hql-numeric-arithmetic-example[]
assertTrue( years > 0 );
});
}
@Test
// Doc snippet: the same arithmetic used as a where-clause predicate.
public void test_hql_numeric_arithmetic_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-numeric-arithmetic-example[]
// where clause arithmetic operations
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where year(local date) - year(p.createdOn) > 1",
Person.class)
.getResultList();
//end::hql-numeric-arithmetic-example[]
assertFalse( persons.isEmpty() );
});
}
@Test
// Doc snippet: SQL-style '||' string concatenation in HQL.
public void test_hql_concatenation_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-concatenation-example[]
String name = entityManager.createQuery(
"select 'Customer ' || p.name " +
"from Person p " +
"where p.id = 1",
String.class)
.getSingleResult();
//end::hql-concatenation-example[]
assertNotNull(name);
});
}
@Test
// Doc snippet: the five standard aggregates (count/sum/min/max/avg) in one row.
public void test_hql_aggregate_functions_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-example[]
Object[] callStatistics = entityManager.createQuery(
"select " +
"	count(c), " +
"	sum(c.duration), " +
"	min(c.duration), " +
"	max(c.duration), " +
"	avg(c.duration)  " +
"from Call c ",
Object[].class)
.getSingleResult();
//end::hql-aggregate-functions-example[]
assertNotNull(callStatistics);
});
}
@Test
// Doc snippet: count(distinct …) over an association path.
public void test_hql_aggregate_functions_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-example[]
Long phoneCount = entityManager.createQuery(
"select count(distinct c.phone) " +
"from Call c ",
Long.class)
.getSingleResult();
//end::hql-aggregate-functions-example[]
assertNotNull(phoneCount);
});
}
@Test
// Doc snippet: aggregate with 'group by' over a joined association attribute.
public void test_hql_aggregate_functions_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-example[]
List<Object[]> callCount = entityManager.createQuery(
"select p.number, count(c) " +
"from Call c " +
"join c.phone p " +
"group by p.number",
Object[].class)
.getResultList();
//end::hql-aggregate-functions-example[]
assertNotNull(callCount.get(0));
});
}
@Test
// Doc snippet: SQL:2003 'filter (where …)' clause restricting an aggregate.
public void test_hql_aggregate_functions_simple_filter_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-simple-filter-example[]
List<Long> callCount = entityManager.createQuery(
"select count(c) filter (where c.duration < 30) " +
"from Call c ",
Long.class)
.getResultList();
//end::hql-aggregate-functions-simple-filter-example[]
assertNotNull(callCount.get(0));
});
}
@Test
// Doc snippet: filtered aggregate combined with 'group by'.
public void test_hql_aggregate_functions_filter_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-filter-example[]
List<Object[]> callCount = entityManager.createQuery(
"select p.number, count(c) filter (where c.duration < 30) " +
"from Call c " +
"join c.phone p " +
"group by p.number",
Object[].class)
.getResultList();
//end::hql-aggregate-functions-filter-example[]
assertNotNull(callCount.get(0));
});
}
@Test
@SkipForDialect(dialectClass = DerbyDialect.class)
@SkipForDialect(dialectClass = SybaseASEDialect.class)
@SkipForDialect(dialectClass = FirebirdDialect.class, reason = "order by not supported in list")
@SkipForDialect(dialectClass = InformixDialect.class)
// Doc snippet: ordered-set aggregate listagg(…) within group (order by …);
// skipped on dialects lacking support (see annotations above).
public void test_hql_aggregate_functions_within_group_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-aggregate-functions-within-group-example[]
List<String> callCount = entityManager.createQuery(
"select listagg(p.number, ', ') within group (order by p.type,p.number) " +
"from Phone p " +
"group by p.person",
String.class)
.getResultList();
//end::hql-aggregate-functions-within-group-example[]
assertNotNull(callCount.get(0));
});
}
@Test
@SkipForDialect(dialectClass = DerbyDialect.class, reason = "See https://issues.apache.org/jira/browse/DERBY-2072")
// Doc snippet: concat() with a cast of a numeric attribute to string.
public void test_hql_concat_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-concat-function-example[]
List<String> callHistory = entityManager.createQuery(
"select concat(p.number, ' : ' , cast(c.duration as string)) " +
"from Call c " +
"join c.phone p",
String.class)
.getResultList();
//end::hql-concat-function-example[]
assertEquals(2, callHistory.size());
});
}
@Test
// Doc snippet: substring() in both JPQL (comma) and ANSI SQL (from/for) forms.
public void test_hql_substring_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-substring-function-example[]
// JPQL-style
List<String> prefixes = entityManager.createQuery(
"select substring(p.number, 1, 2) " +
"from Call c " +
"join c.phone p",
String.class)
.getResultList();
// ANSI SQL-style
List<String> prefixes2 = entityManager.createQuery(
"select substring(p.number from 1 for 2) " +
"from Call c " +
"join c.phone p",
String.class)
.getResultList();
//end::hql-substring-function-example[]
assertEquals(2, prefixes.size());
assertEquals(2, prefixes2.size());
});
}
@Test
// Doc snippet: upper() string function.
public void test_hql_upper_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-upper-function-example[]
List<String> names = entityManager.createQuery(
"select upper(p.name) " +
"from Person p ",
String.class)
.getResultList();
//end::hql-upper-function-example[]
assertEquals(3, names.size());
});
}
@Test
// Doc snippet: lower() string function.
public void test_hql_lower_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-lower-function-example[]
List<String> names = entityManager.createQuery(
"select lower(p.name) " +
"from Person p ",
String.class)
.getResultList();
//end::hql-lower-function-example[]
assertEquals(3, names.size());
});
}
@Test
// Doc snippet: trim() default form and the 'leading <char> from' variant.
public void test_hql_trim_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-trim-function-example[]
// trim whitespace from both ends
List<String> names1 = entityManager.createQuery(
"select trim(p.name) " +
"from Person p ",
String.class)
.getResultList();
// trim leading spaces
List<String> names2 = entityManager.createQuery(
"select trim(leading ' ' from p.name) " +
"from Person p ",
String.class)
.getResultList();
//end::hql-trim-function-example[]
assertEquals(3, names1.size());
assertEquals(3, names2.size());
});
}
@Test
// Doc snippet: length() string function.
public void test_hql_length_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-length-function-example[]
List<Integer> lengths = entityManager.createQuery(
"select length(p.name) " +
"from Person p ",
Integer.class)
.getResultList();
//end::hql-length-function-example[]
assertEquals(3, lengths.size());
});
}
@Test
// Doc snippet: locate(pattern, string) — JPQL-style position lookup.
public void test_hql_locate_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-locate-function-example[]
List<Integer> sizes = entityManager.createQuery(
"select locate('John', p.name) " +
"from Person p ",
Integer.class)
.getResultList();
//end::hql-locate-function-example[]
assertEquals(3, sizes.size());
});
}
@Test
public void test_hql_position_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-position-function-example[]
List<Integer> sizes = entityManager.createQuery(
"select position('John' in p.name) " +
"from Person p ",
Integer.class)
.getResultList();
//end::hql-position-function-example[]
assertEquals(3, sizes.size());
});
}
@Test
public void test_hql_abs_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-abs-function-example[]
List<Integer> abs = entityManager.createQuery(
"select abs(c.duration) " +
"from Call c ",
Integer.class)
.getResultList();
//end::hql-abs-function-example[]
assertEquals(2, abs.size());
});
}
@Test
public void test_hql_mod_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-mod-function-example[]
List<Integer> mods = entityManager.createQuery(
"select mod(c.duration, 10) " +
"from Call c ",
Integer.class)
.getResultList();
//end::hql-mod-function-example[]
assertEquals(2, mods.size());
});
}
@Test
@SkipForDialect(dialectClass = CockroachDialect.class, reason = "https://github.com/cockroachdb/cockroach/issues/26710")
public void test_hql_sqrt_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-sqrt-function-example[]
List<Double> sqrts = entityManager.createQuery(
"select sqrt(c.duration) " +
"from Call c ",
Double.class)
.getResultList();
//end::hql-sqrt-function-example[]
assertEquals(2, sqrts.size());
});
}
@Test
@SkipForDialect(dialectClass = SQLServerDialect.class)
@SkipForDialect(dialectClass = DerbyDialect.class, reason = "Comparisons between 'DATE' and 'TIMESTAMP' are not supported")
public void test_hql_current_date_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-current-date-function-example[]
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.timestamp = current_date",
Call.class)
.getResultList();
//end::hql-current-date-function-example[]
assertEquals(0, calls.size());
});
}
@Test
public void test_hql_current_date_function_example_sql_server(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-extract-function-example[]
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where extract(date from c.timestamp) = local date",
Call.class)
.getResultList();
//end::hql-extract-function-example[]
assertEquals(0, calls.size());
});
}
@Test
@RequiresDialect(H2Dialect.class)
public void test_hql_current_time_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-current-time-function-example[]
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.timestamp = current_time",
Call.class)
.getResultList();
//end::hql-current-time-function-example[]
assertEquals(0, calls.size());
});
}
@Test
public void test_hql_current_timestamp_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-current-timestamp-function-example[]
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.timestamp = current_timestamp",
Call.class)
.getResultList();
//end::hql-current-timestamp-function-example[]
assertEquals(0, calls.size());
});
}
@Test
public void test_var_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-native-function-example[]
// careful: these functions are not supported on all databases!
List<Tuple> variances = entityManager.createQuery(
"select var_samp(c.duration) as sampvar, var_pop(c.duration) as popvar " +
"from Call c ",
Tuple.class)
.getResultList();
//end::hql-native-function-example[]
});
}
@Test
@RequiresDialect(H2Dialect.class)
@RequiresDialect(MySQLDialect.class)
@RequiresDialect(PostgreSQLDialect.class)
@RequiresDialect(OracleDialect.class)
public void test_hql_bit_length_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-native-function-example[]
List<Number> bits = entityManager.createQuery(
"select bit_length(c.phone.number) " +
"from Call c ",
Number.class)
.getResultList();
//end::hql-native-function-example[]
assertEquals(2, bits.size());
});
}
@Test
public void test_hql_cast_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-cast-function-example[]
List<String> durations = entityManager.createQuery(
"select cast(c.duration as String) " +
"from Call c ",
String.class)
.getResultList();
//end::hql-cast-function-example[]
assertEquals(2, durations.size());
});
}
@Test
public void test_hql_extract_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-extract-function-example[]
List<Integer> years = entityManager.createQuery(
"select extract(year from c.timestamp) " +
"from Call c ",
Integer.class)
.getResultList();
//end::hql-extract-function-example[]
assertEquals(2, years.size());
});
}
@Test
public void test_hql_year_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-year-function-example[]
List<Integer> years = entityManager.createQuery(
"select year(c.timestamp) " +
"from Call c ",
Integer.class)
.getResultList();
//end::hql-year-function-example[]
assertEquals(2, years.size());
});
}
@Test
@SkipForDialect(dialectClass = SQLServerDialect.class)
public void test_hql_str_function_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-str-function-example[]
List<String> timestamps = entityManager.createQuery(
"select str(c.timestamp) " +
"from Call c ",
String.class)
.getResultList();
//end::hql-str-function-example[]
assertEquals(2, timestamps.size());
});
}
@Test
@RequiresDialect(SQLServerDialect.class)
public void test_hql_str_function_example_sql_server(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-str-function-example[]
// Special SQL Server function "str" that converts floats
List<String> timestamps = entityManager.createQuery(
"select str(cast(duration as float) / 60, 4, 2) " +
"from Call c ",
String.class)
.getResultList();
//end::hql-str-function-example[]
assertEquals(2, timestamps.size());
});
}
@Test
public void test_hql_collection_expressions_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Call call = entityManager.createQuery("select c from Call c", Call.class).getResultList().get(1);
//tag::hql-collection-expressions-example[]
List<Phone> phones = entityManager.createQuery(
"select p " +
"from Phone p " +
"where max(elements(p.calls)) = :call",
Phone.class)
.setParameter("call", call)
.getResultList();
//end::hql-collection-expressions-example[]
assertEquals(1, phones.size());
});
}
@Test
public void test_hql_collection_expressions_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Call call = entityManager.createQuery("select c from Call c", Call.class).getResultList().get(0);
//tag::hql-collection-expressions-example[]
List<Phone> phones = entityManager.createQuery(
"select p " +
"from Phone p " +
"where min(elements(p.calls)) = :call",
Phone.class)
.setParameter("call", call)
.getResultList();
//end::hql-collection-expressions-example[]
assertEquals(1, phones.size());
});
}
@Test
public void test_hql_collection_expressions_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-expressions-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where max(indices(p.phones)) = 0",
Person.class)
.getResultList();
//end::hql-collection-expressions-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_collection_expressions_example_5(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Call call = entityManager.createQuery("select c from Call c", Call.class).getResultList().get(0);
Phone phone = call.getPhone();
//tag::hql-collection-expressions-some-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where :phone = some elements(p.phones)",
Person.class)
.setParameter("phone", phone)
.getResultList();
//end::hql-collection-expressions-some-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_collection_expressions_example_4(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
Call call = entityManager.createQuery("select c from Call c", Call.class).getResultList().get(0);
Phone phone = call.getPhone();
//tag::hql-collection-expressions-some-example[]
// the above query can be re-written with member of
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where :phone member of p.phones",
Person.class)
.setParameter("phone", phone)
.getResultList();
//end::hql-collection-expressions-some-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_collection_expressions_example_6(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-expressions-exists-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where exists elements(p.phones)",
Person.class)
.getResultList();
//end::hql-collection-expressions-exists-example[]
assertEquals(2, persons.size());
});
}
@Test
@SkipForDialect(dialectClass = DerbyDialect.class, reason = "Comparisons between 'DATE' and 'TIMESTAMP' are not supported")
public void test_hql_collection_expressions_example_8(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-expressions-all-example[]
List<Phone> phones = entityManager.createQuery(
"select p " +
"from Phone p " +
"where local date > all elements(p.repairTimestamps)",
Phone.class)
.getResultList();
//end::hql-collection-expressions-all-example[]
assertEquals(3, phones.size());
});
}
@Test
public void test_hql_collection_expressions_example_9(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-expressions-in-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where 1 in indices(p.phones)",
Person.class)
.getResultList();
//end::hql-collection-expressions-in-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_collection_expressions_example_10(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-size-example[]
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where size(p.phones) >= 2",
Person.class)
.getResultList();
//end::hql-size-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_collection_index_operator_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-index-operator-example[]
// indexed lists
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.phones[0].type = LAND_LINE",
Person.class)
.getResultList();
//end::hql-collection-index-operator-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_collection_index_operator_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
String address = "Home address";
//tag::hql-collection-index-operator-example[]
// maps
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.addresses['HOME'] = :address",
Person.class)
.setParameter("address", address)
.getResultList();
//end::hql-collection-index-operator-example[]
assertEquals(1, persons.size());
});
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsSubqueryInOnClause.class)
public void test_hql_collection_index_operator_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-collection-index-operator-example[]
//max index in list
List<Person> persons = entityManager.createQuery(
"select pr " +
"from Person pr " +
"where pr.phones[max(indices(pr.phones))].type = 'LAND_LINE'",
Person.class)
.getResultList();
//end::hql-collection-index-operator-example[]
assertEquals(1, persons.size());
});
}
@Test
public void test_hql_polymorphism_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-polymorphism-example[]
List<Payment> payments = entityManager.createQuery(
"select p " +
"from Payment p ",
Payment.class)
.getResultList();
//end::hql-polymorphism-example[]
assertEquals(2, payments.size());
});
}
@Test
public void test_hql_treat_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-treat-example[]
List<Payment> payments = entityManager.createQuery(
"select p " +
"from Payment p " +
"where length(treat(p as CreditCardPayment).cardNumber) between 16 and 20",
Payment.class)
.getResultList();
//end::hql-treat-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_hql_join_many_treat_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-join-treat-example[]
// a to-many association
List<Object[]> payments = entityManager.createQuery(
"select a, ccp " +
"from Account a " +
"join treat(a.payments as CreditCardPayment) ccp " +
"where length(ccp.cardNumber) between 16 and 20",
Object[].class)
.getResultList();
//end::hql-join-treat-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_hql_join_one_treat_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-join-treat-example[]
// a to-one association
List<Object[]> payments = entityManager.createQuery(
"select c, ccp " +
"from Call c " +
"join treat(c.payment as CreditCardPayment) ccp " +
"where length(ccp.cardNumber) between 16 and 20",
Object[].class)
.getResultList();
//end::hql-join-treat-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_hql_entity_type_exp_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-entity-type-exp-example[]
List<Payment> payments = entityManager.createQuery(
"select p " +
"from Payment p " +
"where type(p) = CreditCardPayment",
Payment.class)
.getResultList();
//end::hql-entity-type-exp-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_hql_entity_type_exp_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-entity-type-exp-example[]
// using a parameter instead of a literal entity type
List<Payment> payments = entityManager.createQuery(
"select p " +
"from Payment p " +
"where type(p) = :type",
Payment.class)
.setParameter("type", WireTransferPayment.class)
.getResultList();
//end::hql-entity-type-exp-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_hql_entity_type_exp_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-entity-type-exp-example[]
// using a parameter instead of a literal entity type
List<Payment> payments = entityManager.createQuery(
"select p " +
"from Payment p " +
"where type(p) = type(:instance)",
Payment.class)
.setParameter("instance", new WireTransferPayment())
.getResultList();
//end::hql-entity-type-exp-example[]
assertEquals(1, payments.size());
});
}
@Test
public void test_simple_case_expressions_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-simple-case-expressions-example[]
List<String> nickNames = entityManager.createQuery(
"select " +
" case p.nickName " +
" when 'NA' " +
" then '<no nick name>' " +
" else p.nickName " +
" end " +
"from Person p",
String.class)
.getResultList();
//end::hql-simple-case-expressions-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_simple_case_expressions_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-coalesce-example[]
List<String> nickNames = entityManager.createQuery(
"select coalesce(p.nickName, '<no nick name>') " +
"from Person p",
String.class)
.getResultList();
//end::hql-coalesce-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_searched_case_expressions_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-searched-case-expressions-example[]
List<String> nickNames = entityManager.createQuery(
"select " +
" case " +
" when p.nickName is null " +
" then " +
" case " +
" when p.name is null " +
" then '<no nick name>' " +
" else p.name " +
" end" +
" else p.nickName " +
" end " +
"from Person p",
String.class)
.getResultList();
//end::hql-searched-case-expressions-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_searched_case_expressions_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-coalesce-example[]
List<String> nickNames = entityManager.createQuery(
"select coalesce(p.nickName, p.name, '<no nick name>') " +
"from Person p",
String.class)
.getResultList();
//end::hql-coalesce-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_case_arithmetic_expressions_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-case-arithmetic-expressions-example[]
List<Long> values = entityManager.createQuery(
"select " +
" case when p.nickName is null " +
" then p.id * 1000 " +
" else p.id " +
" end " +
"from Person p " +
"order by p.id",
Long.class)
.getResultList();
assertEquals(3, values.size());
assertEquals(1L, (long) values.get(0));
assertEquals(2000, (long) values.get(1));
assertEquals(3000, (long) values.get(2));
//end::hql-case-arithmetic-expressions-example[]
});
}
@Test
public void test_hql_null_if_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-nullif-example[]
List<String> nickNames = entityManager.createQuery(
"select nullif(p.nickName, p.name) " +
"from Person p",
String.class)
.getResultList();
//end::hql-nullif-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_hql_null_if_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-nullif-example[]
// equivalent CASE expression
List<String> nickNames = entityManager.createQuery(
"select " +
" case" +
" when p.nickName = p.name" +
" then null" +
" else p.nickName" +
" end " +
"from Person p",
String.class)
.getResultList();
//end::hql-nullif-example[]
assertEquals(3, nickNames.size());
});
}
@Test
public void test_hql_select_clause_dynamic_instantiation_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-select-clause-dynamic-instantiation-example[]
CallStatistics callStatistics = entityManager.createQuery(
"select new org.hibernate.orm.test.hql.CallStatistics(" +
" count(c), " +
" sum(c.duration), " +
" min(c.duration), " +
" max(c.duration), " +
" avg(c.duration)" +
") " +
"from Call c ",
CallStatistics.class)
.getSingleResult();
//end::hql-select-clause-dynamic-instantiation-example[]
assertNotNull(callStatistics);
});
}
@Test
public void test_hql_select_clause_dynamic_list_instantiation_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-select-clause-dynamic-list-instantiation-example[]
List<List> phoneCallDurations = entityManager.createQuery(
"select new list(" +
" p.number, " +
" c.duration " +
") " +
"from Call c " +
"join c.phone p ",
List.class)
.getResultList();
//end::hql-select-clause-dynamic-list-instantiation-example[]
assertNotNull(phoneCallDurations);
});
}
@Test
public void test_hql_select_clause_dynamic_map_instantiation_example(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-select-clause-dynamic-map-instantiation-example[]
List<Map> phoneCallTotalDurations = entityManager.createQuery(
"select new map(" +
" p.number as phoneNumber , " +
" sum(c.duration) as totalDuration, " +
" avg(c.duration) as averageDuration " +
") " +
"from Call c " +
"join c.phone p " +
"group by p.number ",
Map.class)
.getResultList();
//end::hql-select-clause-dynamic-map-instantiation-example[]
assertNotNull(phoneCallTotalDurations);
});
}
@Test
public void test_hql_relational_comparisons_example_1(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-relational-comparisons-example[]
// numeric comparison
List<Call> calls = entityManager.createQuery(
"select c " +
"from Call c " +
"where c.duration < 30 ",
Call.class)
.getResultList();
//end::hql-relational-comparisons-example[]
assertEquals(1, calls.size());
});
}
@Test
public void test_hql_relational_comparisons_example_2(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-relational-comparisons-example[]
// string comparison
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.name like 'John%' ",
Person.class)
.getResultList();
//end::hql-relational-comparisons-example[]
assertEquals(1, persons.size());
});
}
@Test
@RequiresDialect(H2Dialect.class)
@RequiresDialect(PostgreSQLDialect.class)
@RequiresDialect(MySQLDialect.class)
public void test_hql_relational_comparisons_example_3(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-relational-comparisons-example[]
// datetime comparison
List<Person> persons = entityManager.createQuery(
"select p " +
"from Person p " +
"where p.createdOn > date 1950-01-01 ",
Person.class)
.getResultList();
//end::hql-relational-comparisons-example[]
assertEquals(2, persons.size());
});
}
@Test
public void test_hql_relational_comparisons_example_4(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
//tag::hql-relational-comparisons-example[]
// | HQLTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java | {
"start": 2109,
"end": 10096
} | class ____ extends Storage {
private final FileJournalManager fjm;
private final StorageDirectory sd;
private StorageState state;
private static final List<Pattern> PAXOS_DIR_PURGE_REGEXES =
ImmutableList.of(Pattern.compile("(\\d+)"));
private static final String STORAGE_EDITS_SYNC = "edits.sync";
/**
* @param conf Configuration object
* @param logDir the path to the directory in which data will be stored
* @param errorReporter a callback to report errors
* @throws IOException
*/
protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
StorageErrorReporter errorReporter) throws IOException {
super(NodeType.JOURNAL_NODE);
sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
this.addStorageDir(sd);
this.fjm = new FileJournalManager(conf, sd, errorReporter);
analyzeAndRecoverStorage(startOpt);
}
FileJournalManager getJournalManager() {
return fjm;
}
@Override
public boolean isPreUpgradableLayout(StorageDirectory sd)
throws IOException {
return false;
}
/**
* Find an edits file spanning the given transaction ID range.
* If no such file exists, an exception is thrown.
*/
File findFinalizedEditsFile(long startTxId, long endTxId)
throws IOException {
File ret = new File(sd.getCurrentDir(),
NNStorage.getFinalizedEditsFileName(startTxId, endTxId));
if (!ret.exists()) {
throw new IOException(
"No edits file for range " + startTxId + "-" + endTxId);
}
return ret;
}
/**
* @return the path for an in-progress edits file starting at the given
* transaction ID. This does not verify existence of the file.
*/
File getInProgressEditLog(long startTxId) {
return new File(sd.getCurrentDir(),
NNStorage.getInProgressEditsFileName(startTxId));
}
/**
* @param segmentTxId the first txid of the segment
* @param epoch the epoch number of the writer which is coordinating
* recovery
* @return the temporary path in which an edits log should be stored
* while it is being downloaded from a remote JournalNode
*/
File getSyncLogTemporaryFile(long segmentTxId, long epoch) {
String name = NNStorage.getInProgressEditsFileName(segmentTxId) +
".epoch=" + epoch;
return new File(sd.getCurrentDir(), name);
}
/**
* Directory {@code edits.sync} temporarily holds the log segments
* downloaded through {@link JournalNodeSyncer} before they are moved to
* {@code current} directory.
*
* @return the directory path
*/
File getEditsSyncDir() {
return new File(sd.getRoot(), STORAGE_EDITS_SYNC);
}
File getTemporaryEditsFile(long startTxId, long endTxId) {
return new File(getEditsSyncDir(), String.format("%s_%019d-%019d",
NNStorage.NameNodeFile.EDITS.getName(), startTxId, endTxId));
}
File getFinalizedEditsFile(long startTxId, long endTxId) {
return new File(sd.getCurrentDir(), String.format("%s_%019d-%019d",
NNStorage.NameNodeFile.EDITS.getName(), startTxId, endTxId));
}
/**
* @return the path for the file which contains persisted data for the
* paxos-like recovery process for the given log segment.
*/
File getPaxosFile(long segmentTxId) {
return new File(getOrCreatePaxosDir(), String.valueOf(segmentTxId));
}
File getOrCreatePaxosDir() {
File paxosDir = new File(sd.getCurrentDir(), "paxos");
if(!paxosDir.exists()) {
LOG.info("Creating paxos dir: {}", paxosDir.toPath());
if(!paxosDir.mkdir()) {
LOG.error("Could not create paxos dir: {}", paxosDir.toPath());
}
}
return paxosDir;
}
File getRoot() {
return sd.getRoot();
}
/**
* Remove any log files and associated paxos files which are older than
* the given txid.
*/
void purgeDataOlderThan(long minTxIdToKeep) throws IOException {
fjm.purgeLogsOlderThan(minTxIdToKeep);
purgeMatching(getOrCreatePaxosDir(),
PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep);
}
/**
* Purge files in the given directory which match any of the set of patterns.
* The patterns must have a single numeric capture group which determines
* the associated transaction ID of the file. Only those files for which
* the transaction ID is less than the <code>minTxIdToKeep</code> parameter
* are removed.
*/
private static void purgeMatching(File dir, List<Pattern> patterns,
long minTxIdToKeep) throws IOException {
for (File f : FileUtil.listFiles(dir)) {
if (!f.isFile()) continue;
for (Pattern p : patterns) {
Matcher matcher = p.matcher(f.getName());
if (matcher.matches()) {
// This parsing will always succeed since the group(1) is
// /\d+/ in the regex itself.
long txid = Long.parseLong(matcher.group(1));
if (txid < minTxIdToKeep) {
LOG.info("Purging no-longer needed file {}", txid);
if (!f.delete()) {
LOG.warn("Unable to delete no-longer-needed data {}", f);
}
break;
}
}
}
}
}
void format(NamespaceInfo nsInfo, boolean force) throws IOException {
unlockAll();
try {
sd.analyzeStorage(StartupOption.FORMAT, this, !force);
} finally {
sd.unlock();
}
setStorageInfo(nsInfo);
LOG.info("Formatting journal {} with nsid: {}", sd, getNamespaceID());
// Unlock the directory before formatting, because we will
// re-analyze it after format(). The analyzeStorage() call
// below is reponsible for re-locking it. This is a no-op
// if the storage is not currently locked.
unlockAll();
sd.clearDirectory();
writeProperties(sd);
getOrCreatePaxosDir();
analyzeStorage();
}
void analyzeStorage() throws IOException {
this.state = sd.analyzeStorage(StartupOption.REGULAR, this);
refreshStorage();
}
void refreshStorage() throws IOException {
if (state == StorageState.NORMAL) {
readProperties(sd);
}
}
@Override
protected void setLayoutVersion(Properties props, StorageDirectory sd)
throws IncorrectVersionException, InconsistentFSStateException {
int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
// For journal node, since it now does not decode but just scan through the
// edits, it can handle edits with future version in most of the cases.
// Thus currently we may skip the layoutVersion check here.
layoutVersion = lv;
}
void analyzeAndRecoverStorage(StartupOption startOpt) throws IOException {
this.state = sd.analyzeStorage(startOpt, this);
final boolean needRecover = state != StorageState.NORMAL
&& state != StorageState.NON_EXISTENT
&& state != StorageState.NOT_FORMATTED;
if (state == StorageState.NORMAL && startOpt != StartupOption.ROLLBACK) {
readProperties(sd);
} else if (needRecover) {
sd.doRecover(state);
}
}
void checkConsistentNamespace(NamespaceInfo nsInfo)
throws IOException {
if (nsInfo.getNamespaceID() != getNamespaceID()) {
throw new IOException("Incompatible namespaceID for journal " +
this.sd + ": NameNode has nsId " + nsInfo.getNamespaceID() +
" but storage has nsId " + getNamespaceID());
}
if (!nsInfo.getClusterID().equals(getClusterID())) {
throw new IOException("Incompatible clusterID for journal " +
this.sd + ": NameNode has clusterId '" + nsInfo.getClusterID() +
"' but storage has clusterId '" + getClusterID() + "'");
}
}
public void close() throws IOException {
LOG.info("Closing journal storage for {}", sd);
unlockAll();
}
public boolean isFormatted() {
return state == StorageState.NORMAL;
}
}
| JNStorage |
java | apache__camel | components/camel-ftp/src/main/java/org/apache/camel/component/file/remote/FtpsConfiguration.java | {
"start": 1025,
"end": 3599
} | class ____ extends FtpConfiguration {
@UriParam(defaultValue = "TLSv1.3", label = "security")
private String securityProtocol = "TLSv1.3";
@UriParam(label = "security")
private boolean implicit;
@UriParam(label = "security")
private boolean disableSecureDataChannelDefaults;
@UriParam(label = "security")
private String execProt;
@UriParam(label = "security")
private Long execPbsz;
public FtpsConfiguration() {
setProtocol("ftps");
}
public FtpsConfiguration(URI uri) {
super(uri);
}
/**
* Returns the underlying security protocol.
*/
public String getSecurityProtocol() {
return securityProtocol;
}
/**
* Set the underlying security protocol.
*/
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
/**
* Returns the security mode(Implicit/Explicit). true - Implicit Mode / False - Explicit Mode
*/
public boolean isImplicit() {
return implicit;
}
/**
* Set the security mode (Implicit/Explicit). true - Implicit Mode / False - Explicit Mode
*/
public void setImplicit(boolean implicit) {
this.implicit = implicit;
}
public boolean isDisableSecureDataChannelDefaults() {
return disableSecureDataChannelDefaults;
}
/**
* Use this option to disable default options when using secure data channel.
* <p/>
* This allows you to be in full control what the execPbsz and execProt setting should be used.
* <p/>
* Default is <tt>false</tt>
*
* @see #setExecPbsz(Long)
* @see #setExecProt(String)
*/
public void setDisableSecureDataChannelDefaults(boolean disableSecureDataChannelDefaults) {
this.disableSecureDataChannelDefaults = disableSecureDataChannelDefaults;
}
public String getExecProt() {
return execProt;
}
/**
* The exec protection level
* <p/>
* PROT command. C - Clear S - Safe(SSL protocol only) E - Confidential(SSL protocol only) P - Private
*
* @param execProt either C, S, E or P
*/
public void setExecProt(String execProt) {
this.execProt = execProt;
}
public Long getExecPbsz() {
return execPbsz;
}
/**
* When using secure data channel you can set the exec protection buffer size
*
* @param execPbsz the buffer size
*/
public void setExecPbsz(Long execPbsz) {
this.execPbsz = execPbsz;
}
}
| FtpsConfiguration |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTrigger.java | {
"start": 563,
"end": 1774
} | class ____ implements Trigger {
@Override
public String type() {
return ManualTriggerEngine.TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject().endObject();
}
static ManualTrigger parse(XContentParser parser) throws IOException {
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException(
"unable to parse ["
+ ManualTriggerEngine.TYPE
+ "] trigger. expected a start object token, found ["
+ parser.currentToken()
+ "]"
);
}
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ElasticsearchParseException(
"unable to parse ["
+ ManualTriggerEngine.TYPE
+ "] trigger. expected an empty object, but found an object with ["
+ token
+ "]"
);
}
return new ManualTrigger();
}
}
| ManualTrigger |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/DirectResourceLoader.java | {
"start": 1366,
"end": 3103
} | class ____<DataT> implements ModelLoader<Integer, DataT> {
private final Context context;
private final ResourceOpener<DataT> resourceOpener;
public static ModelLoaderFactory<Integer, InputStream> inputStreamFactory(Context context) {
return new InputStreamFactory(context);
}
public static ModelLoaderFactory<Integer, AssetFileDescriptor> assetFileDescriptorFactory(
Context context) {
return new AssetFileDescriptorFactory(context);
}
public static ModelLoaderFactory<Integer, Drawable> drawableFactory(Context context) {
return new DrawableFactory(context);
}
DirectResourceLoader(Context context, ResourceOpener<DataT> resourceOpener) {
this.context = context.getApplicationContext();
this.resourceOpener = resourceOpener;
}
@Override
public LoadData<DataT> buildLoadData(
@NonNull Integer resourceId, int width, int height, @NonNull Options options) {
Theme theme = options.get(ResourceDrawableDecoder.THEME);
Resources resources =
Build.VERSION.SDK_INT >= VERSION_CODES.LOLLIPOP && theme != null
? theme.getResources()
: context.getResources();
return new LoadData<>(
// TODO(judds): We try to apply AndroidResourceSignature for caching in RequestBuilder.
// Arguably we should mix in that information here instead.
new ObjectKey(resourceId),
new ResourceDataFetcher<>(theme, resources, resourceOpener, resourceId));
}
@Override
public boolean handles(@NonNull Integer integer) {
// We could check that this is in fact a resource ID, but doing so isn't free and in practice
// it doesn't seem to have been an issue historically.
return true;
}
private | DirectResourceLoader |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/async/AsyncEndpointMulticastFineGrainedErrorHandlingTest.java | {
"start": 1033,
"end": 3046
} | class ____ extends ContextTestSupport {
@Test
public void testAsyncEndpointOK() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
context.addComponent("async", new MyAsyncComponent());
onException(Exception.class).redeliveryDelay(0).maximumRedeliveries(2);
from("direct:start").to("mock:a").multicast().stopOnException().to("mock:foo", "async:bye:camel", "mock:bar");
}
});
context.start();
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testAsyncEndpointERROR() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
context.addComponent("async", new MyAsyncComponent());
onException(Exception.class).redeliveryDelay(0).maximumRedeliveries(2);
from("direct:start").to("mock:a").multicast().stopOnException().to("mock:foo", "async:bye:camel")
.throwException(new IllegalArgumentException("Damn"))
.to("mock:bar");
}
});
context.start();
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(0);
try {
template.sendBody("direct:start", "Hello World");
fail("Should throw exception");
} catch (Exception e) {
// expected
}
assertMockEndpointsSatisfied();
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
}
| AsyncEndpointMulticastFineGrainedErrorHandlingTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/EmbeddableInheritanceReplaceTest.java | {
"start": 2198,
"end": 2583
} | class ____ {
@Id
@GeneratedValue
private Integer id;
@Embedded
private Base base;
public Ent() {
}
public Ent(final Integer id) {
this.id = id;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Base getBase() {
return base;
}
public void setBase(Base base) {
this.base = base;
}
}
}
| Ent |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ResourceElementResolverMethodTests.java | {
"start": 5477,
"end": 5693
} | class ____ {
private String one;
public void setOne(String one) {
this.one = one;
}
public void setTest(String test) {
// no-op
}
public void setCount(Integer count) {
// no-op
}
}
}
| TestBean |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/FilePosition.java | {
"start": 1755,
"end": 8719
} | class ____ {
/**
* Holds block based information about a file.
*/
private BlockData blockData;
/**
* Information about the buffer in use.
*/
private BufferData data;
/**
* Provides access to the underlying file.
*/
private ByteBuffer buffer;
/**
* Start offset of the buffer relative to the start of a file.
*/
private long bufferStartOffset;
/**
* Offset where reading starts relative to the start of a file.
*/
private long readStartOffset;
// Read stats after a seek (mostly for debugging use).
private int numSingleByteReads;
private int numBytesRead;
private int numBufferReads;
/**
* Constructs an instance of {@link FilePosition}.
*
* @param fileSize size of the associated file.
* @param blockSize size of each block within the file.
*
* @throws IllegalArgumentException if fileSize is negative.
* @throws IllegalArgumentException if blockSize is zero or negative.
*/
public FilePosition(long fileSize, int blockSize) {
checkNotNegative(fileSize, "fileSize");
if (fileSize == 0) {
checkNotNegative(blockSize, "blockSize");
} else {
checkPositiveInteger(blockSize, "blockSize");
}
this.blockData = new BlockData(fileSize, blockSize);
// The position is valid only when a valid buffer is associated with this file.
this.invalidate();
}
/**
* Associates a buffer with this file.
*
* @param bufferData the buffer associated with this file.
* @param startOffset Start offset of the buffer relative to the start of a file.
* @param readOffset Offset where reading starts relative to the start of a file.
*
* @throws IllegalArgumentException if bufferData is null.
* @throws IllegalArgumentException if startOffset is negative.
* @throws IllegalArgumentException if readOffset is negative.
* @throws IllegalArgumentException if readOffset is outside the range [startOffset, buffer end].
*/
public void setData(BufferData bufferData,
long startOffset,
long readOffset) {
checkNotNull(bufferData, "bufferData");
checkNotNegative(startOffset, "startOffset");
checkNotNegative(readOffset, "readOffset");
checkWithinRange(
readOffset,
"readOffset",
startOffset,
startOffset + bufferData.getBuffer().limit());
data = bufferData;
buffer = bufferData.getBuffer().duplicate();
bufferStartOffset = startOffset;
readStartOffset = readOffset;
setAbsolute(readOffset);
resetReadStats();
}
public ByteBuffer buffer() {
throwIfInvalidBuffer();
return buffer;
}
public BufferData data() {
throwIfInvalidBuffer();
return data;
}
/**
* Gets the current absolute position within this file.
*
* @return the current absolute position within this file.
*/
public long absolute() {
throwIfInvalidBuffer();
return bufferStartOffset + relative();
}
/**
* If the given {@code pos} lies within the current buffer, updates the current position to
* the specified value and returns true; otherwise returns false without changing the position.
*
* @param pos the absolute position to change the current position to if possible.
* @return true if the given current position was updated, false otherwise.
*/
public boolean setAbsolute(long pos) {
if (isValid() && isWithinCurrentBuffer(pos)) {
int relativePos = (int) (pos - bufferStartOffset);
buffer.position(relativePos);
return true;
} else {
return false;
}
}
/**
* Gets the current position within this file relative to the start of the associated buffer.
*
* @return the current position within this file relative to the start of the associated buffer.
*/
public int relative() {
throwIfInvalidBuffer();
return buffer.position();
}
/**
* Determines whether the given absolute position lies within the current buffer.
*
* @param pos the position to check.
* @return true if the given absolute position lies within the current buffer, false otherwise.
*/
public boolean isWithinCurrentBuffer(long pos) {
throwIfInvalidBuffer();
long bufferEndOffset = bufferStartOffset + buffer.limit();
return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
}
/**
* Gets the id of the current block.
*
* @return the id of the current block.
*/
public int blockNumber() {
throwIfInvalidBuffer();
return blockData.getBlockNumber(bufferStartOffset);
}
/**
* Determines whether the current block is the last block in this file.
*
* @return true if the current block is the last block in this file, false otherwise.
*/
public boolean isLastBlock() {
return blockData.isLastBlock(blockNumber());
}
/**
* Determines if the current position is valid.
*
* @return true if the current position is valid, false otherwise.
*/
public boolean isValid() {
return buffer != null;
}
/**
* Marks the current position as invalid.
*/
public void invalidate() {
buffer = null;
bufferStartOffset = -1;
data = null;
}
/**
* Gets the start of the current block's absolute offset.
*
* @return the start of the current block's absolute offset.
*/
public long bufferStartOffset() {
throwIfInvalidBuffer();
return bufferStartOffset;
}
/**
* Determines whether the current buffer has been fully read.
*
* @return true if the current buffer has been fully read, false otherwise.
*/
public boolean bufferFullyRead() {
throwIfInvalidBuffer();
return (bufferStartOffset == readStartOffset)
&& (relative() == buffer.limit())
&& (numBytesRead == buffer.limit());
}
public void incrementBytesRead(int n) {
numBytesRead += n;
if (n == 1) {
numSingleByteReads++;
} else {
numBufferReads++;
}
}
public int numBytesRead() {
return numBytesRead;
}
public int numSingleByteReads() {
return numSingleByteReads;
}
public int numBufferReads() {
return numBufferReads;
}
private void resetReadStats() {
numBytesRead = 0;
numSingleByteReads = 0;
numBufferReads = 0;
}
public String toString() {
StringBuilder sb = new StringBuilder();
if (buffer == null) {
sb.append("currentBuffer = null");
} else {
int pos = buffer.position();
int val;
if (pos >= buffer.limit()) {
val = -1;
} else {
val = buffer.get(pos);
}
String currentBufferState =
String.format("%d at pos: %d, lim: %d", val, pos, buffer.limit());
sb.append(String.format(
"block: %d, pos: %d (CBuf: %s)%n",
blockNumber(), absolute(),
currentBufferState));
sb.append("\n");
}
return sb.toString();
}
private void throwIfInvalidBuffer() {
checkState(buffer != null, "'buffer' must not be null");
}
}
| FilePosition |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/Constants.java | {
"start": 943,
"end": 6488
} | class ____ {
/**
* Name of the system property to use to identify the LogEvent factory.
*/
public static final String LOG4J_LOG_EVENT_FACTORY = "Log4jLogEventFactory";
/**
* Name of the system property to use to identify the ContextSelector Class.
*/
public static final String LOG4J_CONTEXT_SELECTOR = "Log4jContextSelector";
/**
* Property name for the default status (internal log4j logging) level to use if not specified in configuration.
* @deprecated since 2.24.0 use
* {@link org.apache.logging.log4j.status.StatusLogger#DEFAULT_STATUS_LISTENER_LEVEL} instead.
*/
@Deprecated
public static final String LOG4J_DEFAULT_STATUS_LEVEL = "Log4jDefaultStatusLevel";
/**
* JNDI context name string literal.
*/
public static final String JNDI_CONTEXT_NAME = "java:comp/env/log4j/context-name";
/**
* Control which script languages are allowed, if any.
*/
public static final String SCRIPT_LANGUAGES = "log4j2.Script.enableLanguages";
/**
* Number of milliseconds in a second.
*/
public static final int MILLIS_IN_SECONDS = 1000;
/**
* Supports user request LOG4J2-898 to have the option to format a message in the background thread.
*/
public static final boolean FORMAT_MESSAGES_IN_BACKGROUND =
PropertiesUtil.getProperties().getBooleanProperty("log4j.format.msg.async", false);
/**
* LOG4J2-3198 property which used to globally opt out of lookups in pattern layout message text, however
* this is the default and this property is no longer read.
*
* Deprecated in 2.15.
*
* @since 2.10
* @deprecated no longer used, lookups are only used when {@code %m{lookups}} is specified
*/
@Deprecated
public static final boolean FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS =
PropertiesUtil.getProperties().getBooleanProperty("log4j2.formatMsgNoLookups", true);
/**
* {@code true} if we think we are running in a web container, based on the boolean value of system property
* "log4j2.isWebapp", or (if this system property is not set) whether the {@code javax.servlet.Servlet} class
* is present in the classpath.
*/
public static final boolean IS_WEB_APP = org.apache.logging.log4j.util.Constants.IS_WEB_APP;
/**
* Kill switch for object pooling in ThreadLocals that enables much of the LOG4J2-1270 no-GC behaviour.
* <p>
* {@code True} for non-{@link #IS_WEB_APP web apps}, disable by setting system property
* "log4j2.enable.threadlocals" to "false".
*
* @since 2.6
*/
public static final boolean ENABLE_THREADLOCALS = org.apache.logging.log4j.util.Constants.ENABLE_THREADLOCALS;
/**
* Kill switch for garbage-free Layout behaviour that encodes LogEvents directly into
* {@link org.apache.logging.log4j.core.layout.ByteBufferDestination}s without creating intermediate temporary
* Objects.
* <p>
* This constant is {@code true} by default, but can be disabled using the
* {@code "log4j2.enableDirectEncoders"} system property.
* </p>
*
* @since 2.6
*/
public static final boolean ENABLE_DIRECT_ENCODERS = PropertiesUtil.getProperties()
.getBooleanProperty("log4j2.enable.direct.encoders", true); // enable GC-free text encoding by default
// the alternative is to enable GC-free encoding only by default only when using all-async loggers:
// AsyncLoggerContextSelector.class.getName().equals(PropertiesUtil.getProperties().getStringProperty(LOG4J_CONTEXT_SELECTOR)));
/**
* Initial StringBuilder size used in RingBuffer LogEvents to store the contents of reusable Messages.
* <p>
* The default value is 128, users can override with system property "log4j.initialReusableMsgSize".
* </p>
* @since 2.6
*/
public static final int INITIAL_REUSABLE_MESSAGE_SIZE = size("log4j.initialReusableMsgSize", 128);
/**
* Maximum size of the StringBuilders used in RingBuffer LogEvents to store the contents of reusable Messages.
* After a large message has been delivered to the appenders, the StringBuilder is trimmed to this size.
* <p>
* The default value is 518, which allows the StringBuilder to resize three times from its initial size.
* Users can override with system property "log4j.maxReusableMsgSize".
* </p>
* @since 2.6
*/
public static final int MAX_REUSABLE_MESSAGE_SIZE = size("log4j.maxReusableMsgSize", (128 * 2 + 2) * 2 + 2);
/**
* Size of CharBuffers used by text encoders.
* <p>
* The default value is 2048, users can override with system property "log4j.encoder.charBufferSize".
* </p>
* @since 2.6
*/
public static final int ENCODER_CHAR_BUFFER_SIZE = size("log4j.encoder.charBufferSize", 2048);
/**
* Default size of ByteBuffers used to encode LogEvents without allocating temporary objects.
* <p>
* The default value is 8192, users can override with system property "log4j.encoder.byteBufferSize".
* </p>
* @see org.apache.logging.log4j.core.layout.ByteBufferDestination
* @since 2.6
*/
public static final int ENCODER_BYTE_BUFFER_SIZE = size("log4j.encoder.byteBufferSize", 8 * 1024);
private static int size(final String property, final int defaultValue) {
return PropertiesUtil.getProperties().getIntegerProperty(property, defaultValue);
}
/**
* Prevent | Constants |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/engine/ThreadPoolMergeSchedulerTests.java | {
"start": 2457,
"end": 46909
} | class ____ extends ESTestCase {
private NodeEnvironment nodeEnvironment;
@After
public void closeNodeEnv() {
if (nodeEnvironment != null) {
nodeEnvironment.close();
nodeEnvironment = null;
}
}
public void testMergesExecuteInSizeOrder() throws IOException {
DeterministicTaskQueue threadPoolTaskQueue = new DeterministicTaskQueue();
Settings settings = Settings.builder()
// disable fs available disk space feature for this test
.put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
.build();
nodeEnvironment = newNodeEnvironment(settings);
ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
.getThreadPoolMergeExecutorService(threadPoolTaskQueue.getThreadPool(), settings, nodeEnvironment);
var mergeMetrics = mock(MergeMetrics.class);
try (
ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
new ShardId("index", "_na_", 1),
IndexSettingsModule.newIndexSettings("index", Settings.EMPTY),
threadPoolMergeExecutorService,
merge -> 0,
mergeMetrics
)
) {
List<OneMerge> executedMergesList = new ArrayList<>();
int mergeCount = randomIntBetween(2, 10);
for (int i = 0; i < mergeCount; i++) {
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
doAnswer(invocation -> {
OneMerge merge = (OneMerge) invocation.getArguments()[0];
assertFalse(merge.isAborted());
executedMergesList.add(merge);
return null;
}).when(mergeSource).merge(any(OneMerge.class));
threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
// verify queued byte metric is recorded for each merge
verify(mergeMetrics, times(i + 1)).incrementQueuedMergeBytes(any(), anyLong());
}
threadPoolTaskQueue.runAllTasks();
assertThat(executedMergesList.size(), is(mergeCount));
// verify metrics are reported for each merge
verify(mergeMetrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
verify(mergeMetrics, times(mergeCount)).decrementRunningMergeBytes(any());
verify(mergeMetrics, times(mergeCount)).markMergeMetrics(any(), anyLong(), anyLong());
// assert merges are executed in ascending size order
for (int i = 1; i < mergeCount; i++) {
assertThat(
executedMergesList.get(i - 1).getStoreMergeInfo().estimatedMergeBytes(),
lessThanOrEqualTo(executedMergesList.get(i).getStoreMergeInfo().estimatedMergeBytes())
);
}
}
assertTrue(threadPoolMergeExecutorService.allDone());
}
public void testSimpleMergeTaskBacklogging() {
int mergeExecutorThreadCount = randomIntBetween(1, 5);
var mergeMetrics = mock(MergeMetrics.class);
Settings mergeSchedulerSettings = Settings.builder()
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeExecutorThreadCount)
.build();
ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
// close method waits for running merges to finish, but this test leaves running merges around
ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
new ShardId("index", "_na_", 1),
IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
threadPoolMergeExecutorService,
merge -> 0,
mergeMetrics
);
// more merge tasks than merge threads
int mergeCount = mergeExecutorThreadCount + randomIntBetween(1, 5);
for (int i = 0; i < mergeCount; i++) {
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
Schedule schedule = threadPoolMergeScheduler.schedule(
threadPoolMergeScheduler.newMergeTask(mergeSource, oneMerge, randomFrom(MergeTrigger.values()))
);
if (i < mergeExecutorThreadCount) {
assertThat(schedule, is(Schedule.RUN));
} else {
assertThat(schedule, is(Schedule.BACKLOG));
}
}
assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(mergeExecutorThreadCount));
assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(mergeCount - mergeExecutorThreadCount));
// verify no metrics are recorded as no merges have been queued or executed through the merge scheduler
verifyNoInteractions(mergeMetrics);
}
public void testSimpleMergeTaskReEnqueueingBySize() {
int mergeExecutorThreadCount = randomIntBetween(1, 5);
Settings mergeSchedulerSettings = Settings.builder()
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeExecutorThreadCount)
.build();
ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
// close method waits for running merges to finish, but this test leaves running merges around
ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
new ShardId("index", "_na_", 1),
IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
threadPoolMergeExecutorService,
merge -> 0,
MergeMetrics.NOOP
);
// sort backlogged merges by size
PriorityQueue<MergeTask> backloggedMergeTasks = new PriorityQueue<>(
16,
Comparator.comparingLong(MergeTask::estimatedRemainingMergeSize)
);
// more merge tasks than merge threads
int mergeCount = mergeExecutorThreadCount + randomIntBetween(2, 10);
for (int i = 0; i < mergeCount; i++) {
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
MergeTask mergeTask = threadPoolMergeScheduler.newMergeTask(mergeSource, oneMerge, randomFrom(MergeTrigger.values()));
Schedule schedule = threadPoolMergeScheduler.schedule(mergeTask);
if (i < mergeExecutorThreadCount) {
assertThat(schedule, is(Schedule.RUN));
} else {
assertThat(schedule, is(Schedule.BACKLOG));
backloggedMergeTasks.add(mergeTask);
}
}
assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(mergeExecutorThreadCount));
assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(backloggedMergeTasks.size()));
int enqueuedTasksCount = mergeCount - mergeExecutorThreadCount;
for (int i = 0; i < enqueuedTasksCount; i++) {
assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(enqueuedTasksCount - i));
MergeTask runningMergeTask = randomFrom(threadPoolMergeScheduler.getRunningMergeTasks().values());
runningMergeTask.run();
var submittedMergeTaskCaptor = ArgumentCaptor.forClass(MergeTask.class);
verify(threadPoolMergeExecutorService, times(i + 1)).reEnqueueBackloggedMergeTask(submittedMergeTaskCaptor.capture());
assertThat(submittedMergeTaskCaptor.getValue(), is(backloggedMergeTasks.poll()));
Schedule schedule = threadPoolMergeScheduler.schedule(submittedMergeTaskCaptor.getValue());
assertThat(schedule, is(Schedule.RUN));
assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(mergeExecutorThreadCount));
}
}
public void testIndexingThrottlingWhenSubmittingMergesWithDiskIOThrottlingEnabled() {
testIndexingThrottlingWhenSubmittingMerges(true);
}
public void testIndexingThrottlingWhenSubmittingMergesWithDiskIOThrottlingDisabled() {
testIndexingThrottlingWhenSubmittingMerges(false);
}
private void testIndexingThrottlingWhenSubmittingMerges(boolean withDiskIOThrottlingEnabled) {
final int maxThreadCount = randomIntBetween(1, 5);
// settings validation requires maxMergeCount >= maxThreadCount
final int maxMergeCount = maxThreadCount + randomIntBetween(0, 5);
List<MergeTask> submittedMergeTasks = new ArrayList<>();
AtomicBoolean isUsingMaxTargetIORate = new AtomicBoolean(false);
ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mockThreadPoolMergeExecutorService(
submittedMergeTasks,
isUsingMaxTargetIORate
);
Settings mergeSchedulerSettings = Settings.builder()
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount)
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount)
.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), withDiskIOThrottlingEnabled)
.build();
TestThreadPoolMergeScheduler threadPoolMergeScheduler = new TestThreadPoolMergeScheduler(
new ShardId("index", "_na_", 1),
IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
threadPoolMergeExecutorService
);
// make sure there are more merges submitted than the max merge count limit (which triggers IO throttling)
int excessMerges = randomIntBetween(1, 10);
int mergesToSubmit = maxMergeCount + excessMerges;
boolean expectIndexThrottling = false;
int submittedMerges = 0;
// merges are submitted, while some are also scheduled (but none is run)
while (submittedMerges < mergesToSubmit - 1) {
isUsingMaxTargetIORate.set(randomBoolean());
if (submittedMergeTasks.isEmpty() == false && randomBoolean()) {
// maybe schedule one of the submitted merges (but still it's not run)
MergeTask mergeTask = randomFrom(submittedMergeTasks);
submittedMergeTasks.remove(mergeTask);
mergeTask.schedule();
} else {
// submit one new merge
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
submittedMerges++;
if ((isUsingMaxTargetIORate.get() || withDiskIOThrottlingEnabled == false) && submittedMerges > maxMergeCount) {
expectIndexThrottling = true;
} else if (submittedMerges <= maxMergeCount) {
expectIndexThrottling = false;
}
}
// assert IO throttle state
assertThat(threadPoolMergeScheduler.isIndexingThrottlingEnabled(), is(expectIndexThrottling));
}
if (withDiskIOThrottlingEnabled) {
// submit one last merge when IO throttling is at max value
isUsingMaxTargetIORate.set(true);
} else {
// but if disk IO throttling is not enabled, indexing throttling should still be triggered
isUsingMaxTargetIORate.set(randomBoolean());
}
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
// assert indexing throttling state because IO throttling is at max value OR disk IO throttling is disabled
assertThat(threadPoolMergeScheduler.isIndexingThrottlingEnabled(), is(true));
}
public void testIndexingThrottlingWhileMergesAreRunning() {
final int maxThreadCount = randomIntBetween(1, 5);
// settings validation requires maxMergeCount >= maxThreadCount
final int maxMergeCount = maxThreadCount + randomIntBetween(0, 5);
List<MergeTask> submittedMergeTasks = new ArrayList<>();
List<MergeTask> scheduledToRunMergeTasks = new ArrayList<>();
AtomicBoolean isUsingMaxTargetIORate = new AtomicBoolean(false);
ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mockThreadPoolMergeExecutorService(
submittedMergeTasks,
isUsingMaxTargetIORate
);
Settings mergeSchedulerSettings = Settings.builder()
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount)
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount)
.build();
TestThreadPoolMergeScheduler threadPoolMergeScheduler = new TestThreadPoolMergeScheduler(
new ShardId("index", "_na_", 1),
IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
threadPoolMergeExecutorService
);
int mergesToRun = randomIntBetween(0, 5);
// make sure there are more merges submitted and not run
int excessMerges = randomIntBetween(1, 10);
int mergesToSubmit = maxMergeCount + mergesToRun + excessMerges;
int mergesOutstanding = 0;
boolean expectIndexThrottling = false;
// merges are submitted, while some are also scheduled and run
while (mergesToSubmit > 0) {
isUsingMaxTargetIORate.set(randomBoolean());
if (submittedMergeTasks.isEmpty() == false && randomBoolean()) {
// maybe schedule one submitted merge
MergeTask mergeTask = randomFrom(submittedMergeTasks);
submittedMergeTasks.remove(mergeTask);
Schedule schedule = mergeTask.schedule();
if (schedule == Schedule.RUN) {
scheduledToRunMergeTasks.add(mergeTask);
}
} else {
if (mergesToRun > 0 && scheduledToRunMergeTasks.isEmpty() == false && randomBoolean()) {
// maybe run one scheduled merge
MergeTask mergeTask = randomFrom(scheduledToRunMergeTasks);
scheduledToRunMergeTasks.remove(mergeTask);
mergeTask.run();
mergesToRun--;
mergesOutstanding--;
} else {
// submit one merge
MergeSource mergeSource = mock(MergeSource.class);
OneMerge oneMerge = mock(OneMerge.class);
when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
mergesToSubmit--;
mergesOutstanding++;
}
if (isUsingMaxTargetIORate.get() && mergesOutstanding > maxMergeCount) {
expectIndexThrottling = true;
} else if (mergesOutstanding <= maxMergeCount) {
expectIndexThrottling = false;
}
}
// assert IO throttle state
assertThat(threadPoolMergeScheduler.isIndexingThrottlingEnabled(), is(expectIndexThrottling));
}
// execute all remaining merges (submitted or scheduled)
while (mergesToRun > 0 || submittedMergeTasks.isEmpty() == false || scheduledToRunMergeTasks.isEmpty() == false) {
// simulate that the {@link ThreadPoolMergeExecutorService} maybe peaked IO un-throttling
isUsingMaxTargetIORate.set(randomBoolean());
if (submittedMergeTasks.isEmpty() == false && (scheduledToRunMergeTasks.isEmpty() || randomBoolean())) {
// maybe schedule one submitted merge
MergeTask mergeTask = randomFrom(submittedMergeTasks);
submittedMergeTasks.remove(mergeTask);
Schedule schedule = mergeTask.schedule();
if (schedule == Schedule.RUN) {
scheduledToRunMergeTasks.add(mergeTask);
}
} else {
// maybe run one scheduled merge
MergeTask mergeTask = randomFrom(scheduledToRunMergeTasks);
scheduledToRunMergeTasks.remove(mergeTask);
mergeTask.run();
mergesToRun--;
mergesOutstanding--;
if (isUsingMaxTargetIORate.get() && mergesOutstanding > maxMergeCount) {
expectIndexThrottling = true;
} else if (mergesOutstanding <= maxMergeCount) {
expectIndexThrottling = false;
}
}
// assert IO throttle state
assertThat(threadPoolMergeScheduler.isIndexingThrottlingEnabled(), is(expectIndexThrottling));
}
// all merges done
assertThat(threadPoolMergeScheduler.isIndexingThrottlingEnabled(), is(false));
}
/**
 * Verifies that follow-up merges produced by a single {@link MergeSource} (i.e. {@code getNextMerge()}
 * keeps handing out new merges after the first one completes) are executed strictly one at a time and
 * in the exact order the source produced them, even though the merge executor has multiple threads
 * available ({@code mergeExecutorThreadCount >= 2}).
 */
public void testMergeSourceWithFollowUpMergesRunSequentially() throws Exception {
    // test with min 2 allowed concurrent merges
    int mergeExecutorThreadCount = randomIntBetween(2, 5);
    Settings settings = Settings.builder()
        .put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), mergeExecutorThreadCount)
        .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeExecutorThreadCount)
        // disable fs available disk space feature for this test
        .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
        .build();
    nodeEnvironment = newNodeEnvironment(settings);
    try (TestThreadPool testThreadPool = new TestThreadPool("test", settings)) {
        ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
            .getThreadPoolMergeExecutorService(testThreadPool, settings, nodeEnvironment);
        assertThat(threadPoolMergeExecutorService.getMaxConcurrentMerges(), equalTo(mergeExecutorThreadCount));
        try (
            ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
                new ShardId("index", "_na_", 1),
                IndexSettingsModule.newIndexSettings("index", settings),
                threadPoolMergeExecutorService,
                merge -> 0,
                MergeMetrics.NOOP
            )
        ) {
            MergeSource mergeSource = mock(MergeSource.class);
            OneMerge firstMerge = mock(OneMerge.class);
            when(firstMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
            when(firstMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
            // at least one followup merge + null (i.e. no more followups)
            int followUpMergeCount = randomIntBetween(2, 10);
            OneMerge[] followUpMerges = new OneMerge[followUpMergeCount];
            // the trailing null terminates the sequence of follow-ups returned by getNextMerge()
            followUpMerges[followUpMergeCount - 1] = null;
            for (int i = 0; i < followUpMergeCount - 1; i++) {
                OneMerge oneMerge = mock(OneMerge.class);
                when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
                when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
                followUpMerges[i] = oneMerge;
            }
            // the merge source with follow-up merges
            when(mergeSource.getNextMerge()).thenReturn(firstMerge, followUpMerges);
            // isMergeInProgress guards against two merges of this source running concurrently
            AtomicBoolean isMergeInProgress = new AtomicBoolean();
            // runMergeIdx counts how many merges have started, to check the production order
            AtomicInteger runMergeIdx = new AtomicInteger();
            Semaphore runMergeSemaphore = new Semaphore(0);
            Semaphore nextMergeSemaphore = new Semaphore(0);
            doAnswer(invocation -> {
                // assert only one merge can be in-progress at any point-in-time
                assertTrue(isMergeInProgress.compareAndSet(false, true));
                OneMerge mergeInvocation = (OneMerge) invocation.getArguments()[0];
                assertFalse(mergeInvocation.isAborted());
                // assert merges run in the order they are produced by the merge source
                if (runMergeIdx.get() == 0) {
                    assertThat(mergeInvocation, is(firstMerge));
                } else {
                    assertThat(mergeInvocation, is(followUpMerges[runMergeIdx.get() - 1]));
                }
                runMergeIdx.incrementAndGet();
                // await before returning from the merge in order to really ensure that follow-up merges don't run concurrently
                nextMergeSemaphore.release();
                runMergeSemaphore.acquire();
                assertTrue(isMergeInProgress.compareAndSet(true, false));
                return null;
            }).when(mergeSource).merge(any(OneMerge.class));
            // trigger run merges on the merge source
            threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
            boolean done = false;
            while (done == false) {
                // let merges run, but wait for the in-progress one to signal it is running
                nextMergeSemaphore.acquire();
                done = runMergeIdx.get() >= followUpMergeCount;
                runMergeSemaphore.release();
            }
            // wait until the executor reports that no merge work is left
            assertBusy(() -> assertTrue(threadPoolMergeExecutorService.allDone()));
        }
    }
}
/**
 * Verifies that independent merge sources have their merges executed concurrently, capped at the
 * scheduler's {@code max_thread_count}, with any excess merges backlogged. As running merges
 * complete, backlogged merges are promoted one at a time. Also checks that the merge thread pool
 * stats (active threads, queue size) agree with the scheduler's accounting, and that the queued /
 * running / completed merge metrics are recorded exactly once per merge.
 */
public void testMergesRunConcurrently() throws Exception {
    // min 2 allowed concurrent merges, per scheduler
    int mergeSchedulerMaxThreadCount = randomIntBetween(2, 4);
    // the merge executor has at least 1 extra thread available
    int mergeExecutorThreadCount = mergeSchedulerMaxThreadCount + randomIntBetween(1, 3);
    Settings settings = Settings.builder()
        .put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), mergeExecutorThreadCount)
        .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeSchedulerMaxThreadCount)
        // disable fs available disk space feature for this test
        .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
        .build();
    var mergeMetrics = mock(MergeMetrics.class);
    nodeEnvironment = newNodeEnvironment(settings);
    try (TestThreadPool testThreadPool = new TestThreadPool("test", settings)) {
        ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
            .getThreadPoolMergeExecutorService(testThreadPool, settings, nodeEnvironment);
        assertThat(threadPoolMergeExecutorService.getMaxConcurrentMerges(), equalTo(mergeExecutorThreadCount));
        ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) testThreadPool.executor(ThreadPool.Names.MERGE);
        try (
            ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
                new ShardId("index", "_na_", 1),
                IndexSettingsModule.newIndexSettings("index", settings),
                threadPoolMergeExecutorService,
                merge -> 0,
                mergeMetrics
            )
        ) {
            // at least 1 extra merge than there are concurrently allowed
            int mergeCount = mergeExecutorThreadCount + randomIntBetween(1, 10);
            // each mocked merge blocks until the test releases a permit, so the test controls completion order
            Semaphore runMergeSemaphore = new Semaphore(0);
            for (int i = 0; i < mergeCount; i++) {
                MergeSource mergeSource = mock(MergeSource.class);
                OneMerge oneMerge = mock(OneMerge.class);
                when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
                when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
                when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
                doAnswer(invocation -> {
                    OneMerge merge = (OneMerge) invocation.getArguments()[0];
                    assertFalse(merge.isAborted());
                    // wait to be signalled before completing
                    runMergeSemaphore.acquire();
                    return null;
                }).when(mergeSource).merge(any(OneMerge.class));
                threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
                // verify queued byte metric is recorded for each merge
                verify(mergeMetrics, times(i + 1)).incrementQueuedMergeBytes(any(), anyLong());
            }
            // while more merges are outstanding than the scheduler allows to run concurrently
            for (int completedMergesCount = 0; completedMergesCount < mergeCount
                - mergeSchedulerMaxThreadCount; completedMergesCount++) {
                int finalCompletedMergesCount = completedMergesCount;
                assertBusy(() -> {
                    // assert that there are merges running concurrently at the max allowed concurrency rate
                    assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(mergeSchedulerMaxThreadCount));
                    // with the other merges backlogged
                    assertThat(
                        threadPoolMergeScheduler.getBackloggedMergeTasks().size(),
                        is(mergeCount - mergeSchedulerMaxThreadCount - finalCompletedMergesCount)
                    );
                    // also check the same for the thread-pool executor
                    assertThat(threadPoolMergeExecutorService.getRunningMergeTasks().size(), is(mergeSchedulerMaxThreadCount));
                    // queued merge tasks do not include backlogged merges
                    assertThat(threadPoolMergeExecutorService.getMergeTasksQueueLength(), is(0));
                    // also check thread-pool stats for the same
                    // there are active thread-pool threads waiting for the backlogged merge tasks to be re-enqueued
                    int activeMergeThreads = Math.min(mergeCount - finalCompletedMergesCount, mergeExecutorThreadCount);
                    assertThat(threadPoolExecutor.getActiveCount(), is(activeMergeThreads));
                    assertThat(threadPoolExecutor.getQueue().size(), is(mergeCount - finalCompletedMergesCount - activeMergeThreads));
                });
                // let one merge task finish running
                runMergeSemaphore.release();
            }
            // there are now fewer merges still running than available threads
            for (int remainingMergesCount = mergeSchedulerMaxThreadCount; remainingMergesCount >= 0; remainingMergesCount--) {
                int finalRemainingMergesCount = remainingMergesCount;
                assertBusy(() -> {
                    // there are fewer available merges than available threads
                    assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(finalRemainingMergesCount));
                    // no more backlogged merges
                    assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(0));
                    // also check thread-pool executor for the same
                    assertThat(threadPoolMergeExecutorService.getRunningMergeTasks().size(), is(finalRemainingMergesCount));
                    // no more backlogged merges
                    assertThat(threadPoolMergeExecutorService.getMergeTasksQueueLength(), is(0));
                    // also check thread-pool stats for the same
                    assertThat(threadPoolExecutor.getActiveCount(), is(finalRemainingMergesCount));
                    assertThat(threadPoolExecutor.getQueue().size(), is(0));
                });
                // let one merge task finish running
                runMergeSemaphore.release();
            }
            assertBusy(() -> assertTrue(threadPoolMergeExecutorService.allDone()));
            // verify metrics are recorded for each merge
            verify(mergeMetrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
            verify(mergeMetrics, times(mergeCount)).decrementRunningMergeBytes(any());
            verify(mergeMetrics, times(mergeCount)).markMergeMetrics(any(), anyLong(), anyLong());
        }
    }
}
/**
 * Verifies that {@link ThreadPoolMergeScheduler#close()} blocks until the currently running merge
 * finishes, and that while closing (and after) the scheduler does not pull any new merges from a
 * merge source. The running merge is held open with a latch so the test can observe the closing
 * thread staying alive until the merge is released.
 */
public void testSchedulerCloseWaitsForRunningMerge() throws Exception {
    int mergeSchedulerMaxThreadCount = randomIntBetween(1, 3);
    int mergeExecutorThreadCount = randomIntBetween(1, 3);
    Settings settings = Settings.builder()
        .put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), mergeExecutorThreadCount)
        .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeSchedulerMaxThreadCount)
        // disable fs available disk space feature for this test
        .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
        .build();
    nodeEnvironment = newNodeEnvironment(settings);
    try (TestThreadPool testThreadPool = new TestThreadPool("test", settings)) {
        ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
            .getThreadPoolMergeExecutorService(testThreadPool, settings, nodeEnvironment);
        assertThat(threadPoolMergeExecutorService.getMaxConcurrentMerges(), equalTo(mergeExecutorThreadCount));
        try (
            ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
                new ShardId("index", "_na_", 1),
                IndexSettingsModule.newIndexSettings("index", settings),
                threadPoolMergeExecutorService,
                merge -> 0,
                MergeMetrics.NOOP
            )
        ) {
            // mergeDoneLatch holds the mocked merge open; mergeRunningLatch signals it started
            CountDownLatch mergeDoneLatch = new CountDownLatch(1);
            CountDownLatch mergeRunningLatch = new CountDownLatch(1);
            MergeSource mergeSource = mock(MergeSource.class);
            OneMerge oneMerge = mock(OneMerge.class);
            when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
            when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
            when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
            doAnswer(invocation -> {
                mergeRunningLatch.countDown();
                OneMerge merge = (OneMerge) invocation.getArguments()[0];
                assertFalse(merge.isAborted());
                // wait to be signalled before completing the merge
                mergeDoneLatch.await();
                return null;
            }).when(mergeSource).merge(any(OneMerge.class));
            // submit the merge
            threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
            // close the scheduler from a separate thread, since close() is expected to block
            Thread t = new Thread(() -> {
                try {
                    threadPoolMergeScheduler.close();
                } catch (IOException e) {
                    fail(e);
                }
            });
            // test expects that there definitely is a running merge before closing the merge scheduler
            mergeRunningLatch.await();
            // closes the merge scheduler
            t.start();
            try {
                assertTrue(t.isAlive());
                // ensure the merge scheduler is effectively "closed"
                assertBusy(() -> {
                    MergeSource mergeSource2 = mock(MergeSource.class);
                    threadPoolMergeScheduler.merge(mergeSource2, randomFrom(MergeTrigger.values()));
                    // when the merge scheduler is closed it won't pull in any new merges from the merge source
                    verifyNoInteractions(mergeSource2);
                });
                // assert the merge still shows up as "running"
                assertThat(threadPoolMergeScheduler.getRunningMergeTasks().keySet(), contains(oneMerge));
                assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(0));
                // close() must still be blocked while the merge is running
                assertTrue(t.isAlive());
                // signal the merge to finish
                mergeDoneLatch.countDown();
            } finally {
                t.join();
            }
            // with the merge done and the scheduler closed, nothing is left running or backlogged
            assertBusy(() -> {
                assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(0));
                assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(0));
                assertTrue(threadPoolMergeExecutorService.allDone());
            });
        }
    }
}
/**
 * When the index is configured with auto IO throttling disabled
 * ({@code MergeSchedulerConfig.AUTO_THROTTLE_SETTING} set to {@code false}), the merge task that
 * the scheduler hands over to the executor service must not support IO throttling.
 */
public void testAutoIOThrottleForMergeTasksWhenSchedulerDisablesIt() throws Exception {
    // index settings with auto IO throttle explicitly switched off
    Settings noThrottleSettings = Settings.builder().put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), false).build();
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", noThrottleSettings);
    ThreadPoolMergeExecutorService mergeExecutor = mock(ThreadPoolMergeExecutorService.class);
    // a merge source producing exactly one merge
    OneMerge singleMerge = mock(OneMerge.class);
    when(singleMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomNonNegativeLong()));
    when(singleMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
    MergeSource source = mock(MergeSource.class);
    when(source.getNextMerge()).thenReturn(singleMerge);
    try (
        ThreadPoolMergeScheduler scheduler = new ThreadPoolMergeScheduler(
            new ShardId("index", "_na_", 1),
            indexSettings,
            mergeExecutor,
            m -> 0,
            MergeMetrics.NOOP
        )
    ) {
        scheduler.merge(source, randomFrom(MergeTrigger.values()));
        // capture the task submitted to the executor and inspect its throttling capability
        var taskCaptor = ArgumentCaptor.forClass(MergeTask.class);
        verify(mergeExecutor).submitMergeTask(taskCaptor.capture());
        assertFalse(taskCaptor.getValue().supportsIOThrottling());
    }
}
/**
 * Exercises the auto IO throttling decision for submitted merge tasks when auto throttling is
 * enabled (explicitly or by default):
 * <ul>
 * <li>forced merges (merge info with a non-negative max number of segments) are not IO throttled</li>
 * <li>merges triggered by {@code MergeTrigger.CLOSING} are not IO throttled</li>
 * <li>any other merge task supports IO throttling</li>
 * </ul>
 * Note: the {@code oneMerge} mock is re-stubbed between the phases, so statement order matters.
 */
public void testAutoIOThrottleForMergeTasks() throws Exception {
    final Settings.Builder settingsBuilder = Settings.builder();
    // merge scheduler configured with auto IO throttle enabled
    if (randomBoolean()) {
        settingsBuilder.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), true);
    }
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", settingsBuilder.build());
    MergePolicy.OneMergeProgress oneMergeProgress = new MergePolicy.OneMergeProgress();
    OneMerge oneMerge = mock(OneMerge.class);
    // forced merge with a set number of segments
    when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomNonNegativeLong(), randomNonNegativeInt()));
    when(oneMerge.getMergeProgress()).thenReturn(oneMergeProgress);
    MergeSource mergeSource = mock(MergeSource.class);
    when(mergeSource.getNextMerge()).thenReturn(oneMerge);
    ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
    try (
        ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
            new ShardId("index", "_na_", 1),
            indexSettings,
            threadPoolMergeExecutorService,
            merge -> 0,
            MergeMetrics.NOOP
        )
    ) {
        threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
        var submittedMergeTaskCaptor = ArgumentCaptor.forClass(MergeTask.class);
        verify(threadPoolMergeExecutorService).submitMergeTask(submittedMergeTaskCaptor.capture());
        // forced merge tasks should not be IO throttled
        assertFalse(submittedMergeTaskCaptor.getValue().supportsIOThrottling());
    }
    // NOT a forced merge (maxNumSegments == -1); a fresh executor mock so verify() counts start over
    when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomNonNegativeLong(), -1));
    threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
    try (
        ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
            new ShardId("index", "_na_", 1),
            indexSettings,
            threadPoolMergeExecutorService,
            merge -> 0,
            MergeMetrics.NOOP
        )
    ) {
        // merge submitted upon closing
        threadPoolMergeScheduler.merge(mergeSource, MergeTrigger.CLOSING);
        var submittedMergeTaskCaptor = ArgumentCaptor.forClass(MergeTask.class);
        verify(threadPoolMergeExecutorService).submitMergeTask(submittedMergeTaskCaptor.capture());
        // merge tasks submitted when closing should not be IO throttled
        assertFalse(submittedMergeTaskCaptor.getValue().supportsIOThrottling());
    }
    // otherwise, merge tasks should be auto IO throttled
    threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
    try (
        ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
            new ShardId("index", "_na_", 1),
            indexSettings,
            threadPoolMergeExecutorService,
            merge -> 0,
            MergeMetrics.NOOP
        )
    ) {
        // merge submitted with any trigger other than CLOSING
        threadPoolMergeScheduler.merge(
            mergeSource,
            randomValueOtherThan(MergeTrigger.CLOSING, () -> randomFrom(MergeTrigger.values()))
        );
        var submittedMergeTaskCaptor = ArgumentCaptor.forClass(MergeTask.class);
        verify(threadPoolMergeExecutorService).submitMergeTask(submittedMergeTaskCaptor.capture());
        // merge tasks should be auto IO throttled
        assertTrue(submittedMergeTaskCaptor.getValue().supportsIOThrottling());
    }
}
/**
 * A scheduler whose {@code shouldSkipMerge()} always answers {@code true} must abort every merge
 * it is handed: tasks are still queued (and their queued-bytes metrics recorded, then moved to
 * running and released), but no merge is ever reported as completed.
 */
public void testMergeSchedulerAbortsMergeWhenShouldSkipMergeIsTrue() throws IOException {
    DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    Settings settings = Settings.builder()
        // disable fs available disk space feature for this test
        .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
        .build();
    nodeEnvironment = newNodeEnvironment(settings);
    ThreadPoolMergeExecutorService mergeExecutor = ThreadPoolMergeExecutorServiceTests.getThreadPoolMergeExecutorService(
        taskQueue.getThreadPool(),
        settings,
        nodeEnvironment
    );
    var metrics = mock(MergeMetrics.class);
    try (
        // a scheduler that unconditionally skips (and therefore aborts) every merge
        ThreadPoolMergeScheduler skippingScheduler = new ThreadPoolMergeScheduler(
            new ShardId("index", "_na_", 1),
            IndexSettingsModule.newIndexSettings("index", Settings.EMPTY),
            mergeExecutor,
            merge -> 0,
            metrics
        ) {
            @Override
            protected boolean shouldSkipMerge() {
                return true;
            }
        }
    ) {
        int mergeCount = randomIntBetween(2, 10);
        for (int submitted = 1; submitted <= mergeCount; submitted++) {
            OneMerge merge = mock(OneMerge.class);
            when(merge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
            when(merge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
            MergeSource source = mock(MergeSource.class);
            when(source.getNextMerge()).thenReturn(merge, (OneMerge) null);
            // scheduling a freshly created task must indicate the merge is to be aborted
            MergeTask task = skippingScheduler.newMergeTask(source, merge, randomFrom(MergeTrigger.values()));
            assertThat(skippingScheduler.schedule(task), is(Schedule.ABORT));
            // push the merge through the scheduler as well
            skippingScheduler.merge(source, randomFrom(MergeTrigger.values()));
            // queued merge byte metrics are recorded even for merges that will be aborted
            verify(metrics, times(submitted)).incrementQueuedMergeBytes(any(), anyLong());
        }
        // execute everything on the deterministic queue; all merges should be aborted
        taskQueue.runAllTasks();
        // queued bytes transition to running and are then released
        verify(metrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
        verify(metrics, times(mergeCount)).decrementRunningMergeBytes(any());
        // but no merge is ever marked as completed
        verify(metrics, times(0)).markMergeMetrics(any(), anyLong(), anyLong());
    }
}
/**
 * Builds a random {@code MergeInfo} with the given estimated size, randomly choosing between a
 * natural merge ({@code maxNumSegments == -1}) and a forced merge with a random segment cap.
 */
private static MergeInfo getNewMergeInfo(long estimatedMergeBytes) {
    final int maxNumSegments = randomFrom(-1, randomNonNegativeInt());
    return getNewMergeInfo(estimatedMergeBytes, maxNumSegments);
}
/**
 * Builds a {@code MergeInfo} with the given estimated size and max segment count, randomizing the
 * remaining fields (total max doc count and the external flag).
 */
private static MergeInfo getNewMergeInfo(long estimatedMergeBytes, int maxNumSegments) {
    final int totalMaxDoc = randomNonNegativeInt();
    final boolean isExternal = randomBoolean();
    return new MergeInfo(totalMaxDoc, estimatedMergeBytes, isExternal, maxNumSegments);
}
static | ThreadPoolMergeSchedulerTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlCompleteLogoutRequest.java | {
"start": 774,
"end": 2949
} | class ____ extends LegacyActionRequest {
@Nullable
private String queryString;
@Nullable
private String content;
private List<String> validRequestIds;
private String realm;
public SamlCompleteLogoutRequest(StreamInput in) throws IOException {
super(in);
}
public SamlCompleteLogoutRequest() {}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (Strings.hasText(realm) == false) {
validationException = addValidationError("realm may not be empty", validationException);
}
if (Strings.hasText(queryString) == false && Strings.hasText(content) == false) {
validationException = addValidationError("query_string and content may not both be empty", validationException);
}
if (Strings.hasText(queryString) && Strings.hasText(content)) {
validationException = addValidationError("query_string and content may not both present", validationException);
}
return validationException;
}
public String getQueryString() {
return queryString;
}
public void setQueryString(String queryString) {
if (this.queryString == null) {
this.queryString = queryString;
} else {
throw new IllegalArgumentException("Must use either [query_string] or [queryString], not both at the same time");
}
}
public String getContent() {
return content;
}
public void setContent(String content) {
this.content = content;
}
public List<String> getValidRequestIds() {
return validRequestIds;
}
public void setValidRequestIds(List<String> validRequestIds) {
this.validRequestIds = validRequestIds;
}
public String getRealm() {
return realm;
}
public void setRealm(String realm) {
this.realm = realm;
}
public boolean isHttpRedirect() {
return queryString != null;
}
public String getPayload() {
return isHttpRedirect() ? queryString : content;
}
}
| SamlCompleteLogoutRequest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/filter/subclass/joined2/Animal.java | {
"start": 575,
"end": 720
} | class ____ {
@Id
@Column(name = "id_animal")
private int id;
private String name;
@Column(name = "id_company")
private long company;
}
| Animal |
java | netty__netty | microbench/src/main/java/io/netty/buffer/AbstractByteBufGetCharSequenceBenchmark.java | {
"start": 1222,
"end": 1412
} | class ____ extends AbstractMicrobenchmark {
private static final AdaptiveByteBufAllocator ADAPTIVE_ALLOC = new AdaptiveByteBufAllocator();
public | AbstractByteBufGetCharSequenceBenchmark |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/java/SerializableJavaType.java | {
"start": 1095,
"end": 4606
} | class ____<S extends Serializable> extends MutableMutabilityPlan<S> {
public static final SerializableMutabilityPlan<Serializable> INSTANCE = new SerializableMutabilityPlan<>();
private SerializableMutabilityPlan() {
}
@Override
@SuppressWarnings("unchecked")
public S deepCopyNotNull(S value) {
return (S) SerializationHelper.clone( value );
}
}
public SerializableJavaType(Class<T> type) {
this( type, createMutabilityPlan( type ) );
}
public SerializableJavaType(Class<T> type, MutabilityPlan<T> mutabilityPlan) {
super( type, mutabilityPlan == null ? createMutabilityPlan( type ) : mutabilityPlan );
}
private static <T> MutabilityPlan<T> createMutabilityPlan(Class<T> type) {
if ( type.isAnnotationPresent( Immutable.class ) ) {
return ImmutableMutabilityPlan.instance();
}
return (MutabilityPlan<T>) SerializableMutabilityPlan.INSTANCE;
}
@Override
public boolean isInstance(Object value) {
return value instanceof Serializable;
}
@Override
public JdbcType getRecommendedJdbcType(JdbcTypeIndicators indicators) {
final int typeCode = indicators.isLob() ? Types.BLOB : Types.VARBINARY;
return indicators.getJdbcType( typeCode );
}
public String toString(T value) {
return PrimitiveByteArrayJavaType.INSTANCE.toString( toBytes( value ) );
}
public T fromString(CharSequence string) {
return fromBytes( PrimitiveByteArrayJavaType.INSTANCE.fromString( string ) );
}
@Override
public boolean areEqual(T one, T another) {
if ( one == another ) {
return true;
}
else if ( one == null || another == null ) {
return false;
}
else {
return one.equals( another )
|| Arrays.equals( toBytes( one ), toBytes( another ) );
}
}
@Override
public int extractHashCode(T value) {
return PrimitiveByteArrayJavaType.INSTANCE.extractHashCode( toBytes( value ) );
}
@SuppressWarnings("unchecked")
public <X> X unwrap(T value, Class<X> type, WrapperOptions options) {
if ( value == null ) {
return null;
}
else if ( type.isInstance( value ) ) {
return (X) value;
}
else if ( byte[].class.isAssignableFrom( type ) ) {
return (X) toBytes( value );
}
else if ( InputStream.class.isAssignableFrom( type ) ) {
return (X) new ByteArrayInputStream( toBytes( value ) );
}
else if ( BinaryStream.class.isAssignableFrom( type ) ) {
return (X) new ArrayBackedBinaryStream( toBytes( value ) );
}
else if ( Blob.class.isAssignableFrom( type ) ) {
return (X) options.getLobCreator().createBlob( toBytes( value ) );
}
throw unknownUnwrap( type );
}
@SuppressWarnings("unchecked")
public <X> T wrap(X value, WrapperOptions options) {
if ( value == null ) {
return null;
}
else if (value instanceof byte[] bytes) {
return fromBytes( bytes );
}
else if (value instanceof InputStream inputStream) {
return fromBytes( DataHelper.extractBytes( inputStream ) );
}
else if (value instanceof Blob blob) {
try {
return fromBytes( DataHelper.extractBytes( blob.getBinaryStream() ) );
}
catch ( SQLException e ) {
throw new HibernateException( e );
}
}
else if ( getJavaTypeClass().isInstance( value ) ) {
return (T) value;
}
throw unknownWrap( value.getClass() );
}
protected byte[] toBytes(T value) {
return SerializationHelper.serialize( value );
}
@SuppressWarnings("unchecked")
protected T fromBytes(byte[] bytes) {
return (T) SerializationHelper.deserialize( bytes, getJavaTypeClass().getClassLoader() );
}
}
| SerializableMutabilityPlan |
java | quarkusio__quarkus | independent-projects/qute/debug/src/main/java/io/quarkus/qute/debug/agent/resolvers/ValueResolverRegistry.java | {
"start": 211,
"end": 426
} | class ____ which collector to use for a given {@link ValueResolver}
* and provides methods to fill a {@link ValueResolverContext} with available
* properties and methods from Qute templates.
* </p>
*/
public | manages |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/access/NestedEmbeddableDefaultAccessTests.java | {
"start": 3960,
"end": 4201
} | class ____ {
@Convert( converter = SillyConverter.class )
@Column( name = "nested_data" )
private String nestedData;
@Enumerated(EnumType.STRING)
@Column( name = "nested_enum" )
private MyEnum nestedEnum;
}
public | NestedEmbeddable |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/transport/TripleCommandOutBoundHandler.java | {
"start": 1071,
"end": 1485
} | class ____ extends ChannelOutboundHandlerAdapter {
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
if (msg instanceof QueuedCommand) {
QueuedCommand command = (QueuedCommand) msg;
command.send(ctx, promise);
} else {
super.write(ctx, msg, promise);
}
}
}
| TripleCommandOutBoundHandler |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/SingletonOrdinalsBuilder.java | {
"start": 1102,
"end": 12142
} | class ____ implements BlockLoader.SingletonOrdinalsBuilder, Releasable, Block.Builder {
private final BlockFactory blockFactory;
private final SortedDocValues docValues;
private int minOrd = Integer.MAX_VALUE;
private int maxOrd = Integer.MIN_VALUE;
private final int[] ords;
private int count;
private final boolean isDense;
public SingletonOrdinalsBuilder(BlockFactory blockFactory, SortedDocValues docValues, int count, boolean isDense) {
this.blockFactory = blockFactory;
this.docValues = docValues;
blockFactory.adjustBreaker(ordsSize(count));
this.ords = new int[count];
this.isDense = isDense;
}
@Override
public SingletonOrdinalsBuilder appendNull() {
assert isDense == false;
ords[count++] = -1; // real ords can't be < 0, so we use -1 as null
return this;
}
@Override
public SingletonOrdinalsBuilder appendOrd(int ord) {
ords[count++] = ord;
minOrd = Math.min(minOrd, ord);
maxOrd = Math.max(maxOrd, ord);
return this;
}
@Override
public BlockLoader.SingletonOrdinalsBuilder appendOrds(int[] values, int from, int length, int minOrd, int maxOrd) {
System.arraycopy(values, from, ords, count, length);
this.count += length;
this.minOrd = Math.min(this.minOrd, minOrd);
this.maxOrd = Math.max(this.maxOrd, maxOrd);
return this;
}
@Override
public BlockLoader.SingletonOrdinalsBuilder appendOrds(int ord, int length) {
Arrays.fill(ords, count, count + length, ord);
this.minOrd = Math.min(this.minOrd, ord);
this.maxOrd = Math.max(this.maxOrd, ord);
this.count += length;
return this;
}
@Override
public SingletonOrdinalsBuilder beginPositionEntry() {
throw new UnsupportedOperationException("should only have one value per doc");
}
@Override
public SingletonOrdinalsBuilder endPositionEntry() {
throw new UnsupportedOperationException("should only have one value per doc");
}
private BytesRefBlock tryBuildConstantBlock() {
if (minOrd != maxOrd) {
return null;
}
if (isDense == false) {
for (int ord : ords) {
if (ord == -1) {
return null;
}
}
}
final BytesRef v;
try {
v = BytesRef.deepCopyOf(docValues.lookupOrd(minOrd));
} catch (IOException e) {
throw new UncheckedIOException("failed to lookup ordinals", e);
}
BytesRefVector bytes = null;
IntVector ordinals = null;
boolean success = false;
try {
bytes = blockFactory.newConstantBytesRefVector(v, 1);
ordinals = blockFactory.newConstantIntVector(0, ords.length);
// Ideally, we would return a ConstantBytesRefVector, but we return an ordinal constant block instead
// to ensure ordinal optimizations are applied when constant optimization is not available.
final var result = new OrdinalBytesRefBlock(ordinals.asBlock(), bytes);
success = true;
return result;
} finally {
if (success == false) {
Releasables.close(bytes, ordinals);
}
}
}
BytesRefBlock buildOrdinal() {
int valueCount = maxOrd - minOrd + 1;
long breakerSize = ordsSize(valueCount);
blockFactory.adjustBreaker(breakerSize);
BytesRefVector bytesVector = null;
IntBlock ordinalBlock = null;
try {
int[] newOrds = new int[valueCount];
Arrays.fill(newOrds, -1);
// Re-mapping ordinals to be more space-efficient:
if (isDense) {
for (int ord : ords) {
newOrds[ord - minOrd] = 0;
}
} else {
for (int ord : ords) {
if (ord != -1) {
newOrds[ord - minOrd] = 0;
}
}
}
// resolve the ordinals and remaps the ordinals
try {
int nextOrd = -1;
BytesRef firstTerm = minOrd != Integer.MAX_VALUE ? docValues.lookupOrd(minOrd) : null;
int estimatedSize;
if (firstTerm != null) {
estimatedSize = Math.min(valueCount, ords.length) * firstTerm.length;
} else {
estimatedSize = Math.min(valueCount, ords.length);
}
try (BytesRefVector.Builder bytesBuilder = blockFactory.newBytesRefVectorBuilder(estimatedSize)) {
if (firstTerm != null) {
newOrds[0] = ++nextOrd;
bytesBuilder.appendBytesRef(firstTerm);
}
for (int i = firstTerm != null ? 1 : 0; i < newOrds.length; i++) {
if (newOrds[i] != -1) {
newOrds[i] = ++nextOrd;
bytesBuilder.appendBytesRef(docValues.lookupOrd(i + minOrd));
}
}
bytesVector = bytesBuilder.build();
}
} catch (IOException e) {
throw new UncheckedIOException("error resolving ordinals", e);
}
if (isDense) {
// Reusing ords array and overwrite all slots with re-mapped ordinals
for (int i = 0; i < ords.length; i++) {
ords[i] = newOrds[ords[i] - minOrd];
}
ordinalBlock = blockFactory.newIntArrayVector(ords, ords.length).asBlock();
} else {
try (IntBlock.Builder ordinalsBuilder = blockFactory.newIntBlockBuilder(ords.length)) {
for (int ord : ords) {
if (ord == -1) {
ordinalsBuilder.appendNull();
} else {
ordinalsBuilder.appendInt(newOrds[ord - minOrd]);
}
}
ordinalBlock = ordinalsBuilder.build();
}
}
final OrdinalBytesRefBlock result = new OrdinalBytesRefBlock(ordinalBlock, bytesVector);
bytesVector = null;
ordinalBlock = null;
return result;
} finally {
Releasables.close(() -> blockFactory.adjustBreaker(-breakerSize), ordinalBlock, bytesVector);
}
}
/**
 * Builds a plain {@link BytesRefBlock} by materializing every referenced term from the doc values.
 *
 * <p>Algorithm: clone and sort the gathered ordinals, compact them to the unique set, look up each
 * unique term exactly once into one contiguous byte buffer, then emit one (copied) BytesRef per
 * position by binary-searching the position's ordinal in the unique list. Positions whose ordinal
 * is -1 become nulls.
 *
 * <p>Memory allocated outside the builders (the sorted ords clone and the offsets array) is
 * accounted against the circuit breaker and returned in the finally block; the builders track
 * their own memory.
 *
 * @throws UncheckedIOException if resolving an ordinal against the doc values fails
 */
BytesRefBlock buildRegularBlock() {
    try {
        long breakerSize = ordsSize(ords.length);
        // Increment breaker for sorted ords.
        blockFactory.adjustBreaker(breakerSize);
        try {
            int[] sortedOrds = ords.clone();
            int uniqueCount = compactToUnique(sortedOrds);
            try (BreakingBytesRefBuilder copies = new BreakingBytesRefBuilder(blockFactory.breaker(), "ords")) {
                // Offsets into `copies` for each unique term, plus a trailing end offset.
                long offsetsAndLength = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (uniqueCount + 1) * Integer.BYTES;
                blockFactory.adjustBreaker(offsetsAndLength);
                breakerSize += offsetsAndLength;
                int[] offsets = new int[uniqueCount + 1];
                for (int o = 0; o < uniqueCount; o++) {
                    BytesRef v = docValues.lookupOrd(sortedOrds[o]);
                    offsets[o] = copies.length();
                    copies.append(v);
                }
                offsets[uniqueCount] = copies.length();
                /*
                 * It'd be better if BytesRefBlock could run off of a deduplicated list of
                 * blocks. It can't at the moment. So we copy many times.
                 */
                BytesRef scratch = new BytesRef();
                scratch.bytes = copies.bytes();
                try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(ords.length)) {
                    for (int i = 0; i < ords.length; i++) {
                        if (ords[i] == -1) {
                            // -1 marks a missing value for this position.
                            builder.appendNull();
                            continue;
                        }
                        // Locate this ordinal's slot in the unique list to find its bytes.
                        int o = Arrays.binarySearch(sortedOrds, 0, uniqueCount, ords[i]);
                        assert 0 <= o && o < uniqueCount;
                        scratch.offset = offsets[o];
                        scratch.length = offsets[o + 1] - scratch.offset;
                        builder.appendBytesRef(scratch);
                    }
                    return builder.build();
                }
            }
        } finally {
            // Give back everything accounted above, whether or not the build succeeded.
            blockFactory.adjustBreaker(-breakerSize);
        }
    } catch (IOException e) {
        throw new UncheckedIOException("error resolving ordinals", e);
    }
}
/**
 * Very rough estimate of the memory the built block will occupy.
 *
 * <p>We have no idea how large the individual values are, so this is just a fixed
 * per-position guess — smaller when an ordinal-encoded block is expected.
 */
@Override
public long estimatedBytes() {
    final long perPositionGuess = shouldBuildOrdinalsBlock() ? 5 : 20;
    return perPositionGuess * ords.length;
}
/**
 * Builds the final block, preferring the cheapest representation: a constant block if all
 * values are equal, otherwise an ordinal-encoded block when dense enough, otherwise a plain
 * copy of the values.
 *
 * @throws IllegalStateException if fewer values were appended than promised at creation
 */
@Override
public BytesRefBlock build() {
    if (count != ords.length) {
        String message = "expected " + ords.length + " values but got " + count;
        assert false : message;
        throw new IllegalStateException(message);
    }
    var constant = tryBuildConstantBlock();
    if (constant != null) {
        return constant;
    }
    if (shouldBuildOrdinalsBlock()) {
        return buildOrdinal();
    }
    return buildRegularBlock();
}
/**
 * Whether the collected ordinals are dense enough that an ordinal-encoded block pays off.
 * Returns {@code false} when no ordinal was ever recorded ({@code minOrd > maxOrd}).
 */
boolean shouldBuildOrdinalsBlock() {
    if (minOrd > maxOrd) {
        // No non-null value was appended, so there is nothing to dictionary-encode.
        return false;
    }
    final int ordRange = maxOrd - minOrd + 1;
    return OrdinalBytesRefBlock.isDense(ords.length, ordRange);
}
/**
 * Releases the circuit-breaker memory that was reserved for the {@code ords} array.
 */
@Override
public void close() {
    blockFactory.adjustBreaker(-ordsSize(ords.length));
}
/**
 * Copying positions from another block is not supported by this builder; values must be
 * appended as ordinals.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Block.Builder copyFrom(Block block, int beginInclusive, int endExclusive) {
    throw new UnsupportedOperationException();
}
/**
 * Setting a multivalue ordering is not supported by this builder.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Block.Builder mvOrdering(Block.MvOrdering mvOrdering) {
    throw new UnsupportedOperationException();
}
/**
 * Circuit-breaker accounting size of an {@code int[]} holding {@code ordsCount} entries.
 *
 * <p>The multiplication is widened to {@code long} before it happens so the estimate cannot
 * overflow {@code int} for very large ordinal counts (the previous form multiplied two ints).
 */
private static long ordsSize(int ordsCount) {
    return RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) ordsCount * Integer.BYTES;
}
/**
 * Sorts {@code sortedOrds} in place and compacts it so the leading slots hold each distinct
 * value exactly once, in ascending order.
 *
 * <p>Note: because the dedup seed starts at -1, a leading -1 entry (the null marker) in the
 * sorted array is excluded from the unique set.
 *
 * @return the number of distinct values now occupying the front of the array
 */
static int compactToUnique(int[] sortedOrds) {
    Arrays.sort(sortedOrds);
    int writePos = 0;
    int last = -1;
    for (int candidate : sortedOrds) {
        if (candidate != last) {
            last = candidate;
            sortedOrds[writePos++] = candidate;
        }
    }
    return writePos;
}
}
| SingletonOrdinalsBuilder |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/junitrunner/DeepStubbingWithJUnitRunnerTest.java | {
"start": 356,
"end": 750
} | class ____ {
private final SomeClass someClass = new SomeClass();
@Mock(answer = Answers.RETURNS_DEEP_STUBS)
private Root root;
@Test
public void deep_stubs_dont_trigger_unnecessary_stubbing_exception() {
// when
someClass.someMethod(root);
// then unnecessary stubbing exception is not thrown
}
public static | DeepStubbingWithJUnitRunnerTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/client/runtime/src/main/java/org/jboss/resteasy/reactive/client/spi/MissingMessageBodyReaderErrorMessageContextualizer.java | {
"start": 475,
"end": 596
} | class ____ not able to provide any useful context information.
*/
String provideContextMessage(Input input);
| is |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/method/annotation/SessionAttributesHandlerTests.java | {
"start": 1449,
"end": 4601
} | class ____ {
private final SessionAttributeStore sessionAttributeStore = new DefaultSessionAttributeStore();
private final SessionAttributesHandler sessionAttributesHandler =
new SessionAttributesHandler(TestSessionAttributesHolder.class, sessionAttributeStore);
private final NativeWebRequest request = new ServletWebRequest(new MockHttpServletRequest());
@Test
void isSessionAttribute() {
assertThat(sessionAttributesHandler.isHandlerSessionAttribute("attr1", String.class)).isTrue();
assertThat(sessionAttributesHandler.isHandlerSessionAttribute("attr2", String.class)).isTrue();
assertThat(sessionAttributesHandler.isHandlerSessionAttribute("simple", TestBean.class)).isTrue();
assertThat(sessionAttributesHandler.isHandlerSessionAttribute("simple", String.class)).isFalse();
}
// Only attributes the handler already "knows" are retrieved: the explicitly named ones
// immediately, and type-matched ones (attr3) only after they have been resolved once via
// isHandlerSessionAttribute.
@Test
void retrieveAttributes() {
    sessionAttributeStore.storeAttribute(request, "attr1", "value1");
    sessionAttributeStore.storeAttribute(request, "attr2", "value2");
    sessionAttributeStore.storeAttribute(request, "attr3", new TestBean());
    sessionAttributeStore.storeAttribute(request, "attr4", new TestBean());
    assertThat(sessionAttributesHandler.retrieveAttributes(request).keySet())
            .as("Named attributes (attr1, attr2) should be 'known' right away")
            .isEqualTo(new HashSet<>(asList("attr1", "attr2")));
    // Resolve 'attr3' by type
    sessionAttributesHandler.isHandlerSessionAttribute("attr3", TestBean.class);
    assertThat(sessionAttributesHandler.retrieveAttributes(request).keySet())
            .as("Named attributes (attr1, attr2) and resolved attribute (attr3) should be 'known'")
            .isEqualTo(new HashSet<>(asList("attr1", "attr2", "attr3")));
}
// cleanupAttributes only removes attributes the handler knows about: the named ones go on the
// first pass, while attr3 (known only by type) survives until it has been resolved once.
@Test
void cleanupAttributes() {
    sessionAttributeStore.storeAttribute(request, "attr1", "value1");
    sessionAttributeStore.storeAttribute(request, "attr2", "value2");
    sessionAttributeStore.storeAttribute(request, "attr3", new TestBean());
    sessionAttributesHandler.cleanupAttributes(request);
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr1")).isNull();
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr2")).isNull();
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr3")).isNotNull();
    // Resolve 'attr3' by type
    sessionAttributesHandler.isHandlerSessionAttribute("attr3", TestBean.class);
    sessionAttributesHandler.cleanupAttributes(request);
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr3")).isNull();
}
// All model attributes that qualify as session attributes (by name or by type) end up in the
// backing session attribute store.
@Test
void storeAttributes() {
    ModelMap model = new ModelMap();
    model.put("attr1", "value1");
    model.put("attr2", "value2");
    model.put("attr3", new TestBean());
    sessionAttributesHandler.storeAttributes(request, model);
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr1")).isEqualTo("value1");
    assertThat(sessionAttributeStore.retrieveAttribute(request, "attr2")).isEqualTo("value2");
    boolean condition = sessionAttributeStore.retrieveAttribute(request, "attr3") instanceof TestBean;
    assertThat(condition).isTrue();
}
@SessionAttributes(names = {"attr1", "attr2"}, types = TestBean.class)
private static | SessionAttributesHandlerTests |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java | {
"start": 1436,
"end": 3081
} | class ____ extends AcknowledgedTransportMasterNodeAction<StopILMRequest> {
private final ProjectResolver projectResolver;
@Inject
public TransportStopILMAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
ProjectResolver projectResolver
) {
super(
ILMActions.STOP.name(),
transportService,
clusterService,
threadPool,
actionFilters,
StopILMRequest::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.projectResolver = projectResolver;
}
/**
 * Moves ILM for the resolved project into {@link OperationMode#STOPPING} by submitting an
 * unbatched cluster-state update; the listener is completed when the update is acknowledged.
 */
@Override
protected void masterOperation(Task task, StopILMRequest request, ClusterState state, ActionListener<AcknowledgedResponse> listener) {
    final var projectId = projectResolver.getProjectId();
    submitUnbatchedTask(
        "ilm_operation_mode_update[stopping]",
        OperationModeUpdateTask.wrap(OperationModeUpdateTask.ilmMode(projectId, OperationMode.STOPPING), request, listener)
    );
}
// Thin wrapper around the deprecated unbatched submission API so the suppression is scoped
// to one place.
@SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) {
    clusterService.submitUnbatchedStateUpdateTask(source, task);
}
/**
 * Rejects the request while the cluster has a global metadata-write block in place.
 */
@Override
protected ClusterBlockException checkBlock(StopILMRequest request, ClusterState state) {
    return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
| TransportStopILMAction |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/extension/ClassTemplateInvocationContextProvider.java | {
"start": 2653,
"end": 3018
} | class ____
* about to be invoked; never {@code null}
* @return {@code true} if this provider can provide invocation contexts
* @see #provideClassTemplateInvocationContexts
* @see ExtensionContext
*/
boolean supportsClassTemplate(ExtensionContext context);
/**
* Provide {@linkplain ClassTemplateInvocationContext invocation contexts}
* for the | template |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/CamelEvent.java | {
"start": 10597,
"end": 10756
} | interface ____ extends RouteEvent {
/** Identifies this event as {@code Type.RouteStopped}. */
@Override
default Type getType() {
    return Type.RouteStopped;
}
}
| RouteStoppedEvent |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/KubernetesResourceManagerDriver.java | {
"start": 3686,
"end": 19804
} | class ____
extends AbstractResourceManagerDriver<KubernetesWorkerNode> {
/** The taskmanager pod name pattern is {clusterId}-{taskmanager}-{attemptId}-{podIndex}. */
private static final String TASK_MANAGER_POD_FORMAT = "%s-taskmanager-%d-%d";
private static final Long TERMINATION_WAIT_SECOND = 5L;
private final String clusterId;
private final String webInterfaceUrl;
private final FlinkKubeClient flinkKubeClient;
/** Request resource futures, keyed by pod names. */
private final Map<String, CompletableFuture<KubernetesWorkerNode>> requestResourceFutures;
/** When ResourceManager failover, the max attempt should recover. */
private long currentMaxAttemptId = 0;
/** Current max pod index. When creating a new pod, it should increase one. */
private long currentMaxPodId = 0;
private CompletableFuture<KubernetesWatch> podsWatchOptFuture =
FutureUtils.completedExceptionally(
new ResourceManagerException(
"KubernetesResourceManagerDriver is not initialized."));
private volatile boolean running;
private FlinkPod taskManagerPodTemplate;
/**
 * Creates the driver in a not-yet-running state; the pod watch and template loading happen
 * later in {@code initializeInternal}.
 */
public KubernetesResourceManagerDriver(
        Configuration flinkConfig,
        FlinkKubeClient flinkKubeClient,
        KubernetesResourceManagerDriverConfiguration configuration) {
    super(flinkConfig, GlobalConfiguration.loadConfiguration());
    this.clusterId = Preconditions.checkNotNull(configuration.getClusterId());
    this.webInterfaceUrl = configuration.getWebInterfaceUrl();
    this.flinkKubeClient = Preconditions.checkNotNull(flinkKubeClient);
    this.requestResourceFutures = new HashMap<>();
    this.running = false;
}
// ------------------------------------------------------------------------
// ResourceManagerDriver
// ------------------------------------------------------------------------
/**
 * Starts the Kubernetes-specific machinery: begins watching TaskManager pods, loads the
 * TaskManager pod template if one was shipped into the JobManager pod, patches service target
 * ports when host networking is in use, and recovers worker pods from previous attempts.
 * The driver is marked as running only after all of that succeeds.
 */
@Override
protected void initializeInternal() throws Exception {
    podsWatchOptFuture = watchTaskManagerPods();
    final File podTemplateFile = KubernetesUtils.getTaskManagerPodTemplateFileInPod();
    if (podTemplateFile.exists()) {
        taskManagerPodTemplate =
                KubernetesUtils.loadPodFromTemplateFile(
                        flinkKubeClient, podTemplateFile, Constants.MAIN_CONTAINER_NAME);
    } else {
        // No template shipped: start from an empty pod definition.
        taskManagerPodTemplate = new FlinkPod.Builder().build();
    }
    updateKubernetesServiceTargetPortIfNecessary();
    recoverWorkerNodesFromPreviousAttempts();
    this.running = true;
}
/**
 * Shuts the driver down: closes the pod watch (waiting up to {@code TERMINATION_WAIT_SECOND}
 * for it to become available) and then the Kubernetes client. Exceptions from both steps are
 * aggregated so the second close still runs if the first fails. Idempotent: does nothing if
 * the driver never finished initializing or was already terminated.
 */
@Override
public void terminate() throws Exception {
    if (!running) {
        return;
    }
    running = false;
    // shut down all components
    Exception exception = null;
    try {
        podsWatchOptFuture.get(TERMINATION_WAIT_SECOND, TimeUnit.SECONDS).close();
    } catch (Exception e) {
        exception = e;
    }
    try {
        flinkKubeClient.close();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }
    if (exception != null) {
        throw exception;
    }
}
/**
 * Tears down the whole Flink cluster on Kubernetes (all resources tagged with this cluster id).
 *
 * @param finalStatus final status of the application (currently only logged implicitly)
 * @param optionalDiagnostics optional human-readable reason, may be null
 */
@Override
public void deregisterApplication(
        ApplicationStatus finalStatus, @Nullable String optionalDiagnostics) {
    log.info(
            "Deregistering Flink Kubernetes cluster, clusterId: {}, diagnostics: {}",
            clusterId,
            optionalDiagnostics == null ? "" : optionalDiagnostics);
    flinkKubeClient.stopAndCleanupCluster(clusterId);
}
/**
 * Requests a new TaskManager pod for the given process spec.
 *
 * <p>The returned future is stored in {@code requestResourceFutures} keyed by pod name and is
 * completed later by the pod watcher once the pod is scheduled (see {@code onPodScheduled}),
 * or completed exceptionally if pod creation fails. If the caller cancels the future, the pod
 * is stopped again — either right after creation finishes or via the cancellation handler
 * below, depending on ordering.
 *
 * @param taskExecutorProcessSpec resource spec for the TaskManager process
 * @return future completed with the worker node once its pod has been scheduled
 */
@Override
public CompletableFuture<KubernetesWorkerNode> requestResource(
        TaskExecutorProcessSpec taskExecutorProcessSpec) {
    final KubernetesTaskManagerParameters parameters =
            createKubernetesTaskManagerParameters(
                    taskExecutorProcessSpec, getBlockedNodeRetriever().getAllBlockedNodeIds());
    final KubernetesPod taskManagerPod =
            KubernetesTaskManagerFactory.buildTaskManagerKubernetesPod(
                    taskManagerPodTemplate, parameters);
    final String podName = taskManagerPod.getName();
    // Completed by the watcher when the pod is scheduled, or exceptionally on failure.
    final CompletableFuture<KubernetesWorkerNode> requestResourceFuture =
            new CompletableFuture<>();
    requestResourceFutures.put(podName, requestResourceFuture);
    log.info(
            "Creating new TaskManager pod with name {} and resource <{},{}>.",
            podName,
            parameters.getTaskManagerMemoryMB(),
            parameters.getTaskManagerCPU());
    final CompletableFuture<Void> createPodFuture =
            flinkKubeClient.createTaskManagerPod(taskManagerPod);
    FutureUtils.assertNoException(
            createPodFuture.handleAsync(
                    (ignore, exception) -> {
                        if (exception != null) {
                            // Creation failed: fail the pending request, if still pending.
                            log.warn(
                                    "Could not create pod {}, exception: {}",
                                    podName,
                                    exception);
                            CompletableFuture<KubernetesWorkerNode> future =
                                    requestResourceFutures.remove(taskManagerPod.getName());
                            if (future != null) {
                                future.completeExceptionally(exception);
                            }
                        } else {
                            if (requestResourceFuture.isCancelled()) {
                                // Request was cancelled while the pod was being created.
                                stopPod(podName);
                                log.info(
                                        "pod {} is cancelled before create pod finish, stop it.",
                                        podName);
                            } else {
                                log.info("Pod {} is created.", podName);
                            }
                        }
                        return null;
                    },
                    getMainThreadExecutor()));
    FutureUtils.assertNoException(
            requestResourceFuture.handle(
                    (ignore, t) -> {
                        if (t == null) {
                            return null;
                        }
                        // Unwrap CompletionException cause if any
                        if (t instanceof CompletionException && t.getCause() != null) {
                            t = t.getCause();
                        }
                        if (t instanceof CancellationException) {
                            requestResourceFutures.remove(taskManagerPod.getName());
                            if (createPodFuture.isDone()) {
                                log.info(
                                        "pod {} is cancelled before scheduled, stop it.",
                                        podName);
                                stopPod(taskManagerPod.getName());
                            }
                        } else if (t instanceof RetryableException
                                || t instanceof KubernetesClientException) {
                            // ignore transient / retriable errors
                        } else {
                            log.error("Error completing resource request.", t);
                            ExceptionUtils.rethrow(t);
                        }
                        return null;
                    }));
    return requestResourceFuture;
}
/**
 * Releases a worker by stopping its pod; the pod name doubles as the worker's resource id.
 */
@Override
public void releaseResource(KubernetesWorkerNode worker) {
    final String podName = worker.getResourceID().toString();
    log.info("Stopping TaskManager pod {}.", podName);
    stopPod(podName);
}
// ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
/**
 * Recovers TaskManager pods left over from previous JobManager attempts. Terminated or
 * not-yet-scheduled pods are stopped; live ones are reported to the resource event handler.
 * Also advances {@code currentMaxAttemptId} past the highest attempt id seen so newly created
 * pod names cannot collide with recovered ones.
 */
private void recoverWorkerNodesFromPreviousAttempts() throws ResourceManagerException {
    List<KubernetesPod> podList =
            flinkKubeClient.getPodsWithLabels(
                    KubernetesUtils.getTaskManagerSelectors(clusterId));
    final List<KubernetesWorkerNode> recoveredWorkers = new ArrayList<>();
    for (KubernetesPod pod : podList) {
        final KubernetesWorkerNode worker =
                new KubernetesWorkerNode(new ResourceID(pod.getName()));
        final long attempt = worker.getAttempt();
        if (attempt > currentMaxAttemptId) {
            currentMaxAttemptId = attempt;
        }
        if (pod.isTerminated() || !pod.isScheduled()) {
            // Dead or unscheduled pods are not worth recovering; clean them up.
            stopPod(pod.getName());
        } else {
            recoveredWorkers.add(worker);
        }
    }
    log.info(
            "Recovered {} pods from previous attempts, current attempt id is {}.",
            recoveredWorkers.size(),
            ++currentMaxAttemptId);
    getResourceEventHandler().onPreviousAttemptWorkersRecovered(recoveredWorkers);
}
/**
 * When host networking is enabled, the actually-bound ports are only known at runtime, so the
 * Kubernetes services must be patched: the rest port (parsed from the web interface URL) on
 * the external service, and — unless HA is active (no internal service then) — the blob server
 * and JobManager RPC ports on the internal service.
 */
private void updateKubernetesServiceTargetPortIfNecessary() throws Exception {
    if (!KubernetesUtils.isHostNetwork(flinkConfig)) {
        return;
    }
    final int restPort =
            ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl(webInterfaceUrl);
    Preconditions.checkArgument(
            restPort > 0, "Failed to parse rest port from " + webInterfaceUrl);
    final String restServiceName = ExternalServiceDecorator.getExternalServiceName(clusterId);
    flinkKubeClient
            .updateServiceTargetPort(restServiceName, Constants.REST_PORT_NAME, restPort)
            .get();
    if (!HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfig)) {
        final String internalServiceName =
                InternalServiceDecorator.getInternalServiceName(clusterId);
        flinkKubeClient
                .updateServiceTargetPort(
                        internalServiceName,
                        Constants.BLOB_SERVER_PORT_NAME,
                        Integer.parseInt(flinkConfig.get(BlobServerOptions.PORT)))
                .get();
        flinkKubeClient
                .updateServiceTargetPort(
                        internalServiceName,
                        Constants.JOB_MANAGER_RPC_PORT_NAME,
                        flinkConfig.get(JobManagerOptions.PORT))
                .get();
    }
}
/**
 * Assembles the parameters for one new TaskManager pod: a unique pod name (cluster id +
 * attempt id + incremented pod index), the per-TaskManager dynamic config properties, JVM
 * memory options derived from the process spec, and the set of blocked nodes to avoid.
 */
private KubernetesTaskManagerParameters createKubernetesTaskManagerParameters(
        TaskExecutorProcessSpec taskExecutorProcessSpec, Set<String> blockedNodes) {
    final String podName =
            String.format(
                    TASK_MANAGER_POD_FORMAT, clusterId, currentMaxAttemptId, ++currentMaxPodId);
    final ContaineredTaskManagerParameters taskManagerParameters =
            ContaineredTaskManagerParameters.create(flinkConfig, taskExecutorProcessSpec);
    final Configuration taskManagerConfig = new Configuration(flinkConfig);
    // The pod name doubles as the TaskManager's resource id.
    taskManagerConfig.set(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID, podName);
    final String dynamicProperties =
            BootstrapTools.getDynamicPropertiesAsString(flinkClientConfig, taskManagerConfig);
    final String jvmMemOpts =
            ProcessMemoryUtils.generateJvmParametersStr(taskExecutorProcessSpec);
    return new KubernetesTaskManagerParameters(
            flinkConfig,
            podName,
            dynamicProperties,
            jvmMemOpts,
            taskManagerParameters,
            ExternalResourceUtils.getExternalResourceConfigurationKeys(
                    flinkConfig,
                    KubernetesConfigOptions.EXTERNAL_RESOURCE_KUBERNETES_CONFIG_KEY_SUFFIX),
            blockedNodes);
}
/**
 * Dispatches watch events onto the main thread and routes each pod to the terminated or
 * scheduled handler. DELETED events are treated as terminations even if the pod object
 * itself does not reflect a terminal status.
 */
private void handlePodEventsInMainThread(List<KubernetesPod> pods, PodEvent podEvent) {
    getMainThreadExecutor()
            .execute(
                    () -> {
                        for (KubernetesPod pod : pods) {
                            // we should also handle the deleted event to avoid situations where
                            // the pod itself doesn't reflect the status correctly (i.e. pod
                            // removed during the pending phase).
                            if (podEvent == PodEvent.DELETED || pod.isTerminated()) {
                                onPodTerminated(pod);
                            } else if (pod.isScheduled()) {
                                onPodScheduled(pod);
                            }
                        }
                    });
}
/**
 * Completes the pending resource request for a pod that has been scheduled. Events for pods
 * without a pending request (already handled, or from a previous attempt) are ignored.
 */
private void onPodScheduled(KubernetesPod pod) {
    final String podName = pod.getName();
    final CompletableFuture<KubernetesWorkerNode> requestResourceFuture =
            requestResourceFutures.remove(podName);
    if (requestResourceFuture == null) {
        log.debug("Ignore TaskManager pod that is already added: {}", podName);
        return;
    }
    log.info("Received new TaskManager pod: {}", podName);
    requestResourceFuture.complete(new KubernetesWorkerNode(new ResourceID(podName)));
}
/**
 * Handles a terminated pod: fails any still-pending request for it with a retryable error
 * (in case termination raced ahead of the "added" event), notifies the resource event handler,
 * and asks Kubernetes to remove the pod.
 */
private void onPodTerminated(KubernetesPod pod) {
    final String podName = pod.getName();
    log.debug("TaskManager pod {} is terminated.", podName);
    // this is a safe net, in case onModified/onDeleted/onError is
    // received before onAdded
    final CompletableFuture<KubernetesWorkerNode> requestResourceFuture =
            requestResourceFutures.remove(podName);
    if (requestResourceFuture != null) {
        log.warn("Pod {} is terminated before being scheduled.", podName);
        requestResourceFuture.completeExceptionally(
                new RetryableException("Pod is terminated."));
    }
    getResourceEventHandler()
            .onWorkerTerminated(new ResourceID(podName), pod.getTerminatedDiagnostics());
    stopPod(podName);
}
/**
 * Asynchronously deletes a pod; failures are only logged (best effort), never propagated.
 */
private void stopPod(String podName) {
    flinkKubeClient
            .stopPod(podName)
            .whenComplete(
                    (ignore, throwable) -> {
                        if (throwable != null) {
                            log.warn(
                                    "Could not remove TaskManager pod {}, exception: {}",
                                    podName,
                                    throwable);
                        }
                    });
}
/**
 * Starts watching all TaskManager pods of this cluster. If establishing the watch fails, the
 * error is forwarded to the resource event handler on the main thread; otherwise success is
 * logged. The returned future is also kept in {@code podsWatchOptFuture} for shutdown.
 */
private CompletableFuture<KubernetesWatch> watchTaskManagerPods() throws Exception {
    CompletableFuture<KubernetesWatch> kubernetesWatchCompletableFuture =
            flinkKubeClient.watchPodsAndDoCallback(
                    KubernetesUtils.getTaskManagerSelectors(clusterId),
                    new PodCallbackHandlerImpl());
    kubernetesWatchCompletableFuture.whenCompleteAsync(
            (KubernetesWatch watch, Throwable throwable) -> {
                if (throwable != null) {
                    getResourceEventHandler().onError(throwable);
                } else {
                    log.info("Create watch on TaskManager pods successfully.");
                }
            },
            getMainThreadExecutor());
    return kubernetesWatchCompletableFuture;
}
// ------------------------------------------------------------------------
// FlinkKubeClient.WatchCallbackHandler
// ------------------------------------------------------------------------
private | KubernetesResourceManagerDriver |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/convert/converters/MultiValuesConverterFactory.java | {
"start": 26268,
"end": 26331
} | class ____ which to convert
*/
public abstract static | from |
java | google__dagger | javatests/dagger/functional/assisted/AssistedFactoryDuplicatedParamNamesTest.java | {
"start": 1345,
"end": 1380
} | class ____ {}
@AssistedFactory
| Bar |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java | {
"start": 7816,
"end": 14337
} | class ____ extends MapReduceBase
implements Reducer<WritableComparable, Writable,
WritableComparable, Writable> {
@Override
@SuppressWarnings("deprecation")
public void configure(JobConf job) {
boolean oldConfigs = job.getBoolean(OLD_CONFIGS, false);
if (oldConfigs) {
String javaOpts = job.get(JobConf.MAPRED_TASK_JAVA_OPTS);
assertNotNull(javaOpts, JobConf.MAPRED_TASK_JAVA_OPTS + " is null!");
assertThat(javaOpts)
.withFailMessage(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: "
+ javaOpts)
.isEqualTo(TASK_OPTS_VAL);
} else {
String reduceJavaOpts = job.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS);
assertNotNull(reduceJavaOpts,
JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " is null!");
assertThat(reduceJavaOpts)
.withFailMessage(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS +
" has value of: " + reduceJavaOpts)
.isEqualTo(REDUCE_OPTS_VAL);
}
// check if X=y works for an already existing parameter
checkEnv("LANG", "en_us_8859_1", "noappend");
// check if X=/tmp for a new env variable
checkEnv("MY_PATH", "/tmp", "noappend");
// check if X=$X:/tmp works for a new env var and results into :/tmp
checkEnv("NEW_PATH", File.pathSeparator + "/tmp", "noappend");
}
/**
 * Intentionally a no-op: all verification happens in {@code configure} when the reduce task
 * JVM starts, so there is nothing to do with the actual records.
 */
@Override
public void reduce(WritableComparable key, Iterator<Writable> values,
                   OutputCollector<WritableComparable, Writable> output,
                   Reporter reporter)
    throws IOException {
}
}
// Spins up a MiniDFSCluster and a MiniMRYarnCluster shared by all tests, and stages the MR app
// jar on the local FS with private (700) permissions. Tests are effectively skipped when the
// app jar has not been built.
@BeforeAll
public static void setup() throws IOException {
    // create configuration, dfs, file system and mapred cluster
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
            + " not found. Not running test.");
        return;
    }
    if (mr == null) {
        mr = new MiniMRYarnCluster(TestMiniMRChildTask.class.getName());
        Configuration conf = new Configuration();
        mr.init(conf);
        mr.start();
    }
    // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // workaround the absent public discache.
    localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
    localFs.setPermission(APP_JAR, new FsPermission("700"));
}
// Best-effort shutdown of the shared clusters; IO failures are only logged so one failing
// close does not mask the test results.
@AfterAll
public static void tearDown() {
    // close file system and shut down dfs and mapred cluster
    try {
        if (fileSys != null) {
            fileSys.close();
        }
        if (dfs != null) {
            dfs.shutdown();
        }
        if (mr != null) {
            mr.stop();
            mr = null;
        }
    } catch (IOException ioe) {
        LOG.info("IO exception in closing file system)" );
        ioe.printStackTrace();
    }
}
/**
 * Test to test if the user set env variables reflect in the child
 * processes. Mainly
 * - x=y (x can be a already existing env variable or a new variable)
 */
@Test
public void testTaskEnv(){
    try {
        JobConf conf = new JobConf(mr.getConfig());
        String baseDir = System.getProperty("test.build.data", "build/test/data");
        // initialize input, output directories
        Path inDir = new Path(baseDir + "/testing/wc/input1");
        Path outDir = new Path(baseDir + "/testing/wc/output1");
        FileSystem outFs = outDir.getFileSystem(conf);
        runTestTaskEnv(conf, inDir, outDir, false);
        outFs.delete(outDir, true);
    } catch(Exception e) {
        e.printStackTrace();
        fail("Exception in testing child env");
        // NOTE(review): fail() throws, so this tearDown() call is never reached.
        tearDown();
    }
}
/**
 * Test to test if the user set *old* env variables reflect in the child
 * processes. Mainly
 * - x=y (x can be a already existing env variable or a new variable)
 */
@Test
public void testTaskOldEnv() {
    try {
        JobConf conf = new JobConf(mr.getConfig());
        String baseDir = System.getProperty("test.build.data", "build/test/data");
        // initialize input, output directories
        Path inDir = new Path(baseDir + "/testing/wc/input1");
        Path outDir = new Path(baseDir + "/testing/wc/output1");
        FileSystem outFs = outDir.getFileSystem(conf);
        runTestTaskEnv(conf, inDir, outDir, true);
        outFs.delete(outDir, true);
    } catch (Exception e) {
        e.printStackTrace();
        fail("Exception in testing child env");
        // NOTE(review): fail() throws, so this tearDown() call is never reached.
        tearDown();
    }
}
/**
 * Runs the env-checking MR job, configuring task environment variables and JVM options via
 * either the old single-key config ({@code mapred.child.*}) or the new per-task-type keys,
 * then asserts the job succeeded (the mapper/reducer assert the env inside the child JVMs).
 *
 * <p>Fixes applied: {@code reduceTaskEnvKey} previously pointed at
 * {@code JobConf.MAPRED_MAP_TASK_ENV} (copy-paste bug), so the reduce-side environment was
 * never actually configured under the new-style keys; it now uses
 * {@code JobConf.MAPRED_REDUCE_TASK_ENV}. A redundant duplicate
 * {@code job.waitForCompletion(true)} call was also removed.
 */
@SuppressWarnings("deprecation")
private void runTestTaskEnv(JobConf config, Path inDir, Path outDir,
                            boolean oldConfigs)
    throws IOException, InterruptedException, ClassNotFoundException {
    String input = "The input";
    configure(config, inDir, outDir, input,
        EnvCheckMapper.class, EnvCheckReducer.class);
    // test
    //  - new SET of new var (MY_PATH)
    //  - set of old var (LANG)
    //  - append to a new var (NEW_PATH)
    String mapTaskEnvKey = JobConf.MAPRED_MAP_TASK_ENV;
    String reduceTaskEnvKey = JobConf.MAPRED_REDUCE_TASK_ENV;
    String mapTaskJavaOptsKey = JobConf.MAPRED_MAP_TASK_JAVA_OPTS;
    String reduceTaskJavaOptsKey = JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS;
    String mapTaskJavaOpts = MAP_OPTS_VAL;
    String reduceTaskJavaOpts = REDUCE_OPTS_VAL;
    config.setBoolean(OLD_CONFIGS, oldConfigs);
    if (oldConfigs) {
        // Old-style config uses one shared key for both task types.
        mapTaskEnvKey = reduceTaskEnvKey = JobConf.MAPRED_TASK_ENV;
        mapTaskJavaOptsKey = reduceTaskJavaOptsKey = JobConf.MAPRED_TASK_JAVA_OPTS;
        mapTaskJavaOpts = reduceTaskJavaOpts = TASK_OPTS_VAL;
    }
    config.set(
        mapTaskEnvKey,
        Shell.WINDOWS ? "MY_PATH=/tmp,LANG=en_us_8859_1,NEW_PATH=%MY_PATH%;/tmp"
            : "MY_PATH=/tmp,LANG=en_us_8859_1,NEW_PATH=$NEW_PATH:/tmp");
    config.set(
        reduceTaskEnvKey,
        Shell.WINDOWS ? "MY_PATH=/tmp,LANG=en_us_8859_1,NEW_PATH=%MY_PATH%;/tmp"
            : "MY_PATH=/tmp,LANG=en_us_8859_1,NEW_PATH=$NEW_PATH:/tmp");
    config.set(mapTaskJavaOptsKey, mapTaskJavaOpts);
    config.set(reduceTaskJavaOptsKey, reduceTaskJavaOpts);
    Job job = Job.getInstance(config);
    job.addFileToClassPath(APP_JAR);
    job.setJarByClass(TestMiniMRChildTask.class);
    job.setMaxMapAttempts(1); // speed up failures
    boolean succeeded = job.waitForCompletion(true);
    assertTrue(succeeded, "The environment checker job failed.");
}
}
| EnvCheckReducer |
java | google__auto | common/src/main/java/com/google/auto/common/MoreElements.java | {
"start": 14326,
"end": 16165
} | class ____ interface
* that has more than one superinterface, the interfaces are in the order of their appearance in
* {@code implements} or {@code extends}.
*
* @param type the type whose own and inherited methods are to be returned
* @param elementUtils an {@link Elements} object, typically returned by {@link
* javax.annotation.processing.AbstractProcessor#processingEnv processingEnv}.{@link
* javax.annotation.processing.ProcessingEnvironment#getElementUtils getElementUtils()}
* @deprecated The method {@link #getLocalAndInheritedMethods(TypeElement, Types, Elements)} has
* better consistency between Java compilers.
*/
@Deprecated
public static ImmutableSet<ExecutableElement> getLocalAndInheritedMethods(
TypeElement type, Elements elementUtils) {
Overrides overrides = new Overrides.NativeOverrides(elementUtils);
return getLocalAndInheritedMethods(type, overrides);
}
/**
* Returns the set of all non-private, non-static methods from {@code type}, including methods
* that it inherits from its ancestors. Inherited methods that are overridden are not included in
* the result. So if {@code type} defines {@code public String toString()}, the returned set will
* contain that method, but not the {@code toString()} method defined by {@code Object}.
*
* <p>The returned set may contain more than one method with the same signature, if {@code type}
* inherits those methods from different ancestors. For example, if it inherits from unrelated
* interfaces {@code One} and {@code Two} which each define {@code void foo();}, and if it does
* not itself override the {@code foo()} method, then both {@code One.foo()} and {@code Two.foo()}
* will be in the returned set.
*
* <p>The order of the returned set is deterministic: within a | or |
java | elastic__elasticsearch | x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java | {
"start": 3012,
"end": 24584
} | class ____ extends IlmESRestTestCase {
private static final String FAILED_STEP_RETRY_COUNT_FIELD = "failed_step_retry_count";
private static final String SHRINK_INDEX_NAME = "shrink_index_name";
private String policy;
private String index;
private String alias;
// Fresh randomized policy/index/alias names per test so runs cannot interfere with each other.
@Before
public void refreshAbstractions() {
    policy = "policy-" + randomAlphaOfLength(5);
    index = "index-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    alias = "alias-" + randomAlphaOfLength(5);
    logger.info("--> running [{}] with index [{}], alias [{}] and policy [{}]", getTestName(), index, alias, policy);
}
/**
 * Shrinks a 4-shard index to 4/divisor shards via a warm-phase shrink action and verifies:
 * the shrunken index exists and carries the original index name as an alias, the warm phase
 * completes, shard count and write block are as expected, the single-node routing requirement
 * is cleared, and writes to the original name are rejected.
 */
public void testShrinkAction() throws Exception {
    int numShards = 4;
    int divisor = randomFrom(2, 4);
    int expectedFinalShards = numShards / divisor;
    createIndexWithSettings(
        client(),
        index,
        alias,
        Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numShards).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
    );
    createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null, false));
    updatePolicy(client(), index, policy);
    String shrunkenIndexName = waitAndGetShrinkIndexName(client(), index);
    assertBusy(() -> assertTrue(indexExists(shrunkenIndexName)), 30, TimeUnit.SECONDS);
    assertBusy(() -> assertTrue(aliasExists(shrunkenIndexName, index)));
    assertBusy(
        () -> assertThat(getStepKeyForIndex(client(), shrunkenIndexName), equalTo(PhaseCompleteStep.finalStep("warm").getKey()))
    );
    assertBusy(() -> {
        Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndexName);
        assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
        assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
        assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
    });
    // The shrunken index is write-blocked, so indexing through the original name must fail.
    expectThrows(ResponseException.class, () -> indexDocument(client(), index));
}
/**
 * When the shrink target shard count equals the index's current shard count, the shrink action
 * is skipped entirely: no write block, no routing requirement, no shrink_index_name in the ILM
 * execution state, and the original index simply completes the warm phase.
 */
public void testSkipShrinkSameShardsWithNumberOfShards() throws Exception {
    int numberOfShards = randomFrom(1, 2);
    createIndexWithSettings(
        client(),
        index,
        alias,
        Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
    );
    createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numberOfShards, null, false));
    updatePolicy(client(), index, policy);
    assertBusy(() -> {
        assertTrue(indexExists(index));
        Map<String, Object> settings = getOnlyIndexSettings(client(), index);
        assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("warm").getKey()));
        assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(numberOfShards)));
        assertNull(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()));
        assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
        // the shrink action was effectively skipped so there must not be any `shrink_index_name` in the ILM state
        assertThat(explainIndex(client(), index).get(SHRINK_INDEX_NAME), nullValue());
    });
}
public void testSkipShrinkSameShardsWithMaxShardSize() throws Exception {
createIndexWithSettings(
client(),
index,
alias,
Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
);
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(null, ByteSizeValue.ofGb(50), false));
updatePolicy(client(), index, policy);
assertBusy(() -> {
assertTrue(indexExists(index));
Map<String, Object> settings = getOnlyIndexSettings(client(), index);
assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("warm").getKey()));
assertNull(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
// the shrink action was effectively skipped so there must not be any `shrink_index_name` in the ILM state
assertThat(explainIndex(client(), index).get(SHRINK_INDEX_NAME), nullValue());
});
}
public void testShrinkDuringSnapshot() throws Exception {
// Create the repository before taking the snapshot.
Request request = new Request("PUT", "/_snapshot/repo");
request.setJsonEntity(
Strings.toString(
JsonXContent.contentBuilder()
.startObject()
.field("type", "fs")
.startObject("settings")
.field("compress", randomBoolean())
.field("location", repoDir.getRoot().getAbsolutePath())
.field("max_snapshot_bytes_per_sec", "256b")
.endObject()
.endObject()
)
);
assertOK(client().performRequest(request));
// create delete policy
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1, null, false), TimeValue.timeValueMillis(0));
// create index without policy
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
// required so the shrink doesn't wait on SetSingleNodeAllocateStep
.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_name", "test-cluster-0")
);
// index document so snapshot actually does something
indexDocument(client(), index);
// start snapshot
request = new Request("PUT", "/_snapshot/repo/snapshot");
request.addParameter("wait_for_completion", "false");
request.setJsonEntity("{\"indices\": \"" + index + "\"}");
assertOK(client().performRequest(request));
// add policy and expect it to trigger shrink immediately (while snapshot in progress)
updatePolicy(client(), index, policy);
String shrunkenIndex = waitAndGetShrinkIndexName(client(), index);
// assert that index was shrunk and original index was deleted
assertBusy(() -> {
assertTrue(indexExists(shrunkenIndex));
assertTrue(aliasExists(shrunkenIndex, index));
Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndex);
assertThat(getStepKeyForIndex(client(), shrunkenIndex), equalTo(PhaseCompleteStep.finalStep("warm").getKey()));
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(1)));
assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
}, 2, TimeUnit.MINUTES);
expectThrows(ResponseException.class, () -> indexDocument(client(), index));
// assert that snapshot succeeded
assertThat(getSnapshotState(client(), "snapshot"), equalTo("SUCCESS"));
assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot")));
}
public void testShrinkActionInTheHotPhase() throws Exception {
int numShards = 2;
int expectedFinalShards = 1;
String originalIndex = index + "-000001";
// add a policy
Map<String, LifecycleAction> hotActions = Map.of(
RolloverAction.NAME,
new RolloverAction(null, null, null, 1L, null, null, null, null, null, null),
ShrinkAction.NAME,
new ShrinkAction(expectedFinalShards, null, false)
);
Map<String, Phase> phases = Map.of("hot", new Phase("hot", TimeValue.ZERO, hotActions));
LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases);
Request createPolicyRequest = new Request("PUT", "_ilm/policy/" + policy);
createPolicyRequest.setJsonEntity("{ \"policy\":" + Strings.toString(lifecyclePolicy) + "}");
client().performRequest(createPolicyRequest);
// and a template
Request createTemplateRequest = new Request("PUT", "_template/" + index);
createTemplateRequest.setJsonEntity(Strings.format("""
{
"index_patterns": ["%s-*"],
"settings": {
"number_of_shards": %s,
"number_of_replicas": 0,
"index.lifecycle.name": "%s",
"index.lifecycle.rollover_alias": "%s"
}
}""", index, numShards, policy, alias));
createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING));
client().performRequest(createTemplateRequest);
// then create the index and index a document to trigger rollover
createIndexWithSettings(client(), originalIndex, alias, Settings.builder(), true);
index(client(), originalIndex, "_id", "foo", "bar");
String shrunkenIndex = waitAndGetShrinkIndexName(client(), originalIndex);
assertBusy(() -> assertTrue(indexExists(shrunkenIndex)), 30, TimeUnit.SECONDS);
assertBusy(() -> assertThat(getStepKeyForIndex(client(), shrunkenIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
assertBusy(() -> {
Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndex);
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
});
}
public void testSetSingleNodeAllocationRetriesUntilItSucceeds() throws Exception {
int numShards = 2;
int expectedFinalShards = 1;
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.putNull(DataTier.TIER_PREFERENCE)
);
ensureGreen(index);
// unallocate all index shards
Request setAllocationToMissingAttribute = new Request("PUT", "/" + index + "/_settings");
setAllocationToMissingAttribute.setJsonEntity("""
{
"settings": {
"index.routing.allocation.include.rack": "bogus_rack"
}
}""");
client().performRequest(setAllocationToMissingAttribute);
ensureHealth(index, (request) -> {
request.addParameter("wait_for_status", "red");
request.addParameter("timeout", "70s");
request.addParameter("level", "shards");
});
// assign the policy that'll attempt to shrink the index (disabling the migrate action as it'll otherwise wait for
// all shards to be active and we want that to happen as part of the shrink action)
MigrateAction migrateAction = MigrateAction.DISABLED;
ShrinkAction shrinkAction = new ShrinkAction(expectedFinalShards, null, false);
Phase phase = new Phase(
"warm",
TimeValue.ZERO,
Map.of(migrateAction.getWriteableName(), migrateAction, shrinkAction.getWriteableName(), shrinkAction)
);
LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of(phase.getName(), phase));
XContentBuilder builder = jsonBuilder();
lifecyclePolicy.toXContent(builder, null);
final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON);
Request putPolicyRequest = new Request("PUT", "_ilm/policy/" + policy);
putPolicyRequest.setEntity(entity);
client().performRequest(putPolicyRequest);
updatePolicy(client(), index, policy);
assertTrue("ILM did not start retrying the set-single-node-allocation step", waitUntil(() -> {
try {
Map<String, Object> explainIndexResponse = explainIndex(client(), index);
if (explainIndexResponse == null) {
return false;
}
String failedStep = (String) explainIndexResponse.get("failed_step");
Integer retryCount = (Integer) explainIndexResponse.get(FAILED_STEP_RETRY_COUNT_FIELD);
return failedStep != null && failedStep.equals(SetSingleNodeAllocateStep.NAME) && retryCount != null && retryCount >= 1;
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
Request resetAllocationForIndex = new Request("PUT", "/" + index + "/_settings");
resetAllocationForIndex.setJsonEntity("""
{
"settings": {
"index.routing.allocation.include.rack": null }
}""");
client().performRequest(resetAllocationForIndex);
String shrunkenIndex = waitAndGetShrinkIndexName(client(), index);
assertBusy(() -> assertTrue(indexExists(shrunkenIndex)), 30, TimeUnit.SECONDS);
assertBusy(() -> assertTrue(aliasExists(shrunkenIndex, index)));
assertBusy(() -> assertThat(getStepKeyForIndex(client(), shrunkenIndex), equalTo(PhaseCompleteStep.finalStep("warm").getKey())));
}
public void testAutomaticRetryFailedShrinkAction() throws Exception {
int numShards = 4;
int divisor = randomFrom(2, 4);
int expectedFinalShards = numShards / divisor;
createIndexWithSettings(
client(),
index,
alias,
Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numShards).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
);
var shrinkAction = new ShrinkAction(numShards + randomIntBetween(1, numShards), null, false);
createNewSingletonPolicy(client(), policy, "warm", shrinkAction);
updatePolicy(client(), index, policy);
assertBusy(
() -> assertThat(
getStepKeyForIndex(client(), index),
equalTo(new Step.StepKey("warm", ShrinkAction.NAME, CheckTargetShardsCountStep.NAME))
),
60,
TimeUnit.SECONDS
);
// update policy to be correct
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null, false));
updatePolicy(client(), index, policy);
// assert corrected policy is picked up and index is shrunken
String shrunkenIndex = waitAndGetShrinkIndexNameWithExtraClusterStateChange(client(), index);
assertThat(shrunkenIndex, notNullValue());
assertBusy(() -> assertTrue(indexExists(shrunkenIndex)), 30, TimeUnit.SECONDS);
assertBusy(() -> assertTrue(aliasExists(shrunkenIndex, index)));
assertBusy(() -> assertThat(getStepKeyForIndex(client(), shrunkenIndex), equalTo(PhaseCompleteStep.finalStep("warm").getKey())));
assertBusy(() -> {
Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndex);
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
});
expectThrows(ResponseException.class, () -> indexDocument(client(), index));
}
/*
* This test verifies that we can still shrink an index even if the total number of shards in the index is greater than
* index.routing.allocation.total_shards_per_node.
*/
public void testTotalShardsPerNodeTooLow() throws Exception {
int numShards = 4;
int divisor = randomFrom(2, 4);
int expectedFinalShards = numShards / divisor;
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), numShards - 2)
);
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null, false));
updatePolicy(client(), index, policy);
String shrunkenIndexName = waitAndGetShrinkIndexName(client(), index);
assertBusy(() -> assertTrue(indexExists(shrunkenIndexName)), 60, TimeUnit.SECONDS);
assertBusy(() -> assertTrue(aliasExists(shrunkenIndexName, index)));
assertBusy(
() -> assertThat(getStepKeyForIndex(client(), shrunkenIndexName), equalTo(PhaseCompleteStep.finalStep("warm").getKey()))
);
assertBusy(() -> {
Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndexName);
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
assertNull(settings.get(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()));
});
expectThrows(ResponseException.class, () -> indexDocument(client(), index));
}
public void testAllowWritesInShrunkIndex() throws Exception {
int numShards = 4;
int divisor = randomFrom(2, 4);
int expectedFinalShards = numShards / divisor;
boolean initialIndexIsReadOnly = randomBoolean();
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), initialIndexIsReadOnly ? "true" : null)
);
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null, true));
updatePolicy(client(), index, policy);
String shrunkenIndexName = waitAndGetShrinkIndexName(client(), index);
assertBusy(() -> assertTrue(indexExists(shrunkenIndexName)), 30, TimeUnit.SECONDS);
assertBusy(() -> assertTrue(aliasExists(shrunkenIndexName, index)));
assertBusy(
() -> assertThat(getStepKeyForIndex(client(), shrunkenIndexName), equalTo(PhaseCompleteStep.finalStep("warm").getKey()))
);
assertBusy(() -> {
Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndexName);
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
// check that write block removed
assertNull(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()));
});
indexDocument(client(), index);
// check that actually wrote to index
assertBusy(() -> assertDocCount(client(), index, 1));
}
public void testAllowWritesWhenShrinkIsSkipped() throws Exception {
int numberOfShards = randomFrom(1, 2);
boolean initialIndexIsReadOnly = randomBoolean();
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), initialIndexIsReadOnly ? "true" : null)
);
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numberOfShards, null, true));
updatePolicy(client(), index, policy);
assertBusy(() -> {
assertTrue(indexExists(index));
Map<String, Object> settings = getOnlyIndexSettings(client(), index);
assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("warm").getKey()));
assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(numberOfShards)));
assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
// the shrink action was effectively skipped so there must not be any `shrink_index_name` in the ILM state
assertThat(explainIndex(client(), index).get(SHRINK_INDEX_NAME), nullValue());
// check that write block removed
assertNull(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()));
});
indexDocument(client(), index);
// check that actually wrote to index
assertBusy(() -> assertDocCount(client(), index, 1));
}
}
| ShrinkActionIT |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/resolver/LoggerResolver.java | {
"start": 1393,
"end": 2695
} | class ____ implements EventResolver {
private static final EventResolver NAME_RESOLVER = (final LogEvent logEvent, final JsonWriter jsonWriter) -> {
final String loggerName = logEvent.getLoggerName();
jsonWriter.writeString(loggerName);
};
private static final EventResolver FQCN_RESOLVER = (final LogEvent logEvent, final JsonWriter jsonWriter) -> {
final String loggerFqcn = logEvent.getLoggerFqcn();
jsonWriter.writeString(loggerFqcn);
};
private final EventResolver internalResolver;
LoggerResolver(final TemplateResolverConfig config) {
this.internalResolver = createInternalResolver(config);
}
private static EventResolver createInternalResolver(final TemplateResolverConfig config) {
final String fieldName = config.getString("field");
if ("name".equals(fieldName)) {
return NAME_RESOLVER;
} else if ("fqcn".equals(fieldName)) {
return FQCN_RESOLVER;
}
throw new IllegalArgumentException("unknown field: " + config);
}
static String getName() {
return "logger";
}
@Override
public void resolve(final LogEvent logEvent, final JsonWriter jsonWriter) {
internalResolver.resolve(logEvent, jsonWriter);
}
}
| LoggerResolver |
java | apache__camel | components/camel-quickfix/src/generated/java/org/apache/camel/component/quickfixj/converter/QuickfixjConvertersLoader.java | {
"start": 895,
"end": 3494
} | class ____ implements TypeConverterLoader, CamelContextAware {
private CamelContext camelContext;
public QuickfixjConvertersLoader() {
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void load(TypeConverterRegistry registry) throws TypeConverterLoaderException {
registerConverters(registry);
}
private void registerConverters(TypeConverterRegistry registry) {
addTypeConverter(registry, java.io.InputStream.class, quickfix.Message.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.quickfixj.converter.QuickfixjConverters.toInputStream((quickfix.Message) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, quickfix.Message.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.quickfixj.converter.QuickfixjConverters.toMessage((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, quickfix.Message.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.quickfixj.converter.QuickfixjConverters.toMessage((java.lang.String) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, quickfix.SessionID.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.quickfixj.converter.QuickfixjConverters.toSessionID((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
}
private static void addTypeConverter(TypeConverterRegistry registry, Class<?> toType, Class<?> fromType, boolean allowNull, SimpleTypeConverter.ConversionMethod method) {
registry.addTypeConverter(toType, fromType, new SimpleTypeConverter(allowNull, method));
}
}
| QuickfixjConvertersLoader |
java | netty__netty | buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java | {
"start": 6323,
"end": 7086
} | class ____ extends UnpooledHeapByteBuf {
InstrumentedUnpooledHeapByteBuf(UnpooledByteBufAllocator alloc, int initialCapacity, int maxCapacity) {
super(alloc, initialCapacity, maxCapacity);
}
@Override
protected byte[] allocateArray(int initialCapacity) {
byte[] bytes = super.allocateArray(initialCapacity);
((UnpooledByteBufAllocator) alloc()).incrementHeap(bytes.length);
return bytes;
}
@Override
protected void freeArray(byte[] array) {
int length = array.length;
super.freeArray(array);
((UnpooledByteBufAllocator) alloc()).decrementHeap(length);
}
}
private static final | InstrumentedUnpooledHeapByteBuf |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_wangran2.java | {
"start": 2198,
"end": 2654
} | class ____ {
private int id;
private Root root;
public Child(){
}
public Root getRoot() {
return root;
}
public void setRoot(Root root) {
System.out.println("setRoot");
this.root = root;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
}
}
// 500m / 300
| Child |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/io/LegacyBinaryEncoder.java | {
"start": 1889,
"end": 5756
} | class ____ implements ByteWriter {
private final OutputStream out;
public SimpleByteWriter(OutputStream out) {
this.out = out;
}
@Override
public void write(ByteBuffer bytes) throws IOException {
encodeLong(bytes.remaining(), out);
out.write(bytes.array(), bytes.position(), bytes.remaining());
}
}
private final ByteWriter byteWriter;
/**
* Create a writer that sends its output to the underlying stream
* <code>out</code>.
*/
public LegacyBinaryEncoder(OutputStream out) {
this.out = out;
this.byteWriter = new SimpleByteWriter(out);
}
@Override
public void flush() throws IOException {
if (out != null) {
out.flush();
}
}
@Override
public void writeNull() throws IOException {
}
@Override
public void writeBoolean(boolean b) throws IOException {
out.write(b ? 1 : 0);
}
@Override
public void writeInt(int n) throws IOException {
encodeLong(n, out);
}
@Override
public void writeLong(long n) throws IOException {
encodeLong(n, out);
}
@Override
public void writeFloat(float f) throws IOException {
encodeFloat(f, out);
}
@Override
public void writeDouble(double d) throws IOException {
encodeDouble(d, out);
}
@Override
public void writeString(Utf8 utf8) throws IOException {
encodeString(utf8.getBytes(), 0, utf8.getByteLength());
}
@Override
public void writeString(String string) throws IOException {
byte[] bytes = Utf8.getBytesFor(string);
encodeString(bytes, 0, bytes.length);
}
private void encodeString(byte[] bytes, int offset, int length) throws IOException {
encodeLong(length, out);
out.write(bytes, offset, length);
}
@Override
public void writeBytes(ByteBuffer bytes) throws IOException {
byteWriter.write(bytes);
}
@Override
public void writeBytes(byte[] bytes, int start, int len) throws IOException {
encodeLong(len, out);
out.write(bytes, start, len);
}
@Override
public void writeFixed(byte[] bytes, int start, int len) throws IOException {
out.write(bytes, start, len);
}
@Override
public void writeEnum(int e) throws IOException {
encodeLong(e, out);
}
@Override
public void writeArrayStart() throws IOException {
}
@Override
public void setItemCount(long itemCount) throws IOException {
if (itemCount > 0) {
writeLong(itemCount);
}
}
@Override
public void startItem() throws IOException {
}
@Override
public void writeArrayEnd() throws IOException {
encodeLong(0, out);
}
@Override
public void writeMapStart() throws IOException {
}
@Override
public void writeMapEnd() throws IOException {
encodeLong(0, out);
}
@Override
public void writeIndex(int unionIndex) throws IOException {
encodeLong(unionIndex, out);
}
protected static void encodeLong(long n, OutputStream o) throws IOException {
n = (n << 1) ^ (n >> 63); // move sign to low-order bit
while ((n & ~0x7F) != 0) {
o.write((byte) ((n & 0x7f) | 0x80));
n >>>= 7;
}
o.write((byte) n);
}
protected static void encodeFloat(float f, OutputStream o) throws IOException {
long bits = Float.floatToRawIntBits(f);
o.write((int) (bits) & 0xFF);
o.write((int) (bits >> 8) & 0xFF);
o.write((int) (bits >> 16) & 0xFF);
o.write((int) (bits >> 24) & 0xFF);
}
protected static void encodeDouble(double d, OutputStream o) throws IOException {
long bits = Double.doubleToRawLongBits(d);
o.write((int) (bits) & 0xFF);
o.write((int) (bits >> 8) & 0xFF);
o.write((int) (bits >> 16) & 0xFF);
o.write((int) (bits >> 24) & 0xFF);
o.write((int) (bits >> 32) & 0xFF);
o.write((int) (bits >> 40) & 0xFF);
o.write((int) (bits >> 48) & 0xFF);
o.write((int) (bits >> 56) & 0xFF);
}
}
| SimpleByteWriter |
java | elastic__elasticsearch | libs/core/src/main/java/org/elasticsearch/core/internal/provider/InMemoryModuleFinder.java | {
"start": 1353,
"end": 4173
} | class ____ implements ModuleFinder {
private final Map<String, ModuleReference> namesToReference;
/**
* Creates a module finder that eagerly scans the given paths to build an in memory module
* finder.
*
* <p> The set missingModules are filtered out of the requires directives of the retrieved
* module descriptors.
*/
static InMemoryModuleFinder of(Set<String> missingModules, Path... entries) {
return new InMemoryModuleFinder(
Arrays.stream(entries)
.map(EmbeddedModulePath::descriptorFor)
.map(md -> filterRequires(md, missingModules))
.collect(
Collectors.toUnmodifiableMap(
ModuleDescriptor::name,
m -> new InMemoryModuleReference(m, URI.create("module:/" + m.name()))
)
)
);
}
static ModuleDescriptor filterRequires(ModuleDescriptor md, Set<String> missingModules) {
if (missingModules.size() == 0
|| md.isAutomatic()
|| md.requires().stream().anyMatch(req -> missingModules.contains(req.name())) == false) {
return md;
}
ModuleDescriptor.Builder builder;
if (md.isOpen()) {
builder = ModuleDescriptor.newOpenModule(md.name());
} else {
builder = ModuleDescriptor.newModule(md.name());
}
md.version().ifPresent(builder::version);
md.requires().stream().filter(req -> missingModules.contains(req.name()) == false).forEach(builder::requires);
md.exports().forEach(builder::exports);
md.opens().forEach(builder::opens);
md.provides().forEach(builder::provides);
md.uses().forEach(builder::uses);
builder.packages(md.packages());
return builder.build();
}
/**
* Creates a module finder of the given module descriptors.
*/
static InMemoryModuleFinder of(ModuleDescriptor... descriptors) {
return new InMemoryModuleFinder(
Arrays.stream(descriptors)
.collect(
Collectors.toUnmodifiableMap(
ModuleDescriptor::name,
m -> new InMemoryModuleReference(m, URI.create("module:/" + m.name()))
)
)
);
}
private InMemoryModuleFinder(Map<String, ModuleReference> mrefs) {
this.namesToReference = mrefs;
}
@Override
public Optional<ModuleReference> find(String name) {
Objects.requireNonNull(name);
return Optional.ofNullable(namesToReference.get(name));
}
@Override
public Set<ModuleReference> findAll() {
return Set.copyOf(namesToReference.values());
}
static | InMemoryModuleFinder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/xml/XmlAndAnnotationAttributeOverrideTest.java | {
"start": 1154,
"end": 1801
} | class ____ {
@Test
@JiraKey(value = "HHH-14827")
public void testDerivedClassAttributeOverriding(EntityManagerFactoryScope scope) {
assertThat( SchemaUtil.getColumnNames( scope.getEntityManagerFactory(), DerivedEntityType.class ) )
.contains( "custom_name" )
.doesNotContain( "name" );
}
@Test
public void testEmbeddedAttributeOverriding(EntityManagerFactoryScope scope) {
assertThat( SchemaUtil.getColumnNames( scope.getEntityManagerFactory(), DerivedEntityType.class ) )
.contains( "custom_embeddable_name" )
.doesNotContain( "embeddable_name" );
}
@MappedSuperclass
public static | XmlAndAnnotationAttributeOverrideTest |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/bind/DefaultRequestBinderRegistry.java | {
"start": 17711,
"end": 18369
} | class ____<B> extends HttpRequestWrapper<B> implements PushCapableHttpRequest<B> {
private final PushCapableHttpRequest<?> push;
public PushCapableRequestWrapper(HttpRequest<B> primary, PushCapableHttpRequest<?> push) {
super(primary);
this.push = push;
}
@Override
public boolean isServerPushSupported() {
return push.isServerPushSupported();
}
@Override
public PushCapableHttpRequest<B> serverPush(@NonNull HttpRequest<?> request) {
push.serverPush(request);
return this;
}
}
private static | PushCapableRequestWrapper |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetoone/bidirectional/ids/BiEmbIdRefEdEntity.java | {
"start": 451,
"end": 1874
} | class ____ {
@EmbeddedId
private EmbId id;
@Audited
private String data;
@Audited
@OneToOne(mappedBy = "reference")
private BiEmbIdRefIngEntity referencing;
public BiEmbIdRefEdEntity() {
}
public BiEmbIdRefEdEntity(EmbId id, String data) {
this.id = id;
this.data = data;
}
public BiEmbIdRefEdEntity(EmbId id, String data, BiEmbIdRefIngEntity referencing) {
this.id = id;
this.data = data;
this.referencing = referencing;
}
public EmbId getId() {
return id;
}
public void setId(EmbId id) {
this.id = id;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public BiEmbIdRefIngEntity getReferencing() {
return referencing;
}
public void setReferencing(BiEmbIdRefIngEntity referencing) {
this.referencing = referencing;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof BiEmbIdRefEdEntity) ) {
return false;
}
BiEmbIdRefEdEntity that = (BiEmbIdRefEdEntity) o;
if ( data != null ? !data.equals( that.getData() ) : that.getData() != null ) {
return false;
}
if ( id != null ? !id.equals( that.getId() ) : that.getId() != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id != null ? id.hashCode() : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
}
| BiEmbIdRefEdEntity |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/SoftDelete.java | {
"start": 4372,
"end": 4452
} | interface ____ extends AttributeConverter<Boolean,Object> {}
}
| UnspecifiedConversion |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyKeyValueStore.java | {
"start": 1415,
"end": 8349
} | class ____<K, V> implements ReadOnlyKeyValueStore<K, V> {
private final StateStoreProvider storeProvider;
private final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType;
private final String storeName;
public CompositeReadOnlyKeyValueStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType,
final String storeName) {
this.storeProvider = storeProvider;
this.storeType = storeType;
this.storeName = storeName;
}
@Override
public V get(final K key) {
Objects.requireNonNull(key);
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
for (final ReadOnlyKeyValueStore<K, V> store : stores) {
try {
final V result = store.get(key);
if (result != null) {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
return null;
}
@Override
public KeyValueIterator<K, V> range(final K from, final K to) {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.range(from, to);
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Override
public KeyValueIterator<K, V> reverseRange(final K from, final K to) {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.reverseRange(from, to);
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) {
Objects.requireNonNull(prefix);
Objects.requireNonNull(prefixKeySerializer);
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.prefixScan(prefix, prefixKeySerializer);
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Override
public KeyValueIterator<K, V> all() {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.all();
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Override
public KeyValueIterator<K, V> reverseAll() {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.reverseAll();
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Override
public long approximateNumEntries() {
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
long total = 0;
for (final ReadOnlyKeyValueStore<K, V> store : stores) {
total += store.approximateNumEntries();
if (total < 0) {
return Long.MAX_VALUE;
}
}
return total;
}
}
| CompositeReadOnlyKeyValueStore |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/BlockingSingleSubscriber.java | {
"start": 1176,
"end": 4624
} | class ____<T> extends CountDownLatch
implements InnerConsumer<T>, Disposable {
@Nullable T value;
@Nullable Throwable error;
@Nullable Subscription s;
final Context context;
volatile boolean cancelled;
BlockingSingleSubscriber(Context context) {
super(1);
this.context = context;
}
@Override
public final void onSubscribe(Subscription s) {
this.s = s;
if (!cancelled) {
s.request(Long.MAX_VALUE);
}
}
@Override
public final void onComplete() {
countDown();
}
@Override
public Context currentContext() {
return this.context;
}
@Override
public final void dispose() {
cancelled = true;
Subscription s = this.s;
if (s != null) {
this.s = null;
s.cancel();
}
}
/**
* Block until the first value arrives and return it, otherwise
* return null for an empty source and rethrow any exception.
*
* @return the first value or null if the source is empty
*/
final @Nullable T blockingGet() {
if (Schedulers.isInNonBlockingThread()) {
throw new IllegalStateException("block()/blockFirst()/blockLast() are blocking, which is not supported in thread " + Thread.currentThread().getName());
}
if (getCount() != 0) {
try {
await();
}
catch (InterruptedException ex) {
dispose();
Thread.currentThread().interrupt();
throw Exceptions.propagate(ex);
}
}
Throwable e = error;
if (e != null) {
RuntimeException re = Exceptions.propagate(e);
//this is ok, as re is always a new non-singleton instance
re.addSuppressed(new Exception("#block terminated with an error"));
throw re;
}
return value;
}
/**
* Block until the first value arrives and return it, otherwise
* return null for an empty source and rethrow any exception.
*
* @param timeout the timeout to wait
* @param unit the time unit
*
* @return the first value or null if the source is empty
*/
final @Nullable T blockingGet(long timeout, TimeUnit unit) {
if (Schedulers.isInNonBlockingThread()) {
throw new IllegalStateException("block()/blockFirst()/blockLast() are blocking, which is not supported in thread " + Thread.currentThread().getName());
}
if (getCount() != 0) {
try {
if (!await(timeout, unit)) {
dispose();
String errorMessage = "Timeout on blocking read for " + timeout + " " + unit;
throw new IllegalStateException(errorMessage, new TimeoutException(errorMessage));
}
}
catch (InterruptedException ex) {
dispose();
RuntimeException re = Exceptions.propagate(ex);
//this is ok, as re is always a new non-singleton instance
re.addSuppressed(new Exception("#block has been interrupted"));
Thread.currentThread().interrupt();
throw re;
}
}
Throwable e = error;
if (e != null) {
RuntimeException re = Exceptions.propagate(e);
//this is ok, as re is always a new non-singleton instance
re.addSuppressed(new Exception("#block terminated with an error"));
throw re;
}
return value;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return getCount() == 0;
if (key == Attr.PARENT) return s;
if (key == Attr.CANCELLED) return cancelled;
if (key == Attr.ERROR) return error;
if (key == Attr.PREFETCH) return Integer.MAX_VALUE;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return null;
}
@Override
public boolean isDisposed() {
return cancelled || getCount() == 0;
}
}
| BlockingSingleSubscriber |
java | grpc__grpc-java | okhttp/src/main/java/io/grpc/okhttp/OkHttpServerStream.java | {
"start": 2426,
"end": 4092
} | class ____ implements AbstractServerStream.Sink {
@Override
public void writeHeaders(Metadata metadata, boolean flush) {
try (TaskCloseable ignore =
PerfMark.traceTask("OkHttpServerStream$Sink.writeHeaders")) {
List<Header> responseHeaders = Headers.createResponseHeaders(metadata);
synchronized (state.lock) {
state.sendHeaders(responseHeaders);
}
}
}
@Override
public void writeFrame(WritableBuffer frame, boolean flush, int numMessages) {
try (TaskCloseable ignore =
PerfMark.traceTask("OkHttpServerStream$Sink.writeFrame")) {
Buffer buffer = ((OkHttpWritableBuffer) frame).buffer();
int size = (int) buffer.size();
if (size > 0) {
onSendingBytes(size);
}
synchronized (state.lock) {
state.sendBuffer(buffer, flush);
transportTracer.reportMessageSent(numMessages);
}
}
}
@Override
public void writeTrailers(Metadata trailers, boolean headersSent, Status status) {
try (TaskCloseable ignore =
PerfMark.traceTask("OkHttpServerStream$Sink.writeTrailers")) {
List<Header> responseTrailers = Headers.createResponseTrailers(trailers, headersSent);
synchronized (state.lock) {
state.sendTrailers(responseTrailers);
}
}
}
@Override
public void cancel(Status reason) {
try (TaskCloseable ignore =
PerfMark.traceTask("OkHttpServerStream$Sink.cancel")) {
synchronized (state.lock) {
state.cancel(ErrorCode.CANCEL, reason);
}
}
}
}
static | Sink |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scripting/support/StubMessenger.java | {
"start": 772,
"end": 1049
} | class ____ implements ConfigurableMessenger {
private String message = "I used to be smart... now I'm just stupid.";
@Override
public void setMessage(String message) {
this.message = message;
}
@Override
public String getMessage() {
return message;
}
}
| StubMessenger |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBIterRequest.java | {
"start": 1532,
"end": 6276
} | class ____<K, N, UK, UV, R> implements Closeable {
/**
* ContextKey that use to calculate prefix bytes. All entries under the same key have the same
* prefix, hence we can stop iterating once coming across an entry with a different prefix.
*/
@Nonnull final ContextKey<K, N> contextKey;
/** The table that generated iter requests. */
final ForStMapState<K, N, UK, UV> table;
/**
* The state request handler, used for {@link
* org.apache.flink.runtime.asyncprocessing.StateRequestType#ITERATOR_LOADING}.
*/
final StateRequestHandler stateRequestHandler;
final int keyGroupPrefixBytes;
/**
* The rocksdb iterator. If null, create a new rocksdb iterator and seek start from the {@link
* #getKeyPrefixBytes}.
*/
@Nullable RocksIterator rocksIterator;
public ForStDBIterRequest(
ContextKey<K, N> contextKey,
ForStMapState<K, N, UK, UV> table,
StateRequestHandler stateRequestHandler,
RocksIterator rocksIterator) {
this.contextKey = contextKey;
this.table = table;
this.stateRequestHandler = stateRequestHandler;
this.keyGroupPrefixBytes = table.getKeyGroupPrefixBytes();
this.rocksIterator = rocksIterator;
}
protected UV deserializeUserValue(byte[] valueBytes) throws IOException {
return table.deserializeValue(valueBytes);
}
protected UK deserializeUserKey(byte[] userKeyBytes, int userKeyOffset) throws IOException {
return table.deserializeUserKey(userKeyBytes, userKeyOffset);
}
protected byte[] getKeyPrefixBytes() throws IOException {
Preconditions.checkState(contextKey.getUserKey() == null);
return table.serializeKey(contextKey);
}
/**
* Check if the raw key bytes start with the key prefix bytes.
*
* @param keyPrefixBytes the key prefix bytes.
* @param rawKeyBytes the raw key bytes.
* @param kgPrefixBytes the number of key group prefix bytes.
* @return true if the raw key bytes start with the key prefix bytes.
*/
protected static boolean startWithKeyPrefix(
byte[] keyPrefixBytes, byte[] rawKeyBytes, int kgPrefixBytes) {
if (rawKeyBytes.length < keyPrefixBytes.length) {
return false;
}
for (int i = keyPrefixBytes.length; --i >= kgPrefixBytes; ) {
if (rawKeyBytes[i] != keyPrefixBytes[i]) {
return false;
}
}
return true;
}
public void process(RocksDB db, int cacheSizeLimit) throws IOException {
// step 1: setup iterator, seek to the key
byte[] prefix = getKeyPrefixBytes();
int userKeyOffset = prefix.length;
if (rocksIterator == null) {
rocksIterator = db.newIterator(table.getColumnFamilyHandle());
rocksIterator.seek(prefix);
}
// step 2: iterate the entries, read at most cacheSizeLimit entries at a time. If not
// read all at once, other entries will be loaded in a new ITERATOR_LOADING request.
List<RawEntry> entries = new ArrayList<>(cacheSizeLimit);
boolean encounterEnd = false;
while (rocksIterator.isValid() && entries.size() < cacheSizeLimit) {
byte[] key = rocksIterator.key();
if (startWithKeyPrefix(prefix, key, keyGroupPrefixBytes)) {
entries.add(new RawEntry(key, rocksIterator.value()));
} else {
encounterEnd = true;
rocksIterator.close();
rocksIterator = null;
break;
}
rocksIterator.next();
}
if (!encounterEnd && (entries.size() < cacheSizeLimit || !rocksIterator.isValid())) {
encounterEnd = true;
rocksIterator.close();
rocksIterator = null;
}
// step 3: deserialize the entries
Collection<R> deserializedEntries = deserializeElement(entries, userKeyOffset);
// step 4: construct a ForStMapIterator.
buildIteratorAndCompleteFuture(deserializedEntries, encounterEnd);
}
public abstract void completeStateFutureExceptionally(String message, Throwable ex);
public abstract Collection<R> deserializeElement(List<RawEntry> entries, int userKeyOffset)
throws IOException;
public abstract void buildIteratorAndCompleteFuture(
Collection<R> partialResult, boolean encounterEnd);
public void close() throws IOException {
if (rocksIterator != null) {
rocksIterator.close();
rocksIterator = null;
}
}
/** The entry to store the raw key and value. */
static | ForStDBIterRequest |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/aot/BeanInstanceSupplierTests.java | {
"start": 30983,
"end": 31461
} | class ____ implements ArgumentsProvider, AnnotationConsumer<ParameterizedResolverTest> {
private Sources source;
@Override
public void accept(ParameterizedResolverTest annotation) {
this.source = annotation.value();
}
@Override
public Stream<? extends Arguments> provideArguments(ParameterDeclarations parameters, ExtensionContext context) {
return this.source.provideArguments(context);
}
}
/**
* Sources for parameterized tests.
*/
| SourcesArguments |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/mutation/UpdateEntityWithEmbeddedTest.java | {
"start": 2303,
"end": 2818
} | class ____ {
String fileName;
String fileExtension;
public Logo() {
}
public Logo(String fileName, String fileExtension) {
this.fileName = fileName;
this.fileExtension = fileExtension;
}
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public String getFileExtension() {
return fileExtension;
}
public void setFileExtension(String fileExtension) {
this.fileExtension = fileExtension;
}
}
}
| Logo |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java | {
"start": 6965,
"end": 8653
} | class ____: "java.lang.String cannot
// be cast..."
if (message.startsWith(argumentType.getName())) {
return true;
}
// On Java 11, the message starts with "class ..." a.k.a. Class.toString()
if (message.startsWith(argumentType.toString())) {
return true;
}
// On Java 9, the message used to contain the module name:
// "java.base/java.lang.String cannot be cast..."
int moduleSeparatorIndex = message.indexOf('/');
if (moduleSeparatorIndex != -1 && message.startsWith(argumentType.getName(), moduleSeparatorIndex + 1)) {
return true;
}
if (CLASS_GET_MODULE != null && MODULE_GET_NAME != null) {
Object module = ReflectionUtils.invokeMethod(CLASS_GET_MODULE, argumentType);
Object moduleName = ReflectionUtils.invokeMethod(MODULE_GET_NAME, module);
return message.startsWith(moduleName + "/" + argumentType.getName());
}
return false;
}
private void logNonMatchingType(C callback, ClassCastException ex) {
if (this.logger.isDebugEnabled()) {
Class<?> expectedType = ResolvableType.forClass(this.callbackType).resolveGeneric();
String expectedTypeName = (expectedType != null) ? ClassUtils.getShortName(expectedType) + " type"
: "type";
String message = "Non-matching " + expectedTypeName + " for callback "
+ ClassUtils.getShortName(this.callbackType) + ": " + callback;
this.logger.debug(message, ex);
}
}
@SuppressWarnings("unchecked")
private SELF self() {
return (SELF) this;
}
}
/**
* Represents a single callback that can be invoked in a lambda safe way.
*
* @param <C> the callback type
* @param <A> the primary argument type
*/
public static final | name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.