language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinTypes.java
|
{
"start": 1333,
"end": 2238
}
|
enum ____ implements JoinType {
INNER(1, "INNER"),
LEFT(2, "LEFT OUTER"),
RIGHT(3, "RIGHT OUTER"),
FULL(4, "FULL OUTER"),
CROSS(5, "CROSS");
private final String name;
private final byte id;
CoreJoinType(int id, String name) {
this.id = (byte) id;
this.name = name;
}
@Override
public String joinName() {
return name;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(id);
}
}
public static JoinType readFrom(StreamInput in) throws IOException {
byte id = in.readByte();
JoinType type = JOIN_TYPES.get(id);
if (type == null) {
throw new IllegalArgumentException("unsupported join [" + id + "]");
}
;
return type;
}
}
|
CoreJoinType
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/enhancer/HibernateEntityEnhancerPresentEmbeddableTest.java
|
{
"start": 10369,
"end": 11257
}
|
class ____ {
private String text;
@Embedded
private EmbeddableWithAnnotation embedded;
protected NestingEmbeddableWithAnnotation() {
// For Hibernate ORM only - it will change the property value through reflection
}
public NestingEmbeddableWithAnnotation(String text) {
this.text = text;
this.embedded = new EmbeddableWithAnnotation(text);
}
public String getText() {
return text;
}
public void setText(String text) {
this.text = text;
}
public EmbeddableWithAnnotation getEmbedded() {
return embedded;
}
public void setEmbedded(EmbeddableWithAnnotation embedded) {
this.embedded = embedded;
}
}
@MappedSuperclass
public static abstract
|
NestingEmbeddableWithAnnotation
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/AbstractRequestMatcherRegistryNoMvcTests.java
|
{
"start": 2993,
"end": 3234
}
|
class ____ extends AbstractRequestMatcherRegistry<List<RequestMatcher>> {
@Override
protected List<RequestMatcher> chainRequestMatchers(List<RequestMatcher> requestMatchers) {
return requestMatchers;
}
}
}
|
TestRequestMatcherRegistry
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/RecordDeserializer.java
|
{
"start": 1253,
"end": 2428
}
|
enum ____ {
PARTIAL_RECORD(false, true),
INTERMEDIATE_RECORD_FROM_BUFFER(true, false),
LAST_RECORD_FROM_BUFFER(true, true);
private final boolean isFullRecord;
private final boolean isBufferConsumed;
private DeserializationResult(boolean isFullRecord, boolean isBufferConsumed) {
this.isFullRecord = isFullRecord;
this.isBufferConsumed = isBufferConsumed;
}
public boolean isFullRecord() {
return this.isFullRecord;
}
public boolean isBufferConsumed() {
return this.isBufferConsumed;
}
}
DeserializationResult getNextRecord(T target) throws IOException;
void setNextBuffer(Buffer buffer) throws IOException;
void clear();
/**
* Gets the unconsumed buffer which needs to be persisted in unaligned checkpoint scenario.
*
* <p>Note that the unconsumed buffer might be null if the whole buffer was already consumed
* before and there are no partial length or data remained in the end of buffer.
*/
CloseableIterator<Buffer> getUnconsumedBuffer() throws IOException;
}
|
DeserializationResult
|
java
|
netty__netty
|
common/src/main/java/io/netty/util/concurrent/SingleThreadEventExecutor.java
|
{
"start": 2172,
"end": 13162
}
|
class ____ extends AbstractScheduledEventExecutor implements OrderedEventExecutor {
static final int DEFAULT_MAX_PENDING_EXECUTOR_TASKS = Math.max(16,
SystemPropertyUtil.getInt("io.netty.eventexecutor.maxPendingTasks", Integer.MAX_VALUE));
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(SingleThreadEventExecutor.class);
private static final int ST_NOT_STARTED = 1;
private static final int ST_SUSPENDING = 2;
private static final int ST_SUSPENDED = 3;
private static final int ST_STARTED = 4;
private static final int ST_SHUTTING_DOWN = 5;
private static final int ST_SHUTDOWN = 6;
private static final int ST_TERMINATED = 7;
private static final Runnable NOOP_TASK = new Runnable() {
@Override
public void run() {
// Do nothing.
}
};
private static final AtomicIntegerFieldUpdater<SingleThreadEventExecutor> STATE_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(SingleThreadEventExecutor.class, "state");
private static final AtomicReferenceFieldUpdater<SingleThreadEventExecutor, ThreadProperties> PROPERTIES_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(
SingleThreadEventExecutor.class, ThreadProperties.class, "threadProperties");
private static final AtomicLongFieldUpdater<SingleThreadEventExecutor> ACCUMULATED_ACTIVE_TIME_NANOS_UPDATER =
AtomicLongFieldUpdater.newUpdater(SingleThreadEventExecutor.class, "accumulatedActiveTimeNanos");
private static final AtomicIntegerFieldUpdater<SingleThreadEventExecutor> CONSECUTIVE_IDLE_CYCLES_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(SingleThreadEventExecutor.class, "consecutiveIdleCycles");
private static final AtomicIntegerFieldUpdater<SingleThreadEventExecutor> CONSECUTIVE_BUSY_CYCLES_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(SingleThreadEventExecutor.class, "consecutiveBusyCycles");
private final Queue<Runnable> taskQueue;
private volatile Thread thread;
@SuppressWarnings("unused")
private volatile ThreadProperties threadProperties;
private final Executor executor;
private volatile boolean interrupted;
private final Lock processingLock = new ReentrantLock();
private final CountDownLatch threadLock = new CountDownLatch(1);
private final Set<Runnable> shutdownHooks = new LinkedHashSet<Runnable>();
private final boolean addTaskWakesUp;
private final int maxPendingTasks;
private final RejectedExecutionHandler rejectedExecutionHandler;
private final boolean supportSuspension;
// A running total of nanoseconds this executor has spent in an "active" state.
private volatile long accumulatedActiveTimeNanos;
// Timestamp of the last recorded activity (tasks + I/O).
private volatile long lastActivityTimeNanos;
/**
* Tracks the number of consecutive monitor cycles this executor's
* utilization has been below the scale-down threshold.
*/
private volatile int consecutiveIdleCycles;
/**
* Tracks the number of consecutive monitor cycles this executor's
* utilization has been above the scale-up threshold.
*/
private volatile int consecutiveBusyCycles;
private long lastExecutionTime;
@SuppressWarnings({ "FieldMayBeFinal", "unused" })
private volatile int state = ST_NOT_STARTED;
private volatile long gracefulShutdownQuietPeriod;
private volatile long gracefulShutdownTimeout;
private long gracefulShutdownStartTime;
private final Promise<?> terminationFuture = new DefaultPromise<Void>(GlobalEventExecutor.INSTANCE);
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param threadFactory the {@link ThreadFactory} which will be used for the used {@link Thread}
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
*/
protected SingleThreadEventExecutor(
EventExecutorGroup parent, ThreadFactory threadFactory, boolean addTaskWakesUp) {
this(parent, new ThreadPerTaskExecutor(threadFactory), addTaskWakesUp);
}
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param threadFactory the {@link ThreadFactory} which will be used for the used {@link Thread}
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
* @param maxPendingTasks the maximum number of pending tasks before new tasks will be rejected.
* @param rejectedHandler the {@link RejectedExecutionHandler} to use.
*/
protected SingleThreadEventExecutor(
EventExecutorGroup parent, ThreadFactory threadFactory,
boolean addTaskWakesUp, int maxPendingTasks, RejectedExecutionHandler rejectedHandler) {
this(parent, new ThreadPerTaskExecutor(threadFactory), addTaskWakesUp, maxPendingTasks, rejectedHandler);
}
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param threadFactory the {@link ThreadFactory} which will be used for the used {@link Thread}
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
* @param supportSuspension {@code true} if suspension of this {@link SingleThreadEventExecutor} is supported.
* @param maxPendingTasks the maximum number of pending tasks before new tasks will be rejected.
* @param rejectedHandler the {@link RejectedExecutionHandler} to use.
*/
protected SingleThreadEventExecutor(
EventExecutorGroup parent, ThreadFactory threadFactory,
boolean addTaskWakesUp, boolean supportSuspension,
int maxPendingTasks, RejectedExecutionHandler rejectedHandler) {
this(parent, new ThreadPerTaskExecutor(threadFactory), addTaskWakesUp, supportSuspension,
maxPendingTasks, rejectedHandler);
}
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param executor the {@link Executor} which will be used for executing
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
*/
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor, boolean addTaskWakesUp) {
this(parent, executor, addTaskWakesUp, DEFAULT_MAX_PENDING_EXECUTOR_TASKS, RejectedExecutionHandlers.reject());
}
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param executor the {@link Executor} which will be used for executing
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
* @param maxPendingTasks the maximum number of pending tasks before new tasks will be rejected.
* @param rejectedHandler the {@link RejectedExecutionHandler} to use.
*/
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor,
boolean addTaskWakesUp, int maxPendingTasks,
RejectedExecutionHandler rejectedHandler) {
this(parent, executor, addTaskWakesUp, false, maxPendingTasks, rejectedHandler);
}
/**
* Create a new instance
*
* @param parent the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
* @param executor the {@link Executor} which will be used for executing
* @param addTaskWakesUp {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
* executor thread
* @param supportSuspension {@code true} if suspension of this {@link SingleThreadEventExecutor} is supported.
* @param maxPendingTasks the maximum number of pending tasks before new tasks will be rejected.
* @param rejectedHandler the {@link RejectedExecutionHandler} to use.
*/
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor,
boolean addTaskWakesUp, boolean supportSuspension,
int maxPendingTasks, RejectedExecutionHandler rejectedHandler) {
super(parent);
this.addTaskWakesUp = addTaskWakesUp;
this.supportSuspension = supportSuspension;
this.maxPendingTasks = Math.max(16, maxPendingTasks);
this.executor = ThreadExecutorMap.apply(executor, this);
taskQueue = newTaskQueue(this.maxPendingTasks);
rejectedExecutionHandler = ObjectUtil.checkNotNull(rejectedHandler, "rejectedHandler");
lastActivityTimeNanos = ticker().nanoTime();
}
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor,
boolean addTaskWakesUp, Queue<Runnable> taskQueue,
RejectedExecutionHandler rejectedHandler) {
this(parent, executor, addTaskWakesUp, false, taskQueue, rejectedHandler);
}
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor,
boolean addTaskWakesUp, boolean supportSuspension,
Queue<Runnable> taskQueue, RejectedExecutionHandler rejectedHandler) {
super(parent);
this.addTaskWakesUp = addTaskWakesUp;
this.supportSuspension = supportSuspension;
this.maxPendingTasks = DEFAULT_MAX_PENDING_EXECUTOR_TASKS;
this.executor = ThreadExecutorMap.apply(executor, this);
this.taskQueue = ObjectUtil.checkNotNull(taskQueue, "taskQueue");
this.rejectedExecutionHandler = ObjectUtil.checkNotNull(rejectedHandler, "rejectedHandler");
}
/**
* @deprecated Please use and override {@link #newTaskQueue(int)}.
*/
@Deprecated
protected Queue<Runnable> newTaskQueue() {
return newTaskQueue(maxPendingTasks);
}
/**
* Create a new {@link Queue} which will holds the tasks to execute. This default implementation will return a
* {@link LinkedBlockingQueue} but if your sub-
|
SingleThreadEventExecutor
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-common/spi-deployment/src/main/java/io/quarkus/resteasy/reactive/spi/CheckBean.java
|
{
"start": 50,
"end": 107
}
|
interface ____ {
boolean isRegisterAsBean();
}
|
CheckBean
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/naturalid/NaturalIdOnSingleManyToOneTest.java
|
{
"start": 3748,
"end": 4121
}
|
class ____ {
@Id
@GeneratedValue
private Integer id;
private String name;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "NaturalIdOnManyToOne")
@NaturalIdCache
public static
|
State
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
|
{
"start": 64251,
"end": 69123
}
|
class ____ be invoked in order to send a notification after the job
* has completed (success/failure).
*
* A notification url still has to be set which will be passed to
* {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier#notifyOnce(
* java.net.URL, org.apache.hadoop.conf.Configuration)}
* along with the Job's conf.
*
* If this is set instead of using a simple HttpURLConnection
* we'll create a new instance of this class
* which should be an implementation of
* {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier},
* and we'll invoke that.
*
* @param customNotifierClassName the fully-qualified name of the class
* which implements
* {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier}
*
* @see JobConf#setJobEndNotificationURI(java.lang.String)
* @see
* org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS
*/
public void setJobEndNotificationCustomNotifierClass(
String customNotifierClassName) {
set(JobContext.MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS,
customNotifierClassName);
}
/**
* Get job-specific shared directory for use as scratch space
*
* <p>
* When a job starts, a shared directory is created at location
* <code>
* ${mapreduce.cluster.local.dir}/taskTracker/$user/jobcache/$jobid/work/ </code>.
* This directory is exposed to the users through
* <code>mapreduce.job.local.dir </code>.
* So, the tasks can use this space
* as scratch space and share files among them. </p>
* This value is available as System property also.
*
* @return The localized job specific shared directory
*/
public String getJobLocalDir() {
return get(JobContext.JOB_LOCAL_DIR);
}
/**
* Get memory required to run a map task of the job, in MB.
*
* If a value is specified in the configuration, it is returned.
* Else, it returns {@link JobContext#DEFAULT_MAP_MEMORY_MB}.
* <p>
* For backward compatibility, if the job configuration sets the
* key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
* from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
* after converting it from bytes to MB.
* @return memory required to run a map task of the job, in MB,
*/
public long getMemoryForMapTask() {
long value = getDeprecatedMemoryValue();
if (value < 0) {
return getMemoryRequired(TaskType.MAP);
}
return value;
}
public void setMemoryForMapTask(long mem) {
setLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY, mem);
// In case that M/R 1.x applications use the old property name
setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, mem);
}
/**
* Get memory required to run a reduce task of the job, in MB.
*
* If a value is specified in the configuration, it is returned.
* Else, it returns {@link JobContext#DEFAULT_REDUCE_MEMORY_MB}.
* <p>
* For backward compatibility, if the job configuration sets the
* key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
* from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
* after converting it from bytes to MB.
* @return memory required to run a reduce task of the job, in MB.
*/
public long getMemoryForReduceTask() {
long value = getDeprecatedMemoryValue();
if (value < 0) {
return getMemoryRequired(TaskType.REDUCE);
}
return value;
}
// Return the value set to the key MAPRED_TASK_MAXVMEM_PROPERTY,
// converted into MBs.
// Returns DISABLED_MEMORY_LIMIT if unset, or set to a negative
// value.
private long getDeprecatedMemoryValue() {
long oldValue = getLong(MAPRED_TASK_MAXVMEM_PROPERTY,
DISABLED_MEMORY_LIMIT);
if (oldValue > 0) {
oldValue /= (1024*1024);
}
return oldValue;
}
public void setMemoryForReduceTask(long mem) {
setLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, mem);
// In case that M/R 1.x applications use the old property name
setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, mem);
}
/**
* Return the name of the queue to which this job is submitted.
* Defaults to 'default'.
*
* @return name of the queue
*/
public String getQueueName() {
return get(JobContext.QUEUE_NAME, DEFAULT_QUEUE_NAME);
}
/**
* Set the name of the queue to which this job should be submitted.
*
* @param queueName Name of the queue
*/
public void setQueueName(String queueName) {
set(JobContext.QUEUE_NAME, queueName);
}
/**
* Normalize the negative values in configuration
*
* @param val
* @return normalized value
*/
public static long normalizeMemoryConfigValue(long val) {
if (val < 0) {
val = DISABLED_MEMORY_LIMIT;
}
return val;
}
/**
* Find a jar that contains a
|
to
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy-multipart/deployment/src/test/java/io/quarkus/resteasy/multipart/parttype/PartTypeDto.java
|
{
"start": 218,
"end": 545
}
|
class ____ {
@FormParam("myMapping")
@PartType(MediaType.APPLICATION_JSON)
public Map<String, String> myMapping;
@FormParam("flag")
@PartType(MediaType.TEXT_PLAIN)
public boolean flag;
@FormParam("reproduceEnum")
@PartType(MediaType.TEXT_PLAIN)
public PartTypeEnum partTypeEnum;
}
|
PartTypeDto
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnMissingBeanTests.java
|
{
"start": 32924,
"end": 33255
}
|
class ____ {
@Bean
@ConditionalOnMissingBean(parameterizedContainer = TestParameterizedContainer.class)
TestParameterizedContainer<GenericExampleBean<String>> parameterizedContainerGenericExampleBean() {
return new TestParameterizedContainer<>();
}
}
static
|
TypeArgumentsConditionWithParameterizedContainerConfiguration
|
java
|
quarkusio__quarkus
|
extensions/hibernate-reactive/deployment/src/test/java/io/quarkus/hibernate/reactive/dev/Fruit.java
|
{
"start": 439,
"end": 1173
}
|
class ____ {
@Id
@SequenceGenerator(name = "fruitsSequence", sequenceName = "known_fruits_id_seq", allocationSize = 1, initialValue = 10)
@GeneratedValue(generator = "fruitsSequence")
private Integer id;
@Column(length = 40, unique = true)
private String name;
public Fruit() {
}
public Fruit(String name) {
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "Fruit{" + id + "," + name + '}';
}
}
|
Fruit
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
|
{
"start": 11997,
"end": 13083
}
|
class ____ implements Rule {
private final int field;
private final String[] values;
/**
* Constructs an instance of {@link TextField}
* with the specified field and values.
*
* @param field the field.
* @param values the field values.
*/
TextField(final int field, final String[] values) {
this.field = field;
this.values = values;
}
/**
* {@inheritDoc}
*/
@Override
public void appendTo(final Appendable buffer, final Calendar calendar) throws IOException {
buffer.append(values[calendar.get(field)]);
}
/**
* {@inheritDoc}
*/
@Override
public int estimateLength() {
int max = 0;
for (int i = values.length; --i >= 0;) {
final int len = values[i].length();
if (len > max) {
max = len;
}
}
return max;
}
}
/**
* Inner
|
TextField
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/SerializableTokenListCategory.java
|
{
"start": 4840,
"end": 5648
}
|
class ____ the way
// it modifies them when new strings are added.
//
// However, over the years what actually got presented to users got changed. Instead of
// presenting the results of the reverse search a set of terms was presented that consisted of
// all base terms that are present in the common unique tokens, in the order they occur in the
// base tokens (potentially including duplicates). In the C++ code the length is capped at
// 10000 UTF-8 bytes (including separating spaces) and where the initially chosen tokens would
// exceed that length the rarest tokens (across all categories) are picked until the budget
// is reached. We cannot easily do this in the Java code, as the {@link CategorizationBytesRefHash}
//
|
and
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/provider/bug/Injectable.java
|
{
"start": 50,
"end": 227
}
|
class ____ {
private final int instance;
public Injectable(int instance) {
this.instance = instance;
}
public int getInstance() {
return instance;
}
}
|
Injectable
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassPostProcessor.java
|
{
"start": 25834,
"end": 25982
}
|
class
____.setAttribute(AutoProxyUtils.PRESERVE_TARGET_CLASS_ATTRIBUTE, Boolean.TRUE);
// Set enhanced subclass of the user-specified bean
|
beanDef
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/float_/FloatAssert_isGreaterThan_float_Test.java
|
{
"start": 890,
"end": 1216
}
|
class ____ extends FloatAssertBaseTest {
@Override
protected FloatAssert invoke_api_method() {
return assertions.isGreaterThan(6);
}
@Override
protected void verify_internal_effects() {
verify(floats).assertGreaterThan(getInfo(assertions), getActual(assertions), 6f);
}
}
|
FloatAssert_isGreaterThan_float_Test
|
java
|
apache__rocketmq
|
tools/src/main/java/org/apache/rocketmq/tools/command/consumer/GetConsumerConfigSubCommand.java
|
{
"start": 1620,
"end": 5092
}
|
class ____ implements SubCommand {
@Override
public String commandName() {
return "getConsumerConfig";
}
@Override
public String commandDesc() {
return "Get consumer config by subscription group name.";
}
@Override
public Options buildCommandlineOptions(final Options options) {
Option opt = new Option("g", "groupName", true, "subscription group name");
opt.setRequired(true);
options.addOption(opt);
return options;
}
@Override
public void execute(CommandLine commandLine, Options options,
RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt adminExt = new DefaultMQAdminExt(rpcHook);
adminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
String groupName = commandLine.getOptionValue('g').trim();
if (commandLine.hasOption('n')) {
adminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
}
try {
adminExt.start();
List<ConsumerConfigInfo> consumerConfigInfoList = new ArrayList<>();
ClusterInfo clusterInfo = adminExt.examineBrokerClusterInfo();
Map<String, Set<String>> clusterAddrTable = clusterInfo.getClusterAddrTable();
for (Entry<String, BrokerData> brokerEntry : clusterInfo.getBrokerAddrTable().entrySet()) {
String clusterName = this.getClusterName(brokerEntry.getKey(), clusterAddrTable);
String brokerAddress = brokerEntry.getValue().selectBrokerAddr();
SubscriptionGroupConfig subscriptionGroupConfig = adminExt.examineSubscriptionGroupConfig(brokerAddress, groupName);
if (subscriptionGroupConfig == null) {
continue;
}
consumerConfigInfoList.add(new ConsumerConfigInfo(clusterName, brokerEntry.getKey(), subscriptionGroupConfig));
}
if (CollectionUtils.isEmpty(consumerConfigInfoList)) {
return;
}
for (ConsumerConfigInfo info : consumerConfigInfoList) {
System.out.printf("=============================%s:%s=============================\n",
info.getClusterName(), info.getBrokerName());
SubscriptionGroupConfig config = info.getSubscriptionGroupConfig();
Field[] fields = config.getClass().getDeclaredFields();
for (Field field : fields) {
field.setAccessible(true);
if (field.get(config) != null) {
System.out.printf("%s%-40s= %s\n", "", field.getName(), field.get(config).toString());
} else {
System.out.printf("%s%-40s= %s\n", "", field.getName(), "");
}
}
}
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
adminExt.shutdown();
}
}
private String getClusterName(String brokeName, Map<String, Set<String>> clusterAddrTable) {
for (Map.Entry<String, Set<String>> entry : clusterAddrTable.entrySet()) {
Set<String> brokerNameSet = entry.getValue();
if (brokerNameSet.contains(brokeName)) {
return entry.getKey();
}
}
return null;
}
}
|
GetConsumerConfigSubCommand
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/streaming/util/FiniteTestSource.java
|
{
"start": 1612,
"end": 4507
}
|
class ____<T> implements SourceFunction<T>, CheckpointListener {
private static final long serialVersionUID = 1L;
@SuppressWarnings("NonSerializableFieldInSerializableClass")
private final Iterable<T> elements;
private volatile boolean running = true;
private transient int numCheckpointsComplete;
@Nullable private final BooleanSupplier couldExit;
private final long waitTimeOut;
@SafeVarargs
public FiniteTestSource(T... elements) {
this(null, 0, Arrays.asList(elements));
}
public FiniteTestSource(Iterable<T> elements) {
this(null, 0, elements);
}
public FiniteTestSource(
@Nullable BooleanSupplier couldExit, long waitTimeOut, Iterable<T> elements) {
checkState(waitTimeOut >= 0);
this.couldExit = couldExit;
this.waitTimeOut = waitTimeOut;
this.elements = elements;
}
public FiniteTestSource(@Nullable BooleanSupplier couldExit, Iterable<T> elements) {
this.couldExit = couldExit;
this.waitTimeOut = 30_000;
this.elements = elements;
}
@Override
public void run(SourceContext<T> ctx) throws Exception {
// first round of sending the elements and waiting for the checkpoints
emitElementsAndWaitForCheckpoints(ctx, 2);
// second round of the same
emitElementsAndWaitForCheckpoints(ctx, 2);
// verify the source could exit or not
if (couldExit != null) {
final long beginTime = System.currentTimeMillis();
synchronized (ctx.getCheckpointLock()) {
while (running && !couldExit.getAsBoolean()) {
ctx.getCheckpointLock().wait(10);
if ((System.currentTimeMillis() - beginTime) > waitTimeOut) {
throw new TimeoutException(
"Wait source exit time out " + waitTimeOut + "ms.");
}
}
}
}
}
private void emitElementsAndWaitForCheckpoints(SourceContext<T> ctx, int checkpointsToWaitFor)
throws InterruptedException {
final Object lock = ctx.getCheckpointLock();
final int checkpointToAwait;
synchronized (lock) {
checkpointToAwait = numCheckpointsComplete + checkpointsToWaitFor;
for (T t : elements) {
ctx.collect(t);
}
}
synchronized (lock) {
while (running && numCheckpointsComplete < checkpointToAwait) {
lock.wait(1);
}
}
}
@Override
public void cancel() {
running = false;
}
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
numCheckpointsComplete++;
}
@Override
public void notifyCheckpointAborted(long checkpointId) {}
}
|
FiniteTestSource
|
java
|
quarkusio__quarkus
|
extensions/smallrye-jwt/deployment/src/test/java/io/quarkus/jwt/test/DefaultGroupsUnitTest.java
|
{
"start": 294,
"end": 2142
}
|
class ____ {
private static Class<?>[] testClasses = {
DefaultGroupsEndpoint.class,
CustomSecurityIdentityAugmentor.class,
TokenUtils.class
};
/**
* The test generated JWT token string
*/
private String token;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(testClasses)
.addAsResource("publicKey.pem")
.addAsResource("privateKey.pem")
.addAsResource("TokenNoGroups.json")
.addAsResource("applicationDefaultGroups.properties", "application.properties"));
@BeforeEach
public void generateToken() throws Exception {
token = TokenUtils.generateTokenString("/TokenNoGroups.json");
}
/**
* Validate a request with MP-JWT without a 'groups' claim is successful
* due to the default value being provided in the configuration
*
*/
@Test
public void echoGroups() {
RestAssured.given().auth()
.oauth2(token)
.get("/endp/echo")
.then().assertThat().statusCode(200)
.body(equalTo("User"));
}
@Test
public void checkRoutingContext() {
RestAssured.given().auth()
.oauth2(token)
.get("/endp/routingContext")
.then().assertThat().statusCode(200)
.body(equalTo("User; routing-context-available:true"));
}
@Test
public void echoGroupsWithParser() {
RestAssured.given().auth()
.oauth2(token)
.get("/endp/echo-parser")
.then().assertThat().statusCode(200)
.body(equalTo("parser:User"));
}
}
|
DefaultGroupsUnitTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jackson/src/main/java/org/springframework/boot/jackson/JacksonMixinModuleEntriesBeanRegistrationAotProcessor.java
|
{
"start": 2330,
"end": 4795
}
|
class ____ extends BeanRegistrationCodeFragmentsDecorator {
private static final Class<?> BEAN_TYPE = JacksonMixinModuleEntries.class;
private final RegisteredBean registeredBean;
private final @Nullable ClassLoader classLoader;
AotContribution(BeanRegistrationCodeFragments delegate, RegisteredBean registeredBean) {
super(delegate);
this.registeredBean = registeredBean;
this.classLoader = registeredBean.getBeanFactory().getBeanClassLoader();
}
@Override
public ClassName getTarget(RegisteredBean registeredBean) {
return ClassName.get(BEAN_TYPE);
}
@Override
public CodeBlock generateInstanceSupplierCode(GenerationContext generationContext,
BeanRegistrationCode beanRegistrationCode, boolean allowDirectSupplierShortcut) {
JacksonMixinModuleEntries entries = this.registeredBean.getBeanFactory()
.getBean(this.registeredBean.getBeanName(), JacksonMixinModuleEntries.class);
contributeHints(generationContext.getRuntimeHints(), entries);
GeneratedMethod generatedMethod = beanRegistrationCode.getMethods().add("getInstance", (method) -> {
method.addJavadoc("Get the bean instance for '$L'.", this.registeredBean.getBeanName());
method.addModifiers(Modifier.PRIVATE, Modifier.STATIC);
method.returns(BEAN_TYPE);
CodeBlock.Builder code = CodeBlock.builder();
code.add("return $T.create(", JacksonMixinModuleEntries.class).beginControlFlow("(mixins) ->");
entries.doWithEntry(this.classLoader, (type, mixin) -> addEntryCode(code, type, mixin));
code.endControlFlow(")");
method.addCode(code.build());
});
return generatedMethod.toMethodReference().toCodeBlock();
}
private void addEntryCode(CodeBlock.Builder code, Class<?> type, Class<?> mixin) {
AccessControl accessForTypes = AccessControl.lowest(AccessControl.forClass(type),
AccessControl.forClass(mixin));
if (accessForTypes.isPublic()) {
code.addStatement("$L.and($T.class, $T.class)", "mixins", type, mixin);
}
else {
code.addStatement("$L.and($S, $S)", "mixins", type.getName(), mixin.getName());
}
}
private void contributeHints(RuntimeHints runtimeHints, JacksonMixinModuleEntries entries) {
Set<Class<?>> mixins = new LinkedHashSet<>();
entries.doWithEntry(this.classLoader, (type, mixin) -> mixins.add(mixin));
new BindingReflectionHintsRegistrar().registerReflectionHints(runtimeHints.reflection(),
mixins.toArray(Class<?>[]::new));
}
}
}
|
AotContribution
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-s3/src/main/java/org/apache/camel/component/aws2/s3/utils/AWS2S3Utils.java
|
{
"start": 1529,
"end": 6873
}
|
class ____ {
private AWS2S3Utils() {
}
/**
* Reads the bucket name from the header of the given exchange. If not provided, it's read from the endpoint
* configuration.
*
* @param exchange The exchange to read the header from
* @param configuration The AWS2 S3 configuration
* @return The bucket name.
* @throws IllegalArgumentException if the header could not be determined.
*/
public static String determineBucketName(final Exchange exchange, AWS2S3Configuration configuration) {
String bucketName = exchange.getIn().getHeader(AWS2S3Constants.OVERRIDE_BUCKET_NAME, String.class);
if (ObjectHelper.isEmpty(bucketName)) {
bucketName = configuration.getBucketName();
}
if (bucketName == null) {
throw new IllegalArgumentException("AWS S3 Bucket name header is missing or not configured.");
}
// dynamic keys using built-in simple language
if (hasSimpleFunction(bucketName)) {
Language simple = exchange.getContext().resolveLanguage("simple");
bucketName = simple.createExpression(bucketName).evaluate(exchange, String.class);
}
return bucketName;
}
public static String determineStorageClass(final Exchange exchange, AWS2S3Configuration configuration) {
String storageClass = exchange.getIn().getHeader(AWS2S3Constants.STORAGE_CLASS, String.class);
if (storageClass == null) {
storageClass = configuration.getStorageClass();
}
return storageClass;
}
public static String determineFileExtension(String keyName) {
int extPosition = keyName.lastIndexOf(".");
if (extPosition == -1) {
return "";
} else {
return keyName.substring(extPosition);
}
}
public static String determineFileName(String keyName) {
int extPosition = keyName.lastIndexOf(".");
if (extPosition == -1) {
return keyName;
} else {
return keyName.substring(0, extPosition);
}
}
public static long determineLengthInputStream(InputStream is) throws IOException {
if (is instanceof StreamCache) {
long len = ((StreamCache) is).length();
if (len > 0) {
return len;
}
} else if (is instanceof FileInputStream fis) {
return fis.getChannel().size();
}
if (!is.markSupported()) {
return -1;
}
if (is instanceof ByteArrayInputStream) {
return is.available();
}
long size = 0;
try {
is.mark(1024);
int i = is.available();
while (i > 0) {
long skip = is.skip(i);
size += skip;
i = is.available();
}
} finally {
is.reset();
}
return size;
}
public static byte[] toByteArray(InputStream is, final int size) throws IOException {
try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
byte[] data = new byte[4096];
int total = 0;
int n = 0;
while (total < size && (n = is.read(data)) != -1) {
output.write(data, 0, n);
total += n;
}
return output.toByteArray();
}
}
public static String determineKey(final Exchange exchange, AWS2S3Configuration configuration) {
String key = exchange.getIn().getHeader(AWS2S3Constants.KEY, String.class);
if (ObjectHelper.isEmpty(key)) {
key = configuration.getKeyName();
}
if (key == null) {
throw new IllegalArgumentException("AWS S3 Key header missing.");
}
// dynamic keys using built-in simple language
if (hasSimpleFunction(key)) {
Language simple = exchange.getContext().resolveLanguage("simple");
key = simple.createExpression(key).evaluate(exchange, String.class);
}
return key;
}
public static void setEncryption(
CreateMultipartUploadRequest.Builder createMultipartUploadRequest, AWS2S3Configuration configuration) {
if (configuration.isUseAwsKMS()) {
createMultipartUploadRequest.ssekmsKeyId(configuration.getAwsKMSKeyId());
createMultipartUploadRequest.serverSideEncryption(ServerSideEncryption.AWS_KMS);
}
if (configuration.isUseSSES3()) {
createMultipartUploadRequest.serverSideEncryption(ServerSideEncryption.AES256);
}
if (configuration.isUseCustomerKey()) {
if (ObjectHelper.isNotEmpty(configuration.getCustomerKeyId())) {
createMultipartUploadRequest.sseCustomerKey(configuration.getCustomerKeyId());
}
if (ObjectHelper.isNotEmpty(configuration.getCustomerKeyMD5())) {
createMultipartUploadRequest.sseCustomerKeyMD5(configuration.getCustomerKeyMD5());
}
if (ObjectHelper.isNotEmpty(configuration.getCustomerAlgorithm())) {
createMultipartUploadRequest.sseCustomerAlgorithm(configuration.getCustomerAlgorithm());
}
}
}
}
|
AWS2S3Utils
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/LinkedListMultimap.java
|
{
"start": 4408,
"end": 4865
}
|
class ____<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMultimap<K, V> implements ListMultimap<K, V>, Serializable {
/*
* Order is maintained using a linked list containing all key-value pairs. In
* addition, a series of disjoint linked lists of "siblings", each containing
* the values for a specific key, is used to implement {@link
* ValueForKeyIterator} in constant time.
*/
static final
|
LinkedListMultimap
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/cache/NoCacheOnMethodsTest.java
|
{
"start": 560,
"end": 1803
}
|
class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(ResourceWithNoCache.class);
}
});
@Test
public void testWithFields() {
RestAssured.get("/test/withFields")
.then()
.statusCode(200)
.body(equalTo("withFields"))
.header("Cache-Control", "no-cache=\"f1\", no-cache=\"f2\"");
}
@Test
public void testWithoutFields() {
RestAssured.get("/test/withoutFields")
.then()
.statusCode(200)
.body(equalTo("withoutFields"))
.header("Cache-Control", "no-cache");
}
@Test
public void testWithoutAnnotation() {
RestAssured.get("/test/withoutAnnotation")
.then()
.statusCode(200)
.body(equalTo("withoutAnnotation"))
.header("Cache-Control", nullValue());
}
@Path("test")
public static
|
NoCacheOnMethodsTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java
|
{
"start": 775,
"end": 2703
}
|
class ____ extends BinaryProcessor {
public static final String NAME = "geo_distance";
public StDistanceProcessor(Processor source1, Processor source2) {
super(source1, source2);
}
public StDistanceProcessor(StreamInput in) throws IOException {
super(in);
}
@Override
protected void doWrite(StreamOutput out) throws IOException {
}
@Override
public Object process(Object input) {
Object l = left().process(input);
checkParameter(l);
Object r = right().process(input);
checkParameter(r);
return doProcess(l, r);
}
@Override
protected Object doProcess(Object left, Object right) {
return process(left, right);
}
public static Double process(Object source1, Object source2) {
if (source1 == null || source2 == null) {
return null;
}
if (source1 instanceof GeoShape == false) {
throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source1);
}
if (source2 instanceof GeoShape == false) {
throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source2);
}
return GeoShape.distance((GeoShape) source1, (GeoShape) source2);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
StDistanceProcessor other = (StDistanceProcessor) obj;
return Objects.equals(left(), other.left()) && Objects.equals(right(), other.right());
}
@Override
public int hashCode() {
return Objects.hash(left(), right());
}
}
|
StDistanceProcessor
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/backquotes/Printer.java
|
{
"start": 291,
"end": 422
}
|
class ____ {
private Long id;
@Id
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
}
|
Printer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/results/internal/RowTransformerArrayImpl.java
|
{
"start": 317,
"end": 656
}
|
class ____ implements RowTransformer<Object[]> {
/**
* Singleton access
*/
private static final RowTransformerArrayImpl INSTANCE = new RowTransformerArrayImpl();
public static RowTransformerArrayImpl instance() {
return INSTANCE;
}
@Override
public Object[] transformRow(Object[] row) {
return row;
}
}
|
RowTransformerArrayImpl
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java
|
{
"start": 1563,
"end": 4421
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestResourceHandlerModule.class);
private Configuration emptyConf;
private Configuration networkEnabledConf;
@BeforeEach
public void setup() throws Exception {
emptyConf = new YarnConfiguration();
networkEnabledConf = new YarnConfiguration();
networkEnabledConf.setBoolean(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED,
true);
ResourceHandlerModule.nullifyResourceHandlerChain();
}
@Test
public void testOutboundBandwidthHandler() {
try {
//This resourceHandler should be non-null only if network as a resource
//is explicitly enabled
OutboundBandwidthResourceHandler resourceHandler = ResourceHandlerModule
.initOutboundBandwidthResourceHandler(emptyConf);
assertNull(resourceHandler);
//When network as a resource is enabled this should be non-null
resourceHandler = ResourceHandlerModule
.initOutboundBandwidthResourceHandler(networkEnabledConf);
assertNotNull(resourceHandler);
//Ensure that outbound bandwidth resource handler is present in the chain
ResourceHandlerChain resourceHandlerChain = ResourceHandlerModule
.getConfiguredResourceHandlerChain(networkEnabledConf,
mock(Context.class));
if (resourceHandlerChain != null) {
List<ResourceHandler> resourceHandlers = resourceHandlerChain
.getResourceHandlerList();
//Exactly one resource handler in chain
assertThat(resourceHandlers).hasSize(1);
//Same instance is expected to be in the chain.
assertTrue(resourceHandlers.get(0) == resourceHandler);
} else {
fail("Null returned");
}
} catch (ResourceHandlerException e) {
fail("Unexpected ResourceHandlerException: " + e);
}
}
@Test
public void testDiskResourceHandler() throws Exception {
DiskResourceHandler handler =
ResourceHandlerModule.initDiskResourceHandler(emptyConf);
assertNull(handler);
Configuration diskConf = new YarnConfiguration();
diskConf.setBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, true);
handler = ResourceHandlerModule.initDiskResourceHandler(diskConf);
assertNotNull(handler);
ResourceHandlerChain resourceHandlerChain =
ResourceHandlerModule.getConfiguredResourceHandlerChain(diskConf,
mock(Context.class));
if (resourceHandlerChain != null) {
List<ResourceHandler> resourceHandlers =
resourceHandlerChain.getResourceHandlerList();
// Exactly one resource handler in chain
assertThat(resourceHandlers).hasSize(1);
// Same instance is expected to be in the chain.
assertTrue(resourceHandlers.get(0) == handler);
} else {
fail("Null returned");
}
}
}
|
TestResourceHandlerModule
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/handler/annotation/support/HeadersMethodArgumentResolver.java
|
{
"start": 1475,
"end": 3140
}
|
class ____ implements HandlerMethodArgumentResolver {
@Override
public boolean supportsParameter(MethodParameter parameter) {
Class<?> paramType = parameter.getParameterType();
return ((parameter.hasParameterAnnotation(Headers.class) && Map.class.isAssignableFrom(paramType)) ||
MessageHeaders.class == paramType || MessageHeaderAccessor.class.isAssignableFrom(paramType));
}
@Override
public @Nullable Object resolveArgument(MethodParameter parameter, Message<?> message) throws Exception {
Class<?> paramType = parameter.getParameterType();
if (Map.class.isAssignableFrom(paramType)) {
return message.getHeaders();
}
else if (MessageHeaderAccessor.class == paramType) {
MessageHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class);
return accessor != null ? accessor : new MessageHeaderAccessor(message);
}
else if (MessageHeaderAccessor.class.isAssignableFrom(paramType)) {
MessageHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class);
if (accessor != null && paramType.isAssignableFrom(accessor.getClass())) {
return accessor;
}
else {
Method method = ReflectionUtils.findMethod(paramType, "wrap", Message.class);
if (method == null) {
throw new IllegalStateException(
"Cannot create accessor of type " + paramType + " for message " + message);
}
return ReflectionUtils.invokeMethod(method, null, message);
}
}
else {
throw new IllegalStateException("Unexpected parameter of type " + paramType +
" in method " + parameter.getMethod() + ". ");
}
}
}
|
HeadersMethodArgumentResolver
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/env/PropertySourceLocator.java
|
{
"start": 811,
"end": 1082
}
|
interface ____ {
/**
* Locate a {@link PropertySource} for the given environment.
*
* @param environment The environment
* @return The located property source
*/
Optional<PropertySource> load(Environment environment);
}
|
PropertySourceLocator
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/map/MapAssert_isNullOrEmpty_Test.java
|
{
"start": 939,
"end": 1389
}
|
class ____ extends MapAssertBaseTest {
@Override
protected MapAssert<Object, Object> invoke_api_method() {
assertions.isNullOrEmpty();
return null;
}
@Override
protected void verify_internal_effects() {
verify(maps).assertNullOrEmpty(getInfo(assertions), getActual(assertions));
}
@Override
@Test
public void should_return_this() {
// Disable this test because isNullOrEmpty is void
}
}
|
MapAssert_isNullOrEmpty_Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/metadata/DataSourcePoolMetadataProvider.java
|
{
"start": 905,
"end": 1296
}
|
interface ____ {
/**
* Return the {@link DataSourcePoolMetadata} instance able to manage the specified
* {@link DataSource} or {@code null} if the given data source could not be handled.
* @param dataSource the data source
* @return the data source pool metadata
*/
@Nullable DataSourcePoolMetadata getDataSourcePoolMetadata(DataSource dataSource);
}
|
DataSourcePoolMetadataProvider
|
java
|
google__guice
|
core/src/com/google/inject/internal/RealMapBinder.java
|
{
"start": 43084,
"end": 44916
}
|
class ____<K, V> implements Module {
private final BindingSelection<K, V> bindingSelection;
private MultimapBinder(BindingSelection<K, V> bindingSelection) {
this.bindingSelection = bindingSelection;
}
@SuppressWarnings({"unchecked", "rawtypes"}) // we use raw Key to link bindings together.
@Override
public void configure(Binder binder) {
// Binds a Map<K, Set<Provider<V>>>
binder
.bind(bindingSelection.getProviderSetMultimapKey())
.toProvider(new RealProviderMultimapProvider<K, V>(bindingSelection.getMapKey()));
// Provide links from a few different public keys to the providerMultimapKey.
// The collection this exposes is internally an ImmutableMap, so it's OK to massage
// the guice Provider to jakarta Provider in the value (since the guice Provider implements
// jakarta Provider).
binder
.bind(bindingSelection.getJakartaProviderSetMultimapKey())
.to((Key) bindingSelection.getProviderSetMultimapKey());
binder
.bind(bindingSelection.getProviderCollectionMultimapKey())
.to((Key) bindingSelection.getProviderSetMultimapKey());
binder
.bind(bindingSelection.getJakartaProviderCollectionMultimapKey())
.to((Key) bindingSelection.getProviderSetMultimapKey());
// Binds a Map<K, Set<V>>
binder
.bind(bindingSelection.getMultimapKey())
.toProvider(new RealMultimapProvider(bindingSelection.getMapKey()));
}
@Override
public int hashCode() {
return bindingSelection.hashCode();
}
@Override
public boolean equals(Object o) {
return o instanceof MultimapBinder
&& ((MultimapBinder<?, ?>) o).bindingSelection.equals(bindingSelection);
}
private static final
|
MultimapBinder
|
java
|
quarkusio__quarkus
|
extensions/netty/runtime/src/main/java/io/quarkus/netty/runtime/graal/HttpContentCompressorSubstitutions.java
|
{
"start": 428,
"end": 1096
}
|
class ____ {
@Substitute
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) throws Exception {
throw new UnsupportedOperationException();
}
@Substitute
protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) {
throw new UnsupportedOperationException();
}
@Substitute
public void flush(final ChannelHandlerContext ctx) {
throw new UnsupportedOperationException();
}
}
@Substitute
@TargetClass(className = "io.netty.handler.codec.compression.ZstdConstants", onlyWith = IsZstdAbsent.class)
final
|
Target_io_netty_handler_codec_compression_ZstdEncoder
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/convert/UpdateViaObjectReaderTest.java
|
{
"start": 2729,
"end": 3505
}
|
class ____ extends StdDeserializer<AnimalWrapper> {
public AnimalWrapperDeserializer() {
super(AnimalWrapper.class);
}
@Override
public AnimalWrapper deserialize(JsonParser json, DeserializationContext context)
{
AnimalWrapper msg = new AnimalWrapper();
msg.setAnimal(json.readValueAs(AbstractAnimal.class));
return msg;
}
@Override
public AnimalWrapper deserialize(JsonParser json, DeserializationContext context, AnimalWrapper intoValue)
{
intoValue.setAnimal(json.readValueAs(AbstractAnimal.class));
return intoValue;
}
}
@JsonDeserialize(using = Custom3814DeserializerA.class)
static
|
AnimalWrapperDeserializer
|
java
|
grpc__grpc-java
|
istio-interop-testing/src/main/java/io/grpc/testing/istio/EchoTestServer.java
|
{
"start": 9710,
"end": 11178
}
|
class ____ implements ServerInterceptor {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call,
final Metadata requestHeaders, ServerCallHandler<ReqT, RespT> next) {
final String methodName = call.getMethodDescriptor().getBareMethodName();
// we need this processing only for Echo
if (!"Echo".equals(methodName)) {
return next.startCall(call, requestHeaders);
}
final SocketAddress peerAddress = call.getAttributes()
.get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR);
Context ctx = Context.current();
if (peerAddress instanceof InetSocketAddress) {
InetSocketAddress inetPeerAddress = (InetSocketAddress) peerAddress;
ctx = ctx.withValue(CLIENT_ADDRESS_CONTEXT_KEY,
inetPeerAddress.getAddress().getHostAddress());
}
ctx = ctx.withValue(AUTHORITY_CONTEXT_KEY, call.getAuthority());
Map<String, String> requestHeadersCopy = new HashMap<>();
for (String key : requestHeaders.keys()) {
if (!key.endsWith("-bin")) {
requestHeadersCopy.put(key,
requestHeaders.get(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)));
}
}
ctx = ctx.withValue(REQUEST_HEADERS_CONTEXT_KEY, requestHeadersCopy);
return Contexts.interceptCall(
ctx,
call,
requestHeaders,
next);
}
}
private static
|
EchoTestServerInterceptor
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java
|
{
"start": 3769,
"end": 5335
}
|
interface ____ {
void accept(LeafBucketCollector sub, int doc, long subBucketOrdinal) throws IOException;
}
@Override
protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
BucketCollector collector;
if (parent() == null) {
grow(ranges.length);
collector = this::collectExistingBucket;
} else {
collector = this::collectBucket;
}
if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) {
SortedSetDocValues values = ((ValuesSource.Bytes.WithOrdinals) valuesSource).ordinalsValues(aggCtx.getLeafReaderContext());
return new SortedSetRangeLeafCollector(values, ranges, sub) {
@Override
protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException {
collector.accept(sub, doc, bucket);
}
};
} else {
SortedBinaryDocValues values = valuesSource.bytesValues(aggCtx.getLeafReaderContext());
return new SortedBinaryRangeLeafCollector(values, ranges, sub) {
@Override
protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException {
collector.accept(sub, doc, bucket);
}
};
}
}
abstract static
|
BucketCollector
|
java
|
apache__camel
|
components/camel-openstack/src/main/java/org/apache/camel/component/openstack/cinder/producer/SnapshotProducer.java
|
{
"start": 1573,
"end": 5443
}
|
class ____ extends AbstractOpenstackProducer {
public SnapshotProducer(CinderEndpoint endpoint, OSClient client) {
super(endpoint, client);
}
@Override
public void process(Exchange exchange) throws Exception {
String operation = getOperation(exchange);
switch (operation) {
case OpenstackConstants.CREATE:
doCreate(exchange);
break;
case OpenstackConstants.GET:
doGet(exchange);
break;
case OpenstackConstants.GET_ALL:
doGetAll(exchange);
break;
case OpenstackConstants.UPDATE:
doUpdate(exchange);
break;
case OpenstackConstants.DELETE:
doDelete(exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation " + operation);
}
}
private void doCreate(Exchange exchange) {
final Message msg = exchange.getIn();
final VolumeSnapshot in = messageToSnapshot(msg);
final VolumeSnapshot out = os.blockStorage().snapshots().create(in);
msg.setBody(out);
}
private void doGet(Exchange exchange) {
final Message msg = exchange.getIn();
final String id
= msg.getHeader(OpenstackConstants.ID, msg.getHeader(CinderConstants.SNAPSHOT_ID, String.class), String.class);
StringHelper.notEmpty(id, "Snapshot ID");
final VolumeSnapshot out = os.blockStorage().snapshots().get(id);
msg.setBody(out);
}
private void doGetAll(Exchange exchange) {
final List<? extends VolumeSnapshot> out = os.blockStorage().snapshots().list();
exchange.getIn().setBody(out);
}
private void doUpdate(Exchange exchange) {
final Message msg = exchange.getIn();
final String id
= msg.getHeader(OpenstackConstants.ID, msg.getHeader(CinderConstants.SNAPSHOT_ID, String.class), String.class);
final VolumeSnapshot vs = messageToSnapshot(msg);
StringHelper.notEmpty(id, "Cinder Snapshot ID");
final ActionResponse out = os.blockStorage().snapshots().update(id, vs.getName(), vs.getDescription());
checkFailure(out, exchange, "Update volume snapshot " + id);
}
private void doDelete(Exchange exchange) {
final Message msg = exchange.getIn();
final String id
= msg.getHeader(OpenstackConstants.ID, msg.getHeader(CinderConstants.SNAPSHOT_ID, String.class), String.class);
StringHelper.notEmpty(id, "Cinder Snapshot ID");
final ActionResponse out = os.blockStorage().snapshots().delete(id);
checkFailure(out, exchange, "Delete snapshot " + id);
}
private VolumeSnapshot messageToSnapshot(Message message) {
VolumeSnapshot volume = message.getBody(VolumeSnapshot.class);
if (volume == null) {
Map headers = message.getHeaders();
VolumeSnapshotBuilder builder = Builders.volumeSnapshot();
final String name = message.getHeader(OpenstackConstants.NAME, String.class);
StringHelper.notEmpty(name, "Name");
builder.name(name);
if (headers.containsKey(OpenstackConstants.DESCRIPTION)) {
builder.description(message.getHeader(OpenstackConstants.DESCRIPTION, String.class));
}
if (headers.containsKey(CinderConstants.VOLUME_ID)) {
builder.volume(message.getHeader(CinderConstants.VOLUME_ID, String.class));
}
if (headers.containsKey(CinderConstants.FORCE)) {
builder.force(message.getHeader(CinderConstants.FORCE, Boolean.class));
}
volume = builder.build();
}
return volume;
}
}
|
SnapshotProducer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/tool/schema/spi/SchemaManagementToolCoordinator.java
|
{
"start": 16695,
"end": 18113
}
|
interface ____ {
Object getSourceTypeSetting(Map<?,?> configurationValues);
Object getScriptSourceSetting(Map<?,?> configurationValues);
Object getScriptTargetSetting(Map<?,?> configurationValues);
}
private static Object settingValue(Map<?,?> configuration, String referenceKey, String legacyKey) {
final Object setting = configuration.get( referenceKey );
if ( setting != null ) {
return setting;
}
else {
final Object legacySetting = configuration.get( legacyKey );
if ( legacySetting != null ) {
DEPRECATION_LOGGER.deprecatedSetting( referenceKey, legacyKey );
}
return legacySetting;
}
}
private static Object actionSettingValue(
Map<?, ?> configuration, String contributor,
String jakartaSettingName, String javaxSettingName) {
final Object actionSetting =
configuration.get( qualify( contributor, jakartaSettingName ) );
if ( actionSetting != null ) {
return actionSetting;
}
else {
final Object deprecatedActionSetting =
configuration.get( qualify( contributor, javaxSettingName ) );
if ( deprecatedActionSetting != null ) {
DEPRECATION_LOGGER.deprecatedSetting( javaxSettingName, jakartaSettingName );
}
return deprecatedActionSetting;
}
}
private static String qualify(String contributor, String settingName) {
return contributor == null ? settingName : settingName + '.' + contributor;
}
private static
|
SettingSelector
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java
|
{
"start": 5114,
"end": 6999
}
|
class ____ extends TypedChainTaskExecutor<Void> {
private final Client client;
private final EvaluationParameters parameters;
private final EvaluateDataFrameAction.Request request;
private final Evaluation evaluation;
private final SecurityContext securityContext;
EvaluationExecutor(
ThreadPool threadPool,
Client client,
EvaluationParameters parameters,
EvaluateDataFrameAction.Request request,
SecurityContext securityContext
) {
super(threadPool.generic(), Predicates.always(), Predicates.always());
this.client = client;
this.parameters = parameters;
this.request = request;
this.evaluation = request.getEvaluation();
this.securityContext = securityContext;
// Add one task only. Other tasks will be added as needed by the nextTask method itself.
add(nextTask());
}
private TypedChainTaskExecutor.ChainTask<Void> nextTask() {
return listener -> {
SearchSourceBuilder searchSourceBuilder = evaluation.buildSearch(parameters, request.getParsedQuery());
SearchRequest searchRequest = new SearchRequest(request.getIndices()).source(searchSourceBuilder);
useSecondaryAuthIfAvailable(
securityContext,
() -> client.execute(TransportSearchAction.TYPE, searchRequest, listener.delegateFailureAndWrap((l, searchResponse) -> {
evaluation.process(searchResponse);
if (evaluation.hasAllResults() == false) {
add(nextTask());
}
l.onResponse(null);
}))
);
};
}
}
}
|
EvaluationExecutor
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/collection/adder/source/SingleElementSource.java
|
{
"start": 244,
"end": 423
}
|
class ____ {
private String pet;
public String getPet() {
return pet;
}
public void setPet(String pet) {
this.pet = pet;
}
}
|
SingleElementSource
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customexceptions/InvalidConditionalMappersTest.java
|
{
"start": 1611,
"end": 1861
}
|
class ____ {
@IfBuildProfile("test")
@ServerExceptionMapper
public Response request(IllegalArgumentException ignored) {
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
}
}
}
|
Mappers
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/SetHeadersDefinition.java
|
{
"start": 1527,
"end": 4278
}
|
class ____ extends ProcessorDefinition<SetHeadersDefinition> {
/**
* This is provided to support XML and YAML DSL
*/
@XmlElementRef(name = "headers")
private List<SetHeaderDefinition> headers = new java.util.ArrayList<>();
public SetHeadersDefinition() {
}
protected SetHeadersDefinition(SetHeadersDefinition source) {
super(source);
this.headers = ProcessorDefinitionHelper.deepCopyDefinitions(source.headers);
}
@Override
public SetHeadersDefinition copyDefinition() {
return new SetHeadersDefinition(this);
}
/**
* Allow setting multiple headers using a single expression.
*/
public SetHeadersDefinition(Object... headerNamesAndExprs) {
createSetHeaderDefinitions(headerNamesAndExprs);
}
private void createSetHeaderDefinitions(Object[] headerNamesAndExprs) {
if (headerNamesAndExprs.length == 1 && headerNamesAndExprs[0] instanceof Map) {
createHeadersFromMap((Map<?, ?>) headerNamesAndExprs[0]);
} else if (headerNamesAndExprs.length % 2 != 0) {
throw new IllegalArgumentException("Must be a Map or have an even number of arguments!");
} else {
for (int i = 0; i < headerNamesAndExprs.length; i += 2) {
addHeader(headerNamesAndExprs[i], headerNamesAndExprs[i + 1]);
}
}
}
private void addHeader(Object key, Object value) {
if (!(key instanceof String)) {
throw new IllegalArgumentException("Keys must be Strings");
}
if (!(value instanceof Expression)) {
// Assume it's a constant of some kind
value = ExpressionBuilder.constantExpression(value);
}
headers.add(new SetHeaderDefinition((String) key, (Expression) value));
}
private void createHeadersFromMap(Map<?, ?> headerMap) {
for (Entry<?, ?> entry : headerMap.entrySet()) {
addHeader(entry.getKey(), entry.getValue());
}
}
public List<SetHeaderDefinition> getHeaders() {
return headers;
}
public void setHeaders(List<SetHeaderDefinition> headers) {
this.headers = headers;
}
@Override
public String getLabel() {
return "setHeaders[" + getHeaderNames() + "]";
}
private String getHeaderNames() {
StringJoiner sb = new StringJoiner(",");
for (SetHeaderDefinition def : headers) {
sb.add(def.getName());
}
return sb.toString();
}
@Override
public String getShortName() {
return "setHeaders";
}
@Override
public List<ProcessorDefinition<?>> getOutputs() {
return Collections.emptyList();
}
}
|
SetHeadersDefinition
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/SQLUtilsAddConditionTest_sqlserver.java
|
{
"start": 155,
"end": 1712
}
|
class ____ extends TestCase {
public void test_select() throws Exception {
assertEquals("SELECT *"
+ "\nFROM t"
+ "\nWHERE id = 0", SQLUtils.addCondition("select * from t", "id = 0", DbType.sqlserver));
}
public void test_select_1() throws Exception {
assertEquals("SELECT *"
+ "\nFROM t"
+ "\nWHERE id = 0"
+ "\n\tAND name = 'aaa'", SQLUtils.addCondition("select * from t where id = 0", "name = 'aaa'", DbType.sqlserver));
}
public void test_delete() throws Exception {
assertEquals("DELETE FROM t"
+ "\nWHERE id = 0", SQLUtils.addCondition("delete from t", "id = 0", DbType.sqlserver));
}
public void test_delete_1() throws Exception {
assertEquals("DELETE FROM t"
+ "\nWHERE id = 0"
+ "\n\tAND name = 'aaa'", SQLUtils.addCondition("delete from t where id = 0", "name = 'aaa'", DbType.sqlserver));
}
public void test_update() throws Exception {
assertEquals("UPDATE t"//
+ "\nSET f1 = ?"
+ "\nWHERE id = 0", SQLUtils.addCondition("update t set f1 = ?", "id = 0", DbType.sqlserver));
}
public void test_update_1() throws Exception {
assertEquals("UPDATE t"//
+ "\nSET f1 = ?"
+ "\nWHERE id = 0"
+ "\n\tAND name = 'bb'", SQLUtils.addCondition("update t set f1 = ? where id = 0", "name = 'bb'", DbType.sqlserver));
}
}
|
SQLUtilsAddConditionTest_sqlserver
|
java
|
quarkusio__quarkus
|
extensions/spring-data-jpa/deployment/src/test/java/io/quarkus/spring/data/deployment/User.java
|
{
"start": 678,
"end": 1780
}
|
// JPA entity with a mutable-bean shape (no-arg access + getters/setters),
// as required for ORM reflection/proxying. Left as a plain bean on purpose.
class ____ {
    @Id
    private String userId; // natural primary key (no generated id)
    private String fullName;
    private boolean active;
    private int loginCounter;
    // Owning side is LoginEvent.user; events are fully cascaded and
    // orphan-removed, and fetched eagerly together with the user.
    @OneToMany(mappedBy = "user", cascade = CascadeType.ALL, orphanRemoval = true, fetch = FetchType.EAGER)
    private List<LoginEvent> loginEvents = new ArrayList<>();

    public String getUserId() {
        return userId;
    }

    public void setUserId(String userId) {
        this.userId = userId;
    }

    public String getFullName() {
        return fullName;
    }

    public void setFullName(String fullName) {
        this.fullName = fullName;
    }

    public boolean isActive() {
        return active;
    }

    public void setActive(boolean active) {
        this.active = active;
    }

    public int getLoginCounter() {
        return loginCounter;
    }

    public void setLoginCounter(int loginCounter) {
        this.loginCounter = loginCounter;
    }

    public void addEvent(LoginEvent loginEvent) {
        // NOTE(review): does not set loginEvent's back-reference to this user;
        // presumably callers do that themselves — confirm against usages.
        this.loginEvents.add(loginEvent);
    }

    public List<LoginEvent> getLoginEvents() {
        // NOTE(review): exposes the internal mutable list; acceptable for a
        // test fixture, but callers can mutate it outside addEvent().
        return loginEvents;
    }
}
|
User
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SpringAiVectorStoreComponentBuilderFactory.java
|
{
"start": 1915,
"end": 7507
}
|
// NOTE(review): this interface follows Camel's generated component-DSL
// pattern; if it is machine-generated, hand edits will be lost on
// regeneration — change the component metadata/generator instead.
interface ____ extends ComponentBuilder<SpringAiVectorStoreComponent> {
    /**
     * The configuration.
     *
     * The option is a:
     * <code>org.apache.camel.component.springai.vectorstore.SpringAiVectorStoreConfiguration</code> type.
     *
     * Group: producer
     *
     * @param configuration the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder configuration(org.apache.camel.component.springai.vectorstore.SpringAiVectorStoreConfiguration configuration) {
        doSetProperty("configuration", configuration);
        return this;
    }
    /**
     * Filter expression for metadata-based filtering in searches.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: producer
     *
     * @param filterExpression the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder filterExpression(java.lang.String filterExpression) {
        doSetProperty("filterExpression", filterExpression);
        return this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: producer
     *
     * @param lazyStartProducer the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
        doSetProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
    /**
     * The operation to perform on the vector store (ADD, DELETE,
     * SIMILARITY_SEARCH).
     *
     * The option is a:
     * <code>org.apache.camel.component.springai.vectorstore.SpringAiVectorStoreOperation</code> type.
     *
     * Default: ADD
     * Group: producer
     *
     * @param operation the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder operation(org.apache.camel.component.springai.vectorstore.SpringAiVectorStoreOperation operation) {
        doSetProperty("operation", operation);
        return this;
    }
    /**
     * The minimum similarity score threshold (0-1) for similarity search.
     *
     * The option is a: <code>double</code> type.
     *
     * Group: producer
     *
     * @param similarityThreshold the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder similarityThreshold(double similarityThreshold) {
        doSetProperty("similarityThreshold", similarityThreshold);
        return this;
    }
    /**
     * The maximum number of similar documents to return for similarity
     * search.
     *
     * The option is a: <code>int</code> type.
     *
     * Default: 5
     * Group: producer
     *
     * @param topK the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder topK(int topK) {
        doSetProperty("topK", topK);
        return this;
    }
    /**
     * The VectorStore to use for vector operations.
     *
     * The option is a:
     * <code>org.springframework.ai.vectorstore.VectorStore</code> type.
     *
     * Group: producer
     *
     * @param vectorStore the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder vectorStore(org.springframework.ai.vectorstore.VectorStore vectorStore) {
        doSetProperty("vectorStore", vectorStore);
        return this;
    }
    /**
     * Whether autowiring is enabled. This is used for automatic autowiring
     * options (the option must be marked as autowired) by looking up in the
     * registry to find if there is a single instance of matching type,
     * which then gets configured on the component. This can be used for
     * automatic configuring JDBC data sources, JMS connection factories,
     * AWS Clients, etc.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: true
     * Group: advanced
     *
     * @param autowiredEnabled the value to set
     * @return the dsl builder
     */
    default SpringAiVectorStoreComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
        doSetProperty("autowiredEnabled", autowiredEnabled);
        return this;
    }
}
|
SpringAiVectorStoreComponentBuilder
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/operators/util/CollectionDataStreams.java
|
{
"start": 6716,
"end": 6894
}
|
// Public mutable fields are intentional: Flink's POJO type extraction
// requires public fields (or getters/setters) and a no-arg constructor.
class ____ {
    public String group;
    public Date date;
    public Category cat;
}
/** POJO with generic collection. */
public static
|
PojoWithDateAndEnum
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/async/AsyncEndpointRoutingSlipBeanNonBlockingTest.java
|
{
"start": 3350,
"end": 3524
}
|
class ____ {
    @RoutingSlip
    public String doSomething() {
        // The returned comma-separated URIs are the routing slip: the
        // message is sent to each endpoint in order.
        return "direct:asyncRoute,mock:result";
    }
}
private static
|
MyRoutingSlipBean
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
|
{
"start": 8033,
"end": 18720
}
|
class ____
    extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
  // NOTE(review): the new-API Reducer callback is
  // reduce(K, Iterable<V>, Context); this method takes Iterator and carries
  // no @Override, so it does NOT override the framework method — the default
  // identity reduce runs instead. The merge job still produces correct
  // output here because each key occurs exactly once in its input, but
  // confirm before reusing this class elsewhere.
  public void reduce(IntWritable key, Iterator<IntWritable> it,
      Context context) throws IOException, InterruptedException {
    int keyint = key.get();
    int total = 0;
    while (it.hasNext()) {
      total += it.next().get();
    }
    context.write(new IntWritable(keyint), new IntWritable(total));
  }
}
private static int range = 10;   // distinct int values generated: [0, range)
private static int counts = 100; // total number of ints to distribute
private static Random r = new Random(); // unseeded: each run uses a new distribution
@AfterEach
public void cleanup() {
  // Remove all per-test output so runs do not interfere with each other.
  FileUtil.fullyDelete(TEST_DIR);
}
@Test
public void testMapred() throws Exception {
  // Runs the full generate/check/merge MapReduce pipeline with defaults.
  launch();
}
// Drives the whole test: writes an answer key, runs three chained MR jobs
// (generate random ints, recount them, merge the counts), then verifies the
// recomputed distribution against the answer key. Throws on job failure;
// asserts on mismatch.
private static void launch() throws Exception {
  //
  // Generate distribution of ints. This is the answer key.
  //
  Configuration conf = new Configuration();
  int countsToGo = counts;
  int dist[] = new int[range];
  for (int i = 0; i < range; i++) {
    double avgInts = (1.0 * countsToGo) / (range - i);
    dist[i] = (int) Math.max(0, Math.round(avgInts +
        (Math.sqrt(avgInts) * r.nextGaussian())));
    countsToGo -= dist[i];
  }
  if (countsToGo > 0) {
    // Rounding may leave a remainder; dump it on the last bucket so the
    // total always equals 'counts'.
    dist[dist.length-1] += countsToGo;
  }
  //
  // Write the answer key to a file.
  //
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  Path randomIns = new Path(testdir, "genins");
  if (!fs.mkdirs(randomIns)) {
    throw new IOException("Mkdirs failed to create " + randomIns.toString());
  }
  Path answerkey = new Path(randomIns, "answer.key");
  SequenceFile.Writer out =
      SequenceFile.createWriter(fs, conf, answerkey, IntWritable.class,
          IntWritable.class,
          SequenceFile.CompressionType.NONE);
  try {
    for (int i = 0; i < range; i++) {
      out.append(new IntWritable(i), new IntWritable(dist[i]));
    }
  } finally {
    out.close();
  }
  printFiles(randomIns, conf);
  //
  // Now we need to generate the random numbers according to
  // the above distribution.
  //
  // We create a lot of map tasks, each of which takes at least
  // one "line" of the distribution. (That is, a certain number
  // X is to be generated Y number of times.)
  //
  // A map task emits Y key/val pairs. The val is X. The key
  // is a randomly-generated number.
  //
  // The reduce task gets its input sorted by key. That is, sorted
  // in random order. It then emits a single line of text that
  // for the given values. It does not emit the key.
  //
  // Because there's just one reduce task, we emit a single big
  // file of random numbers.
  //
  Path randomOuts = new Path(testdir, "genouts");
  fs.delete(randomOuts, true);
  Job genJob = Job.getInstance(conf);
  FileInputFormat.setInputPaths(genJob, randomIns);
  genJob.setInputFormatClass(SequenceFileInputFormat.class);
  genJob.setMapperClass(RandomGenMapper.class);
  FileOutputFormat.setOutputPath(genJob, randomOuts);
  genJob.setOutputKeyClass(IntWritable.class);
  genJob.setOutputValueClass(IntWritable.class);
  genJob.setReducerClass(RandomGenReducer.class);
  genJob.setNumReduceTasks(1);
  genJob.waitForCompletion(true);
  printFiles(randomOuts, conf);
  //
  // Next, we read the big file in and regenerate the
  // original map. It's split into a number of parts.
  // (That number is 'intermediateReduces'.)
  //
  // We have many map tasks, each of which read at least one
  // of the output numbers. For each number read in, the
  // map task emits a key/value pair where the key is the
  // number and the value is "1".
  //
  // We have a single reduce task, which receives its input
  // sorted by the key emitted above. For each key, there will
  // be a certain number of "1" values. The reduce task sums
  // these values to compute how many times the given key was
  // emitted.
  //
  // The reduce task then emits a key/val pair where the key
  // is the number in question, and the value is the number of
  // times the key was emitted. This is the same format as the
  // original answer key (except that numbers emitted zero times
  // will not appear in the regenerated key.) The answer set
  // is split into a number of pieces. A final MapReduce job
  // will merge them.
  //
  // There's not really a need to go to 10 reduces here
  // instead of 1. But we want to test what happens when
  // you have multiple reduces at once.
  //
  int intermediateReduces = 10;
  Path intermediateOuts = new Path(testdir, "intermediateouts");
  fs.delete(intermediateOuts, true);
  Job checkJob = Job.getInstance(conf);
  FileInputFormat.setInputPaths(checkJob, randomOuts);
  checkJob.setMapperClass(RandomCheckMapper.class);
  FileOutputFormat.setOutputPath(checkJob, intermediateOuts);
  checkJob.setOutputKeyClass(IntWritable.class);
  checkJob.setOutputValueClass(IntWritable.class);
  checkJob.setOutputFormatClass(MapFileOutputFormat.class);
  checkJob.setReducerClass(RandomCheckReducer.class);
  checkJob.setNumReduceTasks(intermediateReduces);
  checkJob.waitForCompletion(true);
  printFiles(intermediateOuts, conf);
  //
  // OK, now we take the output from the last job and
  // merge it down to a single file. The map() and reduce()
  // functions don't really do anything except reemit tuples.
  // But by having a single reduce task here, we end up merging
  // all the files.
  //
  Path finalOuts = new Path(testdir, "finalouts");
  fs.delete(finalOuts, true);
  Job mergeJob = Job.getInstance(conf);
  FileInputFormat.setInputPaths(mergeJob, intermediateOuts);
  mergeJob.setInputFormatClass(SequenceFileInputFormat.class);
  mergeJob.setMapperClass(MergeMapper.class);
  FileOutputFormat.setOutputPath(mergeJob, finalOuts);
  mergeJob.setOutputKeyClass(IntWritable.class);
  mergeJob.setOutputValueClass(IntWritable.class);
  mergeJob.setOutputFormatClass(SequenceFileOutputFormat.class);
  mergeJob.setReducerClass(MergeReducer.class);
  mergeJob.setNumReduceTasks(1);
  mergeJob.waitForCompletion(true);
  printFiles(finalOuts, conf);
  //
  // Finally, we compare the reconstructed answer key with the
  // original one. Remember, we need to ignore zero-count items
  // in the original key.
  //
  boolean success = true;
  Path recomputedkey = new Path(finalOuts, "part-r-00000");
  SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey, conf);
  int totalseen = 0;
  try {
    IntWritable key = new IntWritable();
    IntWritable val = new IntWritable();
    for (int i = 0; i < range; i++) {
      if (dist[i] == 0) {
        continue;
      }
      if (!in.next(key, val)) {
        System.err.println("Cannot read entry " + i);
        success = false;
        break;
      } else {
        if (!((key.get() == i) && (val.get() == dist[i]))) {
          System.err.println("Mismatch!  Pos=" + key.get() + ", i=" + i +
              ", val=" + val.get() + ", dist[i]=" + dist[i]);
          success = false;
        }
        totalseen += val.get();
      }
    }
    if (success) {
      if (in.next(key, val)) {
        System.err.println("Unnecessary lines in recomputed key!");
        success = false;
      }
    }
  } finally {
    in.close();
  }
  int originalTotal = 0;
  for (int i = 0; i < dist.length; i++) {
    originalTotal += dist[i];
  }
  System.out.println("Original sum: " + originalTotal);
  System.out.println("Recomputed sum: " + totalseen);
  //
  // Write to "results" whether the test succeeded or not.
  //
  Path resultFile = new Path(testdir, "results");
  BufferedWriter bw = new BufferedWriter(
      new OutputStreamWriter(fs.create(resultFile)));
  try {
    bw.write("Success=" + success + "\n");
    System.out.println("Success=" + success);
  } finally {
    bw.close();
  }
  assertTrue(success, "testMapRed failed");
  fs.delete(testdir, true);
}
/**
 * Dumps each line of a plain-text file to stdout, prefixed with "Row:".
 *
 * Fix: use try-with-resources so the reader is closed even when
 * readLine() throws; the original closed it only on the success path.
 */
private static void printTextFile(FileSystem fs, Path p) throws IOException {
  try (BufferedReader in =
      new BufferedReader(new InputStreamReader(fs.open(p)))) {
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(" Row: " + line);
    }
  }
}
/**
 * Dumps each key/value pair of a SequenceFile to stdout.
 *
 * Fix: SequenceFile.Reader is Closeable; use try-with-resources so the
 * reader is released even when next()/getCurrentValue() throws (the
 * original leaked it on the exception path).
 */
private static void printSequenceFile(FileSystem fs, Path p,
    Configuration conf) throws IOException {
  try (SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf)) {
    Object key = null;
    Object value = null;
    while ((key = r.next(key)) != null) {
      value = r.getCurrentValue(value);
      System.out.println(" Row: " + key + ", " + value);
    }
  }
}
/**
 * Returns true when the file begins with the SequenceFile magic
 * bytes 'S','E','Q'; false otherwise.
 */
private static boolean isSequenceFile(FileSystem fs,
    Path f) throws IOException {
  DataInputStream stream = fs.open(f);
  try {
    for (byte expected : "SEQ".getBytes()) {
      if (stream.read() != expected) {
        return false;
      }
    }
  } finally {
    stream.close();
  }
  return true;
}
// Prints the contents of every file under 'dir', dispatching on the file
// layout: directories are treated as MapFiles (read their "data" part),
// SEQ-magic files as SequenceFiles, everything else as plain text.
private static void printFiles(Path dir,
    Configuration conf) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  for(FileStatus f: fs.listStatus(dir)) {
    System.out.println("Reading " + f.getPath() + ": ");
    if (f.isDirectory()) {
      System.out.println("  it is a map file.");
      printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
    } else if (isSequenceFile(fs, f.getPath())) {
      System.out.println("  it is a sequence file.");
      printSequenceFile(fs, f.getPath(), conf);
    } else {
      System.out.println("  it is a text file.");
      printTextFile(fs, f.getPath());
    }
  }
}
/**
 * Launches all the tasks in order.
 * Command-line entry point: {@code TestMapReduce <range> <counts>};
 * overrides the static defaults before running {@link #launch()}.
 */
public static void main(String[] argv) throws Exception {
  if (argv.length < 2) {
    System.err.println("Usage: TestMapReduce <range> <counts>");
    System.err.println();
    System.err.println("Note: a good test will have a <counts> value" +
        " that is substantially larger than the <range>");
    return;
  }
  int i = 0;
  range = Integer.parseInt(argv[i++]);
  counts = Integer.parseInt(argv[i++]);
  try {
    launch();
  } finally {
    // Always clean up the work directory, even when launch() fails.
    FileUtil.fullyDelete(TEST_DIR);
  }
}
}
|
MergeReducer
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringWireTapTest.java
|
{
"start": 1032,
"end": 1275
}
|
// Re-runs the inherited WireTapTest cases against a Spring-XML-defined
// CamelContext instead of the programmatic one.
class ____ extends WireTapTest {
    @Override
    protected CamelContext createCamelContext() throws Exception {
        return createSpringCamelContext(this, "org/apache/camel/spring/processor/SpringWireTapTest.xml");
    }
}
|
SpringWireTapTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/util/ListDG.java
|
{
"start": 1138,
"end": 1270
}
|
// An edge node in the adjacency list of the directed graph.
class ____ {
    int ivex;       // index of the vertex this edge points to
    ENode nextEdge; // pointer to the next edge in the list
}
// A vertex entry in the adjacency-list table
private static
|
ENode
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/pathparams/HttpPathParamLimitWithJaxRs400Test.java
|
{
"start": 2887,
"end": 3803
}
|
// JAX-RS test resource exposing paired endpoints (with and without a path
// parameter) for each status class the metrics tests need: 200, 400, 500.
class ____ {
    @GET
    @Path("/jaxrs")
    public String jaxrs() {
        return "hello";
    }

    @GET
    @Path("/jaxrs/{message}")
    public String jaxrsWithPathParam(@PathParam("message") String message) {
        return "hello " + message;
    }

    // Client-error responses (400).
    @GET
    @Path("/bad")
    public Response bad() {
        return Response.status(400).build();
    }

    @GET
    @Path("/bad/{message}")
    public Response bad(@PathParam("message") String message) {
        return Response.status(400).build();
    }

    // Server-error responses (500).
    @GET
    @Path("/fail")
    public Response fail() {
        return Response.status(500).build();
    }

    @GET
    @Path("/fail/{message}")
    public Response fail(@PathParam("message") String message) {
        return Response.status(500).build();
    }
}
}
|
Resource
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanTests.java
|
{
"start": 17813,
"end": 17889
}
|
class ____ {
    // Intentionally empty: presumably a marker fixture whose annotations
    // drive the enclosing tests — confirm against the test methods.
}
@ConfigurationProperties("prefix")
static
|
AnnotatedComponent
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/cfg/SchemaToolingSettings.java
|
{
"start": 557,
"end": 9324
}
|
interface ____ {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// JPA settings
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/**
* Specifies what type of schema tooling action should be performed against the
* database specified using either {@value JdbcSettings#JAKARTA_HBM2DDL_CONNECTION} or the
* configured {@link org.hibernate.engine.jdbc.connections.spi.ConnectionProvider}
* for the {@link org.hibernate.SessionFactory}.
* <p>
* Valid options are enumerated by {@link org.hibernate.tool.schema.Action}.
* <p>
* This setting takes precedence over {@value #HBM2DDL_AUTO}.
* <p>
* If no value is specified, the default is
* {@link org.hibernate.tool.schema.Action#NONE "none"}.
*
* @see org.hibernate.tool.schema.Action
* @see JdbcSettings#JAKARTA_HBM2DDL_CONNECTION
* @see JdbcSettings#JAKARTA_JDBC_URL
*/
String JAKARTA_HBM2DDL_DATABASE_ACTION = "jakarta.persistence.schema-generation.database.action";
/**
* Specifies what type of schema tooling action should be written to script files.
* <p>
* Valid options are enumerated by {@link org.hibernate.tool.schema.Action}.
* <p>
* The script file is identified using {@value #JAKARTA_HBM2DDL_SCRIPTS_CREATE_TARGET}.
* <p>
* If no value is specified, the default is
* {@link org.hibernate.tool.schema.Action#NONE "none"}.
*
* @see org.hibernate.tool.schema.Action
* @see #JAKARTA_HBM2DDL_SCRIPTS_CREATE_TARGET
* @see #JAKARTA_HBM2DDL_SCRIPTS_DROP_TARGET
*/
String JAKARTA_HBM2DDL_SCRIPTS_ACTION = "jakarta.persistence.schema-generation.scripts.action";
/**
* Specifies whether schema generation commands for schema creation are to be determined
* based on object/relational mapping metadata, DDL scripts, or a combination of the two.
* See {@link org.hibernate.tool.schema.SourceType} for the list of legal values.
* <p>
* If no value is specified, a default is inferred as follows:
* <ul>
* <li>if source scripts are specified via {@value #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE},
* then {@link org.hibernate.tool.schema.SourceType#SCRIPT "script"} is assumed, or
* <li>otherwise, {@link org.hibernate.tool.schema.SourceType#SCRIPT "metadata"} is
* assumed.
* </ul>
*
* @see org.hibernate.tool.schema.SourceType
*/
String JAKARTA_HBM2DDL_CREATE_SOURCE = "jakarta.persistence.schema-generation.create-source";
/**
* Specifies whether schema generation commands for schema dropping are to be determined
* based on object/relational mapping metadata, DDL scripts, or a combination of the two.
* See {@link org.hibernate.tool.schema.SourceType} for the list of legal values.
* <p>
* If no value is specified, a default is inferred as follows:
* <ul>
* <li>if source scripts are specified via {@value #JAKARTA_HBM2DDL_DROP_SCRIPT_SOURCE},
* then {@linkplain org.hibernate.tool.schema.SourceType#SCRIPT "script"} is assumed, or
* <li>otherwise, {@linkplain org.hibernate.tool.schema.SourceType#SCRIPT "metadata"}
* is assumed.
* </ul>
*
* @see org.hibernate.tool.schema.SourceType
*/
String JAKARTA_HBM2DDL_DROP_SOURCE = "jakarta.persistence.schema-generation.drop-source";
/**
* Specifies the CREATE script file as either a {@link java.io.Reader} configured for reading
* the DDL script file or a string designating a file {@link java.net.URL} for the DDL script.
* <p>
* The script should contain mostly DDL {@code CREATE} statements. For importing data using DML,
* use {@link #JAKARTA_HBM2DDL_LOAD_SCRIPT_SOURCE}.
*
* @see #JAKARTA_HBM2DDL_CREATE_SOURCE
* @see #JAKARTA_HBM2DDL_LOAD_SCRIPT_SOURCE
*/
String JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE = "jakarta.persistence.schema-generation.create-script-source";
/**
* Specifies the DROP script file as either a {@link java.io.Reader} configured for reading
* the DDL script file or a string designating a file {@link java.net.URL} for the DDL script.
* <p>
* The script should contain mostly DDL {@code DROP} statements.
*
* @see #JAKARTA_HBM2DDL_DROP_SOURCE
*/
String JAKARTA_HBM2DDL_DROP_SCRIPT_SOURCE = "jakarta.persistence.schema-generation.drop-script-source";
/**
* For cases where {@value #JAKARTA_HBM2DDL_SCRIPTS_ACTION} indicates that schema creation
* commands should be written to a script file, this setting specifies either a
* {@link java.io.Writer} configured for output of the DDL script or a string specifying
* the file URL for the DDL script.
*
* @see #JAKARTA_HBM2DDL_SCRIPTS_ACTION
*/
String JAKARTA_HBM2DDL_SCRIPTS_CREATE_TARGET = "jakarta.persistence.schema-generation.scripts.create-target";
/**
* For cases where {@value #JAKARTA_HBM2DDL_SCRIPTS_ACTION} indicates that schema
* drop commands should be written to a script file, this setting specifies either a
* {@link java.io.Writer} configured for output of the DDL script or a string
* specifying the file URL for the DDL script.
*
* @see #JAKARTA_HBM2DDL_SCRIPTS_ACTION
*/
String JAKARTA_HBM2DDL_SCRIPTS_DROP_TARGET = "jakarta.persistence.schema-generation.scripts.drop-target";
/**
* JPA-standard variant of {@link #HBM2DDL_IMPORT_FILES} for specifying a database
* initialization script to be run after {@linkplain org.hibernate.relational.SchemaManager
* exporting or truncating the database schema}.
* <p>
* Specifies a {@link java.io.Reader} configured for reading of the SQL load script
* or a string designating the {@link java.net.URL} for the SQL load script.
* <p>
* The script should contain mostly DML {@code INSERT} statements. For DDL schema creation,
* use {@link #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE}
* <p>
* Hibernate historically also accepted {@link #HBM2DDL_IMPORT_FILES} for a similar purpose.
* This setting is now preferred.
*
* @see #JAKARTA_HBM2DDL_DATABASE_ACTION
* @see #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE
* @see org.hibernate.relational.SchemaManager#populate
* @see org.hibernate.relational.SchemaManager#exportMappedObjects
* @see org.hibernate.relational.SchemaManager#truncateMappedObjects
*/
String JAKARTA_HBM2DDL_LOAD_SCRIPT_SOURCE = "jakarta.persistence.sql-load-script-source";
/**
* The JPA variant of {@link #HBM2DDL_CREATE_NAMESPACES} used to specify whether database
* schemas used in the mapping model should be created on export in addition to creating
* the tables, sequences, etc.
* <p>
* The default is {@code false}, meaning to not create schemas
*/
String JAKARTA_HBM2DDL_CREATE_SCHEMAS = "jakarta.persistence.create-database-schemas";
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Hibernate settings
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/**
* Specifies the {@link org.hibernate.tool.schema.spi.SchemaManagementTool} to use for
* performing schema management.
* <p>
* By default, {@link org.hibernate.tool.schema.internal.HibernateSchemaManagementTool}
* is used.
*
* @since 5.0
*/
String SCHEMA_MANAGEMENT_TOOL = "hibernate.schema_management_tool";
/**
* Setting to perform {@link org.hibernate.tool.schema.spi.SchemaManagementTool}
* actions automatically as part of the {@link org.hibernate.SessionFactory}
* lifecycle. Valid options are enumerated by {@link org.hibernate.tool.schema.Action}.
* <p>
* Interpreted in combination with {@link #JAKARTA_HBM2DDL_DATABASE_ACTION} and
* {@link #JAKARTA_HBM2DDL_SCRIPTS_ACTION}. If no value is specified, the default
* is {@linkplain org.hibernate.tool.schema.Action#NONE "none"}.
*
* @settingDefault {@code "none"}
*
* @see org.hibernate.tool.schema.Action
*/
String HBM2DDL_AUTO = "hibernate.hbm2ddl.auto";
/**
* For cases where the {@value #JAKARTA_HBM2DDL_SCRIPTS_ACTION} value indicates that schema
* commands should be written to a DDL script file, specifies if schema commands should be
* appended to the end of the file rather than written at the beginning of the file.
* <p>
* Values are: {@code true} for appending schema commands to the end of the file, {@code false}
* for writing schema commands at the beginning.
*
* @settingDefault {@code true}
*/
String HBM2DDL_SCRIPTS_CREATE_APPEND = "hibernate.hbm2ddl.schema-generation.script.append";
/**
* The {@link org.hibernate.tool.schema.spi.SqlScriptCommandExtractor} implementation
* to use for parsing source/import files specified by {@link #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE},
* {@link #JAKARTA_HBM2DDL_DROP_SCRIPT_SOURCE} or {@link #HBM2DDL_IMPORT_FILES}. Either:
* <ul>
* <li>an instance of {@link org.hibernate.tool.schema.spi.SqlScriptCommandExtractor},
* <li>a {@link Class} object representing a
|
SchemaToolingSettings
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueIterators.java
|
{
"start": 1097,
"end": 1569
}
|
/**
 * A permanently-empty {@code KeyValueIterator}: {@code hasNext()} is always
 * false, element accessors always throw {@link NoSuchElementException},
 * and {@code close()} is a no-op.
 */
class ____<K, V> implements KeyValueIterator<K, V> {

    @Override
    public boolean hasNext() {
        return false;
    }

    @Override
    public KeyValue<K, V> next() {
        throw new NoSuchElementException();
    }

    @Override
    public K peekNextKey() {
        throw new NoSuchElementException();
    }

    @Override
    public void close() {
        // nothing to release
    }
}
private static
|
EmptyKeyValueIterator
|
java
|
apache__camel
|
components/camel-aws/camel-aws-bedrock/src/generated/java/org/apache/camel/component/aws2/bedrock/runtime/BedrockEndpointUriFactory.java
|
{
"start": 530,
"end": 3255
}
|
// NOTE(review): this class follows Camel's generated EndpointUriFactory
// pattern; if it is machine-generated, hand edits will be lost on
// regeneration — change the endpoint metadata instead.
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {

    private static final String BASE = ":label";

    private static final Set<String> PROPERTY_NAMES;
    private static final Set<String> SECRET_PROPERTY_NAMES;
    private static final Map<String, String> MULTI_VALUE_PREFIXES;
    static {
        // All known endpoint options for the aws-bedrock scheme.
        Set<String> props = new HashSet<>(26);
        props.add("accessKey");
        props.add("bedrockRuntimeAsyncClient");
        props.add("bedrockRuntimeClient");
        props.add("guardrailIdentifier");
        props.add("guardrailTrace");
        props.add("guardrailVersion");
        props.add("includeStreamingMetadata");
        props.add("label");
        props.add("lazyStartProducer");
        props.add("modelId");
        props.add("operation");
        props.add("overrideEndpoint");
        props.add("pojoRequest");
        props.add("profileCredentialsName");
        props.add("proxyHost");
        props.add("proxyPort");
        props.add("proxyProtocol");
        props.add("region");
        props.add("secretKey");
        props.add("sessionToken");
        props.add("streamOutputMode");
        props.add("trustAllCertificates");
        props.add("uriEndpointOverride");
        props.add("useDefaultCredentialsProvider");
        props.add("useProfileCredentialsProvider");
        props.add("useSessionCredentials");
        PROPERTY_NAMES = Collections.unmodifiableSet(props);
        // Options holding credentials; masked by tooling.
        Set<String> secretProps = new HashSet<>(3);
        secretProps.add("accessKey");
        secretProps.add("secretKey");
        secretProps.add("sessionToken");
        SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
        MULTI_VALUE_PREFIXES = Collections.emptyMap();
    }

    @Override
    public boolean isEnabled(String scheme) {
        return "aws-bedrock".equals(scheme);
    }

    @Override
    public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
        String syntax = scheme + BASE;
        String uri = syntax;

        Map<String, Object> copy = new HashMap<>(properties);

        // "label" is the single path parameter; everything else becomes a query parameter.
        uri = buildPathParameter(syntax, uri, "label", null, true, copy);
        uri = buildQueryParameters(uri, copy, encode);
        return uri;
    }

    @Override
    public Set<String> propertyNames() {
        return PROPERTY_NAMES;
    }

    @Override
    public Set<String> secretPropertyNames() {
        return SECRET_PROPERTY_NAMES;
    }

    @Override
    public Map<String, String> multiValuePrefixes() {
        return MULTI_VALUE_PREFIXES;
    }

    @Override
    public boolean isLenientProperties() {
        return false;
    }
}
|
BedrockEndpointUriFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/PluralAttributeMapKeySourceBasicImpl.java
|
{
"start": 581,
"end": 3695
}
|
/**
 * Source descriptor for a basic-typed map key in a {@code hbm.xml} plural
 * attribute mapping. Supports both {@code <map-key>} (JaxbHbmMapKeyBasicType)
 * and the legacy {@code <index>} (JaxbHbmIndexType) forms; each constructor
 * translates its JAXB element into type information plus relational value
 * sources (columns/formulas).
 */
class ____
        extends AbstractHbmSourceNode
        implements PluralAttributeMapKeySourceBasic {
    private final HibernateTypeSourceImpl typeSource;
    private final List<RelationalValueSource> valueSources;
    private final String xmlNodeName;

    // <map-key> form: supports column attribute, formula, and nested column-or-formula elements.
    public PluralAttributeMapKeySourceBasicImpl(
            MappingDocument sourceMappingDocument,
            final JaxbHbmMapKeyBasicType jaxbMapKey) {
        super( sourceMappingDocument );
        this.typeSource = new HibernateTypeSourceImpl( jaxbMapKey );
        this.valueSources = RelationalValueSourceHelper.buildValueSources(
                sourceMappingDocument(),
                null,
                new RelationalValueSourceHelper.AbstractColumnsAndFormulasSource() {
                    @Override
                    public XmlElementMetadata getSourceType() {
                        return XmlElementMetadata.MAP_KEY;
                    }

                    @Override
                    public String getSourceName() {
                        return null;
                    }

                    @Override
                    public String getFormulaAttribute() {
                        return jaxbMapKey.getFormulaAttribute();
                    }

                    @Override
                    public String getColumnAttribute() {
                        return jaxbMapKey.getColumnAttribute();
                    }

                    @Override
                    public List getColumnOrFormulaElements() {
                        return jaxbMapKey.getColumnOrFormula();
                    }

                    @Override
                    public SizeSource getSizeSource() {
                        // Only length is mappable here; precision/scale do not apply.
                        return Helper.interpretSizeSource(
                                jaxbMapKey.getLength(),
                                (Integer) null,
                                null
                        );
                    }
                }
        );
        this.xmlNodeName = jaxbMapKey.getNode();
    }

    // Legacy <index> form: column-attribute/column elements only, no formula, no node name.
    public PluralAttributeMapKeySourceBasicImpl(MappingDocument sourceMappingDocument, final JaxbHbmIndexType jaxbIndex) {
        super( sourceMappingDocument );
        this.typeSource = new HibernateTypeSourceImpl( jaxbIndex.getType() );
        this.valueSources = RelationalValueSourceHelper.buildValueSources(
                sourceMappingDocument(),
                null,
                new RelationalValueSourceHelper.AbstractColumnsAndFormulasSource() {
                    @Override
                    public XmlElementMetadata getSourceType() {
                        return XmlElementMetadata.MAP_KEY;
                    }

                    @Override
                    public String getSourceName() {
                        return null;
                    }

                    @Override
                    public String getColumnAttribute() {
                        return jaxbIndex.getColumnAttribute();
                    }

                    @Override
                    public SizeSource getSizeSource() {
                        return Helper.interpretSizeSource(
                                jaxbIndex.getLength(),
                                (Integer) null,
                                null
                        );
                    }

                    @Override
                    public List getColumnOrFormulaElements() {
                        return jaxbIndex.getColumn();
                    }
                }
        );
        this.xmlNodeName = null;
    }

    @Override
    public PluralAttributeIndexNature getNature() {
        return PluralAttributeIndexNature.BASIC;
    }

    @Override
    public List<RelationalValueSource> getRelationalValueSources() {
        return valueSources;
    }

    @Override
    public boolean areValuesIncludedInInsertByDefault() {
        return true;
    }

    @Override
    public boolean areValuesIncludedInUpdateByDefault() {
        return true;
    }

    @Override
    public boolean areValuesNullableByDefault() {
        // Map keys identify entries, so they are non-nullable by default.
        return false;
    }

    @Override
    public HibernateTypeSourceImpl getTypeInformation() {
        return typeSource;
    }

    @Override
    public String getXmlNodeName() {
        return xmlNodeName;
    }
}
|
PluralAttributeMapKeySourceBasicImpl
|
java
|
apache__rocketmq
|
broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java
|
{
"start": 4469,
"end": 49497
}
|
class ____ implements NettyRequestProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME);
private List<ConsumeMessageHook> consumeMessageHookList;
private PullMessageResultHandler pullMessageResultHandler;
private final BrokerController brokerController;
public PullMessageProcessor(final BrokerController brokerController) {
this.brokerController = brokerController;
this.pullMessageResultHandler = new DefaultPullMessageResultHandler(brokerController);
}
private RemotingCommand rewriteRequestForStaticTopic(PullMessageRequestHeader requestHeader,
TopicQueueMappingContext mappingContext) {
try {
if (mappingContext.getMappingDetail() == null) {
return null;
}
TopicQueueMappingDetail mappingDetail = mappingContext.getMappingDetail();
String topic = mappingContext.getTopic();
Integer globalId = mappingContext.getGlobalId();
// if the leader? consider the order consumer, which will lock the mq
if (!mappingContext.isLeader()) {
return buildErrorResponse(ResponseCode.NOT_LEADER_FOR_QUEUE, String.format("%s-%d cannot find mapping item in request process of current broker %s", topic, globalId, mappingDetail.getBname()));
}
Long globalOffset = requestHeader.getQueueOffset();
LogicQueueMappingItem mappingItem = TopicQueueMappingUtils.findLogicQueueMappingItem(mappingContext.getMappingItemList(), globalOffset, true);
mappingContext.setCurrentItem(mappingItem);
if (globalOffset < mappingItem.getLogicOffset()) {
//handleOffsetMoved
//If the physical queue is reused, we should handle the PULL_OFFSET_MOVED independently
//Otherwise, we could just transfer it to the physical process
}
//below are physical info
String bname = mappingItem.getBname();
Integer phyQueueId = mappingItem.getQueueId();
Long phyQueueOffset = mappingItem.computePhysicalQueueOffset(globalOffset);
requestHeader.setQueueId(phyQueueId);
requestHeader.setQueueOffset(phyQueueOffset);
if (mappingItem.checkIfEndOffsetDecided()
&& requestHeader.getMaxMsgNums() != null) {
requestHeader.setMaxMsgNums((int) Math.min(mappingItem.getEndOffset() - mappingItem.getStartOffset(), requestHeader.getMaxMsgNums()));
}
if (mappingDetail.getBname().equals(bname)) {
//just let it go, do the local pull process
return null;
}
int sysFlag = requestHeader.getSysFlag();
requestHeader.setLo(false);
requestHeader.setBrokerName(bname);
sysFlag = PullSysFlag.clearSuspendFlag(sysFlag);
sysFlag = PullSysFlag.clearCommitOffsetFlag(sysFlag);
requestHeader.setSysFlag(sysFlag);
RpcRequest rpcRequest = new RpcRequest(RequestCode.PULL_MESSAGE, requestHeader, null);
RpcResponse rpcResponse = this.brokerController.getBrokerOuterAPI().getRpcClient().invoke(rpcRequest, this.brokerController.getBrokerConfig().getForwardTimeout()).get();
if (rpcResponse.getException() != null) {
throw rpcResponse.getException();
}
PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) rpcResponse.getHeader();
{
RemotingCommand rewriteResult = rewriteResponseForStaticTopic(requestHeader, responseHeader, mappingContext, rpcResponse.getCode());
if (rewriteResult != null) {
return rewriteResult;
}
}
return RpcClientUtils.createCommandForRpcResponse(rpcResponse);
} catch (Throwable t) {
LOGGER.warn("", t);
return buildErrorResponse(ResponseCode.SYSTEM_ERROR, t.toString());
}
}
protected RemotingCommand rewriteResponseForStaticTopic(PullMessageRequestHeader requestHeader,
PullMessageResponseHeader responseHeader,
TopicQueueMappingContext mappingContext, final int code) {
try {
if (mappingContext.getMappingDetail() == null) {
return null;
}
TopicQueueMappingDetail mappingDetail = mappingContext.getMappingDetail();
LogicQueueMappingItem leaderItem = mappingContext.getLeaderItem();
LogicQueueMappingItem currentItem = mappingContext.getCurrentItem();
LogicQueueMappingItem earlistItem = TopicQueueMappingUtils.findLogicQueueMappingItem(mappingContext.getMappingItemList(), 0L, true);
assert currentItem.getLogicOffset() >= 0;
long requestOffset = requestHeader.getQueueOffset();
long nextBeginOffset = responseHeader.getNextBeginOffset();
long minOffset = responseHeader.getMinOffset();
long maxOffset = responseHeader.getMaxOffset();
int responseCode = code;
//consider the following situations
// 1. read from slave, currently not supported
// 2. the middle queue is truncated because of deleting commitlog
if (code != ResponseCode.SUCCESS) {
//note the currentItem maybe both the leader and the earliest
boolean isRevised = false;
if (leaderItem.getGen() == currentItem.getGen()) {
//read the leader
if (requestOffset > maxOffset) {
//actually, we need do nothing, but keep the code structure here
if (code == ResponseCode.PULL_OFFSET_MOVED) {
responseCode = ResponseCode.PULL_OFFSET_MOVED;
nextBeginOffset = maxOffset;
} else {
//maybe current broker is the slave
responseCode = code;
}
} else if (requestOffset < minOffset) {
nextBeginOffset = minOffset;
responseCode = ResponseCode.PULL_RETRY_IMMEDIATELY;
} else {
responseCode = code;
}
}
//note the currentItem maybe both the leader and the earliest
if (earlistItem.getGen() == currentItem.getGen()) {
//read the earliest one
if (requestOffset < minOffset) {
if (code == ResponseCode.PULL_OFFSET_MOVED) {
responseCode = ResponseCode.PULL_OFFSET_MOVED;
nextBeginOffset = minOffset;
} else {
//maybe read from slave, but we still set it to moved
responseCode = ResponseCode.PULL_OFFSET_MOVED;
nextBeginOffset = minOffset;
}
} else if (requestOffset >= maxOffset) {
//just move to another item
LogicQueueMappingItem nextItem = TopicQueueMappingUtils.findNext(mappingContext.getMappingItemList(), currentItem, true);
if (nextItem != null) {
isRevised = true;
currentItem = nextItem;
nextBeginOffset = currentItem.getStartOffset();
minOffset = currentItem.getStartOffset();
maxOffset = minOffset;
responseCode = ResponseCode.PULL_RETRY_IMMEDIATELY;
} else {
//maybe the next one's logic offset is -1
responseCode = ResponseCode.PULL_NOT_FOUND;
}
} else {
//let it go
responseCode = code;
}
}
//read from the middle item, ignore the PULL_OFFSET_MOVED
if (!isRevised
&& leaderItem.getGen() != currentItem.getGen()
&& earlistItem.getGen() != currentItem.getGen()) {
if (requestOffset < minOffset) {
nextBeginOffset = minOffset;
responseCode = ResponseCode.PULL_RETRY_IMMEDIATELY;
} else if (requestOffset >= maxOffset) {
//just move to another item
LogicQueueMappingItem nextItem = TopicQueueMappingUtils.findNext(mappingContext.getMappingItemList(), currentItem, true);
if (nextItem != null) {
currentItem = nextItem;
nextBeginOffset = currentItem.getStartOffset();
minOffset = currentItem.getStartOffset();
maxOffset = minOffset;
responseCode = ResponseCode.PULL_RETRY_IMMEDIATELY;
} else {
//maybe the next one's logic offset is -1
responseCode = ResponseCode.PULL_NOT_FOUND;
}
} else {
responseCode = code;
}
}
}
//handle nextBeginOffset
//the next begin offset should no more than the end offset
if (currentItem.checkIfEndOffsetDecided()
&& nextBeginOffset >= currentItem.getEndOffset()) {
nextBeginOffset = currentItem.getEndOffset();
}
responseHeader.setNextBeginOffset(currentItem.computeStaticQueueOffsetStrictly(nextBeginOffset));
//handle min offset
responseHeader.setMinOffset(currentItem.computeStaticQueueOffsetStrictly(Math.max(currentItem.getStartOffset(), minOffset)));
//handle max offset
responseHeader.setMaxOffset(Math.max(currentItem.computeStaticQueueOffsetStrictly(maxOffset),
TopicQueueMappingDetail.computeMaxOffsetFromMapping(mappingDetail, mappingContext.getGlobalId())));
//set the offsetDelta
responseHeader.setOffsetDelta(currentItem.computeOffsetDelta());
if (code != ResponseCode.SUCCESS) {
return RemotingCommand.createResponseCommandWithHeader(responseCode, responseHeader);
} else {
return null;
}
} catch (Throwable t) {
LOGGER.warn("", t);
return buildErrorResponse(ResponseCode.SYSTEM_ERROR, t.toString());
}
}
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
return this.processRequest(ctx.channel(), request, true, true);
}
@Override
public boolean rejectRequest() {
if (!this.brokerController.getBrokerConfig().isSlaveReadEnable()
&& this.brokerController.getMessageStoreConfig().getBrokerRole() == BrokerRole.SLAVE) {
return true;
}
return false;
}
private RemotingCommand processRequest(final Channel channel, RemotingCommand request, boolean brokerAllowSuspend,
boolean brokerAllowFlowCtrSuspend)
throws RemotingCommandException {
final long beginTimeMills = this.brokerController.getMessageStore().now();
RemotingCommand response = RemotingCommand.createResponseCommand(PullMessageResponseHeader.class);
final PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) response.readCustomHeader();
final PullMessageRequestHeader requestHeader =
(PullMessageRequestHeader) request.decodeCommandCustomHeader(PullMessageRequestHeader.class);
response.setOpaque(request.getOpaque());
LOGGER.debug("receive PullMessage request command, {}", request);
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.BROKER_FORBIDDEN);
response.setRemark(String.format("the broker[%s] pulling message is forbidden",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (request.getCode() == RequestCode.LITE_PULL_MESSAGE && !this.brokerController.getBrokerConfig().isLitePullMessageEnable()) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.BROKER_FORBIDDEN);
response.setRemark(
"the broker[" + this.brokerController.getBrokerConfig().getBrokerIP1() + "] for lite pull consumer is forbidden");
return response;
}
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
if (null == subscriptionGroupConfig) {
response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
response.setRemark(String.format("subscription group [%s] does not exist, %s", requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
return response;
}
if (!subscriptionGroupConfig.isConsumeEnable()) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.GROUP_FORBIDDEN);
response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
return response;
}
TopicConfig topicConfig = this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
if (null == topicConfig) {
LOGGER.error("the topic {} not exist, consumer: {}", requestHeader.getTopic(), RemotingHelper.parseChannelRemoteAddr(channel));
response.setCode(ResponseCode.TOPIC_NOT_EXIST);
response.setRemark(String.format("topic[%s] not exist, apply first please! %s", requestHeader.getTopic(), FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL)));
return response;
}
if (!PermName.isReadable(topicConfig.getPerm())) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.TOPIC_FORBIDDEN);
response.setRemark("the topic[" + requestHeader.getTopic() + "] pulling message is forbidden");
return response;
}
TopicQueueMappingContext mappingContext = this.brokerController.getTopicQueueMappingManager().buildTopicQueueMappingContext(requestHeader, false);
{
RemotingCommand rewriteResult = rewriteRequestForStaticTopic(requestHeader, mappingContext);
if (rewriteResult != null) {
return rewriteResult;
}
}
if (requestHeader.getQueueId() < 0 || requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] consumer:[%s]",
requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(), channel.remoteAddress());
LOGGER.warn(errorInfo);
response.setCode(ResponseCode.INVALID_PARAMETER);
response.setRemark(errorInfo);
return response;
}
ConsumerManager consumerManager = brokerController.getConsumerManager();
switch (RequestSource.parseInteger(requestHeader.getRequestSource())) {
case PROXY_FOR_BROADCAST:
consumerManager.compensateBasicConsumerInfo(requestHeader.getConsumerGroup(), ConsumeType.CONSUME_PASSIVELY, MessageModel.BROADCASTING);
break;
case PROXY_FOR_STREAM:
consumerManager.compensateBasicConsumerInfo(requestHeader.getConsumerGroup(), ConsumeType.CONSUME_ACTIVELY, MessageModel.CLUSTERING);
break;
default:
consumerManager.compensateBasicConsumerInfo(requestHeader.getConsumerGroup(), ConsumeType.CONSUME_PASSIVELY, MessageModel.CLUSTERING);
break;
}
SubscriptionData subscriptionData = null;
ConsumerFilterData consumerFilterData = null;
final boolean hasSubscriptionFlag = PullSysFlag.hasSubscriptionFlag(requestHeader.getSysFlag());
if (hasSubscriptionFlag) {
try {
subscriptionData = FilterAPI.build(
requestHeader.getTopic(), requestHeader.getSubscription(), requestHeader.getExpressionType()
);
consumerManager.compensateSubscribeData(requestHeader.getConsumerGroup(), requestHeader.getTopic(), subscriptionData);
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
consumerFilterData = ConsumerFilterManager.build(
requestHeader.getTopic(), requestHeader.getConsumerGroup(), requestHeader.getSubscription(),
requestHeader.getExpressionType(), requestHeader.getSubVersion()
);
assert consumerFilterData != null;
}
} catch (Exception e) {
LOGGER.warn("Parse the consumer's subscription[{}] failed, group: {}", requestHeader.getSubscription(),
requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
} else {
ConsumerGroupInfo consumerGroupInfo =
this.brokerController.getConsumerManager().getConsumerGroupInfo(requestHeader.getConsumerGroup());
if (null == consumerGroupInfo) {
LOGGER.warn("the consumer's group info not exist, group: {}", requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_NOT_EXIST);
response.setRemark("the consumer's group info not exist" + FAQUrl.suggestTodo(FAQUrl.SAME_GROUP_DIFFERENT_TOPIC));
return response;
}
if (!subscriptionGroupConfig.isConsumeBroadcastEnable()
&& consumerGroupInfo.getMessageModel() == MessageModel.BROADCASTING) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.BROADCASTING_DISABLE_FORBIDDEN);
response.setRemark("the consumer group[" + requestHeader.getConsumerGroup() + "] can not consume by broadcast way");
return response;
}
boolean readForbidden = this.brokerController.getSubscriptionGroupManager().getForbidden(//
subscriptionGroupConfig.getGroupName(), requestHeader.getTopic(), PermName.INDEX_PERM_READ);
if (readForbidden) {
response.setCode(ResponseCode.NO_PERMISSION);
responseHeader.setForbiddenType(ForbiddenType.SUBSCRIPTION_FORBIDDEN);
response.setRemark("the consumer group[" + requestHeader.getConsumerGroup() + "] is forbidden for topic[" + requestHeader.getTopic() + "]");
return response;
}
subscriptionData = consumerGroupInfo.findSubscriptionData(requestHeader.getTopic());
if (null == subscriptionData) {
LOGGER.warn("the consumer's subscription not exist, group: {}, topic:{}", requestHeader.getConsumerGroup(), requestHeader.getTopic());
response.setCode(ResponseCode.SUBSCRIPTION_NOT_EXIST);
response.setRemark("the consumer's subscription not exist" + FAQUrl.suggestTodo(FAQUrl.SAME_GROUP_DIFFERENT_TOPIC));
return response;
}
if (subscriptionData.getSubVersion() < requestHeader.getSubVersion()) {
LOGGER.warn("The broker's subscription is not latest, group: {} {}", requestHeader.getConsumerGroup(),
subscriptionData.getSubString());
response.setCode(ResponseCode.SUBSCRIPTION_NOT_LATEST);
response.setRemark("the consumer's subscription not latest");
return response;
}
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
consumerFilterData = this.brokerController.getConsumerFilterManager().get(requestHeader.getTopic(),
requestHeader.getConsumerGroup());
if (consumerFilterData == null) {
response.setCode(ResponseCode.FILTER_DATA_NOT_EXIST);
response.setRemark("The broker's consumer filter data is not exist!Your expression may be wrong!");
return response;
}
if (consumerFilterData.getClientVersion() < requestHeader.getSubVersion()) {
LOGGER.warn("The broker's consumer filter data is not latest, group: {}, topic: {}, serverV: {}, clientV: {}",
requestHeader.getConsumerGroup(), requestHeader.getTopic(), consumerFilterData.getClientVersion(), requestHeader.getSubVersion());
response.setCode(ResponseCode.FILTER_DATA_NOT_LATEST);
response.setRemark("the consumer's consumer filter data not latest");
return response;
}
}
}
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())
&& !this.brokerController.getBrokerConfig().isEnablePropertyFilter()) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark("The broker does not support consumer to filter message by " + subscriptionData.getExpressionType());
return response;
}
MessageFilter messageFilter;
if (this.brokerController.getBrokerConfig().isFilterSupportRetry()) {
messageFilter = new ExpressionForRetryMessageFilter(subscriptionData, consumerFilterData,
this.brokerController.getConsumerFilterManager());
} else {
messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
this.brokerController.getConsumerFilterManager());
}
if (brokerController.getBrokerConfig().isRejectPullConsumerEnable()) {
ConsumerGroupInfo consumerGroupInfo =
this.brokerController.getConsumerManager().getConsumerGroupInfo(requestHeader.getConsumerGroup());
if (null == consumerGroupInfo || ConsumeType.CONSUME_ACTIVELY == consumerGroupInfo.getConsumeType()) {
if ((null == consumerGroupInfo || null == consumerGroupInfo.findChannel(channel))
&& !MixAll.isSysConsumerGroupPullMessage(requestHeader.getConsumerGroup())) {
response.setCode(ResponseCode.SUBSCRIPTION_NOT_EXIST);
response.setRemark("the consumer's group info not exist, or the pull consumer is rejected by server." + FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST));
return response;
}
}
}
final MessageStore messageStore = brokerController.getMessageStore();
if (this.brokerController.getMessageStore() instanceof DefaultMessageStore) {
DefaultMessageStore defaultMessageStore = (DefaultMessageStore) this.brokerController.getMessageStore();
boolean cgNeedColdDataFlowCtr = brokerController.getColdDataCgCtrService().isCgNeedColdDataFlowCtr(requestHeader.getConsumerGroup());
if (cgNeedColdDataFlowCtr) {
boolean isMsgLogicCold = defaultMessageStore.getCommitLog()
.getColdDataCheckService().isMsgInColdArea(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getQueueOffset());
if (isMsgLogicCold) {
ConsumeType consumeType = this.brokerController.getConsumerManager().getConsumerGroupInfo(requestHeader.getConsumerGroup()).getConsumeType();
if (consumeType == ConsumeType.CONSUME_PASSIVELY) {
response.setCode(ResponseCode.SYSTEM_BUSY);
response.setRemark("This consumer group is reading cold data. It has been flow control");
return response;
} else if (consumeType == ConsumeType.CONSUME_ACTIVELY) {
if (brokerAllowFlowCtrSuspend) { // second arrived, which will not be held
PullRequest pullRequest = new PullRequest(request, channel, 1000,
this.brokerController.getMessageStore().now(), requestHeader.getQueueOffset(), subscriptionData, messageFilter);
this.brokerController.getColdDataPullRequestHoldService().suspendColdDataReadRequest(pullRequest);
return null;
}
requestHeader.setMaxMsgNums(1);
}
}
}
}
final boolean useResetOffsetFeature = brokerController.getBrokerConfig().isUseServerSideResetOffset();
String topic = requestHeader.getTopic();
String group = requestHeader.getConsumerGroup();
int queueId = requestHeader.getQueueId();
Long resetOffset = brokerController.getConsumerOffsetManager().queryThenEraseResetOffset(topic, group, queueId);
GetMessageResult getMessageResult = null;
if (useResetOffsetFeature && null != resetOffset) {
getMessageResult = new GetMessageResult();
getMessageResult.setStatus(GetMessageStatus.OFFSET_RESET);
getMessageResult.setNextBeginOffset(resetOffset);
getMessageResult.setMinOffset(messageStore.getMinOffsetInQueue(topic, queueId));
try {
getMessageResult.setMaxOffset(messageStore.getMaxOffsetInQueue(topic, queueId));
} catch (ConsumeQueueException e) {
throw new RemotingCommandException("Failed tp get max offset in queue", e);
}
getMessageResult.setSuggestPullingFromSlave(false);
} else {
long broadcastInitOffset = queryBroadcastPullInitOffset(topic, group, queueId, requestHeader, channel);
if (broadcastInitOffset >= 0) {
getMessageResult = new GetMessageResult();
getMessageResult.setStatus(GetMessageStatus.OFFSET_RESET);
getMessageResult.setNextBeginOffset(broadcastInitOffset);
} else {
SubscriptionData finalSubscriptionData = subscriptionData;
RemotingCommand finalResponse = response;
messageStore.getMessageAsync(group, topic, queueId, requestHeader.getQueueOffset(),
requestHeader.getMaxMsgNums(), messageFilter)
.thenApply(result -> {
if (null == result) {
finalResponse.setCode(ResponseCode.SYSTEM_ERROR);
finalResponse.setRemark("store getMessage return null");
return finalResponse;
}
brokerController.getColdDataCgCtrService().coldAcc(requestHeader.getConsumerGroup(), result.getColdDataSum());
return pullMessageResultHandler.handle(
result,
request,
requestHeader,
channel,
finalSubscriptionData,
subscriptionGroupConfig,
brokerAllowSuspend,
messageFilter,
finalResponse,
mappingContext,
beginTimeMills
);
})
.thenAccept(result -> NettyRemotingAbstract.writeResponse(channel, request, result, null, brokerController.getBrokerMetricsManager().getRemotingMetricsManager()));
}
}
if (getMessageResult != null) {
return this.pullMessageResultHandler.handle(
getMessageResult,
request,
requestHeader,
channel,
subscriptionData,
subscriptionGroupConfig,
brokerAllowSuspend,
messageFilter,
response,
mappingContext,
beginTimeMills
);
}
return null;
}
public boolean hasConsumeMessageHook() {
return consumeMessageHookList != null && !this.consumeMessageHookList.isEmpty();
}
/**
* Composes the header of the response message to be sent back to the client
*
* @param requestHeader - the header of the request message
* @param getMessageResult - the result of the GetMessage request
* @param topicSysFlag - the system flag of the topic
* @param subscriptionGroupConfig - configuration of the subscription group
* @param response - the response message to be sent back to the client
* @param clientAddress - the address of the client
*/
protected void composeResponseHeader(PullMessageRequestHeader requestHeader, GetMessageResult getMessageResult,
int topicSysFlag, SubscriptionGroupConfig subscriptionGroupConfig, RemotingCommand response,
String clientAddress) {
final PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) response.readCustomHeader();
response.setRemark(getMessageResult.getStatus().name());
responseHeader.setNextBeginOffset(getMessageResult.getNextBeginOffset());
responseHeader.setMinOffset(getMessageResult.getMinOffset());
// this does not need to be modified since it's not an accurate value under logical queue.
responseHeader.setMaxOffset(getMessageResult.getMaxOffset());
responseHeader.setTopicSysFlag(topicSysFlag);
responseHeader.setGroupSysFlag(subscriptionGroupConfig.getGroupSysFlag());
switch (getMessageResult.getStatus()) {
case FOUND:
response.setCode(ResponseCode.SUCCESS);
break;
case MESSAGE_WAS_REMOVING:
case NO_MATCHED_MESSAGE:
response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
break;
case NO_MATCHED_LOGIC_QUEUE:
case NO_MESSAGE_IN_QUEUE:
if (0 != requestHeader.getQueueOffset()) {
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
// XXX: warn and notify me
LOGGER.info("the broker stores no queue data, fix the request offset {} to {}, Topic: {} QueueId: {} Consumer Group: {}",
requestHeader.getQueueOffset(),
getMessageResult.getNextBeginOffset(),
requestHeader.getTopic(),
requestHeader.getQueueId(),
requestHeader.getConsumerGroup()
);
} else {
response.setCode(ResponseCode.PULL_NOT_FOUND);
}
break;
case OFFSET_FOUND_NULL:
case OFFSET_OVERFLOW_ONE:
response.setCode(ResponseCode.PULL_NOT_FOUND);
break;
case OFFSET_OVERFLOW_BADLY:
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
// XXX: warn and notify me
LOGGER.info("the request offset: {} over flow badly, fix to {}, broker max offset: {}, consumer: {}",
requestHeader.getQueueOffset(), getMessageResult.getNextBeginOffset(), getMessageResult.getMaxOffset(), clientAddress);
break;
case OFFSET_RESET:
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
LOGGER.info("The queue under pulling was previously reset to start from {}",
getMessageResult.getNextBeginOffset());
break;
case OFFSET_TOO_SMALL:
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
LOGGER.info("the request offset too small. group={}, topic={}, requestOffset={}, brokerMinOffset={}, clientIp={}",
requestHeader.getConsumerGroup(), requestHeader.getTopic(), requestHeader.getQueueOffset(),
getMessageResult.getMinOffset(), clientAddress);
break;
default:
assert false;
break;
}
if (this.brokerController.getBrokerConfig().isSlaveReadEnable() && !this.brokerController.getBrokerConfig().isInBrokerContainer()) {
// consume too slow ,redirect to another machine
if (getMessageResult.isSuggestPullingFromSlave()) {
responseHeader.setSuggestWhichBrokerId(subscriptionGroupConfig.getWhichBrokerWhenConsumeSlowly());
}
// consume ok
else {
responseHeader.setSuggestWhichBrokerId(subscriptionGroupConfig.getBrokerId());
}
} else {
responseHeader.setSuggestWhichBrokerId(MixAll.MASTER_ID);
}
if (this.brokerController.getBrokerConfig().getBrokerId() != MixAll.MASTER_ID && !getMessageResult.isSuggestPullingFromSlave()) {
if (this.brokerController.getMinBrokerIdInGroup() == MixAll.MASTER_ID) {
LOGGER.debug("slave redirect pullRequest to master, topic: {}, queueId: {}, consumer group: {}, next: {}, min: {}, max: {}",
requestHeader.getTopic(),
requestHeader.getQueueId(),
requestHeader.getConsumerGroup(),
responseHeader.getNextBeginOffset(),
responseHeader.getMinOffset(),
responseHeader.getMaxOffset()
);
responseHeader.setSuggestWhichBrokerId(MixAll.MASTER_ID);
if (!getMessageResult.getStatus().equals(GetMessageStatus.FOUND)) {
response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
}
}
}
}
protected void executeConsumeMessageHookBefore(RemotingCommand request, PullMessageRequestHeader requestHeader,
GetMessageResult getMessageResult, boolean brokerAllowSuspend, int responseCode) {
if (this.hasConsumeMessageHook()) {
String owner = request.getExtFields().get(BrokerStatsManager.COMMERCIAL_OWNER);
String authType = request.getExtFields().get(BrokerStatsManager.ACCOUNT_AUTH_TYPE);
String ownerParent = request.getExtFields().get(BrokerStatsManager.ACCOUNT_OWNER_PARENT);
String ownerSelf = request.getExtFields().get(BrokerStatsManager.ACCOUNT_OWNER_SELF);
ConsumeMessageContext context = new ConsumeMessageContext();
context.setConsumerGroup(requestHeader.getConsumerGroup());
context.setTopic(requestHeader.getTopic());
context.setQueueId(requestHeader.getQueueId());
context.setAccountAuthType(authType);
context.setAccountOwnerParent(ownerParent);
context.setAccountOwnerSelf(ownerSelf);
context.setNamespace(NamespaceUtil.getNamespaceFromResource(requestHeader.getTopic()));
context.setFilterMessageCount(getMessageResult.getFilterMessageCount());
switch (responseCode) {
case ResponseCode.SUCCESS:
int commercialBaseCount = brokerController.getBrokerConfig().getCommercialBaseCount();
int incValue = getMessageResult.getMsgCount4Commercial() * commercialBaseCount;
context.setCommercialRcvStats(BrokerStatsManager.StatsType.RCV_SUCCESS);
context.setCommercialRcvTimes(incValue);
context.setCommercialRcvSize(getMessageResult.getBufferTotalSize());
context.setCommercialOwner(owner);
context.setRcvStat(BrokerStatsManager.StatsType.RCV_SUCCESS);
context.setRcvMsgNum(getMessageResult.getMessageCount());
context.setRcvMsgSize(getMessageResult.getBufferTotalSize());
context.setCommercialRcvMsgNum(getMessageResult.getMsgCount4Commercial());
break;
case ResponseCode.PULL_NOT_FOUND:
if (!brokerAllowSuspend) {
context.setCommercialRcvStats(BrokerStatsManager.StatsType.RCV_EPOLLS);
context.setCommercialRcvTimes(1);
context.setCommercialOwner(owner);
context.setRcvStat(BrokerStatsManager.StatsType.RCV_EPOLLS);
context.setRcvMsgNum(0);
context.setRcvMsgSize(0);
context.setCommercialRcvMsgNum(0);
}
break;
case ResponseCode.PULL_RETRY_IMMEDIATELY:
case ResponseCode.PULL_OFFSET_MOVED:
context.setCommercialRcvStats(BrokerStatsManager.StatsType.RCV_EPOLLS);
context.setCommercialRcvTimes(1);
context.setCommercialOwner(owner);
context.setRcvStat(BrokerStatsManager.StatsType.RCV_EPOLLS);
context.setRcvMsgNum(0);
context.setRcvMsgSize(0);
context.setCommercialRcvMsgNum(0);
break;
default:
assert false;
break;
}
for (ConsumeMessageHook hook : this.consumeMessageHookList) {
try {
hook.consumeMessageBefore(context);
} catch (Throwable ignored) {
}
}
}
}
protected void tryCommitOffset(boolean brokerAllowSuspend, PullMessageRequestHeader requestHeader,
long nextOffset, String clientAddress) {
this.brokerController.getConsumerOffsetManager().commitPullOffset(clientAddress,
requestHeader.getConsumerGroup(), requestHeader.getTopic(), requestHeader.getQueueId(), nextOffset);
boolean storeOffsetEnable = brokerAllowSuspend;
final boolean hasCommitOffsetFlag = PullSysFlag.hasCommitOffsetFlag(requestHeader.getSysFlag());
storeOffsetEnable = storeOffsetEnable && hasCommitOffsetFlag;
if (storeOffsetEnable) {
this.brokerController.getConsumerOffsetManager().commitOffset(clientAddress, requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getCommitOffset());
}
}
public void executeRequestWhenWakeup(final Channel channel, final RemotingCommand request) {
Runnable run = () -> {
try {
boolean brokerAllowFlowCtrSuspend = !(request.getExtFields() != null && request.getExtFields().containsKey(ColdDataPullRequestHoldService.NO_SUSPEND_KEY));
final RemotingCommand response = PullMessageProcessor.this.processRequest(channel, request, false, brokerAllowFlowCtrSuspend);
if (response != null) {
response.setOpaque(request.getOpaque());
response.markResponseType();
try {
NettyRemotingAbstract.writeResponse(channel, request, response, future -> {
if (!future.isSuccess()) {
LOGGER.error("processRequestWrapper response to {} failed", channel.remoteAddress(), future.cause());
LOGGER.error(request.toString());
LOGGER.error(response.toString());
}
}, brokerController.getBrokerMetricsManager().getRemotingMetricsManager());
} catch (Throwable e) {
LOGGER.error("processRequestWrapper process request over, but response failed", e);
LOGGER.error(request.toString());
LOGGER.error(response.toString());
}
}
} catch (RemotingCommandException e1) {
LOGGER.error("executeRequestWhenWakeup run", e1);
}
};
this.brokerController.getPullMessageExecutor().submit(new RequestTask(run, channel, request));
}
public void registerConsumeMessageHook(List<ConsumeMessageHook> consumeMessageHookList) {
this.consumeMessageHookList = consumeMessageHookList;
}
public void setPullMessageResultHandler(PullMessageResultHandler pullMessageResultHandler) {
this.pullMessageResultHandler = pullMessageResultHandler;
}
private boolean isBroadcast(boolean proxyPullBroadcast, ConsumerGroupInfo consumerGroupInfo) {
return proxyPullBroadcast ||
consumerGroupInfo != null
&& MessageModel.BROADCASTING.equals(consumerGroupInfo.getMessageModel())
&& ConsumeType.CONSUME_PASSIVELY.equals(consumerGroupInfo.getConsumeType());
}
protected void updateBroadcastPulledOffset(String topic, String group, int queueId,
PullMessageRequestHeader requestHeader, Channel channel, RemotingCommand response, long nextBeginOffset) {
if (response == null || !this.brokerController.getBrokerConfig().isEnableBroadcastOffsetStore()) {
return;
}
boolean proxyPullBroadcast = Objects.equals(
RequestSource.PROXY_FOR_BROADCAST.getValue(), requestHeader.getRequestSource());
ConsumerGroupInfo consumerGroupInfo = this.brokerController.getConsumerManager().getConsumerGroupInfo(group);
if (isBroadcast(proxyPullBroadcast, consumerGroupInfo)) {
long offset = requestHeader.getQueueOffset();
if (ResponseCode.PULL_OFFSET_MOVED == response.getCode()) {
offset = nextBeginOffset;
}
String clientId;
if (proxyPullBroadcast) {
clientId = requestHeader.getProxyFrowardClientId();
} else {
ClientChannelInfo clientChannelInfo = consumerGroupInfo.findChannel(channel);
if (clientChannelInfo == null) {
return;
}
clientId = clientChannelInfo.getClientId();
}
this.brokerController.getBroadcastOffsetManager()
.updateOffset(topic, group, queueId, offset, clientId, proxyPullBroadcast);
}
}
    /**
     * Queries the initial offset for a broadcast-mode pull from the broadcast
     * offset manager.
     *
     * @return the stored initial offset, or {@code -1} when the request is not
     *         a broadcast pull, the broadcast offset store is disabled, or the
     *         client cannot be resolved from the channel
     * @throws RemotingCommandException if the underlying offset query fails
     */
    protected long queryBroadcastPullInitOffset(String topic, String group, int queueId,
        PullMessageRequestHeader requestHeader, Channel channel) throws RemotingCommandException {
        if (!this.brokerController.getBrokerConfig().isEnableBroadcastOffsetStore()) {
            return -1L;
        }
        ConsumerGroupInfo consumerGroupInfo = this.brokerController.getConsumerManager().getConsumerGroupInfo(group);
        // Requests forwarded by the proxy on behalf of broadcast consumers carry a dedicated request source.
        boolean proxyPullBroadcast = Objects.equals(
            RequestSource.PROXY_FOR_BROADCAST.getValue(), requestHeader.getRequestSource());
        if (isBroadcast(proxyPullBroadcast, consumerGroupInfo)) {
            String clientId;
            if (proxyPullBroadcast) {
                // Proxy requests carry the originating client's id in the header.
                clientId = requestHeader.getProxyFrowardClientId();
            } else {
                ClientChannelInfo clientChannelInfo = consumerGroupInfo.findChannel(channel);
                if (clientChannelInfo == null) {
                    return -1;
                }
                clientId = clientChannelInfo.getClientId();
            }
            try {
                return this.brokerController.getBroadcastOffsetManager()
                    .queryInitOffset(topic, group, queueId, clientId, requestHeader.getQueueOffset(), proxyPullBroadcast);
            } catch (ConsumeQueueException e) {
                throw new RemotingCommandException("Failed to query initial offset", e);
            }
        }
        return -1L;
    }
}
|
PullMessageProcessor
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/hive/HiveCreateTableTest_27.java
|
{
"start": 917,
"end": 6442
}
|
/**
 * Round-trip test for a Hive {@code CREATE EXTERNAL TABLE} statement that uses
 * ROW FORMAT DELIMITED, STORED AS INPUTFORMAT/OUTPUTFORMAT, an OSS LOCATION and
 * TBLPROPERTIES: the statement must parse to exactly one statement, re-render
 * in normalized form, and expose its table/columns via the schema visitor.
 */
class ____ extends OracleTest {
    public void test_0() throws Exception {
        String sql = //
                "CREATE EXTERNAL TABLE `customer_case.tradelist_csv`(\n" +
                "  `t_userid` string COMMENT '??ID', \n" +
                "  `t_dealdate` string COMMENT '????', \n" +
                "  `t_businflag` string COMMENT '????', \n" +
                "  `t_cdate` string COMMENT '????', \n" +
                "  `t_date` string COMMENT '????', \n" +
                "  `t_serialno` string COMMENT '????', \n" +
                "  `t_agencyno` string COMMENT '?????', \n" +
                "  `t_netno` string COMMENT '????', \n" +
                "  `t_fundacco` string COMMENT '????', \n" +
                "  `t_tradeacco` string COMMENT '????', \n" +
                "  `t_fundcode` string COMMENT '????', \n" +
                "  `t_sharetype` string COMMENT '????', \n" +
                "  `t_confirmbalance` double COMMENT '????', \n" +
                "  `t_tradefare` double COMMENT '???', \n" +
                "  `t_backfare` double COMMENT '?????', \n" +
                "  `t_otherfare1` double COMMENT '????1', \n" +
                "  `t_remark` string COMMENT '??')\n" +
                "ROW FORMAT DELIMITED \n" +
                "  FIELDS TERMINATED BY ',' \n" +
                "STORED AS INPUTFORMAT \n" +
                "  'org.apache.hadoop.mapred.TextInputFormat' \n" +
                "OUTPUTFORMAT \n" +
                "  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'\n" +
                "LOCATION\n" +
                "  'oss://acs:ram::1:role&role@oss-cn-hangzhou-for-openanalytics/datasets/basic/customer_20180526/trade'\n" +
                "TBLPROPERTIES (\n" +
                "  'COLUMN_STATS_ACCURATE'='false', \n" +
                "  'numFiles'='1', \n" +
                "  'numRows'='-1', \n" +
                "  'rawDataSize'='-1', \n" +
                "  'totalSize'='1870175', \n" +
                "  'transient_lastDdlTime'='1527408051')";
        // Parse with the Hive dialect; the DDL must produce exactly one statement.
        List<SQLStatement> statementList = SQLUtils.toStatementList(sql, JdbcConstants.HIVE);
        SQLStatement stmt = statementList.get(0);
        System.out.println(stmt.toString());
        assertEquals(1, statementList.size());
        SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.HIVE);
        stmt.accept(visitor);
        {
            // Re-render the parsed AST and compare against the normalized form
            // (tab indentation, spaces around '=' in TBLPROPERTIES, etc.).
            String text = SQLUtils.toSQLString(stmt, JdbcConstants.HIVE);
            assertEquals("CREATE EXTERNAL TABLE `customer_case.tradelist_csv` (\n" +
                    "\t`t_userid` string COMMENT '??ID',\n" +
                    "\t`t_dealdate` string COMMENT '????',\n" +
                    "\t`t_businflag` string COMMENT '????',\n" +
                    "\t`t_cdate` string COMMENT '????',\n" +
                    "\t`t_date` string COMMENT '????',\n" +
                    "\t`t_serialno` string COMMENT '????',\n" +
                    "\t`t_agencyno` string COMMENT '?????',\n" +
                    "\t`t_netno` string COMMENT '????',\n" +
                    "\t`t_fundacco` string COMMENT '????',\n" +
                    "\t`t_tradeacco` string COMMENT '????',\n" +
                    "\t`t_fundcode` string COMMENT '????',\n" +
                    "\t`t_sharetype` string COMMENT '????',\n" +
                    "\t`t_confirmbalance` double COMMENT '????',\n" +
                    "\t`t_tradefare` double COMMENT '???',\n" +
                    "\t`t_backfare` double COMMENT '?????',\n" +
                    "\t`t_otherfare1` double COMMENT '????1',\n" +
                    "\t`t_remark` string COMMENT '??'\n" +
                    ")\n" +
                    "ROW FORMAT DELIMITED\n" +
                    "\tFIELDS TERMINATED BY ','\n" +
                    "STORED AS\n" +
                    "\tINPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'\n" +
                    "\tOUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'\n" +
                    "LOCATION 'oss://acs:ram::1:role&role@oss-cn-hangzhou-for-openanalytics/datasets/basic/customer_20180526/trade'\n" +
                    "TBLPROPERTIES (\n" +
                    "\t'COLUMN_STATS_ACCURATE' = 'false',\n" +
                    "\t'numFiles' = '1',\n" +
                    "\t'numRows' = '-1',\n" +
                    "\t'rawDataSize' = '-1',\n" +
                    "\t'totalSize' = '1870175',\n" +
                    "\t'transient_lastDdlTime' = '1527408051'\n" +
                    ")", text);
        }
        // Schema statistics gathered by the visitor: one table, 17 columns,
        // no conditions/relationships/order-by for a pure DDL statement.
        System.out.println("Tables : " + visitor.getTables());
        System.out.println("fields : " + visitor.getColumns());
        System.out.println("coditions : " + visitor.getConditions());
        System.out.println("relationships : " + visitor.getRelationships());
        System.out.println("orderBy : " + visitor.getOrderByColumns());
        assertEquals(1, visitor.getTables().size());
        assertEquals(17, visitor.getColumns().size());
        assertEquals(0, visitor.getConditions().size());
        assertEquals(0, visitor.getRelationships().size());
        assertEquals(0, visitor.getOrderByColumns().size());
        assertTrue(visitor.containsTable("customer_case.tradelist_csv"));
    }
}
|
HiveCreateTableTest_27
|
java
|
apache__logging-log4j2
|
log4j-taglib/src/main/java/org/apache/logging/log4j/taglib/EntryTag.java
|
{
"start": 1149,
"end": 2169
}
|
class ____ extends LoggerAwareTagSupport implements DynamicAttributes {
    private static final long serialVersionUID = 1L;

    private static final String FQCN = EntryTag.class.getName();

    // Values of the tag's dynamic attributes, in declaration order.
    private List<Object> attributes;

    @Override
    protected void init() {
        super.init();
        // Reuse the list across tag invocations when possible.
        if (this.attributes != null) {
            this.attributes.clear();
        } else {
            this.attributes = new ArrayList<>();
        }
    }

    @Override
    public void setDynamicAttribute(final String uri, final String name, final Object value) {
        this.attributes.add(value);
    }

    @Override
    public int doEndTag() throws JspException {
        final Log4jTaglibLogger logger = this.getLogger();
        if (TagUtils.isEnabled(logger, Level.TRACE, null)) {
            logEntry(logger);
        }
        return Tag.EVAL_PAGE;
    }

    /** Emits the entry event, with the collected attributes when any were set. */
    private void logEntry(final Log4jTaglibLogger logger) {
        if (this.attributes.isEmpty()) {
            logger.entry(FQCN);
        } else {
            logger.entry(FQCN, this.attributes.toArray());
        }
    }
}
|
EntryTag
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java
|
{
"start": 2605,
"end": 3452
}
|
class ____ extends AbstractMultiClustersTestCase {
private static final String REMOTE_CLUSTER = "cluster_a";
private static long EARLIEST_TIMESTAMP = 1691348810000L;
private static long LATEST_TIMESTAMP = 1691348820000L;
@Override
protected List<String> remoteClusterAlias() {
return List.of(REMOTE_CLUSTER);
}
@Override
protected Map<String, Boolean> skipUnavailableForRemoteClusters() {
return Map.of(REMOTE_CLUSTER, randomBoolean());
}
@Override
protected boolean reuseClusters() {
return false;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), CrossClusterSearchIT.TestQueryBuilderPlugin.class);
}
public static
|
CrossClusterSearchIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/sql/CustomSQLTest.java
|
{
"start": 3739,
"end": 4545
}
|
class ____ {
	@Id
	@GeneratedValue
	private Long id;
	private String name;
	// Custom CRUD SQL for the element collection: inserts mark rows valid,
	// "delete all" soft-deletes by clearing the valid flag, and the
	// restriction hides soft-deleted rows from loads.
	@ElementCollection
	@SQLInsert(
		sql = "INSERT INTO person_phones (person_id, phones, valid) VALUES (?, ?, true) ")
	@SQLDeleteAll(
		sql = "UPDATE person_phones SET valid = false WHERE person_id = ?")
	@SQLRestriction("valid = true")
	private List<String> phones = new ArrayList<>();
	//Getters and setters are omitted for brevity
	//end::sql-custom-crud-example[]
	public Long getId() {
		return id;
	}
	public void setId(Long id) {
		this.id = id;
	}
	public String getName() {
		return name;
	}
	public void setName(String name) {
		this.name = name;
	}
	public List<String> getPhones() {
		return phones;
	}
	//tag::sql-custom-crud-example[]
}
//end::sql-custom-crud-example[]
}
|
Person
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/loading/multiLoad/FindMultipleFetchProfileTest.java
|
{
"start": 1072,
"end": 2129
}
|
class ____ {
@Test void test(SessionFactoryScope scope) {
scope.inTransaction(s-> {
Owner gavin = new Owner("gavin");
s.persist(gavin);
s.persist(new Record(123L,gavin,"hello earth"));
s.persist(new Record(456L,gavin,"hello mars"));
});
scope.inTransaction(s-> {
List<Record> all = s.findMultiple(Record.class, List.of(456L, 123L, 2L));
assertEquals("hello mars",all.get(0).message);
assertEquals("hello earth",all.get(1).message);
assertNull(all.get(2));
assertFalse(Hibernate.isInitialized(all.get(0).owner));
assertFalse(Hibernate.isInitialized(all.get(1).owner));
});
scope.inTransaction(s-> {
List<Record> all = s.findMultiple(Record.class, List.of(456L, 123L),
new EnabledFetchProfile("withOwner"));
assertEquals("hello mars",all.get(0).message);
assertEquals("hello earth",all.get(1).message);
assertTrue(Hibernate.isInitialized(all.get(0).owner));
assertTrue(Hibernate.isInitialized(all.get(1).owner));
});
}
@Entity
@FetchProfile(name = "withOwner")
static
|
FindMultipleFetchProfileTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/PriorityProcessWorkerExecutorServiceTests.java
|
{
"start": 1063,
"end": 9304
}
|
class ____ extends ESTestCase {
private final ThreadPool threadPool = new TestThreadPool("PriorityProcessWorkerExecutorServiceTests");
@After
public void stopThreadPool() {
terminate(threadPool);
}
public void testQueueCapacityReached() {
var executor = createProcessWorkerExecutorService(2);
var counter = new AtomicInteger();
var r1 = new RunOrderValidator(1, counter);
executor.executeWithPriority(r1, RequestPriority.NORMAL, 100L);
var r2 = new RunOrderValidator(2, counter);
executor.executeWithPriority(r2, RequestPriority.NORMAL, 101L);
var r3 = new RunOrderValidator(3, counter);
executor.executeWithPriority(r3, RequestPriority.NORMAL, 101L);
assertTrue(r1.initialized);
assertTrue(r2.initialized);
assertTrue(r3.initialized);
assertTrue(r3.hasBeenRejected);
assertFalse(r1.hasBeenRejected);
assertFalse(r2.hasBeenRejected);
}
public void testQueueCapacityReached_HighestPriority() {
var executor = createProcessWorkerExecutorService(2);
var counter = new AtomicInteger();
executor.executeWithPriority(new RunOrderValidator(1, counter), RequestPriority.NORMAL, 100L);
executor.executeWithPriority(new RunOrderValidator(2, counter), RequestPriority.NORMAL, 102L);
// queue is now full
var r3 = new RunOrderValidator(3, counter);
executor.executeWithPriority(r3, RequestPriority.HIGH, 103L);
var highestPriorityAlwaysAccepted = new RunOrderValidator(4, counter);
executor.executeWithPriority(highestPriorityAlwaysAccepted, RequestPriority.HIGHEST, 104L);
var r5 = new RunOrderValidator(5, counter);
executor.executeWithPriority(r5, RequestPriority.NORMAL, 105L);
assertTrue(r3.initialized);
assertTrue(r3.hasBeenRejected);
assertTrue(highestPriorityAlwaysAccepted.initialized);
assertFalse(highestPriorityAlwaysAccepted.hasBeenRejected);
assertTrue(r5.initialized);
assertTrue(r5.hasBeenRejected);
}
public void testOrderedRunnables_NormalPriority() {
var executor = createProcessWorkerExecutorService(100);
var counter = new AtomicInteger();
var r1 = new RunOrderValidator(1, counter);
executor.executeWithPriority(r1, RequestPriority.NORMAL, 100L);
var r2 = new RunOrderValidator(2, counter);
executor.executeWithPriority(r2, RequestPriority.NORMAL, 101L);
var r3 = new RunOrderValidator(3, counter);
executor.executeWithPriority(r3, RequestPriority.NORMAL, 102L);
// final action stops the executor
executor.executeWithPriority(new ShutdownExecutorRunnable(executor), RequestPriority.NORMAL, 10000L);
executor.start();
assertTrue(r1.initialized);
assertTrue(r2.initialized);
assertTrue(r3.initialized);
assertTrue(r1.hasBeenRun);
assertTrue(r2.hasBeenRun);
assertTrue(r3.hasBeenRun);
}
public void testExecutorShutsDownAfterCompletingWork() {
var executor = createProcessWorkerExecutorService(100);
var counter = new AtomicInteger();
var r1 = new RunOrderValidator(1, counter);
executor.executeWithPriority(r1, RequestPriority.NORMAL, 100L);
var r2 = new RunOrderValidator(2, counter);
executor.executeWithPriority(r2, RequestPriority.NORMAL, 101L);
var r3 = new RunOrderValidator(3, counter);
executor.executeWithPriority(r3, RequestPriority.NORMAL, 102L);
runExecutorAndAssertTermination(executor);
assertTrue(r1.initialized);
assertTrue(r2.initialized);
assertTrue(r3.initialized);
assertTrue(r1.hasBeenRun);
assertTrue(r2.hasBeenRun);
assertTrue(r3.hasBeenRun);
}
private void runExecutorAndAssertTermination(PriorityProcessWorkerExecutorService executor) {
Future<?> executorTermination = threadPool.generic().submit(() -> {
try {
executor.shutdown();
executor.awaitTermination(1, TimeUnit.MINUTES);
} catch (Exception e) {
fail(Strings.format("Failed to gracefully shutdown executor: %s", e.getMessage()));
}
});
executor.start();
try {
executorTermination.get(1, TimeUnit.SECONDS);
} catch (Exception e) {
fail(Strings.format("Executor finished before it was signaled to shutdown gracefully"));
}
assertTrue(executor.isShutdown());
assertTrue(executor.isTerminated());
}
public void testOrderedRunnables_MixedPriorities() {
var executor = createProcessWorkerExecutorService(100);
assertThat(RequestPriority.HIGH.compareTo(RequestPriority.NORMAL), lessThan(0));
var counter = new AtomicInteger();
long requestId = 1;
List<RunOrderValidator> validators = new ArrayList<>();
validators.add(new RunOrderValidator(2, counter));
validators.add(new RunOrderValidator(3, counter));
validators.add(new RunOrderValidator(4, counter));
validators.add(new RunOrderValidator(1, counter)); // high priority request runs first
validators.add(new RunOrderValidator(5, counter));
validators.add(new RunOrderValidator(6, counter));
executor.executeWithPriority(validators.get(0), RequestPriority.NORMAL, requestId++);
executor.executeWithPriority(validators.get(1), RequestPriority.NORMAL, requestId++);
executor.executeWithPriority(validators.get(2), RequestPriority.NORMAL, requestId++);
executor.executeWithPriority(validators.get(3), RequestPriority.HIGH, requestId++);
executor.executeWithPriority(validators.get(4), RequestPriority.NORMAL, requestId++);
executor.executeWithPriority(validators.get(5), RequestPriority.NORMAL, requestId++);
// final action stops the executor
executor.executeWithPriority(new ShutdownExecutorRunnable(executor), RequestPriority.NORMAL, 10000L);
executor.start();
for (var validator : validators) {
assertTrue(validator.hasBeenRun);
}
}
public void testNotifyQueueRunnables_notifiesAllQueuedRunnables() throws InterruptedException {
notifyQueueRunnables(false);
}
public void testNotifyQueueRunnables_notifiesAllQueuedRunnables_withError() throws InterruptedException {
notifyQueueRunnables(true);
}
private void notifyQueueRunnables(boolean withError) {
int queueSize = 10;
var executor = createProcessWorkerExecutorService(queueSize);
List<QueueDrainingRunnable> runnables = new ArrayList<>(queueSize);
// First fill the queue
for (int i = 0; i < queueSize; ++i) {
QueueDrainingRunnable runnable = new QueueDrainingRunnable();
runnables.add(runnable);
executor.executeWithPriority(runnable, RequestPriority.NORMAL, i);
}
assertThat(executor.queueSize(), is(queueSize));
// Set the executor to be stopped
if (withError) {
executor.shutdownNowWithError(new Exception());
} else {
executor.shutdownNow();
}
// Start the executor, which will cause notifyQueueRunnables() to be called immediately since the executor is already stopped
executor.start();
// Confirm that all the runnables were notified
for (QueueDrainingRunnable runnable : runnables) {
assertThat(runnable.initialized, is(true));
assertThat(runnable.hasBeenRun, is(false));
assertThat(runnable.hasBeenRejected, not(withError));
assertThat(runnable.hasBeenFailed, is(withError));
}
assertThat(executor.queueSize(), is(0));
}
private PriorityProcessWorkerExecutorService createProcessWorkerExecutorService(int queueSize) {
return new PriorityProcessWorkerExecutorService(
threadPool.getThreadContext(),
"PriorityProcessWorkerExecutorServiceTests",
queueSize
);
}
private static
|
PriorityProcessWorkerExecutorServiceTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
|
{
"start": 19854,
"end": 26200
}
|
class ____ {
  private String pid; // process-id
  private String name; // command name
  private Integer pgrpId; // process group-id
  private String ppid; // parent process-id
  private Integer sessionId; // session-id
  private Long vmem; // virtual memory usage
  private Long rssmemPage; // rss memory usage in # of pages
  private Long utime = 0L; // # of jiffies in user mode
  // Shared constant (was a per-instance field): guards the overflow check in updateJiffy().
  private static final BigInteger MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);
  private BigInteger stime = BigInteger.ZERO; // # of jiffies in kernel mode
  // how many times has this process been seen alive
  private int age;
  // # of jiffies used since last update:
  private Long dtime = 0L;
  // dtime = (utime + stime) - (utimeOld + stimeOld)
  // We need this to compute the cumulative CPU time
  // because the subprocess may finish earlier than root process
  private List<ProcessInfo> children = new ArrayList<ProcessInfo>(); // list of children

  public ProcessInfo(String pid) {
    this.pid = pid;
    // seeing this the first time.
    this.age = 1;
  }

  public String getPid() {
    return pid;
  }

  public String getName() {
    return name;
  }

  public Integer getPgrpId() {
    return pgrpId;
  }

  public String getPpid() {
    return ppid;
  }

  public Integer getSessionId() {
    return sessionId;
  }

  public Long getVmem() {
    return vmem;
  }

  public Long getUtime() {
    return utime;
  }

  public BigInteger getStime() {
    return stime;
  }

  public Long getDtime() {
    return dtime;
  }

  public Long getRssmemPage() { // get rss # of pages
    return rssmemPage;
  }

  public int getAge() {
    return age;
  }

  /** Overwrites this record with values freshly parsed from procfs. */
  public void updateProcessInfo(String name, String ppid, Integer pgrpId,
      Integer sessionId, Long utime, BigInteger stime, Long vmem, Long rssmem) {
    this.name = name;
    this.ppid = ppid;
    this.pgrpId = pgrpId;
    this.sessionId = sessionId;
    this.utime = utime;
    this.stime = stime;
    this.vmem = vmem;
    this.rssmemPage = rssmem;
  }

  /**
   * Recomputes {@code dtime}, the jiffies consumed since the previous
   * snapshot. With no previous snapshot the total consumed so far is used,
   * clamped to 0 (with a warning) if utime + stime overflows a long.
   */
  public void updateJiffy(ProcessInfo oldInfo) {
    if (oldInfo == null) {
      BigInteger sum = this.stime.add(BigInteger.valueOf(this.utime));
      if (sum.compareTo(MAX_LONG) > 0) {
        this.dtime = 0L;
        LOG.warn("Sum of stime (" + this.stime + ") and utime (" + this.utime
            + ") is greater than " + Long.MAX_VALUE);
      } else {
        this.dtime = sum.longValue();
      }
      return;
    }
    this.dtime = (this.utime - oldInfo.utime +
        this.stime.subtract(oldInfo.stime).longValue());
  }

  public void updateAge(ProcessInfo oldInfo) {
    this.age = oldInfo.age + 1;
  }

  public boolean addChild(ProcessInfo p) {
    return children.add(p);
  }

  public List<ProcessInfo> getChildren() {
    return children;
  }

  /**
   * Reads this process's command line from {@code <procfsDir>/<pid>/cmdline}.
   *
   * @param procfsDir root of the procfs file system (normally {@code /proc})
   * @return the command line with NUL separators replaced by spaces, or
   *         {@code "N/A"} when it cannot be read (process exited, empty
   *         cmdline of a zombie/swapped-out process, or an I/O error)
   */
  public String getCmdLine(String procfsDir) {
    if (pid == null) {
      return "N/A";
    }
    File cmdlineFile =
        new File(new File(procfsDir, pid), PROCFS_CMDLINE_FILE);
    // try-with-resources replaces the original hand-rolled close() nesting
    // and guarantees the reader is closed on every path.
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        new FileInputStream(cmdlineFile), StandardCharsets.UTF_8))) {
      String ret = in.readLine(); // only one line
      if (ret == null) {
        return "N/A";
      }
      ret = ret.replace('\0', ' '); // Replace each null char with a space
      if (ret.isEmpty()) {
        // The cmdline might be empty because the process is swapped out or
        // is a zombie.
        return "N/A";
      }
      return ret;
    } catch (FileNotFoundException f) {
      // The process vanished in the interim!
      return "N/A";
    } catch (IOException io) {
      LOG.warn("Error reading the stream", io);
      return "N/A";
    }
  }
}
/**
* Update memory related information
*
* @param pInfo
* @param procfsDir
*/
private static void constructProcessSMAPInfo(ProcessTreeSmapMemInfo pInfo,
String procfsDir) {
BufferedReader in = null;
InputStreamReader fReader = null;
try {
File pidDir = new File(procfsDir, pInfo.getPid());
File file = new File(pidDir, SMAPS);
if (!file.exists()) {
return;
}
fReader = new InputStreamReader(
new FileInputStream(file), StandardCharsets.UTF_8);
in = new BufferedReader(fReader);
ProcessSmapMemoryInfo memoryMappingInfo = null;
List<String> lines = IOUtils.readLines(in);
for (String line : lines) {
line = line.trim();
try {
Matcher address = ADDRESS_PATTERN.matcher(line);
if (address.find()) {
memoryMappingInfo = new ProcessSmapMemoryInfo(line);
memoryMappingInfo.setPermission(address.group(4));
pInfo.getMemoryInfoList().add(memoryMappingInfo);
continue;
}
Matcher memInfo = MEM_INFO_PATTERN.matcher(line);
if (memInfo.find()) {
String key = memInfo.group(1).trim();
String value = memInfo.group(2).replace(KB, "").trim();
LOG.debug("MemInfo : {} : Value : {}", key, value);
if (memoryMappingInfo != null) {
memoryMappingInfo.setMemInfo(key, value);
}
}
} catch (Throwable t) {
LOG
.warn("Error parsing smaps line : " + line + "; " + t.getMessage());
}
}
} catch (FileNotFoundException f) {
LOG.error(f.toString());
} catch (IOException e) {
LOG.error(e.toString());
} catch (Throwable t) {
LOG.error(t.toString());
} finally {
org.apache.hadoop.io.IOUtils.closeStream(in);
}
}
/**
* Placeholder for process's SMAPS information
*/
static
|
ProcessInfo
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/logging/logback/RootLogLevelConfiguratorTests.java
|
{
"start": 1027,
"end": 1458
}
|
class ____ {

	@Test
	void shouldSetRootLogLevelToInfo() {
		// A fresh Logback context starts with the root logger at DEBUG.
		LoggerContext context = new LoggerContext();
		Logger rootLogger = context.getLogger(Logger.ROOT_LOGGER_NAME);
		assertThat(rootLogger.getLevel()).isEqualTo(Level.DEBUG);
		// The configurator must raise it to INFO and let the next configurator run.
		ExecutionStatus status = new RootLogLevelConfigurator().configure(context);
		assertThat(status).isEqualTo(ExecutionStatus.INVOKE_NEXT_IF_ANY);
		assertThat(rootLogger.getLevel()).isEqualTo(Level.INFO);
	}

}
|
RootLogLevelConfiguratorTests
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java
|
{
"start": 1484,
"end": 4084
}
|
class ____ extends ValuesSourceAggregationBuilder.SingleMetricAggregationBuilder<MaxAggregationBuilder> {
    // Aggregation name as it appears in search requests.
    public static final String NAME = "max";
    // Registry key under which per-values-source aggregator implementations are registered.
    public static final ValuesSourceRegistry.RegistryKey<MetricAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
        NAME,
        MetricAggregatorSupplier.class
    );
    // Request parser; field declarations are shared with the other single-metric builders.
    public static final ObjectParser<MaxAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(NAME, MaxAggregationBuilder::new);
    static {
        ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
    }
    public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
        MaxAggregatorFactory.registerAggregators(builder);
    }
    public MaxAggregationBuilder(String name) {
        super(name);
    }
    protected MaxAggregationBuilder(
        MaxAggregationBuilder clone,
        AggregatorFactories.Builder factoriesBuilder,
        Map<String, Object> metadata
    ) {
        super(clone, factoriesBuilder, metadata);
    }
    @Override
    protected ValuesSourceType defaultValueSourceType() {
        // Max aggregates over numeric values unless the field mapping says otherwise.
        return CoreValuesSourceType.NUMERIC;
    }
    @Override
    protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
        return new MaxAggregationBuilder(this, factoriesBuilder, metadata);
    }
    /**
     * Read from a stream.
     */
    public MaxAggregationBuilder(StreamInput in) throws IOException {
        super(in);
    }
    @Override
    public boolean supportsSampling() {
        return true;
    }
    @Override
    protected void innerWriteTo(StreamOutput out) {
        // Do nothing, no extra state to write to stream
    }
    @Override
    protected MaxAggregatorFactory innerBuild(
        AggregationContext context,
        ValuesSourceConfig config,
        AggregatorFactory parent,
        AggregatorFactories.Builder subFactoriesBuilder
    ) throws IOException {
        // Look up the aggregator implementation registered for this values-source type.
        MetricAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
        return new MaxAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier);
    }
    @Override
    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
        // No builder-specific body beyond the shared values-source fields.
        return builder;
    }
    @Override
    public String getType() {
        return NAME;
    }
    @Override
    public TransportVersion getMinimalSupportedVersion() {
        return TransportVersion.zero();
    }
}
|
MaxAggregationBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/id/RootEntity.java
|
{
"start": 348,
"end": 1025
}
|
class ____ implements Serializable {
	// Database-generated identity primary key.
	@Id
	@GeneratedValue(strategy= GenerationType.IDENTITY)
	@Column(name = "universalid")// "uid" is a keyword in Oracle
	private long uid;
	public String description;
	// Inverse side of the association; RelatedEntity.linkedRoot owns the FK.
	@jakarta.persistence.OneToMany(mappedBy = "linkedRoot")
	private java.util.List<RelatedEntity> linkedEntities = new java.util.ArrayList<RelatedEntity>();
	public long getUid() {
		return uid;
	}
	public void setUid(long uid) {
		this.uid = uid;
	}
	public void setLinkedEntities(java.util.List<RelatedEntity> linkedEntities) {
		this.linkedEntities = linkedEntities;
	}
	public java.util.List<RelatedEntity> getLinkedEntities() {
		return linkedEntities;
	}
}
|
RootEntity
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/LicenseAware.java
|
{
"start": 353,
"end": 551
}
|
/**
 * Implemented by components whose execution is gated on the active X-Pack
 * license state.
 */
interface ____ {
    /** Return true if the implementer can be executed under the provided {@link XPackLicenseState}, otherwise false. */
    boolean licenseCheck(XPackLicenseState state);
}
|
LicenseAware
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/annotation/web/configurers/AuthorizeHttpRequestsConfigurer.java
|
{
"start": 7132,
"end": 9658
}
|
class ____
		extends AbstractRequestMatcherRegistry<AuthorizedUrl> {

	// Accumulates matcher -> AuthorizationManager mappings in registration order.
	private final RequestMatcherDelegatingAuthorizationManager.Builder managerBuilder = RequestMatcherDelegatingAuthorizationManager
		.builder();

	// Matchers returned by chainRequestMatchers() that have not yet been
	// completed with an access rule; non-null here means a dangling mapping.
	private List<RequestMatcher> unmappedMatchers;

	// Number of completed mappings; at least one is required at build time.
	private int mappingCount;

	private AuthorizationManagerRequestMatcherRegistry(ApplicationContext context) {
		setApplicationContext(context);
	}

	// Appends a completed mapping and clears any pending unmapped matchers.
	private void addMapping(RequestMatcher matcher,
			AuthorizationManager<? super RequestAuthorizationContext> manager) {
		this.unmappedMatchers = null;
		this.managerBuilder.add(matcher, manager);
		this.mappingCount++;
	}

	// Inserts a completed mapping ahead of all existing ones (highest precedence).
	private void addFirst(RequestMatcher matcher,
			AuthorizationManager<? super RequestAuthorizationContext> manager) {
		this.unmappedMatchers = null;
		this.managerBuilder.mappings((m) -> m.add(0, new RequestMatcherEntry<>(matcher, manager)));
		this.mappingCount++;
	}

	// Builds the final AuthorizationManager, failing fast on incomplete or empty mappings.
	private AuthorizationManager<HttpServletRequest> createAuthorizationManager() {
		Assert.state(this.unmappedMatchers == null,
				() -> "An incomplete mapping was found for " + this.unmappedMatchers
						+ ". Try completing it with something like requestUrls().<something>.hasRole('USER')");
		Assert.state(this.mappingCount > 0,
				"At least one mapping is required (for example, authorizeHttpRequests().anyRequest().authenticated())");
		AuthorizationManager<HttpServletRequest> manager = postProcess(
				(AuthorizationManager<HttpServletRequest>) this.managerBuilder.build());
		return AuthorizeHttpRequestsConfigurer.this.postProcessor.postProcess(manager);
	}

	@Override
	protected AuthorizedUrl chainRequestMatchers(List<RequestMatcher> requestMatchers) {
		// Remember the matchers as unmapped until the returned AuthorizedUrl
		// completes them with an access rule.
		this.unmappedMatchers = requestMatchers;
		return new AuthorizedUrl(requestMatchers, AuthorizeHttpRequestsConfigurer.this.authorizationManagerFactory);
	}

	/**
	 * Adds an {@link ObjectPostProcessor} for this class.
	 * @param objectPostProcessor the {@link ObjectPostProcessor} to use
	 * @return the {@link AuthorizationManagerRequestMatcherRegistry} for further
	 * customizations
	 */
	public AuthorizationManagerRequestMatcherRegistry withObjectPostProcessor(
			ObjectPostProcessor<?> objectPostProcessor) {
		addObjectPostProcessor(objectPostProcessor);
		return this;
	}

}
/**
* An object that allows configuring the {@link AuthorizationManager} for
* {@link RequestMatcher}s.
*
* @author Evgeniy Cheban
* @author Josh Cummings
*/
public
|
AuthorizationManagerRequestMatcherRegistry
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/SelfAssertionTest.java
|
{
"start": 937,
"end": 1640
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(SelfAssertion.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"SelfAssertionPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
/**
* Positive test cases for SelfAssertion check.
*
* @author bhagwani@google.com (Sumit Bhagwani)
*/
public
|
SelfAssertionTest
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/resource/ResourceTypeTest.java
|
{
"start": 991,
"end": 3648
}
|
class ____ {
    // Resource type constant under test.
    private final ResourceType resourceType;
    // Expected wire code of the constant.
    private final int code;
    // Expected lower-case string name.
    private final String name;
    // Whether the constant is expected to report isUnknown() == true.
    private final boolean unknown;

    AclResourceTypeTestInfo(ResourceType resourceType, int code, String name, boolean unknown) {
        this.resourceType = resourceType;
        this.code = code;
        this.name = name;
        this.unknown = unknown;
    }
}
private static final AclResourceTypeTestInfo[] INFOS = {
new AclResourceTypeTestInfo(ResourceType.UNKNOWN, 0, "unknown", true),
new AclResourceTypeTestInfo(ResourceType.ANY, 1, "any", false),
new AclResourceTypeTestInfo(ResourceType.TOPIC, 2, "topic", false),
new AclResourceTypeTestInfo(ResourceType.GROUP, 3, "group", false),
new AclResourceTypeTestInfo(ResourceType.CLUSTER, 4, "cluster", false),
new AclResourceTypeTestInfo(ResourceType.TRANSACTIONAL_ID, 5, "transactional_id", false),
new AclResourceTypeTestInfo(ResourceType.DELEGATION_TOKEN, 6, "delegation_token", false),
new AclResourceTypeTestInfo(ResourceType.USER, 7, "user", false)
};
@Test
public void testIsUnknown() {
for (AclResourceTypeTestInfo info : INFOS) {
assertEquals(info.unknown, info.resourceType.isUnknown(),
info.resourceType + " was supposed to have unknown == " + info.unknown);
}
}
@Test
public void testCode() {
assertEquals(ResourceType.values().length, INFOS.length);
for (AclResourceTypeTestInfo info : INFOS) {
assertEquals(info.code, info.resourceType.code(),
info.resourceType + " was supposed to have code == " + info.code);
assertEquals(info.resourceType, ResourceType.fromCode((byte) info.code), "AclResourceType.fromCode(" + info.code + ") was supposed to be " +
info.resourceType);
}
assertEquals(ResourceType.UNKNOWN, ResourceType.fromCode((byte) 120));
}
@Test
public void testName() {
for (AclResourceTypeTestInfo info : INFOS) {
assertEquals(info.resourceType, ResourceType.fromString(info.name), "ResourceType.fromString(" + info.name + ") was supposed to be " +
info.resourceType);
}
assertEquals(ResourceType.UNKNOWN, ResourceType.fromString("something"));
}
@Test
public void testExhaustive() {
assertEquals(INFOS.length, ResourceType.values().length);
for (int i = 0; i < INFOS.length; i++) {
assertEquals(INFOS[i].resourceType, ResourceType.values()[i]);
}
}
}
|
AclResourceTypeTestInfo
|
java
|
netty__netty
|
transport/src/test/java/io/netty/channel/CompleteChannelFutureTest.java
|
{
"start": 1003,
"end": 2410
}
|
class ____ {
@Test
public void shouldDisallowNullChannel() {
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
new CompleteChannelFutureImpl(null);
}
});
}
@Test
public void shouldNotDoAnythingOnRemove() {
Channel channel = Mockito.mock(Channel.class);
CompleteChannelFuture future = new CompleteChannelFutureImpl(channel);
ChannelFutureListener l = Mockito.mock(ChannelFutureListener.class);
future.removeListener(l);
Mockito.verifyNoMoreInteractions(l);
Mockito.verifyZeroInteractions(channel);
}
@Test
public void testConstantProperties() throws InterruptedException {
Channel channel = Mockito.mock(Channel.class);
CompleteChannelFuture future = new CompleteChannelFutureImpl(channel);
assertSame(channel, future.channel());
assertTrue(future.isDone());
assertSame(future, future.await());
assertTrue(future.await(1));
assertTrue(future.await(1, TimeUnit.NANOSECONDS));
assertSame(future, future.awaitUninterruptibly());
assertTrue(future.awaitUninterruptibly(1));
assertTrue(future.awaitUninterruptibly(1, TimeUnit.NANOSECONDS));
Mockito.verifyZeroInteractions(channel);
}
private static
|
CompleteChannelFutureTest
|
java
|
alibaba__nacos
|
plugin-default-impl/nacos-default-control-plugin/src/main/java/com/alibaba/nacos/plugin/control/impl/NacosConnectionControlManager.java
|
{
"start": 1396,
"end": 3243
}
|
class ____ extends ConnectionControlManager {
@Override
public String getName() {
return "nacos";
}
public NacosConnectionControlManager() {
super();
}
@Override
public void applyConnectionLimitRule(ConnectionControlRule connectionControlRule) {
super.connectionControlRule = connectionControlRule;
Loggers.CONTROL.info("Connection control rule updated to ->" + (this.connectionControlRule == null ? null
: JacksonUtils.toJson(this.connectionControlRule)));
Loggers.CONTROL.warn("Connection control updated, But connection control manager is no limit implementation.");
}
@Override
public ConnectionCheckResponse check(ConnectionCheckRequest connectionCheckRequest) {
ConnectionCheckResponse connectionCheckResponse = new ConnectionCheckResponse();
connectionCheckResponse.setSuccess(true);
connectionCheckResponse.setCode(ConnectionCheckCode.PASS_BY_TOTAL);
int totalCountLimit = connectionControlRule.getCountLimit();
// If totalCountLimit less than 0, no limit is applied.
if (totalCountLimit < 0) {
return connectionCheckResponse;
}
// Get total connection from metrics
Map<String, Integer> metricsTotalCount = metricsCollectorList.stream().collect(
Collectors.toMap(ConnectionMetricsCollector::getName, ConnectionMetricsCollector::getTotalCount));
int totalCount = metricsTotalCount.values().stream().mapToInt(Integer::intValue).sum();
if (totalCount >= totalCountLimit) {
connectionCheckResponse.setSuccess(false);
connectionCheckResponse.setCode(ConnectionCheckCode.DENY_BY_TOTAL_OVER);
}
return connectionCheckResponse;
}
}
|
NacosConnectionControlManager
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_2000/Issue2065.java
|
{
"start": 1363,
"end": 1667
}
|
enum ____ {
A(1);
@JSONField
private int code;
EnumClass(int code) {
this.code = code;
}
public int getCode() {
return code;
}
public void setCode(int code) {
this.code = code;
}
}
}
|
EnumClass
|
java
|
playframework__playframework
|
documentation/manual/working/javaGuide/main/dependencyinjection/code/javaguide/di/guice/classfield/SubclassController.java
|
{
"start": 309,
"end": 464
}
|
class ____ extends BaseController {
public Result index() {
return someBaseAction("index");
}
}
// #class-field-dependency-injection
|
SubclassController
|
java
|
apache__camel
|
components/camel-smpp/src/test/java/org/apache/camel/component/smpp/SmppUtilsTest.java
|
{
"start": 1490,
"end": 4189
}
|
class ____ {
private static TimeZone defaultTimeZone;
@BeforeAll
public static void setUpBeforeClass() {
defaultTimeZone = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("GMT"));
}
@AfterAll
public static void tearDownAfterClass() {
if (defaultTimeZone != null) {
TimeZone.setDefault(defaultTimeZone);
}
}
@Test
public void formatTime() {
assertEquals("-300101000000000+", SmppUtils.formatTime(new Date(0L)));
assertEquals("-300101024640000+", SmppUtils.formatTime(new Date(10000000L)));
}
@Test
public void string2Date() {
Date date = SmppUtils.string2Date("-300101010000004+");
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
assertEquals(5, calendar.get(Calendar.YEAR));
assertEquals(11, calendar.get(Calendar.MONTH));
assertEquals(10, calendar.get(Calendar.DAY_OF_MONTH));
assertEquals(10, calendar.get(Calendar.HOUR));
assertEquals(10, calendar.get(Calendar.MINUTE));
assertEquals(0, calendar.get(Calendar.SECOND));
}
@ParameterizedTest
@MethodSource("decodeBodyProvider")
void testDecodeBodyWhenBodyIsNot8bitAlphabetTheContentShouldBeDecoded(
String content, Charset encoding, byte dataCoding, String defaultEncoding)
throws UnsupportedEncodingException {
byte[] body = content.getBytes(encoding);
Assertions.assertEquals(content, SmppUtils.decodeBody(body, dataCoding, defaultEncoding));
}
@Test
void testDecodeBodyWhenBodyIs8bitShouldReturnNull() throws UnsupportedEncodingException {
byte[] body = new byte[] { 0, 1, 2, 3, 4 };
Assertions.assertNull(SmppUtils.decodeBody(body, Alphabet.ALPHA_8_BIT.value(), "X-Gsm7Bit"));
}
@Test
void testDecodeBodyWithUnsupportedDefaultEncodingShouldThrow() throws UnsupportedEncodingException {
Assertions.assertThrows(UnsupportedEncodingException.class, () -> {
SmppUtils.decodeBody(new byte[] { 0 }, Alphabet.ALPHA_DEFAULT.value(), "X-Gsm7Bit");
});
}
private static Stream<Arguments> decodeBodyProvider() {
return Stream.of(
Arguments.of("This is an ascii test !", StandardCharsets.US_ASCII, Alphabet.ALPHA_IA5.value(), "X-Gsm7Bit"),
Arguments.of("This is a latin1 test ®", StandardCharsets.ISO_8859_1, Alphabet.ALPHA_LATIN1.value(),
"X-Gsm7Bit"),
Arguments.of("This is a utf-16 test \uD83D\uDE00", StandardCharsets.UTF_16BE, Alphabet.ALPHA_UCS2.value(),
"X-Gsm7Bit"));
}
}
|
SmppUtilsTest
|
java
|
quarkusio__quarkus
|
extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/test/jaxrs/AutoSecurityRolesAllowedWithScopesTestBase.java
|
{
"start": 552,
"end": 3256
}
|
class ____ {
static Matcher<Iterable<Object>> schemeArray(String schemeName, Matcher<Iterable<?>> scopesMatcher) {
return allOf(
iterableWithSize(1),
hasItem(allOf(
aMapWithSize(1),
hasEntry(equalTo(schemeName), scopesMatcher))));
}
void testAutoSecurityRequirement(String schemeType) {
RestAssured.given()
.header("Accept", "application/json")
.when()
.get("/q/openapi")
.then()
.log().body()
.and()
.body("components.securitySchemes.MyScheme", allOf(
hasEntry("type", schemeType),
hasEntry("description", "Authentication using MyScheme")))
.and()
// OpenApiResourceSecuredAtMethodLevel
.body("paths.'/resource2/test-security/naked'.get.security", schemeArray("MyScheme", contains("admin")))
.body("paths.'/resource2/test-security/annotated'.get.security",
schemeArray("JWTCompanyAuthentication", emptyIterable()))
.body("paths.'/resource2/test-security/methodLevel/1'.get.security", schemeArray("MyScheme", contains("user1")))
.body("paths.'/resource2/test-security/methodLevel/2'.get.security", schemeArray("MyScheme", contains("user2")))
.body("paths.'/resource2/test-security/methodLevel/public'.get.security", nullValue())
.body("paths.'/resource2/test-security/annotated/documented'.get.security",
schemeArray("JWTCompanyAuthentication", emptyIterable()))
.body("paths.'/resource2/test-security/methodLevel/3'.get.security", schemeArray("MyScheme", contains("admin")))
.and()
// OpenApiResourceSecuredAtClassLevel
.body("paths.'/resource2/test-security/classLevel/1'.get.security", schemeArray("MyScheme", contains("user1")))
.body("paths.'/resource2/test-security/classLevel/2'.get.security", schemeArray("MyScheme", contains("user2")))
.body("paths.'/resource2/test-security/classLevel/3'.get.security", schemeArray("MyOwnName", emptyIterable()))
.body("paths.'/resource2/test-security/classLevel/4'.get.security", schemeArray("MyScheme", contains("admin")))
.and()
// OpenApiResourceSecuredAtMethodLevel2
.body("paths.'/resource3/test-security/annotated'.get.security",
schemeArray("AtClassLevel", emptyIterable()));
}
}
|
AutoSecurityRolesAllowedWithScopesTestBase
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/websocket/WebSocketTestCase.java
|
{
"start": 561,
"end": 2294
}
|
class ____ {
@TestHTTPResource("/ws")
URI uri;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(WebSocketResource.class));
@Test
public void testWebSocket() throws Exception {
HttpClient httpClient = VertxCoreRecorder.getVertx().get().createHttpClient();
try {
final CompletableFuture<String> result = new CompletableFuture<>();
httpClient.webSocket(uri.getPort(), uri.getHost(), uri.getPath(), new Handler<AsyncResult<WebSocket>>() {
@Override
public void handle(AsyncResult<WebSocket> event) {
if (event.failed()) {
result.completeExceptionally(event.cause());
} else {
event.result().exceptionHandler(new Handler<Throwable>() {
@Override
public void handle(Throwable event) {
result.completeExceptionally(event);
}
});
event.result().textMessageHandler(new Handler<String>() {
@Override
public void handle(String event) {
result.complete(event);
}
});
event.result().writeTextMessage("Hello World");
}
}
});
Assertions.assertEquals("Hello World", result.get());
} finally {
httpClient.close();
}
}
}
|
WebSocketTestCase
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/UnnecessaryAnonymousClass.java
|
{
"start": 3193,
"end": 5510
}
|
class ____ extends BugChecker implements VariableTreeMatcher {
@Override
public Description matchVariable(VariableTree tree, VisitorState state) {
if (tree.getInitializer() == null) {
return NO_MATCH;
}
if (!(tree.getInitializer() instanceof NewClassTree classTree)) {
return NO_MATCH;
}
if (classTree.getClassBody() == null) {
return NO_MATCH;
}
ImmutableList<? extends Tree> members =
classTree.getClassBody().getMembers().stream()
.filter(
x -> !(x instanceof MethodTree methodTree && isGeneratedConstructor(methodTree)))
.collect(toImmutableList());
if (members.size() != 1) {
return NO_MATCH;
}
Tree member = getOnlyElement(members);
if (!(member instanceof MethodTree implementation)) {
return NO_MATCH;
}
VarSymbol varSym = getSymbol(tree);
if (varSym.getKind() != ElementKind.FIELD
|| !canBeRemoved(varSym)
|| !varSym.getModifiers().contains(Modifier.FINAL)) {
return NO_MATCH;
}
Type type = getType(tree.getType());
if (type == null || !state.getTypes().isFunctionalInterface(type)) {
return NO_MATCH;
}
MethodSymbol methodSymbol = getSymbol(implementation);
Symbol descriptorSymbol = state.getTypes().findDescriptorSymbol(type.tsym);
if (!methodSymbol.getSimpleName().contentEquals(descriptorSymbol.getSimpleName())) {
return NO_MATCH;
}
if (!methodSymbol.overrides(
descriptorSymbol, enclosingClass(methodSymbol), state.getTypes(), false)) {
return NO_MATCH;
}
if (tree.getModifiers().getAnnotations().stream()
.anyMatch(at -> getSymbol(at).getQualifiedName().contentEquals("org.mockito.Spy"))) {
return NO_MATCH;
}
SuggestedFix.Builder fixBuilder = SuggestedFix.builder();
// Derive new method name from identifier.
String newName =
varSym.isStatic()
? UPPER_UNDERSCORE.converterTo(LOWER_CAMEL).convert(tree.getName().toString())
: tree.getName().toString();
fixBuilder.merge(SuggestedFixes.renameMethod(implementation, newName, state));
// Make non-final.
SuggestedFixes.removeModifiers(tree, state, Modifier.FINAL).ifPresent(fixBuilder::merge);
// Convert the anonymous
|
UnnecessaryAnonymousClass
|
java
|
apache__avro
|
lang/java/avro/src/test/java/org/apache/avro/reflect/TestNonStringMapKeys.java
|
{
"start": 16716,
"end": 17492
}
|
class ____ {
HashMap<Integer, String> map1;
ConcurrentHashMap<Integer, String> map2;
LinkedHashMap<Integer, String> map3;
TreeMap<Integer, String> map4;
public Map<Integer, String> getMap1() {
return map1;
}
public void setMap1(HashMap<Integer, String> map1) {
this.map1 = map1;
}
public Map<Integer, String> getMap2() {
return map2;
}
public void setMap2(ConcurrentHashMap<Integer, String> map2) {
this.map2 = map2;
}
public Map<Integer, String> getMap3() {
return map3;
}
public void setMap3(LinkedHashMap<Integer, String> map3) {
this.map3 = map3;
}
public Map<Integer, String> getMap4() {
return map4;
}
public void setMap4(TreeMap<Integer, String> map4) {
this.map4 = map4;
}
}
|
SameMapSignature
|
java
|
quarkusio__quarkus
|
extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/CacheKeyGeneratorTest.java
|
{
"start": 5961,
"end": 6390
}
|
class ____ implements CacheKeyGenerator {
static final AtomicBoolean DESTROYED = new AtomicBoolean();
@Override
public Object generate(Method method, Object... methodParams) {
return new CompositeCacheKey(ASPARAGUS, methodParams[1]);
}
@PreDestroy
void preDestroy() {
DESTROYED.set(true);
}
}
@Dependent
public static
|
RequestScopedKeyGen
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/options/JsonBucketParams.java
|
{
"start": 737,
"end": 1012
}
|
class ____<V> extends BaseOptions<JsonBucketOptions<V>, JsonCodec> implements JsonBucketOptions<V> {
private final String name;
JsonBucketParams(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
|
JsonBucketParams
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/factory/primary_and_named_parameterizedfactory/MyBeanUser.java
|
{
"start": 84,
"end": 405
}
|
class ____ {
private final String name;
private final MyBean myBean;
public MyBeanUser(String name, MyBean myBean) {
this.name = name;
this.myBean = myBean;
}
public MyBean getMyBean() {
return myBean;
}
public String getName() {
return name;
}
}
|
MyBeanUser
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
|
{
"start": 1540,
"end": 6221
}
|
class ____ extends AllocationDecider {
public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting(
"index.allocation.max_retries",
5,
0,
Setting.Property.Dynamic,
Setting.Property.IndexScope,
Setting.Property.NotCopyableOnResize
);
private static final String RETRY_FAILED_API = "POST /_cluster/reroute?retry_failed";
public static final String NAME = "max_retry";
private static final Decision YES_NO_FAILURES = Decision.single(Decision.Type.YES, NAME, "shard has no previous failures");
private static final Decision YES_SIMULATING = Decision.single(Decision.Type.YES, NAME, "previous failures ignored when simulating");
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
if (allocation.isSimulating()) {
return YES_SIMULATING;
}
final int maxRetries = SETTING_ALLOCATION_MAX_RETRY.get(allocation.metadata().indexMetadata(shardRouting.index()).getSettings());
final var unassignedInfo = shardRouting.unassignedInfo();
final int numFailedAllocations = unassignedInfo == null ? 0 : unassignedInfo.failedAllocations();
if (numFailedAllocations > 0) {
final var decision = numFailedAllocations >= maxRetries ? Decision.NO : Decision.YES;
return allocation.debugDecision() ? debugDecision(decision, unassignedInfo, numFailedAllocations, maxRetries) : decision;
}
final var relocationFailureInfo = shardRouting.relocationFailureInfo();
final int numFailedRelocations = relocationFailureInfo == null ? 0 : relocationFailureInfo.failedRelocations();
if (numFailedRelocations > 0) {
final var decision = numFailedRelocations >= maxRetries ? Decision.NO : Decision.YES;
return allocation.debugDecision() ? debugDecision(decision, relocationFailureInfo, numFailedRelocations, maxRetries) : decision;
}
return YES_NO_FAILURES;
}
private static Decision debugDecision(Decision decision, UnassignedInfo info, int numFailedAllocations, int maxRetries) {
if (decision.type() == Decision.Type.NO) {
return Decision.single(
Decision.Type.NO,
NAME,
"shard has exceeded the maximum number of retries [%d] on failed allocation attempts - "
+ "manually call [%s] to retry, and for more information, see [%s] [%s]",
maxRetries,
RETRY_FAILED_API,
ReferenceDocs.ALLOCATION_EXPLAIN_MAX_RETRY,
info.toString()
);
} else {
return Decision.single(
Decision.Type.YES,
NAME,
"shard has failed allocating [%d] times but [%d] retries are allowed",
numFailedAllocations,
maxRetries
);
}
}
private static Decision debugDecision(Decision decision, RelocationFailureInfo info, int numFailedRelocations, int maxRetries) {
if (decision.type() == Decision.Type.NO) {
return Decision.single(
Decision.Type.NO,
NAME,
"shard has exceeded the maximum number of retries [%d] on failed relocation attempts - manually call [%s] to retry, [%s]",
maxRetries,
RETRY_FAILED_API,
info.toString()
);
} else {
return Decision.single(
Decision.Type.YES,
NAME,
"shard has failed relocating [%d] times but [%d] retries are allowed",
numFailedRelocations,
maxRetries
);
}
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return canAllocate(shardRouting, allocation);
}
@Override
public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
assert shardRouting.primary() : "must not call canForceAllocatePrimary on a non-primary shard " + shardRouting;
// check if we have passed the maximum retry threshold through canAllocate,
// if so, we don't want to force the primary allocation here
return canAllocate(shardRouting, node, allocation);
}
@Override
public Decision canForceAllocateDuringReplace(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return canAllocate(shardRouting, node, allocation);
}
}
|
MaxRetryAllocationDecider
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
|
{
"start": 1818,
"end": 9200
}
|
class ____
implements Comparator<DatanodeInfoWithStorage>, Serializable {
private static final long serialVersionUID = 6441720011443190984L;
@Override
public int compare(DatanodeInfoWithStorage dns1,
DatanodeInfoWithStorage dns2) {
if (StorageType.PROVIDED.equals(dns1.getStorageType())
&& !StorageType.PROVIDED.equals(dns2.getStorageType())) {
return 1;
}
if (!StorageType.PROVIDED.equals(dns1.getStorageType())
&& StorageType.PROVIDED.equals(dns2.getStorageType())) {
return -1;
}
// Storage types of dns1 and dns2 are now both provided or not provided;
// thus, are essentially equal for the purpose of this comparator.
return 0;
}
}
private final ExtendedBlock b;
private long offset; // offset of the first byte of the block in the file
private final DatanodeInfoWithStorage[] locs;
/** Cached storage ID for each replica */
private final String[] storageIDs;
/** Cached storage type for each replica, if reported. */
private final StorageType[] storageTypes;
// corrupt flag is true if all of the replicas of a block are corrupt.
// else false. If block has few corrupt replicas, they are filtered and
// their locations are not part of this object
private boolean corrupt;
private Token<BlockTokenIdentifier> blockToken = new Token<>();
// use one instance of the Provided comparator as it uses no state.
private static ProvidedLastComparator providedLastComparator =
new ProvidedLastComparator();
/**
* List of cached datanode locations
*/
private DatanodeInfo[] cachedLocs;
// Used when there are no locations
static final DatanodeInfoWithStorage[] EMPTY_LOCS =
new DatanodeInfoWithStorage[0];
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
// By default, startOffset is unknown(-1) and corrupt is false.
this(b, convert(locs, null, null), null, null, -1, false, EMPTY_LOCS);
}
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
String[] storageIDs, StorageType[] storageTypes) {
this(b, convert(locs, storageIDs, storageTypes),
storageIDs, storageTypes, -1, false, EMPTY_LOCS);
}
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
String[] storageIDs, StorageType[] storageTypes, long startOffset,
boolean corrupt, DatanodeInfo[] cachedLocs) {
this(b, convert(locs, storageIDs, storageTypes),
storageIDs, storageTypes, startOffset, corrupt,
null == cachedLocs || 0 == cachedLocs.length ? EMPTY_LOCS : cachedLocs);
}
public LocatedBlock(ExtendedBlock b, DatanodeInfoWithStorage[] locs,
String[] storageIDs, StorageType[] storageTypes, long startOffset,
boolean corrupt, DatanodeInfo[] cachedLocs) {
this.b = b;
this.offset = startOffset;
this.corrupt = corrupt;
this.locs = null == locs ? EMPTY_LOCS : locs;
this.storageIDs = storageIDs;
this.storageTypes = storageTypes;
this.cachedLocs = null == cachedLocs || 0 == cachedLocs.length
? EMPTY_LOCS
: cachedLocs;
}
private static DatanodeInfoWithStorage[] convert(
DatanodeInfo[] infos, String[] storageIDs, StorageType[] storageTypes) {
if (null == infos) {
return EMPTY_LOCS;
}
DatanodeInfoWithStorage[] ret = new DatanodeInfoWithStorage[infos.length];
for(int i = 0; i < infos.length; i++) {
ret[i] = new DatanodeInfoWithStorage(infos[i],
storageIDs != null ? storageIDs[i] : null,
storageTypes != null ? storageTypes[i] : null);
}
return ret;
}
public Token<BlockTokenIdentifier> getBlockToken() {
return blockToken;
}
public void setBlockToken(Token<BlockTokenIdentifier> token) {
this.blockToken = token;
}
public ExtendedBlock getBlock() {
return b;
}
/**
* Returns the locations associated with this block. The returned array is not
* expected to be modified. If it is, caller must immediately invoke
* {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo}
* to update the cached Storage ID/Type arrays.
*/
public DatanodeInfoWithStorage[] getLocations() {
return locs;
}
public StorageType[] getStorageTypes() {
return storageTypes;
}
public String[] getStorageIDs() {
return storageIDs;
}
/**
* Updates the cached StorageID and StorageType information. Must be
* called when the locations array is modified.
*/
public void updateCachedStorageInfo() {
if (storageIDs != null) {
for(int i = 0; i < locs.length; i++) {
storageIDs[i] = locs[i].getStorageID();
}
}
if (storageTypes != null) {
for(int i = 0; i < locs.length; i++) {
storageTypes[i] = locs[i].getStorageType();
}
}
}
/**
* Moves all locations that have {@link StorageType}
* {@code PROVIDED} to the end of the locations array without
* changing the relative ordering of the remaining locations
* Only the first {@code activeLen} locations are considered.
* The caller must immediately invoke {@link
* org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo}
* to update the cached Storage ID/Type arrays.
* @param activeLen
*/
public void moveProvidedToEnd(int activeLen) {
if (activeLen <= 0) {
return;
}
// as this is a stable sort, for elements that are equal,
// the current order of the elements is maintained
Arrays.sort(locs, 0, (activeLen < locs.length) ? activeLen : locs.length,
providedLastComparator);
}
public long getStartOffset() {
return offset;
}
public long getBlockSize() {
return b.getNumBytes();
}
public void setStartOffset(long value) {
this.offset = value;
}
public void setCorrupt(boolean corrupt) {
this.corrupt = corrupt;
}
public boolean isCorrupt() {
return this.corrupt;
}
/**
* Add a the location of a cached replica of the block.
*
* @param loc of datanode with the cached replica
*/
public void addCachedLoc(DatanodeInfo loc) {
List<DatanodeInfo> cachedList = Lists.newArrayList(cachedLocs);
if (cachedList.contains(loc)) {
return;
}
// Try to re-use a DatanodeInfo already in loc
for (DatanodeInfoWithStorage di : locs) {
if (loc.equals(di)) {
cachedList.add(di);
cachedLocs = cachedList.toArray(cachedLocs);
return;
}
}
// Not present in loc, add it and go
cachedList.add(loc);
Preconditions.checkArgument(cachedLocs != EMPTY_LOCS,
"Cached locations should only be added when having a backing"
+ " disk replica!", loc, locs.length, Arrays.toString(locs));
cachedLocs = cachedList.toArray(cachedLocs);
}
/**
* @return Datanodes with a cached block replica
*/
public DatanodeInfo[] getCachedLocations() {
return cachedLocs;
}
@Override
public String toString() {
return getClass().getSimpleName() + "{" + b
+ "; getBlockSize()=" + getBlockSize()
+ "; corrupt=" + corrupt
+ "; offset=" + offset
+ "; locs=" + Arrays.asList(locs)
+ "; cachedLocs=" + Arrays.asList(cachedLocs)
+ "}";
}
public boolean isStriped() {
return false;
}
public BlockType getBlockType() {
return BlockType.CONTIGUOUS;
}
}
|
ProvidedLastComparator
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/ManagementInterface.java
|
{
"start": 161,
"end": 284
}
|
class ____ a CDI observer:
* {@code public void init(@Observe ManagementInterface mi) {...}}
* <p>
* If the management
|
using
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanner.java
|
{
"start": 1678,
"end": 2036
}
|
class ____ implement {@link #scanPlugins(PluginSource)}, in order to scan a single source.
* The returned {@link PluginScanResult} should contain only plugins which are loadable from the passed-in source.
* The superclass has some common functionality which is usable in subclasses, and handles merging multiple results.
*
* <p>Implementations of this
|
should
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/support/lob/TemporaryLobCreator.java
|
{
"start": 1896,
"end": 5846
}
|
class ____ implements LobCreator {
protected static final Log logger = LogFactory.getLog(TemporaryLobCreator.class);
private final Set<Blob> temporaryBlobs = new LinkedHashSet<>(1);
private final Set<Clob> temporaryClobs = new LinkedHashSet<>(1);
@Override
public void setBlobAsBytes(PreparedStatement ps, int paramIndex, byte @Nullable [] content)
throws SQLException {
if (content != null) {
Blob blob = ps.getConnection().createBlob();
blob.setBytes(1, content);
this.temporaryBlobs.add(blob);
ps.setBlob(paramIndex, blob);
}
else {
ps.setBlob(paramIndex, (Blob) null);
}
if (logger.isDebugEnabled()) {
logger.debug(content != null ? "Copied bytes into temporary BLOB with length " + content.length :
"Set BLOB to null");
}
}
@Override
public void setBlobAsBinaryStream(
PreparedStatement ps, int paramIndex, @Nullable InputStream binaryStream, int contentLength)
throws SQLException {
if (binaryStream != null) {
Blob blob = ps.getConnection().createBlob();
try {
FileCopyUtils.copy(binaryStream, blob.setBinaryStream(1));
}
catch (IOException ex) {
throw new DataAccessResourceFailureException("Could not copy into LOB stream", ex);
}
this.temporaryBlobs.add(blob);
ps.setBlob(paramIndex, blob);
}
else {
ps.setBlob(paramIndex, (Blob) null);
}
if (logger.isDebugEnabled()) {
logger.debug(binaryStream != null ?
"Copied binary stream into temporary BLOB with length " + contentLength :
"Set BLOB to null");
}
}
@Override
public void setClobAsString(PreparedStatement ps, int paramIndex, @Nullable String content)
throws SQLException {
if (content != null) {
Clob clob = ps.getConnection().createClob();
clob.setString(1, content);
this.temporaryClobs.add(clob);
ps.setClob(paramIndex, clob);
}
else {
ps.setClob(paramIndex, (Clob) null);
}
if (logger.isDebugEnabled()) {
logger.debug(content != null ? "Copied string into temporary CLOB with length " + content.length() :
"Set CLOB to null");
}
}
@Override
public void setClobAsAsciiStream(
PreparedStatement ps, int paramIndex, @Nullable InputStream asciiStream, int contentLength)
throws SQLException {
if (asciiStream != null) {
Clob clob = ps.getConnection().createClob();
try {
FileCopyUtils.copy(asciiStream, clob.setAsciiStream(1));
}
catch (IOException ex) {
throw new DataAccessResourceFailureException("Could not copy into LOB stream", ex);
}
this.temporaryClobs.add(clob);
ps.setClob(paramIndex, clob);
}
else {
ps.setClob(paramIndex, (Clob) null);
}
if (logger.isDebugEnabled()) {
logger.debug(asciiStream != null ?
"Copied ASCII stream into temporary CLOB with length " + contentLength :
"Set CLOB to null");
}
}
@Override
public void setClobAsCharacterStream(
PreparedStatement ps, int paramIndex, @Nullable Reader characterStream, int contentLength)
throws SQLException {
if (characterStream != null) {
Clob clob = ps.getConnection().createClob();
try {
FileCopyUtils.copy(characterStream, clob.setCharacterStream(1));
}
catch (IOException ex) {
throw new DataAccessResourceFailureException("Could not copy into LOB stream", ex);
}
this.temporaryClobs.add(clob);
ps.setClob(paramIndex, clob);
}
else {
ps.setClob(paramIndex, (Clob) null);
}
if (logger.isDebugEnabled()) {
logger.debug(characterStream != null ?
"Copied character stream into temporary CLOB with length " + contentLength :
"Set CLOB to null");
}
}
@Override
public void close() {
for (Blob blob : this.temporaryBlobs) {
try {
blob.free();
}
catch (SQLException ex) {
logger.warn("Could not free BLOB", ex);
}
}
for (Clob clob : this.temporaryClobs) {
try {
clob.free();
}
catch (SQLException ex) {
logger.warn("Could not free CLOB", ex);
}
}
}
}
|
TemporaryLobCreator
|
java
|
apache__flink
|
flink-clients/src/test/java/org/apache/flink/client/cli/CliFrontendSavepointTest.java
|
{
"start": 10657,
"end": 13315
}
|
class ____
extends RestClusterClient<StandaloneClusterId> {
private final Function<String, CompletableFuture<Acknowledge>> disposeSavepointFunction;
DisposeSavepointClusterClient(
Function<String, CompletableFuture<Acknowledge>> disposeSavepointFunction,
Configuration configuration)
throws Exception {
super(configuration, StandaloneClusterId.getInstance());
this.disposeSavepointFunction = Preconditions.checkNotNull(disposeSavepointFunction);
}
@Override
public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) {
return disposeSavepointFunction.apply(savepointPath);
}
}
@BeforeEach
void replaceStdOutAndStdErr() {
stdOut = System.out;
stdErr = System.err;
buffer = new ByteArrayOutputStream();
PrintStream capture = new PrintStream(buffer);
System.setOut(capture);
System.setErr(capture);
}
@AfterEach
void restoreStdOutAndStdErr() {
System.setOut(stdOut);
System.setErr(stdErr);
}
private static ClusterClient<String> createClusterClient(String expectedResponse) {
final ClusterClient<String> clusterClient = mock(ClusterClient.class);
when(clusterClient.triggerSavepoint(
any(JobID.class),
nullable(String.class),
nullable(SavepointFormatType.class)))
.thenReturn(CompletableFuture.completedFuture(expectedResponse));
return clusterClient;
}
private static ClusterClient<String> createDetachClusterClient(String expectedResponse) {
final ClusterClient<String> clusterClient = mock(ClusterClient.class);
when(clusterClient.triggerDetachedSavepoint(
any(JobID.class),
nullable(String.class),
nullable(SavepointFormatType.class)))
.thenReturn(CompletableFuture.completedFuture(expectedResponse));
return clusterClient;
}
private static ClusterClient<String> createFailingClusterClient(Exception expectedException) {
final ClusterClient<String> clusterClient = mock(ClusterClient.class);
when(clusterClient.triggerSavepoint(
any(JobID.class),
nullable(String.class),
nullable(SavepointFormatType.class)))
.thenReturn(FutureUtils.completedExceptionally(expectedException));
return clusterClient;
}
}
|
DisposeSavepointClusterClient
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableDoOnLifecycle.java
|
{
"start": 1649,
"end": 4280
}
|
class ____<T> implements FlowableSubscriber<T>, Subscription {
final Subscriber<? super T> downstream;
final Consumer<? super Subscription> onSubscribe;
final LongConsumer onRequest;
final Action onCancel;
Subscription upstream;
SubscriptionLambdaSubscriber(Subscriber<? super T> actual,
Consumer<? super Subscription> onSubscribe,
LongConsumer onRequest,
Action onCancel) {
this.downstream = actual;
this.onSubscribe = onSubscribe;
this.onCancel = onCancel;
this.onRequest = onRequest;
}
@Override
public void onSubscribe(Subscription s) {
// this way, multiple calls to onSubscribe can show up in tests that use doOnSubscribe to validate behavior
try {
onSubscribe.accept(s);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
s.cancel();
this.upstream = SubscriptionHelper.CANCELLED;
EmptySubscription.error(e, downstream);
return;
}
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
downstream.onNext(t);
}
@Override
public void onError(Throwable t) {
if (upstream != SubscriptionHelper.CANCELLED) {
downstream.onError(t);
} else {
RxJavaPlugins.onError(t);
}
}
@Override
public void onComplete() {
if (upstream != SubscriptionHelper.CANCELLED) {
downstream.onComplete();
}
}
@Override
public void request(long n) {
try {
onRequest.accept(n);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
RxJavaPlugins.onError(e);
}
upstream.request(n);
}
@Override
public void cancel() {
Subscription s = upstream;
if (s != SubscriptionHelper.CANCELLED) {
upstream = SubscriptionHelper.CANCELLED;
try {
onCancel.run();
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
RxJavaPlugins.onError(e);
}
s.cancel();
}
}
}
}
|
SubscriptionLambdaSubscriber
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/main/java/org/apache/logging/log4j/core/test/hamcrest/Descriptors.java
|
{
"start": 1032,
"end": 1590
}
|
class ____ {
/**
* Decorating Matcher similar to {@code is()}, but for better grammar.
*
* @param matcher the Matcher to decorate.
* @param <T> the type expected by the Matcher.
* @return the decorated Matcher.
*/
public static <T> Matcher<T> that(final Matcher<T> matcher) {
return new Is<T>(matcher) {
@Override
public void describeTo(final Description description) {
description.appendText("that ").appendDescriptionOf(matcher);
}
};
}
}
|
Descriptors
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java
|
{
"start": 934,
"end": 9287
}
|
class ____ extends AbstractArrayBlock implements BooleanBlock {
private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this
private final BooleanBigArrayVector vector;
public BooleanBigArrayBlock(
BitArray values,
int positionCount,
int[] firstValueIndexes,
BitSet nulls,
MvOrdering mvOrdering,
BlockFactory blockFactory
) {
this(
new BooleanBigArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory),
positionCount,
firstValueIndexes,
nulls,
mvOrdering
);
}
private BooleanBigArrayBlock(
BooleanBigArrayVector vector, // stylecheck
int positionCount,
int[] firstValueIndexes,
BitSet nulls,
MvOrdering mvOrdering
) {
super(positionCount, firstValueIndexes, nulls, mvOrdering);
this.vector = vector;
assert firstValueIndexes == null
? vector.getPositionCount() == getPositionCount()
: firstValueIndexes[getPositionCount()] == vector.getPositionCount();
}
static BooleanBigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException {
final SubFields sub = new SubFields(blockFactory, in);
BooleanBigArrayVector vector = null;
boolean success = false;
try {
vector = BooleanBigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory);
var block = new BooleanBigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering);
blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved);
success = true;
return block;
} finally {
if (success == false) {
Releasables.close(vector);
blockFactory.adjustBreaker(-sub.bytesReserved);
}
}
}
void writeArrayBlock(StreamOutput out) throws IOException {
writeSubFields(out);
vector.writeArrayVector(vector.getPositionCount(), out);
}
@Override
public BooleanVector asVector() {
return null;
}
@Override
public ToMask toMask() {
if (getPositionCount() == 0) {
return new ToMask(blockFactory().newConstantBooleanVector(false, 0), false);
}
try (BooleanVector.FixedBuilder builder = blockFactory().newBooleanVectorFixedBuilder(getPositionCount())) {
boolean hasMv = false;
for (int p = 0; p < getPositionCount(); p++) {
builder.appendBoolean(switch (getValueCount(p)) {
case 0 -> false;
case 1 -> getBoolean(getFirstValueIndex(p));
default -> {
hasMv = true;
yield false;
}
});
}
return new ToMask(builder.build(), hasMv);
}
}
@Override
public boolean getBoolean(int valueIndex) {
return vector.getBoolean(valueIndex);
}
@Override
public BooleanBlock filter(int... positions) {
try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) {
for (int pos : positions) {
if (isNull(pos)) {
builder.appendNull();
continue;
}
int valueCount = getValueCount(pos);
int first = getFirstValueIndex(pos);
if (valueCount == 1) {
builder.appendBoolean(getBoolean(getFirstValueIndex(pos)));
} else {
builder.beginPositionEntry();
for (int c = 0; c < valueCount; c++) {
builder.appendBoolean(getBoolean(first + c));
}
builder.endPositionEntry();
}
}
return builder.mvOrdering(mvOrdering()).build();
}
}
@Override
public BooleanBlock keepMask(BooleanVector mask) {
if (getPositionCount() == 0) {
incRef();
return this;
}
if (mask.isConstant()) {
if (mask.getBoolean(0)) {
incRef();
return this;
}
return (BooleanBlock) blockFactory().newConstantNullBlock(getPositionCount());
}
try (BooleanBlock.Builder builder = blockFactory().newBooleanBlockBuilder(getPositionCount())) {
// TODO if X-ArrayBlock used BooleanVector for it's null mask then we could shuffle references here.
for (int p = 0; p < getPositionCount(); p++) {
if (false == mask.getBoolean(p)) {
builder.appendNull();
continue;
}
int valueCount = getValueCount(p);
if (valueCount == 0) {
builder.appendNull();
continue;
}
int start = getFirstValueIndex(p);
if (valueCount == 1) {
builder.appendBoolean(getBoolean(start));
continue;
}
int end = start + valueCount;
builder.beginPositionEntry();
for (int i = start; i < end; i++) {
builder.appendBoolean(getBoolean(i));
}
builder.endPositionEntry();
}
return builder.build();
}
}
@Override
public ReleasableIterator<BooleanBlock> lookup(IntBlock positions, ByteSizeValue targetBlockSize) {
return new BooleanLookup(this, positions, targetBlockSize);
}
@Override
public ElementType elementType() {
return ElementType.BOOLEAN;
}
@Override
public BooleanBlock expand() {
if (firstValueIndexes == null) {
incRef();
return this;
}
if (nullsMask == null) {
vector.incRef();
return vector.asBlock();
}
// The following line is correct because positions with multi-values are never null.
int expandedPositionCount = vector.getPositionCount();
long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount));
blockFactory().adjustBreaker(bitSetRamUsedEstimate);
BooleanBigArrayBlock expanded = new BooleanBigArrayBlock(
vector,
expandedPositionCount,
null,
shiftNullsToExpandedPositions(),
MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING
);
blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate);
// We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips.
vector.incRef();
return expanded;
}
private long ramBytesUsedOnlyBlock() {
return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask);
}
@Override
public long ramBytesUsed() {
return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof BooleanBlock that) {
return BooleanBlock.equals(this, that);
}
return false;
}
@Override
public int hashCode() {
return BooleanBlock.hash(this);
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "[positions="
+ getPositionCount()
+ ", mvOrdering="
+ mvOrdering()
+ ", ramBytesUsed="
+ vector.ramBytesUsed()
+ ']';
}
@Override
public void allowPassingToDifferentDriver() {
vector.allowPassingToDifferentDriver();
}
@Override
public BlockFactory blockFactory() {
return vector.blockFactory();
}
@Override
public void closeInternal() {
blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock());
Releasables.closeExpectNoException(vector);
}
}
|
BooleanBigArrayBlock
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/execution/DelayedExecutionFlowImpl.java
|
{
"start": 12225,
"end": 12772
}
|
class ____<I, O> extends Step<I, O> {
private final Supplier<? extends ExecutionFlow<? extends O>> transformer;
private Then(Supplier<? extends ExecutionFlow<? extends O>> transformer) {
this.transformer = transformer;
}
@Override
ExecutionFlow<O> apply(ExecutionFlow<I> executionFlow) {
try {
return executionFlow.then(transformer);
} catch (Exception e) {
return returnError(e);
}
}
}
private static final
|
Then
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
|
{
"start": 1936,
"end": 13716
}
|
class ____ extends YarnCLI {
public static final String QUEUE = "queue";
public static final String ALLTAG = "all";
public static void main(String[] args) throws Exception {
QueueCLI cli = new QueueCLI();
cli.setSysOutPrintStream(System.out);
cli.setSysErrPrintStream(System.err);
int res = ToolRunner.run(cli, args);
cli.stop();
System.exit(res);
}
@Override
public int run(String[] args) throws Exception {
Options opts = new Options();
opts.addOption(STATUS_CMD, true,
"List queue information about given queue.");
opts.addOption(HELP_CMD, false, "Displays help for all commands.");
opts.getOption(STATUS_CMD).setArgName("Queue Name");
opts.addOption(LIST_CMD, true,
"All child queues are displayed according to the parent queue. " +
"If the value is all, all queues are displayed.");
opts.getOption(LIST_CMD).setArgName("Parent Queue Name");
opts.addOption(OPTION_SUBCLUSTERID, true, "We support setting subClusterId in " +
"YARN Federation mode to specify specific subClusters.");
CommandLine cliParser = null;
try {
cliParser = new GnuParser().parse(opts, args);
} catch (MissingArgumentException ex) {
sysout.println("Missing argument for options");
printUsage(opts);
return -1;
}
createAndStartYarnClient();
if (cliParser.hasOption(STATUS_CMD)) {
// Our possible options are -status root.a,
// -subcluster sc-1, we will have up to 4 args
if (args.length > 4) {
printUsage(opts);
return -1;
}
String queue = cliParser.getOptionValue(STATUS_CMD);
String subClusterId = cliParser.getOptionValue(OPTION_SUBCLUSTERID);
if (isYarnFederationEnabled(getConf()) && StringUtils.isNotBlank(subClusterId)) {
return listQueue(queue, subClusterId);
} else {
return listQueue(queue);
}
} else if (cliParser.hasOption(HELP_CMD)) {
printUsage(opts);
return 0;
} else if (cliParser.hasOption(LIST_CMD)) {
if (args.length != 2) {
printUsage(opts);
return -1;
}
return listChildQueues(cliParser.getOptionValue(LIST_CMD));
} else {
syserr.println("Invalid Command Usage : ");
printUsage(opts);
return -1;
}
}
/**
* It prints the usage of the command
*
* @param opts
*/
@VisibleForTesting
void printUsage(Options opts) {
new HelpFormatter().printHelp(QUEUE, opts);
}
/**
* Lists the Queue Information matching the given queue name.
*
* @param queueName Queue name to be queried.
* @throws YarnException YarnException indicates exceptions from yarn servers.
* @throws IOException I/O exception has occurred.
* @return 0, the command execution is successful; -1, the command execution fails.
*/
private int listQueue(String queueName) throws YarnException, IOException {
int rc;
PrintWriter writer = new PrintWriter(
new OutputStreamWriter(sysout, StandardCharsets.UTF_8));
QueueInfo queueInfo = client.getQueueInfo(queueName);
if (queueInfo != null) {
if (isYarnFederationEnabled(getConf())) {
writer.println("Using YARN Federation mode.");
}
writer.println("Queue Information : ");
printQueueInfo(writer, queueInfo);
rc = 0;
} else {
writer.println("Cannot get queue from RM by queueName = " + queueName
+ ", please check.");
rc = -1;
}
writer.flush();
return rc;
}
/**
* Lists the Queue Information matching the given queue name.
*
* @param queueName Queue name to be queried.
* @param subClusterId Subcluster id.
* @throws YarnException YarnException indicates exceptions from yarn servers.
* @throws IOException I/O exception has occurred.
* @return 0, the command execution is successful; -1, the command execution fails.
*/
private int listQueue(String queueName, String subClusterId)
throws YarnException, IOException {
int rc;
PrintWriter writer = new PrintWriter(
new OutputStreamWriter(sysout, StandardCharsets.UTF_8));
QueueInfo queueInfo = client.getQueueInfo(queueName, subClusterId);
if (queueInfo != null) {
if (isYarnFederationEnabled(getConf())) {
writer.println("Using YARN Federation mode.");
}
if (StringUtils.isNotBlank(subClusterId)) {
writer.println("SubClusterId : " + subClusterId + ", Queue Information : ");
} else {
writer.println("Queue Information : ");
}
printQueueInfo(writer, queueInfo);
rc = 0;
} else {
writer.println("Cannot get queue from RM by queueName = " + queueName
+ ", subClusterId = " + subClusterId + " please check.");
rc = -1;
}
writer.flush();
return rc;
}
/**
* List information about all child queues based on the parent queue.
* @param parentQueueName The name of the payment queue.
* @return The status code of execution.
* @throws IOException failed or interrupted I/O operations.
* @throws YarnException exceptions from yarn servers.
*/
private int listChildQueues(String parentQueueName) throws IOException, YarnException {
int exitCode;
PrintWriter writer = new PrintWriter(new OutputStreamWriter(
sysout, StandardCharsets.UTF_8));
if (parentQueueName.equalsIgnoreCase(ALLTAG)) {
List<QueueInfo> queueInfos = client.getAllQueues();
if (queueInfos != null) {
printQueueInfos(writer, queueInfos);
exitCode = 0;
} else {
writer.println("Cannot get any queues from RM,please check.");
exitCode = -1;
}
} else {
List<QueueInfo> childQueueInfos = client.getChildQueueInfos(parentQueueName);
if (childQueueInfos != null) {
printQueueInfos(writer, childQueueInfos);
exitCode = 0;
} else {
writer.println("Cannot get any queues under " + parentQueueName + " from RM,please check.");
exitCode = -1;
}
}
writer.flush();
return exitCode;
}
private void printQueueInfo(PrintWriter writer, QueueInfo queueInfo) {
String schedulerType = queueInfo.getSchedulerType();
if (StringUtils.equals("FairScheduler", schedulerType)) {
printFairSchedulerQueue(writer, queueInfo);
} else {
printQueue(writer, queueInfo);
}
}
/**
* Print Queue information of FairScheduler.
*
* @param writer PrintWriter.
* @param queueInfo Queue Information.
*/
private void printFairSchedulerQueue(PrintWriter writer, QueueInfo queueInfo) {
String generateQueueInfoMessage = generateQueueInfoMessage(queueInfo);
writer.print(generateQueueInfoMessage);
}
private String generateQueueInfoMessage(QueueInfo queueInfo) {
StringBuilder stringBuilder = new StringBuilder();
if (queueInfo.getSchedulerType() != null) {
stringBuilder.append("Scheduler Name : ").append(queueInfo.getSchedulerType()).append("\n");
}
stringBuilder.append("Queue Name : ").append(queueInfo.getQueueName()).append("\n");
DecimalFormat df = new DecimalFormat("0.00");
stringBuilder.append("\tWeight : ").append(df.format(queueInfo.getWeight())).append("\n");
stringBuilder.append("\tState : ").append(queueInfo.getQueueState()).append("\n");
stringBuilder.append("\tMinResource : ").append("<memory : ")
.append(queueInfo.getMinResourceMemory()).append(", vCores:")
.append(queueInfo.getMinResourceVCore()).append(">").append("\n");
stringBuilder.append("\tMaxResource : ").append("<memory : ")
.append(queueInfo.getMaxResourceMemory()).append(", vCores:")
.append(queueInfo.getMaxResourceVCore()).append(">").append("\n");
stringBuilder.append("\tReservedResource : ").append("<memory : ")
.append(queueInfo.getReservedResourceMemory()).append(", vCores:")
.append(queueInfo.getReservedResourceVCore()).append(">").append("\n");
stringBuilder.append("\tSteadyFairShare : ").append("<memory : ")
.append(queueInfo.getSteadyFairShareMemory()).append(", vCores:")
.append(queueInfo.getSteadyFairShareVCore()).append(">").append("\n");
Boolean queuePreemption = queueInfo.getPreemptionDisabled();
if (queuePreemption != null) {
stringBuilder.append("\tQueue Preemption : ")
.append(queuePreemption ? "enabled" : "disabled").append("\n");
}
return stringBuilder.toString();
}
/**
* Print Queue information.
*
* @param writer PrintWriter.
* @param queueInfo Queue Information.
*/
private void printQueue(PrintWriter writer, QueueInfo queueInfo) {
if (queueInfo.getSchedulerType() != null) {
writer.print("Scheduler Name : ");
writer.println(queueInfo.getSchedulerType());
}
writer.print("Queue Name : ");
writer.println(queueInfo.getQueueName());
writer.print("Queue Path : ");
writer.println(queueInfo.getQueuePath());
writer.print("\tState : ");
writer.println(queueInfo.getQueueState());
DecimalFormat df = new DecimalFormat("#.00");
writer.print("\tCapacity : ");
writer.println(df.format(queueInfo.getCapacity() * 100) + "%");
writer.print("\tCurrent Capacity : ");
writer.println(df.format(queueInfo.getCurrentCapacity() * 100) + "%");
writer.print("\tMaximum Capacity : ");
writer.println(df.format(queueInfo.getMaximumCapacity() * 100) + "%");
writer.print("\tWeight : ");
writer.println(df.format(queueInfo.getWeight()));
writer.print("\tMaximum Parallel Apps : ");
writer.println(queueInfo.getMaxParallelApps());
writer.print("\tDefault Node Label expression : ");
String nodeLabelExpression = queueInfo.getDefaultNodeLabelExpression();
nodeLabelExpression =
(nodeLabelExpression == null || nodeLabelExpression.trim().isEmpty())
? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : nodeLabelExpression;
writer.println(nodeLabelExpression);
Set<String> nodeLabels = queueInfo.getAccessibleNodeLabels();
StringBuilder labelList = new StringBuilder();
writer.print("\tAccessible Node Labels : ");
for (String nodeLabel : nodeLabels) {
if (labelList.length() > 0) {
labelList.append(',');
}
labelList.append(nodeLabel);
}
writer.println(labelList);
Boolean preemptStatus = queueInfo.getPreemptionDisabled();
if (preemptStatus != null) {
writer.print("\tPreemption : ");
writer.println(preemptStatus ? "disabled" : "enabled");
}
Boolean intraQueuePreemption = queueInfo.getIntraQueuePreemptionDisabled();
if (intraQueuePreemption != null) {
writer.print("\tIntra-queue Preemption : ");
writer.println(intraQueuePreemption ? "disabled" : "enabled");
}
}
private void printQueueInfos(PrintWriter writer, List<QueueInfo> queueInfos) {
String titleString = queueInfos.size() + " queues were found";
List<String> headerStrings = Arrays.asList("Queue Name", "Queue Path", "State", "Capacity",
"Current Capacity", "Maximum Capacity", "Weight", "Maximum Parallel Apps");
FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(titleString)
.addHeaders(headerStrings);
DecimalFormat df = new DecimalFormat("#.00");
for (QueueInfo queueInfo : queueInfos) {
formattingCLIUtils.addLine(queueInfo.getQueueName(), queueInfo.getQueuePath(),
queueInfo.getQueueState(), df.format(queueInfo.getCapacity() * 100) + "%",
df.format(queueInfo.getCurrentCapacity() * 100) + "%",
df.format(queueInfo.getMaximumCapacity() * 100) + "%",
df.format(queueInfo.getWeight()),
queueInfo.getMaxParallelApps());
}
writer.print(formattingCLIUtils.render());
}
}
|
QueueCLI
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/DefaultConstructorInjectionPoint.java
|
{
"start": 1870,
"end": 4218
}
|
class ____
* @param annotationMetadata The annotation metadata
* @param arguments The arguments
*/
DefaultConstructorInjectionPoint(
BeanDefinition<T> declaringBean,
Class<T> declaringType,
AnnotationMetadata annotationMetadata,
Argument<?>[] arguments) {
this.argTypes = Argument.toClassArray(arguments);
this.declaringBean = declaringBean;
this.declaringType = declaringType;
if (!(annotationMetadata instanceof DefaultAnnotationMetadata)) {
this.annotationMetadata = AnnotationMetadata.EMPTY_METADATA;
} else {
if (annotationMetadata.hasPropertyExpressions()) {
this.annotationMetadata = new ConstructorAnnotationMetadata((DefaultAnnotationMetadata) annotationMetadata);
} else {
this.annotationMetadata = annotationMetadata;
}
}
this.arguments = arguments == null ? Argument.ZERO_ARGUMENTS : arguments;
}
@Override
public final boolean hasPropertyExpressions() {
return annotationMetadata.hasPropertyExpressions();
}
@Override
public void configure(Environment environment) {
this.environment = environment;
}
@Override
public AnnotationMetadata getAnnotationMetadata() {
return annotationMetadata;
}
@Override
@NonNull
public Argument<?>[] getArguments() {
return arguments;
}
@Override
@NonNull
public BeanDefinition<T> getDeclaringBean() {
return declaringBean;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DefaultConstructorInjectionPoint<?> that = (DefaultConstructorInjectionPoint<?>) o;
return Objects.equals(declaringType, that.declaringType) &&
Arrays.equals(argTypes, that.argTypes);
}
@Override
public int hashCode() {
return ObjectUtils.hash(declaringType, argTypes);
}
@Override
public String toString() {
return declaringType.getName() + "(" + Argument.toString(arguments) + ")";
}
/**
* Internal environment aware annotation metadata delegate.
*/
private final
|
type
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RetrofitTest.java
|
{
"start": 4254,
"end": 4316
}
|
interface ____ {
@GET("/")
void nope();
}
|
VoidService
|
java
|
elastic__elasticsearch
|
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ESSolrSynonymParserTests.java
|
{
"start": 1236,
"end": 3391
}
|
class ____ extends ESTokenStreamTestCase {
public void testLenientParser() throws IOException, ParseException {
ESSolrSynonymParser parser = new ESSolrSynonymParser(true, false, true, new StandardAnalyzer());
String rules = """
&,and
come,advance,approach
""";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("approach quietly then advance & destroy"));
TokenStream ts = new SynonymFilter(tokenizer, synonymMap, false);
assertTokenStreamContents(ts, new String[] { "come", "quietly", "then", "come", "destroy" });
}
public void testLenientParserWithSomeIncorrectLines() throws IOException, ParseException {
CharArraySet stopSet = new CharArraySet(1, true);
stopSet.add("bar");
ESSolrSynonymParser parser = new ESSolrSynonymParser(true, false, true, new StandardAnalyzer(stopSet));
String rules = "foo,bar,baz";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("first word is foo, then bar and lastly baz"));
TokenStream ts = new SynonymFilter(new StopFilter(tokenizer, stopSet), synonymMap, false);
assertTokenStreamContents(ts, new String[] { "first", "word", "is", "foo", "then", "and", "lastly", "foo" });
}
public void testNonLenientParser() {
ESSolrSynonymParser parser = new ESSolrSynonymParser(true, false, false, new StandardAnalyzer());
String rules = """
&,and=>and
come,advance,approach
""";
StringReader rulesReader = new StringReader(rules);
ParseException ex = expectThrows(ParseException.class, () -> parser.parse(rulesReader));
assertThat(ex.getMessage(), containsString("Invalid synonym rule at line 1"));
}
}
|
ESSolrSynonymParserTests
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/ContextConfigurationTestClassScopedExtensionContextNestedTests.java
|
{
"start": 3394,
"end": 3994
}
|
class ____ {
@Autowired(required = false)
@Qualifier("foo")
String localFoo;
@Autowired
String bar;
@Test
void test() {
// Since the configuration is inherited, the foo field in the outer instance
// and the bar field in the inner instance should both have been injected
// from the test ApplicationContext for the outer instance.
assertThat(foo).isEqualTo(FOO);
assertThat(this.localFoo).isEqualTo(FOO);
assertThat(this.bar).isEqualTo(FOO);
}
@Nested
@NestedTestConfiguration(OVERRIDE)
@SpringJUnitConfig(NestedConfig.class)
|
NestedWithInheritedConfigTests
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/CamelEvent.java
|
{
"start": 6025,
"end": 6205
}
|
interface ____ extends CamelContextEvent {
@Override
default Type getType() {
return Type.RoutesStopped;
}
}
|
CamelContextRoutesStoppedEvent
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/criteria/JpaInPredicate.java
|
{
"start": 287,
"end": 854
}
|
interface ____<T> extends JpaPredicate, CriteriaBuilder.In<T> {
/**
* Return the expression to be tested against the
* list of values.
* @return expression
*/
@Override
JpaExpression<T> getExpression();
/**
* Add to list of values to be tested against.
* @param value value
* @return in predicate
*/
@Override
JpaInPredicate<T> value(@NonNull T value);
/**
* Add to list of values to be tested against.
* @param value expression
* @return in predicate
*/
JpaInPredicate<T> value(JpaExpression<? extends T> value);
}
|
JpaInPredicate
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.